Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio
new file mode 100644
index 0000000..6349749
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-mdio
@@ -0,0 +1,9 @@
+What:		/sys/bus/mdio_bus/devices/.../phy_id
+Date:		November 2012
+KernelVersion:	3.8
+Contact:	netdev@vger.kernel.org
+Description:
+		This attribute contains the 32-bit PHY Identifier as reported
+		by the device during bus enumeration, encoded in hexadecimal.
+		This ID is used to match the device with the appropriate
+		driver.
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index 38dd762de..bdc0070 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/testing/sysfs-class-net-batman-adv
@@ -1,4 +1,10 @@
 
+What:           /sys/class/net/<iface>/batman-adv/iface_status
+Date:           May 2010
+Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Description:
+                Indicates the status of <iface> as it is seen by batman.
+
 What:           /sys/class/net/<iface>/batman-adv/mesh_iface
 Date:           May 2010
 Contact:        Marek Lindner <lindner_marek@yahoo.de>
@@ -7,8 +13,3 @@
                 displays the batman mesh interface this <iface>
                 currently is associated with.
 
-What:           /sys/class/net/<iface>/batman-adv/iface_status
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Indicates the status of <iface> as it is seen by batman.
diff --git a/Documentation/ABI/testing/sysfs-class-net-grcan b/Documentation/ABI/testing/sysfs-class-net-grcan
new file mode 100644
index 0000000..f418c92
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-grcan
@@ -0,0 +1,35 @@
+
+What:		/sys/class/net/<iface>/grcan/enable0
+Date:		October 2012
+KernelVersion:	3.8
+Contact:	Andreas Larsson <andreas@gaisler.com>
+Description:
+		Hardware configuration of physical interface 0. This file reads
+		and writes the "Enable 0" bit of the configuration register.
+		Possible values: 0 or 1. See the GRCAN chapter of the GRLIB IP
+		core library documentation for details. The default value is 0
+		or set by the module parameter grcan.enable0 and can be read at
+		/sys/module/grcan/parameters/enable0.
+
+What:		/sys/class/net/<iface>/grcan/enable1
+Date:		October 2012
+KernelVersion:	3.8
+Contact:	Andreas Larsson <andreas@gaisler.com>
+Description:
+		Hardware configuration of physical interface 1. This file reads
+		and writes the "Enable 1" bit of the configuration register.
+		Possible values: 0 or 1. See the GRCAN chapter of the GRLIB IP
+		core library documentation for details. The default value is 0
+		or set by the module parameter grcan.enable1 and can be read at
+		/sys/module/grcan/parameters/enable1.
+
+What:		/sys/class/net/<iface>/grcan/select
+Date:		October 2012
+KernelVersion:	3.8
+Contact:	Andreas Larsson <andreas@gaisler.com>
+Description:
+		Configuration of which physical interface is to be used. Possible
+		values: 0 or 1. See the GRCAN chapter of the GRLIB IP core
+		library documentation for details. The default value is 0 or is
+		set by the module parameter grcan.select and can be read at
+		/sys/module/grcan/parameters/select.
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index c81fe89..bc41da6 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -6,6 +6,14 @@
                 Indicates whether the batman protocol messages of the
                 mesh <mesh_iface> shall be aggregated or not.
 
+What:           /sys/class/net/<mesh_iface>/mesh/ap_isolation
+Date:           May 2011
+Contact:        Antonio Quartulli <ordex@autistici.org>
+Description:
+                Indicates whether the data traffic going from a
+                wireless client to another wireless client will be
+                silently dropped.
+
 What:           /sys/class/net/<mesh_iface>/mesh/bonding
 Date:           June 2010
 Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
@@ -31,14 +39,6 @@
                 mesh will be fragmented or silently discarded if the
                 packet size exceeds the outgoing interface MTU.
 
-What:		/sys/class/net/<mesh_iface>/mesh/ap_isolation
-Date:		May 2011
-Contact:	Antonio Quartulli <ordex@autistici.org>
-Description:
-		Indicates whether the data traffic going from a
-		wireless client to another wireless client will be
-		silently dropped.
-
 What:           /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
 Date:           October 2010
 Contact:        Marek Lindner <lindner_marek@yahoo.de>
@@ -60,6 +60,13 @@
                 Defines the selection criteria this node will use
                 to choose a gateway if gw_mode was set to 'client'.
 
+What:           /sys/class/net/<mesh_iface>/mesh/hop_penalty
+Date:           Oct 2010
+Contact:        Linus Lüssing <linus.luessing@web.de>
+Description:
+                Defines the penalty which will be applied to an
+                originator message's tq-field on every hop.
+
 What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
 Date:           May 2010
 Contact:        Marek Lindner <lindner_marek@yahoo.de>
@@ -67,19 +74,12 @@
                 Defines the interval in milliseconds in which batman
                 sends its protocol messages.
 
-What:           /sys/class/net/<mesh_iface>/mesh/hop_penalty
-Date:           Oct 2010
-Contact:        Linus Lüssing <linus.luessing@web.de>
+What:           /sys/class/net/<mesh_iface>/mesh/routing_algo
+Date:           Dec 2011
+Contact:        Marek Lindner <lindner_marek@yahoo.de>
 Description:
-		Defines the penalty which will be applied to an
-		originator message's tq-field on every hop.
-
-What:		/sys/class/net/<mesh_iface>/mesh/routing_algo
-Date:		Dec 2011
-Contact:	Marek Lindner <lindner_marek@yahoo.de>
-Description:
-		Defines the routing procotol this mesh instance
-		uses to find the optimal paths through the mesh.
+                Defines the routing protocol this mesh instance
+                uses to find the optimal paths through the mesh.
 
 What:           /sys/class/net/<mesh_iface>/mesh/vis_mode
 Date:           May 2010
diff --git a/Documentation/devicetree/bindings/net/can/grcan.txt b/Documentation/devicetree/bindings/net/can/grcan.txt
new file mode 100644
index 0000000..34ef349
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/grcan.txt
@@ -0,0 +1,28 @@
+Aeroflex Gaisler GRCAN and GRHCAN CAN controllers.
+
+The GRCAN and GRHCAN CAN controllers are available in the GRLIB VHDL IP core
+library.
+
+Note: These properties are built from the AMBA plug&play in a Leon SPARC system
+(the ordinary environment for GRCAN and GRHCAN). There are no dts files for
+sparc.
+
+Required properties:
+
+- name : Should be "GAISLER_GRCAN", "01_03d", "GAISLER_GRHCAN" or "01_034"
+
+- reg : Address and length of the register set for the device
+
+- freq : Frequency of the external oscillator clock in Hz (the frequency of
+	the amba bus in the ordinary case)
+
+- interrupts : Interrupt number for this device
+
+Optional properties:
+
+- systemid : If not present or if the value of the least significant 16 bits
+	of this 32-bit property is smaller than GRCAN_TXBUG_SAFE_GRLIB_VERSION,
+	a bug workaround is activated.
+
+For further information, see the documentation for the GRLIB IP core library:
+http://www.gaisler.com/products/grlib/grip.pdf
diff --git a/Documentation/devicetree/bindings/net/cdns-emac.txt b/Documentation/devicetree/bindings/net/cdns-emac.txt
new file mode 100644
index 0000000..09055c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cdns-emac.txt
@@ -0,0 +1,23 @@
+* Cadence EMAC Ethernet controller
+
+Required properties:
+- compatible: Should be "cdns,[<chip>-]{emac}"
+  Use "cdns,at91rm9200-emac" for the Atmel at91rm9200 SoC,
+  or the generic form: "cdns,emac".
+- reg: Address and length of the register set for the device
+- interrupts: Should contain macb interrupt
+- phy-mode: String, operation mode of the PHY interface.
+  Supported values are: "mii", "rmii".
+
+Optional properties:
+- local-mac-address: 6 bytes, mac address
+
+Examples:
+
+	macb0: ethernet@fffc4000 {
+		compatible = "cdns,at91rm9200-emac";
+		reg = <0xfffc4000 0x4000>;
+		interrupts = <21>;
+		phy-mode = "rmii";
+		local-mac-address = [3a 0e 03 04 05 06];
+	};
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index dcaabe9..6ddd028 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -9,21 +9,15 @@
 			  number
 - interrupt-parent	: The parent interrupt controller
 - cpdma_channels 	: Specifies number of channels in CPDMA
-- host_port_no		: Specifies host port shift
-- cpdma_reg_ofs		: Specifies CPDMA submodule register offset
-- cpdma_sram_ofs	: Specifies CPDMA SRAM offset
-- ale_reg_ofs		: Specifies ALE submodule register offset
 - ale_entries		: Specifies No of entries ALE can hold
-- host_port_reg_ofs	: Specifies host port register offset
-- hw_stats_reg_ofs	: Specifies hardware statistics register offset
-- bd_ram_ofs		: Specifies internal desciptor RAM offset
 - bd_ram_size		: Specifies internal descriptor RAM size
 - rx_descs		: Specifies number of Rx descriptors
 - mac_control		: Specifies Default MAC control register content
 			  for the specific platform
 - slaves		: Specifies number for slaves
-- slave_reg_ofs		: Specifies slave register offset
-- sliver_reg_ofs	: Specifies slave sliver register offset
+- cpts_active_slave	: Specifies the slave to use for time stamping
+- cpts_clock_mult	: Numerator to convert input clock ticks into nanoseconds
+- cpts_clock_shift	: Denominator to convert input clock ticks into nanoseconds
 - phy_id		: Specifies slave phy id
 - mac-address		: Specifies slave MAC address
 
@@ -45,30 +39,22 @@
 		interrupts = <55 0x4>;
 		interrupt-parent = <&intc>;
 		cpdma_channels = <8>;
-		host_port_no = <0>;
-		cpdma_reg_ofs = <0x800>;
-		cpdma_sram_ofs = <0xa00>;
-		ale_reg_ofs = <0xd00>;
 		ale_entries = <1024>;
-		host_port_reg_ofs = <0x108>;
-		hw_stats_reg_ofs = <0x900>;
-		bd_ram_ofs = <0x2000>;
 		bd_ram_size = <0x2000>;
 		no_bd_ram = <0>;
 		rx_descs = <64>;
 		mac_control = <0x20>;
 		slaves = <2>;
+		cpts_active_slave = <0>;
+		cpts_clock_mult = <0x80000000>;
+		cpts_clock_shift = <29>;
 		cpsw_emac0: slave@0 {
-			slave_reg_ofs = <0x208>;
-			sliver_reg_ofs = <0xd80>;
-			phy_id = "davinci_mdio.16:00";
+			phy_id = <&davinci_mdio>, <0>;
 			/* Filled in by U-Boot */
 			mac-address = [ 00 00 00 00 00 00 ];
 		};
 		cpsw_emac1: slave@1 {
-			slave_reg_ofs = <0x308>;
-			sliver_reg_ofs = <0xdc0>;
-			phy_id = "davinci_mdio.16:01";
+			phy_id = <&davinci_mdio>, <1>;
 			/* Filled in by U-Boot */
 			mac-address = [ 00 00 00 00 00 00 ];
 		};
@@ -79,30 +65,22 @@
 		compatible = "ti,cpsw";
 		ti,hwmods = "cpgmac0";
 		cpdma_channels = <8>;
-		host_port_no = <0>;
-		cpdma_reg_ofs = <0x800>;
-		cpdma_sram_ofs = <0xa00>;
-		ale_reg_ofs = <0xd00>;
 		ale_entries = <1024>;
-		host_port_reg_ofs = <0x108>;
-		hw_stats_reg_ofs = <0x900>;
-		bd_ram_ofs = <0x2000>;
 		bd_ram_size = <0x2000>;
 		no_bd_ram = <0>;
 		rx_descs = <64>;
 		mac_control = <0x20>;
 		slaves = <2>;
+		cpts_active_slave = <0>;
+		cpts_clock_mult = <0x80000000>;
+		cpts_clock_shift = <29>;
 		cpsw_emac0: slave@0 {
-			slave_reg_ofs = <0x208>;
-			sliver_reg_ofs = <0xd80>;
-			phy_id = "davinci_mdio.16:00";
+			phy_id = <&davinci_mdio>, <0>;
 			/* Filled in by U-Boot */
 			mac-address = [ 00 00 00 00 00 00 ];
 		};
 		cpsw_emac1: slave@1 {
-			slave_reg_ofs = <0x308>;
-			sliver_reg_ofs = <0xdc0>;
-			phy_id = "davinci_mdio.16:01";
+			phy_id = <&davinci_mdio>, <1>;
 			/* Filled in by U-Boot */
 			mac-address = [ 00 00 00 00 00 00 ];
 		};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9776f06..3da4f96 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -905,6 +905,24 @@
 	gpt		[EFI] Forces disk with valid GPT signature but
 			invalid Protective MBR to be treated as GPT.
 
+	grcan.enable0=	[HW] Configuration of physical interface 0. Determines
+			the "Enable 0" bit of the configuration register.
+			Format: 0 | 1
+			Default: 0
+	grcan.enable1=	[HW] Configuration of physical interface 1. Determines
+			the "Enable 1" bit of the configuration register.
+			Format: 0 | 1
+			Default: 0
+	grcan.select=	[HW] Select which physical interface to use.
+			Format: 0 | 1
+			Default: 0
+	grcan.txsize=	[HW] Sets the size of the tx buffer.
+			Format: <unsigned int> such that (txsize & ~0x1fffc0) == 0.
+			Default: 1024
+	grcan.rxsize=	[HW] Sets the size of the rx buffer.
+			Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
+			Default: 1024
+
 	hashdist=	[KNL,NUMA] Large hashes allocated during boot
 			are distributed across NUMA nodes.  Defaults on
 			for 64-bit NUMA, off otherwise.
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index a173d2a..c1d8204 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -203,7 +203,8 @@
 2 - Enable messages related to route added / changed / deleted
 4 - Enable messages related to translation table operations
 8 - Enable messages related to bridge loop avoidance
-15 - enable all messages
+16 - Enable messages related to DAT, ARP snooping and parsing
+31 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7fc107..98ac0d7 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1514,6 +1514,20 @@
 
 	Default: 1
 
+cookie_hmac_alg - STRING
+	Select the hmac algorithm used when generating the cookie value sent by
+	a listening sctp socket to a connecting client in the INIT-ACK chunk.
+	Valid values are:
+	* md5
+	* sha1
+	* none
+	The ability to assign md5 or sha1 as the selected algorithm is
+	predicated on the configuration of those algorithms at build time
+	(CONFIG_CRYPTO_MD5 and CONFIG_CRYPTO_SHA1).
+
+	Default: Dependent on configuration.  MD5 if available, else SHA1 if
+	available, else none.
+
 rcvbuf_policy - INTEGER
 	Determines if the receive buffer is attributed to the socket or to
 	association.   SCTP supports the capability to create multiple
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1c08a4b..94444b1 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -3,9 +3,9 @@
 --------------------------------------------------------------------------------
 
 This file documents the mmap() facility available with the PACKET
-socket interface on 2.4 and 2.6 kernels. This type of sockets is used for 
-capture network traffic with utilities like tcpdump or any other that needs
-raw access to network interface.
+socket interface on 2.4/2.6/3.x kernels. This type of socket is used for
+i) capturing network traffic with utilities like tcpdump, ii) transmitting
+network traffic, or any other application that needs raw access to a
+network interface.
 
 You can find the latest version of this document at:
     http://wiki.ipxwarzone.com/index.php5?title=Linux_packet_mmap
@@ -21,19 +21,18 @@
 + Why use PACKET_MMAP
 --------------------------------------------------------------------------------
 
-In Linux 2.4/2.6 if PACKET_MMAP is not enabled, the capture process is very
-inefficient. It uses very limited buffers and requires one system call
-to capture each packet, it requires two if you want to get packet's 
-timestamp (like libpcap always does).
+In Linux 2.4/2.6/3.x, if PACKET_MMAP is not enabled, the capture process is
+very inefficient. It uses very limited buffers and requires one system call
+to capture each packet, and two if you want to get the packet's timestamp
+(like libpcap always does).
 
 In the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size 
 configurable circular buffer mapped in user space that can be used to either
 send or receive packets. This way reading packets just needs to wait for them,
 most of the time there is no need to issue a single system call. Concerning
 transmission, multiple packets can be sent through one system call to get the
-highest bandwidth.
-By using a shared buffer between the kernel and the user also has the benefit
-of minimizing packet copies.
+highest bandwidth. Using a shared buffer between the kernel and the user
+also has the benefit of minimizing packet copies.
 
 It's fine to use PACKET_MMAP to improve the performance of the capture and
 transmission process, but it isn't everything. At least, if you are capturing
@@ -41,7 +40,8 @@
 device driver of your network interface card supports some sort of interrupt
 load mitigation or (even better) if it supports NAPI, also make sure it is
 enabled. For transmission, check the MTU (Maximum Transmission Unit) used and
-supported by devices of your network.
+supported by devices of your network. CPU IRQ pinning of your network interface
+card can also be an advantage.
 
 --------------------------------------------------------------------------------
 + How to use mmap() to improve capture process
@@ -87,9 +87,7 @@
 socket creation and destruction is straight forward, and is done 
 the same way with or without PACKET_MMAP:
 
-int fd;
-
-fd= socket(PF_PACKET, mode, htons(ETH_P_ALL))
+ int fd = socket(PF_PACKET, mode, htons(ETH_P_ALL));
 
 where mode is SOCK_RAW for the raw interface were link level
 information can be captured or SOCK_DGRAM for the cooked
@@ -163,11 +161,23 @@
 
  A complete tutorial is available at: http://wiki.gnu-log.net/
 
+By default, the user should put data at:
+ frame base + TPACKET_HDRLEN - sizeof(struct sockaddr_ll)
+
+So, whatever you choose for the socket mode (SOCK_DGRAM or SOCK_RAW),
+the beginning of the user data will be at:
+ frame base + TPACKET_ALIGN(sizeof(struct tpacket_hdr))
+
+If you wish to put user data at a custom offset from the beginning of
+the frame (for payload alignment with SOCK_RAW mode for instance) you
+can set tp_net (with SOCK_DGRAM) or tp_mac (with SOCK_RAW). In order
+to make this work it must be enabled previously with setsockopt()
+and the PACKET_TX_HAS_OFF option.
+
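+A minimal sketch of how this could be used, assuming a TPACKET_V2 TX ring
+(frame_base and custom_offset below are placeholders, error handling
+omitted):
+
+ int on = 1;
+ struct tpacket2_hdr *hdr = frame_base;	/* start of a TX ring frame */
+
+ setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF, &on, sizeof(on));
+ hdr->tp_mac = custom_offset;	/* used with SOCK_RAW */
+ hdr->tp_net = custom_offset;	/* used with SOCK_DGRAM */
+ /* then copy the payload to frame base + custom_offset as usual */
+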
 --------------------------------------------------------------------------------
 + PACKET_MMAP settings
 --------------------------------------------------------------------------------
 
-
 To setup PACKET_MMAP from user level code is done with a call like
 
  - Capture process
@@ -201,7 +211,6 @@
 
     frames_per_block * tp_block_nr == tp_frame_nr
 
-
 Lets see an example, with the following values:
 
      tp_block_size= 4096
@@ -227,7 +236,6 @@
 account when choosing the frame_size. See "Mapping and use of the circular 
 buffer (ring)".
 
-
 --------------------------------------------------------------------------------
 + PACKET_MMAP setting constraints
 --------------------------------------------------------------------------------
@@ -264,7 +272,6 @@
 The pagesize can also be determined dynamically with the getpagesize (2) 
 system call. 
 
-
  Block number limit
 --------------------
 
@@ -284,7 +291,6 @@
       v  block #2
      block #1
 
-
 kmalloc allocates any number of bytes of physically contiguous memory from 
 a pool of pre-determined sizes. This pool of memory is maintained by the slab 
 allocator which is at the end the responsible for doing the allocation and 
@@ -299,7 +305,6 @@
 
      131072/4 = 32768 blocks
 
-
  PACKET_MMAP buffer size calculator
 ------------------------------------
 
@@ -340,7 +345,6 @@
 and hence the buffer will have a 262144 MiB size. So it can hold 
 262144 MiB / 2048 bytes = 134217728 frames
 
-
 Actually, this buffer size is not possible with an i386 architecture. 
 Remember that the memory is allocated in kernel space, in the case of 
 an i386 kernel's memory size is limited to 1GiB.
@@ -372,7 +376,6 @@
    - Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
    - Pad to align to TPACKET_ALIGNMENT=16
  */
-           
  
  The following are conditions that are checked in packet_set_ring
 
@@ -413,7 +416,6 @@
      #define TP_STATUS_LOSING        4 
      #define TP_STATUS_CSUMNOTREADY  8 
 
-
 TP_STATUS_COPY        : This flag indicates that the frame (and associated
                         meta information) has been truncated because it's 
                         larger than tp_frame_size. This packet can be 
@@ -462,7 +464,6 @@
 It doesn't incur in a race condition to first check the status value and 
 then poll for frames.
 
-
 ++ Transmission process
 Those defines are also used for transmission:
 
@@ -494,6 +495,196 @@
     retval = poll(&pfd, 1, timeout);
 
 -------------------------------------------------------------------------------
++ What TPACKET versions are available and when to use them?
+-------------------------------------------------------------------------------
+
+ int val = tpacket_version;
+ socklen_t len = sizeof(val);
+
+ setsockopt(fd, SOL_PACKET, PACKET_VERSION, &val, sizeof(val));
+ getsockopt(fd, SOL_PACKET, PACKET_VERSION, &val, &len);
+
+where 'tpacket_version' can be TPACKET_V1 (default), TPACKET_V2, TPACKET_V3.
+
+TPACKET_V1:
+	- Default if not otherwise specified by setsockopt(2)
+	- RX_RING, TX_RING available
+	- VLAN metadata information available for packets
+	  (TP_STATUS_VLAN_VALID)
+
+TPACKET_V1 --> TPACKET_V2:
+	- Made 64 bit clean due to unsigned long usage in TPACKET_V1
+	  structures, thus this also works on 64 bit kernel with 32 bit
+	  userspace and the like
+	- Timestamp resolution in nanoseconds instead of microseconds
+	- RX_RING, TX_RING available
+	- How to switch to TPACKET_V2:
+		1. Replace struct tpacket_hdr by struct tpacket2_hdr
+		2. Query header len and save
+		3. Set protocol version to 2, set up ring as usual
+		4. For getting the sockaddr_ll,
+		   use (void *)hdr + TPACKET_ALIGN(hdrlen) instead of
+		   (void *)hdr + TPACKET_ALIGN(sizeof(struct tpacket_hdr))
+
+TPACKET_V2 --> TPACKET_V3:
+	- Flexible buffer implementation:
+		1. Blocks can be configured with non-static frame-size
+		2. Read/poll is at a block-level (as opposed to packet-level)
+		3. Added poll timeout to avoid indefinite user-space wait
+		   on idle links
+		4. Added user-configurable knobs:
+			4.1 block::timeout
+			4.2 tpkt_hdr::sk_rxhash
+	- RX Hash data available in user space
+	- Currently only RX_RING available
+
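+As a short illustration of the switch steps above, selecting TPACKET_V2 and
+querying its header length could look like this (a sketch only, error
+handling omitted):
+
+ int val = TPACKET_V2;
+ socklen_t len = sizeof(val);
+
+ setsockopt(fd, SOL_PACKET, PACKET_VERSION, &val, sizeof(val));
+ /* PACKET_HDRLEN takes the version in val and returns the header length */
+ getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len);
+ /* the sockaddr_ll then starts at (void *)hdr + TPACKET_ALIGN(val) */
+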
+-------------------------------------------------------------------------------
++ AF_PACKET fanout mode
+-------------------------------------------------------------------------------
+
+In the AF_PACKET fanout mode, packet reception can be load balanced among
+processes. This also works in combination with mmap(2) on packet sockets.
+
+Minimal example code by David S. Miller (try things like "./test eth0 hash",
+"./test eth0 lb", etc.):
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+
+#include <unistd.h>
+
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+
+#include <net/if.h>
+
+static const char *device_name;
+static int fanout_type;
+static int fanout_id;
+
+#ifndef PACKET_FANOUT
+# define PACKET_FANOUT			18
+# define PACKET_FANOUT_HASH		0
+# define PACKET_FANOUT_LB		1
+#endif
+
+static int setup_socket(void)
+{
+	int err, fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_IP));
+	struct sockaddr_ll ll;
+	struct ifreq ifr;
+	int fanout_arg;
+
+	if (fd < 0) {
+		perror("socket");
+		return EXIT_FAILURE;
+	}
+
+	memset(&ifr, 0, sizeof(ifr));
+	strcpy(ifr.ifr_name, device_name);
+	err = ioctl(fd, SIOCGIFINDEX, &ifr);
+	if (err < 0) {
+		perror("SIOCGIFINDEX");
+		return EXIT_FAILURE;
+	}
+
+	memset(&ll, 0, sizeof(ll));
+	ll.sll_family = AF_PACKET;
+	ll.sll_ifindex = ifr.ifr_ifindex;
+	err = bind(fd, (struct sockaddr *) &ll, sizeof(ll));
+	if (err < 0) {
+		perror("bind");
+		return EXIT_FAILURE;
+	}
+
+	fanout_arg = (fanout_id | (fanout_type << 16));
+	err = setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
+			 &fanout_arg, sizeof(fanout_arg));
+	if (err) {
+		perror("setsockopt");
+		return EXIT_FAILURE;
+	}
+
+	return fd;
+}
+
+static void fanout_thread(void)
+{
+	int fd = setup_socket();
+	int limit = 10000;
+
+	if (fd < 0)
+		exit(fd);
+
+	while (limit-- > 0) {
+		char buf[1600];
+		int err;
+
+		err = read(fd, buf, sizeof(buf));
+		if (err < 0) {
+			perror("read");
+			exit(EXIT_FAILURE);
+		}
+		if ((limit % 10) == 0)
+			fprintf(stdout, "(%d) \n", getpid());
+	}
+
+	fprintf(stdout, "%d: Received 10000 packets\n", getpid());
+
+	close(fd);
+	exit(0);
+}
+
+int main(int argc, char **argp)
+{
+	int fd, err;
+	int i;
+
+	if (argc != 3) {
+		fprintf(stderr, "Usage: %s INTERFACE {hash|lb}\n", argp[0]);
+		return EXIT_FAILURE;
+	}
+
+	if (!strcmp(argp[2], "hash"))
+		fanout_type = PACKET_FANOUT_HASH;
+	else if (!strcmp(argp[2], "lb"))
+		fanout_type = PACKET_FANOUT_LB;
+	else {
+		fprintf(stderr, "Unknown fanout type [%s]\n", argp[2]);
+		exit(EXIT_FAILURE);
+	}
+
+	device_name = argp[1];
+	fanout_id = getpid() & 0xffff;
+
+	for (i = 0; i < 4; i++) {
+		pid_t pid = fork();
+
+		switch (pid) {
+		case 0:
+			fanout_thread();
+
+		case -1:
+			perror("fork");
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	for (i = 0; i < 4; i++) {
+		int status;
+
+		wait(&status);
+	}
+
+	return 0;
+}
+
+-------------------------------------------------------------------------------
 + PACKET_TIMESTAMP
 -------------------------------------------------------------------------------
 
@@ -519,6 +710,13 @@
 See include/linux/net_tstamp.h and Documentation/networking/timestamping
 for more information on hardware timestamps.
 
+-------------------------------------------------------------------------------
++ Miscellaneous bits
+-------------------------------------------------------------------------------
+
+- Packet sockets work well together with Linux socket filters, thus you also
+  might want to have a look at Documentation/networking/filter.txt
+
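+- As a rough sketch, attaching a classic BPF filter to a packet socket is
+  done with setsockopt() and SO_ATTACH_FILTER (structures come from
+  <linux/filter.h>; the one-instruction filter below simply accepts every
+  packet and is only a placeholder):
+
+	struct sock_filter insns[] = {
+		{ 0x06, 0, 0, 0x0000ffff },	/* BPF_RET | BPF_K: accept packet */
+	};
+	struct sock_fprog prog = { .len = 1, .filter = insns };
+
+	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
+
+  Recent kernels can also return the attached filter through getsockopt()
+  with SO_GET_FILTER (defined as an alias of SO_ATTACH_FILTER in the
+  socket.h headers touched by this merge).
+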
 --------------------------------------------------------------------------------
 + THANKS
 --------------------------------------------------------------------------------
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index ef9ee71..f9fa6db 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -29,11 +29,9 @@
 	dma_txsize: DMA tx ring size;
 	buf_sz: DMA buffer size;
 	tc: control the HW FIFO threshold;
-	tx_coe: Enable/Disable Tx Checksum Offload engine;
 	watchdog: transmit timeout (in milliseconds);
 	flow_ctrl: Flow control ability [on/off];
 	pause: Flow Control Pause Time;
-	tmrate: timer period (only if timer optimisation is configured).
 
 3) Command line options
 Driver parameters can be also passed in command line by using:
@@ -60,17 +58,19 @@
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
-4.3) Timer-Driver Interrupt
-Instead of having the device that asynchronously notifies the frame receptions,
-the driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the
-device will experience different levels of latency. Some NICs have dedicated
-timer device to perform this task. STMMAC can use either the RTC device or the
-TMU channel 2  on STLinux platforms.
-The timers frequency can be passed to the driver as parameter; when change it,
-take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to
-spare the CPU while having the maximum throughput.
+4.3) Interrupt Mitigation
+The driver is able to mitigate the number of its DMA interrupts by using
+NAPI for reception on chips older than 3.50. Newer chips have a HW
+RX-Watchdog used for this mitigation.
+
+On the Tx side, the mitigation scheme is based on a SW timer that calls
+the tx function (stmmac_tx) to reclaim resources after the frames have
+been transmitted.
+There is also a threshold-like parameter used when programming the
+descriptors to avoid setting the interrupt-on-completion bit for every
+frame that is sent (xmit).
+
+Mitigation parameters can be tuned by ethtool.
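+
+For example, the Tx mitigation parameters could be adjusted from user space
+through the standard ethtool coalescing ioctl (a sketch using
+<linux/ethtool.h>; the field names are the generic ethtool ones and the
+values are only examples):
+
+	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+	strcpy(ifr.ifr_name, "eth0");		/* interface name is an example */
+	ifr.ifr_data = (void *)&ec;
+	ioctl(fd, SIOCETHTOOL, &ifr);		/* fd: any open socket */
+	ec.cmd = ETHTOOL_SCOALESCE;
+	ec.tx_coalesce_usecs = 40;
+	ec.tx_max_coalesced_frames = 16;
+	ioctl(fd, SIOCETHTOOL, &ifr);		/* apply the new values */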
 
 4.4) WOL
 Wake up on Lan feature through Magic and Unicast frames are supported for the
@@ -121,6 +121,7 @@
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
+	int riwt_off;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
 	int (*init)(struct platform_device *pdev);
@@ -156,6 +157,7 @@
  o pmt: core has the embedded power module (optional).
  o force_sf_dma_mode: force DMA to use the Store and Forward mode
 		     instead of the Threshold.
+ o riwt_off: forces disabling of the RX watchdog feature and switches to NAPI mode.
  o fix_mac_speed: this callback is used for modifying some syscfg registers
 		 (on ST SoCs) according to the link speed negotiated by the
 		 physical layer .
diff --git a/MAINTAINERS b/MAINTAINERS
index 9386a63..5d72dd5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3936,7 +3936,9 @@
 M:	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
 M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
+M:	Tushar Dave <tushar.n.dave@intel.com>
 L:	e1000-devel@lists.sourceforge.net
+W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
@@ -5062,7 +5064,7 @@
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
-W:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:	Maintained
@@ -5122,6 +5124,7 @@
 NETWORKING DRIVERS
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:	Odd Fixes
@@ -5154,6 +5157,7 @@
 F:	include/linux/nfc.h
 F:	include/net/nfc/
 F:	drivers/nfc/
+F:	include/linux/platform_data/pn544.h
 
 NFS, SUNRPC, AND LOCKD CLIENTS
 M:	Trond Myklebust <Trond.Myklebust@netapp.com>
@@ -6423,6 +6427,7 @@
 SCTP PROTOCOL
 M:	Vlad Yasevich <vyasevich@gmail.com>
 M:	Sridhar Samudrala <sri@us.ibm.com>
+M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
 S:	Maintained
@@ -7572,6 +7577,12 @@
 F:	Documentation/usb/acm.txt
 F:	drivers/usb/class/cdc-acm.*
 
+USB AR5523 WIRELESS DRIVER
+M:	Pontus Fuchs <pontus.fuchs@gmail.com>
+L:	linux-wireless@vger.kernel.org
+S:	Maintained
+F:	drivers/net/wireless/ath/ar5523/
+
 USB ATTACHED SCSI
 M:	Matthew Wilcox <willy@linux.intel.com>
 M:	Sarah Sharp <sarah.a.sharp@linux.intel.com>
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 7d2f75b..0087d05 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -47,6 +47,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index c634f87..4fcd218 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -78,3 +78,11 @@
 		};
 	};
 };
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+};
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 185d632..366d929 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -118,3 +118,11 @@
 		};
 	};
 };
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index bb31bff..a4615b4 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -210,5 +210,53 @@
 			interrupt-parent = <&intc>;
 			interrupts = <91>;
 		};
+
+		mac: ethernet@4a100000 {
+			compatible = "ti,cpsw";
+			ti,hwmods = "cpgmac0";
+			cpdma_channels = <8>;
+			ale_entries = <1024>;
+			bd_ram_size = <0x2000>;
+			no_bd_ram = <0>;
+			rx_descs = <64>;
+			mac_control = <0x20>;
+			slaves = <2>;
+			cpts_active_slave = <0>;
+			cpts_clock_mult = <0x80000000>;
+			cpts_clock_shift = <29>;
+			reg = <0x4a100000 0x800
+			       0x4a101200 0x100>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&intc>;
+			/*
+			 * c0_rx_thresh_pend
+			 * c0_rx_pend
+			 * c0_tx_pend
+			 * c0_misc_pend
+			 */
+			interrupts = <40 41 42 43>;
+			ranges;
+
+			davinci_mdio: mdio@4a101000 {
+				compatible = "ti,davinci_mdio";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				ti,hwmods = "davinci_mdio";
+				bus_freq = <1000000>;
+				reg = <0x4a101000 0x100>;
+			};
+
+			cpsw_emac0: slave@4a100200 {
+				/* Filled in by U-Boot */
+				mac-address = [ 00 00 00 00 00 00 ];
+			};
+
+			cpsw_emac1: slave@4a100300 {
+				/* Filled in by U-Boot */
+				mac-address = [ 00 00 00 00 00 00 ];
+			};
+
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index f3990b0..3290e61 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -580,6 +580,7 @@
 							66  0x1b0b0	/* MX6Q_PAD_RGMII_RD2__ENET_RGMII_RD2 */
 							70  0x1b0b0	/* MX6Q_PAD_RGMII_RD3__ENET_RGMII_RD3 */
 							48  0x1b0b0	/* MX6Q_PAD_RGMII_RX_CTL__RGMII_RX_CTL */
+							1033 0x4001b0a8	/* MX6Q_PAD_GPIO_16__ENET_ANATOP_ETHERNET_REF_OUT*/
 						>;
 					};
 
@@ -833,8 +834,8 @@
 				compatible = "fsl,imx6q-fec";
 				reg = <0x02188000 0x4000>;
 				interrupts = <0 118 0x04 0 119 0x04>;
-				clocks = <&clks 117>, <&clks 117>;
-				clock-names = "ipg", "ahb";
+				clocks = <&clks 117>, <&clks 117>, <&clks 177>;
+				clock-names = "ipg", "ahb", "ptp";
 				status = "disabled";
 			};
 
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 6230304..a1dc5c0 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -240,3 +240,6 @@
 CONFIG_CRC7=y
 CONFIG_LIBCRC32C=y
 CONFIG_SOC_OMAP5=y
+CONFIG_TI_DAVINCI_MDIO=y
+CONFIG_TI_DAVINCI_CPDMA=y
+CONFIG_TI_CPSW=y
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 0436242..9285768 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -39,7 +39,6 @@
 config SOC_AT91SAM9260
 	bool "AT91SAM9260, AT91SAM9XE or AT91SAM9G20"
 	select HAVE_AT91_DBGU0
-	select HAVE_NET_MACB
 	select SOC_AT91SAM9
 	help
 	  Select this if you are using one of Atmel's AT91SAM9260, AT91SAM9XE
@@ -57,7 +56,6 @@
 	bool "AT91SAM9263"
 	select HAVE_AT91_DBGU1
 	select HAVE_FB_ATMEL
-	select HAVE_NET_MACB
 	select SOC_AT91SAM9
 
 config SOC_AT91SAM9RL
@@ -70,7 +68,6 @@
 	bool "AT91SAM9G45 or AT91SAM9M10 families"
 	select HAVE_AT91_DBGU1
 	select HAVE_FB_ATMEL
-	select HAVE_NET_MACB
 	select SOC_AT91SAM9
 	help
 	  Select this if you are using one of Atmel's AT91SAM9G45 family SoC.
@@ -80,7 +77,6 @@
 	bool "AT91SAM9x5 family"
 	select HAVE_AT91_DBGU0
 	select HAVE_FB_ATMEL
-	select HAVE_NET_MACB
 	select SOC_AT91SAM9
 	help
 	  Select this if you are using one of Atmel's AT91SAM9x5 family SoC.
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index 3e37437..aa9b320 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -53,6 +53,8 @@
 static struct macb_platform_data __initdata csb337_eth_data = {
 	.phy_irq_pin	= AT91_PIN_PC2,
 	.is_rmii	= 0,
+	/* The CSB337 bootloader stores the MAC the wrong way around */
+	.rev_eth_addr	= 1,
 };
 
 static struct at91_usbh_data __initdata csb337_usbh_data = {
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200_emac.h b/arch/arm/mach-at91/include/mach/at91rm9200_emac.h
deleted file mode 100644
index b8260cd..0000000
--- a/arch/arm/mach-at91/include/mach/at91rm9200_emac.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91rm9200_emac.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Ethernet MAC registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91RM9200_EMAC_H
-#define AT91RM9200_EMAC_H
-
-#define	AT91_EMAC_CTL		0x00	/* Control Register */
-#define		AT91_EMAC_LB		(1 <<  0)	/* Loopback */
-#define		AT91_EMAC_LBL		(1 <<  1)	/* Loopback Local */
-#define		AT91_EMAC_RE		(1 <<  2)	/* Receive Enable */
-#define		AT91_EMAC_TE		(1 <<  3)	/* Transmit Enable */
-#define		AT91_EMAC_MPE		(1 <<  4)	/* Management Port Enable */
-#define		AT91_EMAC_CSR		(1 <<  5)	/* Clear Statistics Registers */
-#define		AT91_EMAC_INCSTAT	(1 <<  6)	/* Increment Statistics Registers */
-#define		AT91_EMAC_WES		(1 <<  7)	/* Write Enable for Statistics Registers */
-#define		AT91_EMAC_BP		(1 <<  8)	/* Back Pressure */
-
-#define	AT91_EMAC_CFG		0x04	/* Configuration Register */
-#define		AT91_EMAC_SPD		(1 <<  0)	/* Speed */
-#define		AT91_EMAC_FD		(1 <<  1)	/* Full Duplex */
-#define		AT91_EMAC_BR		(1 <<  2)	/* Bit Rate */
-#define		AT91_EMAC_CAF		(1 <<  4)	/* Copy All Frames */
-#define		AT91_EMAC_NBC		(1 <<  5)	/* No Broadcast */
-#define		AT91_EMAC_MTI		(1 <<  6)	/* Multicast Hash Enable */
-#define		AT91_EMAC_UNI		(1 <<  7)	/* Unicast Hash Enable */
-#define		AT91_EMAC_BIG		(1 <<  8)	/* Receive 1522 Bytes */
-#define		AT91_EMAC_EAE		(1 <<  9)	/* External Address Match Enable */
-#define		AT91_EMAC_CLK		(3 << 10)	/* MDC Clock Divisor */
-#define		AT91_EMAC_CLK_DIV8		(0 << 10)
-#define		AT91_EMAC_CLK_DIV16		(1 << 10)
-#define		AT91_EMAC_CLK_DIV32		(2 << 10)
-#define		AT91_EMAC_CLK_DIV64		(3 << 10)
-#define		AT91_EMAC_RTY		(1 << 12)	/* Retry Test */
-#define		AT91_EMAC_RMII		(1 << 13)	/* Reduce MII (RMII) */
-
-#define	AT91_EMAC_SR		0x08	/* Status Register */
-#define		AT91_EMAC_SR_LINK	(1 <<  0)	/* Link */
-#define		AT91_EMAC_SR_MDIO	(1 <<  1)	/* MDIO pin */
-#define		AT91_EMAC_SR_IDLE	(1 <<  2)	/* PHY idle */
-
-#define	AT91_EMAC_TAR		0x0c	/* Transmit Address Register */
-
-#define	AT91_EMAC_TCR		0x10	/* Transmit Control Register */
-#define		AT91_EMAC_LEN		(0x7ff << 0)	/* Transmit Frame Length */
-#define		AT91_EMAC_NCRC		(1     << 15)	/* No CRC */
-
-#define	AT91_EMAC_TSR		0x14	/* Transmit Status Register */
-#define		AT91_EMAC_TSR_OVR	(1 <<  0)	/* Transmit Buffer Overrun */
-#define		AT91_EMAC_TSR_COL	(1 <<  1)	/* Collision Occurred */
-#define		AT91_EMAC_TSR_RLE	(1 <<  2)	/* Retry Limit Exceeded */
-#define		AT91_EMAC_TSR_IDLE	(1 <<  3)	/* Transmitter Idle */
-#define		AT91_EMAC_TSR_BNQ	(1 <<  4)	/* Transmit Buffer not Queued */
-#define		AT91_EMAC_TSR_COMP	(1 <<  5)	/* Transmit Complete */
-#define		AT91_EMAC_TSR_UND	(1 <<  6)	/* Transmit Underrun */
-
-#define	AT91_EMAC_RBQP		0x18	/* Receive Buffer Queue Pointer */
-
-#define	AT91_EMAC_RSR		0x20	/* Receive Status Register */
-#define		AT91_EMAC_RSR_BNA	(1 <<  0)	/* Buffer Not Available */
-#define		AT91_EMAC_RSR_REC	(1 <<  1)	/* Frame Received */
-#define		AT91_EMAC_RSR_OVR	(1 <<  2)	/* RX Overrun */
-
-#define	AT91_EMAC_ISR		0x24	/* Interrupt Status Register */
-#define		AT91_EMAC_DONE		(1 <<  0)	/* Management Done */
-#define		AT91_EMAC_RCOM		(1 <<  1)	/* Receive Complete */
-#define		AT91_EMAC_RBNA		(1 <<  2)	/* Receive Buffer Not Available */
-#define		AT91_EMAC_TOVR		(1 <<  3)	/* Transmit Buffer Overrun */
-#define		AT91_EMAC_TUND		(1 <<  4)	/* Transmit Buffer Underrun */
-#define		AT91_EMAC_RTRY		(1 <<  5)	/* Retry Limit */
-#define		AT91_EMAC_TBRE		(1 <<  6)	/* Transmit Buffer Register Empty */
-#define		AT91_EMAC_TCOM		(1 <<  7)	/* Transmit Complete */
-#define		AT91_EMAC_TIDLE		(1 <<  8)	/* Transmit Idle */
-#define		AT91_EMAC_LINK		(1 <<  9)	/* Link */
-#define		AT91_EMAC_ROVR		(1 << 10)	/* RX Overrun */
-#define		AT91_EMAC_ABT		(1 << 11)	/* Abort */
-
-#define	AT91_EMAC_IER		0x28	/* Interrupt Enable Register */
-#define	AT91_EMAC_IDR		0x2c	/* Interrupt Disable Register */
-#define	AT91_EMAC_IMR		0x30	/* Interrupt Mask Register */
-
-#define	AT91_EMAC_MAN		0x34	/* PHY Maintenance Register */
-#define		AT91_EMAC_DATA		(0xffff << 0)	/* MDIO Data */
-#define		AT91_EMAC_REGA		(0x1f	<< 18)	/* MDIO Register */
-#define		AT91_EMAC_PHYA		(0x1f	<< 23)	/* MDIO PHY Address */
-#define		AT91_EMAC_RW		(3	<< 28)	/* Read/Write operation */
-#define			AT91_EMAC_RW_W		(1 << 28)
-#define			AT91_EMAC_RW_R		(2 << 28)
-#define		AT91_EMAC_MAN_802_3	0x40020000	/* IEEE 802.3 value */
-
-/*
- * Statistics Registers.
- */
-#define AT91_EMAC_FRA		0x40	/* Frames Transmitted OK */
-#define AT91_EMAC_SCOL		0x44	/* Single Collision Frame */
-#define AT91_EMAC_MCOL		0x48	/* Multiple Collision Frame */
-#define AT91_EMAC_OK		0x4c	/* Frames Received OK */
-#define AT91_EMAC_SEQE		0x50	/* Frame Check Sequence Error */
-#define AT91_EMAC_ALE		0x54	/* Alignmemt Error */
-#define AT91_EMAC_DTE		0x58	/* Deffered Transmission Frame */
-#define AT91_EMAC_LCOL		0x5c	/* Late Collision */
-#define AT91_EMAC_ECOL		0x60	/* Excessive Collision */
-#define AT91_EMAC_TUE		0x64	/* Transmit Underrun Error */
-#define AT91_EMAC_CSE		0x68	/* Carrier Sense Error */
-#define AT91_EMAC_DRFC		0x6c	/* Discard RX Frame */
-#define AT91_EMAC_ROV		0x70	/* Receive Overrun */
-#define AT91_EMAC_CDE		0x74	/* Code Error */
-#define AT91_EMAC_ELR		0x78	/* Excessive Length Error */
-#define AT91_EMAC_RJB		0x7c	/* Receive Jabber */
-#define AT91_EMAC_USF		0x80	/* Undersize Frame */
-#define AT91_EMAC_SQEE		0x84	/* SQE Test Error */
-
-/*
- * Address Registers.
- */
-#define AT91_EMAC_HSL		0x90	/* Hash Address Low [31:0] */
-#define AT91_EMAC_HSH		0x94	/* Hash Address High [63:32] */
-#define AT91_EMAC_SA1L		0x98	/* Specific Address 1 Low, bytes 0-3 */
-#define AT91_EMAC_SA1H		0x9c	/* Specific Address 1 High, bytes 4-5 */
-#define AT91_EMAC_SA2L		0xa0	/* Specific Address 2 Low, bytes 0-3 */
-#define AT91_EMAC_SA2H		0xa4	/* Specific Address 2 High, bytes 4-5 */
-#define AT91_EMAC_SA3L		0xa8	/* Specific Address 3 Low, bytes 0-3 */
-#define AT91_EMAC_SA3H		0xac	/* Specific Address 3 High, bytes 4-5 */
-#define AT91_EMAC_SA4L		0xb0	/* Specific Address 4 Low, bytes 0-3 */
-#define AT91_EMAC_SA4H		0xb4	/* Specific Address 4 High, bytes 4-5 */
-
-#endif
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 47c91f7..38d6910 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -117,6 +117,17 @@
 	imx6q_sabrelite_cko1_setup();
 }
 
+static void __init imx6q_1588_init(void)
+{
+	struct regmap *gpr;
+
+	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+	if (!IS_ERR(gpr))
+		regmap_update_bits(gpr, 0x4, 1 << 21, 1 << 21);
+	else
+		pr_err("failed to find fsl,imx6q-iomuxc-gpr regmap\n");
+
+}
 static void __init imx6q_usb_init(void)
 {
 	struct regmap *anatop;
@@ -153,6 +164,7 @@
 
 	imx6q_pm_init();
 	imx6q_usb_init();
+	imx6q_1588_init();
 }
 
 static struct cpuidle_driver imx6q_cpuidle_driver = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 59d5c1c..3125835 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -674,6 +674,7 @@
 	.name		= "cpgmac0",
 	.class		= &am33xx_cpgmac0_hwmod_class,
 	.clkdm_name	= "cpsw_125mhz_clkdm",
+	.flags		= (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
 	.mpu_irqs	= am33xx_cpgmac0_irqs,
 	.main_clk	= "cpsw_125mhz_gclk",
 	.prcm		= {
@@ -685,6 +686,20 @@
 };
 
 /*
+ * mdio class
+ */
+static struct omap_hwmod_class am33xx_mdio_hwmod_class = {
+	.name		= "davinci_mdio",
+};
+
+static struct omap_hwmod am33xx_mdio_hwmod = {
+	.name		= "davinci_mdio",
+	.class		= &am33xx_mdio_hwmod_class,
+	.clkdm_name	= "cpsw_125mhz_clkdm",
+	.main_clk	= "cpsw_125mhz_gclk",
+};
+
+/*
  * dcan class
  */
 static struct omap_hwmod_class am33xx_dcan_hwmod_class = {
@@ -2501,6 +2516,21 @@
 	.user		= OCP_USER_MPU,
 };
 
+struct omap_hwmod_addr_space am33xx_mdio_addr_space[] = {
+	{
+		.pa_start	= 0x4A101000,
+		.pa_end		= 0x4A101000 + SZ_256 - 1,
+	},
+	{ }
+};
+
+struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio = {
+	.master		= &am33xx_cpgmac0_hwmod,
+	.slave		= &am33xx_mdio_hwmod,
+	.addr		= am33xx_mdio_addr_space,
+	.user		= OCP_USER_MPU,
+};
+
 static struct omap_hwmod_addr_space am33xx_elm_addr_space[] = {
 	{
 		.pa_start	= 0x48080000,
@@ -3371,6 +3401,7 @@
 	&am33xx_l3_main__tptc2,
 	&am33xx_l3_s__usbss,
 	&am33xx_l4_hs__cpgmac0,
+	&am33xx_cpgmac0__mdio,
 	NULL,
 };
 
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c641fb6..9af9a69 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
 
@@ -168,6 +169,8 @@
 	case BPF_S_ANC_MARK:
 	case BPF_S_ANC_PROTOCOL:
 	case BPF_S_ANC_RXHASH:
+	case BPF_S_ANC_VLAN_TAG:
+	case BPF_S_ANC_VLAN_TAG_PRESENT:
 	case BPF_S_ANC_QUEUE:
 		return true;
 	default:
@@ -646,6 +649,16 @@
 			update_on_xread(ctx);
 			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
 			break;
+		case BPF_S_ALU_XOR_K:
+			/* A ^= K; */
+			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
+			break;
+		case BPF_S_ANC_ALU_XOR_X:
+		case BPF_S_ALU_XOR_X:
+			/* A ^= X */
+			update_on_xread(ctx);
+			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+			break;
 		case BPF_S_ALU_AND_K:
 			/* A &= K */
 			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
@@ -762,11 +775,6 @@
 			update_on_xread(ctx);
 			emit(ARM_MOV_R(r_A, r_X), ctx);
 			break;
-		case BPF_S_ANC_ALU_XOR_X:
-			/* A ^= X */
-			update_on_xread(ctx);
-			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
-			break;
 		case BPF_S_ANC_PROTOCOL:
 			/* A = ntohs(skb->protocol) */
 			ctx->seen |= SEEN_SKB;
@@ -810,6 +818,17 @@
 			off = offsetof(struct sk_buff, rxhash);
 			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 			break;
+		case BPF_S_ANC_VLAN_TAG:
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+			off = offsetof(struct sk_buff, vlan_tci);
+			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
+			if (inst->code == BPF_S_ANC_VLAN_TAG)
+				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
+			else
+				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+			break;
 		case BPF_S_ANC_QUEUE:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 7fa2f7d..afb8462 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -69,6 +69,7 @@
 #define ARM_INST_CMP_I		0x03500000
 
 #define ARM_INST_EOR_R		0x00200000
+#define ARM_INST_EOR_I		0x02200000
 
 #define ARM_INST_LDRB_I		0x05d00000
 #define ARM_INST_LDRB_R		0x07d00000
@@ -135,6 +136,7 @@
 #define ARM_CMP_I(rn, imm)	_AL3_I(ARM_INST_CMP, 0, rn, imm)
 
 #define ARM_EOR_R(rd, rn, rm)	_AL3_R(ARM_INST_EOR, rd, rn, rm)
+#define ARM_EOR_I(rd, rn, imm)	_AL3_I(ARM_INST_EOR, rd, rn, imm)
 
 #define ARM_LDR_I(rt, rn, off)	(ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
 				 | (off))
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 06e73bf..09f9fa8 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -80,7 +80,6 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_ALLOCATOR
 	select HAVE_FB_ATMEL
-	select HAVE_NET_MACB
 
 #
 # CPU types
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index a473f8c..486df68 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index ae52825..b681b04 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/asm/socket.h
@@ -42,6 +42,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP           29
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index a5b1d7d..871f89b 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP		29
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h
index ec4554e..90a2e57 100644
--- a/arch/h8300/include/asm/socket.h
+++ b/arch/h8300/include/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP		29
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 41fc28a..23d6759 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -49,6 +49,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER	26
 #define SO_DETACH_FILTER	27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h
index a15f40b..5e7088a 100644
--- a/arch/m32r/include/asm/socket.h
+++ b/arch/m32r/include/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/m68k/include/uapi/asm/socket.h b/arch/m68k/include/uapi/asm/socket.h
index d1be684..285da3b 100644
--- a/arch/m68k/include/uapi/asm/socket.h
+++ b/arch/m68k/include/uapi/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP		29
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index d43ceff..48a4c70 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -43,8 +43,8 @@
 #ifdef CONFIG_BCM47XX_SSB
 	case BCM47XX_BUS_TYPE_SSB:
 		mcore_ssb = &bcm47xx_bus.ssb.mipscore;
-		base = mcore_ssb->flash_window;
-		lim = mcore_ssb->flash_window_size;
+		base = mcore_ssb->pflash.window;
+		lim = mcore_ssb->pflash.window_size;
 		break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
index e9f9ec8..e80d585 100644
--- a/arch/mips/bcm47xx/wgt634u.c
+++ b/arch/mips/bcm47xx/wgt634u.c
@@ -156,10 +156,10 @@
 					    SSB_CHIPCO_IRQ_GPIO);
 		}
 
-		wgt634u_flash_data.width = mcore->flash_buswidth;
-		wgt634u_flash_resource.start = mcore->flash_window;
-		wgt634u_flash_resource.end = mcore->flash_window
-					   + mcore->flash_window_size
+		wgt634u_flash_data.width = mcore->pflash.buswidth;
+		wgt634u_flash_resource.start = mcore->pflash.window;
+		wgt634u_flash_resource.end = mcore->pflash.window
+					   + mcore->pflash.window_size
 					   - 1;
 		return platform_add_devices(wgt634u_devices,
 					    ARRAY_SIZE(wgt634u_devices));
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index c5ed595..17307ab 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -63,6 +63,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP		29
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 820463a..af5366b 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 1b52c2c..d9ff473 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -48,6 +48,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        0x401a
 #define SO_DETACH_FILTER        0x401b
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_ACCEPTCONN		0x401c
 
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 5f73ce6..42b1f43 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -168,9 +168,12 @@
 #define PPC_INST_AND			0x7c000038
 #define PPC_INST_ANDDOT			0x7c000039
 #define PPC_INST_OR			0x7c000378
+#define PPC_INST_XOR			0x7c000278
 #define PPC_INST_ANDI			0x70000000
 #define PPC_INST_ORI			0x60000000
 #define PPC_INST_ORIS			0x64000000
+#define PPC_INST_XORI			0x68000000
+#define PPC_INST_XORIS			0x6c000000
 #define PPC_INST_NEG			0x7c0000d0
 #define PPC_INST_BRANCH			0x48000000
 #define PPC_INST_BRANCH_COND		0x40800000
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 3d5179b..eb0b186 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -47,6 +47,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER	26
 #define SO_DETACH_FILTER	27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 1fc8109..8a5dfaf 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -134,6 +134,12 @@
 				     ___PPC_RS(a) | IMM_L(i))
 #define PPC_ORIS(d, a, i)	EMIT(PPC_INST_ORIS | ___PPC_RA(d) |	      \
 				     ___PPC_RS(a) | IMM_L(i))
+#define PPC_XOR(d, a, b)	EMIT(PPC_INST_XOR | ___PPC_RA(d) |	      \
+				     ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_XORI(d, a, i)	EMIT(PPC_INST_XORI | ___PPC_RA(d) |	      \
+				     ___PPC_RS(a) | IMM_L(i))
+#define PPC_XORIS(d, a, i)	EMIT(PPC_INST_XORIS | ___PPC_RA(d) |	      \
+				     ___PPC_RS(a) | IMM_L(i))
 #define PPC_SLW(d, a, s)	EMIT(PPC_INST_SLW | ___PPC_RA(d) |	      \
 				     ___PPC_RS(a) | ___PPC_RB(s))
 #define PPC_SRW(d, a, s)	EMIT(PPC_INST_SRW | ___PPC_RA(d) |	      \
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index dd11306..e834f1e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -13,6 +13,8 @@
 #include <asm/cacheflush.h>
 #include <linux/netdevice.h>
 #include <linux/filter.h>
+#include <linux/if_vlan.h>
+
 #include "bpf_jit.h"
 
 #ifndef __BIG_ENDIAN
@@ -89,6 +91,8 @@
 	case BPF_S_ANC_IFINDEX:
 	case BPF_S_ANC_MARK:
 	case BPF_S_ANC_RXHASH:
+	case BPF_S_ANC_VLAN_TAG:
+	case BPF_S_ANC_VLAN_TAG_PRESENT:
 	case BPF_S_ANC_CPU:
 	case BPF_S_ANC_QUEUE:
 	case BPF_S_LD_W_ABS:
@@ -232,6 +236,17 @@
 			if (K >= 65536)
 				PPC_ORIS(r_A, r_A, IMM_H(K));
 			break;
+		case BPF_S_ANC_ALU_XOR_X:
+		case BPF_S_ALU_XOR_X: /* A ^= X */
+			ctx->seen |= SEEN_XREG;
+			PPC_XOR(r_A, r_A, r_X);
+			break;
+		case BPF_S_ALU_XOR_K: /* A ^= K */
+			if (IMM_L(K))
+				PPC_XORI(r_A, r_A, IMM_L(K));
+			if (K >= 65536)
+				PPC_XORIS(r_A, r_A, IMM_H(K));
+			break;
 		case BPF_S_ALU_LSH_X: /* A <<= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SLW(r_A, r_A, r_X);
@@ -371,6 +386,16 @@
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  rxhash));
 			break;
+		case BPF_S_ANC_VLAN_TAG:
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+							  vlan_tci));
+			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
+				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
+			else
+				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+			break;
 		case BPF_S_ANC_QUEUE:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  queue_mapping) != 2);
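
Note: the new BPF_S_ALU_XOR_K case mirrors the existing OR-immediate handling: PowerPC xori/xoris each carry only a 16-bit immediate, so a 32-bit constant is applied as two instructions, and either half is skipped when it is zero. A minimal stand-alone C model of that decomposition (illustration only, not kernel code):

/* Model of how the JIT composes A ^= K from two 16-bit instruction
 * immediates: xori applies the low half, xoris the high half shifted
 * left by 16.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t xor_imm32(uint32_t a, uint32_t k)
{
	if (k & 0xffff)		/* PPC_XORI(r_A, r_A, IMM_L(K)) */
		a ^= k & 0xffff;
	if (k >= 65536)		/* PPC_XORIS(r_A, r_A, IMM_H(K)) */
		a ^= k & 0xffff0000;
	return a;		/* equals a ^ k for any 32-bit k */
}

int main(void)
{
	assert(xor_imm32(0x12345678, 0x0000beef) == (0x12345678 ^ 0x0000beef));
	assert(xor_imm32(0x12345678, 0xdead0000) == (0x12345678 ^ 0xdead0000));
	assert(xor_imm32(0x12345678, 0xdeadbeef) == (0x12345678 ^ 0xdeadbeef));
	return 0;
}
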
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 69718cd..436d07c 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -46,6 +46,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index bea1568..c83a937 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -41,6 +41,7 @@
 
 #define SO_ATTACH_FILTER	0x001a
 #define SO_DETACH_FILTER        0x001b
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		0x001c
 #define SO_TIMESTAMP		0x001d
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 2836870..3109ca6 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -3,6 +3,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/cache.h>
+#include <linux/if_vlan.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
@@ -312,6 +313,12 @@
 #define emit_addi(R1, IMM, R3) \
 	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
 
+#define emit_and(R1, R2, R3) \
+	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))
+
+#define emit_andi(R1, IMM, R3) \
+	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))
+
 #define emit_alloc_stack(SZ) \
 	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))
 
@@ -415,6 +422,8 @@
 		case BPF_S_ANC_IFINDEX:
 		case BPF_S_ANC_MARK:
 		case BPF_S_ANC_RXHASH:
+		case BPF_S_ANC_VLAN_TAG:
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
 		case BPF_S_ANC_CPU:
 		case BPF_S_ANC_QUEUE:
 		case BPF_S_LD_W_ABS:
@@ -600,6 +609,16 @@
 			case BPF_S_ANC_RXHASH:
 				emit_skb_load32(rxhash, r_A);
 				break;
+			case BPF_S_ANC_VLAN_TAG:
+			case BPF_S_ANC_VLAN_TAG_PRESENT:
+				emit_skb_load16(vlan_tci, r_A);
+				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
+					emit_andi(r_A, VLAN_VID_MASK, r_A);
+				} else {
+					emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
+					emit_and(r_A, r_TMP, r_A);
+				}
+				break;
 
 			case BPF_S_LD_IMM:
 				emit_loadimm(K, r_A);
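
Note: the sparc VLAN handling masks with an immediate for VLAN_VID_MASK but goes through r_TMP for VLAN_TAG_PRESENT, presumably because S13() is a 13-bit signed immediate field: 0x0fff fits, 0x1000 does not. A tiny stand-alone check of that range (illustration only; the constants used are the generic <linux/if_vlan.h> values, which is an assumption):

/* 13-bit signed immediates cover -4096..4095, so VLAN_VID_MASK (0x0fff)
 * can be encoded directly while VLAN_TAG_PRESENT (0x1000) cannot.
 */
#include <stdio.h>

static int fits_simm13(long v)
{
	return v >= -4096 && v <= 4095;
}

int main(void)
{
	printf("VLAN_VID_MASK    0x0fff fits S13: %d\n", fits_simm13(0x0fff));
	printf("VLAN_TAG_PRESENT 0x1000 fits S13: %d\n", fits_simm13(0x1000));
	return 0;
}
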
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 520d2bd..d11a470 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -11,6 +11,7 @@
 #include <asm/cacheflush.h>
 #include <linux/netdevice.h>
 #include <linux/filter.h>
+#include <linux/if_vlan.h>
 
 /*
  * Conventions :
@@ -212,6 +213,8 @@
 		case BPF_S_ANC_MARK:
 		case BPF_S_ANC_RXHASH:
 		case BPF_S_ANC_CPU:
+		case BPF_S_ANC_VLAN_TAG:
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
 		case BPF_S_ANC_QUEUE:
 		case BPF_S_LD_W_ABS:
 		case BPF_S_LD_H_ABS:
@@ -515,6 +518,24 @@
 				CLEAR_A();
 #endif
 				break;
+			case BPF_S_ANC_VLAN_TAG:
+			case BPF_S_ANC_VLAN_TAG_PRESENT:
+				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+				if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
+					/* movzwl off8(%rdi),%eax */
+					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
+				} else {
+					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
+					EMIT(offsetof(struct sk_buff, vlan_tci), 4);
+				}
+				BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
+					EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
+				} else {
+					EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
+					EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
+				}
+				break;
 			case BPF_S_LD_W_ABS:
 				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 common_load:			seen |= SEEN_DATAREF;
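
Note: a C model of what the two emitted x86-64 sequences compute, given that VLAN_TAG_PRESENT is bit 12 of vlan_tci (which the BUILD_BUG_ON above enforces): "and $0xef,%ah" clears exactly that bit for BPF_S_ANC_VLAN_TAG, while the shift/mask pair extracts it for BPF_S_ANC_VLAN_TAG_PRESENT. Illustration only, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_PRESENT 0x1000

static uint32_t anc_vlan_tag(uint16_t vlan_tci)
{
	/* movzwl + "and $0xef,%ah": clear bit 12, keep the other tag bits */
	return (uint32_t)vlan_tci & ~VLAN_TAG_PRESENT;
}

static uint32_t anc_vlan_tag_present(uint16_t vlan_tci)
{
	/* movzwl + "shr $0xc,%eax" + "and $0x1,%eax" */
	return (vlan_tci >> 12) & 1;
}

int main(void)
{
	uint16_t tci = VLAN_TAG_PRESENT | 0x0123;	/* tagged, VID 0x123 */

	printf("tag = 0x%x, present = %u\n",
	       anc_vlan_tag(tci), anc_vlan_tag_present(tci));
	return 0;
}
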
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index e36c681..38079be 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -52,6 +52,7 @@
 
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index a4c3ebc..ffd74e5 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -22,6 +22,23 @@
 	return value;
 }
 
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
+{
+	if (cc->early_setup_done)
+		return;
+
+	if (cc->core->id.rev >= 11)
+		cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+	cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
+	if (cc->core->id.rev >= 35)
+		cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+
+	if (cc->capabilities & BCMA_CC_CAP_PMU)
+		bcma_pmu_early_init(cc);
+
+	cc->early_setup_done = true;
+}
+
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
 {
 	u32 leddc_on = 10;
@@ -30,11 +47,7 @@
 	if (cc->setup_done)
 		return;
 
-	if (cc->core->id.rev >= 11)
-		cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
-	cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
-	if (cc->core->id.rev >= 35)
-		cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+	bcma_core_chipcommon_early_init(cc);
 
 	if (cc->core->id.rev >= 20) {
 		bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 9042781..dbda91e 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -32,6 +32,9 @@
 	}
 
 	cc->nflash.present = true;
+	if (cc->core->id.rev == 38 &&
+	    (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT))
+		cc->nflash.boot = true;
 
 	/* Prepare platform device, but don't register it yet. It's too early,
 	 * malloc (required by device_private_init) is not available yet. */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 201faf1..a63ddd9 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -144,7 +144,7 @@
 	}
 }
 
-void bcma_pmu_init(struct bcma_drv_cc *cc)
+void bcma_pmu_early_init(struct bcma_drv_cc *cc)
 {
 	u32 pmucap;
 
@@ -153,7 +153,10 @@
 
 	bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
 		   cc->pmu.rev, pmucap);
+}
 
+void bcma_pmu_init(struct bcma_drv_cc *cc)
+{
 	if (cc->pmu.rev == 1)
 		bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
 			      ~BCMA_CC_PMU_CTL_NOILPONW);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 2c4eec2..63e6883 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -12,7 +12,7 @@
 
 static struct resource bcma_sflash_resource = {
 	.name	= "bcma_sflash",
-	.start	= BCMA_SFLASH,
+	.start	= BCMA_SOC_FLASH2,
 	.end	= 0,
 	.flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
 };
@@ -31,15 +31,42 @@
 };
 
 static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
-	{ "", 0x14, 0x10000, 32, },
+	{ "M25P20", 0x11, 0x10000, 4, },
+	{ "M25P40", 0x12, 0x10000, 8, },
+
+	{ "M25P16", 0x14, 0x10000, 32, },
+	{ "M25P32", 0x14, 0x10000, 64, },
+	{ "M25P64", 0x16, 0x10000, 128, },
+	{ "M25FL128", 0x17, 0x10000, 256, },
 	{ 0 },
 };
 
 static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+	{ "SST25WF512", 1, 0x1000, 16, },
+	{ "SST25VF512", 0x48, 0x1000, 16, },
+	{ "SST25WF010", 2, 0x1000, 32, },
+	{ "SST25VF010", 0x49, 0x1000, 32, },
+	{ "SST25WF020", 3, 0x1000, 64, },
+	{ "SST25VF020", 0x43, 0x1000, 64, },
+	{ "SST25WF040", 4, 0x1000, 128, },
+	{ "SST25VF040", 0x44, 0x1000, 128, },
+	{ "SST25VF040B", 0x8d, 0x1000, 128, },
+	{ "SST25WF080", 5, 0x1000, 256, },
+	{ "SST25VF080B", 0x8e, 0x1000, 256, },
+	{ "SST25VF016", 0x41, 0x1000, 512, },
+	{ "SST25VF032", 0x4a, 0x1000, 1024, },
+	{ "SST25VF064", 0x4b, 0x1000, 2048, },
 	{ 0 },
 };
 
 static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+	{ "AT45DB011", 0xc, 256, 512, },
+	{ "AT45DB021", 0x14, 256, 1024, },
+	{ "AT45DB041", 0x1c, 256, 2048, },
+	{ "AT45DB081", 0x24, 256, 4096, },
+	{ "AT45DB161", 0x2c, 512, 4096, },
+	{ "AT45DB321", 0x34, 512, 8192, },
+	{ "AT45DB642", 0x3c, 1024, 8192, },
 	{ 0 },
 };
 
@@ -84,6 +111,8 @@
 					break;
 			}
 			break;
+		case 0x13:
+			return -ENOTSUPP;
 		default:
 			for (e = bcma_sflash_st_tbl; e->name; e++) {
 				if (e->id == id)
@@ -116,7 +145,7 @@
 		return -ENOTSUPP;
 	}
 
-	sflash->window = BCMA_SFLASH;
+	sflash->window = BCMA_SOC_FLASH2;
 	sflash->blocksize = e->blocksize;
 	sflash->numblocks = e->numblocks;
 	sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index cc65b45..170822e 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -181,47 +181,66 @@
 static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
 {
 	struct bcma_bus *bus = mcore->core->bus;
+	struct bcma_drv_cc *cc = &bus->drv_cc;
 
-	switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
 	case BCMA_CC_FLASHT_STSER:
 	case BCMA_CC_FLASHT_ATSER:
 		bcma_debug(bus, "Found serial flash\n");
-		bcma_sflash_init(&bus->drv_cc);
+		bcma_sflash_init(cc);
 		break;
 	case BCMA_CC_FLASHT_PARA:
 		bcma_debug(bus, "Found parallel flash\n");
-		bus->drv_cc.pflash.window = 0x1c000000;
-		bus->drv_cc.pflash.window_size = 0x02000000;
+		cc->pflash.present = true;
+		cc->pflash.window = BCMA_SOC_FLASH2;
+		cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
 
-		if ((bcma_read32(bus->drv_cc.core, BCMA_CC_FLASH_CFG) &
+		if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
 		     BCMA_CC_FLASH_CFG_DS) == 0)
-			bus->drv_cc.pflash.buswidth = 1;
+			cc->pflash.buswidth = 1;
 		else
-			bus->drv_cc.pflash.buswidth = 2;
+			cc->pflash.buswidth = 2;
 		break;
 	default:
 		bcma_err(bus, "Flash type not supported\n");
 	}
 
-	if (bus->drv_cc.core->id.rev == 38 ||
+	if (cc->core->id.rev == 38 ||
 	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
-		if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
+		if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
 			bcma_debug(bus, "Found NAND flash\n");
-			bcma_nflash_init(&bus->drv_cc);
+			bcma_nflash_init(cc);
 		}
 	}
 }
 
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+	struct bcma_bus *bus = mcore->core->bus;
+
+	if (mcore->early_setup_done)
+		return;
+
+	bcma_chipco_serial_init(&bus->drv_cc);
+	bcma_core_mips_flash_detect(mcore);
+
+	mcore->early_setup_done = true;
+}
+
 void bcma_core_mips_init(struct bcma_drv_mips *mcore)
 {
 	struct bcma_bus *bus;
 	struct bcma_device *core;
 	bus = mcore->core->bus;
 
+	if (mcore->setup_done)
+		return;
+
 	bcma_info(bus, "Initializing MIPS core...\n");
 
-	if (!mcore->setup_done)
-		mcore->assigned_irqs = 1;
+	bcma_core_mips_early_init(mcore);
+
+	mcore->assigned_irqs = 1;
 
 	/* Assign IRQs to all cores on the bus */
 	list_for_each_entry(core, &bus->cores, list) {
@@ -256,10 +275,5 @@
 	bcma_info(bus, "IRQ reconfiguration done\n");
 	bcma_core_mips_dump_irq(bus);
 
-	if (mcore->setup_done)
-		return;
-
-	bcma_chipco_serial_init(&bus->drv_cc);
-	bcma_core_mips_flash_detect(mcore);
 	mcore->setup_done = true;
 }
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 9baf886..e564495 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -35,11 +35,6 @@
 	    chipid_top != 0x5300)
 		return false;
 
-	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
-		bcma_info(bus, "This PCI core is disabled and not working\n");
-		return false;
-	}
-
 	bcma_core_enable(pc->core, 0);
 
 	return !mips_busprobe32(tmp, pc->core->io_addr);
@@ -396,6 +391,11 @@
 
 	bcma_info(bus, "PCIEcore in host mode found\n");
 
+	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
+		bcma_info(bus, "This PCIE core is disabled and not working\n");
+		return;
+	}
+
 	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
 	if (!pc_host)  {
 		bcma_err(bus, "can not allocate memory");
@@ -452,6 +452,8 @@
 			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
 			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
 						    BCMA_SOC_PCI_MEM_SZ - 1;
+			pc_host->io_resource.start = 0x100;
+			pc_host->io_resource.end = 0x47F;
 			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
 			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
 					tmp | BCMA_SOC_PCI_MEM);
@@ -459,6 +461,8 @@
 			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
 			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
 						    BCMA_SOC_PCI_MEM_SZ - 1;
+			pc_host->io_resource.start = 0x480;
+			pc_host->io_resource.end = 0x7FF;
 			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
 			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
 			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index b6b4b5e..98fdc3e 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -238,7 +238,7 @@
 	pci_set_drvdata(dev, NULL);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int bcma_host_pci_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -261,11 +261,11 @@
 			 bcma_host_pci_resume);
 #define BCMA_PM_OPS	(&bcma_pm_ops)
 
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
 
 #define BCMA_PM_OPS     NULL
 
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d865470..a971889 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,6 +81,18 @@
 }
 EXPORT_SYMBOL_GPL(bcma_find_core);
 
+static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+					       u8 unit)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		if (core->id.id == coreid && core->core_unit == unit)
+			return core;
+	}
+	return NULL;
+}
+
 static void bcma_release_core_dev(struct device *dev)
 {
 	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
@@ -183,6 +195,20 @@
 		return -1;
 	}
 
+	/* Early init CC core */
+	core = bcma_find_core(bus, bcma_cc_core_id(bus));
+	if (core) {
+		bus->drv_cc.core = core;
+		bcma_core_chipcommon_early_init(&bus->drv_cc);
+	}
+
+	/* Try to get SPROM */
+	err = bcma_sprom_get(bus);
+	if (err == -ENOENT) {
+		bcma_err(bus, "No SPROM available\n");
+	} else if (err)
+		bcma_err(bus, "Failed to get SPROM: %d\n", err);
+
 	/* Init CC core */
 	core = bcma_find_core(bus, bcma_cc_core_id(bus));
 	if (core) {
@@ -198,10 +224,17 @@
 	}
 
 	/* Init PCIE core */
-	core = bcma_find_core(bus, BCMA_CORE_PCIE);
+	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
 	if (core) {
-		bus->drv_pci.core = core;
-		bcma_core_pci_init(&bus->drv_pci);
+		bus->drv_pci[0].core = core;
+		bcma_core_pci_init(&bus->drv_pci[0]);
+	}
+
+	/* Init PCIE core */
+	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
+	if (core) {
+		bus->drv_pci[1].core = core;
+		bcma_core_pci_init(&bus->drv_pci[1]);
 	}
 
 	/* Init GBIT MAC COMMON core */
@@ -211,13 +244,6 @@
 		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
 	}
 
-	/* Try to get SPROM */
-	err = bcma_sprom_get(bus);
-	if (err == -ENOENT) {
-		bcma_err(bus, "No SPROM available\n");
-	} else if (err)
-		bcma_err(bus, "Failed to get SPROM: %d\n", err);
-
 	/* Register found cores */
 	bcma_register_cores(bus);
 
@@ -275,18 +301,18 @@
 		return -1;
 	}
 
-	/* Init CC core */
+	/* Early init CC core */
 	core = bcma_find_core(bus, bcma_cc_core_id(bus));
 	if (core) {
 		bus->drv_cc.core = core;
-		bcma_core_chipcommon_init(&bus->drv_cc);
+		bcma_core_chipcommon_early_init(&bus->drv_cc);
 	}
 
-	/* Init MIPS core */
+	/* Early init MIPS core */
 	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
 	if (core) {
 		bus->drv_mips.core = core;
-		bcma_core_mips_init(&bus->drv_mips);
+		bcma_core_mips_early_init(&bus->drv_mips);
 	}
 
 	bcma_info(bus, "Early bus registered\n");
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 0d546b6..4adf9ef 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -595,8 +595,11 @@
 		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
 	err = bcma_sprom_valid(sprom);
-	if (err)
+	if (err) {
+		bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n");
+		err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
 		goto out;
+	}
 
 	bcma_sprom_extract_r8(bus, sprom);
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 3f4bfc8..9959d4c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -492,7 +492,7 @@
 static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
 {
 	u16 buf_len = 0;
-	int ret, buf_block_len, blksz;
+	int ret, num_blocks, blksz;
 	struct sk_buff *skb = NULL;
 	u32 type;
 	u8 *payload = NULL;
@@ -514,18 +514,17 @@
 	}
 
 	blksz = SDIO_BLOCK_SIZE;
-	buf_block_len = (buf_len + blksz - 1) / blksz;
+	num_blocks = DIV_ROUND_UP(buf_len, blksz);
 
 	if (buf_len <= SDIO_HEADER_LEN
-			|| (buf_block_len * blksz) > ALLOC_BUF_SIZE) {
+	    || (num_blocks * blksz) > ALLOC_BUF_SIZE) {
 		BT_ERR("invalid packet length: %d", buf_len);
 		ret = -EINVAL;
 		goto exit;
 	}
 
 	/* Allocate buffer */
-	skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN,
-								GFP_ATOMIC);
+	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
 	if (skb == NULL) {
 		BT_ERR("No free skb");
 		goto exit;
@@ -541,7 +540,7 @@
 	payload = skb->data;
 
 	ret = sdio_readsb(card->func, payload, card->ioport,
-			  buf_block_len * blksz);
+			  num_blocks * blksz);
 	if (ret < 0) {
 		BT_ERR("readsb failed: %d", ret);
 		ret = -EIO;
@@ -553,7 +552,16 @@
 	 */
 
 	buf_len = payload[0];
-	buf_len |= (u16) payload[1] << 8;
+	buf_len |= payload[1] << 8;
+	buf_len |= payload[2] << 16;
+
+	if (buf_len > blksz * num_blocks) {
+		BT_ERR("Skip incorrect packet: hdrlen %d buffer %d",
+		       buf_len, blksz * num_blocks);
+		ret = -EIO;
+		goto exit;
+	}
+
 	type = payload[3];
 
 	switch (type) {
@@ -589,8 +597,7 @@
 
 	default:
 		BT_ERR("Unknown packet type:%d", type);
-		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload,
-						blksz * buf_block_len);
+		BT_ERR("hex: %*ph", blksz * num_blocks, payload);
 
 		kfree_skb(skb);
 		skb = NULL;
@@ -849,8 +856,7 @@
 		if (ret < 0) {
 			i++;
 			BT_ERR("i=%d writesb failed: %d", i, ret);
-			print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
-						payload, nb);
+			BT_ERR("hex: %*ph", nb, payload);
 			ret = -EIO;
 			if (i > MAX_WRITE_IOMEM_RETRY)
 				goto exit;
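
Note: the rework above replaces the open-coded round-up with DIV_ROUND_UP, assembles the real payload length from three header bytes instead of two, and rejects lengths that would overrun the allocated buffer. A stand-alone model of that logic (the SDIO_BLOCK_SIZE value here is an arbitrary assumption for the example, not the driver's constant):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SDIO_BLOCK_SIZE		256	/* assumption for the example */

static int check_packet(const uint8_t *payload, unsigned int num_blocks)
{
	/* 24-bit little-endian length from the first three header bytes */
	unsigned int buf_len = payload[0] | payload[1] << 8 | payload[2] << 16;

	if (buf_len > SDIO_BLOCK_SIZE * num_blocks) {
		fprintf(stderr, "skip bad packet: hdrlen %u buffer %u\n",
			buf_len, SDIO_BLOCK_SIZE * num_blocks);
		return -1;
	}
	return (int)buf_len;
}

int main(void)
{
	uint8_t hdr[4] = { 0x20, 0x01, 0x00, 0x02 };	/* len 0x120, type 2 */
	unsigned int blocks = DIV_ROUND_UP(0x120, SDIO_BLOCK_SIZE);

	printf("blocks=%u len=%d\n", blocks, check_packet(hdr, blocks));
	return 0;
}
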
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ee82f2f..a1d4ede 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -96,6 +96,7 @@
 	{ USB_DEVICE(0x0c10, 0x0000) },
 
 	/* Broadcom BCM20702A0 */
+	{ USB_DEVICE(0x0b05, 0x17b5) },
 	{ USB_DEVICE(0x04ca, 0x2003) },
 	{ USB_DEVICE(0x0489, 0xe042) },
 	{ USB_DEVICE(0x413c, 0x8197) },
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038..d666807 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -604,6 +604,23 @@
 	return slots;
 }
 
+static inline int dca3_tag_map_invalid(u8 *tag_map)
+{
+	/*
+	 * If the tag map is not programmed by the BIOS the default is:
+	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
+	 *
+	 * This is an invalid map and will result in only 2 possible tags
+	 * 0x1F and 0x00.  0x00 is an invalid DCA tag so we know that
+	 * this entire definition is invalid.
+	 */
+	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
+		(tag_map[1] == DCA_TAG_MAP_VALID) &&
+		(tag_map[2] == DCA_TAG_MAP_VALID) &&
+		(tag_map[3] == DCA_TAG_MAP_VALID) &&
+		(tag_map[4] == DCA_TAG_MAP_VALID));
+}
+
 struct dca_provider * __devinit
 ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
@@ -674,6 +691,12 @@
 		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
 	}
 
+	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
+		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+		free_dca_provider(dca);
+		return NULL;
+	}
+
 	err = register_dca_provider(dca, &pdev->dev);
 	if (err) {
 		free_dca_provider(dca);
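
Note: dca3_tag_map_invalid() only needs to recognize the well-known unprogrammed BIOS default (0x80 repeated in the first five entries); any properly programmed map fails at least one of the comparisons. Stand-alone model below; the DCA_TAG_MAP_VALID value of 0x80 is taken as an assumption from the driver's existing defines:

#include <stdio.h>
#include <stdint.h>

#define DCA_TAG_MAP_VALID 0x80	/* assumed value of the driver define */

static int dca3_tag_map_invalid(const uint8_t *tag_map)
{
	return tag_map[0] == DCA_TAG_MAP_VALID &&
	       tag_map[1] == DCA_TAG_MAP_VALID &&
	       tag_map[2] == DCA_TAG_MAP_VALID &&
	       tag_map[3] == DCA_TAG_MAP_VALID &&
	       tag_map[4] == DCA_TAG_MAP_VALID;
}

int main(void)
{
	uint8_t unprogrammed[8] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0, 0, 0 };
	uint8_t programmed[8]   = { 0x03, 0x02, 0x01, 0x00, 0x05, 0, 0, 0 };

	printf("%d %d\n", dca3_tag_map_invalid(unprogrammed),
	       dca3_tag_map_invalid(programmed));	/* prints "1 0" */
	return 0;
}
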
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 81363ff..6e99d73 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -490,7 +490,7 @@
 		    (df->data[le16_to_cpu(zp->z1)])) {
 			if (dch->debug & DEBUG_HW)
 				printk(KERN_DEBUG
-				       "empty_fifo hfcpci paket inv. len "
+				       "empty_fifo hfcpci packet inv. len "
 				       "%d or crc %d\n",
 				       rcnt,
 				       df->data[le16_to_cpu(zp->z1)]);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 182ecf0..feafa91 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1302,7 +1302,7 @@
 						   &ch->is->Flags))
 				ch->dpath = 1;
 			else {
-				pr_info("modeisar both pathes in use\n");
+				pr_info("modeisar both paths in use\n");
 				return -EBUSY;
 			}
 			if (bprotocol == ISDN_P_B_HDLC)
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index a47637b..ddec47a 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -35,7 +35,7 @@
 /* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */
 #define ALERT_REJECT 0
 
-/* Value to delay the sending of the first B-channel paket after CONNECT
+/* Value to delay the sending of the first B-channel packet after CONNECT
  * here is no value given by ITU, but experience shows that 300 ms will
  * work on many networks, if you or your other side is behind local exchanges
  * a greater value may be recommented. If the delay is to short the first paket
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 334fa90..f60d4be 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -354,7 +354,7 @@
 		if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
 		    (df->data[zp->z1])) {
 			if (cs->debug & L1_DEB_WARN)
-				debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
+				debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
 #ifdef ERROR_STATISTIC
 			cs->err_rx++;
 #endif
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4db846b..4ec279c 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -270,7 +270,7 @@
 
 		if ((count > fifo_size) || (count < 4)) {
 			if (cs->debug & L1_DEB_WARN)
-				debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count);
+				debugl1(cs, "hfcsx_read_fifo %d packet inv. len %d ", fifo , count);
 			while (count) {
 				count--; /* empty fifo */
 				Read_hfc(cs, HFCSX_FIF_DRD);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index db50f78..f8e405c 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -277,7 +277,6 @@
 		  u16 timebase, u8 *buf, int len)
 {
 	u8 *p;
-	int multi = 0;
 	u8 frame[len + 32];
 	struct socket *socket = NULL;
 
@@ -317,9 +316,7 @@
 		*p++ = hc->id >> 8;
 		*p++ = hc->id;
 	}
-	*p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */
-	if (multi == 1)
-		*p++ = len; /* length */
+	*p++ =  0x00 + channel; /* m-flag, channel */
 	*p++ = timebase >> 8; /* time base */
 	*p++ = timebase;
 
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index a18e639..42ecfef 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -508,7 +508,7 @@
 		return IRQ_NONE;
 	}
 	if (dev->interrupt) {
-		printk(KERN_DEBUG "pcbit: reentering interrupt hander\n");
+		printk(KERN_DEBUG "pcbit: reentering interrupt handler\n");
 		return IRQ_HANDLED;
 	}
 	dev->interrupt = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5f5b69f..c8bff3e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1838,7 +1838,7 @@
 		 * anyway (it holds no special properties of the bond device),
 		 * so we can change it without calling change_active_interface()
 		 */
-		if (!bond->curr_active_slave)
+		if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
 			bond->curr_active_slave = new_slave;
 
 		break;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index bb709fd..b56bd9e 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -110,6 +110,15 @@
 	  is an IOH for x86 embedded processor (Intel Atom E6xx series).
 	  This driver can access CAN bus.
 
+config CAN_GRCAN
+	tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
+	depends on CAN_DEV && OF
+	---help---
+	  Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
+	  Note that the driver supports little endian, even though little
+	  endian syntheses of the cores would need some modifications on
+	  the hardware level to work.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 938be37..7de5986 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -22,5 +22,6 @@
 obj-$(CONFIG_CAN_JANZ_ICAN3)	+= janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)	+= flexcan.o
 obj-$(CONFIG_PCH_CAN)		+= pch_can.o
+obj-$(CONFIG_CAN_GRCAN)		+= grcan.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index fcff73a..14b166b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1372,6 +1372,7 @@
 		/* sentinel */
 	}
 };
+MODULE_DEVICE_TABLE(platform, at91_can_id_table);
 
 static struct platform_driver at91_can_driver = {
 	.probe = at91_can_probe,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index f2d6d25..a3f8de9 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -691,3 +691,4 @@
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e5180dfd..5233b8f 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -233,6 +233,12 @@
 		pm_runtime_put_sync(priv->device);
 }
 
+static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
+{
+	if (priv->raminit)
+		priv->raminit(priv, enable);
+}
+
 static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
 {
 	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -1090,6 +1096,7 @@
 	struct c_can_priv *priv = netdev_priv(dev);
 
 	c_can_pm_runtime_get_sync(priv);
+	c_can_reset_ram(priv, true);
 
 	/* open the can device */
 	err = open_candev(dev);
@@ -1118,6 +1125,7 @@
 exit_irq_fail:
 	close_candev(dev);
 exit_open_fail:
+	c_can_reset_ram(priv, false);
 	c_can_pm_runtime_put_sync(priv);
 	return err;
 }
@@ -1131,6 +1139,8 @@
 	c_can_stop(dev);
 	free_irq(dev->irq, dev);
 	close_candev(dev);
+
+	c_can_reset_ram(priv, false);
 	c_can_pm_runtime_put_sync(priv);
 
 	return 0;
@@ -1188,6 +1198,7 @@
 
 	c_can_stop(dev);
 
+	c_can_reset_ram(priv, false);
 	c_can_pm_runtime_put_sync(priv);
 
 	return 0;
@@ -1206,6 +1217,7 @@
 	WARN_ON(priv->type != BOSCH_D_CAN);
 
 	c_can_pm_runtime_get_sync(priv);
+	c_can_reset_ram(priv, true);
 
 	/* Clear PDR and INIT bits */
 	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index e5ed41d..d2e1c21 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -169,6 +169,9 @@
 	void *priv;		/* for board-specific data */
 	u16 irqstatus;
 	enum c_can_dev_id type;
+	u32 __iomem *raminit_ctrlreg;
+	unsigned int instance;
+	void (*raminit) (const struct c_can_priv *priv, bool enable);
 };
 
 struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index ee14161..0044fd8 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -38,6 +38,8 @@
 
 #include "c_can.h"
 
+#define CAN_RAMINIT_START_MASK(i)	(1 << (i))
+
 /*
  * 16-bit c_can registers can be arranged differently in the memory
  * architecture of different implementations. For example: 16-bit
@@ -68,6 +70,18 @@
 	writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+	u32 val;
+
+	val = readl(priv->raminit_ctrlreg);
+	if (enable)
+		val |= CAN_RAMINIT_START_MASK(priv->instance);
+	else
+		val &= ~CAN_RAMINIT_START_MASK(priv->instance);
+	writel(val, priv->raminit_ctrlreg);
+}
+
 static struct platform_device_id c_can_id_table[] = {
 	[BOSCH_C_CAN_PLATFORM] = {
 		.name = KBUILD_MODNAME,
@@ -83,12 +97,14 @@
 	}, {
 	}
 };
+MODULE_DEVICE_TABLE(platform, c_can_id_table);
 
 static const struct of_device_id c_can_of_table[] = {
 	{ .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
 	{ .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, c_can_of_table);
 
 static int __devinit c_can_plat_probe(struct platform_device *pdev)
 {
@@ -99,7 +115,7 @@
 	const struct of_device_id *match;
 	const struct platform_device_id *id;
 	struct pinctrl *pinctrl;
-	struct resource *mem;
+	struct resource *mem, *res;
 	int irq;
 	struct clk *clk;
 
@@ -178,6 +194,18 @@
 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
 		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+
+		if (pdev->dev.of_node)
+			priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
+		else
+			priv->instance = pdev->id;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		priv->raminit_ctrlreg =	devm_request_and_ioremap(&pdev->dev, res);
+		if (!priv->raminit_ctrlreg || priv->instance < 0)
+			dev_info(&pdev->dev, "control memory is not used for raminit\n");
+		else
+			priv->raminit = c_can_hw_raminit;
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 688371c..3da6cbb 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -60,6 +60,7 @@
 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
 
 #define CC770_PLATFORM_CAN_CLOCK  16000000
 
@@ -258,6 +259,7 @@
 	{.compatible = "intc,82527"},  /* AN82527 from Intel CP */
 	{},
 };
+MODULE_DEVICE_TABLE(of, cc770_platform_table);
 
 static struct platform_driver cc770_platform_driver = {
 	.driver = {
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a412bf6..9a17965 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -979,11 +979,13 @@
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, flexcan_of_match);
 
 static const struct platform_device_id flexcan_id_table[] = {
 	{ .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
 
 static int __devinit flexcan_probe(struct platform_device *pdev)
 {
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
new file mode 100644
index 0000000..391f484
--- /dev/null
+++ b/drivers/net/can/grcan.c
@@ -0,0 +1,1756 @@
+/*
+ * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
+ *
+ * 2012 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
+ * VHDL IP core library.
+ *
+ * Full documentation of the GRCAN core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
+ * open firmware properties.
+ *
+ * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
+ * sysfs interface.
+ *
+ * See "Documentation/kernel-parameters.txt" for information on the module
+ * parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Andreas Larsson <andreas@gaisler.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/can/dev.h>
+#include <linux/spinlock.h>
+
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+#include <linux/of_irq.h>
+
+#include <linux/dma-mapping.h>
+
+#define DRV_NAME	"grcan"
+
+#define GRCAN_NAPI_WEIGHT	32
+
+#define GRCAN_RESERVE_SIZE(slot1, slot2) (((slot2) - (slot1)) / 4 - 1)
+
+struct grcan_registers {
+	u32 conf;	/* 0x00 */
+	u32 stat;	/* 0x04 */
+	u32 ctrl;	/* 0x08 */
+	u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
+	u32 smask;	/* 0x18 - CanMASK */
+	u32 scode;	/* 0x1c - CanCODE */
+	u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
+	u32 pimsr;	/* 0x100 */
+	u32 pimr;	/* 0x104 */
+	u32 pisr;	/* 0x108 */
+	u32 pir;	/* 0x10C */
+	u32 imr;	/* 0x110 */
+	u32 picr;	/* 0x114 */
+	u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
+	u32 txctrl;	/* 0x200 */
+	u32 txaddr;	/* 0x204 */
+	u32 txsize;	/* 0x208 */
+	u32 txwr;	/* 0x20C */
+	u32 txrd;	/* 0x210 */
+	u32 txirq;	/* 0x214 */
+	u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
+	u32 rxctrl;	/* 0x300 */
+	u32 rxaddr;	/* 0x304 */
+	u32 rxsize;	/* 0x308 */
+	u32 rxwr;	/* 0x30C */
+	u32 rxrd;	/* 0x310 */
+	u32 rxirq;	/* 0x314 */
+	u32 rxmask;	/* 0x318 */
+	u32 rxcode;	/* 0x31C */
+};
+
+#define GRCAN_CONF_ABORT	0x00000001
+#define GRCAN_CONF_ENABLE0	0x00000002
+#define GRCAN_CONF_ENABLE1	0x00000004
+#define GRCAN_CONF_SELECT	0x00000008
+#define GRCAN_CONF_SILENT	0x00000010
+#define GRCAN_CONF_SAM		0x00000020 /* Available in some hardware */
+#define GRCAN_CONF_BPR		0x00000300 /* Note: not BRP */
+#define GRCAN_CONF_RSJ		0x00007000
+#define GRCAN_CONF_PS1		0x00f00000
+#define GRCAN_CONF_PS2		0x000f0000
+#define GRCAN_CONF_SCALER	0xff000000
+#define GRCAN_CONF_OPERATION						\
+	(GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1	\
+	 | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
+#define GRCAN_CONF_TIMING						\
+	(GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1		\
+	 | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)
+
+#define GRCAN_CONF_RSJ_MIN	1
+#define GRCAN_CONF_RSJ_MAX	4
+#define GRCAN_CONF_PS1_MIN	1
+#define GRCAN_CONF_PS1_MAX	15
+#define GRCAN_CONF_PS2_MIN	2
+#define GRCAN_CONF_PS2_MAX	8
+#define GRCAN_CONF_SCALER_MIN	0
+#define GRCAN_CONF_SCALER_MAX	255
+#define GRCAN_CONF_SCALER_INC	1
+
+#define GRCAN_CONF_BPR_BIT	8
+#define GRCAN_CONF_RSJ_BIT	12
+#define GRCAN_CONF_PS1_BIT	20
+#define GRCAN_CONF_PS2_BIT	16
+#define GRCAN_CONF_SCALER_BIT	24
+
+#define GRCAN_STAT_PASS		0x000001
+#define GRCAN_STAT_OFF		0x000002
+#define GRCAN_STAT_OR		0x000004
+#define GRCAN_STAT_AHBERR	0x000008
+#define GRCAN_STAT_ACTIVE	0x000010
+#define GRCAN_STAT_RXERRCNT	0x00ff00
+#define GRCAN_STAT_TXERRCNT	0xff0000
+
+#define GRCAN_STAT_ERRCTR_RELATED	(GRCAN_STAT_PASS | GRCAN_STAT_OFF)
+
+#define GRCAN_STAT_RXERRCNT_BIT	8
+#define GRCAN_STAT_TXERRCNT_BIT	16
+
+#define GRCAN_STAT_ERRCNT_WARNING_LIMIT	96
+#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT	127
+
+#define GRCAN_CTRL_RESET	0x2
+#define GRCAN_CTRL_ENABLE	0x1
+
+#define GRCAN_TXCTRL_ENABLE	0x1
+#define GRCAN_TXCTRL_ONGOING	0x2
+#define GRCAN_TXCTRL_SINGLE	0x4
+
+#define GRCAN_RXCTRL_ENABLE	0x1
+#define GRCAN_RXCTRL_ONGOING	0x2
+
+/* Relative offset of IRQ sources to AMBA Plug&Play */
+#define GRCAN_IRQIX_IRQ		0
+#define GRCAN_IRQIX_TXSYNC	1
+#define GRCAN_IRQIX_RXSYNC	2
+
+#define GRCAN_IRQ_PASS		0x00001
+#define GRCAN_IRQ_OFF		0x00002
+#define GRCAN_IRQ_OR		0x00004
+#define GRCAN_IRQ_RXAHBERR	0x00008
+#define GRCAN_IRQ_TXAHBERR	0x00010
+#define GRCAN_IRQ_RXIRQ		0x00020
+#define GRCAN_IRQ_TXIRQ		0x00040
+#define GRCAN_IRQ_RXFULL	0x00080
+#define GRCAN_IRQ_TXEMPTY	0x00100
+#define GRCAN_IRQ_RX		0x00200
+#define GRCAN_IRQ_TX		0x00400
+#define GRCAN_IRQ_RXSYNC	0x00800
+#define GRCAN_IRQ_TXSYNC	0x01000
+#define GRCAN_IRQ_RXERRCTR	0x02000
+#define GRCAN_IRQ_TXERRCTR	0x04000
+#define GRCAN_IRQ_RXMISS	0x08000
+#define GRCAN_IRQ_TXLOSS	0x10000
+
+#define GRCAN_IRQ_NONE	0
+#define GRCAN_IRQ_ALL							\
+	(GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR			\
+	 | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR			\
+	 | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ				\
+	 | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY				\
+	 | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC		\
+	 | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR			\
+	 | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS			\
+	 | GRCAN_IRQ_TXLOSS)
+
+#define GRCAN_IRQ_ERRCTR_RELATED (GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
+				  | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
+#define GRCAN_IRQ_ERRORS (GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR	\
+			  | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR	\
+			  | GRCAN_IRQ_TXLOSS)
+#define GRCAN_IRQ_DEFAULT (GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)
+
+#define GRCAN_MSG_SIZE		16
+
+#define GRCAN_MSG_IDE		0x80000000
+#define GRCAN_MSG_RTR		0x40000000
+#define GRCAN_MSG_BID		0x1ffc0000
+#define GRCAN_MSG_EID		0x1fffffff
+#define GRCAN_MSG_IDE_BIT	31
+#define GRCAN_MSG_RTR_BIT	30
+#define GRCAN_MSG_BID_BIT	18
+#define GRCAN_MSG_EID_BIT	0
+
+#define GRCAN_MSG_DLC		0xf0000000
+#define GRCAN_MSG_TXERRC	0x00ff0000
+#define GRCAN_MSG_RXERRC	0x0000ff00
+#define GRCAN_MSG_DLC_BIT	28
+#define GRCAN_MSG_TXERRC_BIT	16
+#define GRCAN_MSG_RXERRC_BIT	8
+#define GRCAN_MSG_AHBERR	0x00000008
+#define GRCAN_MSG_OR		0x00000004
+#define GRCAN_MSG_OFF		0x00000002
+#define GRCAN_MSG_PASS		0x00000001
+
+#define GRCAN_MSG_DATA_SLOT_INDEX(i) (2 + (i) / 4)
+#define GRCAN_MSG_DATA_SHIFT(i) ((3 - (i) % 4) * 8)
+
+#define GRCAN_BUFFER_ALIGNMENT		1024
+#define GRCAN_DEFAULT_BUFFER_SIZE	1024
+#define GRCAN_VALID_TR_SIZE_MASK	0x001fffc0
+
+#define GRCAN_INVALID_BUFFER_SIZE(s)			\
+	((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))
+
+#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
+#error "Invalid default buffer size"
+#endif
+
+struct grcan_dma_buffer {
+	size_t size;
+	void *buf;
+	dma_addr_t handle;
+};
+
+struct grcan_dma {
+	size_t base_size;
+	void *base_buf;
+	dma_addr_t base_handle;
+	struct grcan_dma_buffer tx;
+	struct grcan_dma_buffer rx;
+};
+
+/* GRCAN configuration parameters */
+struct grcan_device_config {
+	unsigned short enable0;
+	unsigned short enable1;
+	unsigned short select;
+	unsigned int txsize;
+	unsigned int rxsize;
+};
+
+#define GRCAN_DEFAULT_DEVICE_CONFIG {				\
+		.enable0	= 0,				\
+		.enable1	= 0,				\
+		.select		= 0,				\
+		.txsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
+		.rxsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
+		}
+
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION	0x4100
+#define GRLIB_VERSION_MASK		0xffff
+
+/* GRCAN private data structure */
+struct grcan_priv {
+	struct can_priv can;	/* must be the first member */
+	struct net_device *dev;
+	struct napi_struct napi;
+
+	struct grcan_registers __iomem *regs;	/* ioremap'ed registers */
+	struct grcan_device_config config;
+	struct grcan_dma dma;
+
+	struct sk_buff **echo_skb;	/* We allocate this on our own */
+	u8 *txdlc;			/* Length of queued frames */
+
+	/* The echo skb pointer, pointing into echo_skb and indicating which
+	 * frames can be echoed back. See the "Notes on the tx cyclic buffer
+	 * handling"-comment for grcan_start_xmit for more details.
+	 */
+	u32 eskbp;
+
+	/* Lock for controlling changes to the netif tx queue state, accesses to
+	 * the echo_skb pointer eskbp and for making sure that a running reset
+	 * and/or a close of the interface is done without interference from
+	 * other parts of the code.
+	 *
+	 * The echo_skb pointer, eskbp, should only be accessed under this lock
+	 * as it can be changed in several places and together with decisions on
+	 * whether to wake up the tx queue.
+	 *
+	 * The tx queue must never be woken up if there is a running reset or
+	 * close in progress.
+	 *
+	 * A running reset (see below on need_txbug_workaround) should never be
+	 * done if the interface is closing down and several running resets
+	 * should never be scheduled simultaneously.
+	 */
+	spinlock_t lock;
+
+	/* Whether a workaround is needed due to a bug in older hardware. In
+	 * this case, the driver both tries to prevent the bug from being
+	 * triggered and recovers, if the bug nevertheless happens, by doing a
+	 * running reset. A running reset resets the device and continues from
+	 * where it was without being noticeable from outside the driver (apart
+	 * from slight delays).
+	 */
+	bool need_txbug_workaround;
+
+	/* To trigger initiation of a running reset and to trigger the running
+	 * reset itself, respectively, when the device hangs due to a txbug.
+	 */
+	struct timer_list hang_timer;
+	struct timer_list rr_timer;
+
+	/* To avoid waking up the netif queue and restarting timers
+	 * when a reset is scheduled or when closing of the device is
+	 * in progress
+	 */
+	bool resetting;
+	bool closing;
+};
+
+/* Wait time for a short wait for ongoing to clear */
+#define GRCAN_SHORTWAIT_USECS	10
+
+/* Limit on the number of transmitted bits of an eff frame according to the CAN
+ * specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
+ * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and 7
+ * bits end of frame
+ */
+#define GRCAN_EFF_FRAME_MAX_BITS	(1+32+6+8*8+16+2+7)
+
+#if defined(__BIG_ENDIAN)
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+	return ioread32be(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+	iowrite32be(val, reg);
+}
+#else
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+	return ioread32(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+	iowrite32(val, reg);
+}
+#endif
+
+static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
+{
+	grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
+}
+
+static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
+{
+	grcan_write_reg(reg, grcan_read_reg(reg) | mask);
+}
+
+static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
+{
+	return grcan_read_reg(reg) & mask;
+}
+
+static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
+{
+	u32 old = grcan_read_reg(reg);
+
+	grcan_write_reg(reg, (old & ~mask) | (value & mask));
+}
+
+/* a and b should both be in [0,size] and a == b == size should not hold */
+static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
+{
+	u32 sum = a + b;
+
+	if (sum < size)
+		return sum;
+	else
+		return sum - size;
+}
+
+/* a and b should both be in [0,size) */
+static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
+{
+	return grcan_ring_add(a, size - b, size);
+}
+
+/* Available slots for new transmissions */
+static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
+{
+	u32 slots = txsize / GRCAN_MSG_SIZE - 1;
+	u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;
+
+	return slots - used;
+}
+
+/* Configuration parameters that can be set via module parameters */
+static struct grcan_device_config grcan_module_config =
+	GRCAN_DEFAULT_DEVICE_CONFIG;
+
+static const struct can_bittiming_const grcan_bittiming_const = {
+	.name		= DRV_NAME,
+	.tseg1_min	= GRCAN_CONF_PS1_MIN + 1,
+	.tseg1_max	= GRCAN_CONF_PS1_MAX + 1,
+	.tseg2_min	= GRCAN_CONF_PS2_MIN,
+	.tseg2_max	= GRCAN_CONF_PS2_MAX,
+	.sjw_max	= GRCAN_CONF_RSJ_MAX,
+	.brp_min	= GRCAN_CONF_SCALER_MIN + 1,
+	.brp_max	= GRCAN_CONF_SCALER_MAX + 1,
+	.brp_inc	= GRCAN_CONF_SCALER_INC,
+};
+
+static int grcan_set_bittiming(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct can_bittiming *bt = &priv->can.bittiming;
+	u32 timing = 0;
+	int bpr, rsj, ps1, ps2, scaler;
+
+	/* Should never happen - function will not be called when
+	 * device is up
+	 */
+	if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
+		return -EBUSY;
+
+	bpr = 0; /* Note bpr and brp are different concepts */
+	rsj = bt->sjw;
+	ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
+	ps2 = bt->phase_seg2;
+	scaler = (bt->brp - 1);
+	netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
+		   bpr, rsj, ps1, ps2, scaler);
+	if (!(ps1 > ps2)) {
+		netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
+			   ps1, ps2);
+		return -EINVAL;
+	}
+	if (!(ps2 >= rsj)) {
+		netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
+			   ps2, rsj);
+		return -EINVAL;
+	}
+
+	timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
+	timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
+	timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
+	timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
+	timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
+	netdev_info(dev, "setting timing=0x%x\n", timing);
+	grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);
+
+	return 0;
+}
+
+static int grcan_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	u32 status = grcan_read_reg(&regs->stat);
+
+	bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
+	bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
+	return 0;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget);
+
+/* Reset device, but keep configuration information */
+static void grcan_reset(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	u32 config = grcan_read_reg(&regs->conf);
+
+	grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+	grcan_write_reg(&regs->conf, config);
+
+	priv->eskbp = grcan_read_reg(&regs->txrd);
+	priv->can.state = CAN_STATE_STOPPED;
+
+	/* Turn off hardware filtering - regs->rxcode set to 0 by reset */
+	grcan_write_reg(&regs->rxmask, 0);
+}
+
+/* stop device without changing any configurations */
+static void grcan_stop_hardware(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+
+	grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
+	grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+	grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+	grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
+}
+
+/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
+ * is true and free them otherwise.
+ *
+ * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
+ * continue until priv->eskbp catches up to regs->txrd.
+ *
+ * priv->lock *must* be held when calling this function
+ */
+static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	struct net_device_stats *stats = &dev->stats;
+	int i, work_done;
+
+	/* Updates to priv->eskbp and wake-ups of the queue need to
+	 * be atomic towards the reads of priv->eskbp and shut-downs
+	 * of the queue in grcan_start_xmit.
+	 */
+	u32 txrd = grcan_read_reg(&regs->txrd);
+
+	for (work_done = 0; work_done < budget || budget < 0; work_done++) {
+		if (priv->eskbp == txrd)
+			break;
+		i = priv->eskbp / GRCAN_MSG_SIZE;
+		if (echo) {
+			/* Normal echo of messages */
+			stats->tx_packets++;
+			stats->tx_bytes += priv->txdlc[i];
+			priv->txdlc[i] = 0;
+			can_get_echo_skb(dev, i);
+		} else {
+			/* For cleanup of untransmitted messages */
+			can_free_echo_skb(dev, i);
+		}
+
+		priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
+					     dma->tx.size);
+		txrd = grcan_read_reg(&regs->txrd);
+	}
+	return work_done;
+}
+
+static void grcan_lost_one_shot_frame(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	u32 txrd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	catch_up_echo_skb(dev, -1, true);
+
+	if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
+		/* Should never happen */
+		netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
+	} else {
+		/* By the time a GRCAN_IRQ_TXLOSS is generated in
+		 * one-shot mode there is no problem in writing
+		 * to TXRD even in versions of the hardware in
+		 * which GRCAN_TXCTRL_ONGOING is not cleared properly
+		 * in one-shot mode.
+		 */
+
+		/* Skip message and discard echo-skb */
+		txrd = grcan_read_reg(&regs->txrd);
+		txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
+		grcan_write_reg(&regs->txrd, txrd);
+		catch_up_echo_skb(dev, -1, false);
+
+		if (!priv->resetting && !priv->closing &&
+		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
+			netif_wake_queue(dev);
+			grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+		}
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_err(struct net_device *dev, u32 sources, u32 status)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame cf;
+
+	/* Zero potential error_frame */
+	memset(&cf, 0, sizeof(cf));
+
+	/* Message lost interrupt. This might be due to an arbitration error, but
+	 * is also triggered when there is no one else on the can bus or when
+	 * there is a problem with the hardware interface or the bus itself. As
+	 * arbitration errors cannot be singled out, no error frames are
+	 * generated reporting this event as an arbitration error.
+	 */
+	if (sources & GRCAN_IRQ_TXLOSS) {
+		/* Take care of failed one-shot transmit */
+		if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+			grcan_lost_one_shot_frame(dev);
+
+		/* Stop printing as soon as error passive or bus off is in
+		 * effect to limit the amount of txloss debug printouts.
+		 */
+		if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
+			netdev_dbg(dev, "tx message lost\n");
+			stats->tx_errors++;
+		}
+	}
+
+	/* Conditions dealing with the error counters. There is no interrupt for
+	 * error warning, but there are interrupts for increases of the error
+	 * counters.
+	 */
+	if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
+	    (status & GRCAN_STAT_ERRCTR_RELATED)) {
+		enum can_state state = priv->can.state;
+		enum can_state oldstate = state;
+		u32 txerr = (status & GRCAN_STAT_TXERRCNT)
+			>> GRCAN_STAT_TXERRCNT_BIT;
+		u32 rxerr = (status & GRCAN_STAT_RXERRCNT)
+			>> GRCAN_STAT_RXERRCNT_BIT;
+
+		/* Figure out current state */
+		if (status & GRCAN_STAT_OFF) {
+			state = CAN_STATE_BUS_OFF;
+		} else if (status & GRCAN_STAT_PASS) {
+			state = CAN_STATE_ERROR_PASSIVE;
+		} else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
+			   rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
+			state = CAN_STATE_ERROR_WARNING;
+		} else {
+			state = CAN_STATE_ERROR_ACTIVE;
+		}
+
+		/* Handle and report state changes */
+		if (state != oldstate) {
+			switch (state) {
+			case CAN_STATE_BUS_OFF:
+				netdev_dbg(dev, "bus-off\n");
+				netif_carrier_off(dev);
+				priv->can.can_stats.bus_off++;
+
+				/* Prevent the hardware from recovering from bus
+				 * off on its own if restart is disabled.
+				 */
+				if (!priv->can.restart_ms)
+					grcan_stop_hardware(dev);
+
+				cf.can_id |= CAN_ERR_BUSOFF;
+				break;
+
+			case CAN_STATE_ERROR_PASSIVE:
+				netdev_dbg(dev, "Error passive condition\n");
+				priv->can.can_stats.error_passive++;
+
+				cf.can_id |= CAN_ERR_CRTL;
+				if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+					cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+				if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+					cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+				break;
+
+			case CAN_STATE_ERROR_WARNING:
+				netdev_dbg(dev, "Error warning condition\n");
+				priv->can.can_stats.error_warning++;
+
+				cf.can_id |= CAN_ERR_CRTL;
+				if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+					cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
+				if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+					cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
+				break;
+
+			case CAN_STATE_ERROR_ACTIVE:
+				netdev_dbg(dev, "Error active condition\n");
+				cf.can_id |= CAN_ERR_CRTL;
+				break;
+
+			default:
+				/* There are no others at this point */
+				break;
+			}
+			cf.data[6] = txerr;
+			cf.data[7] = rxerr;
+			priv->can.state = state;
+		}
+
+		/* Report automatic restarts */
+		if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
+			unsigned long flags;
+
+			cf.can_id |= CAN_ERR_RESTARTED;
+			netdev_dbg(dev, "restarted\n");
+			priv->can.can_stats.restarts++;
+			netif_carrier_on(dev);
+
+			spin_lock_irqsave(&priv->lock, flags);
+
+			if (!priv->resetting && !priv->closing) {
+				u32 txwr = grcan_read_reg(&regs->txwr);
+
+				if (grcan_txspace(dma->tx.size, txwr,
+						  priv->eskbp))
+					netif_wake_queue(dev);
+			}
+
+			spin_unlock_irqrestore(&priv->lock, flags);
+		}
+	}
+
+	/* Data overrun interrupt */
+	if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
+		netdev_dbg(dev, "got data overrun interrupt\n");
+		stats->rx_over_errors++;
+		stats->rx_errors++;
+
+		cf.can_id |= CAN_ERR_CRTL;
+		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+	}
+
+	/* AHB bus error interrupts (not CAN bus errors) - shut down the
+	 * device.
+	 */
+	if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
+	    (status & GRCAN_STAT_AHBERR)) {
+		char *txrx = "";
+		unsigned long flags;
+
+		if (sources & GRCAN_IRQ_TXAHBERR) {
+			txrx = "on tx ";
+			stats->tx_errors++;
+		} else if (sources & GRCAN_IRQ_RXAHBERR) {
+			txrx = "on rx ";
+			stats->rx_errors++;
+		}
+		netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
+			   txrx);
+
+		spin_lock_irqsave(&priv->lock, flags);
+
+		/* Prevent anything from being enabled again and halt the device */
+		priv->closing = true;
+		netif_stop_queue(dev);
+		grcan_stop_hardware(dev);
+		priv->can.state = CAN_STATE_STOPPED;
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	/* Pass on error frame if something to report,
+	 * i.e. id contains some information
+	 */
+	if (cf.can_id) {
+		struct can_frame *skb_cf;
+		struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);
+
+		if (skb == NULL) {
+			netdev_dbg(dev, "could not allocate error frame\n");
+			return;
+		}
+		skb_cf->can_id |= cf.can_id;
+		memcpy(skb_cf->data, cf.data, sizeof(cf.data));
+
+		netif_rx(skb);
+	}
+}
+
+static irqreturn_t grcan_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	u32 sources, status;
+
+	/* Find out the source */
+	sources = grcan_read_reg(&regs->pimsr);
+	if (!sources)
+		return IRQ_NONE;
+	grcan_write_reg(&regs->picr, sources);
+	status = grcan_read_reg(&regs->stat);
+
+	/* If we got TX progress, the device has not hung,
+	 * so disable the hang timer
+	 */
+	if (priv->need_txbug_workaround &&
+	    (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
+		del_timer(&priv->hang_timer);
+	}
+
+	/* Frame(s) received or transmitted */
+	if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
+		/* Disable tx/rx interrupts and schedule poll(). No need for
+		 * locking as interference from a running reset at worst leads
+		 * to an extra interrupt.
+		 */
+		grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+		napi_schedule(&priv->napi);
+	}
+
+	/* (Potential) error conditions to take care of */
+	if (sources & GRCAN_IRQ_ERRORS)
+		grcan_err(dev, sources, status);
+
+	return IRQ_HANDLED;
+}
+
+/* Reset device and restart operations from where they were.
+ *
+ * This assumes that TXCTRL & RXCTRL are properly disabled and that RX
+ * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
+ * for single shot)
+ */
+static void grcan_running_reset(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	unsigned long flags;
+
+	/* This temporarily messes with eskbp, so we need to lock
+	 * priv->lock
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	priv->resetting = false;
+	del_timer(&priv->hang_timer);
+	del_timer(&priv->rr_timer);
+
+	if (!priv->closing) {
+		/* Save and reset - config register preserved by grcan_reset */
+		u32 imr = grcan_read_reg(&regs->imr);
+
+		u32 txaddr = grcan_read_reg(&regs->txaddr);
+		u32 txsize = grcan_read_reg(&regs->txsize);
+		u32 txwr = grcan_read_reg(&regs->txwr);
+		u32 txrd = grcan_read_reg(&regs->txrd);
+		u32 eskbp = priv->eskbp;
+
+		u32 rxaddr = grcan_read_reg(&regs->rxaddr);
+		u32 rxsize = grcan_read_reg(&regs->rxsize);
+		u32 rxwr = grcan_read_reg(&regs->rxwr);
+		u32 rxrd = grcan_read_reg(&regs->rxrd);
+
+		grcan_reset(dev);
+
+		/* Restore */
+		grcan_write_reg(&regs->txaddr, txaddr);
+		grcan_write_reg(&regs->txsize, txsize);
+		grcan_write_reg(&regs->txwr, txwr);
+		grcan_write_reg(&regs->txrd, txrd);
+		priv->eskbp = eskbp;
+
+		grcan_write_reg(&regs->rxaddr, rxaddr);
+		grcan_write_reg(&regs->rxsize, rxsize);
+		grcan_write_reg(&regs->rxwr, rxwr);
+		grcan_write_reg(&regs->rxrd, rxrd);
+
+		/* Turn on device again */
+		grcan_write_reg(&regs->imr, imr);
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE
+				| (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+				   ? GRCAN_TXCTRL_SINGLE : 0));
+		grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+		grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+		/* Start the queue if there is space and listen-only mode is
+		 * not enabled
+		 */
+		if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
+		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+			netif_wake_queue(dev);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	netdev_err(dev, "Device reset and restored\n");
+}
+
+/* Waiting time in usecs corresponding to the transmission of three maximum-
+ * sized can frames at the given bitrate (in bits/sec). Waiting for this amount
+ * of time makes sure that the can controller has time to finish sending or
+ * receiving a frame with a good margin.
+ *
+ * usecs/sec * number of frames * bits/frame / bits/sec
+ */
+static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
+{
+	return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
+}
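+
+/* Worked example (illustrative only; the real frame size constant is
+ * GRCAN_EFF_FRAME_MAX_BITS, defined earlier in this file): assuming a
+ * worst-case frame of 160 bits and a bitrate of 250000 bits/sec, the
+ * expression above yields 1000000 * 3 * 160 / 250000 = 1920 usecs.
+ */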
+
+/* Set the timer so that it will not fire until after a period in which the
+ * can controller has a good margin to finish transmitting a frame, unless it
+ * has hung
+ */
+static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
+{
+	u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));
+
+	mod_timer(timer, jiffies + wait_jiffies);
+}
+
+/* Disable channels and schedule a running reset */
+static void grcan_initiate_running_reset(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	unsigned long flags;
+
+	netdev_err(dev, "Device seems hanged - reset scheduled\n");
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* The main body of this function must never be executed again
+	 * until after an execution of grcan_running_reset
+	 */
+	if (!priv->resetting && !priv->closing) {
+		priv->resetting = true;
+		netif_stop_queue(dev);
+		grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+		grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+		grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_free_dma_buffers(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_dma *dma = &priv->dma;
+
+	dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+			  dma->base_handle);
+	memset(dma, 0, sizeof(*dma));
+}
+
+static int grcan_allocate_dma_buffers(struct net_device *dev,
+				      size_t tsize, size_t rsize)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_dma *dma = &priv->dma;
+	struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
+	struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
+	size_t shift;
+
+	/* Need a whole multiple of GRCAN_BUFFER_ALIGNMENT for the large,
+	 * i.e. first, buffer
+	 */
+	size_t maxs = max(tsize, rsize);
+	size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);
+
+	/* Put the small buffer after that */
+	size_t ssize = min(tsize, rsize);
+
+	/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
+	dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
+	dma->base_buf = dma_alloc_coherent(&dev->dev,
+					   dma->base_size,
+					   &dma->base_handle,
+					   GFP_KERNEL);
+
+	if (!dma->base_buf)
+		return -ENOMEM;
+
+	dma->tx.size = tsize;
+	dma->rx.size = rsize;
+
+	large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
+	small->handle = large->handle + lsize;
+	shift = large->handle - dma->base_handle;
+
+	large->buf = dma->base_buf + shift;
+	small->buf = large->buf + lsize;
+
+	return 0;
+}
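+
+/* Illustrative layout of the allocation made above, assuming rsize > tsize
+ * (so the rx buffer is the "large" one placed first):
+ *
+ *   base_handle --(align up)--> rx handle, lsize bytes
+ *                               tx handle, ssize bytes (directly after)
+ *
+ * The extra GRCAN_BUFFER_ALIGNMENT bytes added to base_size guarantee that
+ * the aligned large buffer plus the small buffer still fit within the
+ * allocation.
+ */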
+
+/* priv->lock *must* be held when calling this function */
+static int grcan_start(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	u32 confop, txctrl;
+
+	grcan_reset(dev);
+
+	grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
+	grcan_write_reg(&regs->txsize, priv->dma.tx.size);
+	/* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */
+
+	grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
+	grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
+	/* regs->rxwr and regs->rxrd already set to 0 by reset */
+
+	/* Enable interrupts */
+	grcan_read_reg(&regs->pir);
+	grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);
+
+	/* Enable interfaces, channels and device */
+	confop = GRCAN_CONF_ABORT
+		| (priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0)
+		| (priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0)
+		| (priv->config.select ? GRCAN_CONF_SELECT : 0)
+		| (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
+		   GRCAN_CONF_SILENT : 0)
+		| (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+		   GRCAN_CONF_SAM : 0);
+	grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
+	txctrl = GRCAN_TXCTRL_ENABLE
+		| (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+		   ? GRCAN_TXCTRL_SINGLE : 0);
+	grcan_write_reg(&regs->txctrl, txctrl);
+	grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+	grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	return 0;
+}
+
+static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+	int err = 0;
+
+	if (mode == CAN_MODE_START) {
+		/* This might be called to restart the device to recover from
+		 * bus off errors
+		 */
+		spin_lock_irqsave(&priv->lock, flags);
+		if (priv->closing || priv->resetting) {
+			err = -EBUSY;
+		} else {
+			netdev_info(dev, "Restarting device\n");
+			grcan_start(dev);
+			if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+				netif_wake_queue(dev);
+		}
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return err;
+	}
+	return -EOPNOTSUPP;
+}
+
+static int grcan_open(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_dma *dma = &priv->dma;
+	unsigned long flags;
+	int err;
+
+	/* Allocate memory */
+	err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
+					 priv->config.rxsize);
+	if (err) {
+		netdev_err(dev, "could not allocate DMA buffers\n");
+		return err;
+	}
+
+	priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb),
+				 GFP_KERNEL);
+	if (!priv->echo_skb) {
+		err = -ENOMEM;
+		goto exit_free_dma_buffers;
+	}
+	priv->can.echo_skb_max = dma->tx.size;
+	priv->can.echo_skb = priv->echo_skb;
+
+	priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL);
+	if (!priv->txdlc) {
+		err = -ENOMEM;
+		goto exit_free_echo_skb;
+	}
+
+	/* Get can device up */
+	err = open_candev(dev);
+	if (err)
+		goto exit_free_txdlc;
+
+	err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
+			  dev->name, dev);
+	if (err)
+		goto exit_close_candev;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	napi_enable(&priv->napi);
+	grcan_start(dev);
+	if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+		netif_start_queue(dev);
+	priv->resetting = false;
+	priv->closing = false;
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+
+exit_close_candev:
+	close_candev(dev);
+exit_free_txdlc:
+	kfree(priv->txdlc);
+exit_free_echo_skb:
+	kfree(priv->echo_skb);
+exit_free_dma_buffers:
+	grcan_free_dma_buffers(dev);
+	return err;
+}
+
+static int grcan_close(struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	napi_disable(&priv->napi);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	priv->closing = true;
+	if (priv->need_txbug_workaround) {
+		del_timer_sync(&priv->hang_timer);
+		del_timer_sync(&priv->rr_timer);
+	}
+	netif_stop_queue(dev);
+	grcan_stop_hardware(dev);
+	priv->can.state = CAN_STATE_STOPPED;
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+
+	grcan_free_dma_buffers(dev);
+	priv->can.echo_skb_max = 0;
+	priv->can.echo_skb = NULL;
+	kfree(priv->echo_skb);
+	kfree(priv->txdlc);
+
+	return 0;
+}
+
+static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+	int work_done;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	work_done = catch_up_echo_skb(dev, budget, true);
+	if (work_done) {
+		if (!priv->resetting && !priv->closing &&
+		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+			netif_wake_queue(dev);
+
+		/* With napi we don't get TX interrupts for a while,
+		 * so prevent a running reset while catching up
+		 */
+		if (priv->need_txbug_workaround)
+			del_timer(&priv->hang_timer);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return work_done;
+}
+
+static int grcan_receive(struct net_device *dev, int budget)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u32 wr, rd, startrd;
+	u32 *slot;
+	u32 i, rtr, eff, j, shift;
+	int work_done = 0;
+
+	rd = grcan_read_reg(&regs->rxrd);
+	startrd = rd;
+	for (work_done = 0; work_done < budget; work_done++) {
+		/* Check for packet to receive */
+		wr = grcan_read_reg(&regs->rxwr);
+		if (rd == wr)
+			break;
+
+		/* Take care of packet */
+		skb = alloc_can_skb(dev, &cf);
+		if (skb == NULL) {
+			netdev_err(dev,
+				   "dropping frame: skb allocation failed\n");
+			stats->rx_dropped++;
+			continue;
+		}
+
+		slot = dma->rx.buf + rd;
+		eff = slot[0] & GRCAN_MSG_IDE;
+		rtr = slot[0] & GRCAN_MSG_RTR;
+		if (eff) {
+			cf->can_id = ((slot[0] & GRCAN_MSG_EID)
+				      >> GRCAN_MSG_EID_BIT);
+			cf->can_id |= CAN_EFF_FLAG;
+		} else {
+			cf->can_id = ((slot[0] & GRCAN_MSG_BID)
+				      >> GRCAN_MSG_BID_BIT);
+		}
+		cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
+					  >> GRCAN_MSG_DLC_BIT);
+		if (rtr) {
+			cf->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < cf->can_dlc; i++) {
+				j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+				shift = GRCAN_MSG_DATA_SHIFT(i);
+				cf->data[i] = (u8)(slot[j] >> shift);
+			}
+		}
+		netif_receive_skb(skb);
+
+		/* Update statistics and read pointer */
+		stats->rx_packets++;
+		stats->rx_bytes += cf->can_dlc;
+		rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
+	}
+
+	/* Make sure everything is read before allowing hardware to
+	 * use the memory
+	 */
+	mb();
+
+	/* Update read pointer - no need to check for ongoing */
+	if (likely(rd != startrd))
+		grcan_write_reg(&regs->rxrd, rd);
+
+	return work_done;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget)
+{
+	struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
+	struct net_device *dev = priv->dev;
+	struct grcan_registers __iomem *regs = priv->regs;
+	unsigned long flags;
+	int tx_work_done, rx_work_done;
+	int rx_budget = budget / 2;
+	int tx_budget = budget - rx_budget;
+
+	/* Half of the budget for receiving messages */
+	rx_work_done = grcan_receive(dev, rx_budget);
+
+	/* The other half of the budget is for transmitting messages, as that
+	 * can trigger echo frames being received
+	 */
+	tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+
+	if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+		napi_complete(napi);
+
+		/* Guarantee no interference with a running reset that otherwise
+		 * could turn off interrupts.
+		 */
+		spin_lock_irqsave(&priv->lock, flags);
+
+		/* Enable tx and rx interrupts again. No need to check
+		 * priv->closing as napi_disable in grcan_close is waiting for
+		 * scheduled napi calls to finish.
+		 */
+		grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	return rx_work_done + tx_work_done;
+}
+
+/* Work around the tx bug by waiting a while for the risky situation to clear.
+ * If that fails, drop a frame in one-shot mode or indicate a busy device
+ * otherwise.
+ *
+ * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
+ * value that should be returned by grcan_start_xmit when aborting the xmit.
+ */
+static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
+				  u32 txwr, u32 oneshotmode,
+				  netdev_tx_t *netdev_tx_status)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	int i;
+	unsigned long flags;
+
+	/* Wait a while for ongoing to be cleared or read pointer to catch up to
+	 * write pointer. The latter is needed due to a bug in older versions of
+	 * GRCAN in which ONGOING is not properly cleared in one-shot mode when a
+	 * transmission fails.
+	 */
+	for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
+		udelay(1);
+		if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
+		    grcan_read_reg(&regs->txrd) == txwr) {
+			return 0;
+		}
+	}
+
+	/* Clean up, in case the situation was not resolved */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!priv->resetting && !priv->closing) {
+		/* Queue might have been stopped earlier in grcan_start_xmit */
+		if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
+			netif_wake_queue(dev);
+		/* Set a timer to resolve a hung tx controller */
+		if (!timer_pending(&priv->hang_timer))
+			grcan_reset_timer(&priv->hang_timer,
+					  priv->can.bittiming.bitrate);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (oneshotmode) {
+		/* In one-shot mode we should never end up here because
+		 * then the interrupt handler increases txrd on TXLOSS,
+		 * but it is consistent with one-shot mode to drop the
+		 * frame in this case.
+		 */
+		kfree_skb(skb);
+		*netdev_tx_status = NETDEV_TX_OK;
+	} else {
+		/* In normal mode the socket-can transmission queue gets
+		 * to keep the frame so that it can be retransmitted
+		 * later
+		 */
+		*netdev_tx_status = NETDEV_TX_BUSY;
+	}
+	return -EBUSY;
+}
+
+/* Notes on the tx cyclic buffer handling:
+ *
+ * regs->txwr	- the next slot for the driver to put data to be sent
+ * regs->txrd	- the next slot for the device to read data
+ * priv->eskbp	- the next slot for the driver to call can_put_echo_skb for
+ *
+ * grcan_start_xmit can enqueue more messages as long as regs->txwr does
+ * not reach priv->eskbp (within a gap of one message)
+ *
+ * The device sends messages until regs->txrd reaches regs->txwr
+ *
+ * The interrupt handler calls can_put_echo_skb until
+ * priv->eskbp reaches regs->txrd
+ */
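+/* Illustrative walk-through of the invariant above (not a specification):
+ * when eskbp == txrd == txwr the ring is empty and grcan_start_xmit is free
+ * to enqueue frames. When the remaining space drops to a single slot, the
+ * slot currently being written, the queue is stopped, and it is woken again
+ * only once echoing of completed frames has advanced priv->eskbp and thereby
+ * freed space.
+ */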
+static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
+				    struct net_device *dev)
+{
+	struct grcan_priv *priv = netdev_priv(dev);
+	struct grcan_registers __iomem *regs = priv->regs;
+	struct grcan_dma *dma = &priv->dma;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u32 id, txwr, txrd, space, txctrl;
+	int slotindex;
+	u32 *slot;
+	u32 i, rtr, eff, dlc, tmp, err;
+	int j, shift;
+	unsigned long flags;
+	u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	/* Trying to transmit in silent mode will generate error interrupts, but
+	 * this should never happen - the queue should not have been started.
+	 */
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		return NETDEV_TX_BUSY;
+
+	/* Reads of priv->eskbp and shut-downs of the queue needs to
+	 * be atomic towards the updates to priv->eskbp and wake-ups
+	 * of the queue in the interrupt handler.
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	txwr = grcan_read_reg(&regs->txwr);
+	space = grcan_txspace(dma->tx.size, txwr, priv->eskbp);
+
+	slotindex = txwr / GRCAN_MSG_SIZE;
+	slot = dma->tx.buf + txwr;
+
+	if (unlikely(space == 1))
+		netif_stop_queue(dev);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	/* End of critical section */
+
+	/* This should never happen. If the circular buffer is full, the
+	 * queue should already have been stopped (see netif_stop_queue above).
+	 */
+	if (unlikely(!space)) {
+		netdev_err(dev, "No buffer space, but queue is non-stopped.\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Convert and write CAN message to DMA buffer */
+	eff = cf->can_id & CAN_EFF_FLAG;
+	rtr = cf->can_id & CAN_RTR_FLAG;
+	id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
+	dlc = cf->can_dlc;
+	if (eff)
+		tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
+	else
+		tmp = (id << GRCAN_MSG_BID_BIT) & GRCAN_MSG_BID;
+	slot[0] = (eff ? GRCAN_MSG_IDE : 0) | (rtr ? GRCAN_MSG_RTR : 0) | tmp;
+
+	slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC);
+	slot[2] = 0;
+	slot[3] = 0;
+	for (i = 0; i < dlc; i++) {
+		j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+		shift = GRCAN_MSG_DATA_SHIFT(i);
+		slot[j] |= cf->data[i] << shift;
+	}
+
+	/* Check that the channel has not been disabled. These cases
+	 * should never happen
+	 */
+	txctrl = grcan_read_reg(&regs->txctrl);
+	if (!(txctrl & GRCAN_TXCTRL_ENABLE))
+		netdev_err(dev, "tx channel spuriously disabled\n");
+
+	if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE))
+		netdev_err(dev, "one-shot mode spuriously disabled\n");
+
+	/* Bug workaround for old versions of grcan where updating txwr
+	 * in the same clock cycle as the controller updates txrd to
+	 * the current txwr could hang the can controller
+	 */
+	if (priv->need_txbug_workaround) {
+		txrd = grcan_read_reg(&regs->txrd);
+		if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) {
+			netdev_tx_t txstatus;
+
+			err = grcan_txbug_workaround(dev, skb, txwr,
+						     oneshotmode, &txstatus);
+			if (err)
+				return txstatus;
+		}
+	}
+
+	/* Prepare skb for echoing. This must be after the bug workaround above
+	 * as ownership of the skb is passed on by calling can_put_echo_skb.
+	 * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to
+	 * can_put_echo_skb would be an error unless other measures are
+	 * taken.
+	 */
+	priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
+	can_put_echo_skb(skb, dev, slotindex);
+
+	/* Make sure everything is written before allowing hardware to
+	 * read from the memory
+	 */
+	wmb();
+
+	/* Update write pointer to start transmission */
+	grcan_write_reg(&regs->txwr,
+			grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size));
+
+	return NETDEV_TX_OK;
+}
+
+/* ========== Setting up sysfs interface and module parameters ========== */
+
+#define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1)
+
+#define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc)		\
+	static void grcan_sanitize_##name(struct platform_device *pd)	\
+	{								\
+		struct grcan_device_config grcan_default_config		\
+			= GRCAN_DEFAULT_DEVICE_CONFIG;			\
+		if (valcheckf(grcan_module_config.name)) {		\
+			dev_err(&pd->dev,				\
+				"Invalid module parameter value for "	\
+				#name " - setting default\n");		\
+			grcan_module_config.name =			\
+				grcan_default_config.name;		\
+		}							\
+	}								\
+	module_param_named(name, grcan_module_config.name,		\
+			   mtype, S_IRUGO);				\
+	MODULE_PARM_DESC(name, desc)
+
+#define GRCAN_CONFIG_ATTR(name, desc)					\
+	static ssize_t grcan_store_##name(struct device *sdev,		\
+					  struct device_attribute *att,	\
+					  const char *buf,		\
+					  size_t count)			\
+	{								\
+		struct net_device *dev = to_net_dev(sdev);		\
+		struct grcan_priv *priv = netdev_priv(dev);		\
+		u8 val;							\
+		int ret;						\
+		if (dev->flags & IFF_UP)				\
+			return -EBUSY;					\
+		ret = kstrtou8(buf, 0, &val);				\
+		if (ret < 0 || val > 1)					\
+			return -EINVAL;					\
+		priv->config.name = val;				\
+		return count;						\
+	}								\
+	static ssize_t grcan_show_##name(struct device *sdev,		\
+					 struct device_attribute *att,	\
+					 char *buf)			\
+	{								\
+		struct net_device *dev = to_net_dev(sdev);		\
+		struct grcan_priv *priv = netdev_priv(dev);		\
+		return sprintf(buf, "%d\n", priv->config.name);		\
+	}								\
+	static DEVICE_ATTR(name, S_IRUGO | S_IWUSR,			\
+			   grcan_show_##name,				\
+			   grcan_store_##name);				\
+	GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
+
+/* The following configuration options are made available both via module
+ * parameters and writable sysfs files. See the chapter about GRCAN in the
+ * documentation for the GRLIB VHDL library for further details.
+ */
+GRCAN_CONFIG_ATTR(enable0,
+		  "Configuration of physical interface 0. Determines\n"	\
+		  "the \"Enable 0\" bit of the configuration register.\n" \
+		  "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(enable1,
+		  "Configuration of physical interface 1. Determines\n"	\
+		  "the \"Enable 1\" bit of the configuration register.\n" \
+		  "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(select,
+		  "Select which physical interface to use.\n"	\
+		  "Format: 0 | 1\nDefault: 0\n");
+
+/* The tx and rx buffer size configuration options are only available via module
+ * parameters.
+ */
+GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+		   "Sets the size of the tx buffer.\n"			\
+		   "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \
+		   "Default: 1024\n");
+GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+		   "Sets the size of the rx buffer.\n"			\
+		   "Format: <unsigned int> where (size & ~0x1fffc0) == 0\n" \
+		   "Default: 1024\n");
+
+/* Function that makes sure that configuration options set via
+ * module parameters have valid values
+ */
+static void grcan_sanitize_module_config(struct platform_device *ofdev)
+{
+	grcan_sanitize_enable0(ofdev);
+	grcan_sanitize_enable1(ofdev);
+	grcan_sanitize_select(ofdev);
+	grcan_sanitize_txsize(ofdev);
+	grcan_sanitize_rxsize(ofdev);
+}
+
+static const struct attribute *const sysfs_grcan_attrs[] = {
+	/* Config attrs */
+	&dev_attr_enable0.attr,
+	&dev_attr_enable1.attr,
+	&dev_attr_select.attr,
+	NULL,
+};
+
+static const struct attribute_group sysfs_grcan_group = {
+	.name	= "grcan",
+	.attrs	= (struct attribute **)sysfs_grcan_attrs,
+};
+
+/* ========== Setting up the driver ========== */
+
+static const struct net_device_ops grcan_netdev_ops = {
+	.ndo_open	= grcan_open,
+	.ndo_stop	= grcan_close,
+	.ndo_start_xmit	= grcan_start_xmit,
+};
+
+static int grcan_setup_netdev(struct platform_device *ofdev,
+			      void __iomem *base,
+			      int irq, u32 ambafreq, bool txbug)
+{
+	struct net_device *dev;
+	struct grcan_priv *priv;
+	struct grcan_registers __iomem *regs;
+	int err;
+
+	dev = alloc_candev(sizeof(struct grcan_priv), 0);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->irq = irq;
+	dev->flags |= IFF_ECHO;
+	dev->netdev_ops = &grcan_netdev_ops;
+	dev->sysfs_groups[0] = &sysfs_grcan_group;
+
+	priv = netdev_priv(dev);
+	memcpy(&priv->config, &grcan_module_config,
+	       sizeof(struct grcan_device_config));
+	priv->dev = dev;
+	priv->regs = base;
+	priv->can.bittiming_const = &grcan_bittiming_const;
+	priv->can.do_set_bittiming = grcan_set_bittiming;
+	priv->can.do_set_mode = grcan_set_mode;
+	priv->can.do_get_berr_counter = grcan_get_berr_counter;
+	priv->can.clock.freq = ambafreq;
+	priv->can.ctrlmode_supported =
+		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT;
+	priv->need_txbug_workaround = txbug;
+
+	/* Discover if triple sampling is supported by hardware */
+	regs = priv->regs;
+	grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+	grcan_set_bits(&regs->conf, GRCAN_CONF_SAM);
+	if (grcan_read_bits(&regs->conf, GRCAN_CONF_SAM)) {
+		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+		dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n");
+	}
+
+	spin_lock_init(&priv->lock);
+
+	if (priv->need_txbug_workaround) {
+		init_timer(&priv->rr_timer);
+		priv->rr_timer.function = grcan_running_reset;
+		priv->rr_timer.data = (unsigned long)dev;
+
+		init_timer(&priv->hang_timer);
+		priv->hang_timer.function = grcan_initiate_running_reset;
+		priv->hang_timer.data = (unsigned long)dev;
+	}
+
+	netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+
+	SET_NETDEV_DEV(dev, &ofdev->dev);
+	dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
+		 priv->regs, dev->irq, priv->can.clock.freq);
+
+	err = register_candev(dev);
+	if (err)
+		goto exit_free_candev;
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	/* Reset device to allow bit-timing to be set. No need to call
+	 * grcan_reset at this stage. That is done in grcan_open.
+	 */
+	grcan_write_reg(&regs->ctrl, GRCAN_CTRL_RESET);
+
+	return 0;
+exit_free_candev:
+	free_candev(dev);
+	return err;
+}
+
+static int __devinit grcan_probe(struct platform_device *ofdev)
+{
+	struct device_node *np = ofdev->dev.of_node;
+	struct resource *res;
+	u32 sysid, ambafreq;
+	int irq, err;
+	void __iomem *base;
+	bool txbug = true;
+
+	/* Compare GRLIB version number with the first that does not
+	 * have the tx bug (see start_xmit)
+	 */
+	err = of_property_read_u32(np, "systemid", &sysid);
+	if (!err && ((sysid & GRLIB_VERSION_MASK)
+		     >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+		txbug = false;
+
+	err = of_property_read_u32(np, "freq", &ambafreq);
+	if (err) {
+		dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n");
+		goto exit_error;
+	}
+
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	base = devm_request_and_ioremap(&ofdev->dev, res);
+	if (!base) {
+		dev_err(&ofdev->dev, "couldn't map IO resource\n");
+		err = -EADDRNOTAVAIL;
+		goto exit_error;
+	}
+
+	irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ);
+	if (!irq) {
+		dev_err(&ofdev->dev, "no irq found\n");
+		err = -ENODEV;
+		goto exit_error;
+	}
+
+	grcan_sanitize_module_config(ofdev);
+
+	err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug);
+	if (err)
+		goto exit_dispose_irq;
+
+	return 0;
+
+exit_dispose_irq:
+	irq_dispose_mapping(irq);
+exit_error:
+	dev_err(&ofdev->dev,
+		"%s socket CAN driver initialization failed with error %d\n",
+		DRV_NAME, err);
+	return err;
+}
+
+static int __devexit grcan_remove(struct platform_device *ofdev)
+{
+	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+	struct grcan_priv *priv = netdev_priv(dev);
+
+	unregister_candev(dev); /* Will in turn call grcan_close */
+
+	irq_dispose_mapping(dev->irq);
+	dev_set_drvdata(&ofdev->dev, NULL);
+	netif_napi_del(&priv->napi);
+	free_candev(dev);
+
+	return 0;
+}
+
+static struct of_device_id grcan_match[] __devinitconst = {
+	{.name = "GAISLER_GRCAN"},
+	{.name = "01_03d"},
+	{.name = "GAISLER_GRHCAN"},
+	{.name = "01_034"},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, grcan_match);
+
+static struct platform_driver grcan_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = grcan_match,
+	},
+	.probe = grcan_probe,
+	.remove = __devexit_p(grcan_remove),
+};
+
+module_platform_driver(grcan_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 7edadee..c0bfb0a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1692,7 +1692,7 @@
 		return ret;
 
 	ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
-	if (ret <= 0) {
+	if (ret == 0) {
 		dev_info(mod->dev, "%s timed out\n", __func__);
 		return -ETIMEDOUT;
 	}
@@ -1718,7 +1718,7 @@
 		return ret;
 
 	ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
-	if (ret <= 0) {
+	if (ret == 0) {
 		dev_info(mod->dev, "%s timed out\n", __func__);
 		return -ETIMEDOUT;
 	}
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 799c354..514d020 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -396,6 +396,7 @@
 	{ .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
 	{},
 };
+MODULE_DEVICE_TABLE(of, mpc5xxx_can_table);
 
 static struct platform_driver mpc5xxx_can_driver = {
 	.driver = {
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 03df9a8..92f73c7 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -21,7 +21,7 @@
 
 config CAN_SJA1000_OF_PLATFORM
 	tristate "Generic OF Platform Bus based SJA1000 driver"
-	depends on PPC_OF
+	depends on OF
 	---help---
 	  This driver adds support for the SJA1000 chips connected to
 	  the OpenFirmware "platform bus" found on embedded systems with
@@ -93,6 +93,7 @@
 	   - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
 	   - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
 	   - IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
+	   - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
 
 config CAN_TSCAN1
 	tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8bc9598..dc04407 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -44,6 +44,7 @@
 			"esd CAN-PCI/CPCI/PCI104/200, "
 			"esd CAN-PCI/PMC/266, "
 			"esd CAN-PCIe/2000, "
+			"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
 			"IXXAT PC-I 04/PCI")
 MODULE_LICENSE("GPL v2");
 
@@ -131,6 +132,9 @@
 #define TEWS_PCI_VENDOR_ID		0x1498
 #define TEWS_PCI_DEVICE_ID_TMPC810	0x032A
 
+#define CTI_PCI_VENDOR_ID		0x12c4
+#define CTI_PCI_DEVICE_ID_CRG001	0x0900
+
 static void plx_pci_reset_common(struct pci_dev *pdev);
 static void plx_pci_reset_marathon(struct pci_dev *pdev);
 static void plx9056_pci_reset_common(struct pci_dev *pdev);
@@ -222,6 +226,14 @@
 	/* based on PLX9030 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_cti __devinitdata = {
+	"Connect Tech Inc. CANpro/104-Plus Opto (CRG001)", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030 */
+};
+
 static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
 	{
 		/* Adlink PCI-7841/cPCI-7841 */
@@ -300,6 +312,13 @@
 		0, 0,
 		(kernel_ulong_t)&plx_pci_card_info_tews
 	},
+	{
+		/* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+		CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_cti
+	},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index f2683eb..e45258d 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -42,6 +42,8 @@
 #include <linux/can/dev.h>
 
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/prom.h>
 
 #include "sja1000.h"
@@ -59,13 +61,13 @@
 
 static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
 {
-	return in_8(priv->reg_base + reg);
+	return ioread8(priv->reg_base + reg);
 }
 
 static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
 				  int reg, u8 val)
 {
-	out_8(priv->reg_base + reg, val);
+	iowrite8(val, priv->reg_base + reg);
 }
 
 static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 662c5f7..21619bb 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -34,6 +34,7 @@
 
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
+MODULE_ALIAS("platform:" DRV_NAME);
 MODULE_LICENSE("GPL v2");
 
 static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9ded21e..1267b36 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1055,3 +1055,4 @@
 MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 0a68768..a4e4bee 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,6 +13,35 @@
           This driver supports the CAN-USB/2 interface
           from esd electronic system design gmbh (http://www.esd.eu).
 
+config CAN_KVASER_USB
+	tristate "Kvaser CAN/USB interface"
+	---help---
+	  This driver adds support for Kvaser CAN/USB devices like Kvaser
+	  Leaf Light.
+
+	  The driver gives support for the following devices:
+	    - Kvaser Leaf Light
+	    - Kvaser Leaf Professional HS
+	    - Kvaser Leaf SemiPro HS
+	    - Kvaser Leaf Professional LS
+	    - Kvaser Leaf Professional SWC
+	    - Kvaser Leaf Professional LIN
+	    - Kvaser Leaf SemiPro LS
+	    - Kvaser Leaf SemiPro SWC
+	    - Kvaser Memorator II HS/HS
+	    - Kvaser USBcan Professional HS/HS
+	    - Kvaser Leaf Light GI
+	    - Kvaser Leaf Professional HS (OBD-II connector)
+	    - Kvaser Memorator Professional HS/LS
+	    - Kvaser Leaf Light "China"
+	    - Kvaser BlackBird SemiPro
+	    - Kvaser USBcan R
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called kvaser_usb.
+
 config CAN_PEAK_USB
 	tristate "PEAK PCAN-USB/USB Pro interfaces"
 	---help---
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index da6d1d3..80a2ee4 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
 obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index bd36e55..124e0dd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1,7 +1,7 @@
 /*
- * CAN driver for esd CAN-USB/2
+ * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
  *
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published
@@ -28,14 +28,16 @@
 #include <linux/can/error.h>
 
 MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
+MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
 MODULE_LICENSE("GPL v2");
 
 /* Define these values to match your devices */
 #define USB_ESDGMBH_VENDOR_ID	0x0ab4
 #define USB_CANUSB2_PRODUCT_ID	0x0010
+#define USB_CANUSBM_PRODUCT_ID	0x0011
 
 #define ESD_USB2_CAN_CLOCK	60000000
+#define ESD_USBM_CAN_CLOCK	36000000
 #define ESD_USB2_MAX_NETS	2
 
 /* USB2 commands */
@@ -69,6 +71,7 @@
 #define ESD_USB2_TSEG2_SHIFT	20
 #define ESD_USB2_SJW_MAX	4
 #define ESD_USB2_SJW_SHIFT	14
+#define ESD_USBM_SJW_SHIFT	24
 #define ESD_USB2_BRP_MIN	1
 #define ESD_USB2_BRP_MAX	1024
 #define ESD_USB2_BRP_INC	1
@@ -183,6 +186,7 @@
 
 static struct usb_device_id esd_usb2_table[] = {
 	{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
+	{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, esd_usb2_table);
@@ -889,11 +893,22 @@
 	struct can_bittiming *bt = &priv->can.bittiming;
 	struct esd_usb2_msg msg;
 	u32 canbtr;
+	int sjw_shift;
 
 	canbtr = ESD_USB2_UBR;
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		canbtr |= ESD_USB2_LOM;
+
 	canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
+
+	if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+	    USB_CANUSBM_PRODUCT_ID)
+		sjw_shift = ESD_USBM_SJW_SHIFT;
+	else
+		sjw_shift = ESD_USB2_SJW_SHIFT;
+
 	canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
-		<< ESD_USB2_SJW_SHIFT;
+		<< sjw_shift;
 	canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
 		   & (ESD_USB2_TSEG1_MAX - 1))
 		<< ESD_USB2_TSEG1_SHIFT;
@@ -971,12 +986,20 @@
 	priv->index = index;
 
 	priv->can.state = CAN_STATE_STOPPED;
-	priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+	if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
+	    USB_CANUSBM_PRODUCT_ID)
+		priv->can.clock.freq = ESD_USBM_CAN_CLOCK;
+	else {
+		priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+	}
+
 	priv->can.bittiming_const = &esd_usb2_bittiming_const;
 	priv->can.do_set_bittiming = esd_usb2_set_bittiming;
 	priv->can.do_set_mode = esd_usb2_set_mode;
 	priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
-	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
 
 	netdev->flags |= IFF_ECHO; /* we support local echo */
 
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
new file mode 100644
index 0000000..5b58a4d
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -0,0 +1,1627 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * Parts of this driver are based on the following:
+ *  - Kvaser linux leaf driver (version 4.78)
+ *  - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ */
+
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_TX_URBS			16
+#define MAX_RX_URBS			4
+#define START_TIMEOUT			1000 /* msecs */
+#define STOP_TIMEOUT			1000 /* msecs */
+#define USB_SEND_TIMEOUT		1000 /* msecs */
+#define USB_RECV_TIMEOUT		1000 /* msecs */
+#define RX_BUFFER_SIZE			3072
+#define CAN_USB_CLOCK			8000000
+#define MAX_NET_DEVICES			3
+
+/* Kvaser USB devices */
+#define KVASER_VENDOR_ID		0x0bfd
+#define USB_LEAF_DEVEL_PRODUCT_ID	10
+#define USB_LEAF_LITE_PRODUCT_ID	11
+#define USB_LEAF_PRO_PRODUCT_ID		12
+#define USB_LEAF_SPRO_PRODUCT_ID	14
+#define USB_LEAF_PRO_LS_PRODUCT_ID	15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID	16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID	17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID	18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID	19
+#define USB_MEMO2_DEVEL_PRODUCT_ID	22
+#define USB_MEMO2_HSHS_PRODUCT_ID	23
+#define USB_UPRO_HSHS_PRODUCT_ID	24
+#define USB_LEAF_LITE_GI_PRODUCT_ID	25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID	26
+#define USB_MEMO2_HSLS_PRODUCT_ID	27
+#define USB_LEAF_LITE_CH_PRODUCT_ID	28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID	29
+#define USB_OEM_MERCURY_PRODUCT_ID	34
+#define USB_OEM_LEAF_PRODUCT_ID		35
+#define USB_CAN_R_PRODUCT_ID		39
+
+/* USB devices features */
+#define KVASER_HAS_SILENT_MODE		BIT(0)
+#define KVASER_HAS_TXRX_ERRORS		BIT(1)
+
+/* Message header size */
+#define MSG_HEADER_LEN			2
+
+/* Can message flags */
+#define MSG_FLAG_ERROR_FRAME		BIT(0)
+#define MSG_FLAG_OVERRUN		BIT(1)
+#define MSG_FLAG_NERR			BIT(2)
+#define MSG_FLAG_WAKEUP			BIT(3)
+#define MSG_FLAG_REMOTE_FRAME		BIT(4)
+#define MSG_FLAG_RESERVED		BIT(5)
+#define MSG_FLAG_TX_ACK			BIT(6)
+#define MSG_FLAG_TX_REQUEST		BIT(7)
+
+/* Can states */
+#define M16C_STATE_BUS_RESET		BIT(0)
+#define M16C_STATE_BUS_ERROR		BIT(4)
+#define M16C_STATE_BUS_PASSIVE		BIT(5)
+#define M16C_STATE_BUS_OFF		BIT(6)
+
+/* Can msg ids */
+#define CMD_RX_STD_MESSAGE		12
+#define CMD_TX_STD_MESSAGE		13
+#define CMD_RX_EXT_MESSAGE		14
+#define CMD_TX_EXT_MESSAGE		15
+#define CMD_SET_BUS_PARAMS		16
+#define CMD_GET_BUS_PARAMS		17
+#define CMD_GET_BUS_PARAMS_REPLY	18
+#define CMD_GET_CHIP_STATE		19
+#define CMD_CHIP_STATE_EVENT		20
+#define CMD_SET_CTRL_MODE		21
+#define CMD_GET_CTRL_MODE		22
+#define CMD_GET_CTRL_MODE_REPLY		23
+#define CMD_RESET_CHIP			24
+#define CMD_RESET_CARD			25
+#define CMD_START_CHIP			26
+#define CMD_START_CHIP_REPLY		27
+#define CMD_STOP_CHIP			28
+#define CMD_STOP_CHIP_REPLY		29
+#define CMD_GET_CARD_INFO2		32
+#define CMD_GET_CARD_INFO		34
+#define CMD_GET_CARD_INFO_REPLY		35
+#define CMD_GET_SOFTWARE_INFO		38
+#define CMD_GET_SOFTWARE_INFO_REPLY	39
+#define CMD_ERROR_EVENT			45
+#define CMD_FLUSH_QUEUE			48
+#define CMD_RESET_ERROR_COUNTER		49
+#define CMD_TX_ACKNOWLEDGE		50
+#define CMD_CAN_ERROR_EVENT		51
+#define CMD_USB_THROTTLE		77
+#define CMD_LOG_MESSAGE			106
+
+/* error factors */
+#define M16C_EF_ACKE			BIT(0)
+#define M16C_EF_CRCE			BIT(1)
+#define M16C_EF_FORME			BIT(2)
+#define M16C_EF_STFE			BIT(3)
+#define M16C_EF_BITE0			BIT(4)
+#define M16C_EF_BITE1			BIT(5)
+#define M16C_EF_RCVE			BIT(6)
+#define M16C_EF_TRE			BIT(7)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN		1
+#define KVASER_USB_TSEG1_MAX		16
+#define KVASER_USB_TSEG2_MIN		1
+#define KVASER_USB_TSEG2_MAX		8
+#define KVASER_USB_SJW_MAX		4
+#define KVASER_USB_BRP_MIN		1
+#define KVASER_USB_BRP_MAX		64
+#define KVASER_USB_BRP_INC		1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL		1
+#define KVASER_CTRL_MODE_SILENT		2
+#define KVASER_CTRL_MODE_SELFRECEPTION	3
+#define KVASER_CTRL_MODE_OFF		4
+
+struct kvaser_msg_simple {
+	u8 tid;
+	u8 channel;
+} __packed;
+
+struct kvaser_msg_cardinfo {
+	u8 tid;
+	u8 nchannels;
+	__le32 serial_number;
+	__le32 padding;
+	__le32 clock_resolution;
+	__le32 mfgdate;
+	u8 ean[8];
+	u8 hw_revision;
+	u8 usb_hs_mode;
+	__le16 padding2;
+} __packed;
+
+struct kvaser_msg_cardinfo2 {
+	u8 tid;
+	u8 channel;
+	u8 pcb_id[24];
+	__le32 oem_unlock_code;
+} __packed;
+
+struct kvaser_msg_softinfo {
+	u8 tid;
+	u8 channel;
+	__le32 sw_options;
+	__le32 fw_version;
+	__le16 max_outstanding_tx;
+	__le16 padding[9];
+} __packed;
+
+struct kvaser_msg_busparams {
+	u8 tid;
+	u8 channel;
+	__le32 bitrate;
+	u8 tseg1;
+	u8 tseg2;
+	u8 sjw;
+	u8 no_samp;
+} __packed;
+
+struct kvaser_msg_tx_can {
+	u8 channel;
+	u8 tid;
+	u8 msg[14];
+	u8 padding;
+	u8 flags;
+} __packed;
+
+struct kvaser_msg_rx_can {
+	u8 channel;
+	u8 flag;
+	__le16 time[3];
+	u8 msg[14];
+} __packed;
+
+struct kvaser_msg_chip_state_event {
+	u8 tid;
+	u8 channel;
+	__le16 time[3];
+	u8 tx_errors_count;
+	u8 rx_errors_count;
+	u8 status;
+	u8 padding[3];
+} __packed;
+
+struct kvaser_msg_tx_acknowledge {
+	u8 channel;
+	u8 tid;
+	__le16 time[3];
+	u8 flags;
+	u8 time_offset;
+} __packed;
+
+struct kvaser_msg_error_event {
+	u8 tid;
+	u8 flags;
+	__le16 time[3];
+	u8 channel;
+	u8 padding;
+	u8 tx_errors_count;
+	u8 rx_errors_count;
+	u8 status;
+	u8 error_factor;
+} __packed;
+
+struct kvaser_msg_ctrl_mode {
+	u8 tid;
+	u8 channel;
+	u8 ctrl_mode;
+	u8 padding[3];
+} __packed;
+
+struct kvaser_msg_flush_queue {
+	u8 tid;
+	u8 channel;
+	u8 flags;
+	u8 padding[3];
+} __packed;
+
+struct kvaser_msg_log_message {
+	u8 channel;
+	u8 flags;
+	__le16 time[3];
+	u8 dlc;
+	u8 time_offset;
+	__le32 id;
+	u8 data[8];
+} __packed;
+
+struct kvaser_msg {
+	u8 len;
+	u8 id;
+	union	{
+		struct kvaser_msg_simple simple;
+		struct kvaser_msg_cardinfo cardinfo;
+		struct kvaser_msg_cardinfo2 cardinfo2;
+		struct kvaser_msg_softinfo softinfo;
+		struct kvaser_msg_busparams busparams;
+		struct kvaser_msg_tx_can tx_can;
+		struct kvaser_msg_rx_can rx_can;
+		struct kvaser_msg_chip_state_event chip_state_event;
+		struct kvaser_msg_tx_acknowledge tx_acknowledge;
+		struct kvaser_msg_error_event error_event;
+		struct kvaser_msg_ctrl_mode ctrl_mode;
+		struct kvaser_msg_flush_queue flush_queue;
+		struct kvaser_msg_log_message log_message;
+	} u;
+} __packed;
+
+struct kvaser_usb_tx_urb_context {
+	struct kvaser_usb_net_priv *priv;
+	u32 echo_index;
+	int dlc;
+};
+
+struct kvaser_usb {
+	struct usb_device *udev;
+	struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
+
+	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+	struct usb_anchor rx_submitted;
+
+	u32 fw_version;
+	unsigned int nchannels;
+
+	bool rxinitdone;
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+	struct can_priv can;
+
+	atomic_t active_tx_urbs;
+	struct usb_anchor tx_submitted;
+	struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+	struct completion start_comp, stop_comp;
+
+	struct kvaser_usb *dev;
+	struct net_device *netdev;
+	int channel;
+
+	struct can_berr_counter bec;
+};
+
+static const struct usb_device_id kvaser_usb_table[] = {
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS |
+			       KVASER_HAS_SILENT_MODE },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+		.driver_info = KVASER_HAS_TXRX_ERRORS },
+	{ }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
+				      struct kvaser_msg *msg)
+{
+	int actual_len;
+
+	return usb_bulk_msg(dev->udev,
+			    usb_sndbulkpipe(dev->udev,
+					dev->bulk_out->bEndpointAddress),
+			    msg, msg->len, &actual_len,
+			    USB_SEND_TIMEOUT);
+}
+
+static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
+			       struct kvaser_msg *msg)
+{
+	struct kvaser_msg *tmp;
+	void *buf;
+	int actual_len;
+	int err;
+	int pos = 0;
+
+	buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	err = usb_bulk_msg(dev->udev,
+			   usb_rcvbulkpipe(dev->udev,
+					   dev->bulk_in->bEndpointAddress),
+			   buf, RX_BUFFER_SIZE, &actual_len,
+			   USB_RECV_TIMEOUT);
+	if (err < 0)
+		goto end;
+
+	while (pos <= actual_len - MSG_HEADER_LEN) {
+		tmp = buf + pos;
+
+		if (!tmp->len)
+			break;
+
+		if (pos + tmp->len > actual_len) {
+			dev_err(dev->udev->dev.parent, "Format error\n");
+			break;
+		}
+
+		if (tmp->id == id) {
+			memcpy(msg, tmp, tmp->len);
+			goto end;
+		}
+
+		pos += tmp->len;
+	}
+
+	err = -EINVAL;
+
+end:
+	kfree(buf);
+
+	return err;
+}
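+
+/* The bulk-in buffer parsed above carries a sequence of length-prefixed
+ * messages (illustrative sketch, not a wire-format specification):
+ *
+ *   | len | id | payload ... | len | id | payload ... | 0 | ...
+ *
+ * A zero length byte ends the scan, and a message whose declared length
+ * would run past actual_len is reported as a format error.
+ */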
+
+static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
+				      u8 msg_id, int channel)
+{
+	struct kvaser_msg *msg;
+	int rc;
+
+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->id = msg_id;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+	msg->u.simple.channel = channel;
+	msg->u.simple.tid = 0xff;
+
+	rc = kvaser_usb_send_msg(dev, msg);
+
+	kfree(msg);
+	return rc;
+}
+
+static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
+{
+	struct kvaser_msg msg;
+	int err;
+
+	err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
+	if (err)
+		return err;
+
+	err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
+	if (err)
+		return err;
+
+	dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version);
+
+	return 0;
+}
+
+static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
+{
+	struct kvaser_msg msg;
+	int err;
+
+	err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
+	if (err)
+		return err;
+
+	err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
+	if (err)
+		return err;
+
+	dev->nchannels = msg.u.cardinfo.nchannels;
+
+	return 0;
+}
+
+static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
+				      const struct kvaser_msg *msg)
+{
+	struct net_device_stats *stats;
+	struct kvaser_usb_tx_urb_context *context;
+	struct kvaser_usb_net_priv *priv;
+	struct sk_buff *skb;
+	struct can_frame *cf;
+	u8 channel = msg->u.tx_acknowledge.channel;
+	u8 tid = msg->u.tx_acknowledge.tid;
+
+	if (channel >= dev->nchannels) {
+		dev_err(dev->udev->dev.parent,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+
+	if (!netif_device_present(priv->netdev))
+		return;
+
+	stats = &priv->netdev->stats;
+
+	context = &priv->tx_contexts[tid % MAX_TX_URBS];
+
+	/* Sometimes the state change doesn't come after a bus-off event */
+	if (priv->can.restart_ms &&
+	    (priv->can.state >= CAN_STATE_BUS_OFF)) {
+		skb = alloc_can_err_skb(priv->netdev, &cf);
+		if (skb) {
+			cf->can_id |= CAN_ERR_RESTARTED;
+			netif_rx(skb);
+
+			stats->rx_packets++;
+			stats->rx_bytes += cf->can_dlc;
+		} else {
+			netdev_err(priv->netdev,
+				   "No memory left for err_skb\n");
+		}
+
+		priv->can.can_stats.restarts++;
+		netif_carrier_on(priv->netdev);
+
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+	}
+
+	stats->tx_packets++;
+	stats->tx_bytes += context->dlc;
+	can_get_echo_skb(priv->netdev, context->echo_index);
+
+	context->echo_index = MAX_TX_URBS;
+	atomic_dec(&priv->active_tx_urbs);
+
+	netif_wake_queue(priv->netdev);
+}
+
+static void kvaser_usb_simple_msg_callback(struct urb *urb)
+{
+	struct net_device *netdev = urb->context;
+
+	kfree(urb->transfer_buffer);
+
+	if (urb->status)
+		netdev_warn(netdev, "urb status received: %d\n",
+			    urb->status);
+}
+
+static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+				       u8 msg_id)
+{
+	struct kvaser_usb *dev = priv->dev;
+	struct net_device *netdev = priv->netdev;
+	struct kvaser_msg *msg;
+	struct urb *urb;
+	void *buf;
+	int err;
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		netdev_err(netdev, "No memory left for URBs\n");
+		return -ENOMEM;
+	}
+
+	buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+	if (!buf) {
+		netdev_err(netdev, "No memory left for USB buffer\n");
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+
+	msg = (struct kvaser_msg *)buf;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+	msg->id = msg_id;
+	msg->u.simple.channel = priv->channel;
+
+	usb_fill_bulk_urb(urb, dev->udev,
+			  usb_sndbulkpipe(dev->udev,
+					  dev->bulk_out->bEndpointAddress),
+			  buf, msg->len,
+			  kvaser_usb_simple_msg_callback, priv);
+	usb_anchor_urb(urb, &priv->tx_submitted);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err) {
+		netdev_err(netdev, "Error transmitting URB\n");
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
+		kfree(buf);
+		return err;
+	}
+
+	usb_free_urb(urb);
+
+	return 0;
+}
+
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+	int i;
+
+	usb_kill_anchored_urbs(&priv->tx_submitted);
+	atomic_set(&priv->active_tx_urbs, 0);
+
+	for (i = 0; i < MAX_TX_URBS; i++)
+		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+				const struct kvaser_msg *msg)
+{
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct net_device_stats *stats;
+	struct kvaser_usb_net_priv *priv;
+	unsigned int new_state;
+	u8 channel, status, txerr, rxerr, error_factor;
+
+	switch (msg->id) {
+	case CMD_CAN_ERROR_EVENT:
+		channel = msg->u.error_event.channel;
+		status =  msg->u.error_event.status;
+		txerr = msg->u.error_event.tx_errors_count;
+		rxerr = msg->u.error_event.rx_errors_count;
+		error_factor = msg->u.error_event.error_factor;
+		break;
+	case CMD_LOG_MESSAGE:
+		channel = msg->u.log_message.channel;
+		status = msg->u.log_message.data[0];
+		txerr = msg->u.log_message.data[2];
+		rxerr = msg->u.log_message.data[3];
+		error_factor = msg->u.log_message.data[1];
+		break;
+	case CMD_CHIP_STATE_EVENT:
+		channel = msg->u.chip_state_event.channel;
+		status =  msg->u.chip_state_event.status;
+		txerr = msg->u.chip_state_event.tx_errors_count;
+		rxerr = msg->u.chip_state_event.rx_errors_count;
+		error_factor = 0;
+		break;
+	default:
+		dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+			msg->id);
+		return;
+	}
+
+	if (channel >= dev->nchannels) {
+		dev_err(dev->udev->dev.parent,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+	stats = &priv->netdev->stats;
+
+	if (status & M16C_STATE_BUS_RESET) {
+		kvaser_usb_unlink_tx_urbs(priv);
+		return;
+	}
+
+	skb = alloc_can_err_skb(priv->netdev, &cf);
+	if (!skb) {
+		stats->rx_dropped++;
+		return;
+	}
+
+	new_state = priv->can.state;
+
+	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+
+	if (status & M16C_STATE_BUS_OFF) {
+		cf->can_id |= CAN_ERR_BUSOFF;
+
+		priv->can.can_stats.bus_off++;
+		if (!priv->can.restart_ms)
+			kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
+
+		netif_carrier_off(priv->netdev);
+
+		new_state = CAN_STATE_BUS_OFF;
+	} else if (status & M16C_STATE_BUS_PASSIVE) {
+		if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
+			cf->can_id |= CAN_ERR_CRTL;
+
+			if (txerr || rxerr)
+				cf->data[1] = (txerr > rxerr)
+						? CAN_ERR_CRTL_TX_PASSIVE
+						: CAN_ERR_CRTL_RX_PASSIVE;
+			else
+				cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
+					      CAN_ERR_CRTL_RX_PASSIVE;
+
+			priv->can.can_stats.error_passive++;
+		}
+
+		new_state = CAN_STATE_ERROR_PASSIVE;
+	}
+
+	if (status == M16C_STATE_BUS_ERROR) {
+		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+		    ((txerr >= 96) || (rxerr >= 96))) {
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (txerr > rxerr)
+					? CAN_ERR_CRTL_TX_WARNING
+					: CAN_ERR_CRTL_RX_WARNING;
+
+			priv->can.can_stats.error_warning++;
+			new_state = CAN_STATE_ERROR_WARNING;
+		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+			cf->can_id |= CAN_ERR_PROT;
+			cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+			new_state = CAN_STATE_ERROR_ACTIVE;
+		}
+	}
+
+	if (!status) {
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+		new_state = CAN_STATE_ERROR_ACTIVE;
+	}
+
+	if (priv->can.restart_ms &&
+	    (priv->can.state >= CAN_STATE_BUS_OFF) &&
+	    (new_state < CAN_STATE_BUS_OFF)) {
+		cf->can_id |= CAN_ERR_RESTARTED;
+		netif_carrier_on(priv->netdev);
+
+		priv->can.can_stats.restarts++;
+	}
+
+	if (error_factor) {
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+
+		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+		if (error_factor & M16C_EF_ACKE)
+			cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+		if (error_factor & M16C_EF_CRCE)
+			cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+					CAN_ERR_PROT_LOC_CRC_DEL);
+		if (error_factor & M16C_EF_FORME)
+			cf->data[2] |= CAN_ERR_PROT_FORM;
+		if (error_factor & M16C_EF_STFE)
+			cf->data[2] |= CAN_ERR_PROT_STUFF;
+		if (error_factor & M16C_EF_BITE0)
+			cf->data[2] |= CAN_ERR_PROT_BIT0;
+		if (error_factor & M16C_EF_BITE1)
+			cf->data[2] |= CAN_ERR_PROT_BIT1;
+		if (error_factor & M16C_EF_TRE)
+			cf->data[2] |= CAN_ERR_PROT_TX;
+	}
+
+	cf->data[6] = txerr;
+	cf->data[7] = rxerr;
+
+	priv->bec.txerr = txerr;
+	priv->bec.rxerr = rxerr;
+
+	priv->can.state = new_state;
+
+	netif_rx(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
+				  const struct kvaser_msg *msg)
+{
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct net_device_stats *stats = &priv->netdev->stats;
+
+	if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+					 MSG_FLAG_NERR)) {
+		netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
+			   msg->u.rx_can.flag);
+
+		stats->rx_errors++;
+		return;
+	}
+
+	if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
+		skb = alloc_can_err_skb(priv->netdev, &cf);
+		if (!skb) {
+			stats->rx_dropped++;
+			return;
+		}
+
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+		stats->rx_over_errors++;
+		stats->rx_errors++;
+
+		netif_rx(skb);
+
+		stats->rx_packets++;
+		stats->rx_bytes += cf->can_dlc;
+	}
+}
+
+static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
+				  const struct kvaser_msg *msg)
+{
+	struct kvaser_usb_net_priv *priv;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct net_device_stats *stats;
+	u8 channel = msg->u.rx_can.channel;
+
+	if (channel >= dev->nchannels) {
+		dev_err(dev->udev->dev.parent,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+	stats = &priv->netdev->stats;
+
+	if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
+				  MSG_FLAG_OVERRUN)) {
+		kvaser_usb_rx_can_err(priv, msg);
+		return;
+	} else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
+		netdev_warn(priv->netdev,
+			    "Unhandled frame (flags: 0x%02x)\n",
+			    msg->u.rx_can.flag);
+		return;
+	}
+
+	skb = alloc_can_skb(priv->netdev, &cf);
+	if (!skb) {
+		stats->rx_dropped++;
+		return;
+	}
+
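+	/* The firmware splits the 11-bit standard ID over msg[0] (5 bits)
+	 * and msg[1] (6 bits); extended frames carry the remaining 18 bits
+	 * in msg[2..4] and are reassembled below.
+	 */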
+	cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+		     (msg->u.rx_can.msg[1] & 0x3f);
+	cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+	if (msg->id == CMD_RX_EXT_MESSAGE) {
+		cf->can_id <<= 18;
+		cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+			      ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+			      (msg->u.rx_can.msg[4] & 0x3f);
+		cf->can_id |= CAN_EFF_FLAG;
+	}
+
+	if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+		cf->can_id |= CAN_RTR_FLAG;
+	else
+		memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+
+	netif_rx(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
+					const struct kvaser_msg *msg)
+{
+	struct kvaser_usb_net_priv *priv;
+	u8 channel = msg->u.simple.channel;
+
+	if (channel >= dev->nchannels) {
+		dev_err(dev->udev->dev.parent,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+
+	if (completion_done(&priv->start_comp) &&
+	    netif_queue_stopped(priv->netdev)) {
+		netif_wake_queue(priv->netdev);
+	} else {
+		netif_start_queue(priv->netdev);
+		complete(&priv->start_comp);
+	}
+}
+
+static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
+				       const struct kvaser_msg *msg)
+{
+	struct kvaser_usb_net_priv *priv;
+	u8 channel = msg->u.simple.channel;
+
+	if (channel >= dev->nchannels) {
+		dev_err(dev->udev->dev.parent,
+			"Invalid channel number (%d)\n", channel);
+		return;
+	}
+
+	priv = dev->nets[channel];
+
+	complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
+				      const struct kvaser_msg *msg)
+{
+	switch (msg->id) {
+	case CMD_START_CHIP_REPLY:
+		kvaser_usb_start_chip_reply(dev, msg);
+		break;
+
+	case CMD_STOP_CHIP_REPLY:
+		kvaser_usb_stop_chip_reply(dev, msg);
+		break;
+
+	case CMD_RX_STD_MESSAGE:
+	case CMD_RX_EXT_MESSAGE:
+		kvaser_usb_rx_can_msg(dev, msg);
+		break;
+
+	case CMD_CHIP_STATE_EVENT:
+	case CMD_CAN_ERROR_EVENT:
+		kvaser_usb_rx_error(dev, msg);
+		break;
+
+	case CMD_LOG_MESSAGE:
+		if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
+			kvaser_usb_rx_error(dev, msg);
+		break;
+
+	case CMD_TX_ACKNOWLEDGE:
+		kvaser_usb_tx_acknowledge(dev, msg);
+		break;
+
+	default:
+		dev_warn(dev->udev->dev.parent,
+			 "Unhandled message (%d)\n", msg->id);
+		break;
+	}
+}
+
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+	struct kvaser_usb *dev = urb->context;
+	struct kvaser_msg *msg;
+	int pos = 0;
+	int err, i;
+
+	switch (urb->status) {
+	case 0:
+		break;
+	case -ENOENT:
+	case -ESHUTDOWN:
+		return;
+	default:
+		dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
+			 urb->status);
+		goto resubmit_urb;
+	}
+
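+	/* A bulk-in buffer may hold several variable-length messages back
+	 * to back; walk them using each message's len field.
+	 */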
+	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+		msg = urb->transfer_buffer + pos;
+
+		if (!msg->len)
+			break;
+
+		if (pos + msg->len > urb->actual_length) {
+			dev_err(dev->udev->dev.parent, "Format error\n");
+			break;
+		}
+
+		kvaser_usb_handle_message(dev, msg);
+
+		pos += msg->len;
+	}
+
+resubmit_urb:
+	usb_fill_bulk_urb(urb, dev->udev,
+			  usb_rcvbulkpipe(dev->udev,
+					  dev->bulk_in->bEndpointAddress),
+			  urb->transfer_buffer, RX_BUFFER_SIZE,
+			  kvaser_usb_read_bulk_callback, dev);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err == -ENODEV) {
+		for (i = 0; i < dev->nchannels; i++) {
+			if (!dev->nets[i])
+				continue;
+
+			netif_device_detach(dev->nets[i]->netdev);
+		}
+	} else if (err) {
+		dev_err(dev->udev->dev.parent,
+			"Failed resubmitting read bulk urb: %d\n", err);
+	}
+
+	return;
+}
+
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+	int i, err = 0;
+
+	if (dev->rxinitdone)
+		return 0;
+
+	for (i = 0; i < MAX_RX_URBS; i++) {
+		struct urb *urb = NULL;
+		u8 *buf = NULL;
+		dma_addr_t buf_dma;
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			dev_warn(dev->udev->dev.parent,
+				 "No memory left for URBs\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
+					 GFP_KERNEL, &buf_dma);
+		if (!buf) {
+			dev_warn(dev->udev->dev.parent,
+				 "No memory left for USB buffer\n");
+			usb_free_urb(urb);
+			err = -ENOMEM;
+			break;
+		}
+
+		usb_fill_bulk_urb(urb, dev->udev,
+				  usb_rcvbulkpipe(dev->udev,
+					  dev->bulk_in->bEndpointAddress),
+				  buf, RX_BUFFER_SIZE,
+				  kvaser_usb_read_bulk_callback,
+				  dev);
+		urb->transfer_dma = buf_dma;
+		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+		usb_anchor_urb(urb, &dev->rx_submitted);
+
+		err = usb_submit_urb(urb, GFP_KERNEL);
+		if (err) {
+			usb_unanchor_urb(urb);
+			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+					  buf_dma);
+			usb_free_urb(urb);
+			break;
+		}
+
+		dev->rxbuf[i] = buf;
+		dev->rxbuf_dma[i] = buf_dma;
+
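+		/* Drop our reference; the USB core and the anchor keep the
+		 * URB alive while it is submitted.
+		 */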
+		usb_free_urb(urb);
+	}
+
+	if (i == 0) {
+		dev_warn(dev->udev->dev.parent,
+			 "Cannot setup read URBs, error %d\n", err);
+		return err;
+	} else if (i < MAX_RX_URBS) {
+		dev_warn(dev->udev->dev.parent,
+			 "RX performance may be slow\n");
+	}
+
+	dev->rxinitdone = true;
+
+	return 0;
+}
+
+static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+	struct kvaser_msg *msg;
+	int rc;
+
+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->id = CMD_SET_CTRL_MODE;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
+	msg->u.ctrl_mode.tid = 0xff;
+	msg->u.ctrl_mode.channel = priv->channel;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
+	else
+		msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
+
+	rc = kvaser_usb_send_msg(priv->dev, msg);
+
+	kfree(msg);
+	return rc;
+}
+
+static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
+{
+	int err;
+
+	init_completion(&priv->start_comp);
+
+	err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
+					 priv->channel);
+	if (err)
+		return err;
+
+	if (!wait_for_completion_timeout(&priv->start_comp,
+					 msecs_to_jiffies(START_TIMEOUT)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+	struct kvaser_usb *dev = priv->dev;
+	int err;
+
+	err = open_candev(netdev);
+	if (err)
+		return err;
+
+	err = kvaser_usb_setup_rx_urbs(dev);
+	if (err)
+		goto error;
+
+	err = kvaser_usb_set_opt_mode(priv);
+	if (err)
+		goto error;
+
+	err = kvaser_usb_start_chip(priv);
+	if (err) {
+		netdev_warn(netdev, "Cannot start device, error %d\n", err);
+		goto error;
+	}
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	return 0;
+
+error:
+	close_candev(netdev);
+	return err;
+}
+
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+	int i;
+
+	usb_kill_anchored_urbs(&dev->rx_submitted);
+
+	for (i = 0; i < MAX_RX_URBS; i++)
+		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+				  dev->rxbuf[i],
+				  dev->rxbuf_dma[i]);
+
+	for (i = 0; i < MAX_NET_DEVICES; i++) {
+		struct kvaser_usb_net_priv *priv = dev->nets[i];
+
+		if (priv)
+			kvaser_usb_unlink_tx_urbs(priv);
+	}
+}
+
+static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+	int err;
+
+	init_completion(&priv->stop_comp);
+
+	err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
+					 priv->channel);
+	if (err)
+		return err;
+
+	if (!wait_for_completion_timeout(&priv->stop_comp,
+					 msecs_to_jiffies(STOP_TIMEOUT)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+	struct kvaser_msg *msg;
+	int rc;
+
+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->id = CMD_FLUSH_QUEUE;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
+	msg->u.flush_queue.channel = priv->channel;
+	msg->u.flush_queue.flags = 0x00;
+
+	rc = kvaser_usb_send_msg(priv->dev, msg);
+
+	kfree(msg);
+	return rc;
+}
+
+static int kvaser_usb_close(struct net_device *netdev)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+	struct kvaser_usb *dev = priv->dev;
+	int err;
+
+	netif_stop_queue(netdev);
+
+	err = kvaser_usb_flush_queue(priv);
+	if (err)
+		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+	if (err)
+		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
+
+	err = kvaser_usb_stop_chip(priv);
+	if (err)
+		netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+	priv->can.state = CAN_STATE_STOPPED;
+	close_candev(priv->netdev);
+
+	return 0;
+}
+
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+	struct kvaser_usb_tx_urb_context *context = urb->context;
+	struct kvaser_usb_net_priv *priv;
+	struct net_device *netdev;
+
+	if (WARN_ON(!context))
+		return;
+
+	priv = context->priv;
+	netdev = priv->netdev;
+
+	kfree(urb->transfer_buffer);
+
+	if (!netif_device_present(netdev))
+		return;
+
+	if (urb->status)
+		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+}
+
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+					 struct net_device *netdev)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+	struct kvaser_usb *dev = priv->dev;
+	struct net_device_stats *stats = &netdev->stats;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	struct kvaser_usb_tx_urb_context *context = NULL;
+	struct urb *urb;
+	void *buf;
+	struct kvaser_msg *msg;
+	int i, err;
+	int ret = NETDEV_TX_OK;
+
+	if (can_dropped_invalid_skb(netdev, skb))
+		return NETDEV_TX_OK;
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		netdev_err(netdev, "No memory left for URBs\n");
+		stats->tx_dropped++;
+		goto nourbmem;
+	}
+
+	buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+	if (!buf) {
+		netdev_err(netdev, "No memory left for USB buffer\n");
+		stats->tx_dropped++;
+		goto nobufmem;
+	}
+
+	msg = buf;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
+	msg->u.tx_can.flags = 0;
+	msg->u.tx_can.channel = priv->channel;
+
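+	/* Pack the CAN ID into the firmware's split format: the 11-bit ID
+	 * (or the top 11 bits of an extended ID) goes into msg[0..1] as
+	 * 5 + 6 bits; extended frames put the remaining 18 bits into
+	 * msg[2..4] as 4 + 8 + 6 bits.
+	 */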
+	if (cf->can_id & CAN_EFF_FLAG) {
+		msg->id = CMD_TX_EXT_MESSAGE;
+		msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
+		msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
+		msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
+		msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
+		msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
+	} else {
+		msg->id = CMD_TX_STD_MESSAGE;
+		msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
+		msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
+	}
+
+	msg->u.tx_can.msg[5] = cf->can_dlc;
+	memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;
+
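+	/* Find a free TX context: a slot is unused while its echo_index
+	 * still holds the MAX_TX_URBS sentinel set at init/unlink time.
+	 */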
+	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
+		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+			context = &priv->tx_contexts[i];
+			break;
+		}
+	}
+
+	if (!context) {
+		netdev_warn(netdev, "cannot find free context\n");
+		ret =  NETDEV_TX_BUSY;
+		goto releasebuf;
+	}
+
+	context->priv = priv;
+	context->echo_index = i;
+	context->dlc = cf->can_dlc;
+
+	msg->u.tx_can.tid = context->echo_index;
+
+	usb_fill_bulk_urb(urb, dev->udev,
+			  usb_sndbulkpipe(dev->udev,
+					  dev->bulk_out->bEndpointAddress),
+			  buf, msg->len,
+			  kvaser_usb_write_bulk_callback, context);
+	usb_anchor_urb(urb, &priv->tx_submitted);
+
+	can_put_echo_skb(skb, netdev, context->echo_index);
+
+	atomic_inc(&priv->active_tx_urbs);
+
+	if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+		netif_stop_queue(netdev);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (unlikely(err)) {
+		can_free_echo_skb(netdev, context->echo_index);
+
+		skb = NULL; /* set to NULL to avoid double free in
+			     * dev_kfree_skb(skb) */
+
+		atomic_dec(&priv->active_tx_urbs);
+		usb_unanchor_urb(urb);
+
+		stats->tx_dropped++;
+
+		if (err == -ENODEV)
+			netif_device_detach(netdev);
+		else
+			netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+		goto releasebuf;
+	}
+
+	usb_free_urb(urb);
+
+	return NETDEV_TX_OK;
+
+releasebuf:
+	kfree(buf);
+nobufmem:
+	usb_free_urb(urb);
+nourbmem:
+	dev_kfree_skb(skb);
+	return ret;
+}
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+	.ndo_open = kvaser_usb_open,
+	.ndo_stop = kvaser_usb_close,
+	.ndo_start_xmit = kvaser_usb_start_xmit,
+};
+
+static const struct can_bittiming_const kvaser_usb_bittiming_const = {
+	.name = "kvaser_usb",
+	.tseg1_min = KVASER_USB_TSEG1_MIN,
+	.tseg1_max = KVASER_USB_TSEG1_MAX,
+	.tseg2_min = KVASER_USB_TSEG2_MIN,
+	.tseg2_max = KVASER_USB_TSEG2_MAX,
+	.sjw_max = KVASER_USB_SJW_MAX,
+	.brp_min = KVASER_USB_BRP_MIN,
+	.brp_max = KVASER_USB_BRP_MAX,
+	.brp_inc = KVASER_USB_BRP_INC,
+};
+
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+	struct can_bittiming *bt = &priv->can.bittiming;
+	struct kvaser_usb *dev = priv->dev;
+	struct kvaser_msg *msg;
+	int rc;
+
+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->id = CMD_SET_BUS_PARAMS;
+	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
+	msg->u.busparams.channel = priv->channel;
+	msg->u.busparams.tid = 0xff;
+	msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+	msg->u.busparams.sjw = bt->sjw;
+	msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+	msg->u.busparams.tseg2 = bt->phase_seg2;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		msg->u.busparams.no_samp = 3;
+	else
+		msg->u.busparams.no_samp = 1;
+
+	rc = kvaser_usb_send_msg(dev, msg);
+
+	kfree(msg);
+	return rc;
+}
+
+static int kvaser_usb_set_mode(struct net_device *netdev,
+			       enum can_mode mode)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+	int err;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
+		if (err)
+			return err;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
+				       struct can_berr_counter *bec)
+{
+	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+	*bec = priv->bec;
+
+	return 0;
+}
+
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->nchannels; i++) {
+		if (!dev->nets[i])
+			continue;
+
+		unregister_netdev(dev->nets[i]->netdev);
+	}
+
+	kvaser_usb_unlink_all_urbs(dev);
+
+	for (i = 0; i < dev->nchannels; i++) {
+		if (!dev->nets[i])
+			continue;
+
+		free_candev(dev->nets[i]->netdev);
+	}
+}
+
+static int kvaser_usb_init_one(struct usb_interface *intf,
+			       const struct usb_device_id *id, int channel)
+{
+	struct kvaser_usb *dev = usb_get_intfdata(intf);
+	struct net_device *netdev;
+	struct kvaser_usb_net_priv *priv;
+	int i, err;
+
+	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+	if (!netdev) {
+		dev_err(&intf->dev, "Cannot alloc candev\n");
+		return -ENOMEM;
+	}
+
+	priv = netdev_priv(netdev);
+
+	init_completion(&priv->start_comp);
+	init_completion(&priv->stop_comp);
+
+	init_usb_anchor(&priv->tx_submitted);
+	atomic_set(&priv->active_tx_urbs, 0);
+
+	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
+		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+	priv->dev = dev;
+	priv->netdev = netdev;
+	priv->channel = channel;
+
+	priv->can.state = CAN_STATE_STOPPED;
+	priv->can.clock.freq = CAN_USB_CLOCK;
+	priv->can.bittiming_const = &kvaser_usb_bittiming_const;
+	priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+	priv->can.do_set_mode = kvaser_usb_set_mode;
+	if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
+		priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+	if (id->driver_info & KVASER_HAS_SILENT_MODE)
+		priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+	netdev->flags |= IFF_ECHO;
+
+	netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+	SET_NETDEV_DEV(netdev, &intf->dev);
+
+	dev->nets[channel] = priv;
+
+	err = register_candev(netdev);
+	if (err) {
+		dev_err(&intf->dev, "Failed to register can device\n");
+		free_candev(netdev);
+		dev->nets[channel] = NULL;
+		return err;
+	}
+
+	netdev_dbg(netdev, "device registered\n");
+
+	return 0;
+}
+
+static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
+				     struct usb_endpoint_descriptor **in,
+				     struct usb_endpoint_descriptor **out)
+{
+	const struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	int i;
+
+	iface_desc = &intf->altsetting[0];
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(endpoint))
+			*in = endpoint;
+
+		if (usb_endpoint_is_bulk_out(endpoint))
+			*out = endpoint;
+	}
+}
+
+static int kvaser_usb_probe(struct usb_interface *intf,
+			    const struct usb_device_id *id)
+{
+	struct kvaser_usb *dev;
+	int err = -ENOMEM;
+	int i;
+
+	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+	if (!dev->bulk_in || !dev->bulk_out) {
+		dev_err(&intf->dev, "Cannot get usb endpoint(s)\n");
+		return err;
+	}
+
+	dev->udev = interface_to_usbdev(intf);
+
+	init_usb_anchor(&dev->rx_submitted);
+
+	usb_set_intfdata(intf, dev);
+
+	for (i = 0; i < MAX_NET_DEVICES; i++)
+		kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+
+	err = kvaser_usb_get_software_info(dev);
+	if (err) {
+		dev_err(&intf->dev,
+			"Cannot get software info, error %d\n", err);
+		return err;
+	}
+
+	err = kvaser_usb_get_card_info(dev);
+	if (err) {
+		dev_err(&intf->dev,
+			"Cannot get card info, error %d\n", err);
+		return err;
+	}
+
+	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+		((dev->fw_version >> 24) & 0xff),
+		((dev->fw_version >> 16) & 0xff),
+		(dev->fw_version & 0xffff));
+
+	for (i = 0; i < dev->nchannels; i++) {
+		err = kvaser_usb_init_one(intf, id, i);
+		if (err) {
+			kvaser_usb_remove_interfaces(dev);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+	struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+	usb_set_intfdata(intf, NULL);
+
+	if (!dev)
+		return;
+
+	kvaser_usb_remove_interfaces(dev);
+}
+
+static struct usb_driver kvaser_usb_driver = {
+	.name = "kvaser_usb",
+	.probe = kvaser_usb_probe,
+	.disconnect = kvaser_usb_disconnect,
+	.id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index dd151d5..b8fe808 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,5 +1,5 @@
 menu "Distributed Switch Architecture drivers"
-	depends on NET_DSA
+	depends on HAVE_NET_DSA
 
 config NET_DSA_MV88E6XXX
 	tristate
@@ -7,6 +7,7 @@
 
 config NET_DSA_MV88E6060
 	tristate "Marvell 88E6060 ethernet switch chip support"
+	select NET_DSA
 	select NET_DSA_TAG_TRAILER
 	---help---
 	  This enables support for the Marvell 88E6060 ethernet switch
@@ -18,6 +19,7 @@
 
 config NET_DSA_MV88E6131
 	tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+	select NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_MV88E6XXX_NEED_PPU
 	select NET_DSA_TAG_DSA
@@ -27,6 +29,7 @@
 
 config NET_DSA_MV88E6123_61_65
 	tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+	select NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 8322c54..6414e84 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -463,12 +463,6 @@
 {
 	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
-			dev->name);
-		return -EINVAL;
-	}
-
 	if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
 		return -EAGAIN;
 
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d3..e49c0ef 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,7 +61,7 @@
 
 config BFIN_MAC_USE_HWSTAMP
 	bool "Use IEEE 1588 hwstamp"
-	depends on BFIN_MAC && BF518
+	select PTP_1588_CLOCK
 	default y
 	---help---
 	  To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426..f1c458d 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,14 +548,17 @@
 	return 0;
 }
 
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
 	struct ethtool_ts_info *info)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+
 	info->so_timestamping =
 		SOF_TIMESTAMPING_TX_HARDWARE |
 		SOF_TIMESTAMPING_RX_HARDWARE |
-		SOF_TIMESTAMPING_SYS_HARDWARE;
-	info->phc_index = -1;
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = lp->phc_index;
 	info->tx_types =
 		(1 << HWTSTAMP_TX_OFF) |
 		(1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@
 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 	return 0;
 }
+#endif
 
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
 	.get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@
 	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
 	.get_wol = bfin_mac_ethtool_getwol,
 	.set_wol = bfin_mac_ethtool_setwol,
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 	.get_ts_info = bfin_mac_ethtool_get_ts_info,
+#endif
 };
 
 /**************************************************************************/
@@ -649,6 +655,20 @@
 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
 
+static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
+{
+	u32 ipn = 1000000000UL / input_clk;
+	u32 ppn = 1;
+	unsigned int shift = 0;
+
+	while (ppn <= ipn) {
+		ppn <<= 1;
+		shift++;
+	}
+	*shift_result = shift;
+	return 1000000000UL / ppn;
+}
+
 static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
 		struct ifreq *ifr, int cmd)
 {
@@ -798,19 +818,7 @@
 		bfin_read_EMAC_PTP_TXSNAPLO();
 		bfin_read_EMAC_PTP_TXSNAPHI();
 
-		/*
-		 * Set registers so that rollover occurs soon to test this.
-		 */
-		bfin_write_EMAC_PTP_TIMELO(0x00000000);
-		bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
-
 		SSYNC();
-
-		lp->compare.last_update = 0;
-		timecounter_init(&lp->clock,
-				&lp->cycles,
-				ktime_to_ns(ktime_get_real()));
-		timecompare_update(&lp->compare, 0);
 	}
 
 	lp->stamp_cfg = config;
@@ -818,15 +826,6 @@
 		-EFAULT : 0;
 }
 
-static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
-{
-	ktime_t sys = ktime_get_real();
-
-	pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
-			__func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
-			sys.tv.nsec, cmp->offset, cmp->skew);
-}
-
 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@
 			regval = bfin_read_EMAC_PTP_TXSNAPLO();
 			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			ns = timecounter_cyc2time(&lp->clock,
-					regval);
-			timecompare_update(&lp->compare, ns);
+			ns = regval << lp->shift;
 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			shhwtstamps.syststamp =
-				timecompare_transform(&lp->compare, ns);
 			skb_tstamp_tx(skb, &shhwtstamps);
-
-			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
 		}
 	}
 }
@@ -888,63 +881,194 @@
 
 	regval = bfin_read_EMAC_PTP_RXSNAPLO();
 	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
-	ns = timecounter_cyc2time(&lp->clock, regval);
-	timecompare_update(&lp->compare, ns);
+	ns = regval << lp->shift;
 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
-	shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
-
-	bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
 }
 
-/*
- * bfin_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t bfin_read_clock(const struct cyclecounter *tc)
-{
-	u64 stamp;
-
-	stamp =  bfin_read_EMAC_PTP_TIMELO();
-	stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
-
-	return stamp;
-}
-
-#define PTP_CLK 25000000
-
 static void bfin_mac_hwtstamp_init(struct net_device *netdev)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
-	u64 append;
+	u64 addend, ppb;
+	u32 input_clk, phc_clk;
 
 	/* Initialize hardware timer */
-	append = PTP_CLK * (1ULL << 32);
-	do_div(append, get_sclk());
-	bfin_write_EMAC_PTP_ADDEND((u32)append);
+	input_clk = get_sclk();
+	phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
+	addend = phc_clk * (1ULL << 32);
+	do_div(addend, input_clk);
+	bfin_write_EMAC_PTP_ADDEND((u32)addend);
 
-	memset(&lp->cycles, 0, sizeof(lp->cycles));
-	lp->cycles.read = bfin_read_clock;
-	lp->cycles.mask = CLOCKSOURCE_MASK(64);
-	lp->cycles.mult = 1000000000 / PTP_CLK;
-	lp->cycles.shift = 0;
-
-	/* Synchronize our NIC clock against system wall clock */
-	memset(&lp->compare, 0, sizeof(lp->compare));
-	lp->compare.source = &lp->clock;
-	lp->compare.target = ktime_get_real;
-	lp->compare.num_samples = 10;
+	lp->addend = addend;
+	ppb = 1000000000ULL * input_clk;
+	do_div(ppb, phc_clk);
+	lp->max_ppb = ppb - 1000000000ULL - 1ULL;
 
 	/* Initialize hwstamp config */
 	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
 	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
 }
 
+static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
+{
+	u64 ns;
+	u32 lo, hi;
+
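+	/* The counter ticks every 2^shift ns; shifting the raw 64-bit value
+	 * left by lp->shift converts it to nanoseconds.
+	 */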
+	lo = bfin_read_EMAC_PTP_TIMELO();
+	hi = bfin_read_EMAC_PTP_TIMEHI();
+
+	ns = ((u64) hi) << 32;
+	ns |= lo;
+	ns <<= lp->shift;
+
+	return ns;
+}
+
+static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
+{
+	u32 hi, lo;
+
+	ns >>= lp->shift;
+	hi = ns >> 32;
+	lo = ns & 0xffffffff;
+
+	bfin_write_EMAC_PTP_TIMELO(lo);
+	bfin_write_EMAC_PTP_TIMEHI(hi);
+}
+
+/* PTP Hardware Clock operations */
+
+static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 adj;
+	u32 diff, addend;
+	int neg_adj = 0;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
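+	/* Scale the base addend by |ppb| parts per billion and apply the
+	 * difference: diff = addend * |ppb| / 10^9.
+	 */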
+	addend = lp->addend;
+	adj = addend;
+	adj *= ppb;
+	diff = div_u64(adj, 1000000000ULL);
+
+	addend = neg_adj ? addend - diff : addend + diff;
+
+	bfin_write_EMAC_PTP_ADDEND(addend);
+
+	return 0;
+}
+
+static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	s64 now;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	now = bfin_ptp_time_read(lp);
+	now += delta;
+	bfin_ptp_time_write(lp, now);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	return 0;
+}
+
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	ns = bfin_ptp_time_read(lp);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+	return 0;
+}
+
+static int bfin_ptp_settime(struct ptp_clock_info *ptp,
+			   const struct timespec *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	bfin_ptp_time_write(lp, ns);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	return 0;
+}
+
+static int bfin_ptp_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info bfin_ptp_caps = {
+	.owner		= THIS_MODULE,
+	.name		= "BF518 clock",
+	.max_adj	= 0,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.pps		= 0,
+	.adjfreq	= bfin_ptp_adjfreq,
+	.adjtime	= bfin_ptp_adjtime,
+	.gettime	= bfin_ptp_gettime,
+	.settime	= bfin_ptp_settime,
+	.enable		= bfin_ptp_enable,
+};
+
+static int bfin_phc_init(struct net_device *netdev, struct device *dev)
+{
+	struct bfin_mac_local *lp = netdev_priv(netdev);
+
+	lp->caps = bfin_ptp_caps;
+	lp->caps.max_adj = lp->max_ppb;
+	lp->clock = ptp_clock_register(&lp->caps, dev);
+	if (IS_ERR(lp->clock))
+		return PTR_ERR(lp->clock);
+
+	lp->phc_index = ptp_clock_index(lp->clock);
+	spin_lock_init(&lp->phc_lock);
+
+	return 0;
+}
+
+static void bfin_phc_release(struct bfin_mac_local *lp)
+{
+	ptp_clock_unregister(lp->clock);
+}
+
 #else
 # define bfin_mac_hwtstamp_is_none(cfg) 0
 # define bfin_mac_hwtstamp_init(dev)
 # define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
 # define bfin_rx_hwtstamp(dev, skb)
 # define bfin_tx_hwtstamp(dev, skb)
+# define bfin_phc_init(netdev, dev) 0
+# define bfin_phc_release(lp)
 #endif
 
 static inline void _tx_reclaim_skb(void)
@@ -1579,12 +1703,17 @@
 	}
 
 	bfin_mac_hwtstamp_init(ndev);
+	if (bfin_phc_init(ndev, &pdev->dev)) {
+		dev_err(&pdev->dev, "Cannot register PHC device!\n");
+		goto out_err_phc;
+	}
 
 	/* now, print out the card info, in a short format.. */
 	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
 	return 0;
 
+out_err_phc:
 out_err_reg_ndev:
 	free_irq(IRQ_MAC_RX, ndev);
 out_err_request_irq:
@@ -1603,6 +1732,8 @@
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct bfin_mac_local *lp = netdev_priv(ndev);
 
+	bfin_phc_release(lp);
+
 	platform_set_drvdata(pdev, NULL);
 
 	lp->mii_bus->priv = NULL;
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c..7a07ee0 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -11,8 +11,7 @@
 #define _BFIN_MAC_H_
 
 #include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@
 	struct mii_bus *mii_bus;
 
 #if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
-	struct cyclecounter cycles;
-	struct timecounter clock;
-	struct timecompare compare;
+	u32 addend;
+	unsigned int shift;
+	s32 max_ppb;
 	struct hwtstamp_config stamp_cfg;
+	struct ptp_clock_info caps;
+	struct ptp_clock *clock;
+	int phc_index;
+	spinlock_t phc_lock; /* protects time lo/hi registers */
 #endif
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c4..641d884 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,25 +34,16 @@
 
 #include "bnx2x_hsi.h"
 
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
-#define BCM_CNIC 1
 #include "../cnic_if.h"
-#endif
 
-#ifdef BCM_CNIC
-#define BNX2X_MIN_MSIX_VEC_CNT 3
-#define BNX2X_MSIX_VEC_FP_START 2
-#else
-#define BNX2X_MIN_MSIX_VEC_CNT 2
-#define BNX2X_MSIX_VEC_FP_START 1
-#endif
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp)		((bp)->min_msix_vec_cnt)
 
 #include <linux/mdio.h>
 
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_mfw_req.h"
-#include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
 #include "bnx2x_sp.h"
 #include "bnx2x_dcb.h"
@@ -256,15 +247,10 @@
 	/* FCoE L2 */
 #define	BNX2X_FCOE_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp) + 1)
 
-/** Additional rings budgeting */
-#ifdef BCM_CNIC
-#define CNIC_PRESENT			1
-#define FCOE_PRESENT			1
-#else
-#define CNIC_PRESENT			0
-#define FCOE_PRESENT			0
-#endif /* BCM_CNIC */
-#define NON_ETH_CONTEXT_USE	(FCOE_PRESENT)
+#define CNIC_SUPPORT(bp)		((bp)->cnic_support)
+#define CNIC_ENABLED(bp)		((bp)->cnic_enabled)
+#define CNIC_LOADED(bp)			((bp)->cnic_loaded)
+#define FCOE_INIT(bp)			((bp)->fcoe_init)
 
 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +283,7 @@
 	OOO_TXQ_IDX_OFFSET,
 };
 #define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
-#ifdef BCM_CNIC
 #define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
-#endif
 
 /* fast path */
 /*
@@ -585,15 +569,9 @@
 						->var)
 
 
-#define IS_ETH_FP(fp)			(fp->index < \
-					 BNX2X_NUM_ETH_QUEUES(fp->bp))
-#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX(fp->bp))
-#define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX(bp))
-#else
-#define IS_FCOE_FP(fp)		false
-#define IS_FCOE_IDX(idx)	false
-#endif
+#define IS_ETH_FP(fp)		((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp)		((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
 
 
 /* MC hsi */
@@ -886,6 +864,18 @@
 					 (CHIP_REV(bp) == CHIP_REV_Bx))
 #define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
 					 (CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure the
+ * Parser / Searcher for nic-only mode or for offload mode. Offload mode is
+ * configured if either the chip is E1x (where the MIC_MODE register is not
+ * applicable), or if cnic has already registered for this port (which means
+ * that the user wants storage services).
+ * 2. During cnic-related load, to know if offload mode is already configured
+ * in the HW or still needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only on ports for which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp)		(!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
 
 	int			flash_size;
 #define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
@@ -946,7 +936,6 @@
 
 	/* used to synchronize phy accesses */
 	struct mutex		phy_mutex;
-	int			need_hw_lock;
 
 	u32			port_stx;
 
@@ -1003,18 +992,15 @@
 #define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
 #define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
 
-#ifdef BCM_CNIC
 #define CNIC_ISCSI_CID_MAX	256
 #define CNIC_FCOE_CID_MAX	2048
 #define CNIC_CID_MAX		(CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
 #define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
-#endif
 
 #define QM_ILT_PAGE_SZ_HW	0
 #define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 #define QM_CID_ROUND		1024
 
-#ifdef BCM_CNIC
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW	0
 #define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1018,6 @@
 #define SRC_T2_SZ		SRC_ILT_SZ
 #define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
 
-#endif
-
 #define MAX_DMAE_C		8
 
 /* DMA memory not used in fastpath */
@@ -1227,7 +1211,6 @@
 	struct bnx2x_sp_objs	*sp_objs;
 	struct bnx2x_fp_stats	*fp_stats;
 	struct bnx2x_fp_txdata	*bnx2x_txq;
-	int			bnx2x_txq_size;
 	void __iomem		*regview;
 	void __iomem		*doorbells;
 	u16			db_size;
@@ -1350,6 +1333,15 @@
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
 #define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 
+	u8			cnic_support;
+	bool			cnic_enabled;
+	bool			cnic_loaded;
+
+	/* Flag that indicates that we can start looking for FCoE L2 queue
+	 * completions in the default status block.
+	 */
+	bool			fcoe_init;
+
 	int			pm_cap;
 	int			mrrs;
 
@@ -1420,6 +1412,8 @@
 #define BNX2X_MAX_COS			3
 #define BNX2X_MAX_TX_COS		2
 	int			num_queues;
+	uint			num_ethernet_queues;
+	uint			num_cnic_queues;
 	int			num_napi_queues;
 	int			disable_tpa;
 
@@ -1433,6 +1427,7 @@
 	u8			igu_dsb_id;
 	u8			igu_base_sb;
 	u8			igu_sb_cnt;
+	u8			min_msix_vec_cnt;
 
 	dma_addr_t		def_status_blk_mapping;
 
@@ -1478,16 +1473,16 @@
  * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
  * to CNIC.
  */
-#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_PRESENT)
+#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
 
 /*
  * Maximum CID count that might be required by the bnx2x:
  * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
  */
 #define BNX2X_L2_CID_COUNT(bp)	(BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
-				+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+				+ 2 * CNIC_SUPPORT(bp))
 #define BNX2X_L2_MAX_CID(bp)	(BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
-				+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+				+ 2 * CNIC_SUPPORT(bp))
 #define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
 					ILT_PAGE_CIDS))
 
@@ -1495,9 +1490,6 @@
 
 	int			dropless_fc;
 
-#ifdef BCM_CNIC
-	u32			cnic_flags;
-#define BNX2X_CNIC_FLAG_MAC_SET		1
 	void			*t2;
 	dma_addr_t		t2_mapping;
 	struct cnic_ops	__rcu	*cnic_ops;
@@ -1518,7 +1510,6 @@
 
 	/* Start index of the "special" (CNIC related) L2 cleints */
 	u8				cnic_base_cl_id;
-#endif
 
 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
@@ -1647,9 +1638,9 @@
 /* Tx queues may be less or equal to Rx queues */
 extern int num_queues;
 #define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
 #define BNX2X_NUM_NON_CNIC_QUEUES(bp)	(BNX2X_NUM_QUEUES(bp) - \
-					 NON_ETH_CONTEXT_USE)
+					 (bp)->num_cnic_queues)
 #define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
 
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1680,13 @@
 	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
 };
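+/* CNIC (FCoE) fastpath queues occupy the indices right after the Ethernet
+ * queues, i.e. [BNX2X_NUM_ETH_QUEUES(bp), BNX2X_NUM_QUEUES(bp)).
+ */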
 
+#define for_each_cnic_queue(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
 #define for_each_eth_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 
@@ -1702,6 +1700,22 @@
 		else
 
 /* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var)			\
+	for ((var) = 0;						\
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) :	\
+		      BNX2X_NUM_ETH_QUEUES(bp));		\
+	     (var)++)						\
+		if (skip_rx_queue(bp, var))			\
+			continue;				\
+		else
+
+#define for_each_rx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_rx_queue(bp, var))	\
+			continue;		\
+		else
+
 #define for_each_rx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_rx_queue(bp, var))	\
@@ -1709,6 +1723,22 @@
 		else
 
 /* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var)			\
+	for ((var) = 0;						\
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) :	\
+		      BNX2X_NUM_ETH_QUEUES(bp));		\
+	     (var)++)						\
+		if (skip_tx_queue(bp, var))			\
+			continue;				\
+		else
+
+#define for_each_tx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_tx_queue(bp, var))	\
+			continue;		\
+		else
+
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_tx_queue(bp, var))	\
@@ -2179,7 +2209,6 @@
 #define BNX2X_MF_SD_PROTOCOL(bp) \
 	((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
 
-#ifdef BCM_CNIC
 #define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
 	(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
 
@@ -2196,9 +2225,12 @@
 #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
 				(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
 				 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
-#else
-#define IS_MF_FCOE_AFEX(bp)	false
-#endif
 
+enum {
+	SWITCH_UPDATE,
+	AFEX_UPDATE,
+};
+
+#define NUM_MACS	8
 
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a..8779ac1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -948,14 +948,12 @@
 {
 	mutex_lock(&bp->port.phy_mutex);
 
-	if (bp->port.need_hw_lock)
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 }
 
 void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
-	if (bp->port.need_hw_lock)
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
@@ -1152,6 +1150,25 @@
 	}
 }
 
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * this will generate an interrupt (to the TSTORM)
+		 * must only be done after chip is initialized
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1159,7 +1176,7 @@
 	int i, j;
 
 	/* Allocate TPA resources */
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		DP(NETIF_MSG_IFUP,
@@ -1217,7 +1234,7 @@
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
@@ -1244,29 +1261,45 @@
 	}
 }
 
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
+{
+	u8 cos;
+	struct bnx2x *bp = fp->bp;
+
+	for_each_cos_in_tx_queue(fp, cos) {
+		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+		unsigned pkts_compl = 0, bytes_compl = 0;
+
+		u16 sw_prod = txdata->tx_pkt_prod;
+		u16 sw_cons = txdata->tx_pkt_cons;
+
+		while (sw_cons != sw_prod) {
+			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+					  &pkts_compl, &bytes_compl);
+			sw_cons++;
+		}
+
+		netdev_tx_reset_queue(
+			netdev_get_tx_queue(bp->dev,
+					    txdata->txq_index));
+	}
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue_cnic(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+	}
+}
+
 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
 	int i;
-	u8 cos;
 
-	for_each_tx_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
-			unsigned pkts_compl = 0, bytes_compl = 0;
-
-			u16 sw_prod = txdata->tx_pkt_prod;
-			u16 sw_cons = txdata->tx_pkt_cons;
-
-			while (sw_cons != sw_prod) {
-				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
-				    &pkts_compl, &bytes_compl);
-				sw_cons++;
-			}
-			netdev_tx_reset_queue(
-				netdev_get_tx_queue(bp->dev,
-						    txdata->txq_index));
-		}
+	for_each_eth_queue(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
 	}
 }
 
@@ -1294,11 +1327,20 @@
 	}
 }
 
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		bnx2x_free_rx_bds(&bp->fp[j]);
+	}
+}
+
 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
 	int j;
 
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		bnx2x_free_rx_bds(fp);
@@ -1308,6 +1350,12 @@
 	}
 }
 
+void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs_cnic(bp);
+	bnx2x_free_rx_skbs_cnic(bp);
+}
+
 void bnx2x_free_skbs(struct bnx2x *bp)
 {
 	bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1395,12 @@
 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 	   bp->msix_table[offset].vector);
 	offset++;
-#ifdef BCM_CNIC
-	if (nvecs == offset)
-		return;
-	offset++;
-#endif
+
+	if (CNIC_SUPPORT(bp)) {
+		if (nvecs == offset)
+			return;
+		offset++;
+	}
 
 	for_each_eth_queue(bp, i) {
 		if (nvecs == offset)
@@ -1368,7 +1417,7 @@
 	if (bp->flags & USING_MSIX_FLAG &&
 	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
-				     CNIC_PRESENT + 1);
+				     CNIC_SUPPORT(bp) + 1);
 	else
 		free_irq(bp->dev->irq, bp->dev);
 }
@@ -1382,12 +1431,14 @@
 	   bp->msix_table[0].entry);
 	msix_vec++;
 
-#ifdef BCM_CNIC
-	bp->msix_table[msix_vec].entry = msix_vec;
-	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
-	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
-	msix_vec++;
-#endif
+	/* Cnic requires an msix vector for itself */
+	if (CNIC_SUPPORT(bp)) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+			       msix_vec, bp->msix_table[msix_vec].entry);
+		msix_vec++;
+	}
+
 	/* We need separate vectors for ETH queues only (not FCoE) */
 	for_each_eth_queue(bp, i) {
 		bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1447,7 @@
 		msix_vec++;
 	}
 
-	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
 
 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 
@@ -1404,7 +1455,7 @@
 	 * reconfigure number of tx/rx queues according to available
 	 * MSI-X vectors
 	 */
-	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
 		/* how less vectors we will have? */
 		int diff = req_cnt - rc;
 
@@ -1419,7 +1470,8 @@
 		/*
 		 * decrease number of queues by number of unallocated entries
 		 */
-		bp->num_queues -= diff;
+		bp->num_ethernet_queues -= diff;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 
 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
 			       bp->num_queues);
@@ -1435,6 +1487,9 @@
 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
 		bp->flags |= USING_SINGLE_MSIX_FLAG;
 
+		BNX2X_DEV_INFO("set number of queues to 1\n");
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 	} else if (rc < 0) {
 		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
 		goto no_msix;
@@ -1464,9 +1519,9 @@
 		return -EBUSY;
 	}
 
-#ifdef BCM_CNIC
-	offset++;
-#endif
+	if (CNIC_SUPPORT(bp))
+		offset++;
+
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1540,7 @@
 	}
 
 	i = BNX2X_NUM_ETH_QUEUES(bp);
-	offset = 1 + CNIC_PRESENT;
+	offset = 1 + CNIC_SUPPORT(bp);
 	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
 	       bp->msix_table[0].vector,
 	       0, bp->msix_table[offset].vector,
@@ -1556,19 +1611,35 @@
 	return 0;
 }
 
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1576,6 +1647,8 @@
 {
 	if (netif_running(bp->dev)) {
 		bnx2x_napi_enable(bp);
+		if (CNIC_LOADED(bp))
+			bnx2x_napi_enable_cnic(bp);
 		bnx2x_int_enable(bp);
 		if (bp->state == BNX2X_STATE_OPEN)
 			netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1659,15 @@
 {
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_napi_disable_cnic(bp);
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
+	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
 		u16 ether_type = ntohs(hdr->h_proto);
 
@@ -1609,7 +1683,7 @@
 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
 			return bnx2x_fcoe_tx(bp, txq_index);
 	}
-#endif
+
 	/* select a non-FCoE queue */
 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
@@ -1618,15 +1692,15 @@
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
 	/* RSS queues */
-	bp->num_queues = bnx2x_calc_num_queues(bp);
+	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
 
-#ifdef BCM_CNIC
 	/* override in STORAGE SD modes */
 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
-		bp->num_queues = 1;
-#endif
+		bp->num_ethernet_queues = 1;
+
 	/* Add special queues */
-	bp->num_queues += NON_ETH_CONTEXT_USE;
+	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 
 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
 }
@@ -1653,20 +1727,18 @@
  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
  */
-static int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
 {
 	int rc, tx, rx;
 
 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
-	rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
+	rx = BNX2X_NUM_ETH_QUEUES(bp);
 
 /* account for fcoe queue */
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
-		rx += FCOE_PRESENT;
-		tx += FCOE_PRESENT;
+	if (include_cnic && !NO_FCOE(bp)) {
+		rx++;
+		tx++;
 	}
-#endif
 
 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
 	if (rc) {
@@ -1859,14 +1931,26 @@
 		(bp)->state = BNX2X_STATE_ERROR; \
 		goto label; \
 	} while (0)
-#else
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		goto label; \
+	} while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
 #define LOAD_ERROR_EXIT(bp, label) \
 	do { \
 		(bp)->state = BNX2X_STATE_ERROR; \
 		(bp)->panic = 1; \
 		return -EBUSY; \
 	} while (0)
-#endif
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
 
 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 {
@@ -1959,10 +2043,8 @@
 		fp->max_cos = 1;
 
 	/* Init txdata pointers */
-#ifdef BCM_CNIC
 	if (IS_FCOE_FP(fp))
 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
-#endif
 	if (IS_ETH_FP(fp))
 		for_each_cos_in_tx_queue(fp, cos)
 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2062,95 @@
 	else if (bp->flags & GRO_ENABLE_FLAG)
 		fp->mode = TPA_MODE_GRO;
 
-#ifdef BCM_CNIC
 	/* We don't want TPA on an FCoE L2 ring */
 	if (IS_FCOE_FP(fp))
 		fp->disable_tpa = 1;
-#endif
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+	int i, rc, port = BP_PORT(bp);
+
+	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+	mutex_init(&bp->cnic_mutex);
+
+	rc = bnx2x_alloc_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	rc = bnx2x_alloc_fp_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Update the number of queues with the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Add all CNIC NAPI objects */
+	bnx2x_add_all_napi_cnic(bp);
+	DP(NETIF_MSG_IFUP, "cnic napi added\n");
+	bnx2x_napi_enable_cnic(bp);
+
+	rc = bnx2x_init_hw_func_cnic(bp);
+	if (rc)
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+	bnx2x_nic_init_cnic(bp);
+
+	/* Enable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+	for_each_cnic_queue(bp, i) {
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+		if (rc) {
+			BNX2X_ERR("Queue setup failed\n");
+			LOAD_ERROR_EXIT(bp, load_error_cnic2);
+		}
+	}
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_setup_cnic_info(bp);
+	bp->cnic_loaded = true;
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+
+	DP(NETIF_MSG_IFUP, "Ending CNIC-related load successfully\n");
+
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+	bnx2x_napi_disable_cnic(bp);
+	/* Update the number of queues without the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 0);
+	if (rc)
+		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+	BNX2X_ERR("CNIC-related load failed\n");
+	bnx2x_free_fp_mem_cnic(bp);
+	bnx2x_free_mem_cnic(bp);
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
 
@@ -1995,6 +2161,10 @@
 	u32 load_code;
 	int i, rc;
 
+	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+	DP(NETIF_MSG_IFUP,
+	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic)) {
 		BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2192,11 @@
 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 	for_each_queue(bp, i)
 		bnx2x_bz_fp(bp, i);
-	memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
-	       sizeof(struct bnx2x_fp_txdata));
+	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+				  bp->num_cnic_queues) *
+				  sizeof(struct bnx2x_fp_txdata));
 
+	bp->fcoe_init = false;
 
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2206,9 @@
 
 	/* As long as bnx2x_alloc_mem() may possibly update
 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
-	 * come after it.
+	 * come after it. At this stage cnic queues are not counted.
 	 */
-	rc = bnx2x_set_real_num_queues(bp);
+	rc = bnx2x_set_real_num_queues(bp, 0);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
 		LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2222,7 @@
 
 	/* Add all NAPI objects */
 	bnx2x_add_all_napi(bp);
+	DP(NETIF_MSG_IFUP, "napi added\n");
 	bnx2x_napi_enable(bp);
 
 	/* set pf load just before approaching the MCP */
@@ -2073,7 +2246,8 @@
 			 DRV_PULSE_SEQ_MASK);
 		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
 
-		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+					     DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
@@ -2191,23 +2365,18 @@
 		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
-#ifdef BCM_CNIC
-	/* Enable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
-#endif
-
-	for_each_nondefault_queue(bp, i) {
+	for_each_nondefault_eth_queue(bp, i) {
 		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 		if (rc) {
 			BNX2X_ERR("Queue setup failed\n");
-			LOAD_ERROR_EXIT(bp, load_error4);
+			LOAD_ERROR_EXIT(bp, load_error3);
 		}
 	}
 
 	rc = bnx2x_init_rss_pf(bp);
 	if (rc) {
 		BNX2X_ERR("PF RSS init failed\n");
-		LOAD_ERROR_EXIT(bp, load_error4);
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 	/* Now when Clients are configured we are ready to work */
@@ -2217,7 +2386,7 @@
 	rc = bnx2x_set_eth_mac(bp, true);
 	if (rc) {
 		BNX2X_ERR("Setting Ethernet MAC failed\n");
-		LOAD_ERROR_EXIT(bp, load_error4);
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 	if (bp->pending_max) {
@@ -2264,14 +2433,8 @@
 	/* start the timer */
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
-#ifdef BCM_CNIC
-	/* re-read iscsi info */
-	bnx2x_get_iscsi_info(bp);
-	bnx2x_setup_cnic_irq_info(bp);
-	bnx2x_setup_cnic_info(bp);
-	if (bp->state == BNX2X_STATE_OPEN)
-		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
+	if (CNIC_ENABLED(bp))
+		bnx2x_load_cnic(bp);
 
 	/* mark driver is loaded in shmem2 */
 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2456,11 @@
 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
 		bnx2x_dcbx_init(bp, false);
 
+	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
+
 	return 0;
 
 #ifndef BNX2X_STOP_ON_ERROR
-load_error4:
-#ifdef BCM_CNIC
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-#endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
 
@@ -2338,6 +2498,8 @@
 	int i;
 	bool global = false;
 
+	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
 	/* mark driver is unloaded in shmem2 */
 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		u32 val;
@@ -2373,14 +2535,13 @@
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 	smp_mb();
 
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 	netdev_reset_tc(bp->dev);
 
-#ifdef BCM_CNIC
-	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
 	del_timer_sync(&bp->timer);
@@ -2414,7 +2575,8 @@
 		bnx2x_netif_stop(bp, 1);
 		/* Delete all NAPI objects */
 		bnx2x_del_all_napi(bp);
-
+		if (CNIC_LOADED(bp))
+			bnx2x_del_all_napi_cnic(bp);
 		/* Release IRQs */
 		bnx2x_free_irq(bp);
 
@@ -2435,12 +2597,19 @@
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_free_skbs_cnic(bp);
 	for_each_rx_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 
+	if (CNIC_LOADED(bp)) {
+		bnx2x_free_fp_mem_cnic(bp);
+		bnx2x_free_mem_cnic(bp);
+	}
 	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
+	bp->cnic_loaded = false;
 
 	/* Check if there are pending parity attentions. If there are - set
 	 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2629,8 @@
 	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
 		bnx2x_disable_close_the_gate(bp);
 
+	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
 	return 0;
 }
 
@@ -2550,7 +2721,7 @@
 
 		/* Fall out from the NAPI loop if needed */
 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-#ifdef BCM_CNIC
+
 			/* No need to update SB for FCoE L2 ring as long as
 			 * it's connected to the default SB and the SB
 			 * has been updated when NAPI was scheduled.
@@ -2559,8 +2730,6 @@
 				napi_complete(napi);
 				break;
 			}
-#endif
-
 			bnx2x_update_fpsb_idx(fp);
 			/* bnx2x_has_rx_work() reads the status block,
 			 * thus we need to ensure that status block indices
@@ -2940,7 +3109,7 @@
 	txq_index = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, txq_index);
 
-	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
 
 	txdata = &bp->bnx2x_txq[txq_index];
 
@@ -3339,13 +3508,11 @@
 		return -EINVAL;
 	}
 
-#ifdef BCM_CNIC
 	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
 	    !is_zero_ether_addr(addr->sa_data)) {
 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
 		return -EINVAL;
 	}
-#endif
 
 	if (netif_running(dev))  {
 		rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3536,11 @@
 	u8 cos;
 
 	/* Common */
-#ifdef BCM_CNIC
+
 	if (IS_FCOE_IDX(fp_index)) {
 		memset(sb, 0, sizeof(union host_hc_status_block));
 		fp->status_blk_mapping = 0;
-
 	} else {
-#endif
 		/* status blocks */
 		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3552,8 @@
 				       bnx2x_fp(bp, fp_index,
 						status_blk_mapping),
 				       sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
 	}
-#endif
+
 	/* Rx */
 	if (!skip_rx_queue(bp, fp_index)) {
 		bnx2x_free_rx_bds(fp);
@@ -3431,10 +3595,17 @@
 	/* end of fastpath */
 }
 
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+	int i;
+	for_each_cnic_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
 void bnx2x_free_fp_mem(struct bnx2x *bp)
 {
 	int i;
-	for_each_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		bnx2x_free_fp_mem_at(bp, i);
 }
 
@@ -3519,14 +3690,11 @@
 	u8 cos;
 	int rx_ring_size = 0;
 
-#ifdef BCM_CNIC
 	if (!bp->rx_ring_size &&
 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		rx_ring_size = MIN_RX_SIZE_NONTPA;
 		bp->rx_ring_size = rx_ring_size;
-	} else
-#endif
-	if (!bp->rx_ring_size) {
+	} else if (!bp->rx_ring_size) {
 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
 
 		if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3718,8 @@
 
 	/* Common */
 	sb = &bnx2x_fp(bp, index, status_blk);
-#ifdef BCM_CNIC
+
 	if (!IS_FCOE_IDX(index)) {
-#endif
 		/* status blocks */
 		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3729,7 @@
 			BNX2X_PCI_ALLOC(sb->e1x_sb,
 				&bnx2x_fp(bp, index, status_blk_mapping),
 			    sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
 	}
-#endif
 
 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
 	 * set shortcuts for it.
@@ -3641,22 +3806,8 @@
 	return 0;
 }
 
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
 {
-	int i;
-
-	/**
-	 * 1. Allocate FP for leading - fatal if error
-	 * 2. {CNIC} Allocate FCoE FP - fatal if error
-	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
-	 * 4. Allocate RSS - fix number of queues if error
-	 */
-
-	/* leading */
-	if (bnx2x_alloc_fp_mem_at(bp, 0))
-		return -ENOMEM;
-
-#ifdef BCM_CNIC
 	if (!NO_FCOE(bp))
 		/* FCoE */
 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
@@ -3664,7 +3815,21 @@
 			 * NO_FCOE_FLAG
 			 */
 			return -ENOMEM;
-#endif
+
+	return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+	int i;
+
+	/* 1. Allocate FP for leading - fatal if error
+	 * 2. Allocate RSS - fix number of queues if error
+	 */
+
+	/* leading */
+	if (bnx2x_alloc_fp_mem_at(bp, 0))
+		return -ENOMEM;
 
 	/* RSS */
 	for_each_nondefault_eth_queue(bp, i)
@@ -3676,17 +3841,17 @@
 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 
 		WARN_ON(delta < 0);
-#ifdef BCM_CNIC
-		/**
-		 * move non eth FPs next to last eth FP
-		 * must be done in that order
-		 * FCOE_IDX < FWD_IDX < OOO_IDX
-		 */
+		if (CNIC_SUPPORT(bp))
+			/* move non eth FPs next to last eth FP
+			 * must be done in that order
+			 * FCOE_IDX < FWD_IDX < OOO_IDX
+			 */
 
-		/* move FCoE fp even NO_FCOE_FLAG is on */
-		bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
-#endif
-		bp->num_queues -= delta;
+			/* move FCoE fp even if NO_FCOE_FLAG is on */
+			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+		bp->num_ethernet_queues -= delta;
+		bp->num_queues = bp->num_ethernet_queues +
+				 bp->num_cnic_queues;
 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
 			  bp->num_queues + delta, bp->num_queues);
 	}
@@ -3711,7 +3876,7 @@
 	struct msix_entry *tbl;
 	struct bnx2x_ilt *ilt;
 	int msix_table_size = 0;
-	int fp_array_size;
+	int fp_array_size, txq_array_size;
 	int i;
 
 	/*
@@ -3721,7 +3886,7 @@
 	msix_table_size = bp->igu_sb_cnt + 1;
 
 	/* fp array: RSS plus CNIC related L2 queues */
-	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
 	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
 
 	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3915,12 @@
 		goto alloc_err;
 
 	/* Allocate memory for the transmission queues array */
-	bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
-#ifdef BCM_CNIC
-	bp->bnx2x_txq_size++;
-#endif
-	bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
-				sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+	txq_array_size =
+		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+				GFP_KERNEL);
 	if (!bp->bnx2x_txq)
 		goto alloc_err;
 
@@ -3838,7 +4003,7 @@
 	return LINK_CONFIG_IDX(sel_phy_idx);
 }
 
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
 	struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c..ad28074 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -238,7 +238,6 @@
  * @dev_instance:	private instance
  */
 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
-#ifdef BCM_CNIC
 
 /**
  * bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@
  */
 void bnx2x_setup_cnic_info(struct bnx2x *bp);
 
-#endif
-
 /**
  * bnx2x_int_enable - enable HW interrupts.
  *
@@ -283,7 +280,7 @@
 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
 
 /**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_nic_init_cnic - init driver internals for cnic.
  *
  * @bp:		driver handle
  * @load_code:	COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@
  *  - status blocks
  *  - etc.
  */
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_nic_init_cnic(struct bnx2x *bp);
 
 /**
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp:		driver handle
+ *
+ * Initializes:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+/**
+ * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
+/**
  * bnx2x_alloc_mem - allocate driver's memory.
  *
  * @bp:		driver handle
@@ -303,6 +317,12 @@
 int bnx2x_alloc_mem(struct bnx2x *bp);
 
 /**
+ * bnx2x_free_mem_cnic - release driver's memory for cnic.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem_cnic(struct bnx2x *bp);
+/**
  * bnx2x_free_mem - release driver's memory.
  *
  * @bp:		driver handle
@@ -407,6 +427,7 @@
 void bnx2x_set_reset_in_progress(struct bnx2x *bp);
 void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
 /**
  * bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@
 void bnx2x_ilt_set_info(struct bnx2x *bp);
 
 /**
+ * bnx2x_ilt_set_info_cnic - prepare ILT configurations
+ * for SRC and TM.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
+
+/**
  * bnx2x_dcbx_init - initialize dcbx protocol.
  *
  * @bp:		driver handle
@@ -491,12 +520,17 @@
 /* Release IRQ vectors */
 void bnx2x_free_irq(struct bnx2x *bp);
 
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
 void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
 int bnx2x_alloc_fp_mem(struct bnx2x *bp);
 void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
+void bnx2x_free_skbs_cnic(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
 void bnx2x_netif_start(struct bnx2x *bp);
+int bnx2x_load_cnic(struct bnx2x *bp);
 
 /**
  * bnx2x_enable_msix - set msix configuration.
@@ -547,7 +581,7 @@
  */
 int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
 
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
 /**
  * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
  *
@@ -793,23 +827,39 @@
 	sge->addr_lo = 0;
 }
 
+static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_rx_queue_cnic(bp, i)
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
 static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	bp->num_napi_queues = bp->num_queues;
-
 	/* Add NAPI objects */
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
 
+static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
 static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
@@ -979,11 +1029,9 @@
 {
 	struct bnx2x *bp = fp->bp;
 	if (!CHIP_IS_E1x(bp)) {
-#ifdef BCM_CNIC
 		/* there are special statistics counters for FCoE 136..140 */
 		if (IS_FCOE_FP(fp))
 			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
-#endif
 		return fp->cl_id;
 	}
 	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@
 	   txdata->cid, txdata->txq_index);
 }
 
-#ifdef BCM_CNIC
 static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
 {
 	return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@
 	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 	   fp->igu_sb_id);
 }
-#endif
 
 static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
 				       struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@
 	 */
 	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
-#ifdef BCM_CNIC
+
 /**
  * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
  *
@@ -1288,7 +1334,6 @@
  *
  */
 void bnx2x_get_iscsi_info(struct bnx2x *bp);
-#endif
 
 /**
  * bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@
 
 static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
 {
-	if (is_valid_ether_addr(addr))
+	if (is_valid_ether_addr(addr) ||
+	    (is_zero_ether_addr(addr) &&
+	     (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
 		return true;
-#ifdef BCM_CNIC
-	if (is_zero_ether_addr(addr) &&
-	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
-		return true;
-#endif
+
 	return false;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c38..cba4a16 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1908,10 +1908,10 @@
 	/* first the HW mac address */
 	memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
 
-#ifdef BCM_CNIC
-	/* second SAN address */
-	memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
-#endif
+	if (CNIC_LOADED(bp))
+		/* second SAN address */
+		memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
+		       netdev->addr_len);
 }
 
 static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 6e5bdd1..e05f981 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2660,20 +2660,25 @@
 		return 1;	/* cycle on/off once per second */
 
 	case ETHTOOL_ID_ON:
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_set_led(&bp->link_params, &bp->link_vars,
 			      LED_MODE_ON, SPEED_1000);
+		bnx2x_release_phy_lock(bp);
 		break;
 
 	case ETHTOOL_ID_OFF:
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_set_led(&bp->link_params, &bp->link_vars,
 			      LED_MODE_FRONT_PANEL_OFF, 0);
-
+		bnx2x_release_phy_lock(bp);
 		break;
 
 	case ETHTOOL_ID_INACTIVE:
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_set_led(&bp->link_params, &bp->link_vars,
 			      LED_MODE_OPER,
 			      bp->link_vars.line_speed);
+		bnx2x_release_phy_lock(bp);
 	}
 
 	return 0;
@@ -2901,7 +2906,9 @@
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
 	bnx2x_disable_msi(bp);
-	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+	bp->num_ethernet_queues = num_rss;
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
 	bnx2x_set_int_mode(bp);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 1870492..1504e0a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -695,6 +695,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE    0x00000e00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -751,6 +752,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE     0x00000e00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -1246,6 +1248,7 @@
 	#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
 	#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
 	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+	#define REQ_BC_VER_4_MT_SUPPORTED               0x00070201
 	#define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 	#define REQ_BC_VER_4_FCOE_FEATURES              0x00070209
 
@@ -2159,6 +2162,16 @@
 	#define SHMEM_EEE_TIME_OUTPUT_BIT	   0x80000000
 
 	u32 sizeof_port_stats;
+
+	/* Link Flap Avoidance */
+	u32 lfa_host_addr[PORT_MAX];
+	u32 reserved1;
+
+	u32 reserved2;				/* Offset 0x148 */
+	u32 reserved3;				/* Offset 0x14C */
+	u32 reserved4;				/* Offset 0x150 */
+	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
+	#define LINK_ATTR_SYNC_KR2_ENABLE	(1<<0)
 };
 
 
@@ -4845,9 +4858,17 @@
 	__le32 reserved2;
 };
 
-/*
- * union for all event ring message types
- */
+/* function update event data */
+struct function_update_event_data {
+	u8 echo;
+	u8 reserved;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le32 reserved2;
+};
+
+
+/* union for all event ring message types */
 union event_data {
 	struct vf_pf_event_data vf_pf_event;
 	struct eth_event_data eth_event;
@@ -4855,6 +4876,7 @@
 	struct vf_flr_event_data vf_flr_event;
 	struct malicious_vf_event_data malicious_vf_event;
 	struct vif_list_event_data vif_list_event;
+	struct function_update_event_data function_update_event;
 };
 
 
@@ -4984,8 +5006,10 @@
 	u8 allowed_priorities;
 	u8 network_cos_mode;
 	u8 lb_mode_en;
-	u8 reserved0;
-	__le32 reserved1;
+	u8 tx_switch_suspend_change_flg;
+	u8 tx_switch_suspend;
+	u8 echo;
+	__le16 reserved1;
 };
 
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d90..d755acf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@
 	return rc;
 }
 
+static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
+{
+	int rc = 0;
+
+	if (CONFIGURE_NIC_MODE(bp))
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+	return rc;
+}
+
 static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
 {
 	int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
 	if (!rc)
 		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
-	if (!rc)
+	if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
 		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
-	if (!rc)
-		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
 
 	return rc;
 }
@@ -781,12 +791,19 @@
 	bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
 }
 
+static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
+{
+	if (CONFIGURE_NIC_MODE(bp))
+		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
 static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 {
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
-	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
-	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+	if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
 }
 
 static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@
 /****************************************************************************
 * SRC initializations
 ****************************************************************************/
-#ifdef BCM_CNIC
 /* called during init func stage */
 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
 			      dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@
 		    U64_HI((u64)t2_mapping +
 			   (src_cid_count-1) * sizeof(struct src_ent)));
 }
-#endif
 #endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index f6cfdc6..3e7d824 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -121,6 +121,7 @@
 #define	GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
 #define	GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
 #define	GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define	GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
 #define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
 #define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -1440,30 +1441,47 @@
 /******************************************************************/
 /*			MAC/PBF section				  */
 /******************************************************************/
-static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
+			       u32 emac_base)
 {
-	u32 mode, emac_base;
+	u32 new_mode, cur_mode;
+	u32 clc_cnt;
 	/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
 	 * (a value of 49==0x31) and make sure that the AUTO poll is off
 	 */
+	cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
 
-	if (CHIP_IS_E2(bp))
-		emac_base = GRCBASE_EMAC0;
-	else
-		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-	mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-	mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
-		  EMAC_MDIO_MODE_CLOCK_CNT);
 	if (USES_WARPCORE(bp))
-		mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+		clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
 	else
-		mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+		clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
 
-	mode |= (EMAC_MDIO_MODE_CLAUSE_45);
-	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+	if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+	    (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+		return;
 
+	new_mode = cur_mode &
+		~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+	new_mode |= clc_cnt;
+	new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+
+	DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
+	   cur_mode, new_mode);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
 	udelay(40);
 }
+
+static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
+					struct link_params *params)
+{
+	u8 phy_index;
+	/* Set mdio clock per phy */
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++)
+		bnx2x_set_mdio_clk(bp, params->chip_id,
+				   params->phy[phy_index].mdio_ctrl);
+}
+
 static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
 {
 	u32 port4mode_ovwr_val;
@@ -1508,7 +1526,8 @@
 		}
 		timeout--;
 	} while (val & EMAC_MODE_RESET);
-	bnx2x_set_mdio_clk(bp, params->chip_id, port);
+
+	bnx2x_set_mdio_emac_per_phy(bp, params);
 	/* Set mac address */
 	val = ((params->mac_addr[0] << 8) |
 		params->mac_addr[1]);
@@ -1664,7 +1683,10 @@
 	 * ports of the path
 	 */
 
-	if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
+	if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
+	     (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
+	     (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
+	    is_port4mode &&
 	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
 	     MISC_REGISTERS_RESET_REG_2_XMAC)) {
 		DP(NETIF_MSG_LINK,
@@ -1760,6 +1782,18 @@
 	 */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
 
+	/* When XMAC is in XLGMII mode, disable sending idles for fault
+	 * detection.
+	 */
+	if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
+		REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
+		       (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+			XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+		REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+		REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+	}
 	/* Set Max packet size */
 	REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
 
@@ -1780,6 +1814,12 @@
 	/* Enable TX and RX */
 	val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
 
+	/* Set MAC in XLGMII mode for dual-mode */
+	if ((vars->line_speed == SPEED_20000) &&
+	    (params->phy[INT_PHY].supported &
+	     SUPPORTED_20000baseKR2_Full))
+		val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
 	/* Check loopback mode */
 	if (lb)
 		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
@@ -2096,6 +2136,16 @@
 			port_mb[params->port].link_status), link_status);
 }
 
+static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (SHMEM2_HAS(bp, link_attr_sync))
+		REG_WR(bp, params->shmem2_base +
+		       offsetof(struct shmem2_region,
+				link_attr_sync[params->port]), link_attr);
+}
+
 static void bnx2x_update_pfc_nig(struct link_params *params,
 		struct link_vars *vars,
 		struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2126,7 +2176,7 @@
 		if (CHIP_IS_E3(bp))
 			ppp_enable = 0;
 		else
-		ppp_enable = 1;
+			ppp_enable = 1;
 		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
 		xcm_out_en = 0;
@@ -2247,7 +2297,6 @@
 	return bnx2x_status;
 }
 
-
 static int bnx2x_bmac1_enable(struct link_params *params,
 			      struct link_vars *vars,
 			      u8 is_lb)
@@ -2651,6 +2700,13 @@
 	u32 val;
 	u16 i;
 	int rc = 0;
+	u32 chip_id;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+		chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+			  ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+		bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+	}
+
 	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 			      EMAC_MDIO_STATUS_10MB);
@@ -2719,6 +2775,13 @@
 	u32 tmp;
 	u8 i;
 	int rc = 0;
+	u32 chip_id;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+		chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+			  ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+		bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+	}
+
 	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 			      EMAC_MDIO_STATUS_10MB);
@@ -3147,6 +3210,15 @@
 	bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
 }
 
+static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 devad, u16 reg, u16 and_val)
+{
+	u16 val;
+	bnx2x_cl45_read(bp, phy, devad, reg, &val);
+	bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
+}
+
 int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
 		   u8 devad, u16 reg, u16 *ret_val)
 {
@@ -3551,6 +3623,44 @@
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
+static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
+					 struct link_params *params,
+					 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 i;
+	static struct bnx2x_reg_set reg_set[] = {
+		/* Step 1 - Program the TX/RX alignment markers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+		/* Step 2 - Configure the NP registers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+	};
+	DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
+
+	for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+
+	/* Start KR2 work-around timer which handles BCM8073 link-partner */
+	vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, vars->link_attr_sync);
+}
 
 static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
 					       struct link_params *params)
@@ -3564,6 +3674,21 @@
 				 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
 }
 
+static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	/* Restart autoneg on the leading lane only */
+	struct bnx2x *bp = params->bp;
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, lane);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+}
+
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 					struct link_params *params,
 					struct link_vars *vars) {
@@ -3576,7 +3701,9 @@
 		{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
 		/* Disable Autoneg: re-enable it after adv is done. */
-		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+		{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
 	};
 	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
 	/* Set to default registers that may be overriden by 10G force */
@@ -3585,11 +3712,11 @@
 				 reg_set[i].val);
 
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-		MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
-	cl72_ctrl &= 0xf8ff;
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+	cl72_ctrl &= 0x08ff;
 	cl72_ctrl |= 0x3800;
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-		MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
 
 	/* Check adding advertisement for 1G KX */
 	if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3624,6 +3751,16 @@
 		     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 		      (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 		      (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+	/* Configure the next lane if dual mode */
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
+				 ((0x02 <<
+				 MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+				  (0x06 <<
+				   MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+				  (0x09 <<
+				MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
 			 0x03f0);
@@ -3670,10 +3807,26 @@
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
 
-	/* Enable Autoneg */
-	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+	    (phy->req_line_speed == SPEED_20000)) {
 
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, lane);
+
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
+					 (1<<11));
+
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+		bnx2x_set_aer_mmd(params, phy);
+
+		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+	}
+
+	/* Enable Autoneg: only on the main lane */
+	bnx2x_warpcore_restart_AN_KR(phy, params);
 }
 
 static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
@@ -3692,9 +3845,7 @@
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
 		/* Leave cl72 training enable, needed for KR */
-		{MDIO_PMA_DEVAD,
-		MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
-		0x2}
+		{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
 	};
 
 	for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
@@ -3764,27 +3915,21 @@
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
 
 	/* Disable 100FX Enable and Auto-Detect */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_FX100_CTRL1, &val);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
 
 	/* Disable 100FX Idle detect */
 	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 				 MDIO_WC_REG_FX100_CTRL3, 0x0080);
 
 	/* Set Block address to Remote PHY & Clear forced_speed[5] */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_DIGITAL4_MISC3, &val);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
 
 	/* Turn off auto-detect & fiber mode */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
-			 (val & 0xFFEE));
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+				  0xFFEE);
 
 	/* Set filter_force_link, disable_false_link and parallel_detect */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3846,22 +3991,65 @@
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
 
 	/* Release tx_fifo_reset */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
-
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+				  0xFFFE);
 	/* Release rxSeqStart */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
 }
 
-static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
-				       struct bnx2x_phy *phy)
+static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
+					     struct link_params *params)
 {
-	DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Set global registers, so set AER lane to 0 */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+
+	/* Disable sequencer */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
+
+	bnx2x_set_aer_mmd(params, phy);
+
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
+				  MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_AN_REG_CTRL, 0);
+	/* Turn off CL73 */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+	val &= ~(1<<5);
+	val |= (1<<6);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+	/* Set 20G KR2 force speed */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+	val &= ~(3<<14);
+	val |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
+
+	/* Enable sequencer (over lane 0) */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
+
+	bnx2x_set_aer_mmd(params, phy);
 }
 
 static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
@@ -3931,20 +4119,16 @@
 	u16 val16, digctrl_kx1, digctrl_kx2;
 
 	/* Clear XFI clock comp in non-10G single lane mode. */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_RX66_CONTROL, &val16);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
 
 	bnx2x_warpcore_set_lpi_passthrough(phy, params);
 
 	if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
 		/* SGMII Autoneg */
-		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-				 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
-				 val16 | 0x1000);
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+					 0x1000);
 		DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
 	} else {
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4086,7 +4270,7 @@
 		if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
 		    (cfg_pin > PIN_CFG_GPIO3_P1)) {
 			DP(NETIF_MSG_LINK,
-			   "ERROR: Invalid cfg pin %x for module detect indication\n",
+			   "No cfg pin %x for module detect indication\n",
 			   cfg_pin);
 			return -EINVAL;
 		}
@@ -4097,7 +4281,7 @@
 		*gpio_num = MISC_REGISTERS_GPIO_3;
 		*gpio_port = port;
 	}
-	DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+
 	return 0;
 }
 
@@ -4120,7 +4304,7 @@
 		return 0;
 }
 static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
-					struct link_params *params)
+				     struct link_params *params)
 {
 	u16 gp2_status_reg0, lane;
 	struct bnx2x *bp = params->bp;
@@ -4134,8 +4318,8 @@
 }
 
 static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
-				       struct link_params *params,
-				       struct link_vars *vars)
+					  struct link_params *params,
+					  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u32 serdes_net_if;
@@ -4163,7 +4347,7 @@
 		case PORT_HW_CFG_NET_SERDES_IF_KR:
 			/* Do we get link yet? */
 			bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
-								&gp_status1);
+					&gp_status1);
 			lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
 				/*10G KR*/
 			lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
@@ -4215,6 +4399,27 @@
 	}
 }
 
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+					 struct bnx2x_phy *phy,
+					 u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+
+	cfg_pin = REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region,
+				  dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+		PORT_HW_CFG_E3_TX_LASER_MASK;
+	/* Set the !tx_en since this pin is DISABLE_TX_LASER */
+	DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+
+	/* For 20G, the expected pin to be used is 3 pins after the current */
+	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+		bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 				       struct link_params *params,
 				       struct link_vars *vars)
@@ -4275,9 +4480,14 @@
 			break;
 
 		case PORT_HW_CFG_NET_SERDES_IF_SFI:
-			/* Issue Module detection */
+			/* Issue module detection if a module is plugged in;
+			 * otherwise enable the transmitter to avoid current
+			 * leakage in case no module is connected.
+			 */
 			if (bnx2x_is_sfp_module_plugged(phy, params))
 				bnx2x_sfp_module_detection(phy, params);
+			else
+				bnx2x_sfp_e3_set_transmitter(params, phy, 1);
 
 			bnx2x_warpcore_config_sfi(phy, params);
 			break;
@@ -4293,16 +4503,14 @@
 
 			bnx2x_sfp_module_detection(phy, params);
 			break;
-
 		case PORT_HW_CFG_NET_SERDES_IF_KR2:
-			if (vars->line_speed != SPEED_20000) {
-				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
-				return;
+			if (!params->loopback_mode) {
+				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+			} else {
+				DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
+				bnx2x_warpcore_set_20G_force_KR2(phy, params);
 			}
-			DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
-			bnx2x_warpcore_set_20G_KR2(bp, phy);
 			break;
-
 		default:
 			DP(NETIF_MSG_LINK,
 			   "Unsupported Serdes Net Interface 0x%x\n",
@@ -4316,68 +4524,35 @@
 	DP(NETIF_MSG_LINK, "Exit config init\n");
 }
 
-static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
-					 struct bnx2x_phy *phy,
-					 u8 tx_en)
-{
-	struct bnx2x *bp = params->bp;
-	u32 cfg_pin;
-	u8 port = params->port;
-
-	cfg_pin = REG_RD(bp, params->shmem_base +
-				offsetof(struct shmem_region,
-				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
-				PORT_HW_CFG_TX_LASER_MASK;
-	/* Set the !tx_en since this pin is DISABLE_TX_LASER */
-	DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
-	/* For 20G, the expected pin to be used is 3 pins after the current */
-
-	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
-	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
-		bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
-}
-
 static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val16, lane;
 	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
-	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+	bnx2x_set_mdio_emac_per_phy(bp, params);
 	bnx2x_set_aer_mmd(params, phy);
 	/* Global register */
 	bnx2x_warpcore_reset_lane(bp, phy, 1);
 
 	/* Clear loopback settings (if any) */
 	/* 10G & 20G */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
-			 0xBFFF);
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
 
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
 
 	/* Update those 1-copy registers */
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, 0);
 	/* Enable 1G MDIO (1-copy) */
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
-			&val16);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
-			 val16 & ~0x10);
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				  ~0x10);
 
-	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
-	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
-			 val16 & 0xff00);
-
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	/* Disable CL36 PCS Tx */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4413,8 +4588,9 @@
 	DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
 		       params->loopback_mode, phy->req_line_speed);
 
-	if (phy->req_line_speed < SPEED_10000) {
-		/* 10/100/1000 */
+	if (phy->req_line_speed < SPEED_10000 ||
+	    phy->supported & SUPPORTED_20000baseKR2_Full) {
+		/* 10/100/1000/20G-KR2 */
 
 		/* Update those 1-copy registers */
 		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
@@ -4427,18 +4603,20 @@
 		lane = bnx2x_get_warpcore_lane(phy, params);
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 				MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+		val16 |= (1<<lane);
+		if (phy->flags & FLAGS_WC_DUAL_MODE)
+			val16 |= (2<<lane);
 		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-				MDIO_WC_REG_XGXSBLK1_LANECTRL2,
-				val16 | (1<<lane));
+				 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+				 val16);
 
 		/* Switch back to 4-copy registers */
 		bnx2x_set_aer_mmd(params, phy);
 	} else {
-		/* 10G & 20G */
+		/* 10G / 20G-DXGXS */
 		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 					 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
 					 0x4000);
-
 		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 					 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
 	}
@@ -4603,6 +4781,10 @@
 		params->feature_config_flags &=
 					~FEATURE_CONFIG_PFC_ENABLED;
 
+	if (SHMEM2_HAS(bp, link_attr_sync))
+		vars->link_attr_sync = SHMEM2_RD(bp,
+						 link_attr_sync[params->port]);
+
 	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
 		 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
 	DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
@@ -5332,6 +5514,7 @@
 			vars->link_status |= LINK_10GTFD;
 			break;
 		case GP_STATUS_20G_DXGXS:
+		case GP_STATUS_20G_KR2:
 			vars->line_speed = SPEED_20000;
 			vars->link_status |= LINK_20GTFD;
 			break;
@@ -5439,7 +5622,15 @@
 	int rc = 0;
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	/* Read gp_status */
-	if (phy->req_line_speed > SPEED_10000) {
+	if ((params->loopback_mode) &&
+	    (phy->flags & FLAGS_WC_DUAL_MODE)) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+		link_up &= 0x1;
+	} else if ((phy->req_line_speed > SPEED_10000) &&
+		(phy->supported & SUPPORTED_20000baseMLD2_Full)) {
 		u16 temp_link_up;
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 				1, &temp_link_up);
@@ -5452,12 +5643,22 @@
 			bnx2x_ext_phy_resolve_fc(phy, params, vars);
 	} else {
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-				MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+				MDIO_WC_REG_GP2_STATUS_GP_2_1,
+				&gp_status1);
 		DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
-		/* Check for either KR or generic link up. */
-		gp_status1 = ((gp_status1 >> 8) & 0xf) |
-			((gp_status1 >> 12) & 0xf);
-		link_up = gp_status1 & (1 << lane);
+		/* Check for either KR, 1G, or AN up. */
+		link_up = ((gp_status1 >> 8) |
+			   (gp_status1 >> 12) |
+			   (gp_status1)) &
+			(1 << lane);
+		if (phy->supported & SUPPORTED_20000baseKR2_Full) {
+			u16 an_link;
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_STATUS, &an_link);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_STATUS, &an_link);
+			link_up |= (an_link & (1<<2));
+		}
 		if (link_up && SINGLE_MEDIA_DIRECT(params)) {
 			u16 pd, gp_status4;
 			if (phy->req_line_speed == SPEED_AUTO_NEG) {
@@ -5522,7 +5723,7 @@
 	if ((lane & 1) == 0)
 		gp_speed <<= 8;
 	gp_speed &= 0x3f00;
-
+	link_up = !!link_up;
 
 	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
 					 duplex);
@@ -6683,7 +6884,7 @@
 		} else if (prev_line_speed != vars->line_speed) {
 			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
 			       0);
-			 usleep_range(1000, 2000);
+			usleep_range(1000, 2000);
 		}
 	}
 
@@ -6753,7 +6954,7 @@
 {
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-	 usleep_range(1000, 2000);
+	usleep_range(1000, 2000);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
@@ -6894,7 +7095,7 @@
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
 
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
 			((fw_msgout & 0xff) != 0x03 && (phy->type ==
 			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7604,13 +7805,12 @@
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	}
 	return -EINVAL;
 }
 
 static void bnx2x_warpcore_power_module(struct link_params *params,
-					struct bnx2x_phy *phy,
 					u8 power)
 {
 	u32 pin_cfg;
@@ -7652,10 +7852,10 @@
 	addr32 = addr & (~0x3);
 	do {
 		if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
-			bnx2x_warpcore_power_module(params, phy, 0);
+			bnx2x_warpcore_power_module(params, 0);
 			/* Note that 100us are not enough here */
 			usleep_range(1000, 2000);
-			bnx2x_warpcore_power_module(params, phy, 1);
+			bnx2x_warpcore_power_module(params, 1);
 		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
 				    data_array);
@@ -7715,7 +7915,7 @@
 	/* Wait appropriate time for two-wire command to finish before
 	 * polling the status register
 	 */
-	 usleep_range(1000, 2000);
+	usleep_range(1000, 2000);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
@@ -7751,7 +7951,7 @@
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	}
 
 	return -EINVAL;
@@ -7786,9 +7986,8 @@
 {
 	struct bnx2x *bp = params->bp;
 	u32 sync_offset = 0, phy_idx, media_types;
-	u8 val[2], check_limiting_mode = 0;
+	u8 gport, val[2], check_limiting_mode = 0;
 	*edc_mode = EDC_MODE_LIMITING;
-
 	phy->media_type = ETH_PHY_UNSPECIFIED;
 	/* First check for copper cable */
 	if (bnx2x_read_sfp_module_eeprom(phy,
@@ -7843,8 +8042,15 @@
 			       SFP_EEPROM_COMP_CODE_LR_MASK |
 			       SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
 			DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+			gport = params->port;
 			phy->media_type = ETH_PHY_SFP_1G_FIBER;
 			phy->req_line_speed = SPEED_1000;
+			if (!CHIP_IS_E1x(bp))
+				gport = BP_PATH(bp) + (params->port << 1);
+			netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
+				   " Current SFP module in port %d is not"
+				   " compliant with 10G Ethernet\n",
+				   gport);
 		} else {
 			int idx, cfg_idx = 0;
 			DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8241,7 +8447,7 @@
 				    struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	bnx2x_warpcore_power_module(params, phy, 0);
+	bnx2x_warpcore_power_module(params, 0);
 	/* Put Warpcore in low power mode */
 	REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
 
@@ -8264,7 +8470,7 @@
 		bnx2x_8727_power_module(params->bp, phy, power);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		bnx2x_warpcore_power_module(params, phy, power);
+		bnx2x_warpcore_power_module(params, power);
 		break;
 	default:
 		break;
@@ -8337,7 +8543,8 @@
 	u32 val = REG_RD(bp, params->shmem_base +
 			     offsetof(struct shmem_region, dev_info.
 				     port_feature_config[params->port].config));
-
+	/* Enable the transmitter by default */
+	bnx2x_sfp_set_transmitter(params, phy, 1);
 	DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
 		 params->port);
 	/* Power up module */
@@ -8370,14 +8577,12 @@
 	 */
 	bnx2x_set_limiting_mode(params, phy, edc_mode);
 
-	/* Enable transmit for this module if the module is approved, or
-	 * if unapproved modules should also enable the Tx laser
+	/* Disable transmit for this module if the module is not approved and
+	 * the laser is configured to be disabled for unapproved modules.
 	 */
-	if (rc == 0 ||
-	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
-	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(params, phy, 1);
-	else
+	if ((rc) &&
+	    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+	     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
 		bnx2x_sfp_set_transmitter(params, phy, 0);
 
 	return rc;
@@ -8389,11 +8594,13 @@
 	struct bnx2x_phy *phy;
 	u32 gpio_val;
 	u8 gpio_num, gpio_port;
-	if (CHIP_IS_E3(bp))
+	if (CHIP_IS_E3(bp)) {
 		phy = &params->phy[INT_PHY];
-	else
+		/* Always enable TX laser, will be disabled in case of fault */
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
 		phy = &params->phy[EXT_PHY1];
-
+	}
 	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
 				      params->port, &gpio_num, &gpio_port) ==
 	    -EINVAL) {
@@ -8409,7 +8616,7 @@
 
 	/* Call the handling function in case module is detected */
 	if (gpio_val == 0) {
-		bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+		bnx2x_set_mdio_emac_per_phy(bp, params);
 		bnx2x_set_aer_mmd(params, phy);
 
 		bnx2x_power_sfp_module(params, phy, 1);
@@ -8438,10 +8645,6 @@
 			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 		}
 	} else {
-		u32 val = REG_RD(bp, params->shmem_base +
-				 offsetof(struct shmem_region, dev_info.
-					  port_feature_config[params->port].
-					  config));
 		bnx2x_set_gpio_int(bp, gpio_num,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   gpio_port);
@@ -8449,10 +8652,6 @@
 		 * Disable transmit for this module
 		 */
 		phy->media_type = ETH_PHY_NOT_PRESENT;
-		if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
-		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
-		    CHIP_IS_E3(bp))
-			bnx2x_sfp_set_transmitter(params, phy, 0);
 	}
 }
 
@@ -9192,6 +9391,7 @@
 			bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+			bnx2x_8727_power_module(params->bp, phy, 0);
 			return 0;
 		}
 	} /* Over current check */
@@ -9296,20 +9496,28 @@
 					    struct bnx2x *bp,
 					    u8 port)
 {
-	u16 val, fw_ver1, fw_ver2, cnt;
+	u16 val, fw_ver2, cnt, i;
+	static struct bnx2x_reg_set reg_set[] = {
+		{MDIO_PMA_DEVAD, 0xA819, 0x0014},
+		{MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+		{MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+		{MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+		{MDIO_PMA_DEVAD, 0xA817, 0x0009}
+	};
+	u16 fw_ver1;
 
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
 		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
 	} else {
 		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
 		/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+		for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+		      i++)
+			bnx2x_cl45_write(bp, phy, reg_set[i].devad,
+					 reg_set[i].reg, reg_set[i].val);
 
 		for (cnt = 0; cnt < 100; cnt++) {
 			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
@@ -9357,8 +9565,16 @@
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val, offset;
-
+	u16 val, offset, i;
+	static struct bnx2x_reg_set reg_set[] = {
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+			MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+		{MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+	};
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
@@ -9370,49 +9586,20 @@
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
 
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK,
-			 0x80);
+	for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
 
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK,
-			 0x18);
-
-	/* Select activity source by Tx and Rx, as suggested by PHY AE */
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0006);
-
-	/* Select the closest activity blink rate to that in 10/100/1000 */
-	bnx2x_cl45_write(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LED3_BLINK,
-			0);
-
-	/* Configure the blink rate to ~15.9 Hz */
-	bnx2x_cl45_write(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
-			MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
-
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
 		offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
 	else
 		offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
 
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, offset, &val);
-	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, offset, val);
-
-	/* 'Interrupt Mask' */
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_AN_DEVAD,
-			 0xFFFB, 0xFFFD);
+	/* stretch_en for LED3 */
+	bnx2x_cl45_read_or_write(bp, phy,
+				 MDIO_PMA_DEVAD, offset,
+				 MDIO_PMA_REG_84823_LED3_STRETCH_EN);
 }
 
 static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9422,7 +9609,8 @@
 	struct bnx2x *bp = params->bp;
 	switch (action) {
 	case PHY_INIT:
-		if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+		if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+		    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
 			/* Save spirom version */
 			bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 		}
@@ -9443,7 +9631,7 @@
 				       struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
+	u16 autoneg_val, an_1000_val, an_10_100_val;
 
 	bnx2x_848xx_specific_func(phy, params, PHY_INIT);
 	bnx2x_cl45_write(bp, phy,
@@ -9542,11 +9730,12 @@
 	if (phy->req_duplex == DUPLEX_FULL)
 		autoneg_val |= (1<<8);
 
-	/* Always write this if this is not 84833.
-	 * For 84833, write it only when it's a forced speed.
+	/* Always write this if this is not 84833/4.
+	 * For 84833/4, write it only when it's a forced speed.
 	 */
-	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-		((autoneg_val & (1<<12)) == 0))
+	if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+	     (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+	    ((autoneg_val & (1<<12)) == 0))
 		bnx2x_cl45_write(bp, phy,
 			 MDIO_AN_DEVAD,
 			 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9558,14 +9747,11 @@
 			DP(NETIF_MSG_LINK, "Advertising 10G\n");
 			/* Restart autoneg for 10G*/
 
-			bnx2x_cl45_read(bp, phy,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
-					&an_10g_val);
-			bnx2x_cl45_write(bp, phy,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
-					 an_10g_val | 0x1000);
+			bnx2x_cl45_read_or_write(
+				bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+				0x1000);
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
 					 0x3200);
@@ -9598,9 +9784,8 @@
 #define PHY84833_CMDHDLR_WAIT 300
 #define PHY84833_CMDHDLR_MAX_ARGS 5
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
-				   struct link_params *params,
-		   u16 fw_cmd,
-		   u16 cmd_args[], int argc)
+				struct link_params *params, u16 fw_cmd,
+				u16 cmd_args[], int argc)
 {
 	int idx;
 	u16 val;
@@ -9614,7 +9799,7 @@
 				MDIO_84833_CMD_HDLR_STATUS, &val);
 		if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
 			break;
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	}
 	if (idx >= PHY84833_CMDHDLR_WAIT) {
 		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9635,7 +9820,7 @@
 		if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
 			(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
 			break;
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	}
 	if ((idx >= PHY84833_CMDHDLR_WAIT) ||
 		(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9654,7 +9839,6 @@
 	return 0;
 }
 
-
 static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
 				   struct link_params *params,
 				   struct link_vars *vars)
@@ -9802,11 +9986,11 @@
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
 	u16 val;
-	u32 actual_phy_selection, cms_enable;
+	u32 actual_phy_selection;
 	u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
 	int rc = 0;
 
-	 usleep_range(1000, 2000);
+	usleep_range(1000, 2000);
 
 	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
@@ -9828,7 +10012,8 @@
 
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+	    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
 		/* BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
 		 */
@@ -9884,7 +10069,8 @@
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
 		bnx2x_84833_pair_swap_cfg(phy, params, vars);
 
 		/* Keep AutogrEEEn disabled. */
@@ -9904,7 +10090,7 @@
 		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 	/* 84833 PHY has a better feature and doesn't need to support this. */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
-		cms_enable = REG_RD(bp, params->shmem_base +
+		u32 cms_enable = REG_RD(bp, params->shmem_base +
 			offsetof(struct shmem_region,
 			dev_info.port_hw_config[params->port].default_cfg)) &
 			PORT_HW_CFG_ENABLE_CMS_MASK;
@@ -9933,7 +10119,7 @@
 			return rc;
 		}
 
-		if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+		if ((phy->req_duplex == DUPLEX_FULL) &&
 		    (params->eee_mode & EEE_MODE_ADV_LPI) &&
 		    (bnx2x_eee_calc_timer(params) ||
 		     !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
@@ -9948,15 +10134,13 @@
 		vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
 	}
 
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
 		/* Bring PHY out of super isolate mode as the final step. */
-		bnx2x_cl45_read(bp, phy,
-				MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-		val &= ~MDIO_84833_SUPER_ISOLATE;
-		bnx2x_cl45_write(bp, phy,
-				MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+		bnx2x_cl45_read_and_write(bp, phy,
+					  MDIO_CTL_DEVAD,
+					  MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+					  (u16)~MDIO_84833_SUPER_ISOLATE);
 	}
 	return rc;
 }
@@ -10090,7 +10274,6 @@
 	return link_up;
 }
 
-
 static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
 {
 	int status = 0;
@@ -10962,7 +11145,7 @@
 /*			STATIC PHY DECLARATION			  */
 /******************************************************************/
 
-static struct bnx2x_phy phy_null = {
+static const struct bnx2x_phy phy_null = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
 	.addr		= 0,
 	.def_md_devad	= 0,
@@ -10988,7 +11171,7 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_serdes = {
+static const struct bnx2x_phy phy_serdes = {
 	.type		= PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11023,7 +11206,7 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_xgxs = {
+static const struct bnx2x_phy phy_xgxs = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11058,12 +11241,11 @@
 	.set_link_led	= (set_link_led_t)NULL,
 	.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
 };
-static struct bnx2x_phy phy_warpcore = {
+static const struct bnx2x_phy phy_warpcore = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_TX_ERROR_CHECK),
+	.flags		= FLAGS_TX_ERROR_CHECK,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11097,7 +11279,7 @@
 };
 
 
-static struct bnx2x_phy phy_7101 = {
+static const struct bnx2x_phy phy_7101 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11126,11 +11308,11 @@
 	.set_link_led	= (set_link_led_t)bnx2x_7101_set_link_led,
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
-static struct bnx2x_phy phy_8073 = {
+static const struct bnx2x_phy phy_8073 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_HW_LOCK_REQUIRED,
+	.flags		= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11157,7 +11339,7 @@
 	.set_link_led	= (set_link_led_t)NULL,
 	.phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
 };
-static struct bnx2x_phy phy_8705 = {
+static const struct bnx2x_phy phy_8705 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11185,7 +11367,7 @@
 	.set_link_led	= (set_link_led_t)NULL,
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
-static struct bnx2x_phy phy_8706 = {
+static const struct bnx2x_phy phy_8706 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11215,12 +11397,11 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_8726 = {
+static const struct bnx2x_phy phy_8726 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_INIT_XGXS_FIRST |
+	.flags		= (FLAGS_INIT_XGXS_FIRST |
 			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
@@ -11248,7 +11429,7 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_8727 = {
+static const struct bnx2x_phy phy_8727 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11278,7 +11459,7 @@
 	.set_link_led	= (set_link_led_t)bnx2x_8727_set_link_led,
 	.phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
 };
-static struct bnx2x_phy phy_8481 = {
+static const struct bnx2x_phy phy_8481 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11314,7 +11495,7 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_84823 = {
+static const struct bnx2x_phy phy_84823 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11351,7 +11532,7 @@
 	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
-static struct bnx2x_phy phy_84833 = {
+static const struct bnx2x_phy phy_84833 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11386,7 +11567,41 @@
 	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
-static struct bnx2x_phy phy_54618se = {
+static const struct bnx2x_phy phy_84834 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_54618se = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
@@ -11564,9 +11779,11 @@
 			phy->media_type = ETH_PHY_KR;
 			phy->flags |= FLAGS_WC_DUAL_MODE;
 			phy->supported &= (SUPPORTED_20000baseKR2_Full |
+					   SUPPORTED_Autoneg |
 					   SUPPORTED_FIBRE |
 					   SUPPORTED_Pause |
 					   SUPPORTED_Asym_Pause);
+			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
 			break;
 		default:
 			DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
@@ -11665,6 +11882,9 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 		*phy = phy_84833;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+		*phy = phy_84834;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
 		*phy = phy_54618se;
@@ -11721,9 +11941,10 @@
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+	if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+	     (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
 	    (phy->ver_addr)) {
-		/* Remove 100Mb link supported for BCM84833 when phy fw
+		/* Remove 100Mb link support for BCM84833/4 when phy fw
 		 * version lower than or equal to 1.39
 		 */
 		u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11733,12 +11954,6 @@
 					    SUPPORTED_100baseT_Full);
 	}
 
-	/* In case mdc/mdio_access of the external phy is different than the
-	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
-	 * to prevent one port interfere with another port's CL45 operations.
-	 */
-	if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
-		phy->flags |= FLAGS_HW_LOCK_REQUIRED;
 	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
 		   phy_type, port, phy_index);
 	DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
@@ -11863,7 +12078,6 @@
 	return return_cfg;
 }
 
-
 int bnx2x_phy_probe(struct link_params *params)
 {
 	u8 phy_index, actual_phy_idx;
@@ -11907,6 +12121,10 @@
 		    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
 			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
 
+		if (!(params->feature_config_flags &
+		      FEATURE_CONFIG_MT_SUPPORT))
+			phy->flags |= FLAGS_MDC_MDIO_WA_G;
+
 		sync_offset = params->shmem_base +
 			offsetof(struct shmem_region,
 			dev_info.port_hw_config[params->port].media_type);
@@ -11934,8 +12152,8 @@
 	return 0;
 }
 
-void bnx2x_init_bmac_loopback(struct link_params *params,
-			      struct link_vars *vars)
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 		vars->link_up = 1;
@@ -11954,8 +12172,8 @@
 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
 
-void bnx2x_init_emac_loopback(struct link_params *params,
-			      struct link_vars *vars)
+static void bnx2x_init_emac_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 		vars->link_up = 1;
@@ -11973,8 +12191,8 @@
 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
 
-void bnx2x_init_xmac_loopback(struct link_params *params,
-			      struct link_vars *vars)
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	vars->link_up = 1;
@@ -11999,8 +12217,8 @@
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
 
-void bnx2x_init_umac_loopback(struct link_params *params,
-			      struct link_vars *vars)
+static void bnx2x_init_umac_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	vars->link_up = 1;
@@ -12014,17 +12232,21 @@
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
 
-void bnx2x_init_xgxs_loopback(struct link_params *params,
-			      struct link_vars *vars)
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-		vars->link_up = 1;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->duplex = DUPLEX_FULL;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+	vars->link_up = 1;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->duplex = DUPLEX_FULL;
 	if (params->req_line_speed[0] == SPEED_1000)
-			vars->line_speed = SPEED_1000;
+		vars->line_speed = SPEED_1000;
+	else if ((params->req_line_speed[0] == SPEED_20000) ||
+		 (int_phy->flags & FLAGS_WC_DUAL_MODE))
+		vars->line_speed = SPEED_20000;
 	else
-			vars->line_speed = SPEED_10000;
+		vars->line_speed = SPEED_10000;
 
 	if (!USES_WARPCORE(bp))
 		bnx2x_xgxs_deassert(params);
@@ -12044,34 +12266,30 @@
 			bnx2x_bmac_enable(params, vars, 0, 1);
 	}
 
-		if (params->loopback_mode == LOOPBACK_XGXS) {
-			/* set 10G XGXS loopback */
-			params->phy[INT_PHY].config_loopback(
-				&params->phy[INT_PHY],
-				params);
-
-		} else {
-			/* set external phy loopback */
-			u8 phy_index;
-			for (phy_index = EXT_PHY1;
-			      phy_index < params->num_phys; phy_index++) {
-				if (params->phy[phy_index].config_loopback)
-					params->phy[phy_index].config_loopback(
-						&params->phy[phy_index],
-						params);
-			}
-		}
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+	if (params->loopback_mode == LOOPBACK_XGXS) {
+		/* Set 10G XGXS loopback */
+		int_phy->config_loopback(int_phy, params);
+	} else {
+		/* Set external phy loopback */
+		u8 phy_index;
+		for (phy_index = EXT_PHY1;
+		      phy_index < params->num_phys; phy_index++)
+			if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
 	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 }
 
-static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+void bnx2x_set_rx_filter(struct link_params *params, u8 en)
 {
 	struct bnx2x *bp = params->bp;
 	u8 val = en * 0x1F;
 
-	/* Open the gate between the NIG to the BRB */
+	/* Open / close the gate between the NIG and the BRB */
 	if (!CHIP_IS_E1x(bp))
 		val |= en * 0x20;
 	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
@@ -12345,7 +12563,7 @@
 	 * Hold it as vars low
 	 */
 	 /* Clear link led */
-	bnx2x_set_mdio_clk(bp, params->chip_id, port);
+	bnx2x_set_mdio_emac_per_phy(bp, params);
 	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
 	if (reset_ext_phy) {
@@ -12696,7 +12914,7 @@
 	/* Initiate PHY reset*/
 	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       port);
-	 usleep_range(1000, 2000);
+	usleep_range(1000, 2000);
 	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
 
@@ -12784,7 +13002,8 @@
 }
 
 static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
-					       struct bnx2x_phy *phy)
+				    struct bnx2x_phy *phy,
+				    u8 port)
 {
 	u16 val, cnt;
 	/* Wait for FW completing its initialization. */
@@ -12794,7 +13013,7 @@
 				MDIO_PMA_REG_CTRL, &val);
 		if (!(val & (1<<15)))
 			break;
-		 usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
 	}
 	if (cnt >= 1500) {
 		DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12811,26 +13030,28 @@
 			 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
 
 	/* Save spirom version */
-	bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
+	bnx2x_save_848xx_spirom_version(phy, bp, port);
 	return 0;
 }
 
 int bnx2x_pre_init_phy(struct bnx2x *bp,
 				  u32 shmem_base,
 				  u32 shmem2_base,
-				  u32 chip_id)
+				  u32 chip_id,
+				  u8 port)
 {
 	int rc = 0;
 	struct bnx2x_phy phy;
-	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
 	if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
-			       PORT_0, &phy)) {
+			       port, &phy) != 0) {
 		DP(NETIF_MSG_LINK, "populate_phy failed\n");
 		return -EINVAL;
 	}
+	bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
 	switch (phy.type) {
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
-		rc = bnx2x_84833_pre_init_phy(bp, &phy);
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+		rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
 		break;
 	default:
 		break;
@@ -12867,6 +13088,7 @@
 						phy_index, chip_id);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
 		/* GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
@@ -12898,8 +13120,9 @@
 	u32 phy_ver, val;
 	u8 phy_index = 0;
 	u32 ext_phy_type, ext_phy_config;
-	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
-	bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+
+	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
+	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
 	DP(NETIF_MSG_LINK, "Begin common phy init\n");
 	if (CHIP_IS_E3(bp)) {
 		/* Enable EPIO */
@@ -12960,6 +13183,7 @@
 					    " error.\n",
 			 params->port);
 			vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+			bnx2x_warpcore_power_module(params, 0);
 		}
 	} else
 		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
@@ -13139,6 +13363,108 @@
 		}
 	}
 }
+static void bnx2x_disable_kr2(struct link_params *params,
+			      struct link_vars *vars,
+			      struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	int i;
+	static struct bnx2x_reg_set reg_set[] = {
+		/* Step 1 - Program the TX/RX alignment markers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+	};
+	DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+	for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+	vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+	/* Restart AN on leading lane */
+	bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_kr2_recovery(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "KR2 recovery\n");
+	bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+	bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_check_kr2_wa(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	u16 base_page, next_page, not_kr2_device, lane;
+	int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+
+	if (!sigdet) {
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+			bnx2x_kr2_recovery(params, vars, phy);
+		return;
+	}
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, lane);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+	bnx2x_set_aer_mmd(params, phy);
+
+	/* CL73 has not begun yet */
+	if (base_page == 0) {
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+			bnx2x_kr2_recovery(params, vars, phy);
+		return;
+	}
+
+	/* In case NP bit is not set in the BasePage, or it is set,
+	 * but only KX is advertised, declare this link partner as a
+	 * non-KR2 device.
+	 */
+	not_kr2_device = (((base_page & 0x8000) == 0) ||
+			  (((base_page & 0x8000) &&
+			    ((next_page & 0xe0) == 0x2))));
+
+	/* In case KR2 is already disabled, check if we need to re-enable it */
+	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+		if (!not_kr2_device) {
+			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+				       next_page);
+			bnx2x_kr2_recovery(params, vars, phy);
+		}
+		return;
+	}
+	/* KR2 is enabled, but the partner is not a KR2 device */
+	if (not_kr2_device) {
+		/* Disable KR2 on both lanes */
+		DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+		bnx2x_disable_kr2(params, vars, phy);
+		return;
+	}
+}
+
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
 	u16 phy_idx;
@@ -13156,6 +13482,9 @@
 	if (CHIP_IS_E3(bp)) {
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		bnx2x_set_aer_mmd(params, phy);
+		if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
+		    (phy->speed_cap_mask & SPEED_20000))
+			bnx2x_check_kr2_wa(params, vars, phy);
 		bnx2x_check_over_curr(params, vars);
 		if (vars->rx_tx_asic_rst)
 			bnx2x_warpcore_config_runtime(phy, params, vars);
@@ -13176,27 +13505,7 @@
 				bnx2x_update_mng(params, vars->link_status);
 			}
 		}
-
 	}
-
-}
-
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
-{
-	u8 phy_index;
-	struct bnx2x_phy phy;
-	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
-	      phy_index++) {
-		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
-				       0, &phy) != 0) {
-			DP(NETIF_MSG_LINK, "populate phy failed\n");
-			return 0;
-		}
-
-		if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
-			return 1;
-	}
-	return 0;
 }
 
 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89..181c5ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -139,8 +139,6 @@
 	u8 addr;
 	u8 def_md_devad;
 	u16 flags;
-	/* Require HW lock */
-#define FLAGS_HW_LOCK_REQUIRED		(1<<0)
 	/* No Over-Current detection */
 #define FLAGS_NOC			(1<<1)
 	/* Fan failure detection required */
@@ -156,6 +154,7 @@
 #define FLAGS_MDC_MDIO_WA_B0		(1<<10)
 #define FLAGS_TX_ERROR_CHECK		(1<<12)
 #define FLAGS_EEE			(1<<13)
+#define FLAGS_MDC_MDIO_WA_G		(1<<15)
 
 	/* preemphasis values for the rx side */
 	u16 rx_preemphasis[4];
@@ -267,6 +266,8 @@
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED			(1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED		(1<<10)
 #define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET		(1<<11)
+#define FEATURE_CONFIG_MT_SUPPORT			(1<<13)
+
 	/* Will be populated during common init */
 	struct bnx2x_phy phy[MAX_PHYS];
 
@@ -347,6 +348,8 @@
 	u8 rx_tx_asic_rst;
 	u8 turn_to_run_wc_rt;
 	u16 rsrv2;
+	/* The same definitions as the shmem2 parameter */
+	u32 link_attr_sync;
 };
 
 /***********************************************************/
@@ -418,10 +421,6 @@
 
 void bnx2x_hw_reset_phy(struct link_params *params);
 
-/* Checks if HW lock is required for this phy/board type */
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
-			  u32 shmem2_base);
-
 /* Check swap bit and adjust PHY order */
 u32 bnx2x_phy_selection(struct link_params *params);
 
@@ -432,7 +431,8 @@
 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
 			     u32 shmem2_base, u8 port);
 
-
+/* Open / close the gate between the NIG and the BRB */
+void bnx2x_set_rx_filter(struct link_params *params, u8 en);
 
 /* DCBX structs */
 
@@ -459,9 +459,6 @@
 	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
 	u32 llfc_high_priority_classes;
 	u32 llfc_low_priority_classes;
-	/* BRB */
-	u32 cos0_pauseable;
-	u32 cos1_pauseable;
 };
 
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 01611b3..b4659c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -791,10 +791,9 @@
 
 		/* host sb data */
 
-#ifdef BCM_CNIC
 		if (IS_FCOE_FP(fp))
 			continue;
-#endif
+
 		BNX2X_ERR("     run indexes (");
 		for (j = 0; j < HC_SB_MAX_SM; j++)
 			pr_cont("0x%x%s",
@@ -859,7 +858,7 @@
 #ifdef BNX2X_STOP_ON_ERROR
 	/* Rings */
 	/* Rx */
-	for_each_rx_queue(bp, i) {
+	for_each_valid_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +892,7 @@
 	}
 
 	/* Tx */
-	for_each_tx_queue(bp, i) {
+	for_each_valid_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		for_each_cos_in_tx_queue(fp, cos) {
 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1483,7 +1482,7 @@
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
 {
 	if (bp->common.int_block == INT_BLOCK_HC)
 		bnx2x_hc_int_disable(bp);
@@ -1504,9 +1503,8 @@
 	if (msix) {
 		synchronize_irq(bp->msix_table[0].vector);
 		offset = 1;
-#ifdef BCM_CNIC
-		offset++;
-#endif
+		if (CNIC_SUPPORT(bp))
+			offset++;
 		for_each_eth_queue(bp, i)
 			synchronize_irq(bp->msix_table[offset++].vector);
 	} else
@@ -1588,9 +1586,8 @@
 	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 }
 
-#ifdef BCM_CNIC
 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
-#endif
+
 
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 {
@@ -1720,7 +1717,7 @@
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		mask = 0x2 << (fp->index + CNIC_PRESENT);
+		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
 		if (status & mask) {
 			/* Handle Rx or Tx according to SB id */
 			prefetch(fp->rx_cons_sb);
@@ -1732,22 +1729,23 @@
 		}
 	}
 
-#ifdef BCM_CNIC
-	mask = 0x2;
-	if (status & (mask | 0x1)) {
-		struct cnic_ops *c_ops = NULL;
+	if (CNIC_SUPPORT(bp)) {
+		mask = 0x2;
+		if (status & (mask | 0x1)) {
+			struct cnic_ops *c_ops = NULL;
 
-		if (likely(bp->state == BNX2X_STATE_OPEN)) {
-			rcu_read_lock();
-			c_ops = rcu_dereference(bp->cnic_ops);
-			if (c_ops)
-				c_ops->cnic_handler(bp->cnic_data, NULL);
-			rcu_read_unlock();
+			if (likely(bp->state == BNX2X_STATE_OPEN)) {
+				rcu_read_lock();
+				c_ops = rcu_dereference(bp->cnic_ops);
+				if (c_ops)
+					c_ops->cnic_handler(bp->cnic_data,
+							    NULL);
+				rcu_read_unlock();
+			}
+
+			status &= ~mask;
 		}
-
-		status &= ~mask;
 	}
-#endif
 
 	if (unlikely(status & 0x1)) {
 		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -3075,11 +3073,13 @@
 
 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
 {
-#ifdef BCM_CNIC
 	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
 	struct fcoe_stats_info *fcoe_stat =
 		&bp->slowpath->drv_info_to_mcp.fcoe_stat;
 
+	if (!CNIC_LOADED(bp))
+		return;
+
 	memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
 	       bp->fip_mac, ETH_ALEN);
 
@@ -3162,16 +3162,17 @@
 
 	/* ask L5 driver to add data to the struct */
 	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
-#endif
 }
 
 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
 {
-#ifdef BCM_CNIC
 	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
 	struct iscsi_stats_info *iscsi_stat =
 		&bp->slowpath->drv_info_to_mcp.iscsi_stat;
 
+	if (!CNIC_LOADED(bp))
+		return;
+
 	memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
 	       bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
 
@@ -3180,7 +3181,6 @@
 
 	/* ask L5 driver to add data to the struct */
 	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
-#endif
 }
 
 /* called due to MCP event (on pmf):
@@ -4572,7 +4572,6 @@
 	mmiowb(); /* keep prod updates ordered */
 }
 
-#ifdef BCM_CNIC
 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 				      union event_ring_elem *elem)
 {
@@ -4594,7 +4593,6 @@
 	bnx2x_cnic_cfc_comp(bp, cid, err);
 	return 0;
 }
-#endif
 
 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 {
@@ -4635,11 +4633,9 @@
 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
 	case BNX2X_FILTER_MAC_PENDING:
 		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
-#ifdef BCM_CNIC
-		if (cid == BNX2X_ISCSI_ETH_CID(bp))
+		if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
 			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
 		else
-#endif
 			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
@@ -4665,9 +4661,7 @@
 
 }
 
-#ifdef BCM_CNIC
 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
-#endif
 
 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 {
@@ -4678,14 +4672,12 @@
 	/* Send rx_mode command again if was requested */
 	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
 		bnx2x_set_storm_rx_mode(bp);
-#ifdef BCM_CNIC
 	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
 				    &bp->sp_state))
 		bnx2x_set_iscsi_eth_rx_mode(bp, true);
 	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
 				    &bp->sp_state))
 		bnx2x_set_iscsi_eth_rx_mode(bp, false);
-#endif
 
 	netif_addr_unlock_bh(bp->dev);
 }
@@ -4747,7 +4739,6 @@
 				  q);
 	}
 
-#ifdef BCM_CNIC
 	if (!NO_FCOE(bp)) {
 		fp = &bp->fp[FCOE_IDX(bp)];
 		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4761,16 @@
 		bnx2x_link_report(bp);
 		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
 	}
-#else
-	/* If no FCoE ring - ACK MCP now */
-	bnx2x_link_report(bp);
-	bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
-#endif /* BCM_CNIC */
 }
 
 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 	struct bnx2x *bp, u32 cid)
 {
 	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
-#ifdef BCM_CNIC
-	if (cid == BNX2X_FCOE_ETH_CID(bp))
+
+	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
 		return &bnx2x_fcoe_sp_obj(bp, q_obj);
 	else
-#endif
 		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
 }
 
@@ -4793,6 +4778,7 @@
 {
 	u16 hw_cons, sw_cons, sw_prod;
 	union event_ring_elem *elem;
+	u8 echo;
 	u32 cid;
 	u8 opcode;
 	int spqe_cnt = 0;
@@ -4847,10 +4833,11 @@
 			 */
 			DP(BNX2X_MSG_SP,
 			   "got delete ramrod for MULTI[%d]\n", cid);
-#ifdef BCM_CNIC
-			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+
+			if (CNIC_LOADED(bp) &&
+			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
 				goto next_spqe;
-#endif
+
 			q_obj = bnx2x_cid_to_q_obj(bp, cid);
 
 			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4862,34 @@
 				break;
 			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 			goto next_spqe;
+
 		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
-			DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
-			   "AFEX: ramrod completed FUNCTION_UPDATE\n");
-			f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+			echo = elem->message.data.function_update_event.echo;
+			if (echo == SWITCH_UPDATE) {
+				DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+				   "got FUNC_SWITCH_UPDATE ramrod\n");
+				if (f_obj->complete_cmd(
+					bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+					break;
 
-			/* We will perform the Queues update from sp_rtnl task
-			 * as all Queue SP operations should run under
-			 * rtnl_lock.
-			 */
-			smp_mb__before_clear_bit();
-			set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-				&bp->sp_rtnl_state);
-			smp_mb__after_clear_bit();
+			} else {
+				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
+				f_obj->complete_cmd(bp, f_obj,
+						    BNX2X_F_CMD_AFEX_UPDATE);
 
-			schedule_delayed_work(&bp->sp_rtnl_task, 0);
+				/* We will perform the Queues update from
+				 * sp_rtnl task as all Queue SP operations
+				 * should run under rtnl_lock.
+				 */
+				smp_mb__before_clear_bit();
+				set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+					&bp->sp_rtnl_state);
+				smp_mb__after_clear_bit();
+
+				schedule_delayed_work(&bp->sp_rtnl_task, 0);
+			}
+
 			goto next_spqe;
 
 		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
@@ -4999,11 +4999,10 @@
 
 	/* SP events: STAT_QUERY and others */
 	if (status & BNX2X_DEF_SB_IDX) {
-#ifdef BCM_CNIC
 		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 
-		if ((!NO_FCOE(bp)) &&
-			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+		if (FCOE_INIT(bp) &&
+		    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 			/*
 			 * Prevent local bottom-halves from running as
 			 * we are going to change the local NAPI list.
@@ -5012,7 +5011,7 @@
 			napi_schedule(&bnx2x_fcoe(bp, napi));
 			local_bh_enable();
 		}
-#endif
+
 		/* Handle EQ completions */
 		bnx2x_eq_int(bp);
 
@@ -5050,8 +5049,7 @@
 		return IRQ_HANDLED;
 #endif
 
-#ifdef BCM_CNIC
-	{
+	if (CNIC_LOADED(bp)) {
 		struct cnic_ops *c_ops;
 
 		rcu_read_lock();
@@ -5060,7 +5058,7 @@
 			c_ops->cnic_handler(bp->cnic_data, NULL);
 		rcu_read_unlock();
 	}
-#endif
+
 	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
 	return IRQ_HANDLED;
@@ -5498,12 +5496,10 @@
 	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 
-#ifdef BCM_CNIC
 	if (!NO_FCOE(bp))
 
 		/* Configure rx_mode of FCoE Queue */
 		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-#endif
 
 	switch (bp->rx_mode) {
 	case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5620,12 @@
 
 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
 {
-	return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
 }
 
 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
 {
-	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
 }
 
 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,32 +5716,48 @@
 	txdata->tx_pkt = 0;
 }
 
+static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue_cnic(bp, i)
+		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+}
 static void bnx2x_init_tx_rings(struct bnx2x *bp)
 {
 	int i;
 	u8 cos;
 
-	for_each_tx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		for_each_cos_in_tx_queue(&bp->fp[i], cos)
 			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
 }
 
+void bnx2x_nic_init_cnic(struct bnx2x *bp)
+{
+	if (!NO_FCOE(bp))
+		bnx2x_init_fcoe_fp(bp);
+
+	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+		      BNX2X_VF_ID_INVALID, false,
+		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+	/* ensure status block indices were read */
+	rmb();
+	bnx2x_init_rx_rings_cnic(bp);
+	bnx2x_init_tx_rings_cnic(bp);
+
+	/* flush all */
+	mb();
+	mmiowb();
+}
+
 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
 	int i;
 
 	for_each_eth_queue(bp, i)
 		bnx2x_init_eth_fp(bp, i);
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp))
-		bnx2x_init_fcoe_fp(bp);
-
-	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
-		      BNX2X_VF_ID_INVALID, false,
-		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
-
-#endif
-
 	/* Initialize MOD_ABS interrupts */
 	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 			       bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6043,9 @@
 	msleep(50);
 	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
+	if (!CNIC_SUPPORT(bp))
+		/* set NIC mode */
+		REG_WR(bp, PRS_REG_NIC_MODE, 1);
 
 	/* Enable inputs of parser neighbor blocks */
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6256,6 +6267,10 @@
 static void bnx2x__common_init_phy(struct bnx2x *bp)
 {
 	u32 shmem_base[2], shmem2_base[2];
+	/* Avoid common init in case MFW supports LFA */
+	if (SHMEM2_RD(bp, size) >
+	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+		return;
 	shmem_base[0] =  bp->common.shmem_base;
 	shmem2_base[0] = bp->common.shmem2_base;
 	if (!CHIP_IS_E1x(bp)) {
@@ -6522,9 +6537,8 @@
 	REG_WR(bp, QM_REG_SOFT_RESET, 1);
 	REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
-#endif
+	if (CNIC_SUPPORT(bp))
+		bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 
 	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
 	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6625,18 @@
 
 	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
 
-#ifdef BCM_CNIC
-	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
-	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
-	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
-	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
-	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
-	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
-	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
-	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
+	if (CNIC_SUPPORT(bp)) {
+		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+	}
 	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
 	if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6800,11 @@
 	/* QM cid (connection) count */
 	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
 
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, BLOCK_TM, init_phase);
-	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
-	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
+	if (CNIC_SUPPORT(bp)) {
+		bnx2x_init_block(bp, BLOCK_TM, init_phase);
+		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+	}
 
 	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
@@ -6877,9 +6891,9 @@
 		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 	}
 
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
-#endif
+	if (CNIC_SUPPORT(bp))
+		bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
 	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 
@@ -7040,6 +7054,130 @@
 		bnx2x_ilt_wr(bp, i, 0);
 }
 
+
+static void bnx2x_init_searcher(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+	/* T1 hash bits value determines the T1 number of entries */
+	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+}
+
+static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+{
+	int rc;
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_switch_update_params *switch_update_params =
+		&func_params.params.switch_update;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	/* Function parameters */
+	switch_update_params->suspend = suspend;
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+
+	return rc;
+}
+
+static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+{
+	int rc, i, port = BP_PORT(bp);
+	int vlan_en = 0, mac_en[NUM_MACS];
+
+
+	/* Close input from network */
+	if (bp->mf_mode == SINGLE_FUNCTION) {
+		bnx2x_set_rx_filter(&bp->link_params, 0);
+	} else {
+		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+				   NIG_REG_LLH0_FUNC_EN);
+		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+			  NIG_REG_LLH0_FUNC_EN, 0);
+		for (i = 0; i < NUM_MACS; i++) {
+			mac_en[i] = REG_RD(bp, port ?
+					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+					      4 * i));
+			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+		}
+	}
+
+	/* Close BMC to host */
+	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+
+	/* Suspend Tx switching to the PF. Completion of this ramrod
+	 * further guarantees that all the packets of that PF / child
+	 * VFs in BRB were processed by the Parser, so it is safe to
+	 * change the NIC_MODE register.
+	 */
+	rc = bnx2x_func_switch_update(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Can't suspend tx-switching!\n");
+		return rc;
+	}
+
+	/* Change NIC_MODE register */
+	REG_WR(bp, PRS_REG_NIC_MODE, 0);
+
+	/* Open input from network */
+	if (bp->mf_mode == SINGLE_FUNCTION) {
+		bnx2x_set_rx_filter(&bp->link_params, 1);
+	} else {
+		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+			  NIG_REG_LLH0_FUNC_EN, vlan_en);
+		for (i = 0; i < NUM_MACS; i++) {
+			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+				  mac_en[i]);
+		}
+	}
+
+	/* Enable BMC to host */
+	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+
+	/* Resume Tx switching to the PF */
+	rc = bnx2x_func_switch_update(bp, 0);
+	if (rc) {
+		BNX2X_ERR("Can't resume tx-switching!\n");
+		return rc;
+	}
+
+	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+	return 0;
+}
+
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+{
+	int rc;
+
+	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+
+	if (CONFIGURE_NIC_MODE(bp)) {
+		/* Configure searcher as part of function hw init */
+		bnx2x_init_searcher(bp);
+
+		/* Reset NIC mode */
+		rc = bnx2x_reset_nic_mode(bp);
+		if (rc)
+			BNX2X_ERR("Can't change NIC mode!\n");
+		return rc;
+	}
+
+	return 0;
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
@@ -7082,17 +7220,16 @@
 	}
 	bnx2x_ilt_init_op(bp, INITOP_SET);
 
-#ifdef BCM_CNIC
-	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+	if (!CONFIGURE_NIC_MODE(bp)) {
+		bnx2x_init_searcher(bp);
+		REG_WR(bp, PRS_REG_NIC_MODE, 0);
+		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+	} else {
+		/* Set NIC mode */
+		REG_WR(bp, PRS_REG_NIC_MODE, 1);
+		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
 
-	/* T1 hash bits value determines the T1 number of entries */
-	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
-#endif
-
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif  /* BCM_CNIC */
+	}
 
 	if (!CHIP_IS_E1x(bp)) {
 		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7343,6 +7480,20 @@
 }
 
 
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+	if (!CHIP_IS_E1x(bp))
+		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e2));
+	else
+		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e1x));
+
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
 void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;
@@ -7367,17 +7518,6 @@
 
 	BNX2X_FREE(bp->ilt->lines);
 
-#ifdef BCM_CNIC
-	if (!CHIP_IS_E1x(bp))
-		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
-			       sizeof(struct host_hc_status_block_e2));
-	else
-		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
-			       sizeof(struct host_hc_status_block_e1x));
-
-	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
-#endif
-
 	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 
 	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7445,24 +7585,44 @@
 	return -ENOMEM;
 }
 
-
-int bnx2x_alloc_mem(struct bnx2x *bp)
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
 {
-	int i, allocated, context_size;
-
-#ifdef BCM_CNIC
 	if (!CHIP_IS_E1x(bp))
 		/* size = the status block + ramrod buffers */
 		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
 				sizeof(struct host_hc_status_block_e2));
 	else
-		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
-				sizeof(struct host_hc_status_block_e1x));
+		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
+				&bp->cnic_sb_mapping,
+				sizeof(struct
+				       host_hc_status_block_e1x));
 
-	/* allocate searcher T2 table */
-	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
-#endif
+	if (CONFIGURE_NIC_MODE(bp))
+		/* allocate searcher T2 table, as it wasn't allocated before */
+		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
+	/* write address to which L5 should insert its values */
+	bp->cnic_eth_dev.addr_drv_info_to_mcp =
+		&bp->slowpath->drv_info_to_mcp;
+
+	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem_cnic(bp);
+	BNX2X_ERR("Can't allocate memory\n");
+	return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+	int i, allocated, context_size;
+
+	if (!CONFIGURE_NIC_MODE(bp))
+		/* allocate searcher T2 table */
+		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
 	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
 			sizeof(struct host_sp_status_block));
@@ -7470,11 +7630,6 @@
 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 			sizeof(struct bnx2x_slowpath));
 
-#ifdef BCM_CNIC
-	/* write address to which L5 should insert its values */
-	bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
-#endif
-
 	/* Allocated memory for FW statistics  */
 	if (bnx2x_alloc_fw_stats_mem(bp))
 		goto alloc_mem_err;
@@ -7596,14 +7751,12 @@
 {
 	unsigned long ramrod_flags = 0;
 
-#ifdef BCM_CNIC
 	if (is_zero_ether_addr(bp->dev->dev_addr) &&
 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
 		   "Ignoring Zero MAC for STORAGE SD mode\n");
 		return 0;
 	}
-#endif
 
 	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
@@ -7632,7 +7785,8 @@
 		bnx2x_enable_msi(bp);
 		/* falling through... */
 	case INT_MODE_INTx:
-		bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 		BNX2X_DEV_INFO("set number of queues to 1\n");
 		break;
 	default:
@@ -7644,9 +7798,10 @@
 		    bp->flags & USING_SINGLE_MSIX_FLAG) {
 			/* failed to enable multiple MSI-X */
 			BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
-				       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+				       bp->num_queues,
+				       1 + bp->num_cnic_queues);
 
-			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+			bp->num_queues = 1 + bp->num_cnic_queues;
 
 			/* Try to enable MSI */
 			if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@ -7679,9 +7834,9 @@
 	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
 	ilt_client->start = line;
 	line += bnx2x_cid_ilt_lines(bp);
-#ifdef BCM_CNIC
-	line += CNIC_ILT_LINES;
-#endif
+
+	if (CNIC_SUPPORT(bp))
+		line += CNIC_ILT_LINES;
 	ilt_client->end = line - 1;
 
 	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7714,49 +7869,43 @@
 		   ilog2(ilt_client->page_size >> 12));
 
 	}
-	/* SRC */
-	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
-#ifdef BCM_CNIC
-	ilt_client->client_num = ILT_CLIENT_SRC;
-	ilt_client->page_size = SRC_ILT_PAGE_SZ;
-	ilt_client->flags = 0;
-	ilt_client->start = line;
-	line += SRC_ILT_LINES;
-	ilt_client->end = line - 1;
 
-	DP(NETIF_MSG_IFUP,
-	   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-	   ilt_client->start,
-	   ilt_client->end,
-	   ilt_client->page_size,
-	   ilt_client->flags,
-	   ilog2(ilt_client->page_size >> 12));
+	if (CNIC_SUPPORT(bp)) {
+		/* SRC */
+		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+		ilt_client->client_num = ILT_CLIENT_SRC;
+		ilt_client->page_size = SRC_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += SRC_ILT_LINES;
+		ilt_client->end = line - 1;
 
-#else
-	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
 
-	/* TM */
-	ilt_client = &ilt->clients[ILT_CLIENT_TM];
-#ifdef BCM_CNIC
-	ilt_client->client_num = ILT_CLIENT_TM;
-	ilt_client->page_size = TM_ILT_PAGE_SZ;
-	ilt_client->flags = 0;
-	ilt_client->start = line;
-	line += TM_ILT_LINES;
-	ilt_client->end = line - 1;
+		/* TM */
+		ilt_client = &ilt->clients[ILT_CLIENT_TM];
+		ilt_client->client_num = ILT_CLIENT_TM;
+		ilt_client->page_size = TM_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += TM_ILT_LINES;
+		ilt_client->end = line - 1;
 
-	DP(NETIF_MSG_IFUP,
-	   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-	   ilt_client->start,
-	   ilt_client->end,
-	   ilt_client->page_size,
-	   ilt_client->flags,
-	   ilog2(ilt_client->page_size >> 12));
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+	}
 
-#else
-	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
 	BUG_ON(line > ILT_MAX_LINES);
 }
 
@@ -7823,7 +7972,7 @@
 	}
 }
 
-int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			struct bnx2x_queue_state_params *q_params,
 			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
 			int tx_index, bool leading)
@@ -7924,6 +8073,9 @@
 	/* Set the command */
 	q_params.cmd = BNX2X_Q_CMD_SETUP;
 
+	if (IS_FCOE_FP(fp))
+		bp->fcoe_init = true;
+
 	/* Change the state to SETUP */
 	rc = bnx2x_queue_state_change(bp, &q_params);
 	if (rc) {
@@ -8037,12 +8189,12 @@
 			   SB_DISABLED);
 	}
 
-#ifdef BCM_CNIC
-	/* CNIC SB */
-	REG_WR8(bp, BAR_CSTRORM_INTMEM +
-		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
-		SB_DISABLED);
-#endif
+	if (CNIC_LOADED(bp))
+		/* CNIC SB */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+			(bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
 	/* SP SB */
 	REG_WR8(bp, BAR_CSTRORM_INTMEM +
 		   CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8061,19 +8213,19 @@
 		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 	}
 
-#ifdef BCM_CNIC
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-	/*
-	 * Wait for at least 10ms and up to 2 second for the timers scan to
-	 * complete
-	 */
-	for (i = 0; i < 200; i++) {
-		msleep(10);
-		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
-			break;
+	if (CNIC_LOADED(bp)) {
+		/* Disable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+		/*
+		 * Wait for at least 10ms and up to 2 seconds for the timers
+		 * scan to complete
+		 */
+		for (i = 0; i < 200; i++) {
+			msleep(10);
+			if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+				break;
+		}
 	}
-#endif
 	/* Clear ILT */
 	bnx2x_clear_func_ilt(bp, func);
 
@@ -8409,13 +8561,24 @@
 	/* Close multi and leading connections
 	 * Completions for ramrods are collected in a synchronous way
 	 */
-	for_each_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		if (bnx2x_stop_queue(bp, i))
 #ifdef BNX2X_STOP_ON_ERROR
 			return;
 #else
 			goto unload_error;
 #endif
+
+	if (CNIC_LOADED(bp)) {
+		for_each_cnic_queue(bp, i)
+			if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+				return;
+#else
+				goto unload_error;
+#endif
+	}
+
 	/* If SP settings didn't get completed so far - something
 	 * very wrong has happen.
 	 */
@@ -8437,6 +8600,8 @@
 	bnx2x_netif_stop(bp, 1);
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
@@ -8850,7 +9015,7 @@
 	return 0;
 }
 
-int bnx2x_leader_reset(struct bnx2x *bp)
+static int bnx2x_leader_reset(struct bnx2x *bp)
 {
 	int rc = 0;
 	bool global = bnx2x_reset_is_global(bp);
@@ -9701,6 +9866,14 @@
 
 	bp->link_params.shmem_base = bp->common.shmem_base;
 	bp->link_params.shmem2_base = bp->common.shmem2_base;
+	if (SHMEM2_RD(bp, size) >
+	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+		bp->link_params.lfa_base =
+		REG_RD(bp, bp->common.shmem2_base +
+		       (u32)offsetof(struct shmem2_region,
+				     lfa_host_addr[BP_PORT(bp)]));
+	else
+		bp->link_params.lfa_base = 0;
 	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
 		       bp->common.shmem_base, bp->common.shmem2_base);
 
@@ -9748,6 +9921,11 @@
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+		FEATURE_CONFIG_MT_SUPPORT : 0;
+
 	bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
 			BC_SUPPORTS_PFC_STATS : 0;
 
@@ -10199,17 +10377,6 @@
 		bp->mdio.prtad =
 			XGXS_EXT_PHY_ADDR(ext_phy_config);
 
-	/*
-	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
-	 * In MF mode, it is set to cover self test cases
-	 */
-	if (IS_MF(bp))
-		bp->port.need_hw_lock = 1;
-	else
-		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-							bp->common.shmem_base,
-							bp->common.shmem2_base);
-
 	/* Configure link feature according to nvram value */
 	eee_mode = (((SHMEM_RD(bp, dev_info.
 		      port_feature_config[port].eee_power_mode)) &
@@ -10227,12 +10394,15 @@
 void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
 	u32 no_flags = NO_ISCSI_FLAG;
-#ifdef BCM_CNIC
 	int port = BP_PORT(bp);
-
 	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 				drv_lic_key[port].max_iscsi_conn);
 
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= no_flags;
+		return;
+	}
+
 	/* Get the number of maximum allowed iSCSI connections */
 	bp->cnic_eth_dev.max_iscsi_conn =
 		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10247,12 +10417,9 @@
 	 */
 	if (!bp->cnic_eth_dev.max_iscsi_conn)
 		bp->flags |= no_flags;
-#else
-	bp->flags |= no_flags;
-#endif
+
 }
 
-#ifdef BCM_CNIC
 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
 {
 	/* Port info */
@@ -10267,16 +10434,18 @@
 	bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
 }
-#endif
 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
 {
-#ifdef BCM_CNIC
 	int port = BP_PORT(bp);
 	int func = BP_ABS_FUNC(bp);
-
 	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 				drv_lic_key[port].max_fcoe_conn);
 
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= NO_FCOE_FLAG;
+		return;
+	}
+
 	/* Get the number of maximum allowed FCoE connections */
 	bp->cnic_eth_dev.max_fcoe_conn =
 		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10322,9 +10491,6 @@
 	 */
 	if (!bp->cnic_eth_dev.max_fcoe_conn)
 		bp->flags |= NO_FCOE_FLAG;
-#else
-	bp->flags |= NO_FCOE_FLAG;
-#endif
 }
 
 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -10338,15 +10504,105 @@
 	bnx2x_get_fcoe_info(bp);
 }
 
+static void __devinit bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+	u8 *fip_mac = bp->fip_mac;
+
+	if (IS_MF(bp)) {
+		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+		 * MAC is missing, the corresponding feature should be
+		 * disabled. In non-SD mode the feature configuration comes
+		 * from struct func_ext_config.
+		 */
+		if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
+			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						 iscsi_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						iscsi_mac_addr_lower);
+				bnx2x_set_mac_buf(iscsi_mac, val, val2);
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else {
+				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+			}
+
+			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						 fcoe_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_mac_addr_lower);
+				bnx2x_set_mac_buf(fip_mac, val, val2);
+				BNX2X_DEV_INFO
+					("Read FCoE L2 MAC: %pM\n", fip_mac);
+			} else {
+				bp->flags |= NO_FCOE_FLAG;
+			}
+
+			bp->mf_ext_config = cfg;
+
+		} else { /* SD MODE */
+			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+				/* use primary mac as iscsi mac */
+				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+				BNX2X_DEV_INFO("SD ISCSI MODE\n");
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+				/* use primary mac as fip mac */
+				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+				BNX2X_DEV_INFO("SD FCoE MODE\n");
+				BNX2X_DEV_INFO
+					("Read FIP MAC: %pM\n", fip_mac);
+			}
+		}
+
+		if (IS_MF_STORAGE_SD(bp))
+			/* Zero primary MAC configuration */
+			memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+		if (IS_MF_FCOE_AFEX(bp))
+			/* use FIP MAC as primary MAC */
+			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+
+	} else {
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				iscsi_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+			       iscsi_mac_lower);
+		bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				fcoe_fip_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+			       fcoe_fip_mac_lower);
+		bnx2x_set_mac_buf(fip_mac, val, val2);
+	}
+
+	/* Disable iSCSI and iSCSI OOO if the MAC configuration is invalid. */
+	if (!is_valid_ether_addr(iscsi_mac)) {
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+		memset(iscsi_mac, 0, ETH_ALEN);
+	}
+
+	/* Disable FCoE if MAC configuration is invalid. */
+	if (!is_valid_ether_addr(fip_mac)) {
+		bp->flags |= NO_FCOE_FLAG;
+		memset(bp->fip_mac, 0, ETH_ALEN);
+	}
+}
+
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
 	u32 val, val2;
 	int func = BP_ABS_FUNC(bp);
 	int port = BP_PORT(bp);
-#ifdef BCM_CNIC
-	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
-	u8 *fip_mac = bp->fip_mac;
-#endif
 
 	/* Zero primary MAC configuration */
 	memset(bp->dev->dev_addr, 0, ETH_ALEN);
@@ -10361,110 +10617,21 @@
 		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
 			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
-#ifdef BCM_CNIC
-		/*
-		 * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
-		 * FCoE MAC then the appropriate feature should be disabled.
-		 *
-		 * In non SD mode features configuration comes from
-		 * struct func_ext_config.
-		 */
-		if (!IS_MF_SD(bp)) {
-			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
-			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
-				val2 = MF_CFG_RD(bp, func_ext_config[func].
-						     iscsi_mac_addr_upper);
-				val = MF_CFG_RD(bp, func_ext_config[func].
-						    iscsi_mac_addr_lower);
-				bnx2x_set_mac_buf(iscsi_mac, val, val2);
-				BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-					       iscsi_mac);
-			} else
-				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
-			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
-				val2 = MF_CFG_RD(bp, func_ext_config[func].
-						     fcoe_mac_addr_upper);
-				val = MF_CFG_RD(bp, func_ext_config[func].
-						    fcoe_mac_addr_lower);
-				bnx2x_set_mac_buf(fip_mac, val, val2);
-				BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
-					       fip_mac);
-
-			} else
-				bp->flags |= NO_FCOE_FLAG;
-
-			bp->mf_ext_config = cfg;
-
-		} else { /* SD MODE */
-			if (IS_MF_STORAGE_SD(bp)) {
-				if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
-					/* use primary mac as iscsi mac */
-					memcpy(iscsi_mac, bp->dev->dev_addr,
-					       ETH_ALEN);
-
-					BNX2X_DEV_INFO("SD ISCSI MODE\n");
-					BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-						       iscsi_mac);
-				} else { /* FCoE */
-					memcpy(fip_mac, bp->dev->dev_addr,
-					       ETH_ALEN);
-					BNX2X_DEV_INFO("SD FCoE MODE\n");
-					BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
-						       fip_mac);
-				}
-				/* Zero primary MAC configuration */
-				memset(bp->dev->dev_addr, 0, ETH_ALEN);
-			}
-		}
-
-		if (IS_MF_FCOE_AFEX(bp))
-			/* use FIP MAC as primary MAC */
-			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
-#endif
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
 	} else {
 		/* in SF read MACs from port configuration */
 		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
-#ifdef BCM_CNIC
-		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
-				    iscsi_mac_upper);
-		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
-				   iscsi_mac_lower);
-		bnx2x_set_mac_buf(iscsi_mac, val, val2);
-
-		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
-				    fcoe_fip_mac_upper);
-		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
-				   fcoe_fip_mac_lower);
-		bnx2x_set_mac_buf(fip_mac, val, val2);
-#endif
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
 	}
 
 	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
-#ifdef BCM_CNIC
-	/* Disable iSCSI if MAC configuration is
-	 * invalid.
-	 */
-	if (!is_valid_ether_addr(iscsi_mac)) {
-		bp->flags |= NO_ISCSI_FLAG;
-		memset(iscsi_mac, 0, ETH_ALEN);
-	}
-
-	/* Disable FCoE if MAC configuration is
-	 * invalid.
-	 */
-	if (!is_valid_ether_addr(fip_mac)) {
-		bp->flags |= NO_FCOE_FLAG;
-		memset(bp->fip_mac, 0, ETH_ALEN);
-	}
-#endif
-
 	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
 		dev_err(&bp->pdev->dev,
 			"bad Ethernet MAC address configuration: %pM\n"
@@ -10840,9 +11007,7 @@
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
-#ifdef BCM_CNIC
-	mutex_init(&bp->cnic_mutex);
-#endif
+
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10880,10 +11045,7 @@
 		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
 
 	bp->disable_tpa = disable_tpa;
-
-#ifdef BCM_CNIC
 	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
-#endif
 
 	/* Set TPA flags */
 	if (bp->disable_tpa) {
@@ -10917,12 +11079,10 @@
 	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 	bnx2x_dcbx_init_params(bp);
 
-#ifdef BCM_CNIC
 	if (CHIP_IS_E1x(bp))
 		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
 	else
 		bp->cnic_base_cl_id = FP_SB_MAX_E2;
-#endif
 
 	/* multiple tx priority */
 	if (CHIP_IS_E1x(bp))
@@ -10932,6 +11092,16 @@
 	if (CHIP_IS_E3B0(bp))
 		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 
+	/* We need at least one default status block for slow-path events,
+	 * a second status block for the L2 queue, and a third status block
+	 * for CNIC if supported.
+	 */
+	if (CNIC_SUPPORT(bp))
+		bp->min_msix_vec_cnt = 3;
+	else
+		bp->min_msix_vec_cnt = 2;
+	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+
 	return rc;
 }
 
@@ -11168,11 +11338,9 @@
 	}
 
 	bp->rx_mode = rx_mode;
-#ifdef BCM_CNIC
 	/* handle ISCSI SD mode */
 	if (IS_MF_ISCSI_SD(bp))
 		bp->rx_mode = BNX2X_RX_MODE_NONE;
-#endif
 
 	/* Schedule the rx_mode command */
 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11284,7 +11452,7 @@
 #endif
 	.ndo_setup_tc		= bnx2x_setup_tc,
 
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
 	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
 #endif
 };
@@ -11750,9 +11918,8 @@
 {
 	int cid_count = BNX2X_L2_MAX_CID(bp);
 
-#ifdef BCM_CNIC
-	cid_count += CNIC_CID_MAX;
-#endif
+	if (CNIC_SUPPORT(bp))
+		cid_count += CNIC_CID_MAX;
 	return roundup(cid_count, QM_CID_ROUND);
 }
 
@@ -11762,7 +11929,8 @@
  * @dev:	pci device
  *
  */
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
+				     int cnic_cnt)
 {
 	int pos;
 	u16 control;
@@ -11774,7 +11942,7 @@
 	 * one fast path queue: one FP queue + SB for CNIC
 	 */
 	if (!pos)
-		return 1 + CNIC_PRESENT;
+		return 1 + cnic_cnt;
 
 	/*
 	 * The value in the PCI configuration space is the index of the last
@@ -11794,6 +11962,7 @@
 	int pcie_width, pcie_speed;
 	int rc, max_non_def_sbs;
 	int rx_count, tx_count, rss_count, doorbell_size;
+	int cnic_cnt;
 	/*
 	 * An estimated maximum supported CoS number according to the chip
 	 * version.
@@ -11837,21 +12006,22 @@
 		return -ENODEV;
 	}
 
-	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+	cnic_cnt = 1;
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
 
 	WARN_ON(!max_non_def_sbs);
 
 	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
-	rss_count = max_non_def_sbs - CNIC_PRESENT;
+	rss_count = max_non_def_sbs - cnic_cnt;
 
 	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
-	rx_count = rss_count + FCOE_PRESENT;
+	rx_count = rss_count + cnic_cnt;
 
 	/*
 	 * Maximum number of netdev Tx queues:
 	 * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
 	 */
-	tx_count = rss_count * max_cos_est + FCOE_PRESENT;
+	tx_count = rss_count * max_cos_est + cnic_cnt;
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11862,6 +12032,8 @@
 
 	bp->igu_sb_cnt = max_non_def_sbs;
 	bp->msg_enable = debug;
+	bp->cnic_support = cnic_cnt;
+
 	pci_set_drvdata(pdev, dev);
 
 	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11870,6 +12042,7 @@
 		return rc;
 	}
 
+	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
 	BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
 
 	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11902,10 +12075,10 @@
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
-#ifdef BCM_CNIC
-	/* disable FCOE L2 queue for E1x */
+	/* disable FCOE L2 queue for E1x */
 	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
+
 	/* disable FCOE for 57840 device, until FW supports it */
 	switch (ent->driver_data) {
 	case BCM57840_O:
@@ -11915,8 +12088,6 @@
 	case BCM57840_MF:
 		bp->flags |= NO_FCOE_FLAG;
 	}
-#endif
-
 
 	/* Set bp->num_queues for MSI-X mode*/
 	bnx2x_set_num_queues(bp);
@@ -11932,14 +12103,13 @@
 		goto init_one_exit;
 	}
 
-#ifdef BCM_CNIC
+
 	if (!NO_FCOE(bp)) {
 		/* Add storage MAC address */
 		rtnl_lock();
 		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-#endif
 
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 
@@ -11984,14 +12154,12 @@
 	}
 	bp = netdev_priv(dev);
 
-#ifdef BCM_CNIC
 	/* Delete storage MAC address */
 	if (!NO_FCOE(bp)) {
 		rtnl_lock();
 		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-#endif
 
 #ifdef BCM_DCBNL
 	/* Delete app tlvs from dcbnl */
@@ -12039,15 +12207,17 @@
 
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
-#ifdef BCM_CNIC
-	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 
 	bnx2x_netif_stop(bp, 0);
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
 
 	del_timer_sync(&bp->timer);
 
@@ -12238,7 +12408,6 @@
 module_init(bnx2x_init);
 module_exit(bnx2x_cleanup);
 
-#ifdef BCM_CNIC
 /**
  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
  *
@@ -12691,12 +12860,31 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	int rc;
+
+	DP(NETIF_MSG_IFUP, "Register_cnic called\n");
 
 	if (ops == NULL) {
 		BNX2X_ERR("NULL ops received\n");
 		return -EINVAL;
 	}
 
+	if (!CNIC_SUPPORT(bp)) {
+		BNX2X_ERR("Can't register CNIC when not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!CNIC_LOADED(bp)) {
+		rc = bnx2x_load_cnic(bp);
+		if (rc) {
+			BNX2X_ERR("CNIC-related load failed\n");
+			return rc;
+		}
+
+	}
+
+	bp->cnic_enabled = true;
+
 	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!bp->cnic_kwq)
 		return -ENOMEM;
@@ -12788,5 +12976,4 @@
 }
 EXPORT_SYMBOL(bnx2x_cnic_probe);
 
-#endif /* BCM_CNIC */
 
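Note on the bnx2x_main.c changes above: the compile-time BCM_CNIC ifdefs are replaced by per-device runtime checks (CNIC_SUPPORT()/CNIC_LOADED()), so CNIC handling is decided when the device is probed and when the CNIC queues are actually brought up. The user-space sketch below is hypothetical and not taken from the driver; it only illustrates the pattern of gating an optional feature on capability flags carried in the device structure instead of a build option:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical device context; the real driver keeps these flags in
	 * struct bnx2x behind the CNIC_SUPPORT()/CNIC_LOADED() macros. */
	struct dev_ctx {
		bool cnic_support;	/* fixed when the device is probed */
		bool cnic_loaded;	/* set once the CNIC queues are up */
	};

	static void stop_queues(const struct dev_ctx *dev)
	{
		puts("stop ethernet queues");
		if (dev->cnic_loaded)	/* runtime check replaces #ifdef BCM_CNIC */
			puts("stop cnic queues");
	}

	int main(void)
	{
		struct dev_ctx dev = { .cnic_support = true, .cnic_loaded = false };

		stop_queues(&dev);			/* CNIC not loaded yet */
		dev.cnic_loaded = dev.cnic_support;	/* e.g. after register_cnic */
		stop_queues(&dev);			/* CNIC queues included */
		return 0;
	}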
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d..f8d432a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
 #define NIG_REG_LLH1_ERROR_MASK 				 0x10090
 /* [RW 8] event id for llh1 */
 #define NIG_REG_LLH1_EVENT_ID					 0x10088
+#define NIG_REG_LLH1_FUNC_EN					 0x16104
 #define NIG_REG_LLH1_FUNC_MEM					 0x161c0
 #define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
 #define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
@@ -2302,6 +2303,15 @@
  * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
  * accommodate the 9 input clients to ETS arbiter. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB			 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
+ * for BRB LB interface is bypassed and PBF LB traffic is always selected to
+ * send to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE				 0x182f4
 #define NIG_REG_P1_HWPFC_ENABLE					 0x181d0
 #define NIG_REG_P1_MAC_IN_EN					 0x185c0
 /* [RW 1] Output enable for TX MAC interface */
@@ -2418,6 +2428,12 @@
 #define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB			 0x186e4
 /* [R 1] TX FIFO for transmitting data to MAC is empty. */
 #define NIG_REG_P1_TX_MACFIFO_EMPTY				 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE				 0x182f8
 /* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
    forwarded to the host. */
 #define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY			 0x182b8
@@ -5482,6 +5498,7 @@
 #define XMAC_CTRL_REG_RX_EN					 (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB				 (0x1<<7)
 #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN				 (0x1<<18)
 #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN				 (0x1<<17)
 #define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON			 (0x1<<1)
@@ -5502,11 +5519,14 @@
 #define XMAC_REG_PAUSE_CTRL					 0x68
 #define XMAC_REG_PFC_CTRL					 0x70
 #define XMAC_REG_PFC_CTRL_HI					 0x74
+#define XMAC_REG_RX_LSS_CTRL					 0x50
 #define XMAC_REG_RX_LSS_STATUS					 0x58
 /* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
  * CRC in strip mode */
 #define XMAC_REG_RX_MAX_SIZE					 0x40
 #define XMAC_REG_TX_CTRL					 0x20
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE		 (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE		 (0x1<<1)
 /* [RW 16] Indirect access to the XX table of the XX protection mechanism.
    The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
    header pointer. */
@@ -6672,6 +6692,7 @@
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI	0x1B00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS	0x1E00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI	0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2	0x3900
 
 
 #define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
@@ -7046,7 +7067,8 @@
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000
-#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
+#define MDIO_WC_REG_PCS_STATUS2				0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL			0x0096
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
 #define MDIO_WC_REG_XGXSBLK1_DESKEW			0x8010
@@ -7078,6 +7100,7 @@
 #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
 #define MDIO_WC_REG_PAR_DET_10G_CTRL			0x8131
 #define MDIO_WC_REG_XGXS_X2_CONTROL2			0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3			0x8142
 #define MDIO_WC_REG_XGXS_RX_LN_SWAP1			0x816B
 #define MDIO_WC_REG_XGXS_TX_LN_SWAP1			0x8169
 #define MDIO_WC_REG_GP2_STATUS_GP_2_0			0x81d0
@@ -7112,6 +7135,7 @@
 #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET		0x0a
 #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK		0x7c00
 #define MDIO_WC_REG_TX_FIR_TAP_ENABLE		0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP		0x82e2
 #define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL	0x82e3
 #define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL	0x82e6
 #define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL	0x82e7
@@ -7129,9 +7153,16 @@
 #define MDIO_WC_REG_DIGITAL4_MISC5			0x833e
 #define MDIO_WC_REG_DIGITAL5_MISC6			0x8345
 #define MDIO_WC_REG_DIGITAL5_MISC7			0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS		0x834d
 #define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED		0x834e
 #define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL		0x8350
 #define MDIO_WC_REG_CL49_USERB0_CTRL			0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL			0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT			0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1			0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2			0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3			0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD			0x837b
 #define MDIO_WC_REG_EEE_COMBO_CONTROL0			0x8390
 #define MDIO_WC_REG_TX66_CONTROL			0x83b0
 #define MDIO_WC_REG_RX66_CONTROL			0x83c0
@@ -7145,7 +7176,17 @@
 #define MDIO_WC_REG_RX66_SCW3_MASK			0x83c9
 #define MDIO_WC_REG_FX100_CTRL1				0x8400
 #define MDIO_WC_REG_FX100_CTRL3				0x8402
-
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5		0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6		0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7		0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9		0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10		0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11		0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1			0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2			0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3			0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE		0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE			0x8457
 #define MDIO_WC_REG_MICROBLK_CMD			0xffc2
 #define MDIO_WC_REG_MICROBLK_DL_STATUS			0xffc5
 #define MDIO_WC_REG_MICROBLK_CMD3			0xffcc
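The new NIG_REG_P0/P1_TX_MNG_HOST_ENABLE definitions above describe a single read/write bit that routes MCP packets to the BRB LB interface (and so to the host) when set. A driver would touch it with the usual register accessors; the lines below are only an illustration of such usage, not code from this patch:

	/* illustrative only: enable the MCP-to-host path for the current port */
	REG_WR(bp, BP_PORT(bp) ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
				 NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);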
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c..b8b4b74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5350,12 +5350,24 @@
 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
 			next_state = BNX2X_F_STATE_STARTED;
+
+		/* Switch_update ramrod can be sent in either started or
+		 * tx_stopped state, and it doesn't change the state.
+		 */
+		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
 		else if (cmd == BNX2X_F_CMD_TX_STOP)
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
 		break;
 	case BNX2X_F_STATE_TX_STOPPED:
-		if (cmd == BNX2X_F_CMD_TX_START)
+		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		else if (cmd == BNX2X_F_CMD_TX_START)
 			next_state = BNX2X_F_STATE_STARTED;
 
 		break;
@@ -5637,6 +5649,28 @@
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_switch_update_params *switch_update_params =
+		&params->params.switch_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->tx_switch_suspend_change_flg = 1;
+	rdata->tx_switch_suspend = switch_update_params->suspend;
+	rdata->echo = SWITCH_UPDATE;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
 					 struct bnx2x_func_state_params *params)
 {
@@ -5657,6 +5691,7 @@
 		cpu_to_le16(afex_update_params->afex_default_vlan);
 	rdata->allowed_priorities_change_flg = 1;
 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
+	rdata->echo = AFEX_UPDATE;
 
 	/*  No need for an explicit memory barrier here as long we would
 	 *  need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@
 		return bnx2x_func_send_tx_stop(bp, params);
 	case BNX2X_F_CMD_TX_START:
 		return bnx2x_func_send_tx_start(bp, params);
+	case BNX2X_F_CMD_SWITCH_UPDATE:
+		return bnx2x_func_send_switch_update(bp, params);
 	default:
 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
 		return -EINVAL;
@@ -5818,16 +5855,30 @@
 			    struct bnx2x_func_state_params *params)
 {
 	struct bnx2x_func_sp_obj *o = params->f_obj;
-	int rc;
+	int rc, cnt = 300;
 	enum bnx2x_func_cmd cmd = params->cmd;
 	unsigned long *pending = &o->pending;
 
 	mutex_lock(&o->one_pending_mutex);
 
 	/* Check that the requested transition is legal */
-	if (o->check_transition(bp, o, params)) {
+	rc = o->check_transition(bp, o, params);
+	if ((rc == -EBUSY) &&
+	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+		while ((rc == -EBUSY) && (--cnt > 0)) {
+			mutex_unlock(&o->one_pending_mutex);
+			msleep(10);
+			mutex_lock(&o->one_pending_mutex);
+			rc = o->check_transition(bp, o, params);
+		}
+		if (rc == -EBUSY) {
+			mutex_unlock(&o->one_pending_mutex);
+			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+			return rc;
+		}
+	} else if (rc) {
 		mutex_unlock(&o->one_pending_mutex);
-		return -EINVAL;
+		return rc;
 	}
 
 	/* Set "pending" bit */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4..adbd91b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@
 	 * pending commands list.
 	 */
 	RAMROD_CONT,
+	/* If there is another pending ramrod, wait until it finishes and
+	 * re-try to submit this one. This flag can be set only in sleepable
+	 * context, and should not be set from the context that completes the
+	 * ramrods as deadlock will occur.
+	 */
+	RAMROD_RETRY,
 };
 
 typedef enum {
@@ -1061,6 +1067,7 @@
 	BNX2X_F_CMD_AFEX_VIFLISTS,
 	BNX2X_F_CMD_TX_STOP,
 	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_SWITCH_UPDATE,
 	BNX2X_F_CMD_MAX,
 };
 
@@ -1103,6 +1110,10 @@
 	u8 network_cos_mode;
 };
 
+struct bnx2x_func_switch_update_params {
+	u8 suspend;
+};
+
 struct bnx2x_func_afex_update_params {
 	u16 vif_id;
 	u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@
 		struct bnx2x_func_hw_init_params hw_init;
 		struct bnx2x_func_hw_reset_params hw_reset;
 		struct bnx2x_func_start_params start;
+		struct bnx2x_func_switch_update_params switch_update;
 		struct bnx2x_func_afex_update_params afex_update;
 		struct bnx2x_func_afex_viflists_params afex_viflists;
 		struct bnx2x_func_tx_start_params tx_start;
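The RAMROD_RETRY flag and the new BNX2X_F_CMD_SWITCH_UPDATE command above are meant to be used together from sleepable context: the caller fills a bnx2x_func_state_params, sets the retry flag, and lets bnx2x_func_state_change() poll (300 iterations of msleep(10), roughly 3 seconds) until any pending function ramrod completes. The fragment below sketches one plausible caller built only from the fields visible in these hunks; bp, f_obj and suspend are assumed to exist in the surrounding driver code, and this is illustrative rather than part of the patch:

	struct bnx2x_func_state_params func_params = {NULL};

	/* f_obj is the function state object the driver already uses for
	 * other function-level ramrods (assumed here, not shown in the hunk) */
	func_params.f_obj = f_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* sleepable context only: retry instead of failing with -EBUSY
	 * while a previous function ramrod is still pending */
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.params.switch_update.suspend = suspend;

	return bnx2x_func_state_change(bp, &func_params);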
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a8800ac..5cc976d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -90,10 +90,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			125
+#define TG3_MIN_NUM			127
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"September 26, 2012"
+#define DRV_MODULE_RELDATE	"November 14, 2012"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -226,6 +226,9 @@
 module_param(tg3_debug, int, 0);
 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
 
+#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
+#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
+
 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
@@ -245,20 +248,28 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
@@ -266,8 +277,13 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
+			PCI_VENDOR_ID_LENOVO,
+			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -286,18 +302,28 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
@@ -398,19 +424,27 @@
 };
 
 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
+#define TG3_NVRAM_TEST		0
+#define TG3_LINK_TEST		1
+#define TG3_REGISTER_TEST	2
+#define TG3_MEMORY_TEST		3
+#define TG3_MAC_LOOPB_TEST	4
+#define TG3_PHY_LOOPB_TEST	5
+#define TG3_EXT_LOOPB_TEST	6
+#define TG3_INTERRUPT_TEST	7
 
 
 static const struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_test_keys[] = {
-	{ "nvram test        (online) " },
-	{ "link test         (online) " },
-	{ "register test     (offline)" },
-	{ "memory test       (offline)" },
-	{ "mac loopback test (offline)" },
-	{ "phy loopback test (offline)" },
-	{ "ext loopback test (offline)" },
-	{ "interrupt test    (offline)" },
+	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
+	[TG3_LINK_TEST]		= { "link test         (online) " },
+	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
+	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
+	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
+	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
+	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
+	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
 };
 
 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
@@ -2447,6 +2481,18 @@
 	return err;
 }
 
+static void tg3_carrier_on(struct tg3 *tp)
+{
+	netif_carrier_on(tp->dev);
+	tp->link_up = true;
+}
+
+static void tg3_carrier_off(struct tg3 *tp)
+{
+	netif_carrier_off(tp->dev);
+	tp->link_up = false;
+}
+
 /* This will reset the tigon3 PHY if there is no valid
  * link unless the FORCE argument is non-zero.
  */
@@ -2465,8 +2511,8 @@
 	if (err != 0)
 		return -EBUSY;
 
-	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
-		netif_carrier_off(tp->dev);
+	if (netif_running(tp->dev) && tp->link_up) {
+		tg3_carrier_off(tp);
 		tg3_link_report(tp);
 	}
 
@@ -4160,6 +4206,24 @@
 	return true;
 }
 
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
+{
+	if (curr_link_up != tp->link_up) {
+		if (curr_link_up) {
+			tg3_carrier_on(tp);
+		} else {
+			tg3_carrier_off(tp);
+			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+
+		tg3_link_report(tp);
+		return true;
+	}
+
+	return false;
+}
+
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 {
 	int current_link_up;
@@ -4192,7 +4256,7 @@
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
-	    netif_carrier_ok(tp->dev)) {
+	    tp->link_up) {
 		tg3_readphy(tp, MII_BMSR, &bmsr);
 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 		    !(bmsr & BMSR_LSTATUS))
@@ -4434,13 +4498,7 @@
 						 PCI_EXP_LNKCTL_CLKREQ_EN);
 	}
 
-	if (current_link_up != netif_carrier_ok(tp->dev)) {
-		if (current_link_up)
-			netif_carrier_on(tp->dev);
-		else
-			netif_carrier_off(tp->dev);
-		tg3_link_report(tp);
-	}
+	tg3_test_and_report_link_chg(tp, current_link_up);
 
 	return 0;
 }
@@ -5080,7 +5138,7 @@
 	orig_active_duplex = tp->link_config.active_duplex;
 
 	if (!tg3_flag(tp, HW_AUTONEG) &&
-	    netif_carrier_ok(tp->dev) &&
+	    tp->link_up &&
 	    tg3_flag(tp, INIT_COMPLETE)) {
 		mac_status = tr32(MAC_STATUS);
 		mac_status &= (MAC_STATUS_PCS_SYNCED |
@@ -5158,13 +5216,7 @@
 				    LED_CTRL_TRAFFIC_OVERRIDE));
 	}
 
-	if (current_link_up != netif_carrier_ok(tp->dev)) {
-		if (current_link_up)
-			netif_carrier_on(tp->dev);
-		else
-			netif_carrier_off(tp->dev);
-		tg3_link_report(tp);
-	} else {
+	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
 		if (orig_pause_cfg != now_pause_cfg ||
 		    orig_active_speed != tp->link_config.active_speed ||
@@ -5257,7 +5309,7 @@
 			new_bmcr |= BMCR_SPEED1000;
 
 			/* Force a linkdown */
-			if (netif_carrier_ok(tp->dev)) {
+			if (tp->link_up) {
 				u32 adv;
 
 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
@@ -5269,7 +5321,7 @@
 							   BMCR_ANRESTART |
 							   BMCR_ANENABLE);
 				udelay(10);
-				netif_carrier_off(tp->dev);
+				tg3_carrier_off(tp);
 			}
 			tg3_writephy(tp, MII_BMCR, new_bmcr);
 			bmcr = new_bmcr;
@@ -5335,15 +5387,7 @@
 	tp->link_config.active_speed = current_speed;
 	tp->link_config.active_duplex = current_duplex;
 
-	if (current_link_up != netif_carrier_ok(tp->dev)) {
-		if (current_link_up)
-			netif_carrier_on(tp->dev);
-		else {
-			netif_carrier_off(tp->dev);
-			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
-		}
-		tg3_link_report(tp);
-	}
+	tg3_test_and_report_link_chg(tp, current_link_up);
 	return err;
 }
 
@@ -5355,7 +5399,7 @@
 		return;
 	}
 
-	if (!netif_carrier_ok(tp->dev) &&
+	if (!tp->link_up &&
 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
 		u32 bmcr;
 
@@ -5385,7 +5429,7 @@
 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
 			}
 		}
-	} else if (netif_carrier_ok(tp->dev) &&
+	} else if (tp->link_up &&
 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		u32 phy2;
@@ -5451,7 +5495,7 @@
 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 
 	if (!tg3_flag(tp, 5705_PLUS)) {
-		if (netif_carrier_ok(tp->dev)) {
+		if (tp->link_up) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
 			     tp->coal.stats_block_coalesce_usecs);
 		} else {
@@ -5461,7 +5505,7 @@
 
 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
 		val = tr32(PCIE_PWR_MGMT_THRESH);
-		if (!netif_carrier_ok(tp->dev))
+		if (!tp->link_up)
 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
 			      tp->pwrmgmt_thresh;
 		else
@@ -6477,6 +6521,7 @@
 {
 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
 	tg3_napi_disable(tp);
+	netif_carrier_off(tp->dev);
 	netif_tx_disable(tp->dev);
 }
 
@@ -6488,6 +6533,9 @@
 	 */
 	netif_tx_wake_all_queues(tp->dev);
 
+	if (tp->link_up)
+		netif_carrier_on(tp->dev);
+
 	tg3_napi_enable(tp);
 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
 	tg3_enable_ints(tp);
@@ -8386,7 +8434,7 @@
 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
 
-		if (!netif_carrier_ok(tp->dev))
+		if (!tp->link_up)
 			val = 0;
 
 		tw32(HOSTCC_STAT_COAL_TICKS, val);
@@ -8662,14 +8710,14 @@
 	if (!tg3_flag(tp, SUPPORT_MSIX))
 		return;
 
-	if (tp->irq_cnt <= 2) {
+	if (tp->rxq_cnt == 1) {
 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
 		return;
 	}
 
 	/* Validate table against current IRQ count */
 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
-		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
 			break;
 	}
 
@@ -9679,7 +9727,7 @@
 {
 	struct tg3_hw_stats *sp = tp->hw_stats;
 
-	if (!netif_carrier_ok(tp->dev))
+	if (!tp->link_up)
 		return;
 
 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
@@ -9823,11 +9871,11 @@
 			u32 mac_stat = tr32(MAC_STATUS);
 			int need_setup = 0;
 
-			if (netif_carrier_ok(tp->dev) &&
+			if (tp->link_up &&
 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
 				need_setup = 1;
 			}
-			if (!netif_carrier_ok(tp->dev) &&
+			if (!tp->link_up &&
 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
 					 MAC_STATUS_SIGNAL_DET))) {
 				need_setup = 1;
@@ -10429,10 +10477,8 @@
 {
 	int i;
 
-	tg3_napi_disable(tp);
 	tg3_reset_task_cancel(tp);
-
-	netif_tx_disable(tp->dev);
+	tg3_netif_stop(tp);
 
 	tg3_timer_stop(tp);
 
@@ -10481,7 +10527,7 @@
 		}
 	}
 
-	netif_carrier_off(tp->dev);
+	tg3_carrier_off(tp);
 
 	err = tg3_power_up(tp);
 	if (err)
@@ -10514,7 +10560,7 @@
 
 	tg3_power_down(tp);
 
-	netif_carrier_off(tp->dev);
+	tg3_carrier_off(tp);
 
 	return 0;
 }
@@ -10888,7 +10934,7 @@
 			cmd->advertising |= ADVERTISED_Asym_Pause;
 		}
 	}
-	if (netif_running(dev) && netif_carrier_ok(dev)) {
+	if (netif_running(dev) && tp->link_up) {
 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
 		cmd->duplex = tp->link_config.active_duplex;
 		cmd->lp_advertising = tp->link_config.rmt_adv;
@@ -11406,7 +11452,7 @@
 
 	tg3_stop(tp);
 
-	netif_carrier_off(dev);
+	tg3_carrier_off(tp);
 
 	tg3_start(tp, true, false);
 
@@ -11755,7 +11801,7 @@
 		max = TG3_COPPER_TIMEOUT_SEC;
 
 	for (i = 0; i < max; i++) {
-		if (netif_carrier_ok(tp->dev))
+		if (tp->link_up)
 			return 0;
 
 		if (msleep_interruptible(1000))
@@ -12326,19 +12372,19 @@
 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
 
 	if (!netif_running(tp->dev)) {
-		data[0] = TG3_LOOPBACK_FAILED;
-		data[1] = TG3_LOOPBACK_FAILED;
+		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
 		if (do_extlpbk)
-			data[2] = TG3_LOOPBACK_FAILED;
+			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
 		goto done;
 	}
 
 	err = tg3_reset_hw(tp, 1);
 	if (err) {
-		data[0] = TG3_LOOPBACK_FAILED;
-		data[1] = TG3_LOOPBACK_FAILED;
+		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
 		if (do_extlpbk)
-			data[2] = TG3_LOOPBACK_FAILED;
+			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
 		goto done;
 	}
 
@@ -12361,11 +12407,11 @@
 		tg3_mac_loopback(tp, true);
 
 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
-			data[0] |= TG3_STD_LOOPBACK_FAILED;
+			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
 
 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
-			data[0] |= TG3_JMB_LOOPBACK_FAILED;
+			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
 
 		tg3_mac_loopback(tp, false);
 	}
@@ -12384,13 +12430,13 @@
 		}
 
 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
-			data[1] |= TG3_STD_LOOPBACK_FAILED;
+			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
 		if (tg3_flag(tp, TSO_CAPABLE) &&
 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
-			data[1] |= TG3_TSO_LOOPBACK_FAILED;
+			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
-			data[1] |= TG3_JMB_LOOPBACK_FAILED;
+			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
 
 		if (do_extlpbk) {
 			tg3_phy_lpbk_set(tp, 0, true);
@@ -12402,13 +12448,16 @@
 			mdelay(40);
 
 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
-				data[2] |= TG3_STD_LOOPBACK_FAILED;
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_STD_LOOPBACK_FAILED;
 			if (tg3_flag(tp, TSO_CAPABLE) &&
 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
-				data[2] |= TG3_TSO_LOOPBACK_FAILED;
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_TSO_LOOPBACK_FAILED;
 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
-				data[2] |= TG3_JMB_LOOPBACK_FAILED;
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_JMB_LOOPBACK_FAILED;
 		}
 
 		/* Re-enable gphy autopowerdown. */
@@ -12416,7 +12465,8 @@
 			tg3_phy_toggle_apd(tp, true);
 	}
 
-	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
+	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
+	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
 
 done:
 	tp->phy_flags |= eee_cap;
@@ -12441,11 +12491,11 @@
 
 	if (tg3_test_nvram(tp) != 0) {
 		etest->flags |= ETH_TEST_FL_FAILED;
-		data[0] = 1;
+		data[TG3_NVRAM_TEST] = 1;
 	}
 	if (!doextlpbk && tg3_test_link(tp)) {
 		etest->flags |= ETH_TEST_FL_FAILED;
-		data[1] = 1;
+		data[TG3_LINK_TEST] = 1;
 	}
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		int err, err2 = 0, irq_sync = 0;
@@ -12471,25 +12521,25 @@
 
 		if (tg3_test_registers(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
-			data[2] = 1;
+			data[TG3_REGISTER_TEST] = 1;
 		}
 
 		if (tg3_test_memory(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
-			data[3] = 1;
+			data[TG3_MEMORY_TEST] = 1;
 		}
 
 		if (doextlpbk)
 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
 
-		if (tg3_test_loopback(tp, &data[4], doextlpbk))
+		if (tg3_test_loopback(tp, data, doextlpbk))
 			etest->flags |= ETH_TEST_FL_FAILED;
 
 		tg3_full_unlock(tp);
 
 		if (tg3_test_interrupt(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
-			data[7] = 1;
+			data[TG3_INTERRUPT_TEST] = 1;
 		}
 
 		tg3_full_lock(tp, 0);
@@ -14026,7 +14076,8 @@
 
 out_no_vpd:
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
 			strcpy(tp->board_part_number, "BCM5717");
 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
 			strcpy(tp->board_part_number, "BCM5718");
@@ -14397,6 +14448,7 @@
 		tg3_flag_set(tp, CPMU_PRESENT);
 
 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
@@ -14424,6 +14476,9 @@
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
 
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
+
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
@@ -14462,7 +14517,30 @@
 		tg3_flag_set(tp, 5705_PLUS);
 }
 
-static int __devinit tg3_get_invariants(struct tg3 *tp)
+static bool tg3_10_100_only_device(struct tg3 *tp,
+				   const struct pci_device_id *ent)
+{
+	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
+
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+	    (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
+		return true;
+
+	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
+				return true;
+		} else {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int __devinit tg3_get_invariants(struct tg3 *tp,
+					const struct pci_device_id *ent)
 {
 	u32 misc_ctrl_reg;
 	u32 pci_state_reg, grc_misc_cfg;
@@ -15141,22 +15219,7 @@
 	else
 		tp->mac_mode = 0;
 
-	/* these are limited to 10/100 only */
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
-	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
-	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
-	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
-	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
-	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
-	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
-	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
-	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
-	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
+	if (tg3_10_100_only_device(tp, ent))
 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
 
 	err = tg3_phy_probe(tp);
@@ -16013,6 +16076,7 @@
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
@@ -16034,7 +16098,7 @@
 	dev->netdev_ops = &tg3_netdev_ops;
 	dev->irq = pdev->irq;
 
-	err = tg3_get_invariants(tp);
+	err = tg3_get_invariants(tp, ent);
 	if (err) {
 		dev_err(&pdev->dev,
 			"Problem fetching invariants of chip, aborting\n");
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d9308c32..4534804 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -44,12 +44,14 @@
 #define  TG3PCI_DEVICE_TIGON3_5761S	 0x1688
 #define  TG3PCI_DEVICE_TIGON3_5761SE	 0x1689
 #define  TG3PCI_DEVICE_TIGON3_57780	 0x1692
+#define  TG3PCI_DEVICE_TIGON3_5787M	 0x1693
 #define  TG3PCI_DEVICE_TIGON3_57760	 0x1690
 #define  TG3PCI_DEVICE_TIGON3_57790	 0x1694
 #define  TG3PCI_DEVICE_TIGON3_57788	 0x1691
 #define  TG3PCI_DEVICE_TIGON3_5785_G	 0x1699 /* GPHY */
 #define  TG3PCI_DEVICE_TIGON3_5785_F	 0x16a0 /* 10/100 only */
 #define  TG3PCI_DEVICE_TIGON3_5717	 0x1655
+#define  TG3PCI_DEVICE_TIGON3_5717_C	 0x1665
 #define  TG3PCI_DEVICE_TIGON3_5718	 0x1656
 #define  TG3PCI_DEVICE_TIGON3_57781	 0x16b1
 #define  TG3PCI_DEVICE_TIGON3_57785	 0x16b5
@@ -95,6 +97,10 @@
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2	0x0099
 #define TG3PCI_SUBVENDOR_ID_IBM			PCI_VENDOR_ID_IBM
 #define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2	0x0281
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_A	0x0601
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_B	0x0612
+#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M	0x3056
+
 /* 0x30 --> 0x64 unused */
 #define TG3PCI_MSI_DATA			0x00000064
 /* 0x66 --> 0x68 unused */
@@ -149,6 +155,7 @@
 #define  CHIPREV_ID_57780_A0		 0x57780000
 #define  CHIPREV_ID_57780_A1		 0x57780001
 #define  CHIPREV_ID_5717_A0		 0x05717000
+#define  CHIPREV_ID_5717_C0		 0x05717200
 #define  CHIPREV_ID_57765_A0		 0x57785000
 #define  CHIPREV_ID_5719_A0		 0x05719000
 #define  CHIPREV_ID_5720_A0		 0x05720000
@@ -3262,6 +3269,7 @@
 #if IS_ENABLED(CONFIG_HWMON)
 	struct device			*hwmon_dev;
 #endif
+	bool				link_up;
 };
 
 #endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index db93191..ceb0de0 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,13 +2,10 @@
 # Atmel device configuration
 #
 
-config HAVE_NET_MACB
-	bool
-
 config NET_CADENCE
 	bool "Cadence devices"
+	depends on HAS_IOMEM
 	default y
-	depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 	  Make sure you know the name of your card. Read the Ethernet-HOWTO,
@@ -25,16 +22,14 @@
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on ARM && ARCH_AT91RM9200
 	select NET_CORE
-	select MII
+	select MACB
 	---help---
 	  If you wish to compile a kernel for the AT91RM9200 and enable
 	  ethernet support, then you should always answer Y to this.
 
 config MACB
 	tristate "Cadence MACB/GEM support"
-	depends on HAVE_NET_MACB
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4e980a78..716cc01 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -6,11 +6,6 @@
  * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
  * Initial version by Rick Bronson 01/11/2003
  *
- * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
- *   (Polaroid Corporation)
- *
- * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
- *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -20,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/mii.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -31,956 +25,248 @@
 #include <linux/clk.h>
 #include <linux/gfp.h>
 #include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
 
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/mach-types.h>
+#include "macb.h"
 
-#include <mach/at91rm9200_emac.h>
-#include <asm/gpio.h>
-#include <mach/board.h>
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ	0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR	9
 
-#include "at91_ether.h"
-
-#define DRV_NAME	"at91_ether"
-#define DRV_VERSION	"1.0"
-
-#define LINK_POLL_INTERVAL	(HZ)
-
-/* ..................................................................... */
-
-/*
- * Read from a EMAC register.
- */
-static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
 {
-	return __raw_readl(lp->emac_base + reg);
-}
-
-/*
- * Write to a EMAC register.
- */
-static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
-{
-	__raw_writel(value, lp->emac_base + reg);
-}
-
-/* ........................... PHY INTERFACE ........................... */
-
-/*
- * Enable the MDIO bit in MAC control register
- * When not called from an interrupt-handler, access to the PHY must be
- *  protected by a spinlock.
- */
-static void enable_mdi(struct at91_private *lp)
-{
-	unsigned long ctl;
-
-	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE);	/* enable management port */
-}
-
-/*
- * Disable the MDIO bit in the MAC control register
- */
-static void disable_mdi(struct at91_private *lp)
-{
-	unsigned long ctl;
-
-	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-	at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE);	/* disable management port */
-}
-
-/*
- * Wait until the PHY operation is complete.
- */
-static inline void at91_phy_wait(struct at91_private *lp)
-{
-	unsigned long timeout = jiffies + 2;
-
-	while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
-		if (time_after(jiffies, timeout)) {
-			printk("at91_ether: MIO timeout\n");
-			break;
-		}
-		cpu_relax();
-	}
-}
-
-/*
- * Write value to the a PHY register
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
-{
-	at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
-		| ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
-
-	/* Wait until IDLE bit in Network Status register is cleared */
-	at91_phy_wait(lp);
-}
-
-/*
- * Read value stored in a PHY register.
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
-{
-	at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
-		| ((phy_addr & 0x1f) << 23) | (address << 18));
-
-	/* Wait until IDLE bit in Network Status register is cleared */
-	at91_phy_wait(lp);
-
-	*value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
-}
-
-/* ........................... PHY MANAGEMENT .......................... */
-
-/*
- * Access the PHY to determine the current link speed and mode, and update the
- * MAC accordingly.
- * If no link or auto-negotiation is busy, then no changes are made.
- */
-static void update_linkspeed(struct net_device *dev, int silent)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int bmsr, bmcr, lpa, mac_cfg;
-	unsigned int speed, duplex;
-
-	if (!mii_link_ok(&lp->mii)) {		/* no link */
-		netif_carrier_off(dev);
-		if (!silent)
-			printk(KERN_INFO "%s: Link down.\n", dev->name);
-		return;
-	}
-
-	/* Link up, or auto-negotiation still in progress */
-	read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
-	read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
-	if (bmcr & BMCR_ANENABLE) {				/* AutoNegotiation is enabled */
-		if (!(bmsr & BMSR_ANEGCOMPLETE))
-			return;			/* Do nothing - another interrupt generated when negotiation complete */
-
-		read_phy(lp, lp->phy_address, MII_LPA, &lpa);
-		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
-		else speed = SPEED_10;
-		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
-		else duplex = DUPLEX_HALF;
-	} else {
-		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
-		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
-	}
-
-	/* Update the MAC */
-	mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
-	if (speed == SPEED_100) {
-		if (duplex == DUPLEX_FULL)		/* 100 Full Duplex */
-			mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
-		else					/* 100 Half Duplex */
-			mac_cfg |= AT91_EMAC_SPD;
-	} else {
-		if (duplex == DUPLEX_FULL)		/* 10 Full Duplex */
-			mac_cfg |= AT91_EMAC_FD;
-		else {}					/* 10 Half Duplex */
-	}
-	at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
-
-	if (!silent)
-		printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
-	netif_carrier_on(dev);
-}
-
-/*
- * Handle interrupts from the PHY
- */
-static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int phy;
-
-	/*
-	 * This hander is triggered on both edges, but the PHY chips expect
-	 * level-triggering.  We therefore have to check if the PHY actually has
-	 * an IRQ pending.
-	 */
-	enable_mdi(lp);
-	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
-		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy);	/* ack interrupt in Davicom PHY */
-		if (!(phy & (1 << 0)))
-			goto done;
-	}
-	else if (lp->phy_type == MII_LXT971A_ID) {
-		read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy);	/* ack interrupt in Intel PHY */
-		if (!(phy & (1 << 2)))
-			goto done;
-	}
-	else if (lp->phy_type == MII_BCM5221_ID) {
-		read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy);	/* ack interrupt in Broadcom PHY */
-		if (!(phy & (1 << 0)))
-			goto done;
-	}
-	else if (lp->phy_type == MII_KS8721_ID) {
-		read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy);		/* ack interrupt in Micrel PHY */
-		if (!(phy & ((1 << 2) | 1)))
-			goto done;
-	}
-	else if (lp->phy_type == MII_T78Q21x3_ID) {					/* ack interrupt in Teridian PHY */
-		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
-		if (!(phy & ((1 << 2) | 1)))
-			goto done;
-	}
-	else if (lp->phy_type == MII_DP83848_ID) {
-		read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy);	/* ack interrupt in DP83848 PHY */
-		if (!(phy & (1 << 7)))
-			goto done;
-	}
-
-	update_linkspeed(dev, 0);
-
-done:
-	disable_mdi(lp);
-
-	return IRQ_HANDLED;
-}
-
-/*
- * Initialize and enable the PHY interrupt for link-state changes
- */
-static void enable_phyirq(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int dsintr, irq_number;
-	int status;
-
-	if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
-		/*
-		 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
-		 * or board does not have it connected.
-		 */
-		mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
-		return;
-	}
-
-	irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
-	status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
-	if (status) {
-		printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
-		return;
-	}
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
-		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
-		dsintr = dsintr & ~0xf00;		/* clear bits 8..11 */
-		write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
-		read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
-		dsintr = dsintr | 0xf2;			/* set bits 1, 4..7 */
-		write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
-		dsintr = (1 << 15) | ( 1 << 14);
-		write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
-		dsintr = (1 << 10) | ( 1 << 8);
-		write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
-	}
-	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
-		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
-		dsintr = dsintr | 0x500;		/* set bits 8, 10 */
-		write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
-		read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
-		dsintr = dsintr | 0x3c;			/* set bits 2..5 */
-		write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
-		read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
-		dsintr = dsintr | 0x3;			/* set bits 0,1 */
-		write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
-	}
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-}
-
-/*
- * Disable the PHY interrupt
- */
-static void disable_phyirq(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int dsintr;
-	unsigned int irq_number;
-
-	if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
-		del_timer_sync(&lp->check_timer);
-		return;
-	}
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
-		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
-		dsintr = dsintr | 0xf00;			/* set bits 8..11 */
-		write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
-		read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
-		dsintr = dsintr & ~0xf2;			/* clear bits 1, 4..7 */
-		write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
-		read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
-		dsintr = ~(1 << 14);
-		write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
-		read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
-		dsintr = ~((1 << 10) | (1 << 8));
-		write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
-	}
-	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
-		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
-		dsintr = dsintr & ~0x500;			/* clear bits 8, 10 */
-		write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
-	}
-	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
-		read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
-		dsintr = dsintr & ~0x3;				/* clear bits 0, 1 */
-		write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
-		read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
-		dsintr = dsintr & ~0x3c;			/* clear bits 2..5 */
-		write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
-	}
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
-	free_irq(irq_number, dev);			/* Free interrupt handler */
-}
-
-/*
- * Perform a software reset of the PHY.
- */
-#if 0
-static void reset_phy(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int bmcr;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	/* Perform PHY reset */
-	write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
-
-	/* Wait until PHY reset is complete */
-	do {
-		read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
-	} while (!(bmcr & BMCR_RESET));
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-}
-#endif
-
-static void at91ether_check_link(unsigned long dev_id)
-{
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct at91_private *lp = netdev_priv(dev);
-
-	enable_mdi(lp);
-	update_linkspeed(dev, 1);
-	disable_mdi(lp);
-
-	mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
-}
-
-/*
- * Perform any PHY-specific initialization.
- */
-static void __init initialize_phy(struct at91_private *lp)
-{
-	unsigned int val;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
-		read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
-		if ((val & (1 << 10)) == 0)			/* DSCR bit 10 is 0 -- fiber mode */
-			lp->phy_media = PORT_FIBRE;
-	} else if (machine_is_csb337()) {
-		/* mix link activity status into LED2 link state */
-		write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
-	} else if (machine_is_ecbat91())
-		write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-}
-
-/* ......................... ADDRESS MANAGEMENT ........................ */
-
-/*
- * NOTE: Your bootloader must always set the MAC address correctly before
- * booting into Linux.
- *
- * - It must always set the MAC address after reset, even if it doesn't
- *   happen to access the Ethernet while it's booting.  Some versions of
- *   U-Boot on the AT91RM9200-DK do not do this.
- *
- * - Likewise it must store the addresses in the correct byte order.
- *   MicroMonitor (uMon) on the CSB337 does this incorrectly (and
- *   continues to do so, for bug-compatibility).
- */
-
-static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
-{
-	char addr[6];
-
-	if (machine_is_csb337()) {
-		addr[5] = (lo & 0xff);			/* The CSB337 bootloader stores the MAC the wrong-way around */
-		addr[4] = (lo & 0xff00) >> 8;
-		addr[3] = (lo & 0xff0000) >> 16;
-		addr[2] = (lo & 0xff000000) >> 24;
-		addr[1] = (hi & 0xff);
-		addr[0] = (hi & 0xff00) >> 8;
-	}
-	else {
-		addr[0] = (lo & 0xff);
-		addr[1] = (lo & 0xff00) >> 8;
-		addr[2] = (lo & 0xff0000) >> 16;
-		addr[3] = (lo & 0xff000000) >> 24;
-		addr[4] = (hi & 0xff);
-		addr[5] = (hi & 0xff00) >> 8;
-	}
-
-	if (is_valid_ether_addr(addr)) {
-		memcpy(dev->dev_addr, &addr, 6);
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * Set the ethernet MAC address in dev->dev_addr
- */
-static void __init get_mac_address(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-
-	/* Check Specific-Address 1 */
-	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
-		return;
-	/* Check Specific-Address 2 */
-	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
-		return;
-	/* Check Specific-Address 3 */
-	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
-		return;
-	/* Check Specific-Address 4 */
-	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
-		return;
-
-	printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
-}
-
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
-static void update_mac_address(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-
-	at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
-	at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
-
-	at91_emac_write(lp, AT91_EMAC_SA2L, 0);
-	at91_emac_write(lp, AT91_EMAC_SA2H, 0);
-}
-
-/*
- * Store the new hardware address in dev->dev_addr, and update the MAC.
- */
-static int set_mac_address(struct net_device *dev, void* addr)
-{
-	struct sockaddr *address = addr;
-
-	if (!is_valid_ether_addr(address->sa_data))
-		return -EADDRNOTAVAIL;
-
-	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
-	update_mac_address(dev);
-
-	printk("%s: Setting MAC address to %pM\n", dev->name,
-	       dev->dev_addr);
-
-	return 0;
-}
-
-static int inline hash_bit_value(int bitnr, __u8 *addr)
-{
-	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
-		return 1;
-	return 0;
-}
-
-/*
- * The hash address register is 64 bits long and takes up two locations in the memory map.
- * The least significant bits are stored in EMAC_HSL and the most significant
- * bits in EMAC_HSH.
- *
- * The unicast hash enable and the multicast hash enable bits in the network configuration
- *  register enable the reception of hash matched frames. The destination address is
- *  reduced to a 6 bit index into the 64 bit hash register using the following hash function.
- * The hash function is an exclusive or of every sixth bit of the destination address.
- *   hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
- *   hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
- *   hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
- *   hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
- *   hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
- *   hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
- * da[0] represents the least significant bit of the first byte received, that is, the multicast/
- *  unicast indicator, and da[47] represents the most significant bit of the last byte
- *  received.
- * If the hash index points to a bit that is set in the hash register then the frame will be
- *  matched according to whether the frame is multicast or unicast.
- * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
- *  the hash index points to a bit set in the hash register.
- * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
- *  hash index points to a bit set in the hash register.
- * To receive all multicast frames, the hash register should be set with all ones and the
- *  multicast hash enable bit should be set in the network configuration register.
- */
-
-/*
- * Return the hash index value for the specified address.
- */
-static int hash_get_index(__u8 *addr)
-{
-	int i, j, bitval;
-	int hash_index = 0;
-
-	for (j = 0; j < 6; j++) {
-		for (i = 0, bitval = 0; i < 8; i++)
-			bitval ^= hash_bit_value(i*6 + j, addr);
-
-		hash_index |= (bitval << j);
-	}
-
-	return hash_index;
-}
-
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
-static void at91ether_sethashtable(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	struct netdev_hw_addr *ha;
-	unsigned long mc_filter[2];
-	unsigned int bitnr;
-
-	mc_filter[0] = mc_filter[1] = 0;
-
-	netdev_for_each_mc_addr(ha, dev) {
-		bitnr = hash_get_index(ha->addr);
-		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
-	}
-
-	at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
-	at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
-}
-
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
-static void at91ether_set_multicast_list(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned long cfg;
-
-	cfg = at91_emac_read(lp, AT91_EMAC_CFG);
-
-	if (dev->flags & IFF_PROMISC)			/* Enable promiscuous mode */
-		cfg |= AT91_EMAC_CAF;
-	else if (dev->flags & (~IFF_PROMISC))		/* Disable promiscuous mode */
-		cfg &= ~AT91_EMAC_CAF;
-
-	if (dev->flags & IFF_ALLMULTI) {		/* Enable all multicast mode */
-		at91_emac_write(lp, AT91_EMAC_HSH, -1);
-		at91_emac_write(lp, AT91_EMAC_HSL, -1);
-		cfg |= AT91_EMAC_MTI;
-	} else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
-		at91ether_sethashtable(dev);
-		cfg |= AT91_EMAC_MTI;
-	} else if (dev->flags & (~IFF_ALLMULTI)) {	/* Disable all multicast mode */
-		at91_emac_write(lp, AT91_EMAC_HSH, 0);
-		at91_emac_write(lp, AT91_EMAC_HSL, 0);
-		cfg &= ~AT91_EMAC_MTI;
-	}
-
-	at91_emac_write(lp, AT91_EMAC_CFG, cfg);
-}
-
-/* ......................... ETHTOOL SUPPORT ........................... */
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned int value;
-
-	read_phy(lp, phy_id, location, &value);
-	return value;
-}
-
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-{
-	struct at91_private *lp = netdev_priv(dev);
-
-	write_phy(lp, phy_id, location, value);
-}
-
-static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	int ret;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	ret = mii_ethtool_gset(&lp->mii, cmd);
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	if (lp->phy_media == PORT_FIBRE) {		/* override media type since mii.c doesn't know */
-		cmd->supported = SUPPORTED_FIBRE;
-		cmd->port = PORT_FIBRE;
-	}
-
-	return ret;
-}
-
-static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	int ret;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	ret = mii_ethtool_sset(&lp->mii, cmd);
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	return ret;
-}
-
-static int at91ether_nwayreset(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	int ret;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-
-	ret = mii_nway_restart(&lp->mii);
-
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	return ret;
-}
-
-static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops at91ether_ethtool_ops = {
-	.get_settings	= at91ether_get_settings,
-	.set_settings	= at91ether_set_settings,
-	.get_drvinfo	= at91ether_get_drvinfo,
-	.nway_reset	= at91ether_nwayreset,
-	.get_link	= ethtool_op_get_link,
-};
-
-static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	int res;
-
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-	res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	return res;
-}
-
-/* ................................ MAC ................................ */
-
-/*
- * Initialize and start the Receiver and Transmit subsystems
- */
-static void at91ether_start(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	struct recv_desc_bufs *dlist, *dlist_phys;
+	struct macb *lp = netdev_priv(dev);
+	dma_addr_t addr;
+	u32 ctl;
 	int i;
-	unsigned long ctl;
 
-	dlist = lp->dlist;
-	dlist_phys = lp->dlist_phys;
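+	/* Allocate a coherent DMA area for the RX descriptor ring */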
+	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+					MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+					&lp->rx_ring_dma, GFP_KERNEL);
+	if (!lp->rx_ring) {
+		netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+		return -ENOMEM;
+	}
 
+	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+					MAX_RX_DESCR * MAX_RBUFF_SZ,
+					&lp->rx_buffers_dma, GFP_KERNEL);
+	if (!lp->rx_buffers) {
+		netdev_err(dev, "unable to alloc rx data DMA buffer\n");
+
+		dma_free_coherent(&lp->pdev->dev,
+					MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+					lp->rx_ring, lp->rx_ring_dma);
+		lp->rx_ring = NULL;
+		return -ENOMEM;
+	}
+
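+	/* Point each RX descriptor at its slice of the contiguous buffer area */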
+	addr = lp->rx_buffers_dma;
 	for (i = 0; i < MAX_RX_DESCR; i++) {
-		dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
-		dlist->descriptors[i].size = 0;
+		lp->rx_ring[i].addr = addr;
+		lp->rx_ring[i].ctrl = 0;
+		addr += MAX_RBUFF_SZ;
 	}
 
 	/* Set the Wrap bit on the last descriptor */
-	dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
+	lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
-	lp->rxBuffIndex = 0;
+	lp->rx_tail = 0;
 
 	/* Program address of descriptor list in Rx Buffer Queue register */
-	at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
+	macb_writel(lp, RBQP, lp->rx_ring_dma);
 
 	/* Enable Receive and Transmit */
-	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
-}
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
-/*
- * Open the ethernet interface
- */
-static int at91ether_open(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned long ctl;
-
-	if (!is_valid_ether_addr(dev->dev_addr))
-		return -EADDRNOTAVAIL;
-
-	clk_enable(lp->ether_clk);		/* Re-enable Peripheral clock */
-
-	/* Clear internal statistics */
-	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
-
-	/* Update the MAC address (incase user has changed it) */
-	update_mac_address(dev);
-
-	/* Enable PHY interrupt */
-	enable_phyirq(dev);
-
-	/* Enable MAC interrupts */
-	at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
-				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
-				| AT91_EMAC_ROVR | AT91_EMAC_ABT);
-
-	/* Determine current link speed */
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-	update_linkspeed(dev, 0);
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-
-	at91ether_start(dev);
-	netif_start_queue(dev);
 	return 0;
 }
 
-/*
- * Close the interface
- */
+/* Open the ethernet interface */
+static int at91ether_open(struct net_device *dev)
+{
+	struct macb *lp = netdev_priv(dev);
+	u32 ctl;
+	int ret;
+
+	/* Clear internal statistics */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+	macb_set_hwaddr(lp);
+
+	ret = at91ether_start(dev);
+	if (ret)
+		return ret;
+
+	/* Enable MAC interrupts */
+	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR)	|
+			     MACB_BIT(HRESP));
+
+	/* schedule a link state check */
+	phy_start(lp->phy_dev);
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/* Close the interface */
 static int at91ether_close(struct net_device *dev)
 {
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned long ctl;
+	struct macb *lp = netdev_priv(dev);
+	u32 ctl;
 
 	/* Disable Receiver and Transmitter */
-	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-	at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
-
-	/* Disable PHY interrupt */
-	disable_phyirq(dev);
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
 
 	/* Disable MAC interrupts */
-	at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
-				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
-				| AT91_EMAC_ROVR | AT91_EMAC_ABT);
+	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR) |
+			     MACB_BIT(HRESP));
 
 	netif_stop_queue(dev);
 
-	clk_disable(lp->ether_clk);		/* Disable Peripheral clock */
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				lp->rx_ring, lp->rx_ring_dma);
+	lp->rx_ring = NULL;
+
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * MAX_RBUFF_SZ,
+				lp->rx_buffers, lp->rx_buffers_dma);
+	lp->rx_buffers = NULL;
 
 	return 0;
 }
 
-/*
- * Transmit packet.
- */
+/* Transmit packet */
 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct at91_private *lp = netdev_priv(dev);
+	struct macb *lp = netdev_priv(dev);
 
-	if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
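+	/* Only queue a frame when the controller reports no transmit buffer queued (BNQ set) */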
+	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
 		netif_stop_queue(dev);
 
 		/* Store packet information (to free when Tx completed) */
 		lp->skb = skb;
 		lp->skb_length = skb->len;
-		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
-		dev->stats.tx_bytes += skb->len;
+		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+							DMA_TO_DEVICE);
 
 		/* Set address of the data in the Transmit Address register */
-		at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
+		macb_writel(lp, TAR, lp->skb_physaddr);
 		/* Set length of the packet in the Transmit Control register */
-		at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
+		macb_writel(lp, TCR, skb->len);
 
 	} else {
-		printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
-		return NETDEV_TX_BUSY;	/* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
-				on this skb, he also reports -ENETDOWN and printk's, so either
-				we free and return(0) or don't free and return 1 */
+		netdev_err(dev, "%s called, but device is busy!\n", __func__);
+		return NETDEV_TX_BUSY;
 	}
 
 	return NETDEV_TX_OK;
 }
 
-/*
- * Update the current statistics from the internal statistics registers.
- */
-static struct net_device_stats *at91ether_stats(struct net_device *dev)
-{
-	struct at91_private *lp = netdev_priv(dev);
-	int ale, lenerr, seqe, lcol, ecol;
-
-	if (netif_running(dev)) {
-		dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK);	/* Good frames received */
-		ale = at91_emac_read(lp, AT91_EMAC_ALE);
-		dev->stats.rx_frame_errors += ale;				/* Alignment errors */
-		lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
-		dev->stats.rx_length_errors += lenerr;				/* Excessive Length or Undersize Frame error */
-		seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
-		dev->stats.rx_crc_errors += seqe;				/* CRC error */
-		dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
-		dev->stats.rx_errors += (ale + lenerr + seqe
-			+ at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
-
-		dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA);	/* Frames successfully transmitted */
-		dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE);	/* Transmit FIFO underruns */
-		dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE);	/* Carrier Sense errors */
-		dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
-
-		lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
-		ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
-		dev->stats.tx_window_errors += lcol;			/* Late collisions */
-		dev->stats.tx_aborted_errors += ecol;			/* 16 collisions */
-
-		dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
-	}
-	return &dev->stats;
-}
-
-/*
- * Extract received frame from buffer descriptors and sent to upper layers.
+/* Extract received frames from buffer descriptors and send them to upper layers.
  * (Called from interrupt context)
  */
 static void at91ether_rx(struct net_device *dev)
 {
-	struct at91_private *lp = netdev_priv(dev);
-	struct recv_desc_bufs *dlist;
+	struct macb *lp = netdev_priv(dev);
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	dlist = lp->dlist;
-	while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
-		p_recv = dlist->recv_buf[lp->rxBuffIndex];
-		pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff;	/* Length of frame including FCS */
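+	/* Walk the RX ring, handing each completed frame (RX_USED set) to the network stack */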
+	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+		p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
-		if (skb != NULL) {
+		if (skb) {
 			skb_reserve(skb, 2);
 			memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
 			skb->protocol = eth_type_trans(skb, dev);
-			dev->stats.rx_bytes += pktlen;
+			lp->stats.rx_packets++;
+			lp->stats.rx_bytes += pktlen;
 			netif_rx(skb);
-		}
-		else {
-			dev->stats.rx_dropped += 1;
-			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+		} else {
+			lp->stats.rx_dropped++;
+			netdev_notice(dev, "Memory squeeze, dropping packet.\n");
 		}
 
-		if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
-			dev->stats.multicast++;
+		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+			lp->stats.multicast++;
 
-		dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE;	/* reset ownership bit */
-		if (lp->rxBuffIndex == MAX_RX_DESCR-1)				/* wrap after last buffer */
-			lp->rxBuffIndex = 0;
+		/* reset ownership bit */
+		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+		/* wrap after last buffer */
+		if (lp->rx_tail == MAX_RX_DESCR - 1)
+			lp->rx_tail = 0;
 		else
-			lp->rxBuffIndex++;
+			lp->rx_tail++;
 	}
 }
 
-/*
- * MAC interrupt handler
- */
+/* MAC interrupt handler */
 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct at91_private *lp = netdev_priv(dev);
-	unsigned long intstatus, ctl;
+	struct net_device *dev = dev_id;
+	struct macb *lp = netdev_priv(dev);
+	u32 intstatus, ctl;
 
 	/* MAC Interrupt Status register indicates what interrupts are pending.
-	   It is automatically cleared once read. */
-	intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
+	 * It is automatically cleared once read.
+	 */
+	intstatus = macb_readl(lp, ISR);
 
-	if (intstatus & AT91_EMAC_RCOM)		/* Receive complete */
+	/* Receive complete */
+	if (intstatus & MACB_BIT(RCOMP))
 		at91ether_rx(dev);
 
-	if (intstatus & AT91_EMAC_TCOM) {	/* Transmit complete */
-		/* The TCOM bit is set even if the transmission failed. */
-		if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
-			dev->stats.tx_errors += 1;
+	/* Transmit complete */
+	if (intstatus & MACB_BIT(TCOMP)) {
+		/* The TCOM bit is set even if the transmission failed */
+		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+			lp->stats.tx_errors++;
 
 		if (lp->skb) {
 			dev_kfree_skb_irq(lp->skb);
 			lp->skb = NULL;
 			dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+			lp->stats.tx_packets++;
+			lp->stats.tx_bytes += lp->skb_length;
 		}
 		netif_wake_queue(dev);
 	}
 
-	/* Work-around for Errata #11 */
-	if (intstatus & AT91_EMAC_RBNA) {
-		ctl = at91_emac_read(lp, AT91_EMAC_CTL);
-		at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
-		at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
+	/* Work-around for EMAC Errata section 41.3.1 */
+	if (intstatus & MACB_BIT(RXUBR)) {
+		ctl = macb_readl(lp, NCR);
+		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
 	}
 
-	if (intstatus & AT91_EMAC_ROVR)
-		printk("%s: ROVR error\n", dev->name);
+	if (intstatus & MACB_BIT(ISR_ROVR))
+		netdev_err(dev, "ROVR error\n");
 
 	return IRQ_HANDLED;
 }
@@ -1000,10 +286,10 @@
 	.ndo_open		= at91ether_open,
 	.ndo_stop		= at91ether_close,
 	.ndo_start_xmit		= at91ether_start_xmit,
-	.ndo_get_stats		= at91ether_stats,
-	.ndo_set_rx_mode	= at91ether_set_multicast_list,
-	.ndo_set_mac_address	= set_mac_address,
-	.ndo_do_ioctl		= at91ether_ioctl,
+	.ndo_get_stats		= macb_get_stats,
+	.ndo_set_rx_mode	= macb_set_rx_mode,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_do_ioctl		= macb_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1011,197 +297,160 @@
 #endif
 };
 
-/*
- * Detect the PHY type, and its address.
- */
-static int __init at91ether_phy_detect(struct at91_private *lp)
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+	{ .compatible = "cdns,at91rm9200-emac" },
+	{ .compatible = "cdns,emac" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
 {
-	unsigned int phyid1, phyid2;
-	unsigned long phy_id;
-	unsigned short phy_address = 0;
+	struct device_node *np = pdev->dev.of_node;
 
-	while (phy_address < PHY_MAX_ADDR) {
-		/* Read the PHY ID registers */
-		enable_mdi(lp);
-		read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
-		read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
-		disable_mdi(lp);
+	if (np)
+		return of_get_phy_mode(np);
 
-		phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
-		switch (phy_id) {
-			case MII_DM9161_ID:		/* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
-			case MII_DM9161A_ID:		/* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
-			case MII_LXT971A_ID:		/* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
-			case MII_RTL8201_ID:		/* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
-			case MII_BCM5221_ID:		/* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
-			case MII_DP83847_ID:		/* National Semiconductor DP83847:  */
-			case MII_DP83848_ID:		/* National Semiconductor DP83848:  */
-			case MII_AC101L_ID:		/* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
-			case MII_KS8721_ID:		/* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
-			case MII_T78Q21x3_ID:		/* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
-			case MII_LAN83C185_ID:		/* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
-				/* store detected values */
-				lp->phy_type = phy_id;		/* Type of PHY connected */
-				lp->phy_address = phy_address;	/* MDI address of PHY */
-				return 1;
-		}
-
-		phy_address++;
-	}
-
-	return 0;		/* not detected */
+	return -ENODEV;
 }
 
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+	struct device_node *np = bp->pdev->dev.of_node;
 
-/*
- * Detect MAC & PHY and perform ethernet interface initialization
- */
+	if (np) {
+		const char *mac = of_get_mac_address(np);
+		if (mac) {
+			memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+#else
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+	return -ENODEV;
+}
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
 static int __init at91ether_probe(struct platform_device *pdev)
 {
 	struct macb_platform_data *board_data = pdev->dev.platform_data;
 	struct resource *regs;
 	struct net_device *dev;
-	struct at91_private *lp;
+	struct phy_device *phydev;
+	struct pinctrl *pinctrl;
+	struct macb *lp;
 	int res;
+	u32 reg;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!regs)
 		return -ENOENT;
 
-	dev = alloc_etherdev(sizeof(struct at91_private));
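+	/* Select the default pin configuration; defer probing if pinctrl is not ready, otherwise just warn */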
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		res = PTR_ERR(pinctrl);
+		if (res == -EPROBE_DEFER)
+			return res;
+
+		dev_warn(&pdev->dev, "No pinctrl provided\n");
+	}
+
+	dev = alloc_etherdev(sizeof(struct macb));
 	if (!dev)
 		return -ENOMEM;
 
 	lp = netdev_priv(dev);
-	lp->board_data = *board_data;
+	lp->pdev = pdev;
+	lp->dev = dev;
 	spin_lock_init(&lp->lock);
 
-	dev->base_addr = regs->start;		/* physical base address */
-	lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
-	if (!lp->emac_base) {
+	/* physical base address */
+	dev->base_addr = regs->start;
+	lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!lp->regs) {
 		res = -ENOMEM;
 		goto err_free_dev;
 	}
 
 	/* Clock */
-	lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
-	if (IS_ERR(lp->ether_clk)) {
-		res = PTR_ERR(lp->ether_clk);
-		goto err_ioumap;
+	lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+	if (IS_ERR(lp->pclk)) {
+		res = PTR_ERR(lp->pclk);
+		goto err_free_dev;
 	}
-	clk_enable(lp->ether_clk);
+	clk_enable(lp->pclk);
 
 	/* Install the interrupt handler */
 	dev->irq = platform_get_irq(pdev, 0);
-	if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
-		res = -EBUSY;
+	res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+	if (res)
 		goto err_disable_clock;
-	}
-
-	/* Allocate memory for DMA Receive descriptors */
-	lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
-	if (lp->dlist == NULL) {
-		res = -ENOMEM;
-		goto err_free_irq;
-	}
 
 	ether_setup(dev);
 	dev->netdev_ops = &at91ether_netdev_ops;
-	dev->ethtool_ops = &at91ether_ethtool_ops;
+	dev->ethtool_ops = &macb_ethtool_ops;
 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	get_mac_address(dev);		/* Get ethernet address and store it in dev->dev_addr */
-	update_mac_address(dev);	/* Program ethernet address into MAC */
+	res = at91ether_get_hwaddr_dt(lp);
+	if (res < 0)
+		macb_get_hwaddr(lp);
 
-	at91_emac_write(lp, AT91_EMAC_CTL, 0);
-
-	if (board_data->is_rmii)
-		at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
-	else
-		at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
-
-	/* Detect PHY */
-	if (!at91ether_phy_detect(lp)) {
-		printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
-		res = -ENODEV;
-		goto err_free_dmamem;
+	res = at91ether_get_phy_mode_dt(pdev);
+	if (res < 0) {
+		if (board_data && board_data->is_rmii)
+			lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+		else
+			lp->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		lp->phy_interface = res;
 	}
 
-	initialize_phy(lp);
+	macb_writel(lp, NCR, 0);
 
-	lp->mii.dev = dev;		/* Support for ethtool */
-	lp->mii.mdio_read = mdio_read;
-	lp->mii.mdio_write = mdio_write;
-	lp->mii.phy_id = lp->phy_address;
-	lp->mii.phy_id_mask = 0x1f;
-	lp->mii.reg_num_mask = 0x1f;
+	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+	if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+		reg |= MACB_BIT(RM9200_RMII);
+
+	macb_writel(lp, NCFGR, reg);
 
 	/* Register the network interface */
 	res = register_netdev(dev);
 	if (res)
-		goto err_free_dmamem;
+		goto err_disable_clock;
 
-	/* Determine current link speed */
-	spin_lock_irq(&lp->lock);
-	enable_mdi(lp);
-	update_linkspeed(dev, 0);
-	disable_mdi(lp);
-	spin_unlock_irq(&lp->lock);
-	netif_carrier_off(dev);		/* will be enabled in open() */
+	if (macb_mii_init(lp) != 0)
+		goto err_out_unregister_netdev;
 
-	/* If board has no PHY IRQ, use a timer to poll the PHY */
-	if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
-		gpio_request(board_data->phy_irq_pin, "ethernet_phy");
-	} else {
-		/* If board has no PHY IRQ, use a timer to poll the PHY */
-		init_timer(&lp->check_timer);
-		lp->check_timer.data = (unsigned long)dev;
-		lp->check_timer.function = at91ether_check_link;
-	}
+	/* will be enabled in open() */
+	netif_carrier_off(dev);
+
+	phydev = lp->phy_dev;
+	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+				phydev->drv->name, dev_name(&phydev->dev),
+				phydev->irq);
 
 	/* Display ethernet banner */
-	printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
-	       dev->name, (uint) dev->base_addr, dev->irq,
-	       at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
-	       at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
-	       dev->dev_addr);
-	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
-		printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
-	else if (lp->phy_type == MII_LXT971A_ID)
-		printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
-	else if (lp->phy_type == MII_RTL8201_ID)
-		printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
-	else if (lp->phy_type == MII_BCM5221_ID)
-		printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
-	else if (lp->phy_type == MII_DP83847_ID)
-		printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
-	else if (lp->phy_type == MII_DP83848_ID)
-		printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
-	else if (lp->phy_type == MII_AC101L_ID)
-		printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
-	else if (lp->phy_type == MII_KS8721_ID)
-		printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
-	else if (lp->phy_type == MII_T78Q21x3_ID)
-		printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
-	else if (lp->phy_type == MII_LAN83C185_ID)
-		printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
-
-	clk_disable(lp->ether_clk);					/* Disable Peripheral clock */
+	netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+				dev->base_addr, dev->irq, dev->dev_addr);
 
 	return 0;
 
-
-err_free_dmamem:
-	platform_set_drvdata(pdev, NULL);
-	dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
-err_free_irq:
-	free_irq(dev->irq, dev);
+err_out_unregister_netdev:
+	unregister_netdev(dev);
 err_disable_clock:
-	clk_disable(lp->ether_clk);
-	clk_put(lp->ether_clk);
-err_ioumap:
-	iounmap(lp->emac_base);
+	clk_disable(lp->pclk);
 err_free_dev:
 	free_netdev(dev);
 	return res;
@@ -1210,38 +459,33 @@
 static int __devexit at91ether_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
-	struct at91_private *lp = netdev_priv(dev);
+	struct macb *lp = netdev_priv(dev);
 
-	if (gpio_is_valid(lp->board_data.phy_irq_pin))
-		gpio_free(lp->board_data.phy_irq_pin);
+	if (lp->phy_dev)
+		phy_disconnect(lp->phy_dev);
 
+	mdiobus_unregister(lp->mii_bus);
+	kfree(lp->mii_bus->irq);
+	mdiobus_free(lp->mii_bus);
 	unregister_netdev(dev);
-	free_irq(dev->irq, dev);
-	dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
-	clk_put(lp->ether_clk);
-
-	platform_set_drvdata(pdev, NULL);
+	clk_disable(lp->pclk);
 	free_netdev(dev);
+	platform_set_drvdata(pdev, NULL);
+
 	return 0;
 }
 
 #ifdef CONFIG_PM
-
 static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
 	struct net_device *net_dev = platform_get_drvdata(pdev);
-	struct at91_private *lp = netdev_priv(net_dev);
+	struct macb *lp = netdev_priv(net_dev);
 
 	if (netif_running(net_dev)) {
-		if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
-			int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
-			disable_irq(phy_irq);
-		}
-
 		netif_stop_queue(net_dev);
 		netif_device_detach(net_dev);
 
-		clk_disable(lp->ether_clk);
+		clk_disable(lp->pclk);
 	}
 	return 0;
 }
@@ -1249,22 +493,16 @@
 static int at91ether_resume(struct platform_device *pdev)
 {
 	struct net_device *net_dev = platform_get_drvdata(pdev);
-	struct at91_private *lp = netdev_priv(net_dev);
+	struct macb *lp = netdev_priv(net_dev);
 
 	if (netif_running(net_dev)) {
-		clk_enable(lp->ether_clk);
+		clk_enable(lp->pclk);
 
 		netif_device_attach(net_dev);
 		netif_start_queue(net_dev);
-
-		if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
-			int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
-			enable_irq(phy_irq);
-		}
 	}
 	return 0;
 }
-
 #else
 #define at91ether_suspend	NULL
 #define at91ether_resume	NULL
@@ -1275,8 +513,9 @@
 	.suspend	= at91ether_suspend,
 	.resume		= at91ether_resume,
 	.driver		= {
-		.name	= DRV_NAME,
+		.name	= "at91_ether",
 		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(at91ether_dt_ids),
 	},
 };
 
@@ -1296,4 +535,4 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
 MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
deleted file mode 100644
index 0ef6328..0000000
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- *  Copyright (C) SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef AT91_ETHERNET
-#define AT91_ETHERNET
-
-
-/* Davicom 9161 PHY */
-#define MII_DM9161_ID		0x0181b880
-#define MII_DM9161A_ID		0x0181b8a0
-#define MII_DSCR_REG		16
-#define MII_DSCSR_REG		17
-#define MII_DSINTR_REG		21
-
-/* Intel LXT971A PHY */
-#define MII_LXT971A_ID		0x001378E0
-#define MII_ISINTE_REG		18
-#define MII_ISINTS_REG		19
-#define MII_LEDCTRL_REG		20
-
-/* Realtek RTL8201 PHY */
-#define MII_RTL8201_ID		0x00008200
-
-/* Broadcom BCM5221 PHY */
-#define MII_BCM5221_ID		0x004061e0
-#define MII_BCMINTR_REG		26
-
-/* National Semiconductor DP83847 */
-#define MII_DP83847_ID		0x20005c30
-
-/* National Semiconductor DP83848 */
-#define MII_DP83848_ID		0x20005c90
-#define MII_DPPHYSTS_REG	16
-#define MII_DPMICR_REG		17
-#define MII_DPMISR_REG		18
-
-/* Altima AC101L PHY */
-#define MII_AC101L_ID		0x00225520
-
-/* Micrel KS8721 PHY */
-#define MII_KS8721_ID		0x00221610
-
-/* Teridian 78Q2123/78Q2133 */
-#define MII_T78Q21x3_ID		0x000e7230
-#define MII_T78Q21INT_REG	17
-
-/* SMSC LAN83C185 */
-#define MII_LAN83C185_ID	0x0007C0A0
-
-/* ........................................................................ */
-
-#define MAX_RBUFF_SZ	0x600		/* 1518 rounded up */
-#define MAX_RX_DESCR	9		/* max number of receive buffers */
-
-#define EMAC_DESC_DONE	0x00000001	/* bit for if DMA is done */
-#define EMAC_DESC_WRAP	0x00000002	/* bit for wrap */
-
-#define EMAC_BROADCAST	0x80000000	/* broadcast address */
-#define EMAC_MULTICAST	0x40000000	/* multicast address */
-#define EMAC_UNICAST	0x20000000	/* unicast address */
-
-struct rbf_t
-{
-	unsigned int addr;
-	unsigned long size;
-};
-
-struct recv_desc_bufs
-{
-	struct rbf_t descriptors[MAX_RX_DESCR];		/* must be on sizeof (rbf_t) boundary */
-	char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ];	/* must be on long boundary */
-};
-
-struct at91_private
-{
-	struct mii_if_info mii;			/* ethtool support */
-	struct macb_platform_data board_data;	/* board-specific
-						 * configuration (shared with
-						 * macb for common data */
-	void __iomem *emac_base;		/* base register address */
-	struct clk *ether_clk;			/* clock */
-
-	/* PHY */
-	unsigned long phy_type;			/* type of PHY (PHY_ID) */
-	spinlock_t lock;			/* lock for MDI interface */
-	short phy_media;			/* media interface type */
-	unsigned short phy_address;		/* 5-bit MDI address of PHY (0..31) */
-	struct timer_list check_timer;		/* Poll link status */
-
-	/* Transmit */
-	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
-	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
-	int skb_length;				/* saved skb length for pci_unmap_single */
-
-	/* Receive */
-	int rxBuffIndex;			/* index into receive descriptor list */
-	struct recv_desc_bufs *dlist;		/* descriptor list address */
-	struct recv_desc_bufs *dlist_phys;	/* descriptor list physical address */
-};
-
-#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b..6a59bce 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -14,8 +14,10 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/circ_buf.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -26,37 +28,74 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "macb.h"
 
 #define RX_BUFFER_SIZE		128
-#define RX_RING_SIZE		512
-#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE		512 /* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET		2
+#define TX_RING_SIZE		128 /* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 
-#define TX_RING_SIZE		128
-#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
-#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
-
-#define TX_RING_GAP(bp)						\
-	(TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp)					\
-	(((bp)->tx_tail <= (bp)->tx_head) ?			\
-	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
-	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))
-
-#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))
-
-/* minimum number of free TX descriptors before waking up TX process */
-#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 
 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 				 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
+					| MACB_BIT(ISR_RLE)		\
+					| MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
-static void __macb_set_hwaddr(struct macb *bp)
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT	1230
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+	dma_addr_t offset;
+
+	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+	return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
+
+void macb_set_hwaddr(struct macb *bp)
 {
 	u32 bottom;
 	u16 top;
@@ -65,31 +104,58 @@
 	macb_or_gem_writel(bp, SA1B, bottom);
 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
 	macb_or_gem_writel(bp, SA1T, top);
-}
 
-static void __init macb_get_hwaddr(struct macb *bp)
+	/* Clear unused address register sets */
+	macb_or_gem_writel(bp, SA2B, 0);
+	macb_or_gem_writel(bp, SA2T, 0);
+	macb_or_gem_writel(bp, SA3B, 0);
+	macb_or_gem_writel(bp, SA3T, 0);
+	macb_or_gem_writel(bp, SA4B, 0);
+	macb_or_gem_writel(bp, SA4T, 0);
+}
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
+
+void macb_get_hwaddr(struct macb *bp)
 {
+	struct macb_platform_data *pdata;
 	u32 bottom;
 	u16 top;
 	u8 addr[6];
+	int i;
 
-	bottom = macb_or_gem_readl(bp, SA1B);
-	top = macb_or_gem_readl(bp, SA1T);
+	pdata = bp->pdev->dev.platform_data;
 
-	addr[0] = bottom & 0xff;
-	addr[1] = (bottom >> 8) & 0xff;
-	addr[2] = (bottom >> 16) & 0xff;
-	addr[3] = (bottom >> 24) & 0xff;
-	addr[4] = top & 0xff;
-	addr[5] = (top >> 8) & 0xff;
+	/* Check all 4 address registers for a valid address */
+	for (i = 0; i < 4; i++) {
+		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+		top = macb_or_gem_readl(bp, SA1T + i * 8);
 
-	if (is_valid_ether_addr(addr)) {
-		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
-	} else {
-		netdev_info(bp->dev, "invalid hw address, using random\n");
-		eth_hw_addr_random(bp->dev);
+		if (pdata && pdata->rev_eth_addr) {
+			addr[5] = bottom & 0xff;
+			addr[4] = (bottom >> 8) & 0xff;
+			addr[3] = (bottom >> 16) & 0xff;
+			addr[2] = (bottom >> 24) & 0xff;
+			addr[1] = top & 0xff;
+			addr[0] = (top & 0xff00) >> 8;
+		} else {
+			addr[0] = bottom & 0xff;
+			addr[1] = (bottom >> 8) & 0xff;
+			addr[2] = (bottom >> 16) & 0xff;
+			addr[3] = (bottom >> 24) & 0xff;
+			addr[4] = top & 0xff;
+			addr[5] = (top >> 8) & 0xff;
+		}
+
+		if (is_valid_ether_addr(addr)) {
+			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+			return;
+		}
 	}
+
+	netdev_info(bp->dev, "invalid hw address, using random\n");
+	eth_hw_addr_random(bp->dev);
 }
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
 
 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
@@ -152,13 +218,17 @@
 
 			reg = macb_readl(bp, NCFGR);
 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+			if (macb_is_gem(bp))
+				reg &= ~GEM_BIT(GBE);
 
 			if (phydev->duplex)
 				reg |= MACB_BIT(FD);
 			if (phydev->speed == SPEED_100)
 				reg |= MACB_BIT(SPD);
+			if (phydev->speed == SPEED_1000)
+				reg |= GEM_BIT(GBE);
 
-			macb_writel(bp, NCFGR, reg);
+			macb_or_gem_writel(bp, NCFGR, reg);
 
 			bp->speed = phydev->speed;
 			bp->duplex = phydev->duplex;
@@ -196,7 +266,9 @@
 static int macb_mii_probe(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct macb_platform_data *pdata;
 	struct phy_device *phydev;
+	int phy_irq;
 	int ret;
 
 	phydev = phy_find_first(bp->mii_bus);
@@ -205,7 +277,14 @@
 		return -1;
 	}
 
-	/* TODO : add pin_irq */
+	pdata = dev_get_platdata(&bp->pdev->dev);
+	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		if (!ret) {
+			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+		}
+	}
 
 	/* attach the mac to the phy */
 	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
@@ -216,7 +295,10 @@
 	}
 
 	/* mask with MAC supported features */
-	phydev->supported &= PHY_BASIC_FEATURES;
+	if (macb_is_gem(bp))
+		phydev->supported &= PHY_GBIT_FEATURES;
+	else
+		phydev->supported &= PHY_BASIC_FEATURES;
 
 	phydev->advertising = phydev->supported;
 
@@ -228,7 +310,7 @@
 	return 0;
 }
 
-static int macb_mii_init(struct macb *bp)
+int macb_mii_init(struct macb *bp)
 {
 	struct macb_platform_data *pdata;
 	int err = -ENXIO, i;
@@ -284,6 +366,7 @@
 err_out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(macb_mii_init);
 
 static void macb_update_stats(struct macb *bp)
 {
@@ -297,7 +380,103 @@
 		*p += __raw_readl(reg);
 }
 
-static void macb_tx(struct macb *bp)
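+/* Request a transmit halt and poll TSR until TGO clears or MACB_HALT_TIMEOUT expires */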
+static int macb_halt_tx(struct macb *bp)
+{
+	unsigned long	halt_time, timeout;
+	u32		status;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
+
+	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+	do {
+		halt_time = jiffies;
+		status = macb_readl(bp, TSR);
+		if (!(status & MACB_BIT(TGO)))
+			return 0;
+
+		usleep_range(10, 250);
+	} while (time_before(halt_time, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static void macb_tx_error_task(struct work_struct *work)
+{
+	struct macb	*bp = container_of(work, struct macb, tx_error_task);
+	struct macb_tx_skb	*tx_skb;
+	struct sk_buff		*skb;
+	unsigned int		tail;
+
+	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+		    bp->tx_tail, bp->tx_head);
+
+	/* Make sure nobody is trying to queue up new packets */
+	netif_stop_queue(bp->dev);
+
+	/*
+	 * Stop transmission now
+	 * (in case we have just queued new packets)
+	 */
+	if (macb_halt_tx(bp))
+		/* Just complain for now; reinitializing the TX path could be a future improvement */
+		netdev_err(bp->dev, "BUG: halt tx timed out\n");
+
+	/* No need for the lock here as nobody will interrupt us anymore */
+
+	/*
+	 * Treat frames in TX queue including the ones that caused the error.
+	 * Free transmit buffers in upper layer.
+	 */
+	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+		ctrl = desc->ctrl;
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		if (ctrl & MACB_BIT(TX_USED)) {
+			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+				    macb_tx_ring_wrap(tail), skb->data);
+			bp->stats.tx_packets++;
+			bp->stats.tx_bytes += skb->len;
+		} else {
+			/*
+			 * "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about those.
+			 * Statistics are updated by hardware.
+			 */
+			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+				netdev_err(bp->dev,
+					   "BUG: TX buffers exhausted mid-frame\n");
+
+			desc->ctrl = ctrl | MACB_BIT(TX_USED);
+		}
+
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		tx_skb->skb = NULL;
+		dev_kfree_skb(skb);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/* Reinitialize the TX desc queue */
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	/* Make TX ring reflect state of hardware */
+	bp->tx_head = bp->tx_tail = 0;
+
+	/* Now we are ready to start transmission again */
+	netif_wake_queue(bp->dev);
+
+	/* Housework before enabling TX IRQ */
+	macb_writel(bp, TSR, macb_readl(bp, TSR));
+	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
 {
 	unsigned int tail;
 	unsigned int head;
@@ -306,84 +485,43 @@
 	status = macb_readl(bp, TSR);
 	macb_writel(bp, TSR, status);
 
-	netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
-
-	if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
-		int i;
-		netdev_err(bp->dev, "TX %s, resetting buffers\n",
-			   status & MACB_BIT(UND) ?
-			   "underrun" : "retry limit exceeded");
-
-		/* Transfer ongoing, disable transmitter, to avoid confusion */
-		if (status & MACB_BIT(TGO))
-			macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
-
-		head = bp->tx_head;
-
-		/*Mark all the buffer as used to avoid sending a lost buffer*/
-		for (i = 0; i < TX_RING_SIZE; i++)
-			bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
-
-		/* Add wrap bit */
-		bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
-
-		/* free transmit buffer in upper layer*/
-		for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-			struct ring_info *rp = &bp->tx_skb[tail];
-			struct sk_buff *skb = rp->skb;
-
-			BUG_ON(skb == NULL);
-
-			rmb();
-
-			dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
-							 DMA_TO_DEVICE);
-			rp->skb = NULL;
-			dev_kfree_skb_irq(skb);
-		}
-
-		bp->tx_head = bp->tx_tail = 0;
-
-		/* Enable the transmitter again */
-		if (status & MACB_BIT(TGO))
-			macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
-	}
-
-	if (!(status & MACB_BIT(COMP)))
-		/*
-		 * This may happen when a buffer becomes complete
-		 * between reading the ISR and scanning the
-		 * descriptors.  Nothing to worry about.
-		 */
-		return;
+	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+		(unsigned long)status);
 
 	head = bp->tx_head;
-	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-		struct ring_info *rp = &bp->tx_skb[tail];
-		struct sk_buff *skb = rp->skb;
-		u32 bufstat;
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb	*tx_skb;
+		struct sk_buff		*skb;
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
 
-		BUG_ON(skb == NULL);
+		desc = macb_tx_desc(bp, tail);
 
+		/* Make hw descriptor updates visible to CPU */
 		rmb();
-		bufstat = bp->tx_ring[tail].ctrl;
 
-		if (!(bufstat & MACB_BIT(TX_USED)))
+		ctrl = desc->ctrl;
+
+		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
-		netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
-			   tail, skb->data);
-		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+			macb_tx_ring_wrap(tail), skb->data);
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
 				 DMA_TO_DEVICE);
 		bp->stats.tx_packets++;
 		bp->stats.tx_bytes += skb->len;
-		rp->skb = NULL;
+		tx_skb->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
 
 	bp->tx_tail = tail;
-	if (netif_queue_stopped(bp->dev) &&
-	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+	if (netif_queue_stopped(bp->dev)
+			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
+				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
 		netif_wake_queue(bp->dev);
 }
 
@@ -392,31 +530,48 @@
 {
 	unsigned int len;
 	unsigned int frag;
-	unsigned int offset = 0;
+	unsigned int offset;
 	struct sk_buff *skb;
+	struct macb_dma_desc *desc;
 
-	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+	desc = macb_rx_desc(bp, last_frag);
+	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
 
-	netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		   first_frag, last_frag, len);
+	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+		macb_rx_ring_wrap(first_frag),
+		macb_rx_ring_wrap(last_frag), len);
 
-	skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
+	/*
+	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	 * first buffer. Since the header is 14 bytes, this makes the
+	 * payload word-aligned.
+	 *
+	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+	 * the two padding bytes into the skb so that we avoid hitting
+	 * the slowpath in memcpy(), and pull them off afterwards.
+	 */
+	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
 	if (!skb) {
 		bp->stats.rx_dropped++;
-		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
-			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+		for (frag = first_frag; ; frag++) {
+			desc = macb_rx_desc(bp, frag);
+			desc->addr &= ~MACB_BIT(RX_USED);
 			if (frag == last_frag)
 				break;
 		}
+
+		/* Make descriptor updates visible to hardware */
 		wmb();
+
 		return 1;
 	}
 
-	skb_reserve(skb, RX_OFFSET);
+	offset = 0;
+	len += NET_IP_ALIGN;
 	skb_checksum_none_assert(skb);
 	skb_put(skb, len);
 
-	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+	for (frag = first_frag; ; frag++) {
 		unsigned int frag_len = RX_BUFFER_SIZE;
 
 		if (offset + frag_len > len) {
@@ -424,22 +579,24 @@
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-					       (bp->rx_buffers +
-					        (RX_BUFFER_SIZE * frag)),
-					       frag_len);
+				macb_rx_buffer(bp, frag), frag_len);
 		offset += RX_BUFFER_SIZE;
-		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
-		wmb();
+		desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		if (frag == last_frag)
 			break;
 	}
 
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	__skb_pull(skb, NET_IP_ALIGN);
 	skb->protocol = eth_type_trans(skb, bp->dev);
 
 	bp->stats.rx_packets++;
-	bp->stats.rx_bytes += len;
-	netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+	bp->stats.rx_bytes += skb->len;
+	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
 		   skb->len, skb->csum);
 	netif_receive_skb(skb);
 
@@ -452,8 +609,12 @@
 {
 	unsigned int frag;
 
-	for (frag = begin; frag != end; frag = NEXT_RX(frag))
-		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+	for (frag = begin; frag != end; frag++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+	}
+
+	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/*
@@ -466,15 +627,18 @@
 static int macb_rx(struct macb *bp, int budget)
 {
 	int received = 0;
-	unsigned int tail = bp->rx_tail;
+	unsigned int tail;
 	int first_frag = -1;
 
-	for (; budget > 0; tail = NEXT_RX(tail)) {
+	for (tail = bp->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
 		u32 addr, ctrl;
 
+		/* Make hw descriptor updates visible to CPU */
 		rmb();
-		addr = bp->rx_ring[tail].addr;
-		ctrl = bp->rx_ring[tail].ctrl;
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
 
 		if (!(addr & MACB_BIT(RX_USED)))
 			break;
@@ -517,7 +681,7 @@
 
 	work_done = 0;
 
-	netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		   (unsigned long)status, budget);
 
 	work_done = macb_rx(bp, budget);
@@ -552,10 +716,12 @@
 	while (status) {
 		/* close possible race with dev_close */
 		if (unlikely(!netif_running(dev))) {
-			macb_writel(bp, IDR, ~0UL);
+			macb_writel(bp, IDR, -1);
 			break;
 		}
 
+		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
 		if (status & MACB_RX_INT_FLAGS) {
 			/*
 			 * There's no point taking any more interrupts
@@ -567,14 +733,19 @@
 			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
 
 			if (napi_schedule_prep(&bp->napi)) {
-				netdev_dbg(bp->dev, "scheduling RX softirq\n");
+				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
 				__napi_schedule(&bp->napi);
 			}
 		}
 
-		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
-			    MACB_BIT(ISR_RLE)))
-			macb_tx(bp);
+		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+			schedule_work(&bp->tx_error_task);
+			break;
+		}
+
+		if (status & MACB_BIT(TCOMP))
+			macb_tx_interrupt(bp);
 
 		/*
 		 * Link change detection isn't possible with RMII, so we'll
@@ -626,11 +797,13 @@
 	struct macb *bp = netdev_priv(dev);
 	dma_addr_t mapping;
 	unsigned int len, entry;
+	struct macb_dma_desc *desc;
+	struct macb_tx_skb *tx_skb;
 	u32 ctrl;
 	unsigned long flags;
 
-#ifdef DEBUG
-	netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+	netdev_vdbg(bp->dev,
 		   "start_xmit: len %u head %p data %p tail %p end %p\n",
 		   skb->len, skb->head, skb->data,
 		   skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +815,7 @@
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (TX_BUFFS_AVAIL(bp) < 1) {
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +824,16 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = bp->tx_head;
-	netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+	entry = macb_tx_ring_wrap(bp->tx_head);
+	bp->tx_head++;
+	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 len, DMA_TO_DEVICE);
-	bp->tx_skb[entry].skb = skb;
-	bp->tx_skb[entry].mapping = mapping;
-	netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+
+	tx_skb = &bp->tx_skb[entry];
+	tx_skb->skb = skb;
+	tx_skb->mapping = mapping;
+	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
 		   skb->data, (unsigned long)mapping);
 
 	ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +841,18 @@
 	if (entry == (TX_RING_SIZE - 1))
 		ctrl |= MACB_BIT(TX_WRAP);
 
-	bp->tx_ring[entry].addr = mapping;
-	bp->tx_ring[entry].ctrl = ctrl;
-	wmb();
+	desc = &bp->tx_ring[entry];
+	desc->addr = mapping;
+	desc->ctrl = ctrl;
 
-	entry = NEXT_TX(entry);
-	bp->tx_head = entry;
+	/* Make newly initialized descriptor visible to hardware */
+	wmb();
 
 	skb_tx_timestamp(skb);
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (TX_BUFFS_AVAIL(bp) < 1)
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
 		netif_stop_queue(dev);
 
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -712,7 +888,7 @@
 {
 	int size;
 
-	size = TX_RING_SIZE * sizeof(struct ring_info);
+	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
 	bp->tx_skb = kmalloc(size, GFP_KERNEL);
 	if (!bp->tx_skb)
 		goto out_err;
@@ -775,9 +951,6 @@
 
 static void macb_reset_hw(struct macb *bp)
 {
-	/* Make sure we have the write buffer for ourselves */
-	wmb();
-
 	/*
 	 * Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
@@ -788,11 +961,11 @@
 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
 
 	/* Clear all status flags */
-	macb_writel(bp, TSR, ~0UL);
-	macb_writel(bp, RSR, ~0UL);
+	macb_writel(bp, TSR, -1);
+	macb_writel(bp, RSR, -1);
 
 	/* Disable all interrupts */
-	macb_writel(bp, IDR, ~0UL);
+	macb_writel(bp, IDR, -1);
 	macb_readl(bp, ISR);
 }
 
@@ -860,8 +1033,12 @@
 }
 
 /*
- * Configure the receive DMA engine to use the correct receive buffer size.
- * This is a configurable parameter for GEM.
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - allow the use of INCR16 bursts
+ *   (if the FIFO does not support this, the hardware falls back to the default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
  */
 static void macb_configure_dma(struct macb *bp)
 {
@@ -870,6 +1047,8 @@
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 		dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+		dmacfg |= GEM_BF(FBLDO, 16);
+		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		gem_writel(bp, DMACFG, dmacfg);
 	}
 }
@@ -879,9 +1058,10 @@
 	u32 config;
 
 	macb_reset_hw(bp);
-	__macb_set_hwaddr(bp);
+	macb_set_hwaddr(bp);
 
 	config = macb_mdc_clk_div(bp);
+	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
 	config |= MACB_BIT(BIG);		/* Receive oversized frames */
@@ -891,6 +1071,8 @@
 		config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
 	macb_writel(bp, NCFGR, config);
+	bp->speed = SPEED_10;
+	bp->duplex = DUPLEX_HALF;
 
 	macb_configure_dma(bp);
 
@@ -902,13 +1084,8 @@
 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 
 	/* Enable interrupts */
-	macb_writel(bp, IER, (MACB_BIT(RCOMP)
-			      | MACB_BIT(RXUBR)
-			      | MACB_BIT(ISR_TUND)
-			      | MACB_BIT(ISR_RLE)
-			      | MACB_BIT(TXERR)
-			      | MACB_BIT(TCOMP)
-			      | MACB_BIT(ISR_ROVR)
+	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+			      | MACB_TX_INT_FLAGS
 			      | MACB_BIT(HRESP)));
 
 }
@@ -996,7 +1173,7 @@
 /*
  * Enable/Disable promiscuous and multicast modes.
  */
-static void macb_set_rx_mode(struct net_device *dev)
+void macb_set_rx_mode(struct net_device *dev)
 {
 	unsigned long cfg;
 	struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1205,7 @@
 
 	macb_writel(bp, NCFGR, cfg);
 }
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 
 static int macb_open(struct net_device *dev)
 {
@@ -1043,9 +1221,6 @@
 	if (!bp->phy_dev)
 		return -EAGAIN;
 
-	if (!is_valid_ether_addr(dev->dev_addr))
-		return -EADDRNOTAVAIL;
-
 	err = macb_alloc_consistent(bp);
 	if (err) {
 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1135,7 +1310,7 @@
 	return nstat;
 }
 
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
 	struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1356,7 @@
 
 	return nstat;
 }
+EXPORT_SYMBOL_GPL(macb_get_stats);
 
 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -1204,25 +1380,55 @@
 	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void macb_get_drvinfo(struct net_device *dev,
-			     struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
 {
-	struct macb *bp = netdev_priv(dev);
-
-	strcpy(info->driver, bp->pdev->dev.driver->name);
-	strcpy(info->version, "$Revision: 1.14 $");
-	strcpy(info->bus_info, dev_name(&bp->pdev->dev));
+	return MACB_GREGS_NBR * sizeof(u32);
 }
 
-static const struct ethtool_ops macb_ethtool_ops = {
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			  void *p)
+{
+	struct macb *bp = netdev_priv(dev);
+	unsigned int tail, head;
+	u32 *regs_buff = p;
+
+	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+			| MACB_GREGS_VERSION;
+
+	tail = macb_tx_ring_wrap(bp->tx_tail);
+	head = macb_tx_ring_wrap(bp->tx_head);
+
+	regs_buff[0]  = macb_readl(bp, NCR);
+	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
+	regs_buff[2]  = macb_readl(bp, NSR);
+	regs_buff[3]  = macb_readl(bp, TSR);
+	regs_buff[4]  = macb_readl(bp, RBQP);
+	regs_buff[5]  = macb_readl(bp, TBQP);
+	regs_buff[6]  = macb_readl(bp, RSR);
+	regs_buff[7]  = macb_readl(bp, IMR);
+
+	regs_buff[8]  = tail;
+	regs_buff[9]  = head;
+	regs_buff[10] = macb_tx_dma(bp, tail);
+	regs_buff[11] = macb_tx_dma(bp, head);
+
+	if (macb_is_gem(bp)) {
+		regs_buff[12] = gem_readl(bp, USRIO);
+		regs_buff[13] = gem_readl(bp, DMACFG);
+	}
+}
+
+const struct ethtool_ops macb_ethtool_ops = {
 	.get_settings		= macb_get_settings,
 	.set_settings		= macb_set_settings,
-	.get_drvinfo		= macb_get_drvinfo,
+	.get_regs_len		= macb_get_regs_len,
+	.get_regs		= macb_get_regs,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
 };
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
-static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct macb *bp = netdev_priv(dev);
 	struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1441,7 @@
 
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
+EXPORT_SYMBOL_GPL(macb_ioctl);
 
 static const struct net_device_ops macb_netdev_ops = {
 	.ndo_open		= macb_open,
@@ -1306,6 +1513,7 @@
 	struct phy_device *phydev;
 	u32 config;
 	int err = -ENXIO;
+	struct pinctrl *pinctrl;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!regs) {
@@ -1313,6 +1521,15 @@
 		goto err_out;
 	}
 
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		err = PTR_ERR(pinctrl);
+		if (err == -EPROBE_DEFER)
+			goto err_out;
+
+		dev_warn(&pdev->dev, "No pinctrl provided\n");
+	}
+
 	err = -ENOMEM;
 	dev = alloc_etherdev(sizeof(*bp));
 	if (!dev)
@@ -1328,6 +1545,7 @@
 	bp->dev = dev;
 
 	spin_lock_init(&bp->lock);
+	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
 
 	bp->pclk = clk_get(&pdev->dev, "pclk");
 	if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1602,9 @@
 		bp->phy_interface = err;
 	}
 
-	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
 #if defined(CONFIG_ARCH_AT91)
 		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
 					       MACB_BIT(CLKEN)));
@@ -1398,8 +1618,6 @@
 		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
 #endif
 
-	bp->tx_pending = DEF_TX_RING_PENDING;
-
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
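
The macb TX path above drops the old TX_BUFFS_AVAIL()/NEXT_TX() bookkeeping in
favour of free-running tx_head/tx_tail indices that are only masked when they
address the ring (macb_tx_ring_wrap()), with CIRC_SPACE()/CIRC_CNT() doing the
occupancy checks. The following is a minimal userspace sketch of that
accounting, not driver code: the ring size, helper names and main() driver are
stand-ins, and the one real requirement it mirrors is that the ring size be a
power of two.

#include <stdio.h>

#define RING_SIZE	8			/* stand-in for TX_RING_SIZE; must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* same arithmetic as CIRC_CNT()/CIRC_SPACE() from <linux/circ_buf.h> */
static unsigned int ring_count(unsigned int head, unsigned int tail)
{
	return (head - tail) & RING_MASK;	/* descriptors in flight */
}

static unsigned int ring_space(unsigned int head, unsigned int tail)
{
	return (tail - head - 1) & RING_MASK;	/* free slots, one kept unused */
}

/* analogue of macb_tx_ring_wrap(): map a free-running index onto a slot */
static unsigned int ring_wrap(unsigned int index)
{
	return index & RING_MASK;
}

int main(void)
{
	unsigned int head = 0, tail = 0;

	for (int i = 0; i < 3; i++) {		/* queue three descriptors */
		printf("queue at slot %u (space before %u)\n",
		       ring_wrap(head), ring_space(head, tail));
		head++;
	}

	printf("in flight: %u\n", ring_count(head, tail));
	tail++;					/* hardware completed one */
	printf("after completion: %u in flight, %u free\n",
	       ring_count(head, tail), ring_space(head, tail));
	return 0;
}
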
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 335e288..570908b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,10 +10,15 @@
 #ifndef _MACB_H
 #define _MACB_H
 
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
 /* MACB register offsets */
 #define MACB_NCR				0x0000
 #define MACB_NCFGR				0x0004
 #define MACB_NSR				0x0008
+#define MACB_TAR				0x000c /* AT91RM9200 only */
+#define MACB_TCR				0x0010 /* AT91RM9200 only */
 #define MACB_TSR				0x0014
 #define MACB_RBQP				0x0018
 #define MACB_TBQP				0x001c
@@ -69,6 +74,12 @@
 #define GEM_HRT					0x0084
 #define GEM_SA1B				0x0088
 #define GEM_SA1T				0x008C
+#define GEM_SA2B				0x0090
+#define GEM_SA2T				0x0094
+#define GEM_SA3B				0x0098
+#define GEM_SA3T				0x009C
+#define GEM_SA4B				0x00A0
+#define GEM_SA4T				0x00A4
 #define GEM_OTX					0x0100
 #define GEM_DCFG1				0x0280
 #define GEM_DCFG2				0x0284
@@ -133,6 +144,8 @@
 #define MACB_RTY_SIZE				1
 #define MACB_PAE_OFFSET				13
 #define MACB_PAE_SIZE				1
+#define MACB_RM9200_RMII_OFFSET			13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE			1  /* AT91RM9200 only */
 #define MACB_RBOF_OFFSET			14
 #define MACB_RBOF_SIZE				2
 #define MACB_RLCE_OFFSET			16
@@ -145,6 +158,8 @@
 #define MACB_IRXFCS_SIZE			1
 
 /* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET				10
+#define GEM_GBE_SIZE				1
 #define GEM_CLK_OFFSET				18
 #define GEM_CLK_SIZE				3
 #define GEM_DBW_OFFSET				21
@@ -156,8 +171,19 @@
 #define GEM_DBW128				2
 
 /* Bitfields in DMACFG. */
+#define GEM_FBLDO_OFFSET			0
+#define GEM_FBLDO_SIZE				5
+#define GEM_RXBMS_OFFSET			8
+#define GEM_RXBMS_SIZE				2
+#define GEM_TXPBMS_OFFSET			10
+#define GEM_TXPBMS_SIZE				1
+#define GEM_TXCOEN_OFFSET			11
+#define GEM_TXCOEN_SIZE				1
 #define GEM_RXBS_OFFSET				16
 #define GEM_RXBS_SIZE				8
+#define GEM_DDRP_OFFSET				24
+#define GEM_DDRP_SIZE				1
+
 
 /* Bitfields in NSR */
 #define MACB_NSR_LINK_OFFSET			0
@@ -178,6 +204,8 @@
 #define MACB_TGO_SIZE				1
 #define MACB_BEX_OFFSET				4
 #define MACB_BEX_SIZE				1
+#define MACB_RM9200_BNQ_OFFSET			4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE			1 /* AT91RM9200 only */
 #define MACB_COMP_OFFSET			5
 #define MACB_COMP_SIZE				1
 #define MACB_UND_OFFSET				6
@@ -246,6 +274,8 @@
 /* Bitfields in USRIO (AT91) */
 #define MACB_RMII_OFFSET			0
 #define MACB_RMII_SIZE				1
+#define GEM_RGMII_OFFSET			0	/* GEM gigabit mode */
+#define GEM_RGMII_SIZE				1
 #define MACB_CLKEN_OFFSET			1
 #define MACB_CLKEN_SIZE				1
 
@@ -352,7 +382,12 @@
 		__v; \
 	})
 
-struct dma_desc {
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
 };
@@ -417,7 +452,12 @@
 #define MACB_TX_USED_OFFSET			31
 #define MACB_TX_USED_SIZE			1
 
-struct ring_info {
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
 };
@@ -502,12 +542,12 @@
 	void __iomem		*regs;
 
 	unsigned int		rx_tail;
-	struct dma_desc		*rx_ring;
+	struct macb_dma_desc	*rx_ring;
 	void			*rx_buffers;
 
 	unsigned int		tx_head, tx_tail;
-	struct dma_desc		*tx_ring;
-	struct ring_info	*tx_skb;
+	struct macb_dma_desc	*tx_ring;
+	struct macb_tx_skb	*tx_skb;
 
 	spinlock_t		lock;
 	struct platform_device	*pdev;
@@ -515,6 +555,7 @@
 	struct clk		*hclk;
 	struct net_device	*dev;
 	struct napi_struct	napi;
+	struct work_struct	tx_error_task;
 	struct net_device_stats	stats;
 	union {
 		struct macb_stats	macb;
@@ -525,8 +566,6 @@
 	dma_addr_t		tx_ring_dma;
 	dma_addr_t		rx_buffers_dma;
 
-	unsigned int		rx_pending, tx_pending;
-
 	struct mii_bus		*mii_bus;
 	struct phy_device	*phy_dev;
 	unsigned int 		link;
@@ -534,8 +573,22 @@
 	unsigned int 		duplex;
 
 	phy_interface_t		phy_interface;
+
+	/* AT91RM9200 transmit */
+	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
+	int skb_length;				/* saved skb length for pci_unmap_single */
 };
 
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int macb_mii_init(struct macb *bp);
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+struct net_device_stats *macb_get_stats(struct net_device *dev);
+void macb_set_rx_mode(struct net_device *dev);
+void macb_set_hwaddr(struct macb *bp);
+void macb_get_hwaddr(struct macb *bp);
+
 static inline bool macb_is_gem(struct macb *bp)
 {
 	return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
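
The macb.h hunks above add register fields purely as <FIELD>_OFFSET / <FIELD>_SIZE
pairs; they are consumed by the MACB_BIT()/MACB_BF()/MACB_BFEXT() macros already
defined in this header (macb_is_gem() just above shows MACB_BFEXT() in use).
As an illustration only, the snippet below spells out the same insert/extract
arithmetic long-hand; the field_* helpers are invented names for this sketch,
not the driver macros.

#include <stdint.h>
#include <stdio.h>

#define GEM_GBE_OFFSET	10	/* values copied from the hunk above */
#define GEM_GBE_SIZE	1

static uint32_t field_mask(unsigned int offset, unsigned int size)
{
	return ((1u << size) - 1) << offset;
}

/* build a field ready to OR into a register value (MACB_BF-style) */
static uint32_t field_prep(unsigned int offset, unsigned int size, uint32_t val)
{
	return (val << offset) & field_mask(offset, size);
}

/* pull a field back out of a register value (MACB_BFEXT-style) */
static uint32_t field_get(unsigned int offset, unsigned int size, uint32_t reg)
{
	return (reg & field_mask(offset, size)) >> offset;
}

int main(void)
{
	uint32_t ncfgr = 0;

	ncfgr |= field_prep(GEM_GBE_OFFSET, GEM_GBE_SIZE, 1);	/* gigabit enable */
	printf("NCFGR = 0x%08x, GBE = %u\n",
	       (unsigned int)ncfgr,
	       (unsigned int)field_get(GEM_GBE_OFFSET, GEM_GBE_SIZE, ncfgr));
	return 0;
}
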
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b3..b407043 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
 #define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
 #define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
 #define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
+#define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */
 
 /* DMA Normal interrupt */
 #define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
@@ -210,7 +211,7 @@
 #define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */
 
 #define DMA_INTR_NORMAL		(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
-				 DMA_INTR_ENA_TUE)
+				 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
 
 #define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
 				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@
 	struct sk_buff **tx_skbuff;
 	unsigned int tx_head;
 	unsigned int tx_tail;
+	int tx_irq_cnt;
 
 	void __iomem *base;
 	unsigned int dma_buf_sz;
@@ -663,6 +665,7 @@
 {
 	struct xgmac_dma_desc *p;
 	dma_addr_t paddr;
+	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
 	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
 		int entry = priv->rx_head;
@@ -671,13 +674,13 @@
 		p = priv->dma_rx + entry;
 
 		if (priv->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
 			if (unlikely(skb == NULL))
 				break;
 
 			priv->rx_skbuff[entry] = skb;
 			paddr = dma_map_single(priv->device, skb->data,
-					       priv->dma_buf_sz, DMA_FROM_DEVICE);
+					       bufsz, DMA_FROM_DEVICE);
 			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 		}
 
@@ -701,10 +704,10 @@
 	unsigned int bfsize;
 
 	/* Set the Buffer size according to the MTU;
-	 * indeed, in case of jumbo we need to bump-up the buffer sizes.
+	 * The total buffer size including any IP offset must be a multiple
+	 * of 8 bytes.
 	 */
-	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
-		       64);
+	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
 
 	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
 
@@ -845,9 +848,6 @@
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
 	int i;
-	void __iomem *ioaddr = priv->base;
-
-	writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
 
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
@@ -888,7 +888,7 @@
 	}
 
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    TX_THRESH)
+	    MAX_SKB_FRAGS)
 		netif_wake_queue(priv->dev);
 }
 
@@ -965,8 +965,7 @@
 		ctrl |= XGMAC_CONTROL_IPC;
 	writel(ctrl, ioaddr + XGMAC_CONTROL);
 
-	value = DMA_CONTROL_DFF;
-	writel(value, ioaddr + XGMAC_DMA_CONTROL);
+	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
 
 	/* Set the HW DMA mode and the COE */
 	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1059,15 @@
 	struct xgmac_priv *priv = netdev_priv(dev);
 	unsigned int entry;
 	int i;
+	u32 irq_flag;
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	struct xgmac_dma_desc *desc, *first;
 	unsigned int desc_flags;
 	unsigned int len;
 	dma_addr_t paddr;
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    (nfrags + 1)) {
-		writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
-			priv->base + XGMAC_DMA_INTR_ENA);
-		netif_stop_queue(dev);
-		return NETDEV_TX_BUSY;
-	}
+	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
+	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
 
 	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
 		TXDESC_CSUM_ALL : 0;
@@ -1113,9 +1108,9 @@
 	/* Interrupt on completion only for the last segment */
 	if (desc != first)
 		desc_set_tx_owner(desc, desc_flags |
-			TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+			TXDESC_LAST_SEG | irq_flag);
 	else
-		desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+		desc_flags |= TXDESC_LAST_SEG | irq_flag;
 
 	/* Set owner on first desc last to avoid race condition */
 	wmb();
@@ -1124,6 +1119,9 @@
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
 	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+	    MAX_SKB_FRAGS)
+		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
 }
@@ -1139,9 +1137,6 @@
 		struct sk_buff *skb;
 		int frame_len;
 
-		writel(DMA_STATUS_RI | DMA_STATUS_NIS,
-		       priv->base + XGMAC_DMA_STATUS);
-
 		entry = priv->rx_tail;
 		p = priv->dma_rx + entry;
 		if (desc_get_owner(p))
@@ -1180,8 +1175,6 @@
 
 	xgmac_rx_refill(priv);
 
-	writel(1, priv->base + XGMAC_DMA_RX_POLL);
-
 	return count;
 }
 
@@ -1205,7 +1198,7 @@
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 	}
 	return work_done;
 }
@@ -1350,7 +1343,7 @@
 	struct xgmac_priv *priv = netdev_priv(dev);
 	void __iomem *ioaddr = priv->base;
 
-	intr_status = readl(ioaddr + XGMAC_INT_STAT);
+	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
 	if (intr_status & XGMAC_INT_STAT_PMT) {
 		netdev_dbg(priv->dev, "received Magic frame\n");
 		/* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1361,9 @@
 	struct xgmac_extra_stats *x = &priv->xstats;
 
 	/* read the status register (CSR5) */
-	intr_status = readl(priv->base + XGMAC_DMA_STATUS);
-	intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
-	writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
+	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
+	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
 
 	/* It displays the DMA process states (CSR5 register) */
 	/* ABNORMAL interrupts */
@@ -1405,8 +1398,8 @@
 	}
 
 	/* TX/RX NORMAL interrupts */
-	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
-		writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
+		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
 		napi_schedule(&priv->napi);
 	}
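
The xgmac changes above stop asking for a TX-complete interrupt on every frame:
xgmac_xmit() now keeps a tx_irq_cnt counter and sets TXDESC_INTERRUPT only on
every (DMA_TX_RING_SZ / 4)-th descriptor, and the queue is stopped/woken around
MAX_SKB_FRAGS free slots instead of TX_THRESH. A userspace sketch of that
counter logic follows; the 16-entry ring and the flag value are assumptions for
the example, not the driver's real constants.

#include <stdio.h>

#define DMA_TX_RING_SZ		16		/* assumed ring size for the sketch */
#define TXDESC_INTERRUPT	0x80000000u	/* placeholder flag value */

int main(void)
{
	unsigned int tx_irq_cnt = 0;

	for (int frame = 0; frame < 8; frame++) {
		tx_irq_cnt = (tx_irq_cnt + 1) & (DMA_TX_RING_SZ / 4 - 1);
		unsigned int irq_flag = tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

		printf("frame %d: %s\n", frame,
		       irq_flag ? "request TX-complete IRQ"
				: "no IRQ, reap later in bulk");
	}
	return 0;
}
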
 
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 2de50f9..d40c994 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_CHELSIO
 	bool "Chelsio devices"
 	default y
-	depends on PCI || INET
+	depends on PCI
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b63..8c82248 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
 #include <linux/mdio.h>
 #include "version.h"
 
-#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_ALERT(adap, fmt, ...) \
-	dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
 
 /*
  * More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
index 94606f7..1c7b884 100644
--- a/drivers/net/ethernet/cisco/Kconfig
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_CISCO
 	bool "Cisco devices"
 	default y
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index 9cc706a..b63f8d8 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -4,6 +4,6 @@
 
 config ENIC
 	tristate "Cisco VIC Ethernet NIC Support"
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 17ae8c6..9f992b9 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -1910,9 +1910,8 @@
 static int ndevs;
 static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
 
-/* '21' below should really be 'MAX_NUM_EWRK3S' */
 module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
+module_param_array(irq, byte, NULL, 0);
 MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
 MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
 
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a059f0c..2fb01bf 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1758,21 +1758,7 @@
 	.remove		= __devexit_p(rio_remove1),
 };
 
-static int __init
-rio_init (void)
-{
-	return pci_register_driver(&rio_driver);
-}
-
-static void __exit
-rio_exit (void)
-{
-	pci_unregister_driver (&rio_driver);
-}
-
-module_init (rio_init);
-module_exit (rio_exit);
-
+module_pci_driver(rio_driver);
 /*
 
 Compile command:
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 290b26f..feb5095 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -664,9 +664,6 @@
 	if (!bp->phy_dev)
 		return -EAGAIN;
 
-	if (!is_valid_ether_addr(dev->dev_addr))
-		return -EADDRNOTAVAIL;
-
 	napi_enable(&bp->napi);
 	dnet_init_hw(bp);
 
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
index 7a28a64..1b8d638 100644
--- a/drivers/net/ethernet/emulex/Kconfig
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_EMULEX
 	bool "Emulex devices"
 	default y
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 804db04..231129d 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,6 +1,6 @@
 config BE2NET
 	tristate "ServerEngines' 10Gbps NIC - BladeEngine"
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  This driver implements the NIC functionality for ServerEngines'
 	  10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf4c05b..abf26c7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"4.4.31.0u"
+#define DRV_VER			"4.4.161.0u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
@@ -53,6 +53,7 @@
 #define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
 #define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
 #define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
+#define OC_DEVICE_ID6		0x728   /* Device id for VF in SkyHawk */
 #define OC_SUBSYS_DEVICE_ID1	0xE602
 #define OC_SUBSYS_DEVICE_ID2	0xE642
 #define OC_SUBSYS_DEVICE_ID3	0xE612
@@ -71,6 +72,7 @@
 	case BE_DEVICE_ID2:
 		return BE3_NAME;
 	case OC_DEVICE_ID5:
+	case OC_DEVICE_ID6:
 		return OC_NAME_SH;
 	default:
 		return BE_NAME;
@@ -346,7 +348,6 @@
 	struct pci_dev *pdev;
 	struct net_device *netdev;
 
-	u8 __iomem *csr;
 	u8 __iomem *db;		/* Door Bell */
 
 	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
@@ -374,11 +375,8 @@
 	struct be_rx_obj rx_obj[MAX_RX_QS];
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
-	u8 eq_next_idx;
 	struct be_drv_stats drv_stats;
-
 	u16 vlans_added;
-	u16 max_vlans;	/* Number of vlans supported */
 	u8 vlan_tag[VLAN_N_VID];
 	u8 vlan_prio_bmap;	/* Available Priority BitMap */
 	u16 recommended_prio;	/* Recommended Priority */
@@ -391,6 +389,7 @@
 
 	struct delayed_work func_recovery_work;
 	u32 flags;
+	u32 cmd_privileges;
 	/* Ethtool knobs and info */
 	char fw_ver[FW_VER_LEN];
 	int if_handle;		/* Used to configure filtering */
@@ -408,10 +407,8 @@
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool stats_cmd_sent;
-	u8 generation;		/* BladeEngine ASIC generation */
 	u32 if_type;
 	struct {
-		u8 __iomem *base;	/* Door Bell */
 		u32 size;
 		u32 total_size;
 		u64 io_addr;
@@ -434,10 +431,18 @@
 	struct phy_info phy;
 	u8 wol_cap;
 	bool wol;
-	u32 max_pmac_cnt;	/* Max secondary UC MACs programmable */
 	u32 uc_macs;		/* Count of secondary UC MAC programmed */
 	u32 msg_enable;
 	int be_get_temp_freq;
+	u16 max_mcast_mac;
+	u16 max_tx_queues;
+	u16 max_rss_queues;
+	u16 max_rx_queues;
+	u16 max_pmac_cnt;
+	u16 max_vlans;
+	u16 max_event_queues;
+	u32 if_cap_flags;
+	u8 pf_number;
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
@@ -448,21 +453,25 @@
 	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
 		i++, vf_cfg++)
 
-/* BladeEngine Generation numbers */
-#define BE_GEN2 2
-#define BE_GEN3 3
-
 #define ON				1
 #define OFF				0
-#define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
-				 (adapter->pdev->device == OC_DEVICE_ID4))
 
-#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5)
+#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
+				 adapter->pdev->device == OC_DEVICE_ID4)
 
+#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
+				 adapter->pdev->device == OC_DEVICE_ID6)
 
-#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
-				adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
-				(adapter->function_mode & RDMA_ENABLED))
+#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
+				 adapter->pdev->device == OC_DEVICE_ID2)
+
+#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
+				 adapter->pdev->device == OC_DEVICE_ID1)
+
+#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))
+
+#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
+					(adapter->function_mode & RDMA_ENABLED))
 
 extern const struct ethtool_ops be_ethtool_ops;
 
@@ -637,12 +646,6 @@
 	}
 }
 
-static inline bool be_type_2_3(struct be_adapter *adapter)
-{
-	return (adapter->if_type == SLI_INTF_TYPE_2 ||
-		adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
-}
-
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		u16 num_popped);
 extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
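
With adapter->generation removed, be.h now derives the chip family straight
from the PCI device ID through the BE2_chip()/BE3_chip()/BEx_chip()/
skyhawk_chip() macros. The fragment below restates those tests as plain
functions so the logic can be compiled in isolation; only the Skyhawk IDs
(0x720/0x728) are visible in the hunk above, so treat the other ID values here
as illustrative stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BE_DEVICE_ID1	0x212	/* illustrative */
#define BE_DEVICE_ID2	0x221	/* illustrative */
#define OC_DEVICE_ID1	0x700	/* illustrative */
#define OC_DEVICE_ID2	0x710	/* illustrative */
#define OC_DEVICE_ID5	0x720	/* from the hunk above */
#define OC_DEVICE_ID6	0x728	/* from the hunk above */

struct adapter { uint16_t device; };	/* trimmed stand-in for struct be_adapter */

static bool BE2_chip(const struct adapter *a)
{
	return a->device == BE_DEVICE_ID1 || a->device == OC_DEVICE_ID1;
}

static bool BE3_chip(const struct adapter *a)
{
	return a->device == BE_DEVICE_ID2 || a->device == OC_DEVICE_ID2;
}

static bool BEx_chip(const struct adapter *a)
{
	return BE2_chip(a) || BE3_chip(a);
}

static bool skyhawk_chip(const struct adapter *a)
{
	return a->device == OC_DEVICE_ID5 || a->device == OC_DEVICE_ID6;
}

int main(void)
{
	struct adapter a = { .device = OC_DEVICE_ID6 };	/* a Skyhawk VF */

	printf("BE2 %d, BE3 %d, BEx %d, skyhawk %d\n",
	       BE2_chip(&a), BE3_chip(&a), BEx_chip(&a), skyhawk_chip(&a));
	return 0;
}
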
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index af60bb2..f2875aa 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,55 @@
 #include "be.h"
 #include "be_cmds.h"
 
+static struct be_cmd_priv_map cmd_priv_map[] = {
+	{
+		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+		CMD_SUBSYSTEM_ETH,
+		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+	},
+	{
+		OPCODE_COMMON_GET_FLOW_CONTROL,
+		CMD_SUBSYSTEM_COMMON,
+		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
+		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+	},
+	{
+		OPCODE_COMMON_SET_FLOW_CONTROL,
+		CMD_SUBSYSTEM_COMMON,
+		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+	},
+	{
+		OPCODE_ETH_GET_PPORT_STATS,
+		CMD_SUBSYSTEM_ETH,
+		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+	},
+	{
+		OPCODE_COMMON_GET_PHY_DETAILS,
+		CMD_SUBSYSTEM_COMMON,
+		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+	}
+};
+
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
+			   u8 subsystem)
+{
+	int i;
+	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+	u32 cmd_privileges = adapter->cmd_privileges;
+
+	for (i = 0; i < num_entries; i++)
+		if (opcode == cmd_priv_map[i].opcode &&
+		    subsystem == cmd_priv_map[i].subsystem)
+			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
+				return false;
+
+	return true;
+}
+
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
 {
 	return wrb->payload.embedded_payload;
@@ -419,14 +468,13 @@
 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 {
 	u32 sem;
+	u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
+					  SLIPORT_SEMAPHORE_OFFSET_BE;
 
-	if (lancer_chip(adapter))
-		sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
-	else
-		sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+	pci_read_config_dword(adapter->pdev, reg, &sem);
+	*stage = sem & POST_STAGE_MASK;
 
-	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
-	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+	if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
 		return -1;
 	else
 		return 0;
@@ -452,10 +500,33 @@
 	return status;
 }
 
+static bool lancer_provisioning_error(struct be_adapter *adapter)
+{
+	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+		sliport_err1 = ioread32(adapter->db +
+					SLIPORT_ERROR1_OFFSET);
+		sliport_err2 = ioread32(adapter->db +
+					SLIPORT_ERROR2_OFFSET);
+
+		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
+		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
+			return true;
+	}
+	return false;
+}
+
 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
 {
 	int status;
 	u32 sliport_status, err, reset_needed;
+	bool resource_error;
+
+	resource_error = lancer_provisioning_error(adapter);
+	if (resource_error)
+		return -1;
+
 	status = lancer_wait_ready(adapter);
 	if (!status) {
 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -477,6 +548,14 @@
 			status = -1;
 		}
 	}
+	/* Stop error recovery if the error is not recoverable.
+	 * A "no resource" error is temporary and will go away
+	 * once the PF provisions resources.
+	 */
+	resource_error = lancer_provisioning_error(adapter);
+	if (status == -1 && !resource_error)
+		adapter->eeh_error = true;
+
 	return status;
 }
 
@@ -601,6 +680,9 @@
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	struct be_mcc_wrb *wrb;
 
+	if (!mccq->created)
+		return NULL;
+
 	if (atomic_read(&mccq->used) >= mccq->len) {
 		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
 		return NULL;
@@ -1155,8 +1237,7 @@
 	req->id = cpu_to_le16(q->id);
 
 	status = be_mbox_notify_wait(adapter);
-	if (!status)
-		q->created = false;
+	q->created = false;
 
 	mutex_unlock(&adapter->mbox_lock);
 	return status;
@@ -1183,8 +1264,7 @@
 	req->id = cpu_to_le16(q->id);
 
 	status = be_mcc_notify_wait(adapter);
-	if (!status)
-		q->created = false;
+	q->created = false;
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1281,7 +1361,8 @@
 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
 		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
 
-	if (adapter->generation == BE_GEN3)
+	/* version 1 of the cmd is supported by all chips except BE2 */
+	if (!BE2_chip(adapter))
 		hdr->version = 1;
 
 	be_mcc_notify(adapter);
@@ -1301,6 +1382,10 @@
 	struct lancer_cmd_req_pport_stats *req;
 	int status = 0;
 
+	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
+			    CMD_SUBSYSTEM_ETH))
+		return -EPERM;
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -1367,7 +1452,8 @@
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
 
-	if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+	/* version 1 of the cmd is supported by all chips except BE2 */
+	if (!BE2_chip(adapter))
 		req->hdr.version = 1;
 
 	req->hdr.domain = dom;
@@ -1658,9 +1744,9 @@
 		/* Reset mcast promisc mode if already set by setting mask
 		 * and not setting flags field
 		 */
-		if (!lancer_chip(adapter) || be_physfn(adapter))
-			req->if_flags_mask |=
-				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+		req->if_flags_mask |=
+			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
+				    adapter->if_cap_flags);
 
 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
 		netdev_for_each_mc_addr(ha, adapter->netdev)
@@ -1680,6 +1766,10 @@
 	struct be_cmd_req_set_flow_control *req;
 	int status;
 
+	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
+			    CMD_SUBSYSTEM_COMMON))
+		return -EPERM;
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -1709,6 +1799,10 @@
 	struct be_cmd_req_get_flow_control *req;
 	int status;
 
+	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
+			    CMD_SUBSYSTEM_COMMON))
+		return -EPERM;
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -2067,7 +2161,7 @@
 			 int offset)
 {
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_write_flashrom *req;
+	struct be_cmd_read_flash_crc *req;
 	int status;
 
 	spin_lock_bh(&adapter->mcc_lock);
@@ -2080,7 +2174,8 @@
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
+			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
+			       wrb, NULL);
 
 	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2089,7 +2184,7 @@
 
 	status = be_mcc_notify_wait(adapter);
 	if (!status)
-		memcpy(flashed_crc, req->params.data_buf, 4);
+		memcpy(flashed_crc, req->crc, 4);
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -2275,6 +2370,10 @@
 	struct be_dma_mem cmd;
 	int status;
 
+	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
+			    CMD_SUBSYSTEM_COMMON))
+		return -EPERM;
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -2434,6 +2533,42 @@
 	return status;
 }
 
+/* Get privilege(s) for a function */
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+			     u32 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_fn_privileges *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
+			       wrb, NULL);
+
+	req->hdr.domain = domain;
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_fn_privileges *resp =
+						embedded_payload(wrb);
+		*privilege = le32_to_cpu(resp->privilege_mask);
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 /* Uses synchronous MCCQ */
 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 			     bool *pmac_id_active, u32 *pmac_id, u8 domain)
@@ -2651,6 +2786,10 @@
 	int payload_len = sizeof(*req);
 	struct be_dma_mem cmd;
 
+	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+			    CMD_SUBSYSTEM_ETH))
+		return -EPERM;
+
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2792,6 +2931,240 @@
 	return status;
 }
 
+static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+						    u32 max_buf_size)
+{
+	struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+	int i;
+
+	for (i = 0; i < desc_count; i++) {
+		desc->desc_len = RESOURCE_DESC_SIZE;
+		if (((void *)desc + desc->desc_len) >
+		    (void *)(buf + max_buf_size)) {
+			desc = NULL;
+			break;
+		}
+
+		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
+			break;
+
+		desc = (void *)desc + desc->desc_len;
+	}
+
+	if (!desc || i == MAX_RESOURCE_DESC)
+		return NULL;
+
+	return desc;
+}
+
+/* Uses Mbox */
+int be_cmd_get_func_config(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_func_config *req;
+	int status;
+	struct be_dma_mem cmd;
+
+	memset(&cmd, 0, sizeof(struct be_dma_mem));
+	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+				      &cmd.dma);
+	if (!cmd.va) {
+		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+		return -ENOMEM;
+	}
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd.va;
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_GET_FUNC_CONFIG,
+			       cmd.size, wrb, &cmd);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_func_config *resp = cmd.va;
+		u32 desc_count = le32_to_cpu(resp->desc_count);
+		struct be_nic_resource_desc *desc;
+
+		desc = be_get_nic_desc(resp->func_param, desc_count,
+				       sizeof(resp->func_param));
+		if (!desc) {
+			status = -EINVAL;
+			goto err;
+		}
+
+		adapter->pf_number = desc->pf_num;
+		adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
+		adapter->max_vlans = le16_to_cpu(desc->vlan_count);
+		adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+		adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
+		adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
+		adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
+
+		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
+		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+	}
+err:
+	mutex_unlock(&adapter->mbox_lock);
+	pci_free_consistent(adapter->pdev, cmd.size,
+			    cmd.va, cmd.dma);
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+			      u8 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_profile_config *req;
+	int status;
+	struct be_dma_mem cmd;
+
+	memset(&cmd, 0, sizeof(struct be_dma_mem));
+	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+				      &cmd.dma);
+	if (!cmd.va) {
+		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd.va;
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_GET_PROFILE_CONFIG,
+			       cmd.size, wrb, &cmd);
+
+	req->type = ACTIVE_PROFILE_TYPE;
+	req->hdr.domain = domain;
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_profile_config *resp = cmd.va;
+		u32 desc_count = le32_to_cpu(resp->desc_count);
+		struct be_nic_resource_desc *desc;
+
+		desc = be_get_nic_desc(resp->func_param, desc_count,
+				       sizeof(resp->func_param));
+
+		if (!desc) {
+			status = -EINVAL;
+			goto err;
+		}
+		*cap_flags = le32_to_cpu(desc->cap_flags);
+	}
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	pci_free_consistent(adapter->pdev, cmd.size,
+			    cmd.va, cmd.dma);
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+			      u8 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_profile_config *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
+			       wrb, NULL);
+
+	req->hdr.domain = domain;
+	req->desc_count = cpu_to_le32(1);
+
+	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
+	req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
+	req->nic_desc.pf_num = adapter->pf_number;
+	req->nic_desc.vf_num = domain;
+
+	/* Mark fields invalid */
+	req->nic_desc.unicast_mac_count = 0xFFFF;
+	req->nic_desc.mcc_count = 0xFFFF;
+	req->nic_desc.vlan_count = 0xFFFF;
+	req->nic_desc.mcast_mac_count = 0xFFFF;
+	req->nic_desc.txq_count = 0xFFFF;
+	req->nic_desc.rq_count = 0xFFFF;
+	req->nic_desc.rssq_count = 0xFFFF;
+	req->nic_desc.lro_count = 0xFFFF;
+	req->nic_desc.cq_count = 0xFFFF;
+	req->nic_desc.toe_conn_count = 0xFFFF;
+	req->nic_desc.eq_count = 0xFFFF;
+	req->nic_desc.link_param = 0xFF;
+	req->nic_desc.bw_min = 0xFFFFFFFF;
+	req->nic_desc.acpi_params = 0xFF;
+	req->nic_desc.wol_param = 0x0F;
+
+	/* Change BW */
+	req->nic_desc.bw_min = cpu_to_le32(bps);
+	req->nic_desc.bw_max = cpu_to_le32(bps);
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_enable_disable_vf *req;
+	int status;
+
+	if (!lancer_chip(adapter))
+		return 0;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
+			       wrb, NULL);
+
+	req->hdr.domain = domain;
+	req->enable = 1;
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
 			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0936e21..d6552e1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -196,9 +196,14 @@
 #define OPCODE_COMMON_GET_MAC_LIST			147
 #define OPCODE_COMMON_SET_MAC_LIST			148
 #define OPCODE_COMMON_GET_HSW_CONFIG			152
+#define OPCODE_COMMON_GET_FUNC_CONFIG			160
+#define OPCODE_COMMON_GET_PROFILE_CONFIG		164
+#define OPCODE_COMMON_SET_PROFILE_CONFIG		165
 #define OPCODE_COMMON_SET_HSW_CONFIG			153
+#define OPCODE_COMMON_GET_FN_PRIVILEGES			170
 #define OPCODE_COMMON_READ_OBJECT			171
 #define OPCODE_COMMON_WRITE_OBJECT			172
+#define OPCODE_COMMON_ENABLE_DISABLE_VF			196
 
 #define OPCODE_ETH_RSS_CONFIG				1
 #define OPCODE_ETH_ACPI_CONFIG				2
@@ -1151,14 +1156,22 @@
 	u32 op_type;
 	u32 data_buf_size;
 	u32 offset;
-	u8 data_buf[4];
 };
 
 struct be_cmd_write_flashrom {
 	struct be_cmd_req_hdr hdr;
 	struct flashrom_params params;
-};
+	u8 data_buf[32768];
+	u8 rsvd[4];
+} __packed;
 
+/* cmd to read flash crc */
+struct be_cmd_read_flash_crc {
+	struct be_cmd_req_hdr hdr;
+	struct flashrom_params params;
+	u8 crc[4];
+	u8 rsvd[4];
+};
 /**************** Lancer Firmware Flash ************/
 struct amap_lancer_write_obj_context {
 	u8 write_length[24];
@@ -1429,6 +1442,41 @@
 	u8 rsvd[212];
 };
 
+/*********************** Function Privileges ***********************/
+enum {
+	BE_PRIV_DEFAULT = 0x1,
+	BE_PRIV_LNKQUERY = 0x2,
+	BE_PRIV_LNKSTATS = 0x4,
+	BE_PRIV_LNKMGMT = 0x8,
+	BE_PRIV_LNKDIAG = 0x10,
+	BE_PRIV_UTILQUERY = 0x20,
+	BE_PRIV_FILTMGMT = 0x40,
+	BE_PRIV_IFACEMGMT = 0x80,
+	BE_PRIV_VHADM = 0x100,
+	BE_PRIV_DEVCFG = 0x200,
+	BE_PRIV_DEVSEC = 0x400
+};
+#define MAX_PRIVILEGES		(BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
+				 BE_PRIV_DEVSEC)
+#define MIN_PRIVILEGES		BE_PRIV_DEFAULT
+
+struct be_cmd_priv_map {
+	u8 opcode;
+	u8 subsystem;
+	u32 priv_mask;
+};
+
+struct be_cmd_req_get_fn_privileges {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd;
+};
+
+struct be_cmd_resp_get_fn_privileges {
+	struct be_cmd_resp_hdr hdr;
+	u32 privilege_mask;
+};
+
+
 /******************** GET/SET_MACLIST  **************************/
 #define BE_MAX_MAC			64
 struct be_cmd_req_get_mac_list {
@@ -1608,33 +1656,6 @@
 	struct be_hw_stats_v1 hw_stats;
 };
 
-static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
-{
-	if (adapter->generation == BE_GEN3) {
-		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
-
-		return &cmd->hw_stats;
-	} else {
-		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
-
-		return &cmd->hw_stats;
-	}
-}
-
-static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
-{
-	if (adapter->generation == BE_GEN3) {
-		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
-		return &hw_stats->erx;
-	} else {
-		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
-		return &hw_stats->erx;
-	}
-}
-
-
 /************** get fat capabilites *******************/
 #define MAX_MODULES 27
 #define MAX_MODES 4
@@ -1684,6 +1705,96 @@
 	struct be_fat_conf_params set_params;
 };
 
+#define RESOURCE_DESC_SIZE			72
+#define NIC_RESOURCE_DESC_TYPE_ID		0x41
+#define MAX_RESOURCE_DESC			4
+
+/* QOS unit number */
+#define QUN					4
+/* Immediate */
+#define IMM					6
+/* No save */
+#define NOSV					7
+
+struct be_nic_resource_desc {
+	u8 desc_type;
+	u8 desc_len;
+	u8 rsvd1;
+	u8 flags;
+	u8 vf_num;
+	u8 rsvd2;
+	u8 pf_num;
+	u8 rsvd3;
+	u16 unicast_mac_count;
+	u8 rsvd4[6];
+	u16 mcc_count;
+	u16 vlan_count;
+	u16 mcast_mac_count;
+	u16 txq_count;
+	u16 rq_count;
+	u16 rssq_count;
+	u16 lro_count;
+	u16 cq_count;
+	u16 toe_conn_count;
+	u16 eq_count;
+	u32 rsvd5;
+	u32 cap_flags;
+	u8 link_param;
+	u8 rsvd6[3];
+	u32 bw_min;
+	u32 bw_max;
+	u8 acpi_params;
+	u8 wol_param;
+	u16 rsvd7;
+	u32 rsvd8[3];
+};
+
+struct be_cmd_req_get_func_config {
+	struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_func_config {
+	struct be_cmd_req_hdr hdr;
+	u32 desc_count;
+	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+#define ACTIVE_PROFILE_TYPE			0x2
+struct be_cmd_req_get_profile_config {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd;
+	u8 type;
+	u16 rsvd1;
+};
+
+struct be_cmd_resp_get_profile_config {
+	struct be_cmd_req_hdr hdr;
+	u32 desc_count;
+	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+struct be_cmd_req_set_profile_config {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd;
+	u32 desc_count;
+	struct be_nic_resource_desc nic_desc;
+};
+
+struct be_cmd_resp_set_profile_config {
+	struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_enable_disable_vf {
+	struct be_cmd_req_hdr hdr;
+	u8 enable;
+	u8 rsvd[3];
+};
+
+static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
+{
+	return flags & adapter->cmd_privileges ? true : false;
+}
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_fw_wait_ready(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1780,6 +1891,8 @@
 extern int be_cmd_req_native_mode(struct be_adapter *adapter);
 extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
+				    u32 *privilege, u32 domain);
 extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 				    bool *pmac_id_active, u32 *pmac_id,
 				    u8 domain);
@@ -1798,4 +1911,10 @@
 extern int lancer_wait_ready(struct be_adapter *adapter);
 extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
 extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+extern int be_cmd_get_func_config(struct be_adapter *adapter);
+extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+				     u8 domain);
 
+extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+				     u8 domain);
+extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8e6fb0b..00454a1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -261,6 +261,9 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	u32 log_size = 0;
 
+	if (!check_privilege(adapter, MAX_PRIVILEGES))
+		return 0;
+
 	if (be_physfn(adapter)) {
 		if (lancer_chip(adapter))
 			log_size = lancer_cmd_get_file_len(adapter,
@@ -525,6 +528,10 @@
 	u8 link_status;
 	u16 link_speed = 0;
 	int status;
+	u32 auto_speeds;
+	u32 fixed_speeds;
+	u32 dac_cable_len;
+	u16 interface_type;
 
 	if (adapter->phy.link_speed < 0) {
 		status = be_cmd_link_status_query(adapter, &link_speed,
@@ -534,39 +541,46 @@
 		ethtool_cmd_speed_set(ecmd, link_speed);
 
 		status = be_cmd_get_phy_info(adapter);
-		if (status)
-			return status;
+		if (!status) {
+			interface_type = adapter->phy.interface_type;
+			auto_speeds = adapter->phy.auto_speeds_supported;
+			fixed_speeds = adapter->phy.fixed_speeds_supported;
+			dac_cable_len = adapter->phy.dac_cable_len;
 
-		ecmd->supported =
-			convert_to_et_setting(adapter->phy.interface_type,
-					adapter->phy.auto_speeds_supported |
-					adapter->phy.fixed_speeds_supported);
-		ecmd->advertising =
-			convert_to_et_setting(adapter->phy.interface_type,
-					adapter->phy.auto_speeds_supported);
+			ecmd->supported =
+				convert_to_et_setting(interface_type,
+						      auto_speeds |
+						      fixed_speeds);
+			ecmd->advertising =
+				convert_to_et_setting(interface_type,
+						      auto_speeds);
 
-		ecmd->port = be_get_port_type(adapter->phy.interface_type,
-					      adapter->phy.dac_cable_len);
+			ecmd->port = be_get_port_type(interface_type,
+						      dac_cable_len);
 
-		if (adapter->phy.auto_speeds_supported) {
-			ecmd->supported |= SUPPORTED_Autoneg;
-			ecmd->autoneg = AUTONEG_ENABLE;
-			ecmd->advertising |= ADVERTISED_Autoneg;
-		}
+			if (adapter->phy.auto_speeds_supported) {
+				ecmd->supported |= SUPPORTED_Autoneg;
+				ecmd->autoneg = AUTONEG_ENABLE;
+				ecmd->advertising |= ADVERTISED_Autoneg;
+			}
 
-		if (be_pause_supported(adapter)) {
 			ecmd->supported |= SUPPORTED_Pause;
-			ecmd->advertising |= ADVERTISED_Pause;
-		}
+			if (be_pause_supported(adapter))
+				ecmd->advertising |= ADVERTISED_Pause;
 
-		switch (adapter->phy.interface_type) {
-		case PHY_TYPE_KR_10GB:
-		case PHY_TYPE_KX4_10GB:
-			ecmd->transceiver = XCVR_INTERNAL;
-			break;
-		default:
-			ecmd->transceiver = XCVR_EXTERNAL;
-			break;
+			switch (adapter->phy.interface_type) {
+			case PHY_TYPE_KR_10GB:
+			case PHY_TYPE_KX4_10GB:
+				ecmd->transceiver = XCVR_INTERNAL;
+				break;
+			default:
+				ecmd->transceiver = XCVR_EXTERNAL;
+				break;
+			}
+		} else {
+			ecmd->port = PORT_OTHER;
+			ecmd->autoneg = AUTONEG_DISABLE;
+			ecmd->transceiver = XCVR_DUMMY1;
 		}
 
 		/* Save for future use */
@@ -787,6 +801,10 @@
 be_get_eeprom_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (!check_privilege(adapter, MAX_PRIVILEGES))
+		return 0;
+
 	if (lancer_chip(adapter)) {
 		if (be_physfn(adapter))
 			return lancer_cmd_get_file_len(adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index b755f70..541d453 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -31,12 +31,12 @@
 
 #define MPU_EP_CONTROL 		0
 
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET		0xac
-#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET	0x400
-#define EP_SEMAPHORE_POST_STAGE_MASK		0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK		0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT		31
+/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SEMAPHORE_OFFSET_BE		0x7c
+#define SLIPORT_SEMAPHORE_OFFSET_SH		0x94
+#define POST_STAGE_MASK				0x0000FFFF
+#define POST_ERR_MASK				0x1
+#define POST_ERR_SHIFT				31
 
 /* MPU semphore POST stage values */
 #define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting goahead from host */
@@ -59,6 +59,9 @@
 #define PHYSDEV_CONTROL_FW_RESET_MASK	0x00000002
 #define PHYSDEV_CONTROL_INP_MASK	0x40000000
 
+#define SLIPORT_ERROR_NO_RESOURCE1	0x2
+#define SLIPORT_ERROR_NO_RESOURCE2	0x9
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 	0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -102,11 +105,6 @@
 #define SLI_INTF_TYPE_2		2
 #define SLI_INTF_TYPE_3		3
 
-/* SLI family */
-#define BE_SLI_FAMILY		0x0
-#define LANCER_A0_SLI_FAMILY	0xA
-#define SKYHAWK_SLI_FAMILY      0x2
-
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
 #define CEV_ISR_SIZE				4
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1b6cc5..0661e93 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -44,6 +44,7 @@
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,23 +238,46 @@
 	int status = 0;
 	u8 current_mac[ETH_ALEN];
 	u32 pmac_id = adapter->pmac_id[0];
+	bool active_mac = true;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	status = be_cmd_mac_addr_query(adapter, current_mac, false,
-				       adapter->if_handle, 0);
+	/* For a BE VF, the MAC address is already activated by the PF.
+	 * Hence the only operation left is updating netdev->dev_addr.
+	 * Update it only if the user passes the same MAC that was used
+	 * when configuring the VF MAC from the PF (hypervisor).
+	 */
+	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
+		status = be_cmd_mac_addr_query(adapter, current_mac,
+					       false, adapter->if_handle, 0);
+		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
+			goto done;
+		else
+			goto err;
+	}
+
+	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
+		goto done;
+
+	/* For Lancer, check if any MAC is active.
+	 * If active, get its pmac id.
+	 */
+	if (lancer_chip(adapter) && !be_physfn(adapter))
+		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
+					 &pmac_id, 0);
+
+	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+				 adapter->if_handle,
+				 &adapter->pmac_id[0], 0);
+
 	if (status)
 		goto err;
 
-	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
-		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-				adapter->if_handle, &adapter->pmac_id[0], 0);
-		if (status)
-			goto err;
-
-		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
-	}
+	if (active_mac)
+		be_cmd_pmac_del(adapter, adapter->if_handle,
+				pmac_id, 0);
+done:
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	return 0;
 err:
@@ -261,7 +285,35 @@
 	return status;
 }
 
-static void populate_be2_stats(struct be_adapter *adapter)
+/* BE2 supports only v0 cmd */
+static void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+	if (BE2_chip(adapter)) {
+		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+		return &cmd->hw_stats;
+	} else  {
+		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+		return &cmd->hw_stats;
+	}
+}
+
+/* BE2 supports only v0 cmd */
+static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+	if (BE2_chip(adapter)) {
+		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+		return &hw_stats->erx;
+	} else {
+		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+		return &hw_stats->erx;
+	}
+}
+
+static void populate_be_v0_stats(struct be_adapter *adapter)
 {
 	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -310,7 +362,7 @@
 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 }
 
-static void populate_be3_stats(struct be_adapter *adapter)
+static void populate_be_v1_stats(struct be_adapter *adapter)
 {
 	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -412,28 +464,25 @@
 	struct be_rx_obj *rxo;
 	int i;
 
-	if (adapter->generation == BE_GEN3) {
-		if (lancer_chip(adapter))
-			populate_lancer_stats(adapter);
-		 else
-			populate_be3_stats(adapter);
+	if (lancer_chip(adapter)) {
+		populate_lancer_stats(adapter);
 	} else {
-		populate_be2_stats(adapter);
-	}
+		if (BE2_chip(adapter))
+			populate_be_v0_stats(adapter);
+		else
+			/* for BE3 and Skyhawk */
+			populate_be_v1_stats(adapter);
 
-	if (lancer_chip(adapter))
-		goto done;
-
-	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
-	for_all_rx_queues(adapter, rxo, i) {
-		/* below erx HW counter can actually wrap around after
-		 * 65535. Driver accumulates a 32-bit value
-		 */
-		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
-				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
+		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+		for_all_rx_queues(adapter, rxo, i) {
+			/* below erx HW counter can actually wrap around after
+			 * 65535. Driver accumulates a 32-bit value
+			 */
+			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+					     (u16)erx->rx_drops_no_fragments \
+					     [rxo->q.id]);
+		}
 	}
-done:
-	return;
 }
 
 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -597,16 +646,6 @@
 			hdr, skb_shinfo(skb)->gso_size);
 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
-		if (lancer_chip(adapter) && adapter->sli_family  ==
-							LANCER_A0_SLI_FAMILY) {
-			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
-			if (is_tcp_pkt(skb))
-				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
-								tcpcs, hdr, 1);
-			else if (is_udp_pkt(skb))
-				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
-								udpcs, hdr, 1);
-		}
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (is_tcp_pkt(skb))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -856,11 +895,15 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status = 0;
 
-	if (!be_physfn(adapter)) {
+	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
 		status = -EINVAL;
 		goto ret;
 	}
 
+	/* Packets with VID 0 are always received by Lancer by default */
+	if (lancer_chip(adapter) && vid == 0)
+		goto ret;
+
 	adapter->vlan_tag[vid] = 1;
 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
 		status = be_vid_config(adapter);
@@ -878,11 +921,15 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status = 0;
 
-	if (!be_physfn(adapter)) {
+	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
 		status = -EINVAL;
 		goto ret;
 	}
 
+	/* Packets with VID 0 are always received by Lancer by default */
+	if (lancer_chip(adapter) && vid == 0)
+		goto ret;
+
 	adapter->vlan_tag[vid] = 0;
 	if (adapter->vlans_added <= adapter->max_vlans)
 		status = be_vid_config(adapter);
@@ -917,7 +964,7 @@
 
 	/* Enable multicast promisc if num configured exceeds what we support */
 	if (netdev->flags & IFF_ALLMULTI ||
-			netdev_mc_count(netdev) > BE_MAX_MC) {
+	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
 		goto done;
 	}
@@ -962,6 +1009,9 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 	int status;
+	bool active_mac = false;
+	u32 pmac_id;
+	u8 old_mac[ETH_ALEN];
 
 	if (!sriov_enabled(adapter))
 		return -EPERM;
@@ -970,6 +1020,12 @@
 		return -EINVAL;
 
 	if (lancer_chip(adapter)) {
+		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+						  &pmac_id, vf + 1);
+		if (!status && active_mac)
+			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+					pmac_id, vf + 1);
+
 		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
 	} else {
 		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
@@ -1062,7 +1118,10 @@
 		return -EINVAL;
 	}
 
-	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
+	if (lancer_chip(adapter))
+		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
+	else
+		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
 	if (status)
 		dev_err(&adapter->pdev->dev,
@@ -1616,24 +1675,6 @@
 	return num;
 }
 
-static int event_handle(struct be_eq_obj *eqo)
-{
-	bool rearm = false;
-	int num = events_get(eqo);
-
-	/* Deal with any spurious interrupts that come without events */
-	if (!num)
-		rearm = true;
-
-	if (num || msix_enabled(eqo->adapter))
-		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
-
-	if (num)
-		napi_schedule(&eqo->napi);
-
-	return num;
-}
-
 /* Leaves the EQ is disarmed state */
 static void be_eq_clean(struct be_eq_obj *eqo)
 {
@@ -1837,12 +1878,13 @@
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-	if (sriov_want(adapter) || be_is_mc(adapter) ||
-	    lancer_chip(adapter) || !be_physfn(adapter) ||
-	    adapter->generation == BE_GEN2)
+	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
+	    be_is_mc(adapter) ||
+	    (!lancer_chip(adapter) && !be_physfn(adapter)) ||
+	    BE2_chip(adapter))
 		return 1;
 	else
-		return MAX_TX_QS;
+		return adapter->max_tx_queues;
 }
 
 static int be_tx_cqs_create(struct be_adapter *adapter)
@@ -1954,22 +1996,31 @@
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
-	struct be_adapter *adapter = dev;
-	int num_evts;
+	struct be_eq_obj *eqo = dev;
+	struct be_adapter *adapter = eqo->adapter;
+	int num_evts = 0;
 
-	/* With INTx only one EQ is used */
-	num_evts = event_handle(&adapter->eq_obj[0]);
-	if (num_evts)
-		return IRQ_HANDLED;
-	else
-		return IRQ_NONE;
+	/* On Lancer, clear-intr bit of the EQ DB does not work.
+	 * INTx is de-asserted only on notifying num evts.
+	 */
+	if (lancer_chip(adapter))
+		num_evts = events_get(eqo);
+
+	/* The EQ-notify may not de-assert INTx right away, causing
+	 * the ISR to be invoked again. So, return IRQ_HANDLED even when
+	 * num_evts is zero.
+	 */
+	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+	napi_schedule(&eqo->napi);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t be_msix(int irq, void *dev)
 {
 	struct be_eq_obj *eqo = dev;
 
-	event_handle(eqo);
+	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+	---help---
 	return IRQ_HANDLED;
 }
 
@@ -2065,9 +2116,11 @@
 {
 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
 	struct be_adapter *adapter = eqo->adapter;
-	int max_work = 0, work, i;
+	int max_work = 0, work, i, num_evts;
 	bool tx_done;
 
+	num_evts = events_get(eqo);
+
 	/* Process all TXQs serviced by this EQ */
 	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
 		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
@@ -2090,10 +2143,10 @@
 
 	if (max_work < budget) {
 		napi_complete(napi);
-		be_eq_notify(adapter, eqo->q.id, true, false, 0);
+		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
 	} else {
 		/* As we'll continue in polling mode, count and clear events */
-		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
+		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
 	}
 	return max_work;
 }
@@ -2177,9 +2230,11 @@
 static uint be_num_rss_want(struct be_adapter *adapter)
 {
 	u32 num = 0;
+
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	     !sriov_want(adapter) && be_physfn(adapter)) {
-		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+	    (lancer_chip(adapter) ||
+	     (!sriov_want(adapter) && be_physfn(adapter)))) {
+		num = adapter->max_rss_queues;
 		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
 	}
 	return num;
@@ -2277,10 +2332,10 @@
 			return status;
 	}
 
-	/* INTx */
+	/* INTx: only the first EQ is used */
 	netdev->irq = adapter->pdev->irq;
 	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
-			adapter);
+			     &adapter->eq_obj[0]);
 	if (status) {
 		dev_err(&adapter->pdev->dev,
 			"INTx request IRQ failed - err %d\n", status);
@@ -2302,7 +2357,7 @@
 
 	/* INTx */
 	if (!msix_enabled(adapter)) {
-		free_irq(netdev->irq, adapter);
+		free_irq(netdev->irq, &adapter->eq_obj[0]);
 		goto done;
 	}
 
@@ -2579,10 +2634,30 @@
 	be_tx_queues_destroy(adapter);
 	be_evt_queues_destroy(adapter);
 
+	kfree(adapter->pmac_id);
+	adapter->pmac_id = NULL;
+
 	be_msix_disable(adapter);
 	return 0;
 }
 
+static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
+				   u32 *cap_flags, u8 domain)
+{
+	bool profile_present = false;
+	int status;
+
+	if (lancer_chip(adapter)) {
+		status = be_cmd_get_profile_config(adapter, cap_flags, domain);
+		if (!status)
+			profile_present = true;
+	}
+
+	if (!profile_present)
+		*cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			     BE_IF_FLAGS_MULTICAST;
+}
+
 static int be_vf_setup_init(struct be_adapter *adapter)
 {
 	struct be_vf_cfg *vf_cfg;
@@ -2634,9 +2709,13 @@
 	if (status)
 		goto err;
 
-	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-				BE_IF_FLAGS_MULTICAST;
 	for_all_vfs(adapter, vf_cfg, vf) {
+		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
+
+		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+					BE_IF_FLAGS_BROADCAST |
+					BE_IF_FLAGS_MULTICAST);
+
 		status = be_cmd_if_create(adapter, cap_flags, en_flags,
 					  &vf_cfg->if_handle, vf + 1);
 		if (status)
@@ -2661,6 +2740,8 @@
 		if (status)
 			goto err;
 		vf_cfg->def_vid = def_vlan;
+
+		be_cmd_enable_vf(adapter, vf + 1);
 	}
 	return 0;
 err:
@@ -2674,7 +2755,10 @@
 	adapter->if_handle = -1;
 	adapter->be3_native = false;
 	adapter->promiscuous = false;
-	adapter->eq_next_idx = 0;
+	if (be_physfn(adapter))
+		adapter->cmd_privileges = MAX_PRIVILEGES;
+	else
+		adapter->cmd_privileges = MIN_PRIVILEGES;
 }
 
 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2712,12 +2796,93 @@
 	return status;
 }
 
+static void be_get_resources(struct be_adapter *adapter)
+{
+	int status;
+	bool profile_present = false;
+
+	if (lancer_chip(adapter)) {
+		status = be_cmd_get_func_config(adapter);
+
+		if (!status)
+			profile_present = true;
+	}
+
+	if (profile_present) {
+		/* Sanity fixes for Lancer */
+		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
+					      BE_UC_PMAC_COUNT);
+		adapter->max_vlans = min_t(u16, adapter->max_vlans,
+					   BE_NUM_VLANS_SUPPORTED);
+		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
+					       BE_MAX_MC);
+		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
+					       MAX_TX_QS);
+		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
+						BE3_MAX_RSS_QS);
+		adapter->max_event_queues = min_t(u16,
+						  adapter->max_event_queues,
+						  BE3_MAX_RSS_QS);
+
+		if (adapter->max_rss_queues &&
+		    adapter->max_rss_queues == adapter->max_rx_queues)
+			adapter->max_rss_queues -= 1;
+
+		if (adapter->max_event_queues < adapter->max_rss_queues)
+			adapter->max_rss_queues = adapter->max_event_queues;
+
+	} else {
+		if (be_physfn(adapter))
+			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
+		else
+			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
+
+		if (adapter->function_mode & FLEX10_MODE)
+			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+		else
+			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
+		adapter->max_mcast_mac = BE_MAX_MC;
+		adapter->max_tx_queues = MAX_TX_QS;
+		adapter->max_rss_queues = (adapter->be3_native) ?
+					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+		adapter->max_event_queues = BE3_MAX_RSS_QS;
+
+		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
+					BE_IF_FLAGS_BROADCAST |
+					BE_IF_FLAGS_MULTICAST |
+					BE_IF_FLAGS_PASS_L3L4_ERRORS |
+					BE_IF_FLAGS_MCAST_PROMISCUOUS |
+					BE_IF_FLAGS_VLAN_PROMISCUOUS |
+					BE_IF_FLAGS_PROMISCUOUS;
+
+		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+	}
+}
+
 /* Routine to query per function resource limits */
 static int be_get_config(struct be_adapter *adapter)
 {
-	int pos;
+	int pos, status;
 	u16 dev_num_vfs;
 
+	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+				     &adapter->function_mode,
+				     &adapter->function_caps);
+	if (status)
+		goto err;
+
+	be_get_resources(adapter);
+
+	/* primary mac needs 1 pmac entry */
+	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+				   sizeof(u32), GFP_KERNEL);
+	if (!adapter->pmac_id) {
+		status = -ENOMEM;
+		goto err;
+	}
+
 	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
 	if (pos) {
 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
@@ -2726,13 +2891,14 @@
 			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
 		adapter->dev_num_vfs = dev_num_vfs;
 	}
-	return 0;
+err:
+	return status;
 }
 
 static int be_setup(struct be_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
-	u32 cap_flags, en_flags;
+	u32 en_flags;
 	u32 tx_fc, rx_fc;
 	int status;
 	u8 mac[ETH_ALEN];
@@ -2740,9 +2906,12 @@
 
 	be_setup_init(adapter);
 
-	be_get_config(adapter);
+	if (!lancer_chip(adapter))
+		be_cmd_req_native_mode(adapter);
 
-	be_cmd_req_native_mode(adapter);
+	status = be_get_config(adapter);
+	if (status)
+		goto err;
 
 	be_msix_enable(adapter);
 
@@ -2762,24 +2931,22 @@
 	if (status)
 		goto err;
 
+	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+	/* In UMC mode FW does not return right privileges.
+	 * Override with correct privilege equivalent to PF.
+	 */
+	if (be_is_mc(adapter))
+		adapter->cmd_privileges = MAX_PRIVILEGES;
+
 	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
-	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
-			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
 
-	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
-		cap_flags |= BE_IF_FLAGS_RSS;
+	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
 		en_flags |= BE_IF_FLAGS_RSS;
-	}
 
-	if (lancer_chip(adapter) && !be_physfn(adapter)) {
-		en_flags = BE_IF_FLAGS_UNTAGGED |
-			    BE_IF_FLAGS_BROADCAST |
-			    BE_IF_FLAGS_MULTICAST;
-		cap_flags = en_flags;
-	}
+	en_flags = en_flags & adapter->if_cap_flags;
 
-	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
 				  &adapter->if_handle, 0);
 	if (status != 0)
 		goto err;
@@ -2827,8 +2994,8 @@
 			dev_warn(dev, "device doesn't support SRIOV\n");
 	}
 
-	be_cmd_get_phy_info(adapter);
-	if (be_pause_supported(adapter))
+	status = be_cmd_get_phy_info(adapter);
+	if (!status && be_pause_supported(adapter))
 		adapter->phy.fc_autoneg = 1;
 
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -2846,8 +3013,10 @@
 	struct be_eq_obj *eqo;
 	int i;
 
-	for_all_evt_queues(adapter, eqo, i)
-		event_handle(eqo);
+	for_all_evt_queues(adapter, eqo, i) {
+		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+		napi_schedule(&eqo->napi);
+	}
 
 	return;
 }
@@ -2895,7 +3064,7 @@
 	int i = 0, img_type = 0;
 	struct flash_section_info_g2 *fsec_g2 = NULL;
 
-	if (adapter->generation != BE_GEN3)
+	if (BE2_chip(adapter))
 		fsec_g2 = (struct flash_section_info_g2 *)fsec;
 
 	for (i = 0; i < MAX_FLASH_COMP; i++) {
@@ -2928,7 +3097,49 @@
 	return NULL;
 }
 
-static int be_flash_data(struct be_adapter *adapter,
+static int be_flash(struct be_adapter *adapter, const u8 *img,
+		struct be_dma_mem *flash_cmd, int optype, int img_size)
+{
+	u32 total_bytes = 0, flash_op, num_bytes = 0;
+	int status = 0;
+	struct be_cmd_write_flashrom *req = flash_cmd->va;
+
+	total_bytes = img_size;
+	while (total_bytes) {
+		num_bytes = min_t(u32, 32*1024, total_bytes);
+
+		total_bytes -= num_bytes;
+
+		if (!total_bytes) {
+			if (optype == OPTYPE_PHY_FW)
+				flash_op = FLASHROM_OPER_PHY_FLASH;
+			else
+				flash_op = FLASHROM_OPER_FLASH;
+		} else {
+			if (optype == OPTYPE_PHY_FW)
+				flash_op = FLASHROM_OPER_PHY_SAVE;
+			else
+				flash_op = FLASHROM_OPER_SAVE;
+		}
+
+		memcpy(req->data_buf, img, num_bytes);
+		img += num_bytes;
+		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
+						flash_op, num_bytes);
+		if (status) {
+			if (status == ILLEGAL_IOCTL_REQ &&
+			    optype == OPTYPE_PHY_FW)
+				break;
+			dev_err(&adapter->pdev->dev,
+				"cmd to write to flash rom failed.\n");
+			return status;
+		}
+	}
+	return 0;
+}
+
+/* For BE2 and BE3 */
+static int be_flash_BEx(struct be_adapter *adapter,
 			 const struct firmware *fw,
 			 struct be_dma_mem *flash_cmd,
 			 int num_of_images)
@@ -2936,12 +3147,9 @@
 {
 	int status = 0, i, filehdr_size = 0;
 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
-	u32 total_bytes = 0, flash_op;
-	int num_bytes;
 	const u8 *p = fw->data;
-	struct be_cmd_write_flashrom *req = flash_cmd->va;
 	const struct flash_comp *pflashcomp;
-	int num_comp, hdr_size;
+	int num_comp, redboot;
 	struct flash_section_info *fsec = NULL;
 
 	struct flash_comp gen3_flash_types[] = {
@@ -2986,7 +3194,7 @@
 			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
 	};
 
-	if (adapter->generation == BE_GEN3) {
+	if (BE3_chip(adapter)) {
 		pflashcomp = gen3_flash_types;
 		filehdr_size = sizeof(struct flash_file_hdr_g3);
 		num_comp = ARRAY_SIZE(gen3_flash_types);
@@ -2995,6 +3203,7 @@
 		filehdr_size = sizeof(struct flash_file_hdr_g2);
 		num_comp = ARRAY_SIZE(gen2_flash_types);
 	}
+
 	/* Get flash section info*/
 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
 	if (!fsec) {
@@ -3010,70 +3219,105 @@
 		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
 			continue;
 
-		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
-			if (!phy_flashing_required(adapter))
+		if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
+		    !phy_flashing_required(adapter))
+				continue;
+
+		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
+			redboot = be_flash_redboot(adapter, fw->data,
+				pflashcomp[i].offset, pflashcomp[i].size,
+				filehdr_size + img_hdrs_size);
+			if (!redboot)
 				continue;
 		}
 
-		hdr_size = filehdr_size +
-			   (num_of_images * sizeof(struct image_hdr));
-
-		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
-		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
-				       pflashcomp[i].size, hdr_size)))
-			continue;
-
-		/* Flash the component */
 		p = fw->data;
 		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
 		if (p + pflashcomp[i].size > fw->data + fw->size)
 			return -1;
-		total_bytes = pflashcomp[i].size;
-		while (total_bytes) {
-			if (total_bytes > 32*1024)
-				num_bytes = 32*1024;
-			else
-				num_bytes = total_bytes;
-			total_bytes -= num_bytes;
-			if (!total_bytes) {
-				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
-					flash_op = FLASHROM_OPER_PHY_FLASH;
-				else
-					flash_op = FLASHROM_OPER_FLASH;
-			} else {
-				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
-					flash_op = FLASHROM_OPER_PHY_SAVE;
-				else
-					flash_op = FLASHROM_OPER_SAVE;
-			}
-			memcpy(req->params.data_buf, p, num_bytes);
-			p += num_bytes;
-			status = be_cmd_write_flashrom(adapter, flash_cmd,
-				pflashcomp[i].optype, flash_op, num_bytes);
-			if (status) {
-				if ((status == ILLEGAL_IOCTL_REQ) &&
-					(pflashcomp[i].optype ==
-						OPTYPE_PHY_FW))
-					break;
-				dev_err(&adapter->pdev->dev,
-					"cmd to write to flash rom failed.\n");
-				return -1;
-			}
+
+		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
+					pflashcomp[i].size);
+		if (status) {
+			dev_err(&adapter->pdev->dev,
+				"Flashing section type %d failed.\n",
+				pflashcomp[i].img_type);
+			return status;
 		}
 	}
 	return 0;
 }
 
-static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+static int be_flash_skyhawk(struct be_adapter *adapter,
+		const struct firmware *fw,
+		struct be_dma_mem *flash_cmd, int num_of_images)
 {
-	if (fhdr == NULL)
-		return 0;
-	if (fhdr->build[0] == '3')
-		return BE_GEN3;
-	else if (fhdr->build[0] == '2')
-		return BE_GEN2;
-	else
-		return 0;
+	int status = 0, i, filehdr_size = 0;
+	int img_offset, img_size, img_optype, redboot;
+	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+	const u8 *p = fw->data;
+	struct flash_section_info *fsec = NULL;
+
+	filehdr_size = sizeof(struct flash_file_hdr_g3);
+	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+	if (!fsec) {
+		dev_err(&adapter->pdev->dev,
+			"Invalid Cookie. UFI corrupted ?\n");
+		return -1;
+	}
+
+	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
+		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
+		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+
+		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
+		case IMAGE_FIRMWARE_iSCSI:
+			img_optype = OPTYPE_ISCSI_ACTIVE;
+			break;
+		case IMAGE_BOOT_CODE:
+			img_optype = OPTYPE_REDBOOT;
+			break;
+		case IMAGE_OPTION_ROM_ISCSI:
+			img_optype = OPTYPE_BIOS;
+			break;
+		case IMAGE_OPTION_ROM_PXE:
+			img_optype = OPTYPE_PXE_BIOS;
+			break;
+		case IMAGE_OPTION_ROM_FCoE:
+			img_optype = OPTYPE_FCOE_BIOS;
+			break;
+		case IMAGE_FIRMWARE_BACKUP_iSCSI:
+			img_optype = OPTYPE_ISCSI_BACKUP;
+			break;
+		case IMAGE_NCSI:
+			img_optype = OPTYPE_NCSI_FW;
+			break;
+		default:
+			continue;
+		}
+
+		if (img_optype == OPTYPE_REDBOOT) {
+			redboot = be_flash_redboot(adapter, fw->data,
+					img_offset, img_size,
+					filehdr_size + img_hdrs_size);
+			if (!redboot)
+				continue;
+		}
+
+		p = fw->data;
+		p += filehdr_size + img_offset + img_hdrs_size;
+		if (p + img_size > fw->data + fw->size)
+			return -1;
+
+		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
+		if (status) {
+			dev_err(&adapter->pdev->dev,
+				"Flashing section type %d failed.\n",
+				fsec->fsec_entry[i].type);
+			return status;
+		}
+	}
+	return 0;
 }
 
 static int lancer_wait_idle(struct be_adapter *adapter)
@@ -3207,6 +3451,28 @@
 	return status;
 }
 
+#define UFI_TYPE2		2
+#define UFI_TYPE3		3
+#define UFI_TYPE4		4
+static int be_get_ufi_type(struct be_adapter *adapter,
+			   struct flash_file_hdr_g2 *fhdr)
+{
+	if (fhdr == NULL)
+		goto be_get_ufi_exit;
+
+	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
+		return UFI_TYPE4;
+	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
+		return UFI_TYPE3;
+	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
+		return UFI_TYPE2;
+
+be_get_ufi_exit:
+	dev_err(&adapter->pdev->dev,
+		"UFI and Interface are not compatible for flashing\n");
+	return -1;
+}
+
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
 {
 	struct flash_file_hdr_g2 *fhdr;
@@ -3214,12 +3480,9 @@
 	struct image_hdr *img_hdr_ptr = NULL;
 	struct be_dma_mem flash_cmd;
 	const u8 *p;
-	int status = 0, i = 0, num_imgs = 0;
+	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
 
-	p = fw->data;
-	fhdr = (struct flash_file_hdr_g2 *) p;
-
-	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
+	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
 					  &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va) {
@@ -3229,27 +3492,32 @@
 		goto be_fw_exit;
 	}
 
-	if ((adapter->generation == BE_GEN3) &&
-			(get_ufigen_type(fhdr) == BE_GEN3)) {
-		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
-		num_imgs = le32_to_cpu(fhdr3->num_imgs);
-		for (i = 0; i < num_imgs; i++) {
-			img_hdr_ptr = (struct image_hdr *) (fw->data +
-					(sizeof(struct flash_file_hdr_g3) +
-					 i * sizeof(struct image_hdr)));
-			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
-				status = be_flash_data(adapter, fw, &flash_cmd,
-							num_imgs);
+	p = fw->data;
+	fhdr = (struct flash_file_hdr_g2 *)p;
+
+	ufi_type = be_get_ufi_type(adapter, fhdr);
+
+	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+	num_imgs = le32_to_cpu(fhdr3->num_imgs);
+	for (i = 0; i < num_imgs; i++) {
+		img_hdr_ptr = (struct image_hdr *)(fw->data +
+				(sizeof(struct flash_file_hdr_g3) +
+				 i * sizeof(struct image_hdr)));
+		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
+			if (ufi_type == UFI_TYPE4)
+				status = be_flash_skyhawk(adapter, fw,
+							&flash_cmd, num_imgs);
+			else if (ufi_type == UFI_TYPE3)
+				status = be_flash_BEx(adapter, fw, &flash_cmd,
+						      num_imgs);
 		}
-	} else if ((adapter->generation == BE_GEN2) &&
-			(get_ufigen_type(fhdr) == BE_GEN2)) {
-		status = be_flash_data(adapter, fw, &flash_cmd, 0);
-	} else {
-		dev_err(&adapter->pdev->dev,
-			"UFI and Interface are not compatible for flashing\n");
-		status = -1;
 	}
 
+	if (ufi_type == UFI_TYPE2)
+		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
+	else if (ufi_type == -1)
+		status = -1;
+
 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
 			  flash_cmd.dma);
 	if (status) {
@@ -3344,80 +3612,47 @@
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
 {
-	if (adapter->csr)
-		iounmap(adapter->csr);
 	if (adapter->db)
-		iounmap(adapter->db);
-	if (adapter->roce_db.base)
-		pci_iounmap(adapter->pdev, adapter->roce_db.base);
+		pci_iounmap(adapter->pdev, adapter->db);
 }
 
-static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
+static int db_bar(struct be_adapter *adapter)
 {
-	struct pci_dev *pdev = adapter->pdev;
-	u8 __iomem *addr;
+	if (lancer_chip(adapter) || !be_physfn(adapter))
+		return 0;
+	else
+		return 4;
+}
 
-	addr = pci_iomap(pdev, 2, 0);
-	if (addr == NULL)
-		return -ENOMEM;
-
-	adapter->roce_db.base = addr;
-	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
-	adapter->roce_db.size = 8192;
-	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
+static int be_roce_map_pci_bars(struct be_adapter *adapter)
+{
+	if (skyhawk_chip(adapter)) {
+		adapter->roce_db.size = 4096;
+		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
+							      db_bar(adapter));
+		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
+							       db_bar(adapter));
+	}
 	return 0;
 }
 
 static int be_map_pci_bars(struct be_adapter *adapter)
 {
 	u8 __iomem *addr;
-	int db_reg;
+	u32 sli_intf;
 
-	if (lancer_chip(adapter)) {
-		if (be_type_2_3(adapter)) {
-			addr = ioremap_nocache(
-					pci_resource_start(adapter->pdev, 0),
-					pci_resource_len(adapter->pdev, 0));
-			if (addr == NULL)
-				return -ENOMEM;
-			adapter->db = addr;
-		}
-		if (adapter->if_type == SLI_INTF_TYPE_3) {
-			if (lancer_roce_map_pci_bars(adapter))
-				goto pci_map_err;
-		}
-		return 0;
-	}
+	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+				SLI_INTF_IF_TYPE_SHIFT;
 
-	if (be_physfn(adapter)) {
-		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
-				pci_resource_len(adapter->pdev, 2));
-		if (addr == NULL)
-			return -ENOMEM;
-		adapter->csr = addr;
-	}
-
-	if (adapter->generation == BE_GEN2) {
-		db_reg = 4;
-	} else {
-		if (be_physfn(adapter))
-			db_reg = 4;
-		else
-			db_reg = 0;
-	}
-	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
-				pci_resource_len(adapter->pdev, db_reg));
+	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
 	if (addr == NULL)
 		goto pci_map_err;
 	adapter->db = addr;
-	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
-		adapter->roce_db.size = 4096;
-		adapter->roce_db.io_addr =
-				pci_resource_start(adapter->pdev, db_reg);
-		adapter->roce_db.total_size =
-				pci_resource_len(adapter->pdev, db_reg);
-	}
+
+	be_roce_map_pci_bars(adapter);
 	return 0;
+
 pci_map_err:
 	be_unmap_pci_bars(adapter);
 	return -ENOMEM;
@@ -3437,7 +3672,6 @@
 	if (mem->va)
 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
 				  mem->dma);
-	kfree(adapter->pmac_id);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3445,8 +3679,14 @@
 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
 	struct be_dma_mem *rx_filter = &adapter->rx_filter;
+	u32 sli_intf;
 	int status;
 
+	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+				 SLI_INTF_FAMILY_SHIFT;
+	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+
 	status = be_map_pci_bars(adapter);
 	if (status)
 		goto done;
@@ -3473,13 +3713,6 @@
 		goto free_mbox;
 	}
 	memset(rx_filter->va, 0, rx_filter->size);
-
-	/* primary mac needs 1 pmac entry */
-	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-				   sizeof(*adapter->pmac_id), GFP_KERNEL);
-	if (!adapter->pmac_id)
-		return -ENOMEM;
-
 	mutex_init(&adapter->mbox_lock);
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
@@ -3512,14 +3745,14 @@
 {
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
-	if (adapter->generation == BE_GEN2) {
+	if (lancer_chip(adapter))
+		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+	else if (BE2_chip(adapter))
 		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
-	} else {
-		if (lancer_chip(adapter))
-			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
-		else
-			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
-	}
+	else
+		/* BE3 and Skyhawk */
+		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+
 	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
 				     GFP_KERNEL);
 	if (cmd->va == NULL)
@@ -3573,6 +3806,9 @@
 	u32 level = 0;
 	int j;
 
+	if (lancer_chip(adapter))
+		return 0;
+
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
@@ -3598,26 +3834,12 @@
 err:
 	return level;
 }
+
 static int be_get_initial_config(struct be_adapter *adapter)
 {
 	int status;
 	u32 level;
 
-	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
-			&adapter->function_mode, &adapter->function_caps);
-	if (status)
-		return status;
-
-	if (adapter->function_mode & FLEX10_MODE)
-		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
-	else
-		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
-
-	if (be_physfn(adapter))
-		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
-	else
-		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
 	status = be_cmd_get_cntl_attributes(adapter);
 	if (status)
 		return status;
@@ -3642,55 +3864,6 @@
 	return 0;
 }
 
-static int be_dev_type_check(struct be_adapter *adapter)
-{
-	struct pci_dev *pdev = adapter->pdev;
-	u32 sli_intf = 0, if_type;
-
-	switch (pdev->device) {
-	case BE_DEVICE_ID1:
-	case OC_DEVICE_ID1:
-		adapter->generation = BE_GEN2;
-		break;
-	case BE_DEVICE_ID2:
-	case OC_DEVICE_ID2:
-		adapter->generation = BE_GEN3;
-		break;
-	case OC_DEVICE_ID3:
-	case OC_DEVICE_ID4:
-		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
-						SLI_INTF_IF_TYPE_SHIFT;
-		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
-						SLI_INTF_IF_TYPE_SHIFT;
-		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
-			!be_type_2_3(adapter)) {
-			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
-			return -EINVAL;
-		}
-		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
-					 SLI_INTF_FAMILY_SHIFT);
-		adapter->generation = BE_GEN3;
-		break;
-	case OC_DEVICE_ID5:
-		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
-			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
-			return -EINVAL;
-		}
-		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
-					 SLI_INTF_FAMILY_SHIFT);
-		adapter->generation = BE_GEN3;
-		break;
-	default:
-		adapter->generation = 0;
-	}
-
-	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
-	return 0;
-}
-
 static int lancer_recover_func(struct be_adapter *adapter)
 {
 	int status;
@@ -3721,8 +3894,9 @@
 		"Adapter SLIPORT recovery succeeded\n");
 	return 0;
 err:
-	dev_err(&adapter->pdev->dev,
-		"Adapter SLIPORT recovery failed\n");
+	if (adapter->eeh_error)
+		dev_err(&adapter->pdev->dev,
+			"Adapter SLIPORT recovery failed\n");
 
 	return status;
 }
@@ -3845,11 +4019,6 @@
 	adapter = netdev_priv(netdev);
 	adapter->pdev = pdev;
 	pci_set_drvdata(pdev, adapter);
-
-	status = be_dev_type_check(adapter);
-	if (status)
-		goto free_netdev;
-
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -4023,9 +4192,6 @@
 
 	netif_device_detach(adapter->netdev);
 
-	if (adapter->wol)
-		be_setup_wol(adapter, true);
-
 	be_cmd_reset_function(adapter);
 
 	pci_disable_device(pdev);
@@ -4061,9 +4227,13 @@
 
 	/* The error could cause the FW to trigger a flash debug dump.
 	 * Resetting the card while flash dump is in progress
-	 * can cause it not to recover; wait for it to finish
+	 * can cause it not to recover; wait for it to finish.
+	 * Wait only for first function as it is needed only once per
+	 * adapter.
 	 */
-	ssleep(30);
+	if (pdev->devfn == 0)
+		ssleep(30);
+
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index deecc44..55d32aa 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -47,10 +47,7 @@
 		dev_info.dpp_unmapped_len = 0;
 	}
 	dev_info.pdev = adapter->pdev;
-	if (adapter->sli_family == SKYHAWK_SLI_FAMILY)
-		dev_info.db = adapter->db;
-	else
-		dev_info.db = adapter->roce_db.base;
+	dev_info.db = adapter->db;
 	dev_info.unmapped_db = adapter->roce_db.io_addr;
 	dev_info.db_page_size = adapter->roce_db.size;
 	dev_info.db_total_size = adapter->roce_db.total_size;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index feff516..5ba6e1c 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -92,4 +92,13 @@
 	  This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
 	  and MPC86xx family of chips, and the FEC on the 8540.
 
+config FEC_PTP
+	bool "PTP Hardware Clock (PHC)"
+	depends on FEC && ARCH_MXC
+	select PTP_1588_CLOCK
+	default y if SOC_IMX6Q
+	--help---
+	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
+	  driver.  Only the basic clock operations have been implemented.
+
 endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3d1839a..d4d19b3 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_PTP) += fec_ptp.o
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 	obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fffd205..2665162 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -140,21 +140,6 @@
 #endif
 #endif /* CONFIG_M5272 */
 
-/* The number of Tx and Rx buffers.  These are allocated from the page
- * pool.  The code may assume these are power of two, so it it best
- * to keep them that size.
- * We don't need to allocate pages for the transmitter.  We just use
- * the skbuffer directly.
- */
-#define FEC_ENET_RX_PAGES	8
-#define FEC_ENET_RX_FRSIZE	2048
-#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
-#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
-#define FEC_ENET_TX_FRSIZE	2048
-#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE		16	/* Must be power of two */
-#define TX_RING_MOD_MASK	15	/*   for this to work */
-
 #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
 #error "FEC: descriptor ring size constants too large"
 #endif
@@ -179,9 +164,6 @@
 #define PKT_MINBUF_SIZE		64
 #define PKT_MAXBLR_SIZE		1520
 
-/* This device has up to three irqs on some platforms */
-#define FEC_IRQ_NUM		3
-
 /*
  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
  * size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@
 #define	OPT_FRAME_SIZE	0
 #endif
 
-/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
- * tx_bd_base always point to the base of the buffer descriptors.  The
- * cur_rx and cur_tx point to the currently available buffer.
- * The dirty_tx tracks the current buffer that is being sent by the
- * controller.  The cur_tx and dirty_tx are equal under both completely
- * empty and completely full conditions.  The empty/ready indicator in
- * the buffer descriptor determines the actual condition.
- */
-struct fec_enet_private {
-	/* Hardware registers of the FEC device */
-	void __iomem *hwp;
-
-	struct net_device *netdev;
-
-	struct clk *clk_ipg;
-	struct clk *clk_ahb;
-
-	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
-	unsigned char *tx_bounce[TX_RING_SIZE];
-	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
-	struct	sk_buff* rx_skbuff[RX_RING_SIZE];
-	ushort	skb_cur;
-	ushort	skb_dirty;
-
-	/* CPM dual port RAM relative addresses */
-	dma_addr_t	bd_dma;
-	/* Address of Rx and Tx buffers */
-	struct bufdesc	*rx_bd_base;
-	struct bufdesc	*tx_bd_base;
-	/* The next free ring entry */
-	struct bufdesc	*cur_rx, *cur_tx;
-	/* The ring entries to be free()ed */
-	struct bufdesc	*dirty_tx;
-
-	uint	tx_full;
-	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
-	spinlock_t hw_lock;
-
-	struct	platform_device *pdev;
-
-	int	opened;
-	int	dev_id;
-
-	/* Phylib and MDIO interface */
-	struct	mii_bus *mii_bus;
-	struct	phy_device *phy_dev;
-	int	mii_timeout;
-	uint	phy_speed;
-	phy_interface_t	phy_interface;
-	int	link;
-	int	full_duplex;
-	struct	completion mdio_done;
-	int	irq[FEC_IRQ_NUM];
-};
-
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST		(1 << 30)
 #define FEC_MMFR_OP_READ	(2 << 28)
@@ -353,6 +280,17 @@
 			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+	bdp->cbd_bdu = 0;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+			fep->hwts_tx_en)) {
+			bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+	} else {
+
+		bdp->cbd_esc = BD_ENET_TX_INT;
+	}
+#endif
 	/* Trigger transmission start */
 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
@@ -510,10 +448,17 @@
 		writel(1 << 8, fep->hwp + FEC_X_WMRK);
 	}
 
+#ifdef CONFIG_FEC_PTP
+	ecntl |= (1 << 4);
+#endif
+
 	/* And last, enable the transmit and receive processing */
 	writel(ecntl, fep->hwp + FEC_ECNTRL);
 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
+#ifdef CONFIG_FEC_PTP
+	fec_ptp_start_cyclecounter(ndev);
+#endif
 	/* Enable interrupts we wish to service */
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
@@ -599,6 +544,19 @@
 			ndev->stats.tx_packets++;
 		}
 
+#ifdef CONFIG_FEC_PTP
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			unsigned long flags;
+
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			spin_lock_irqsave(&fep->tmreg_lock, flags);
+			shhwtstamps.hwtstamp = ns_to_ktime(
+				timecounter_cyc2time(&fep->tc, bdp->ts));
+			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+			skb_tstamp_tx(skb, &shhwtstamps);
+		}
+#endif
 		if (status & BD_ENET_TX_READY)
 			printk("HEY! Enet xmit interrupt and TX_READY.\n");
 
@@ -725,6 +683,21 @@
 			skb_put(skb, pkt_len - 4);	/* Make room */
 			skb_copy_to_linear_data(skb, data, pkt_len - 4);
 			skb->protocol = eth_type_trans(skb, ndev);
+#ifdef CONFIG_FEC_PTP
+			/* Get receive timestamp from the skb */
+			if (fep->hwts_rx_en) {
+				struct skb_shared_hwtstamps *shhwtstamps =
+							    skb_hwtstamps(skb);
+				unsigned long flags;
+
+				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+				spin_lock_irqsave(&fep->tmreg_lock, flags);
+				shhwtstamps->hwtstamp = ns_to_ktime(
+				    timecounter_cyc2time(&fep->tc, bdp->ts));
+				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+			}
+#endif
 			if (!skb_defer_rx_timestamp(skb))
 				netif_rx(skb);
 		}
@@ -739,6 +712,12 @@
 		status |= BD_ENET_RX_EMPTY;
 		bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+		bdp->cbd_prot = 0;
+		bdp->cbd_bdu = 0;
+#endif
+
 		/* Update BD pointer to next entry */
 		if (status & BD_ENET_RX_WRAP)
 			bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@
 	if (!phydev)
 		return -ENODEV;
 
+#ifdef CONFIG_FEC_PTP
+	if (cmd == SIOCSHWTSTAMP)
+		return fec_ptp_ioctl(ndev, rq, cmd);
+#endif
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
@@ -1224,6 +1207,9 @@
 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
 		bdp++;
 	}
 
@@ -1237,6 +1223,10 @@
 
 		bdp->cbd_sc = 0;
 		bdp->cbd_bufaddr = 0;
+
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
 		bdp++;
 	}
 
@@ -1638,9 +1628,19 @@
 		goto failed_clk;
 	}
 
+#ifdef CONFIG_FEC_PTP
+	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+	if (IS_ERR(fep->clk_ptp)) {
+		ret = PTR_ERR(fep->clk_ptp);
+		goto failed_clk;
+	}
+#endif
+
 	clk_prepare_enable(fep->clk_ahb);
 	clk_prepare_enable(fep->clk_ipg);
-
+#ifdef CONFIG_FEC_PTP
+	clk_prepare_enable(fep->clk_ptp);
+#endif
 	reg_phy = devm_regulator_get(&pdev->dev, "phy");
 	if (!IS_ERR(reg_phy)) {
 		ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@
 	if (ret)
 		goto failed_register;
 
+#ifdef CONFIG_FEC_PTP
+	fec_ptp_init(ndev, pdev);
+#endif
+
 	return 0;
 
 failed_register:
@@ -1677,6 +1681,9 @@
 failed_regulator:
 	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
+#ifdef CONFIG_FEC_PTP
+	clk_disable_unprepare(fep->clk_ptp);
+#endif
 failed_pin:
 failed_clk:
 	for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1709,6 +1716,12 @@
 		if (irq > 0)
 			free_irq(irq, ndev);
 	}
+#ifdef CONFIG_FEC_PTP
+	del_timer_sync(&fep->time_keep);
+	clk_disable_unprepare(fep->clk_ptp);
+	if (fep->ptp_clock)
+		ptp_clock_unregister(fep->ptp_clock);
+#endif
 	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
 	iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8408c62..c5a3bc1 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,6 +13,12 @@
 #define	FEC_H
 /****************************************************************************/
 
+#ifdef CONFIG_FEC_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif
+
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
     defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
     defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */
 	unsigned long cbd_bufaddr;	/* Buffer address */
+#ifdef CONFIG_FEC_PTP
+	unsigned long cbd_esc;
+	unsigned long cbd_prot;
+	unsigned long cbd_bdu;
+	unsigned long ts;
+	unsigned short res0[4];
+#endif
 };
 #else
 struct bufdesc {
@@ -147,6 +160,112 @@
 #define BD_ENET_TX_CSL          ((ushort)0x0001)
 #define BD_ENET_TX_STATS        ((ushort)0x03ff)        /* All status bits */
 
+/* Enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT          0x40000000
+#define BD_ENET_TX_TS           0x20000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM		3
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+
+#define FEC_ENET_RX_PAGES	8
+#define FEC_ENET_RX_FRSIZE	2048
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE	2048
+#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE		16	/* Must be power of two */
+#define TX_RING_MOD_MASK	15	/*   for this to work */
+
+#define BD_ENET_RX_INT          0x00800000
+#define BD_ENET_RX_PTP          ((ushort)0x0400)
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+	/* Hardware registers of the FEC device */
+	void __iomem *hwp;
+
+	struct net_device *netdev;
+
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+#ifdef CONFIG_FEC_PTP
+	struct clk *clk_ptp;
+#endif
+
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	unsigned char *tx_bounce[TX_RING_SIZE];
+	struct	sk_buff *tx_skbuff[TX_RING_SIZE];
+	struct	sk_buff *rx_skbuff[RX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses */
+	dma_addr_t	bd_dma;
+	/* Address of Rx and Tx buffers */
+	struct bufdesc	*rx_bd_base;
+	struct bufdesc	*tx_bd_base;
+	/* The next free ring entry */
+	struct bufdesc	*cur_rx, *cur_tx;
+	/* The ring entries to be free()ed */
+	struct bufdesc	*dirty_tx;
+
+	uint	tx_full;
+	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+	spinlock_t hw_lock;
+
+	struct	platform_device *pdev;
+
+	int	opened;
+	int	dev_id;
+
+	/* Phylib and MDIO interface */
+	struct	mii_bus *mii_bus;
+	struct	phy_device *phy_dev;
+	int	mii_timeout;
+	uint	phy_speed;
+	phy_interface_t	phy_interface;
+	int	link;
+	int	full_duplex;
+	struct	completion mdio_done;
+	int	irq[FEC_IRQ_NUM];
+
+#ifdef CONFIG_FEC_PTP
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_caps;
+	unsigned long last_overflow_check;
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
+	int rx_hwtstamp_filter;
+	u32 base_incval;
+	u32 cycle_speed;
+	int hwts_rx_en;
+	int hwts_tx_en;
+	struct timer_list time_keep;
+#endif
+
+};
+
+#ifdef CONFIG_FEC_PTP
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+#endif
 
 /****************************************************************************/
 #endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 0000000..c40526c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,383 @@
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE                0x00002000
+#define FEC_T_CTRL_CAPTURE              0x00000800
+#define FEC_T_CTRL_RESTART              0x00000200
+#define FEC_T_CTRL_PERIOD_RST           0x00000030
+#define FEC_T_CTRL_PERIOD_EN		0x00000010
+#define FEC_T_CTRL_ENABLE               0x00000001
+
+#define FEC_T_INC_MASK                  0x0000007f
+#define FEC_T_INC_OFFSET                0
+#define FEC_T_INC_CORR_MASK             0x00007f00
+#define FEC_T_INC_CORR_OFFSET           8
+
+#define FEC_ATIME_CTRL		0x400
+#define FEC_ATIME		0x404
+#define FEC_ATIME_EVT_OFFSET	0x408
+#define FEC_ATIME_EVT_PERIOD	0x40c
+#define FEC_ATIME_CORR		0x410
+#define FEC_ATIME_INC		0x414
+#define FEC_TS_TIMESTAMP	0x418
+
+#define FEC_CC_MULT	(1 << 31)
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * This function reads the cycle counter registers; it is called through
+ * the cyclecounter structure to construct a ns counter from the
+ * arbitrary fixed point registers.
+ */
+static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+{
+	struct fec_enet_private *fep =
+		container_of(cc, struct fec_enet_private, cc);
+	u32 tempval;
+
+	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+	tempval |= FEC_T_CTRL_CAPTURE;
+	writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+	return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * This function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed point cycles registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned long flags;
+	int inc;
+
+	inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+
+	/* grab the ptp lock */
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	/* 1ns counter */
+	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+	/* use free running count */
+	writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+	writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
+
+	memset(&fep->cc, 0, sizeof(fep->cc));
+	fep->cc.read = fec_ptp_read;
+	fep->cc.mask = CLOCKSOURCE_MASK(32);
+	fep->cc.shift = 31;
+	fep->cc.mult = FEC_CC_MULT;
+
+	/* reset the ns time counter */
+	timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because frequency adjustment in the ENET hardware is complex,
+ * the adjustment is done in software by scaling the cyclecounter mult.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 diff;
+	unsigned long flags;
+	int neg_adj = 0;
+	u32 mult = FEC_CC_MULT;
+
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+
+	if (ppb < 0) {
+		ppb = -ppb;
+		neg_adj = 1;
+	}
+
+	diff = mult;
+	diff *= ppb;
+	diff = div_u64(diff, 1000000000ULL);
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	/*
+	 * Dummy read to set cycle_last in the timecounter to now, so the
+	 * adjusted mult takes effect from the next timecounter_read()
+	 * call onwards.
+	 */
+	timecounter_read(&fep->tc);
+
+	fep->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	unsigned long flags;
+	u64 now;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	now = timecounter_read(&fep->tc);
+	now += delta;
+
+	/* reset the timecounter */
+	timecounter_init(&fep->tc, &fep->cc, now);
+
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct fec_enet_private *adapter =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_read(&adapter->tc);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+			   const struct timespec *ts)
+{
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+
+	u64 ns;
+	unsigned long flags;
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	timecounter_init(&fep->tc, &fep->cc, ns);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+	return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ * Ancillary clock features are not supported, so this always returns
+ * -EOPNOTSUPP.
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @ndev: pointer to net_device
+ * @ifr: ioctl data
+ * @cmd: particular ioctl requested
+ */
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	struct hwtstamp_config config;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		fep->hwts_tx_en = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		fep->hwts_tx_en = 1;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		if (fep->hwts_rx_en)
+			fep->hwts_rx_en = 0;
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+
+	default:
+		/*
+		 * The hardware cannot filter by PTP message type or version,
+		 * so time stamp all incoming packets and report
+		 * HWTSTAMP_FILTER_ALL back to the caller.
+		 */
+		fep->hwts_rx_en = 1;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	}
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+	    -EFAULT : 0;
+}
+
+/**
+ * fec_time_keep - call timecounter_read() every second to avoid counter
+ *                 overflow, because the ENET hardware only provides a 32-bit
+ *                 counter which wraps after roughly four seconds
+ */
+static void fec_time_keep(unsigned long _data)
+{
+	struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+	u64 ns;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	ns = timecounter_read(&fep->tc);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	mod_timer(&fep->time_keep, jiffies + HZ);
+}
+
+/**
+ * fec_ptp_init
+ * @ndev: The FEC network adapter
+ * @pdev: The platform device that backs the adapter
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	fep->ptp_caps.owner = THIS_MODULE;
+	snprintf(fep->ptp_caps.name, 16, "fec ptp");
+
+	fep->ptp_caps.max_adj = 250000000;
+	fep->ptp_caps.n_alarm = 0;
+	fep->ptp_caps.n_ext_ts = 0;
+	fep->ptp_caps.n_per_out = 0;
+	fep->ptp_caps.pps = 0;
+	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+	fep->ptp_caps.adjtime = fec_ptp_adjtime;
+	fep->ptp_caps.gettime = fec_ptp_gettime;
+	fep->ptp_caps.settime = fec_ptp_settime;
+	fep->ptp_caps.enable = fec_ptp_enable;
+
+	spin_lock_init(&fep->tmreg_lock);
+
+	fec_ptp_start_cyclecounter(ndev);
+
+	init_timer(&fep->time_keep);
+	fep->time_keep.data = (unsigned long)fep;
+	fep->time_keep.function = fec_time_keep;
+	fep->time_keep.expires = jiffies + HZ;
+	add_timer(&fep->time_keep);
+
+	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+	if (IS_ERR(fep->ptp_clock)) {
+		fep->ptp_clock = NULL;
+		pr_err("ptp_clock_register failed\n");
+	} else {
+		pr_info("registered PHC device on %s\n", ndev->name);
+	}
+}
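
The driver programs the hardware counter to tick in 1 ns steps and then sets cc.mult = 1 << 31 with cc.shift = 31, so the timecounter scale factor is exactly 1 ns per count; fec_ptp_adjfreq() skews the clock purely in software by scaling mult by ppb / 1e9. A standalone user-space sketch of that arithmetic (the constants mirror the driver above; the ppb and delta values are made-up examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mult = 1ULL << 31;	/* FEC_CC_MULT */
	unsigned int shift = 31;	/* fep->cc.shift */
	int32_t ppb = 100;		/* hypothetical correction, parts per billion */

	/* diff = mult * |ppb| / 1e9, as in fec_ptp_adjfreq() */
	uint64_t diff = (mult * (uint64_t)(ppb < 0 ? -ppb : ppb)) / 1000000000ULL;
	uint64_t adj_mult = (ppb < 0) ? mult - diff : mult + diff;

	/* timecounter math: ns = (counter delta * mult) >> shift */
	uint64_t delta = 1000000;	/* made-up raw counter delta */
	uint64_t ns = (delta * adj_mult) >> shift;

	printf("adjusted mult=%llu: %llu counts -> %llu ns\n",
	       (unsigned long long)adj_mult, (unsigned long long)delta,
	       (unsigned long long)ns);
	return 0;
}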
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19ac096..bffb2ed 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -210,7 +210,7 @@
 				skb = gfar_new_skb(ndev);
 				if (!skb) {
 					netdev_err(ndev, "Can't allocate RX buffers\n");
-					goto err_rxalloc_fail;
+					return -ENOMEM;
 				}
 				rx_queue->rx_skbuff[j] = skb;
 
@@ -223,10 +223,6 @@
 	}
 
 	return 0;
-
-err_rxalloc_fail:
-	free_skb_resources(priv);
-	return -ENOMEM;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -1359,7 +1355,11 @@
 		return 0;
 	}
 
-	gfar_init_bds(ndev);
+	if (gfar_init_bds(ndev)) {
+		free_skb_resources(priv);
+		return -ENOMEM;
+	}
+
 	init_registers(ndev);
 	gfar_set_mac_address(ndev);
 	gfar_init_mac(ndev);
@@ -1712,6 +1712,7 @@
 		tx_queue->tx_skbuff[i] = NULL;
 	}
 	kfree(tx_queue->tx_skbuff);
+	tx_queue->tx_skbuff = NULL;
 }
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
@@ -1735,6 +1736,7 @@
 		rxbdp++;
 	}
 	kfree(rx_queue->rx_skbuff);
+	rx_queue->rx_skbuff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
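
The gianfar change above moves cleanup out of gfar_init_bds() into its caller and NULLs the skbuff arrays after kfree(), so running free_skb_resources() more than once is harmless. A hedged, self-contained sketch of that ownership pattern (the names are hypothetical, not gianfar's):

#include <stdlib.h>

struct ring {
	void **bufs;
	int count;
};

/* Free resources and NULL the pointer so a second cleanup pass is a no-op. */
static void ring_free(struct ring *r)
{
	free(r->bufs);
	r->bufs = NULL;
	r->count = 0;
}

/* Report failure to the caller instead of cleaning up locally. */
static int ring_init(struct ring *r, int count)
{
	r->bufs = calloc(count, sizeof(*r->bufs));
	if (!r->bufs)
		return -1;
	r->count = count;
	return 0;
}

int main(void)
{
	struct ring r = { 0 };

	if (ring_init(&r, 256)) {
		ring_free(&r);	/* the caller owns cleanup, like free_skb_resources() */
		return 1;
	}
	ring_free(&r);
	ring_free(&r);		/* safe: bufs is already NULL */
	return 0;
}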
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index 067db3f..7b9609d 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -638,12 +638,6 @@
 static int
 ether1_open (struct net_device *dev)
 {
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
-			dev->name);
-		return -EINVAL;
-	}
-
 	if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
 		return -EAGAIN;
 
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index b9773d2..6529d31 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -6,7 +6,7 @@
 	bool "IBM devices"
 	default y
 	depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \
-		   (IBMEBUS && INET && SPARSEMEM)
+		   (IBMEBUS && SPARSEMEM)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -33,8 +33,7 @@
 
 config EHEA
 	tristate "eHEA Ethernet support"
-	depends on IBMEBUS && INET && SPARSEMEM
-	select INET_LRO
+	depends on IBMEBUS && SPARSEMEM
 	---help---
 	  This driver supports the IBM pSeries eHEA ethernet adapter.
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f4d2da0..09faf33 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3028,7 +3028,7 @@
 	ehea_set_ethtool_ops(dev);
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
-		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
+		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
 		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
 		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 479e43e..84c6b6c 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -738,13 +738,11 @@
 	/* Synchronize with scheduled polling */
 	napi_disable(&mal->napi);
 
-	if (!list_empty(&mal->list)) {
+	if (!list_empty(&mal->list))
 		/* This is *very* bad */
-		printk(KERN_EMERG
+		WARN(1, KERN_EMERG
 		       "mal%d: commac list is not empty on remove!\n",
 		       mal->index);
-		WARN_ON(1);
-	}
 
 	dev_set_drvdata(&ofdev->dev, NULL);
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4f..ddee406 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@
 config IGB
 	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 	depends on PCI
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 	  adapters.  For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
-config IGB_PTP
-	bool "PTP Hardware Clock (PHC)"
-	default n
-	depends on IGB && EXPERIMENTAL
-	select PPS
-	select PTP_1588_CLOCK
-	---help---
-	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
-	  driver.  Only the basic clock operations have been implemented.
-
-	  Every timestamp and clock read operations must consult the
-	  overflow counter to form a correct time value.
-
 config IGBVF
 	tristate "Intel(R) 82576 Virtual Function Ethernet support"
 	depends on PCI
@@ -178,8 +166,9 @@
 
 config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
-	depends on PCI && INET
+	depends on PCI
 	select MDIO
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
 	  adapters.  For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@
 
 	  If unsure, say N.
 
-config IXGBE_PTP
-	bool "PTP Clock Support"
-	default n
-	depends on IXGBE && EXPERIMENTAL
-	select PPS
-	select PTP_1588_CLOCK
-	---help---
-	  Say Y here if you want support for 1588 Timestamping with a
-	  PHC device, using the PTP 1588 Clock support. This is
-	  required to enable timestamping support for the device.
-
-	  If unsure, say N.
-
 config IXGBEVF
 	tristate "Intel(R) 82599 Virtual Function Ethernet support"
 	depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d68395..8fedd24 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@
 };
 
 static DEFINE_SPINLOCK(e1000_eeprom_lock);
+static DEFINE_SPINLOCK(e1000_phy_lock);
 
 /**
  * e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@
 s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
 {
 	u32 ret_val;
+	unsigned long flags;
 
 	e_dbg("e1000_read_phy_reg");
 
+	spin_lock_irqsave(&e1000_phy_lock, flags);
+
 	if ((hw->phy_type == e1000_phy_igp) &&
 	    (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
 		ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
 						 (u16) reg_addr);
-		if (ret_val)
+		if (ret_val) {
+			spin_unlock_irqrestore(&e1000_phy_lock, flags);
 			return ret_val;
+		}
 	}
 
 	ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
 					phy_data);
+	spin_unlock_irqrestore(&e1000_phy_lock, flags);
 
 	return ret_val;
 }
@@ -2965,19 +2972,25 @@
 s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
 {
 	u32 ret_val;
+	unsigned long flags;
 
 	e_dbg("e1000_write_phy_reg");
 
+	spin_lock_irqsave(&e1000_phy_lock, flags);
+
 	if ((hw->phy_type == e1000_phy_igp) &&
 	    (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
 		ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
 						 (u16) reg_addr);
-		if (ret_val)
+		if (ret_val) {
+			spin_unlock_irqrestore(&e1000_phy_lock, flags);
 			return ret_val;
+		}
 	}
 
 	ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
 					 phy_data);
+	spin_unlock_irqrestore(&e1000_phy_lock, flags);
 
 	return ret_val;
 }
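
The new e1000_phy_lock serializes the two-step page-select-then-access sequence, so concurrent PHY readers and writers cannot interleave between selecting a page and touching a register on it. A user-space analogue of the same pattern, using a pthread mutex in place of the spinlock (compile with -pthread; the register file and values are hypothetical):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t regs[8][32];		/* hypothetical paged register file */
static unsigned int current_page;

static uint16_t phy_read(unsigned int page, unsigned int reg)
{
	uint16_t val;

	pthread_mutex_lock(&phy_lock);
	current_page = page;		/* step 1: select the register page */
	val = regs[current_page][reg];	/* step 2: access within that page */
	pthread_mutex_unlock(&phy_lock);

	return val;
}

int main(void)
{
	regs[7][3] = 0x1234;
	printf("page 7, reg 3 = 0x%04x\n", phy_read(7, 3));
	return 0;
}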
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0..dadb13b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5067,6 +5067,17 @@
 		return NETDEV_TX_OK;
 	}
 
+	/*
+	 * The minimum packet size with TCTL.PSP set is 17 bytes so
+	 * pad skb in order to meet this minimum size requirement
+	 */
+	if (unlikely(skb->len < 17)) {
+		if (skb_pad(skb, 17 - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
+	}
+
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
 		u8 hdr_len;
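
skb_pad() above only extends the buffer with zeroes; the transmit path still has to bump skb->len and the tail pointer so the frame meets the 17-byte minimum required when TCTL.PSP is set. A minimal user-space sketch of the same pad-to-minimum idea on a plain byte buffer (lengths are illustrative):

#include <stdio.h>
#include <string.h>

#define MIN_FRAME_LEN 17

/* Zero-pad buf up to MIN_FRAME_LEN and return the new length. */
static size_t pad_frame(unsigned char *buf, size_t len, size_t bufsz)
{
	if (len >= MIN_FRAME_LEN || bufsz < MIN_FRAME_LEN)
		return len;

	memset(buf + len, 0, MIN_FRAME_LEN - len);
	return MIN_FRAME_LEN;
}

int main(void)
{
	unsigned char frame[64] = { 0xde, 0xad, 0xbe, 0xef };

	printf("padded length: %zu\n", pad_frame(frame, 4, sizeof(frame)));
	return 0;
}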
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197f..624476c 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@
 
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
 	    e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
-	    e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+	    e1000_i210.o igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e..8c12dbd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@
 		nvm->ops.acquire = igb_acquire_nvm_i210;
 		nvm->ops.release = igb_release_nvm_i210;
 		nvm->ops.read    = igb_read_nvm_srrd_i210;
+		nvm->ops.write   = igb_write_nvm_srwr_i210;
 		nvm->ops.valid_led_default = igb_valid_led_default_i210;
 		break;
 	case e1000_i211:
@@ -1277,12 +1278,20 @@
 {
 	u32 ctrl;
 	s32  ret_val;
+	u32 phpm_reg;
 
 	ctrl = rd32(E1000_CTRL);
 	ctrl |= E1000_CTRL_SLU;
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	wr32(E1000_CTRL, ctrl);
 
+	/* Clear Go Link Disconnect bit */
+	if (hw->mac.type >= e1000_82580) {
+		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+	}
+
 	ret_val = igb_setup_serdes_link_82575(hw);
 	if (ret_val)
 		goto out;
@@ -2233,19 +2242,16 @@
 
 	/* enable or disable per user setting */
 	if (!(hw->dev_spec._82575.eee_disable)) {
-		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
-			E1000_IPCNFG_EEE_100M_AN);
-		eeer |= (E1000_EEER_TX_LPI_EN |
-			E1000_EEER_RX_LPI_EN |
+		u32 eee_su = rd32(E1000_EEE_SU);
+
+		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
 			E1000_EEER_LPI_FC);
 
-		/* keep the LPI clock running before EEE is enabled */
-		if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
-			u32 eee_su;
-			eee_su = rd32(E1000_EEE_SU);
-			eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
-			wr32(E1000_EEE_SU, eee_su);
-		}
+		/* This bit should not be set in normal operation. */
+		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+			hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
 	} else {
 		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453..44b76b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@
 #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
 #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
 #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
 
 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
 #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
 
 /* Additional DCA related definitions, note change in position of CPUID */
 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41e..198d148 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -636,6 +636,7 @@
 /* NVM Word Offsets */
 #define NVM_COMPAT                 0x0003
 #define NVM_ID_LED_SETTINGS        0x0004 /* SERDES output amplitude */
+#define NVM_VERSION                0x0005
 #define NVM_INIT_CONTROL2_REG      0x000F
 #define NVM_INIT_CONTROL3_PORT_B   0x0014
 #define NVM_INIT_CONTROL3_PORT_A   0x0024
@@ -653,6 +654,19 @@
 #define NVM_LED_1_CFG              0x001C
 #define NVM_LED_0_2_CFG            0x001F
 
+/* NVM version defines */
+#define NVM_ETRACK_WORD            0x0042
+#define NVM_COMB_VER_OFF           0x0083
+#define NVM_COMB_VER_PTR           0x003d
+#define NVM_MAJOR_MASK             0xF000
+#define NVM_MINOR_MASK             0x0FF0
+#define NVM_BUILD_MASK             0x000F
+#define NVM_COMB_VER_MASK          0x00FF
+#define NVM_MAJOR_SHIFT                12
+#define NVM_MINOR_SHIFT                 4
+#define NVM_COMB_VER_SHFT               8
+#define NVM_VER_INVALID            0xFFFF
+#define NVM_ETRACK_SHIFT               16
 
 #define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
 #define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
@@ -860,6 +874,7 @@
 #define E1000_EEER_FRC_AN            0x10000000  /* Enable EEE in loopback */
 #define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
 #define E1000_EEE_SU_LPI_CLK_STP     0X00800000  /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG           0x20000000  /* EEE capability nego */
 
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f93..4147429 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -423,6 +423,100 @@
 }
 
 /**
+ *  igb_read_invm_version - Reads iNVM version and image type
+ *  @hw: pointer to the HW structure
+ *  @invm_ver: version structure for the version read
+ *
+ *  Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver)
+{
+	u32 *record = NULL;
+	u32 *next_record = NULL;
+	u32 i = 0;
+	u32 invm_dword = 0;
+	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+					     E1000_INVM_RECORD_SIZE_IN_BYTES);
+	u32 buffer[E1000_INVM_SIZE];
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u16 version = 0;
+
+	/* Read iNVM memory */
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		buffer[i] = invm_dword;
+	}
+
+	/* Read version number */
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have first version location used */
+		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+			version = 0;
+			status = E1000_SUCCESS;
+			break;
+		}
+		/* Check if we have second version location used */
+		else if ((i == 1) &&
+			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = E1000_SUCCESS;
+			break;
+		}
+		/* Check if we have odd version location
+		 * used and it is the last one used
+		 */
+		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+			 (i != 1))) {
+			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+				  >> 13;
+			status = E1000_SUCCESS;
+			break;
+		}
+		/* Check if we have even version location
+		 * used and it is the last one used
+		 */
+		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+			 ((*record & 0x3) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = E1000_SUCCESS;
+			break;
+		}
+	}
+
+	if (status == E1000_SUCCESS) {
+		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+					>> E1000_INVM_MAJOR_SHIFT;
+		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+	}
+	/* Read Image Type */
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have image type in first location used */
+		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+			invm_ver->invm_img_type = 0;
+			status = E1000_SUCCESS;
+			break;
+		}
+		/* Check if we have image type in a later location used */
+		else if ((((*record & 0x3) == 0) &&
+			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+			 ((((*record & 0x3) != 0) && (i != 1)))) {
+			invm_ver->invm_img_type =
+				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+			status = E1000_SUCCESS;
+			break;
+		}
+	}
+	return status;
+}
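
Once the last used iNVM word is located, the version number itself is just a packed bitfield, pulled apart with the E1000_INVM_* masks and shift added in e1000_i210.h. A standalone sketch of that decode (the packed value is a made-up example):

#include <stdint.h>
#include <stdio.h>

#define INVM_MAJOR_MASK		0x3F0	/* E1000_INVM_MAJOR_MASK */
#define INVM_MINOR_MASK		0xF	/* E1000_INVM_MINOR_MASK */
#define INVM_MAJOR_SHIFT	4	/* E1000_INVM_MAJOR_SHIFT */

int main(void)
{
	uint16_t version = 0x152;	/* hypothetical packed value */
	unsigned int major = (version & INVM_MAJOR_MASK) >> INVM_MAJOR_SHIFT;
	unsigned int minor = version & INVM_MINOR_MASK;

	printf("iNVM version %u.%u\n", major, minor);
	return 0;
}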
+
+/**
  *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
  *  @hw: pointer to the HW structure
  *
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3..974d235 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@
 extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
 extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
 			       u16 *data);
+extern s32 igb_read_invm_version(struct e1000_hw *hw,
+				 struct e1000_fw_version *invm_ver);
 
 #define E1000_STM_OPCODE		0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
@@ -65,6 +67,15 @@
 
 #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS	8
 #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS	1
+#define E1000_INVM_ULT_BYTES_SIZE			8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES			4
+#define E1000_INVM_VER_FIELD_ONE			0x1FF8
+#define E1000_INVM_VER_FIELD_TWO			0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD			0x1F800000
+
+#define E1000_INVM_MAJOR_MASK		0x3F0
+#define E1000_INVM_MINOR_MASK		0xF
+#define E1000_INVM_MAJOR_SHIFT		4
 
 #define ID_LED_DEFAULT_I210		((ID_LED_OFF1_ON2  << 8) | \
 					 (ID_LED_OFF1_OFF2 <<  4) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145..7acddfe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1391,6 +1391,10 @@
 {
 	s32 ret_val = 0;
 
+	/* All MDI settings are supported on 82580 and newer. */
+	if (hw->mac.type >= e1000_82580)
+		goto out;
+
 	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
 		hw_dbg("Invalid MDI setting detected\n");
 		hw->phy.mdix = 1;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e..e2b2c4b9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
 #include "e1000_phy.h"
 #include "e1000_nvm.h"
 #include "e1000_defines.h"
+#include "e1000_i210.h"
 
 /*
  * Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf..7db3f80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -710,3 +710,74 @@
 out:
 	return ret_val;
 }
+
+/**
+ *  igb_get_fw_version - Get firmware version information
+ *  @hw: pointer to the HW structure
+ *  @fw_vers: pointer to output structure
+ *
+ *  Unsupported MAC types return an all-zero version structure.
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+	u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+	u16 fw_version;
+
+	memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+	switch (hw->mac.type) {
+	case e1000_i211:
+		igb_read_invm_version(hw, fw_vers);
+		return;
+	case e1000_82575:
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i210:
+		break;
+	default:
+		return;
+	}
+	/* basic eeprom version numbers */
+	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+	fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+	/* etrack id */
+	hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+	hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+	fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i350:
+		/* find combo image version */
+		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+		if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+					 + 1), 1, &comb_verh);
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+					 1, &comb_verl);
+
+			/* get Option ROM version if it exists and is valid */
+			if ((comb_verh && comb_verl) &&
+			    ((comb_verh != NVM_VER_INVALID) &&
+			     (comb_verl != NVM_VER_INVALID))) {
+
+				fw_vers->or_valid = true;
+				fw_vers->or_major =
+					comb_verl >> NVM_COMB_VER_SHFT;
+				fw_vers->or_build =
+					((comb_verl << NVM_COMB_VER_SHFT)
+					| (comb_verh >> NVM_COMB_VER_SHFT));
+				fw_vers->or_patch =
+					comb_verh & NVM_COMB_VER_MASK;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+	return;
+}
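
A hedged sketch of how the e1000_fw_version fields gathered above might be rendered into a human-readable string: EEPROM major.minor, the 32-bit eTrack id, and the Option ROM triple when or_valid is set. This is a hypothetical helper for illustration, not the driver's own igb_set_fw_version():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_version {
	uint32_t etrack_id;
	uint16_t eep_major, eep_minor;
	bool or_valid;
	uint16_t or_major, or_build, or_patch;
};

static void format_fw_version(const struct fw_version *v, char *buf, size_t len)
{
	if (v->or_valid)
		snprintf(buf, len, "%u.%u, 0x%08x, %u.%u.%u",
			 v->eep_major, v->eep_minor, (unsigned int)v->etrack_id,
			 v->or_major, v->or_build, v->or_patch);
	else
		snprintf(buf, len, "%u.%u, 0x%08x",
			 v->eep_major, v->eep_minor, (unsigned int)v->etrack_id);
}

int main(void)
{
	struct fw_version v = {
		.etrack_id = 0x800001aa, .eep_major = 1, .eep_minor = 63,
		.or_valid = true, .or_major = 1, .or_build = 2, .or_patch = 3,
	};
	char buf[64];

	format_fw_version(&v, buf, sizeof(buf));
	puts(buf);
	return 0;
}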
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b022..7012d45 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@
 s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
 s32  igb_update_nvm_checksum(struct e1000_hw *hw);
 
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
 #endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc7..fe76004 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@
 	u16 phy_data;
 	bool link;
 
-	/*
-	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
-	 * forced whenever speed and duplex are forced.
-	 */
-	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-	if (ret_val)
-		goto out;
+	/* I210 and I211 devices support Auto-Crossover in forced operation. */
+	if (phy->type != e1000_phy_i210) {
+		/*
+		 * Clear Auto-Crossover to force MDI manually.  M88E1000
+		 * requires MDI forced whenever speed and duplex are forced.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			goto out;
 
-	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-	if (ret_val)
-		goto out;
+		phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+		ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			goto out;
 
-	hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+		hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+	}
 
 	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
@@ -1710,6 +1715,26 @@
 
 	switch (hw->phy.id) {
 	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+		break;
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299..ed282f8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@
 #define E1000_82580_PM_SPD		0x0001 /* Smart Power Down */
 #define E1000_82580_PM_D0_LPLU		0x0002 /* For D0a states */
 #define E1000_82580_PM_D3_LPLU		0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD		0x0020 /* Go Link Disconnect */
 
 /* Enable flexible speed on link-up */
 #define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230..c15a481 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,9 @@
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
-#ifdef CONFIG_IGB_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -132,9 +130,10 @@
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256	256
+#define IGB_RXBUFFER_2048	2048
+#define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE	16
@@ -151,11 +150,18 @@
 
 #define IGB_MNG_VLAN_NONE -1
 
-#define IGB_TX_FLAGS_CSUM		0x00000001
-#define IGB_TX_FLAGS_VLAN		0x00000002
-#define IGB_TX_FLAGS_TSO		0x00000004
-#define IGB_TX_FLAGS_IPV4		0x00000008
-#define IGB_TX_FLAGS_TSTAMP		0x00000010
+enum igb_tx_flags {
+	/* cmd_type flags */
+	IGB_TX_FLAGS_VLAN	= 0x01,
+	IGB_TX_FLAGS_TSO	= 0x02,
+	IGB_TX_FLAGS_TSTAMP	= 0x04,
+
+	/* olinfo flags */
+	IGB_TX_FLAGS_IPV4	= 0x10,
+	IGB_TX_FLAGS_CSUM	= 0x20,
+};
+
+/* VLAN info */
 #define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
@@ -174,11 +180,9 @@
 };
 
 struct igb_rx_buffer {
-	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
-	u32 page_offset;
+	unsigned int page_offset;
 };
 
 struct igb_tx_queue_stats {
@@ -205,22 +209,6 @@
 	u8 itr;				/* current ITR setting for ring */
 };
 
-struct igb_q_vector {
-	struct igb_adapter *adapter;	/* backlink */
-	int cpu;			/* CPU for DCA */
-	u32 eims_value;			/* EIMS mask value */
-
-	struct igb_ring_container rx, tx;
-
-	struct napi_struct napi;
-
-	u16 itr_val;
-	u8 set_itr;
-	void __iomem *itr_register;
-
-	char name[IFNAMSIZ + 9];
-};
-
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
@@ -232,15 +220,17 @@
 	void *desc;			/* descriptor ring memory */
 	unsigned long flags;		/* ring specific flags */
 	void __iomem *tail;		/* pointer to ring tail register */
+	dma_addr_t dma;			/* phys address of the ring */
+	unsigned int  size;		/* length of desc. ring in bytes */
 
 	u16 count;			/* number of desc. in the ring */
 	u8 queue_index;			/* logical index of the ring*/
 	u8 reg_idx;			/* physical index of the ring */
-	u32 size;			/* length of desc. ring in bytes */
 
 	/* everything past this point are written often */
-	u16 next_to_clean ____cacheline_aligned_in_smp;
+	u16 next_to_clean;
 	u16 next_to_use;
+	u16 next_to_alloc;
 
 	union {
 		/* TX */
@@ -251,12 +241,30 @@
 		};
 		/* RX */
 		struct {
+			struct sk_buff *skb;
 			struct igb_rx_queue_stats rx_stats;
 			struct u64_stats_sync rx_syncp;
 		};
 	};
-	/* Items past this point are only used during ring alloc / free */
-	dma_addr_t dma;                /* phys address of the ring */
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+	struct igb_adapter *adapter;	/* backlink */
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
+
+	u16 itr_val;
+	u8 set_itr;
+	void __iomem *itr_register;
+
+	struct igb_ring_container rx, tx;
+
+	struct napi_struct napi;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
 
 enum e1000_ring_flags_t {
@@ -362,8 +370,6 @@
 	u32 eims_other;
 
 	/* to not mess up cache alignment, always add to the bottom */
-	u32 eeprom_wol;
-
 	u16 tx_ring_count;
 	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
@@ -373,7 +379,6 @@
 	u32 wvbr;
 	u32 *shadow_vfta;
 
-#ifdef CONFIG_IGB_PTP
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
 	struct delayed_work ptp_overflow_work;
@@ -382,17 +387,19 @@
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
 
 	char fw_version[32];
 };
 
-#define IGB_FLAG_HAS_MSI           (1 << 0)
-#define IGB_FLAG_DCA_ENABLED       (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A       (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
-#define IGB_FLAG_DMAC              (1 << 4)
-#define IGB_FLAG_PTP               (1 << 5)
+#define IGB_FLAG_HAS_MSI		(1 << 0)
+#define IGB_FLAG_DCA_ENABLED		(1 << 1)
+#define IGB_FLAG_QUAD_PORT_A		(1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS		(1 << 3)
+#define IGB_FLAG_DMAC			(1 << 4)
+#define IGB_FLAG_PTP			(1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP	(1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP	(1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED		(1 << 8)
 
 /* DMA Coalescing defines */
 #define IGB_MIN_TXPBSIZE           20408
@@ -436,18 +443,27 @@
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
 extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
 extern void igb_ptp_init(struct igb_adapter *adapter);
 extern void igb_ptp_stop(struct igb_adapter *adapter);
 extern void igb_ptp_reset(struct igb_adapter *adapter);
 extern void igb_ptp_tx_work(struct work_struct *work);
 extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
-				union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 				struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+				unsigned char *va,
+				struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+				       union e1000_adv_rx_desc *rx_desc,
+				       struct sk_buff *skb)
+{
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+		igb_ptp_rx_rgtstamp(q_vector, skb);
+}
+
 extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
 				  struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
 
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea0128..e2288b5 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/highmem.h>
 
 #include "igb.h"
 
@@ -1685,16 +1686,24 @@
 	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+				  unsigned int frame_size)
 {
-	frame_size /= 2;
-	if (*(skb->data + 3) == 0xFF) {
-		if ((*(skb->data + frame_size + 10) == 0xBE) &&
-		   (*(skb->data + frame_size + 12) == 0xAF)) {
-			return 0;
-		}
-	}
-	return 13;
+	unsigned char *data;
+	bool match = true;
+
+	frame_size >>= 1;
+
+	data = kmap(rx_buffer->page);
+
+	if (data[3] != 0xFF ||
+	    data[frame_size + 10] != 0xBE ||
+	    data[frame_size + 12] != 0xAF)
+		match = false;
+
+	kunmap(rx_buffer->page);
+
+	return match;
 }
 
 static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *rx_buffer_info;
 	struct igb_tx_buffer *tx_buffer_info;
-	struct netdev_queue *txq;
 	u16 rx_ntc, tx_ntc, count = 0;
-	unsigned int total_bytes = 0, total_packets = 0;
 
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@
 		/* check rx buffer */
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
-		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(rx_ring->dev,
-				 rx_buffer_info->dma,
-				 IGB_RX_HDR_LEN,
-				 DMA_FROM_DEVICE);
-		rx_buffer_info->dma = 0;
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer_info->dma,
+					IGB_RX_BUFSZ,
+					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
-		if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+		if (igb_check_lbtest_frame(rx_buffer_info, size))
 			count++;
 
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer_info->dma,
+					   IGB_RX_BUFSZ,
+					   DMA_FROM_DEVICE);
+
 		/* unmap buffer on tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		total_bytes += tx_buffer_info->bytecount;
-		total_packets += tx_buffer_info->gso_segs;
 		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 	}
 
-	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-	netdev_tx_completed_queue(txq, total_packets, total_bytes);
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 
 	/* re-map buffers to ring, store next to clean values */
 	igb_alloc_rx_buffers(rx_ring, count);
@@ -1957,54 +1966,6 @@
 	msleep_interruptible(4 * 1000);
 }
 
-static int igb_wol_exclusion(struct igb_adapter *adapter,
-			     struct ethtool_wolinfo *wol)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	int retval = 1; /* fail by default */
-
-	switch (hw->device_id) {
-	case E1000_DEV_ID_82575GB_QUAD_COPPER:
-		/* WoL not supported */
-		wol->supported = 0;
-		break;
-	case E1000_DEV_ID_82575EB_FIBER_SERDES:
-	case E1000_DEV_ID_82576_FIBER:
-	case E1000_DEV_ID_82576_SERDES:
-		/* Wake events not supported on port B */
-		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
-			wol->supported = 0;
-			break;
-		}
-		/* return success for non excluded adapter ports */
-		retval = 0;
-		break;
-	case E1000_DEV_ID_82576_QUAD_COPPER:
-	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
-		/* quad port adapters only support WoL on port A */
-		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
-			wol->supported = 0;
-			break;
-		}
-		/* return success for non excluded adapter ports */
-		retval = 0;
-		break;
-	default:
-		/* dual port cards only support WoL on port A from now on
-		 * unless it was enabled in the eeprom for port B
-		 * so exclude FUNC_1 ports from having WoL enabled */
-		if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
-		    !adapter->eeprom_wol) {
-			wol->supported = 0;
-			break;
-		}
-
-		retval = 0;
-	}
-
-	return retval;
-}
-
 static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2014,10 +1975,7 @@
 	                 WAKE_PHY;
 	wol->wolopts = 0;
 
-	/* this function will set ->supported = 0 and return 1 if wol is not
-	 * supported by this hardware */
-	if (igb_wol_exclusion(adapter, wol) ||
-	    !device_can_wakeup(&adapter->pdev->dev))
+	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
 		return;
 
 	/* apply any specific unsupported masks here */
@@ -2045,8 +2003,7 @@
 	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
 		return -EOPNOTSUPP;
 
-	if (igb_wol_exclusion(adapter, wol) ||
-	    !device_can_wakeup(&adapter->pdev->dev))
+	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
 		return wol->wolopts ? -EOPNOTSUPP : 0;
 
 	/* these settings will always override what we currently have */
@@ -2301,7 +2258,6 @@
 	struct igb_adapter *adapter = netdev_priv(dev);
 
 	switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
 	case e1000_82576:
 	case e1000_82580:
 	case e1000_i350:
@@ -2337,12 +2293,288 @@
 				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		return 0;
-#endif /* CONFIG_IGB_PTP */
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+				 struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on igb */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
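+		/* fall through */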
+	case UDP_V4_FLOW:
+		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
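+		/* fall through */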
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
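+		/* fall through */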
+	case UDP_V6_FLOW:
+		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
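+		/* fall through */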
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			   u32 *rule_locs)
+{
+	struct igb_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_rx_queues;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		ret = igb_get_rss_hash_opts(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+		       IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+				struct ethtool_rxnfc *nfc)
+{
+	u32 flags = adapter->flags;
+
+	/* RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) ||
+		    !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case UDP_V4_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* if we changed something we need to update flags */
+	if (flags != adapter->flags) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 mrqc = rd32(E1000_MRQC);
+
+		if ((flags & UDP_RSS_FLAGS) &&
+		    !(adapter->flags & UDP_RSS_FLAGS))
+			dev_err(&adapter->pdev->dev,
+				"enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+		adapter->flags = flags;
+
+		/* Perform hash on these packet types */
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+			E1000_MRQC_RSS_FIELD_IPV4_TCP |
+			E1000_MRQC_RSS_FIELD_IPV6 |
+			E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+		mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+			  E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+		if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+			mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+		if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+			mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+		wr32(E1000_MRQC, mrqc);
+	}
+
+	return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct igb_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = igb_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ipcnfg, eeer;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		return -EOPNOTSUPP;
+
+	edata->supported = (SUPPORTED_1000baseT_Full |
+			    SUPPORTED_100baseT_Full);
+
+	ipcnfg = rd32(E1000_IPCNFG);
+	eeer = rd32(E1000_EEER);
+
+	/* EEE status on negotiated link */
+	if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+		edata->advertised = ADVERTISED_1000baseT_Full;
+
+	if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+		edata->advertised |= ADVERTISED_100baseT_Full;
+
+	if (eeer & E1000_EEER_EEE_NEG)
+		edata->eee_active = true;
+
+	edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+	if (eeer & E1000_EEER_TX_LPI_EN)
+		edata->tx_lpi_enabled = true;
+
+	/* Report correct negotiated EEE status for devices that
+	 * wrongly report EEE at half-duplex
+	 */
+	if (adapter->link_duplex == HALF_DUPLEX) {
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->tx_lpi_enabled = false;
+		edata->advertised = 0;
+	}
+
+	return 0;
+}
+
+static int igb_set_eee(struct net_device *netdev,
+		       struct ethtool_eee *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct ethtool_eee eee_curr;
+	s32 ret_val;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		return -EOPNOTSUPP;
+
+	ret_val = igb_get_eee(netdev, &eee_curr);
+	if (ret_val)
+		return ret_val;
+
+	if (eee_curr.eee_enabled) {
+		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+			dev_err(&adapter->pdev->dev,
+				"Setting EEE tx-lpi is not supported\n");
+			return -EINVAL;
+		}
+
+		/* Tx LPI timer is not implemented currently */
+		if (edata->tx_lpi_timer) {
+			dev_err(&adapter->pdev->dev,
+				"Setting EEE Tx LPI timer is not supported\n");
+			return -EINVAL;
+		}
+
+		if (eee_curr.advertised != edata->advertised) {
+			dev_err(&adapter->pdev->dev,
+				"Setting EEE Advertisement is not supported\n");
+			return -EINVAL;
+		}
+
+	} else if (!edata->eee_enabled) {
+		dev_err(&adapter->pdev->dev,
+			"Setting EEE options is not supported with EEE disabled\n");
+		return -EINVAL;
+	}
+
+	if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+		hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+		igb_set_eee_i350(hw);
+
+		/* reset link */
+		if (!netif_running(netdev))
+			igb_reset(adapter);
+	}
+
+	return 0;
+}
+
 static int igb_ethtool_begin(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2615,10 @@
 	.get_coalesce           = igb_get_coalesce,
 	.set_coalesce           = igb_set_coalesce,
 	.get_ts_info            = igb_get_ts_info,
+	.get_rxnfc		= igb_get_rxnfc,
+	.set_rxnfc		= igb_set_rxnfc,
+	.get_eee		= igb_get_eee,
+	.set_eee		= igb_set_eee,
 	.begin			= igb_ethtool_begin,
 	.complete		= igb_ethtool_complete,
 };
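
The new get_rxnfc/set_rxnfc hooks are what user space reaches through ETHTOOL_GRXFH/ETHTOOL_SRXFH (the path used by "ethtool -N <iface> rx-flow-hash udp4 sdfn"). A hedged user-space sketch enabling 4-tuple RSS hashing for UDP over IPv4 on a hypothetical interface "eth0"; error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = UDP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical interface */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");

	close(fd);
	return 0;
}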
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37..b85b15a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -61,7 +61,7 @@
 
 #define MAJ 4
 #define MIN 0
-#define BUILD 1
+#define BUILD 17
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -534,31 +534,27 @@
 
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
-				pr_info("%s[0x%03X]     %016llX %016llX -------"
-					"--------- %p%s\n", "RWB", i,
+				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
+					"RWB", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
-					buffer_info->skb, next_desc);
+					next_desc);
 			} else {
-				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
-					" %p%s\n", "R  ", i,
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
+					"R  ", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
 					(u64)buffer_info->dma,
-					buffer_info->skb, next_desc);
+					next_desc);
 
 				if (netif_msg_pktdata(adapter) &&
-				    buffer_info->dma && buffer_info->skb) {
-					print_hex_dump(KERN_INFO, "",
-						  DUMP_PREFIX_ADDRESS,
-						  16, 1, buffer_info->skb->data,
-						  IGB_RX_HDR_LEN, true);
+				    buffer_info->dma && buffer_info->page) {
 					print_hex_dump(KERN_INFO, "",
 					  DUMP_PREFIX_ADDRESS,
 					  16, 1,
 					  page_address(buffer_info->page) +
 						      buffer_info->page_offset,
-					  PAGE_SIZE/2, true);
+					  IGB_RX_BUFSZ, true);
 				}
 			}
 		}
@@ -656,80 +652,6 @@
 	}
 }
 
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-	int i;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		kfree(adapter->tx_ring[i]);
-		adapter->tx_ring[i] = NULL;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		kfree(adapter->rx_ring[i]);
-		adapter->rx_ring[i] = NULL;
-	}
-	adapter->num_rx_queues = 0;
-	adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
-	struct igb_ring *ring;
-	int i;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
-		if (!ring)
-			goto err;
-		ring->count = adapter->tx_ring_count;
-		ring->queue_index = i;
-		ring->dev = &adapter->pdev->dev;
-		ring->netdev = adapter->netdev;
-		/* For 82575, context index must be unique per ring. */
-		if (adapter->hw.mac.type == e1000_82575)
-			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
-		adapter->tx_ring[i] = ring;
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
-		if (!ring)
-			goto err;
-		ring->count = adapter->rx_ring_count;
-		ring->queue_index = i;
-		ring->dev = &adapter->pdev->dev;
-		ring->netdev = adapter->netdev;
-		/* set flag indicating ring supports SCTP checksum offload */
-		if (adapter->hw.mac.type >= e1000_82576)
-			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
-		/*
-		 * On i350, i210, and i211, loopback VLAN packets
-		 * have the tag byte-swapped.
-		 * */
-		if (adapter->hw.mac.type >= e1000_i350)
-			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
-		adapter->rx_ring[i] = ring;
-	}
-
-	igb_cache_ring_register(adapter);
-
-	return 0;
-
-err:
-	igb_free_queues(adapter);
-
-	return -ENOMEM;
-}
-
 /**
  *  igb_write_ivar - configure ivar for given MSI-X vector
  *  @hw: pointer to the HW structure
@@ -960,6 +882,35 @@
 }
 
 /**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	if (q_vector->tx.ring)
+		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+	if (q_vector->rx.ring)
+		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
+
+	/*
+	 * igb_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
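The kfree_rcu() above defers freeing the q_vector for an RCU grace period so that a reader which picked up the ring pointers under rcu_read_lock() (for example the stats path) never dereferences freed memory. Below is a minimal, hypothetical sketch of that publish/retire pattern; the demo_* names are invented and are not igb code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_vec {
	int packets;
	struct rcu_head rcu;		/* required by kfree_rcu() */
};

static struct demo_vec __rcu *demo_ptr;

/* reader side: safe against a concurrent demo_retire() */
static int demo_read(void)
{
	struct demo_vec *v;
	int ret = 0;

	rcu_read_lock();
	v = rcu_dereference(demo_ptr);
	if (v)
		ret = v->packets;
	rcu_read_unlock();
	return ret;
}

/* writer side: unpublish first, free only after readers are done */
static void demo_retire(void)
{
	struct demo_vec *v = rcu_dereference_protected(demo_ptr, 1);

	RCU_INIT_POINTER(demo_ptr, NULL);
	if (v)
		kfree_rcu(v, rcu);
}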
+
+/**
  * igb_free_q_vectors - Free memory allocated for interrupt vectors
  * @adapter: board private structure to initialize
  *
@@ -969,17 +920,14 @@
  **/
 static void igb_free_q_vectors(struct igb_adapter *adapter)
 {
-	int v_idx;
+	int v_idx = adapter->num_q_vectors;
 
-	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
-		adapter->q_vector[v_idx] = NULL;
-		if (!q_vector)
-			continue;
-		netif_napi_del(&q_vector->napi);
-		kfree(q_vector);
-	}
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
 	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		igb_free_q_vector(adapter, v_idx);
 }
 
 /**
@@ -990,7 +938,6 @@
  */
 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
 {
-	igb_free_queues(adapter);
 	igb_free_q_vectors(adapter);
 	igb_reset_interrupt_capability(adapter);
 }
@@ -1001,7 +948,7 @@
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 {
 	int err;
 	int numvecs, i;
@@ -1038,7 +985,7 @@
 			      adapter->msix_entries,
 			      numvecs);
 	if (err == 0)
-		goto out;
+		return;
 
 	igb_reset_interrupt_capability(adapter);
 
@@ -1068,16 +1015,127 @@
 	adapter->num_q_vectors = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
-out:
-	/* Notify the stack of the (possibly) reduced queue counts. */
-	rtnl_lock();
-	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	err = netif_set_real_num_rx_queues(adapter->netdev,
-		adapter->num_rx_queues);
-	rtnl_unlock();
-	return err;
 }
 
+static void igb_add_ring(struct igb_ring *ring,
+			 struct igb_ring_container *head)
+{
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+			      int v_count, int v_idx,
+			      int txr_count, int txr_idx,
+			      int rxr_count, int rxr_idx)
+{
+	struct igb_q_vector *q_vector;
+	struct igb_ring *ring;
+	int ring_count, size;
+
+	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
+	if (txr_count > 1 || rxr_count > 1)
+		return -ENOMEM;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct igb_q_vector) +
+	       (sizeof(struct igb_ring) * ring_count);
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* initialize NAPI */
+	netif_napi_add(adapter->netdev, &q_vector->napi,
+		       igb_poll, 64);
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx] = q_vector;
+	q_vector->adapter = adapter;
+
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+
+	/* initialize ITR configuration */
+	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+	q_vector->itr_val = IGB_START_ITR;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	if (txr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		igb_add_ring(ring, &q_vector->tx);
+
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = txr_idx;
+
+		/* assign ring to adapter */
+		adapter->tx_ring[txr_idx] = ring;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if (rxr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		igb_add_ring(ring, &q_vector->rx);
+
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+		/*
+		 * On i350, i210, and i211, loopback VLAN packets
+		 * have the tag byte-swapped.
+		 */
+		if (adapter->hw.mac.type >= e1000_i350)
+			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rxr_idx;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[rxr_idx] = ring;
+	}
+
+	return 0;
+}
+
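igb_alloc_q_vector() above obtains the vector and its ring structures with a single kzalloc() sized as sizeof(q_vector) plus ring_count rings, so the ring metadata lives in the same allocation as the vector that services it and both are released together in igb_free_q_vector(). A rough user-space sketch of that sizing trick, using an invented demo_vector type with a trailing ring array:

#include <stdio.h>
#include <stdlib.h>

struct demo_ring { int count; int queue_index; };

struct demo_vector {
	int itr_val;
	struct demo_ring ring[];	/* rings share the allocation */
};

static struct demo_vector *demo_alloc_vector(int ring_count)
{
	size_t size = sizeof(struct demo_vector) +
		      sizeof(struct demo_ring) * ring_count;

	return calloc(1, size);		/* zeroed, like kzalloc() */
}

int main(void)
{
	struct demo_vector *v = demo_alloc_vector(2);	/* 1 Tx + 1 Rx */

	if (!v)
		return 1;
	v->ring[0].queue_index = 0;	/* would be the Tx ring */
	v->ring[1].queue_index = 0;	/* would be the Rx ring */
	printf("vector + 2 rings in one %zu-byte block\n",
	       sizeof(struct demo_vector) + 2 * sizeof(struct demo_ring));
	free(v);
	return 0;
}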
 /**
  * igb_alloc_q_vectors - Allocate memory for interrupt vectors
  * @adapter: board private structure to initialize
@@ -1087,88 +1145,55 @@
  **/
 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 {
-	struct igb_q_vector *q_vector;
-	struct e1000_hw *hw = &adapter->hw;
-	int v_idx;
+	int q_vectors = adapter->num_q_vectors;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int err;
 
-	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-		q_vector = kzalloc(sizeof(struct igb_q_vector),
-				   GFP_KERNEL);
-		if (!q_vector)
+	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+		for (; rxr_remaining; v_idx++) {
+			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+						 0, 0, 1, rxr_idx);
+
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rxr_remaining--;
+			rxr_idx++;
+		}
+	}
+
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+					 tqpv, txr_idx, rqpv, rxr_idx);
+
+		if (err)
 			goto err_out;
-		q_vector->adapter = adapter;
-		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
-		q_vector->itr_val = IGB_START_ITR;
-		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
-		adapter->q_vector[v_idx] = q_vector;
+
+		/* update counts and index */
+		rxr_remaining -= rqpv;
+		txr_remaining -= tqpv;
+		rxr_idx++;
+		txr_idx++;
 	}
 
 	return 0;
 
 err_out:
-	igb_free_q_vectors(adapter);
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		igb_free_q_vector(adapter, v_idx);
+
 	return -ENOMEM;
 }
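When there are more rings than vectors, the loop above shares the leftover rings out with DIV_ROUND_UP() so the earlier vectors absorb the remainder first (igb itself then limits each vector to at most one Tx and one Rx ring). A stand-alone sketch of that distribution arithmetic with invented counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 4, txr_remaining = 2;
	int v_idx;

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d Rx, %d Tx\n", v_idx, rqpv, tqpv);
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}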
 
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
-                                      int ring_idx, int v_idx)
-{
-	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
-
-	q_vector->rx.ring = adapter->rx_ring[ring_idx];
-	q_vector->rx.ring->q_vector = q_vector;
-	q_vector->rx.count++;
-	q_vector->itr_val = adapter->rx_itr_setting;
-	if (q_vector->itr_val && q_vector->itr_val <= 3)
-		q_vector->itr_val = IGB_START_ITR;
-}
-
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
-                                      int ring_idx, int v_idx)
-{
-	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
-
-	q_vector->tx.ring = adapter->tx_ring[ring_idx];
-	q_vector->tx.ring->q_vector = q_vector;
-	q_vector->tx.count++;
-	q_vector->itr_val = adapter->tx_itr_setting;
-	q_vector->tx.work_limit = adapter->tx_work_limit;
-	if (q_vector->itr_val && q_vector->itr_val <= 3)
-		q_vector->itr_val = IGB_START_ITR;
-}
-
-/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
- *
- * This function maps the recently allocated queues to vectors.
- **/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
-{
-	int i;
-	int v_idx = 0;
-
-	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
-	    (adapter->num_q_vectors < adapter->num_tx_queues))
-		return -ENOMEM;
-
-	if (adapter->num_q_vectors >=
-	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
-	} else {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			if (i < adapter->num_tx_queues)
-				igb_map_tx_ring_to_vector(adapter, i, v_idx);
-			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
-		}
-		for (; i < adapter->num_tx_queues; i++)
-			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
-	}
-	return 0;
-}
-
 /**
  * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
  *
@@ -1179,9 +1204,7 @@
 	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
-	err = igb_set_interrupt_capability(adapter);
-	if (err)
-		return err;
+	igb_set_interrupt_capability(adapter);
 
 	err = igb_alloc_q_vectors(adapter);
 	if (err) {
@@ -1189,24 +1212,10 @@
 		goto err_alloc_q_vectors;
 	}
 
-	err = igb_alloc_queues(adapter);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-		goto err_alloc_queues;
-	}
-
-	err = igb_map_ring_to_vector(adapter);
-	if (err) {
-		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
-		goto err_map_queues;
-	}
-
+	igb_cache_ring_register(adapter);
 
 	return 0;
-err_map_queues:
-	igb_free_queues(adapter);
-err_alloc_queues:
-	igb_free_q_vectors(adapter);
+
 err_alloc_q_vectors:
 	igb_reset_interrupt_capability(adapter);
 	return err;
@@ -1229,11 +1238,11 @@
 		if (!err)
 			goto request_done;
 		/* fall back to MSI */
+		igb_free_all_tx_resources(adapter);
+		igb_free_all_rx_resources(adapter);
 		igb_clear_interrupt_scheme(adapter);
 		if (!pci_enable_msi(pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
-		igb_free_all_tx_resources(adapter);
-		igb_free_all_rx_resources(adapter);
 		adapter->num_tx_queues = 1;
 		adapter->num_rx_queues = 1;
 		adapter->num_q_vectors = 1;
@@ -1243,13 +1252,6 @@
 			        "Unable to allocate memory for vectors\n");
 			goto request_done;
 		}
-		err = igb_alloc_queues(adapter);
-		if (err) {
-			dev_err(&pdev->dev,
-			        "Unable to allocate memory for queues\n");
-			igb_free_q_vectors(adapter);
-			goto request_done;
-		}
 		igb_setup_all_tx_resources(adapter);
 		igb_setup_all_rx_resources(adapter);
 	}
@@ -1587,8 +1589,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_mac_info *mac = &hw->mac;
 	struct e1000_fc_info *fc = &hw->fc;
-	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
-	u16 hwm;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
@@ -1663,7 +1664,7 @@
 	hwm = min(((pba << 10) * 9 / 10),
 			((pba << 10) - 2 * adapter->max_frame_size));
 
-	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
+	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
 	fc->low_water = fc->high_water - 16;
 	fc->pause_time = 0xFFFF;
 	fc->send_xon = 1;
@@ -1706,10 +1707,8 @@
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-#ifdef CONFIG_IGB_PTP
 	/* Re-enable PTP, where applicable. */
 	igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
 
 	igb_get_phy_info(hw);
 }
@@ -1783,58 +1782,34 @@
 void igb_set_fw_version(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
-	u16 major, build, patch, fw_version;
-	u32 etrack_id;
+	struct e1000_fw_version fw;
 
-	hw->nvm.ops.read(hw, 5, 1, &fw_version);
-	if (adapter->hw.mac.type != e1000_i211) {
-		hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
-		hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
-		etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
+	igb_get_fw_version(hw, &fw);
 
-		/* combo image version needs to be found */
-		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
-		if ((comb_offset != 0x0) &&
-		    (comb_offset != IGB_NVM_VER_INVALID)) {
-			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
-					 + 1), 1, &comb_verh);
-			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
-					 1, &comb_verl);
+	switch (hw->mac.type) {
+	case e1000_i211:
+		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+			 "%2d.%2d-%d",
+			 fw.invm_major, fw.invm_minor, fw.invm_img_type);
+		break;
 
-			/* Only display Option Rom if it exists and is valid */
-			if ((comb_verh && comb_verl) &&
-			    ((comb_verh != IGB_NVM_VER_INVALID) &&
-			     (comb_verl != IGB_NVM_VER_INVALID))) {
-				major = comb_verl >> IGB_COMB_VER_SHFT;
-				build = (comb_verl << IGB_COMB_VER_SHFT) |
-					(comb_verh >> IGB_COMB_VER_SHFT);
-				patch = comb_verh & IGB_COMB_VER_MASK;
-				snprintf(adapter->fw_version,
-					 sizeof(adapter->fw_version),
-					 "%d.%d%d, 0x%08x, %d.%d.%d",
-					 (fw_version & IGB_MAJOR_MASK) >>
-					 IGB_MAJOR_SHIFT,
-					 (fw_version & IGB_MINOR_MASK) >>
-					 IGB_MINOR_SHIFT,
-					 (fw_version & IGB_BUILD_MASK),
-					 etrack_id, major, build, patch);
-				goto out;
-			}
+	default:
+		/* if option rom is valid, display its version too */
+		if (fw.or_valid) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d, 0x%08x, %d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.etrack_id,
+				 fw.or_major, fw.or_build, fw.or_patch);
+		/* no option rom */
+		} else {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d, 0x%08x",
+				 fw.eep_major, fw.eep_minor, fw.etrack_id);
 		}
-		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
-			 "%d.%d%d, 0x%08x",
-			 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
-			 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
-			 (fw_version & IGB_BUILD_MASK), etrack_id);
-	} else {
-		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
-			 "%d.%d%d",
-			 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
-			 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
-			 (fw_version & IGB_BUILD_MASK));
+		break;
 	}
-out:
 	return;
 }
 
@@ -1861,7 +1836,6 @@
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
-	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u8 part_str[E1000_PBANUM_LENGTH];
 
 	/* Catch broken hardware that put the wrong VF device ID in
@@ -2069,28 +2043,27 @@
 
 	igb_validate_mdi_setting(hw);
 
-	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
-	 * enable the ACPI Magic Packet filter
-	 */
-
+	/* By default, support wake on port A */
 	if (hw->bus.func == 0)
-		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
-	else if (hw->mac.type >= e1000_82580)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+	/* Check the NVM for wake support on non-port A ports */
+	if (hw->mac.type >= e1000_82580)
 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
 		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
 		                 &eeprom_data);
 	else if (hw->bus.func == 1)
 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 
-	if (eeprom_data & eeprom_apme_mask)
-		adapter->eeprom_wol |= E1000_WUFC_MAG;
+	if (eeprom_data & IGB_EEPROM_APME)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
 
 	/* now that we have the eeprom settings, apply the special cases where
 	 * the eeprom may be wrong or the board simply won't support wake on
 	 * lan on a particular port */
 	switch (pdev->device) {
 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
-		adapter->eeprom_wol = 0;
+		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
 		break;
 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
 	case E1000_DEV_ID_82576_FIBER:
@@ -2098,24 +2071,38 @@
 		/* Wake events only supported on port A for dual fiber
 		 * regardless of eeprom setting */
 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
-			adapter->eeprom_wol = 0;
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
-			adapter->eeprom_wol = 0;
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
 		else
 			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
 		/* Reset for multiple quad port adapters */
 		if (++global_quad_port_a == 4)
 			global_quad_port_a = 0;
 		break;
+	default:
+		/* If the device can't wake, don't set software support */
+		if (!device_can_wakeup(&adapter->pdev->dev))
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
 	}
 
 	/* initialize the wol settings based on the eeprom settings */
-	adapter->wol = adapter->eeprom_wol;
-	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	/* Some vendors want WoL disabled by default, but still supported */
+	if ((hw->mac.type == e1000_i350) &&
+	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+		adapter->wol = 0;
+	}
+
+	device_set_wakeup_enable(&adapter->pdev->dev,
+				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
 
 	/* reset the hardware with the new settings */
 	igb_reset(adapter);
@@ -2141,10 +2128,8 @@
 
 #endif
 
-#ifdef CONFIG_IGB_PTP
 	/* do hw tstamp init after resetting */
 	igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
 
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
@@ -2219,9 +2204,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
 	igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
 
 	/*
 	 * The watchdog timer may be rescheduled, so explicitly
@@ -2531,6 +2514,17 @@
 	if (err)
 		goto err_req_irq;
 
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(adapter->netdev,
+					   adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(adapter->netdev,
+					   adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
@@ -2560,6 +2554,8 @@
 
 	return 0;
 
+err_set_queues:
+	igb_free_irq(adapter);
 err_req_irq:
 	igb_release_hw_control(adapter);
 	igb_power_down_link(adapter);
@@ -2637,10 +2633,8 @@
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = dma_alloc_coherent(dev,
-					   tx_ring->size,
-					   &tx_ring->dma,
-					   GFP_KERNEL);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
@@ -2777,18 +2771,16 @@
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
-
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = dma_alloc_coherent(dev,
-					   rx_ring->size,
-					   &rx_ring->dma,
-					   GFP_KERNEL);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc)
 		goto err;
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
@@ -2893,18 +2885,21 @@
 
 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
 	wr32(E1000_RXCSUM, rxcsum);
-	/*
-	 * Generate RSS hash based on TCP port numbers and/or
-	 * IPv4/v6 src and dst addresses since UDP cannot be
-	 * hashed reliably due to IP fragmentation
-	 */
 
+	/* Generate RSS hash based on packet types, TCP/UDP
+	 * port numbers and/or IPv4/v6 src and dst addresses
+	 */
 	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
 	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
 	       E1000_MRQC_RSS_FIELD_IPV6 |
 	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
 	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
 
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
 	/* If VMDq is enabled then we set the appropriate mode for that, else
 	 * we default to RSS so that an RSS hash is calculated per packet even
 	 * if we are only using one queue */
@@ -3106,16 +3101,10 @@
 
 	/* set descriptor configuration */
 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
-	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#ifdef CONFIG_IGB_PTP
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	if (hw->mac.type >= e1000_82580)
 		srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
 	/* Only set Drop Enable if we are supporting multiple queues */
 	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
 		srrctl |= E1000_SRRCTL_DROP_EN;
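The SRRCTL packet buffer size field counts in 1 KB units, so shifting the buffer size right by E1000_SRRCTL_BSIZEPKT_SHIFT converts bytes to the register encoding. A quick check of that conversion, assuming IGB_RX_BUFSZ is 2048 bytes (half of a 4 KB page) and the shift is 10:

#include <stdio.h>

int main(void)
{
	unsigned int rx_bufsz = 2048;		/* assumed IGB_RX_BUFSZ */
	unsigned int bsizepkt_shift = 10;	/* assumed 1 KB granularity */
	unsigned int field = rx_bufsz >> bsizepkt_shift;

	printf("BSIZEPKT field = %u (%u bytes)\n",
	       field, field << bsizepkt_shift);
	return 0;
}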
@@ -3305,36 +3294,27 @@
 	unsigned long size;
 	u16 i;
 
+	if (rx_ring->skb)
+		dev_kfree_skb(rx_ring->skb);
+	rx_ring->skb = NULL;
+
 	if (!rx_ring->rx_buffer_info)
 		return;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
-		if (buffer_info->dma) {
-			dma_unmap_single(rx_ring->dev,
-			                 buffer_info->dma,
-					 IGB_RX_HDR_LEN,
-					 DMA_FROM_DEVICE);
-			buffer_info->dma = 0;
-		}
 
-		if (buffer_info->skb) {
-			dev_kfree_skb(buffer_info->skb);
-			buffer_info->skb = NULL;
-		}
-		if (buffer_info->page_dma) {
-			dma_unmap_page(rx_ring->dev,
-			               buffer_info->page_dma,
-				       PAGE_SIZE / 2,
-				       DMA_FROM_DEVICE);
-			buffer_info->page_dma = 0;
-		}
-		if (buffer_info->page) {
-			put_page(buffer_info->page);
-			buffer_info->page = NULL;
-			buffer_info->page_offset = 0;
-		}
+		if (!buffer_info->page)
+			continue;
+
+		dma_unmap_page(rx_ring->dev,
+			       buffer_info->dma,
+			       PAGE_SIZE,
+			       DMA_FROM_DEVICE);
+		__free_page(buffer_info->page);
+
+		buffer_info->page = NULL;
 	}
 
 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3323,7 @@
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 }
@@ -4028,6 +4009,9 @@
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
 
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
 	if (!skb_is_gso(skb))
 		return 0;
 
@@ -4148,26 +4132,32 @@
 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 }
 
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+	((_flag <= _result) ? \
+	 ((u32)(_input & _flag) * (_result / _flag)) : \
+	 ((u32)(_input & _flag) / (_flag / _result)))
+
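IGB_SET_FLAG() translates one flag bit in the input into a (usually different) bit in the result without a branch: the masked input is scaled by the constant ratio between the two bit positions, which the compiler can fold into a shift. A worked example with invented bit values, where 0x02 in tx_flags should become 0x20 in the command field (DEMO_SET_FLAG mirrors the macro above):

#include <stdio.h>

#define DEMO_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((unsigned int)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((unsigned int)((_input) & (_flag)) / ((_flag) / (_result))))

int main(void)
{
	unsigned int demo_tx_flag_tso = 0x02;	/* invented request bit */
	unsigned int demo_cmd_tse     = 0x20;	/* invented descriptor bit */

	/* bit set: 0x02 * (0x20 / 0x02) == 0x20 */
	printf("0x%x\n", DEMO_SET_FLAG(0x03, demo_tx_flag_tso, demo_cmd_tse));
	/* bit clear: result is 0 */
	printf("0x%x\n", DEMO_SET_FLAG(0x01, demo_tx_flag_tso, demo_cmd_tse));
	return 0;
}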
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 {
 	/* set type for advanced descriptor with frame checksum insertion */
-	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
-				      E1000_ADVTXD_DCMD_IFCS |
-				      E1000_ADVTXD_DCMD_DEXT);
+	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+		       E1000_ADVTXD_DCMD_DEXT |
+		       E1000_ADVTXD_DCMD_IFCS;
 
 	/* set HW vlan bit if vlan is present */
-	if (tx_flags & IGB_TX_FLAGS_VLAN)
-		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
-
-#ifdef CONFIG_IGB_PTP
-	/* set timestamp bit if present */
-	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
-		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
+	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+				 (E1000_ADVTXD_DCMD_VLE));
 
 	/* set segmentation bits for TSO */
-	if (tx_flags & IGB_TX_FLAGS_TSO)
-		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+				 (E1000_ADVTXD_DCMD_TSE));
+
+	/* set timestamp bit if present */
+	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+				 (E1000_ADVTXD_MAC_TSTAMP));
+
+	/* clear frame checksum insertion bit when skb->no_fcs is set */
+	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
 
 	return cmd_type;
 }
@@ -4178,19 +4168,19 @@
 {
 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
 
-	/* 82575 requires a unique index per ring if any offload is enabled */
-	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
-	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+	/* 82575 requires a unique index per ring */
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		olinfo_status |= tx_ring->reg_idx << 4;
 
 	/* insert L4 checksum */
-	if (tx_flags & IGB_TX_FLAGS_CSUM) {
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+	olinfo_status |= IGB_SET_FLAG(tx_flags,
+				      IGB_TX_FLAGS_CSUM,
+				      (E1000_TXD_POPTS_TXSM << 8));
 
-		/* insert IPv4 checksum */
-		if (tx_flags & IGB_TX_FLAGS_IPV4)
-			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-	}
+	/* insert IPv4 checksum */
+	olinfo_status |= IGB_SET_FLAG(tx_flags,
+				      IGB_TX_FLAGS_IPV4,
+				      (E1000_TXD_POPTS_IXSM << 8));
 
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
@@ -4209,33 +4199,37 @@
 	struct sk_buff *skb = first->skb;
 	struct igb_tx_buffer *tx_buffer;
 	union e1000_adv_tx_desc *tx_desc;
+	struct skb_frag_struct *frag;
 	dma_addr_t dma;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
-	__le32 cmd_type;
+	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
+	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IGB_TX_DESC(tx_ring, i);
 
-	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
-	cmd_type = igb_tx_cmd_type(tx_flags);
+	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;
 
-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	tx_buffer = first;
 
-	for (;;) {
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
 		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
-				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
 
 			i++;
 			tx_desc++;
@@ -4243,18 +4237,18 @@
 				tx_desc = IGB_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+			tx_desc->read.olinfo_status = 0;
 
 			dma += IGB_MAX_DATA_PER_TXD;
 			size -= IGB_MAX_DATA_PER_TXD;
 
-			tx_desc->read.olinfo_status = 0;
 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
 		}
 
 		if (likely(!data_len))
 			break;
 
-		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
 
 		i++;
 		tx_desc++;
@@ -4262,32 +4256,22 @@
 			tx_desc = IGB_TX_DESC(tx_ring, 0);
 			i = 0;
 		}
+		tx_desc->read.olinfo_status = 0;
 
 		size = skb_frag_size(frag);
 		data_len -= size;
 
 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
-				   size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
+				       size, DMA_TO_DEVICE);
 
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.olinfo_status = 0;
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
-		frag++;
 	}
 
-	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
 	/* write last descriptor with RS and EOP bits */
-	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
-	if (unlikely(skb->no_fcs))
-		cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
-	tx_desc->read.cmd_type_len = cmd_type;
+	cmd_type |= size | IGB_TXD_DCMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
 	/* set the timestamp */
 	first->time_stamp = jiffies;
@@ -4372,9 +4356,7 @@
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
-#ifdef CONFIG_IGB_PTP
 	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
 	struct igb_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
@@ -4397,7 +4379,6 @@
 	first->bytecount = skb->len;
 	first->gso_segs = 1;
 
-#ifdef CONFIG_IGB_PTP
 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
 		     !(adapter->ptp_tx_skb))) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4388,6 @@
 		if (adapter->hw.mac.type == e1000_82576)
 			schedule_work(&adapter->ptp_tx_work);
 	}
-#endif /* CONFIG_IGB_PTP */
 
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4447,11 @@
 	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
 	 * in order to meet this minimum size requirement.
 	 */
-	if (skb->len < 17) {
-		if (skb_padto(skb, 17))
+	if (unlikely(skb->len < 17)) {
+		if (skb_pad(skb, 17 - skb->len))
 			return NETDEV_TX_OK;
 		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
 	}
 
 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4781,6 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-#ifdef CONFIG_IGB_PTP
 	if (icr & E1000_ICR_TS) {
 		u32 tsicr = rd32(E1000_TSICR);
 
@@ -4811,7 +4791,6 @@
 			schedule_work(&adapter->ptp_tx_work);
 		}
 	}
-#endif /* CONFIG_IGB_PTP */
 
 	wr32(E1000_EIMS, adapter->eims_other);
 
@@ -4851,45 +4830,63 @@
 }
 
 #ifdef CONFIG_IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+			      struct igb_ring *tx_ring,
+			      int cpu)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+	if (hw->mac.type != e1000_82575)
+		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+
+	/*
+	 * We can enable relaxed ordering for reads, but not writes when
+	 * DCA is enabled.  This is due to a known issue in some chipsets
+	 * which will cause the DCA tag to be cleared.
+	 */
+	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+		  E1000_DCA_TXCTRL_DATA_RRO_EN |
+		  E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+			      struct igb_ring *rx_ring,
+			      int cpu)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+	if (hw->mac.type != e1000_82575)
+		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+
+	/*
+	 * We can enable relaxed ordering for reads, but not writes when
+	 * DCA is enabled.  This is due to a known issue in some chipsets
+	 * which will cause the DCA tag to be cleared.
+	 */
+	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+		  E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
 static void igb_update_dca(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
 
 	if (q_vector->cpu == cpu)
 		goto out_no_update;
 
-	if (q_vector->tx.ring) {
-		int q = q_vector->tx.ring->reg_idx;
-		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-		if (hw->mac.type == e1000_82575) {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		} else {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_TXCTRL_CPUID_SHIFT;
-		}
-		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-	}
-	if (q_vector->rx.ring) {
-		int q = q_vector->rx.ring->reg_idx;
-		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		if (hw->mac.type == e1000_82575) {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		} else {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_RXCTRL_CPUID_SHIFT;
-		}
-		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
-		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
-		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
-		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-	}
+	if (q_vector->tx.ring)
+		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+	if (q_vector->rx.ring)
+		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
 	q_vector->cpu = cpu;
 out_no_update:
 	put_cpu();
@@ -5545,7 +5542,6 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-#ifdef CONFIG_IGB_PTP
 	if (icr & E1000_ICR_TS) {
 		u32 tsicr = rd32(E1000_TSICR);
 
@@ -5556,7 +5552,6 @@
 			schedule_work(&adapter->ptp_tx_work);
 		}
 	}
-#endif /* CONFIG_IGB_PTP */
 
 	napi_schedule(&q_vector->napi);
 
@@ -5599,7 +5594,6 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-#ifdef CONFIG_IGB_PTP
 	if (icr & E1000_ICR_TS) {
 		u32 tsicr = rd32(E1000_TSICR);
 
@@ -5610,7 +5604,6 @@
 			schedule_work(&adapter->ptp_tx_work);
 		}
 	}
-#endif /* CONFIG_IGB_PTP */
 
 	napi_schedule(&q_vector->napi);
 
@@ -5840,6 +5833,181 @@
 	return !!budget;
 }
 
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+			      struct igb_rx_buffer *old_buff)
+{
+	struct igb_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+					 old_buff->page_offset,
+					 IGB_RX_BUFSZ,
+					 DMA_FROM_DEVICE);
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+			    struct igb_rx_buffer *rx_buffer,
+			    union e1000_adv_rx_desc *rx_desc,
+			    struct sk_buff *skb)
+{
+	struct page *page = rx_buffer->page;
+	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+
+	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+			va += IGB_TS_HDR_LEN;
+			size -= IGB_TS_HDR_LEN;
+		}
+
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* we can reuse buffer as-is, just make sure it is local */
+		if (likely(page_to_nid(page) == numa_node_id()))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		put_page(page);
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+
+	/* avoid re-using remote pages */
+	if (unlikely(page_to_nid(page) != numa_node_id()))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+	/*
+	 * since we are the only owner of the page and we need to
+	 * increment it, just set the value to 2 in order to avoid
+	 * an unnecessary locked operation
+	 */
+	atomic_set(&page->_count, 2);
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+		return false;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+#endif
+
+	return true;
+}
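On machines with pages of 8 KB or less, the reuse path above divides each receive page into two IGB_RX_BUFSZ-sized halves and alternates between them by XOR-ing the offset, so a page the driver solely owns keeps cycling between hardware and stack without a new allocation. A tiny sketch of the offset flip, assuming a 4096-byte page and 2048-byte buffers:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned int rx_bufsz = page_size / 2;	/* assumed IGB_RX_BUFSZ */
	unsigned int page_offset = 0;
	int i;

	/* each reuse hands the other half of the page to the hardware */
	for (i = 0; i < 4; i++) {
		printf("fill %d: offset %u..%u\n",
		       i, page_offset, page_offset + rx_bufsz - 1);
		page_offset ^= rx_bufsz;	/* flip to the other half */
	}
	return 0;
}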
+
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+					   union e1000_adv_rx_desc *rx_desc,
+					   struct sk_buff *skb)
+{
+	struct igb_rx_buffer *rx_buffer;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+	/*
+	 * This memory barrier is needed to keep us from reading
+	 * any other fields out of the rx_desc until we know the
+	 * RXD_STAT_DD bit is set
+	 */
+	rmb();
+
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) +
+				  rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						IGB_RX_HDR_LEN);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_failed++;
+			return NULL;
+		}
+
+		/*
+		 * we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+	}
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      IGB_RX_BUFSZ,
+				      DMA_FROM_DEVICE);
+
+	/* pull page into skb */
+	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		igb_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+
+	return skb;
+}
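Because each receive page stays DMA-mapped across many frames, the driver only synchronizes the half-page window it is about to touch: dma_sync_single_range_for_cpu() before the CPU reads it here, and dma_sync_single_range_for_device() in igb_reuse_rx_page() before handing it back. A minimal sketch of that ownership hand-off for a streaming page mapping; demo_rx_window() and the 2048-byte window size are assumptions, not igb code:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void demo_rx_window(struct device *dev, struct page *page,
			   unsigned int offset)
{
	dma_addr_t dma;

	/* map the whole page once; hardware writes one half at a time */
	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return;

	/* ... hardware DMA completes into page + offset ... */

	/* give the 2048-byte window to the CPU before reading it */
	dma_sync_single_range_for_cpu(dev, dma, offset, 2048, DMA_FROM_DEVICE);

	/* ... CPU copies headers / builds the skb ... */

	/* hand the window back to the device for the next receive */
	dma_sync_single_range_for_device(dev, dma, offset, 2048,
					 DMA_FROM_DEVICE);

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
}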
+
 static inline void igb_rx_checksum(struct igb_ring *ring,
 				   union e1000_adv_rx_desc *rx_desc,
 				   struct sk_buff *skb)
@@ -5889,147 +6057,341 @@
 		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
-static void igb_rx_vlan(struct igb_ring *ring,
-			union e1000_adv_rx_desc *rx_desc,
-			struct sk_buff *skb)
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean and prefetches the next descriptor.
+ * If the buffer is an EOP buffer this function exits returning false,
+ * otherwise it returns true to indicate that this is in fact a non-EOP
+ * buffer and the frame continues in the next descriptor.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+			   union e1000_adv_rx_desc *rx_desc)
 {
-	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+		return false;
+
+	return true;
+}
+
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+				    unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 protocol;
+	u8 nexthdr = 0;	/* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	protocol = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (protocol == __constant_htons(ETH_P_8021Q)) {
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		protocol = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!hdr.ipv4->frag_off)
+			nexthdr = hdr.ipv4->protocol;
+	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return hdr.network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	hdr.network += hlen;
+
+	/* finally sort out TCP */
+	if (nexthdr == IPPROTO_TCP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return hdr.network - data;
+
+		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((hdr.network - data) < max_len)
+		return hdr.network - data;
+	else
+		return max_len;
+}
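igb_get_headlen() walks Ethernet, an optional 802.1Q tag, then IPv4/IPv6 and TCP/UDP, and reports how many bytes of recognizable header it found; that value becomes the pull length for the skb's linear area. For common frames the result is easy to check by hand, e.g. untagged IPv4/TCP with no options is 14 + 20 + 20 = 54 bytes and the VLAN-tagged variant is 4 bytes more:

#include <stdio.h>

int main(void)
{
	unsigned int eth = 14, vlan = 4, ipv4 = 20, tcp = 20;

	printf("untagged IPv4/TCP headers: %u bytes\n", eth + ipv4 + tcp);
	printf("802.1Q   IPv4/TCP headers: %u bytes\n",
	       eth + vlan + ipv4 + tcp);
	return 0;
}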
+
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+			  union e1000_adv_rx_desc *rx_desc,
+			  struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+		/* retrieve timestamp from buffer */
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+
+		/* update pointers to remove timestamp header */
+		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+		frag->page_offset += IGB_TS_HDR_LEN;
+		skb->data_len -= IGB_TS_HDR_LEN;
+		skb->len -= IGB_TS_HDR_LEN;
+
+		/* move va to start of packet data */
+		va += IGB_TS_HDR_LEN;
+	}
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	frag->page_offset += pull_len;
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+				union e1000_adv_rx_desc *rx_desc,
+				struct sk_buff *skb)
+{
+
+	if (unlikely((igb_test_staterr(rx_desc,
+				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+		struct net_device *netdev = rx_ring->netdev;
+		if (!(netdev->features & NETIF_F_RXALL)) {
+			dev_kfree_skb_any(skb);
+			return true;
+		}
+	}
+
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb))
+		igb_pull_tail(rx_ring, rx_desc, skb);
+
+	/* if skb_pad returns an error the skb was freed */
+	if (unlikely(skb->len < 60)) {
+		int pad_len = 60 - skb->len;
+
+		if (skb_pad(skb, pad_len))
+			return true;
+		__skb_put(skb, pad_len);
+	}
+
+	return false;
+}
+
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+
+	igb_rx_hash(rx_ring, rx_desc, skb);
+
+	igb_rx_checksum(rx_ring, rx_desc, skb);
+
+	igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+
+	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 		u16 vid;
 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
-		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
+		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
 		else
 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 
 		__vlan_hwaccel_put_tag(skb, vid);
 	}
+
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
-{
-	/* HW will not DMA in data larger than the given buffer, even if it
-	 * parses the (NFS, of course) header to be larger.  In that case, it
-	 * fills the header buffer and spills the rest into the page.
-	 */
-	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
-	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > IGB_RX_HDR_LEN)
-		hlen = IGB_RX_HDR_LEN;
-	return hlen;
-}
-
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
 	struct igb_ring *rx_ring = q_vector->rx.ring;
-	union e1000_adv_rx_desc *rx_desc;
-	const int current_node = numa_node_id();
+	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
-	u16 i = rx_ring->next_to_clean;
 
-	rx_desc = IGB_RX_DESC(rx_ring, i);
+	do {
+		union e1000_adv_rx_desc *rx_desc;
 
-	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
-		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
-		struct sk_buff *skb = buffer_info->skb;
-		union e1000_adv_rx_desc *next_rxd;
-
-		buffer_info->skb = NULL;
-		prefetch(skb->data);
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-
-		next_rxd = IGB_RX_DESC(rx_ring, i);
-		prefetch(next_rxd);
-
-		/*
-		 * This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * RXD_STAT_DD bit is set
-		 */
-		rmb();
-
-		if (!skb_is_nonlinear(skb)) {
-			__skb_put(skb, igb_get_hlen(rx_desc));
-			dma_unmap_single(rx_ring->dev, buffer_info->dma,
-					 IGB_RX_HDR_LEN,
-					 DMA_FROM_DEVICE);
-			buffer_info->dma = 0;
-		}
-
-		if (rx_desc->wb.upper.length) {
-			u16 length = le16_to_cpu(rx_desc->wb.upper.length);
-
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-						buffer_info->page,
-						buffer_info->page_offset,
-						length);
-
-			skb->len += length;
-			skb->data_len += length;
-			skb->truesize += PAGE_SIZE / 2;
-
-			if ((page_count(buffer_info->page) != 1) ||
-			    (page_to_nid(buffer_info->page) != current_node))
-				buffer_info->page = NULL;
-			else
-				get_page(buffer_info->page);
-
-			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
-				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
-			buffer_info->page_dma = 0;
-		}
-
-		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
-			struct igb_rx_buffer *next_buffer;
-			next_buffer = &rx_ring->rx_buffer_info[i];
-			buffer_info->skb = next_buffer->skb;
-			buffer_info->dma = next_buffer->dma;
-			next_buffer->skb = skb;
-			next_buffer->dma = 0;
-			goto next_desc;
-		}
-
-		if (unlikely((igb_test_staterr(rx_desc,
-					       E1000_RXDEXT_ERR_FRAME_ERR_MASK))
-			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
-			dev_kfree_skb_any(skb);
-			goto next_desc;
-		}
-
-#ifdef CONFIG_IGB_PTP
-		igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
-		igb_rx_hash(rx_ring, rx_desc, skb);
-		igb_rx_checksum(rx_ring, rx_desc, skb);
-		igb_rx_vlan(rx_ring, rx_desc, skb);
-
-		total_bytes += skb->len;
-		total_packets++;
-
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
-		napi_gro_receive(&q_vector->napi, skb);
-
-		budget--;
-next_desc:
-		if (!budget)
-			break;
-
-		cleaned_count++;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
 			igb_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
-		/* use prefetched values */
-		rx_desc = next_rxd;
-	}
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-	rx_ring->next_to_clean = i;
+		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+			break;
+
+		/* retrieve a buffer from the ring */
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
+
+		cleaned_count++;
+
+		/* fetch next buffer in frame if non-eop */
+		if (igb_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+		napi_gro_receive(&q_vector->napi, skb);
+
+		/* reset skb pointer */
+		skb = NULL;
+
+		/* update budget accounting */
+		total_packets++;
+	} while (likely(total_packets < budget));
+
+	/* place incomplete frames back on ring for completion */
+	rx_ring->skb = skb;
+
 	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
@@ -6040,73 +6402,44 @@
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return !!budget;
-}
-
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
-				 struct igb_rx_buffer *bi)
-{
-	struct sk_buff *skb = bi->skb;
-	dma_addr_t dma = bi->dma;
-
-	if (dma)
-		return true;
-
-	if (likely(!skb)) {
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						IGB_RX_HDR_LEN);
-		bi->skb = skb;
-		if (!skb) {
-			rx_ring->rx_stats.alloc_failed++;
-			return false;
-		}
-
-		/* initialize skb for ring */
-		skb_record_rx_queue(skb, rx_ring->queue_index);
-	}
-
-	dma = dma_map_single(rx_ring->dev, skb->data,
-			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
-
-	if (dma_mapping_error(rx_ring->dev, dma)) {
-		rx_ring->rx_stats.alloc_failed++;
-		return false;
-	}
-
-	bi->dma = dma;
-	return true;
+	return (total_packets < budget);
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 				  struct igb_rx_buffer *bi)
 {
 	struct page *page = bi->page;
-	dma_addr_t page_dma = bi->page_dma;
-	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+	dma_addr_t dma;
 
-	if (page_dma)
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
 		return true;
 
-	if (!page) {
-		page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
-		bi->page = page;
-		if (unlikely(!page)) {
-			rx_ring->rx_stats.alloc_failed++;
-			return false;
-		}
-	}
-
-	page_dma = dma_map_page(rx_ring->dev, page,
-				page_offset, PAGE_SIZE / 2,
-				DMA_FROM_DEVICE);
-
-	if (dma_mapping_error(rx_ring->dev, page_dma)) {
+	/* alloc new page for storage */
+	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
 	}
 
-	bi->page_dma = page_dma;
-	bi->page_offset = page_offset;
+	/* map page for use */
+	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_page(page);
+
+		rx_ring->rx_stats.alloc_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = 0;
+
 	return true;
 }
 
@@ -6120,22 +6453,23 @@
 	struct igb_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
 
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
 	rx_desc = IGB_RX_DESC(rx_ring, i);
 	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;
 
-	while (cleaned_count--) {
-		if (!igb_alloc_mapped_skb(rx_ring, bi))
-			break;
-
-		/* Refresh the desc even if buffer_addrs didn't change
-		 * because each write-back erases this info. */
-		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
+	do {
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+		/*
+		 * Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
 		rx_desc++;
 		bi++;
@@ -6148,17 +6482,25 @@
 
 		/* clear the hdr_addr for the next_to_use descriptor */
 		rx_desc->read.hdr_addr = 0;
-	}
+
+		cleaned_count--;
+	} while (cleaned_count);
 
 	i += rx_ring->count;
 
 	if (rx_ring->next_to_use != i) {
+		/* record the next descriptor to use */
 		rx_ring->next_to_use = i;
 
-		/* Force memory writes to complete before letting h/w
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch.  (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
 		writel(i, rx_ring->tail);
 	}
@@ -6207,10 +6549,8 @@
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
 		return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
 	case SIOCSHWTSTAMP:
 		return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -6492,7 +6832,9 @@
 	wr32(E1000_WUS, ~0);
 
 	if (netdev->flags & IFF_UP) {
+		rtnl_lock();
 		err = __igb_open(netdev, true);
+		rtnl_unlock();
 		if (err)
 			return err;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445..ab34297 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@
 	adapter->ptp_tx_skb = NULL;
 }
 
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
-			 union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame.  The value is stored in little endian format starting at
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+			 unsigned char *va,
+			 struct sk_buff *skb)
+{
+	__le64 *regval = (__le64 *)va;
+
+	/*
+	 * The timestamp is recorded in little endian format.
+	 * DWORD: 0        1        2        3
+	 * Field: Reserved Reserved SYSTIML  SYSTIMH
+	 */
+	igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+				   le64_to_cpu(regval[1]));
+}
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 			 struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u64 regval;
 
-	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
-				       E1000_RXDADV_STAT_TS))
-		return;
-
 	/*
 	 * If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@
 	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		u32 *stamp = (u32 *)skb->data;
-		regval = le32_to_cpu(*(stamp + 2));
-		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
-		skb_pull(skb, IGB_TS_HDR_LEN);
-	} else {
-		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-			return;
+	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+		return;
 
-		regval = rd32(E1000_RXSTMPL);
-		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-	}
+	regval = rd32(E1000_RXSTMPL);
+	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
 
 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
@@ -532,18 +553,6 @@
 	case HWTSTAMP_FILTER_NONE:
 		tsync_rx_ctl = 0;
 		break;
-	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-	case HWTSTAMP_FILTER_ALL:
-		/*
-		 * register TSYNCRXCFG must be set, therefore it is not
-		 * possible to time stamp both Sync and Delay_Req messages
-		 * => fall back to time stamping all packets
-		 */
-		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-		config.rx_filter = HWTSTAMP_FILTER_ALL;
-		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
@@ -554,31 +563,33 @@
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
 		is_l4 = true;
 		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-		is_l2 = true;
-		is_l4 = true;
-		config.rx_filter = HWTSTAMP_FILTER_SOME;
-		break;
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-		is_l2 = true;
-		is_l4 = true;
-		config.rx_filter = HWTSTAMP_FILTER_SOME;
-		break;
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		is_l2 = true;
 		is_l4 = true;
 		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_ALL:
+		/* 82576 cannot timestamp all packets, which it needs to do to
+		 * support both V1 Sync and Delay_Req messages
+		 */
+		if (hw->mac.type != e1000_82576) {
+			tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+			config.rx_filter = HWTSTAMP_FILTER_ALL;
+			break;
+		}
+		/* fall through */
 	default:
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
 		return -ERANGE;
 	}
 
@@ -596,6 +607,9 @@
 	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
 		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		is_l2 = true;
+		is_l4 = true;
 
 		if ((hw->mac.type == e1000_i210) ||
 		    (hw->mac.type == e1000_i211)) {
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 3e18045..d9fa999 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -46,6 +46,7 @@
 #define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
+#define E1000_RXDEXT_STATERR_LB    0x00040000
 #define E1000_RXDEXT_STATERR_CE    0x01000000
 #define E1000_RXDEXT_STATERR_SE    0x02000000
 #define E1000_RXDEXT_STATERR_SEQ   0x04000000
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a895e2f..fdca7b6 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -295,7 +295,7 @@
 
 /* hardware capability, feature, and workaround flags */
 #define IGBVF_FLAG_RX_CSUM_DISABLED             (1 << 0)
-
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP		(1 << 1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
 	(&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f5..3d92ad8 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "2.0.1-k"
+#define DRV_VERSION "2.0.2-k"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
@@ -107,12 +107,19 @@
                               struct sk_buff *skb,
                               u32 status, u16 vlan)
 {
+	u16 vid;
+
 	if (status & E1000_RXD_STAT_VP) {
-		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+		    (status & E1000_RXDEXT_STATERR_LB))
+			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+		else
+			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
 		if (test_bit(vid, adapter->active_vlans))
 			__vlan_hwaccel_put_tag(skb, vid);
 	}
-	netif_receive_skb(skb);
+
+	napi_gro_receive(&adapter->rx_ring->napi, skb);
 }
 
 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -184,6 +191,13 @@
 				             buffer_info->page_offset,
 				             PAGE_SIZE / 2,
 					     DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev,
+					      buffer_info->page_dma)) {
+				__free_page(buffer_info->page);
+				buffer_info->page = NULL;
+				dev_err(&pdev->dev, "RX DMA map failed\n");
+				break;
+			}
 		}
 
 		if (!buffer_info->skb) {
@@ -197,6 +211,12 @@
 			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 			                                  bufsz,
 							  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+				dev_kfree_skb(buffer_info->skb);
+				buffer_info->skb = NULL;
+				dev_err(&pdev->dev, "RX DMA map failed\n");
+				goto no_buffers;
+			}
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -2754,6 +2774,10 @@
 	/* reset the hardware with the new settings */
 	igbvf_reset(adapter);
 
+	/* set hardware-specific flags */
+	if (adapter->hw.mac.type == e1000_vfadapt_i350)
+		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
+
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e5..f3a632b 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,10 @@
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
 
-ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f..8e78676 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
 #include <linux/aer.h>
 #include <linux/if_vlan.h>
 
-#ifdef CONFIG_IXGBE_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IXGBE_PTP */
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
@@ -135,6 +133,7 @@
 	u16 tx_rate;
 	u16 vlan_count;
 	u8 spoofchk_enabled;
+	unsigned int vf_api;
 };
 
 struct vf_macvlans {
@@ -482,8 +481,9 @@
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED	(u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_ENABLED			(u32)(1 << 10)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 11)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB		(u32)(1 << 12)
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -571,7 +571,6 @@
 	u32 interrupt_event;
 	u32 led_reg;
 
-#ifdef CONFIG_IXGBE_PTP
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
 	unsigned long last_overflow_check;
@@ -580,8 +579,6 @@
 	struct timecounter tc;
 	int rx_hwtstamp_filter;
 	u32 base_incval;
-	u32 cycle_speed;
-#endif /* CONFIG_IXGBE_PTP */
 
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +597,8 @@
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *ixgbe_dbg_adapter;
 #endif /*CONFIG_DEBUG_FS*/
+
+	u8 default_up;
 };
 
 struct ixgbe_fdir_filter {
@@ -691,6 +690,7 @@
 						 u16 soft_id);
 extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 						 union ixgbe_atr_input *mask);
+extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
 extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +739,6 @@
 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-#ifdef CONFIG_IXGBE_PTP
 extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +750,7 @@
 extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
 				    struct ifreq *ifr, int cmd);
 extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
-#endif /* CONFIG_IXGBE_PTP */
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2..e75f5a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@
                                          bool autoneg,
                                          bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 
 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
@@ -99,9 +98,8 @@
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
-	u32 reg_anlp1 = 0;
-	u32 i = 0;
 	u16 list_offset, data_offset, data_value;
+	bool got_lock = false;
 
 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
 		ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@
 		usleep_range(hw->eeprom.semaphore_delay * 1000,
 			     hw->eeprom.semaphore_delay * 2000);
 
-		/* Now restart DSP by setting Restart_AN and clearing LMS */
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
-		                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
-		                IXGBE_AUTOC_AN_RESTART));
+		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
+		 * likewise reset_pipeline requires the lock as it also
+		 * writes AUTOC.
+		 */
+		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val)
+				goto setup_sfp_out;
 
-		/* Wait for AN to leave state 0 */
-		for (i = 0; i < 10; i++) {
-			usleep_range(4000, 8000);
-			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
-				break;
+			got_lock = true;
 		}
-		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
-			hw_dbg(hw, "sfp module setup not complete\n");
+
+		/* Restart DSP and set SFI mode */
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+		ret_val = ixgbe_reset_pipeline_82599(hw);
+
+		if (got_lock) {
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
+			got_lock = false;
+		}
+
+		if (ret_val) {
+			hw_dbg(hw, "sfp module setup not complete\n");
 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 			goto setup_sfp_out;
 		}
-
-		/* Restart DSP by setting Restart_AN and return to SFI mode */
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
-		                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
-		                IXGBE_AUTOC_AN_RESTART));
 	}
 
 setup_sfp_out:
@@ -394,14 +400,26 @@
 	u32 links_reg;
 	u32 i;
 	s32 status = 0;
+	bool got_lock = false;
+
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		status = hw->mac.ops.acquire_swfw_sync(hw,
+						IXGBE_GSSR_MAC_CSR_SM);
+		if (status)
+			goto out;
+
+		got_lock = true;
+	}
 
 	/* Restart link */
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	ixgbe_reset_pipeline_82599(hw);
+
+	if (got_lock)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 
 	/* Only poll for autoneg to complete if specified to do so */
 	if (autoneg_wait_to_complete) {
+		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
 
+out:
 	return status;
 }
 
@@ -779,6 +798,7 @@
 	u32 links_reg;
 	u32 i;
 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+	bool got_lock = false;
 
 	/* Check to see if speed passed in is supported. */
 	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@
 	}
 
 	if (autoc != start_autoc) {
+		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
+		 * likewise reset_pipeline requires us to hold this lock as
+		 * it also writes to AUTOC.
+		 */
+		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			status = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (status != 0)
+				goto out;
+
+			got_lock = true;
+		}
+
 		/* Restart link */
-		autoc |= IXGBE_AUTOC_AN_RESTART;
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+		ixgbe_reset_pipeline_82599(hw);
+
+		if (got_lock)
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
 
 		/* Only poll for autoneg to complete if specified to do so */
 		if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@
 		hw->mac.orig_autoc2 = autoc2;
 		hw->mac.orig_link_settings_stored = true;
 	} else {
-		if (autoc != hw->mac.orig_autoc)
-			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-			                IXGBE_AUTOC_AN_RESTART));
+		if (autoc != hw->mac.orig_autoc) {
+			/* Need SW/FW semaphore around AUTOC writes if LESM is
+			 * on, likewise reset_pipeline requires us to hold
+			 * this lock as it also writes to AUTOC.
+			 */
+			bool got_lock = false;
+			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+				status = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+				if (status)
+					goto reset_hw_out;
+
+				got_lock = true;
+			}
+
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+			ixgbe_reset_pipeline_82599(hw);
+
+			if (got_lock)
+				hw->mac.ops.release_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+		}
 
 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1983,7 +2039,7 @@
  *  Returns true if the LESM FW module is present and enabled. Otherwise
  *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
  **/
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
 {
 	bool lesm_enabled = false;
 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@
 	return ret_val;
 }
 
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset.  Note - We must hold the SW/FW semaphore before writing
+ * to AUTOC, so this function assumes the semaphore is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+	s32 i, autoc_reg, ret_val;
+	s32 anlp1_reg = 0;
+
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+
+	/* Wait for AN to leave state 0 */
+	for (i = 0; i < 10; i++) {
+		usleep_range(4000, 8000);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+			break;
+	}
+
+	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+		hw_dbg(hw, "auto negotiation not completed\n");
+		ret_val = IXGBE_ERR_RESET_FAILED;
+		goto reset_pipeline_out;
+	}
+
+	ret_val = 0;
+
+reset_pipeline_out:
+	/* Write AUTOC register with original LMS field and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4..5af1eeb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,13 +65,12 @@
  *  function check the device id to see if the associated phy supports
  *  autoneg flow control.
  **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 {
 
 	switch (hw->device_id) {
 	case IXGBE_DEV_ID_X540T:
 	case IXGBE_DEV_ID_X540T1:
-		return 0;
 	case IXGBE_DEV_ID_82599_T3_LOM:
 		return 0;
 	default:
@@ -90,6 +89,7 @@
 	s32 ret_val = 0;
 	u32 reg = 0, reg_bp = 0;
 	u16 reg_cu = 0;
+	bool got_lock = false;
 
 	/*
 	 * Validate the requested mode.  Strict IEEE mode does not allow
@@ -210,8 +210,29 @@
 	 *
 	 */
 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
-		reg_bp |= IXGBE_AUTOC_AN_RESTART;
+		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on, likewise reset_pipeline requires the lock as
+		 * it also writes AUTOC.
+		 */
+		if ((hw->mac.type == ixgbe_mac_82599EB) &&
+		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val)
+				goto out;
+
+			got_lock = true;
+		}
+
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+
+		if (hw->mac.type == ixgbe_mac_82599EB)
+			ixgbe_reset_pipeline_82599(hw);
+
+		if (got_lock)
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
+
 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
 		    (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
 		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1778,8 +1799,7 @@
 	else if (IXGBE_IS_BROADCAST(mac_addr))
 		status = IXGBE_ERR_INVALID_MAC_ADDR;
 	/* Reject the zero address */
-	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-	         mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+	else if (is_zero_ether_addr(mac_addr))
 		status = IXGBE_ERR_INVALID_MAC_ADDR;
 
 	return status;
@@ -2617,6 +2637,7 @@
 	bool link_up = false;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = 0;
 
 	/*
 	 * Link must be up to auto-blink the LEDs;
@@ -2625,10 +2646,28 @@
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
 	if (!link_up) {
+		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on.
+		 */
+		bool got_lock = false;
+
+		if ((hw->mac.type == ixgbe_mac_82599EB) &&
+		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val)
+				goto out;
+
+			got_lock = true;
+		}
 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 		autoc_reg |= IXGBE_AUTOC_FLU;
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 		IXGBE_WRITE_FLUSH(hw);
+
+		if (got_lock)
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
 		usleep_range(10000, 20000);
 	}
 
@@ -2637,7 +2676,8 @@
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-	return 0;
+out:
+	return ret_val;
 }
 
 /**
@@ -2649,18 +2689,40 @@
 {
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = 0;
+	bool got_lock = false;
+
+	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+	 * LESM is on.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB) &&
+	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+						IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val)
+			goto out;
+
+		got_lock = true;
+	}
 
 	autoc_reg &= ~IXGBE_AUTOC_FLU;
 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		ixgbe_reset_pipeline_82599(hw);
+
+	if (got_lock)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
 	led_reg &= ~IXGBE_LED_BLINK(index);
 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-	return 0;
+out:
+	return ret_val;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d11..1b65b6c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -78,6 +78,7 @@
 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@@ -107,6 +108,7 @@
 
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
 
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
 #define IXGBE_EMC_INTERNAL_DATA		0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 8d3a218..efaf9a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -37,20 +37,6 @@
 static char ixgbe_dbg_reg_ops_buf[256] = "";
 
 /**
- * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
- * @inode: inode that was opened
- * @filp:  file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer where
- * we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-/**
  * ixgbe_dbg_reg_ops_read - read for reg_ops datum
  * @filp: the opened file
  * @buffer: where to write the data for the user to read
@@ -142,7 +128,7 @@
 
 static const struct file_operations ixgbe_dbg_reg_ops_fops = {
 	.owner = THIS_MODULE,
-	.open =  ixgbe_dbg_reg_ops_open,
+	.open = simple_open,
 	.read =  ixgbe_dbg_reg_ops_read,
 	.write = ixgbe_dbg_reg_ops_write,
 };
@@ -150,20 +136,6 @@
 static char ixgbe_dbg_netdev_ops_buf[256] = "";
 
 /**
- * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
- * @inode: inode that was opened
- * @filp: file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer
- * where we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-/**
  * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
  * @filp: the opened file
  * @buffer: where to write the data for the user to read
@@ -238,7 +210,7 @@
 
 static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
 	.owner = THIS_MODULE,
-	.open = ixgbe_dbg_netdev_ops_open,
+	.open = simple_open,
 	.read = ixgbe_dbg_netdev_ops_read,
 	.write = ixgbe_dbg_netdev_ops_write,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e9..3268584 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -383,6 +383,11 @@
 	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 		return -EINVAL;
 
+	/* some devices do not support autoneg of link flow control */
+	if ((pause->autoneg == AUTONEG_ENABLE) &&
+	    (ixgbe_device_supports_autoneg_fc(hw) != 0))
+		return -EINVAL;
+
 	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
 
 	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
@@ -887,24 +892,23 @@
                                struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+	struct ixgbe_ring *temp_ring;
 	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
-	bool need_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
-	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
-	new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring[0]->count) &&
-	    (new_rx_count == adapter->rx_ring[0]->count)) {
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_count) &&
+	    (new_rx_count == adapter->rx_ring_count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -922,81 +926,80 @@
 		goto clear_reset;
 	}
 
-	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_tx_ring) {
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+	if (!temp_ring) {
 		err = -ENOMEM;
 		goto clear_reset;
 	}
 
+	ixgbe_down(adapter);
+
+	/*
+	 * Set up new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
 	if (new_tx_count != adapter->tx_ring_count) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
-			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+			temp_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(&temp_tx_ring[i]);
-				}
-				goto clear_reset;
-			}
-		}
-		need_update = true;
-	}
-
-	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_rx_ring) {
-		err = -ENOMEM;
-		goto err_setup;
-	}
-
-	if (new_rx_count != adapter->rx_ring_count) {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
-			       sizeof(struct ixgbe_ring));
-			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
-			if (err) {
-				while (i) {
-					i--;
-					ixgbe_free_rx_resources(&temp_rx_ring[i]);
+					ixgbe_free_tx_resources(&temp_ring[i]);
 				}
 				goto err_setup;
 			}
 		}
-		need_update = true;
-	}
 
-	/* if rings need to be updated, here's the place to do it in one shot */
-	if (need_update) {
-		ixgbe_down(adapter);
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			ixgbe_free_tx_resources(adapter->tx_ring[i]);
 
-		/* tx */
-		if (new_tx_count != adapter->tx_ring_count) {
-			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter->tx_ring[i]);
-				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->tx_ring_count = new_tx_count;
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
 		}
 
-		/* rx */
-		if (new_rx_count != adapter->rx_ring_count) {
-			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter->rx_ring[i]);
-				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->rx_ring_count = new_rx_count;
-		}
-		ixgbe_up(adapter);
+		adapter->tx_ring_count = new_tx_count;
 	}
 
-	vfree(temp_rx_ring);
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct ixgbe_ring));
+
+			temp_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(&temp_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbe_free_rx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
+
+		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			ixgbe_free_rx_resources(adapter->rx_ring[i]);
+
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
+		}
+
+		adapter->rx_ring_count = new_rx_count;
+	}
+
 err_setup:
-	vfree(temp_tx_ring);
+	ixgbe_up(adapter);
+	vfree(temp_ring);
 clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
@@ -2669,7 +2672,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
 	switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IXGBE_PTP
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
 		info->so_timestamping =
@@ -2695,7 +2697,6 @@
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
-#endif /* CONFIG_IXGBE_PTP */
 	default:
 		return ethtool_op_get_ts_info(dev, info);
 		break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef1..252850d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@
 		return -EINVAL;
 
 	e_info(drv, "Enabling FCoE offload features.\n");
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbce..8c74f73 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@
 	/* setup affinity mask and node */
 	if (cpu != -1)
 		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
-	else
-		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
 	q_vector->numa_node = node;
 
+#ifdef CONFIG_IXGBE_DCA
+	/* initialize CPU for DCA */
+	q_vector->cpu = -1;
+
+#endif
 	/* initialize NAPI */
 	netif_napi_add(adapter->netdev, &q_vector->napi,
 		       ixgbe_poll, 64);
@@ -821,6 +824,21 @@
 	/* initialize pointer to rings */
 	ring = q_vector->ring;
 
+	/* initialize ITR */
+	if (txr_count && !rxr_count) {
+		/* tx only vector */
+		if (adapter->tx_itr_setting == 1)
+			q_vector->itr = IXGBE_10K_ITR;
+		else
+			q_vector->itr = adapter->tx_itr_setting;
+	} else {
+		/* rx or rx/tx vector */
+		if (adapter->rx_itr_setting == 1)
+			q_vector->itr = IXGBE_20K_ITR;
+		else
+			q_vector->itr = adapter->rx_itr_setting;
+	}
+
 	while (txr_count) {
 		/* assign generic ring traits */
 		ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552..484bbed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
 #include <linux/ethtool.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 
@@ -62,11 +63,7 @@
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define MAJ 3
-#define MIN 9
-#define BUILD 15
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-	__stringify(BUILD) "-k"
+#define DRV_VERSION "3.11.33-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2012 Intel Corporation.";
@@ -335,11 +332,13 @@
 		goto exit;
 
 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
+	pr_info(" %s     %s              %s        %s\n",
+		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
+		"leng", "ntw", "timestamp");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
 		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
 			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
 			   (u64)dma_unmap_addr(tx_buffer, dma),
 			   dma_unmap_len(tx_buffer, len),
@@ -355,13 +354,37 @@
 
 	/* Transmit Descriptor Formats
 	 *
-	 * Advanced Transmit Descriptor
+	 * 82598 Advanced Transmit Descriptor
 	 *   +--------------------------------------------------------------+
 	 * 0 |         Buffer Address [63:0]                                |
 	 *   +--------------------------------------------------------------+
-	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
+	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
 	 *   +--------------------------------------------------------------+
 	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
+	 *
+	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
+	 *   +--------------------------------------------------------------+
+	 * 0 |                          RSV [63:0]                          |
+	 *   +--------------------------------------------------------------+
+	 * 8 |            RSV           |  STA  |          NXTSEQ           |
+	 *   +--------------------------------------------------------------+
+	 *   63                       36 35   32 31                         0
+	 *
+	 * 82599+ Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
+	 *   +--------------------------------------------------------------+
+	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
+	 *
+	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
+	 *   +--------------------------------------------------------------+
+	 * 0 |                          RSV [63:0]                          |
+	 *   +--------------------------------------------------------------+
+	 * 8 |            RSV           |  STA  |           RSV             |
+	 *   +--------------------------------------------------------------+
+	 *   63                       36 35   32 31                         0
 	 */
 
 	for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -369,40 +392,43 @@
 		pr_info("------------------------------------\n");
 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
 		pr_info("------------------------------------\n");
-		pr_info("T [desc]     [address 63:0  ] "
-			"[PlPOIdStDDt Ln] [bi->dma       ] "
-			"leng  ntw timestamp        bi->skb\n");
+		pr_info("%s%s    %s              %s        %s          %s\n",
+			"T [desc]     [address 63:0  ] ",
+			"[PlPOIdStDDt Ln] [bi->dma       ] ",
+			"leng", "ntw", "timestamp", "bi->skb");
 
 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 			tx_desc = IXGBE_TX_DESC(tx_ring, i);
 			tx_buffer = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
-			pr_info("T [0x%03X]    %016llX %016llX %016llX"
-				" %04X  %p %016llX %p", i,
-				le64_to_cpu(u0->a),
-				le64_to_cpu(u0->b),
-				(u64)dma_unmap_addr(tx_buffer, dma),
-				dma_unmap_len(tx_buffer, len),
-				tx_buffer->next_to_watch,
-				(u64)tx_buffer->time_stamp,
-				tx_buffer->skb);
-			if (i == tx_ring->next_to_use &&
-				i == tx_ring->next_to_clean)
-				pr_cont(" NTC/U\n");
-			else if (i == tx_ring->next_to_use)
-				pr_cont(" NTU\n");
-			else if (i == tx_ring->next_to_clean)
-				pr_cont(" NTC\n");
-			else
-				pr_cont("\n");
-
-			if (netif_msg_pktdata(adapter) &&
-			    tx_buffer->skb)
-				print_hex_dump(KERN_INFO, "",
-					DUMP_PREFIX_ADDRESS, 16, 1,
-					tx_buffer->skb->data,
+			if (dma_unmap_len(tx_buffer, len) > 0) {
+				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
+					i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)dma_unmap_addr(tx_buffer, dma),
 					dma_unmap_len(tx_buffer, len),
-					true);
+					tx_buffer->next_to_watch,
+					(u64)tx_buffer->time_stamp,
+					tx_buffer->skb);
+				if (i == tx_ring->next_to_use &&
+					i == tx_ring->next_to_clean)
+					pr_cont(" NTC/U\n");
+				else if (i == tx_ring->next_to_use)
+					pr_cont(" NTU\n");
+				else if (i == tx_ring->next_to_clean)
+					pr_cont(" NTC\n");
+				else
+					pr_cont("\n");
+
+				if (netif_msg_pktdata(adapter) &&
+				    tx_buffer->skb)
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS, 16, 1,
+						tx_buffer->skb->data,
+						dma_unmap_len(tx_buffer, len),
+						true);
+			}
 		}
 	}
 
@@ -422,7 +448,9 @@
 
 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
 
-	/* Advanced Receive Descriptor (Read) Format
+	/* Receive Descriptor Formats
+	 *
+	 * 82598 Advanced Receive Descriptor (Read) Format
 	 *    63                                           1        0
 	 *    +-----------------------------------------------------+
 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
@@ -431,27 +459,52 @@
 	 *    +-----------------------------------------------------+
 	 *
 	 *
-	 * Advanced Receive Descriptor (Write-Back) Format
+	 * 82598 Advanced Receive Descriptor (Write-Back) Format
 	 *
 	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
 	 *   +------------------------------------------------------+
-	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
-	 *   | Checksum   Ident  |   |           |    | Type | Type |
+	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
+	 *   | Packet   | IP     |   |          |     | Type | Type |
+	 *   | Checksum | Ident  |   |          |     |      |      |
 	 *   +------------------------------------------------------+
 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 	 *   +------------------------------------------------------+
 	 *   63       48 47    32 31            20 19               0
+	 *
+	 * 82599+ Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
+	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
+	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31          20 19                 0
 	 */
+
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
 		pr_info("------------------------------------\n");
 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
 		pr_info("------------------------------------\n");
-		pr_info("R  [desc]      [ PktBuf     A0] "
-			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
+		pr_info("%s%s%s",
+			"R  [desc]      [ PktBuf     A0] ",
+			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
 			"<-- Adv Rx Read format\n");
-		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
-			"[vl er S cks ln] ---------------- [bi->skb] "
+		pr_info("%s%s%s",
+			"RWB[desc]      [PcsmIpSHl PtRs] ",
+			"[vl er S cks ln] ---------------- [bi->skb       ] ",
 			"<-- Adv Rx Write-Back format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
@@ -646,6 +699,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
 	u32 xoff[8] = {0};
+	u8 tc;
 	int i;
 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
@@ -659,21 +713,26 @@
 
 	/* update stats for each tc, only valid with PFC enabled */
 	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+		u32 pxoffrxc;
+
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
-			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 			break;
 		default:
-			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
 		}
-		hwstats->pxoffrxc[i] += xoff[i];
+		hwstats->pxoffrxc[i] += pxoffrxc;
+		/* Get the TC for given UP */
+		tc = netdev_get_prio_tc_map(adapter->netdev, i);
+		xoff[tc] += pxoffrxc;
 	}
 
 	/* disarm tx queues that have received xoff frames */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
-		u8 tc = tx_ring->dcb_tc;
 
+		tc = tx_ring->dcb_tc;
 		if (xoff[tc])
 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
 	}
@@ -791,10 +850,8 @@
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
-#ifdef CONFIG_IXGBE_PTP
 		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
 			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-#endif
 
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
@@ -967,7 +1024,6 @@
 	 * which will cause the DCA tag to be cleared.
 	 */
 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
-		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
 		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 
 	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1244,6 +1300,7 @@
 		struct vlan_hdr *vlan;
 		/* l3 headers */
 		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
 	} hdr;
 	__be16 protocol;
 	u8 nexthdr = 0;	/* default to not TCP */
@@ -1284,6 +1341,13 @@
 		/* record next protocol */
 		nexthdr = hdr.ipv4->protocol;
 		hdr.network += hlen;
+	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hdr.network += sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
 	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
 		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
@@ -1294,7 +1358,7 @@
 		return hdr.network - data;
 	}
 
-	/* finally sort out TCP */
+	/* finally sort out TCP/UDP */
 	if (nexthdr == IPPROTO_TCP) {
 		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
 			return max_len;
@@ -1307,6 +1371,11 @@
 			return hdr.network - data;
 
 		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
 	}
 
 	/*
@@ -1369,9 +1438,7 @@
 
 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-#ifdef CONFIG_IXGBE_PTP
 	ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif
 
 	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1848,7 @@
  **/
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
-			       int budget)
+			       const int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 #ifdef IXGBE_FCOE
@@ -1832,7 +1899,6 @@
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
-		total_rx_packets++;
 
 		/* populate checksum, timestamp, VLAN, and protocol */
 		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1931,8 @@
 		ixgbe_rx_skb(q_vector, skb);
 
 		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
+		total_rx_packets++;
+	} while (likely(total_rx_packets < budget));
 
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1944,7 @@
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return !!budget;
+	return (total_rx_packets < budget);
 }
 
 /**
@@ -1914,20 +1980,6 @@
 		ixgbe_for_each_ring(ring, q_vector->tx)
 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
-		if (q_vector->tx.ring && !q_vector->rx.ring) {
-			/* tx only vector */
-			if (adapter->tx_itr_setting == 1)
-				q_vector->itr = IXGBE_10K_ITR;
-			else
-				q_vector->itr = adapter->tx_itr_setting;
-		} else {
-			/* rx or rx/tx vector */
-			if (adapter->rx_itr_setting == 1)
-				q_vector->itr = IXGBE_20K_ITR;
-			else
-				q_vector->itr = adapter->rx_itr_setting;
-		}
-
 		ixgbe_write_eitr(q_vector);
 	}
 
@@ -2324,10 +2376,8 @@
 		break;
 	}
 
-#ifdef CONFIG_IXGBE_PTP
 	if (adapter->hw.mac.type == ixgbe_mac_X540)
 		mask |= IXGBE_EIMS_TIMESYNC;
-#endif
 
 	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
 	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2443,8 @@
 
 	ixgbe_check_fan_failure(adapter, eicr);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
 		ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
 	/* re-enable the original interrupt state, no lsc, no queues */
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2636,8 @@
 	}
 
 	ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
 		ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
 	/* would disable interrupts here but EIAM disabled it */
 	napi_schedule(&q_vector->napi);
@@ -2699,12 +2745,6 @@
 {
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 
-	/* rx/tx vector */
-	if (adapter->rx_itr_setting == 1)
-		q_vector->itr = IXGBE_20K_ITR;
-	else
-		q_vector->itr = adapter->rx_itr_setting;
-
 	ixgbe_write_eitr(q_vector);
 
 	ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3132,14 +3172,6 @@
 	ixgbe_configure_srrctl(adapter, ring);
 	ixgbe_configure_rscctl(adapter, ring);
 
-	/* If operating in IOV mode set RLPML for X540 */
-	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
-	    hw->mac.type == ixgbe_mac_X540) {
-		rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-		rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
-			    ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
-	}
-
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		/*
 		 * enable cache line friendly hardware writes:
@@ -3211,7 +3243,8 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
 	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
 	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3267,6 @@
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-	/* enable Tx loopback for VF/PF communication */
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3294,11 @@
 		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
 #endif /* IXGBE_FCOE */
+
+	/* adjust max frame to be at least the size of a standard frame */
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3307,6 @@
 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 	}
 
-	/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
-	max_frame += VLAN_HLEN;
-
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4072,11 +4105,8 @@
 	else
 		ixgbe_configure_msi_and_legacy(adapter);
 
-	/* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.enable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* enable the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
 	clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4222,9 @@
 	/* update SAN MAC vmdq pool selection */
 	if (hw->mac.san_mac_rar_index)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_reset(adapter);
 }
 
 /**
@@ -4393,11 +4426,8 @@
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
 
-	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.disable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* power down the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser)
 		hw->mac.ops.disable_tx_laser(hw);
 
 	ixgbe_clean_all_tx_rings(adapter);
@@ -4533,7 +4563,8 @@
 	ixgbe_pbthresh_setup(adapter);
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
-	hw->fc.disable_fc_autoneg = false;
+	hw->fc.disable_fc_autoneg =
+		(ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
 
 #ifdef CONFIG_PCI_IOV
 	/* assign number of SR-IOV VFs */
@@ -4828,14 +4859,14 @@
 		return -EINVAL;
 
 	/*
-	 * For 82599EB we cannot allow PF to change MTU greater than 1500
-	 * in SR-IOV mode as it may cause buffer overruns in guest VFs that
-	 * don't allocate and chain buffers correctly.
+	 * For 82599EB we cannot allow legacy VFs to enable their receive
+	 * paths when MTU greater than 1500 is configured.  So display a
+	 * warning that legacy VFs will be disabled.
 	 */
 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
 	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
 	    (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
-			return -EINVAL;
+		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
 	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 
@@ -4901,6 +4932,8 @@
 	if (err)
 		goto err_set_queues;
 
+	ixgbe_ptp_init(adapter);
+
 	ixgbe_up_complete(adapter);
 
 	return 0;
@@ -4932,6 +4965,8 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+	ixgbe_ptp_stop(adapter);
+
 	ixgbe_down(adapter);
 	ixgbe_free_irq(adapter);
 
@@ -5022,14 +5057,8 @@
 	if (wufc) {
 		ixgbe_set_rx_mode(netdev);
 
-		/*
-		 * enable the optics for both mult-speed fiber and
-		 * 82599 SFP+ fiber as we can WoL.
-		 */
-		if (hw->mac.ops.enable_tx_laser &&
-		    (hw->phy.multispeed_fiber ||
-		    (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
-		     hw->mac.type == ixgbe_mac_82599EB)))
+		/* enable the optics for 82599 SFP+ fiber as we can WoL */
+		if (hw->mac.ops.enable_tx_laser)
 			hw->mac.ops.enable_tx_laser(hw);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5471,23 @@
 	adapter->link_speed = link_speed;
 }
 
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+	struct net_device *netdev = adapter->netdev;
+	struct dcb_app app = {
+			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+			      .protocol = 0,
+			     };
+	u8 up = 0;
+
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+		up = dcb_ieee_getapp_mask(netdev, &app);
+
+	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -5482,9 +5528,8 @@
 		break;
 	}
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5546,9 @@
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);
 
+	/* update the default user priority for VFs */
+	ixgbe_update_default_up(adapter);
+
 	/* ping all the active vfs to let them know link has changed */
 	ixgbe_ping_all_vfs(adapter);
 }
@@ -5526,9 +5574,8 @@
 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
@@ -5833,9 +5880,7 @@
 	ixgbe_watchdog_subtask(adapter);
 	ixgbe_fdir_reinit_subtask(adapter);
 	ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
 	ixgbe_ptp_overflow_check(adapter);
-#endif
 
 	ixgbe_service_event_complete(adapter);
 }
@@ -5988,10 +6033,8 @@
 	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
 
 	/* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
@@ -6393,12 +6436,10 @@
 
 	skb_tx_timestamp(skb);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
 	}
-#endif
 
 #ifdef CONFIG_PCI_IOV
 	/*
@@ -6485,6 +6526,7 @@
 		if (skb_pad(skb, 17 - skb->len))
 			return NETDEV_TX_OK;
 		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
 	}
 
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6589,8 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
 	case SIOCSHWTSTAMP:
 		return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
@@ -6910,13 +6950,16 @@
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return -EOPNOTSUPP;
 
-	if (ndm->ndm_state & NUD_PERMANENT) {
+	/* Hardware does not support aging addresses, so if an
+	 * ndm_state is given, only allow permanent addresses
+	 */
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
 		pr_info("%s: FDB only supports static addresses\n",
 			ixgbe_driver_name);
 		return -EINVAL;
 	}
 
-	if (is_unicast_ether_addr(addr)) {
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
 		u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
 
 		if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7017,61 @@
 	return idx;
 }
 
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+				    struct nlmsghdr *nlh)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct nlattr *attr, *br_spec;
+	int rem;
+
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		__u16 mode;
+		u32 reg = 0;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode == BRIDGE_MODE_VEPA) {
+			reg = 0;
+			adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
+		} else if (mode == BRIDGE_MODE_VEB) {
+			reg = IXGBE_PFDTXGSWC_VT_LBEN;
+			adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+		} else
+			return -EINVAL;
+
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+		e_info(drv, "enabling bridge mode: %s\n",
+			mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	}
+
+	return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				    struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	u16 mode;
+
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+		mode = BRIDGE_MODE_VEB;
+	else
+		mode = BRIDGE_MODE_VEPA;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
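The new ndo_bridge_setlink/getlink hooks let the PF flip its embedded hardware bridge between VEB (VF-to-VF traffic is looped back locally by the adapter, the IXGBE_PFDTXGSWC_VT_LBEN bit set) and VEPA (loopback off, so all traffic is hairpinned through the adjacent switch). A standalone model of that decision, not the driver code path itself:

#include <stdio.h>
#include <stdbool.h>

/* VEB enables internal Tx switching (loopback); VEPA disables it so
 * the external switch sees, and can police, all VF-to-VF traffic.
 */
enum bridge_mode { MODE_VEPA, MODE_VEB };

static bool tx_loopback_enabled(enum bridge_mode mode)
{
	return mode == MODE_VEB;	/* corresponds to the VT_LBEN bit */
}

int main(void)
{
	printf("VEB  -> loopback %d\n", tx_loopback_enabled(MODE_VEB));
	printf("VEPA -> loopback %d\n", tx_loopback_enabled(MODE_VEPA));
	return 0;
}

From userspace this is driven through the RTM_SETLINK IFLA_AF_SPEC/IFLA_BRIDGE_MODE attributes parsed above; iproute2 builds that support it expose this as something like "bridge link set dev <pf> hwmode vepa" (exact syntax depends on the iproute2 version).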
@@ -7013,6 +7111,8 @@
 	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
 	.ndo_fdb_del		= ixgbe_ndo_fdb_del,
 	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,
+	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
+	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 };
 
 /**
@@ -7042,6 +7142,7 @@
 				break;
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
+		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
 			is_wol_supported = 1;
 			break;
 		}
@@ -7364,10 +7465,6 @@
 
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
 	/* save off EEPROM version number */
 	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7517,8 @@
 	if (err)
 		goto err_register;
 
-	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.disable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* power down the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser)
 		hw->mac.ops.disable_tx_laser(hw);
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
@@ -7505,9 +7599,6 @@
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	cancel_work_sync(&adapter->service_task);
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_stop(adapter);
-#endif
 
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd9..42dd65e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET            0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
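These definitions add versioning to the PF/VF mailbox: the VF sends IXGBE_VF_API_NEGOTIATE with the version it wants and the PF ACKs or NACKs it (see ixgbe_negotiate_vf_api() and ixgbevf_negotiate_api() later in this series). A standalone sketch of the VF-side fallback order, with pf_supports() standing in for the real mailbox round trip:

#include <stdio.h>
#include <stdbool.h>

enum mbox_api { api_10, api_20, api_11, api_unknown };

/* hypothetical PF: speaks 1.0 and 1.1 but not the Solaris 2.0 API */
static bool pf_supports(enum mbox_api api)
{
	return api == api_10 || api == api_11;
}

/* Try the newest API first, fall back to 1.0, give up at unknown;
 * this mirrors the preference order ixgbevf_negotiate_api() uses.
 */
static enum mbox_api negotiate(void)
{
	const enum mbox_api prefer[] = { api_11, api_10, api_unknown };
	int i;

	for (i = 0; prefer[i] != api_unknown; i++)
		if (pf_supports(prefer[i]))
			return prefer[i];
	return api_unknown;
}

int main(void)
{
	printf("negotiated API enum value: %d\n", negotiate());
	return 0;
}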
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d929131..1a751c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ptp_clock_event event;
 
+	event.type = PTP_CLOCK_PPS;
+
+	/* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (ex. debug_fs). Better to check here than
+	 * everywhere that calls this function.
+	 */
+	if (!adapter->ptp_clock)
+		return;
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_X540:
 		ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@
 	unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
 	struct timespec ts;
 
-	if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+	if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
 	    (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
 		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
 		adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@
 	adapter = q_vector->adapter;
 	hw = &adapter->hw;
 
+	if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+		return;
+
 	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 
 	/* Check if we have a valid timestamp and make sure the skb should
 	 * have been timestamped */
-	if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
-		   !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
 		return;
 
 	/*
@@ -622,8 +633,7 @@
 	struct hwtstamp_config config;
 	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
 	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
-	u32 tsync_rx_mtrl = 0;
-	bool is_l4 = false;
+	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
 	bool is_l2 = false;
 	u32 regval;
 
@@ -646,16 +656,15 @@
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
 		tsync_rx_ctl = 0;
+		tsync_rx_mtrl = 0;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
-		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
-		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -668,7 +677,6 @@
 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
 		is_l2 = true;
-		is_l4 = true;
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -693,42 +701,15 @@
 	/* Store filter value for later use */
 	adapter->rx_hwtstamp_filter = config.rx_filter;
 
-	/* define ethertype filter for timestamped packets */
+	/* define ethertype filter for timestamping L2 packets */
 	if (is_l2)
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
 				(IXGBE_ETQF_FILTER_EN | /* enable filter */
 				 IXGBE_ETQF_1588 | /* enable timestamping */
 				 ETH_P_1588));     /* 1588 eth protocol type */
 	else
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
 
-#define PTP_PORT 319
-	/* L4 Queue Filter[3]: filter by destination port and protocol */
-	if (is_l4) {
-		u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
-			    | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
-			    | IXGBE_FTQF_QUEUE_ENABLE);
-
-		ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
-			  & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
-			  & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
-			 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
-
-		IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
-				(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
-				 IXGBE_IMIR_SIZE_BP_82599));
-
-		/* enable port check */
-		IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
-				(htons(PTP_PORT) |
-				 htons(PTP_PORT) << 16));
-
-		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
-
-		tsync_rx_mtrl |= PTP_PORT << 16;
-	} else {
-		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
-	}
 
 	/* enable/disable TX */
 	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
@@ -759,58 +740,20 @@
  * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
  * @adapter: pointer to the adapter structure
  *
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
  */
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 incval = 0;
-	u32 timinca = 0;
 	u32 shift = 0;
-	u32 cycle_speed;
 	unsigned long flags;
 
 	/**
-	 * Determine what speed we need to set the cyclecounter
-	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
-	 * unknown speeds as 10Gb. (Hence why we can't just copy the
-	 * link_speed.
-	 */
-	switch (adapter->link_speed) {
-	case IXGBE_LINK_SPEED_100_FULL:
-	case IXGBE_LINK_SPEED_1GB_FULL:
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		cycle_speed = adapter->link_speed;
-		break;
-	default:
-		/* cycle speed should be 10Gb when there is no link */
-		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	}
-
-	/*
-	 * grab the current TIMINCA value from the register so that it can be
-	 * double checked. If the register value has been cleared, it must be
-	 * reset to the correct value for generating a cyclecounter. If
-	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
-	 */
-	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
-	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
-	if (adapter->cycle_speed == cycle_speed && timinca)
-		return;
-
-	/**
 	 * Scale the NIC cycle counter by a large factor so that
 	 * relatively small corrections to the frequency can be added
 	 * or subtracted. The drawbacks of a large factor include
@@ -819,8 +762,12 @@
 	 * to nanoseconds using only a multiplier and a right-shift,
 	 * and (c) the value must fit within the timinca register space
 	 * => math based on internal DMA clock rate and available bits
+	 *
+	 * Note that when there is no link, the internal DMA clock runs at
+	 * the same rate as when the link speed is 10Gb. Set the registers
+	 * correctly even when link is down to preserve the clock setting.
 	 */
-	switch (cycle_speed) {
+	switch (adapter->link_speed) {
 	case IXGBE_LINK_SPEED_100_FULL:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +777,7 @@
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
 	case IXGBE_LINK_SPEED_10GB_FULL:
+	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
 		break;
@@ -857,18 +805,11 @@
 		return;
 	}
 
-	/* reset the system time registers */
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* store the new cycle speed */
-	adapter->cycle_speed = cycle_speed;
-
+	/* update the base incval used to calculate frequency adjustment */
 	ACCESS_ONCE(adapter->base_incval) = incval;
 	smp_mb();
 
-	/* grab the ptp lock */
+	/* need lock to prevent incorrect read while modifying cyclecounter */
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 
 	memset(&adapter->cc, 0, sizeof(adapter->cc));
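The rewritten helper now only programs TIMINCA and the cyclecounter parameters; SYSTIME itself is cleared separately in the new ixgbe_ptp_reset(). The incval/shift pair exists so raw SYSTIME cycles can be turned into nanoseconds with just a multiply and a right shift, which is what the kernel timecounter does with these fields. A small userspace sketch of that conversion, using made-up numbers rather than the real IXGBE_INCVAL_* values:

#include <stdio.h>
#include <stdint.h>

/* Generic cyclecounter math: ns = (cycles * mult) >> shift.
 * mult and shift below are illustrative values only.
 */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 1638;	/* hypothetical */
	uint32_t shift = 14;	/* hypothetical: ~0.1 ns per cycle */

	printf("%llu ns\n",
	       (unsigned long long)cycles_to_ns(10000000, mult, shift));
	return 0;
}

A larger shift leaves more fractional resolution, so small frequency corrections can be applied by nudging the multiplier, at the cost of overflowing the intermediate product sooner; that is the kind of trade-off the scaling comment above is describing.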
@@ -877,6 +818,31 @@
 	adapter->cc.shift = shift;
 	adapter->cc.mult = 1;
 
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	/* set SYSTIME registers to 0 just in case */
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ptp_start_cyclecounter(adapter);
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
 	/* reset the ns time counter */
 	timecounter_init(&adapter->tc, &adapter->cc,
 			 ktime_to_ns(ktime_get_real()));
@@ -904,7 +870,7 @@
 
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X540:
-		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +884,7 @@
 		adapter->ptp_caps.enable = ixgbe_ptp_enable;
 		break;
 	case ixgbe_mac_82599EB:
-		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +908,6 @@
 
 	spin_lock_init(&adapter->tmreg_lock);
 
-	ixgbe_ptp_start_cyclecounter(adapter);
-
-	/* (Re)start the overflow check */
-	adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
-
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +916,11 @@
 	} else
 		e_dev_info("registered PHC device on %s\n", netdev->name);
 
+	ixgbe_ptp_reset(adapter);
+
+	/* set the flag that PTP has been enabled */
+	adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+
 	return;
 }
 
@@ -967,7 +933,7 @@
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
 {
 	/* stop the overflow check task */
-	adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+	adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
 			     IXGBE_FLAG2_PTP_PPS_ENABLED);
 
 	ixgbe_ptp_setup_sdp(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf..85cddac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,10 @@
 		}
 	}
 
+	/* Initialize default switching mode VEB */
+	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+	adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+
 	/* If call to enable VFs succeeded then allocate memory
 	 * for per VF control structures.
 	 */
@@ -150,16 +154,6 @@
 		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 				     IXGBE_FLAG2_RSC_ENABLED);
 
-#ifdef IXGBE_FCOE
-		/*
-		 * When SR-IOV is enabled 82599 cannot support jumbo frames
-		 * so we must disable FCoE because we cannot support FCoE MTU.
-		 */
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
-					    IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-
 		/* enable spoof checking for all VFs */
 		for (i = 0; i < adapter->num_vfs; i++)
 			adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +259,11 @@
 }
 
 static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
-				   int entries, u16 *hash_list, u32 vf)
+				   u32 *msgbuf, u32 vf)
 {
+	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+		       >> IXGBE_VT_MSGINFO_SHIFT;
+	u16 *hash_list = (u16 *)&msgbuf[1];
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
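ixgbe_set_vf_multicasts() now takes the raw mailbox buffer and unpacks the entry count itself from the extra-info field in word 0 (bits 23:16, per IXGBE_VT_MSGINFO_*), with the hash list following in word 1. A standalone model of that unpacking, assuming a little-endian host for the 16-bit view of the payload:

#include <stdio.h>
#include <stdint.h>

#define VT_MSGINFO_SHIFT	16
#define VT_MSGINFO_MASK		(0xFFu << VT_MSGINFO_SHIFT)

int main(void)
{
	/* word 0: message type in the low bits, entry count in bits 23:16 */
	uint32_t msgbuf[3] = { 0x03 /* IXGBE_VF_SET_MULTICAST */ |
			       (2u << VT_MSGINFO_SHIFT),
			       0x43218765, 0x0 };
	int entries = (msgbuf[0] & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];	/* driver's view */

	/* on little-endian, hash_list[0] is 0x8765 and hash_list[1] is 0x4321 */
	printf("entries=%d hash[0]=0x%04x hash[1]=0x%04x\n",
	       entries, hash_list[0], hash_list[1]);
	return 0;
}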
@@ -353,31 +350,89 @@
 	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int new_mtu = msgbuf[1];
+	int max_frame = msgbuf[1];
 	u32 max_frs;
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-	/* Only X540 supports jumbo frames in IOV mode */
-	if (adapter->hw.mac.type != ixgbe_mac_X540)
-		return;
+	/*
+	 * For 82599EB we have to keep all PFs and VFs operating with
+	 * the same max_frame value in order to avoid sending an oversize
+	 * frame to a VF.  In order to guarantee this is handled correctly
+	 * for all cases we have several special exceptions to take into
+	 * account before we can enable the VF for receive
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		struct net_device *dev = adapter->netdev;
+		int pf_max_frame = dev->mtu + ETH_HLEN;
+		u32 reg_offset, vf_shift, vfre;
+		s32 err = 0;
 
-	/* MTU < 68 is an error and causes problems on some kernels */
-	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
-		e_err(drv, "VF mtu %d out of range\n", new_mtu);
-		return;
+#ifdef CONFIG_FCOE
+		if (dev->features & NETIF_F_FCOE_MTU)
+			pf_max_frame = max_t(int, pf_max_frame,
+					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+		switch (adapter->vfinfo[vf].vf_api) {
+		case ixgbe_mbox_api_11:
+			/*
+			 * Version 1.1 supports jumbo frames on VFs if PF has
+			 * jumbo frames enabled which means legacy VFs are
+			 * disabled
+			 */
+			if (pf_max_frame > ETH_FRAME_LEN)
+				break;
+		default:
+			/*
+			 * If the PF or VF are running w/ jumbo frames enabled
+			 * we need to shut down the VF Rx path as we cannot
+			 * support jumbo frames on legacy VFs
+			 */
+			if ((pf_max_frame > ETH_FRAME_LEN) ||
+			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+				err = -EINVAL;
+			break;
+		}
+
+		/* determine VF receive enable location */
+		vf_shift = vf % 32;
+		reg_offset = vf / 32;
+
+		/* enable or disable receive depending on error */
+		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+		if (err)
+			vfre &= ~(1 << vf_shift);
+		else
+			vfre |= 1 << vf_shift;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+		if (err) {
+			e_err(drv, "VF max_frame %d out of range\n", max_frame);
+			return err;
+		}
 	}
 
-	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
-		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
-	if (max_frs < new_mtu) {
-		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+		e_err(drv, "VF max_frame %d out of range\n", max_frame);
+		return -EINVAL;
+	}
+
+	/* pull current max frame size from hardware */
+	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+	max_frs &= IXGBE_MHADD_MFS_MASK;
+	max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+	if (max_frs < max_frame) {
+		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
 		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
 	}
 
-	e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+	e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+	return 0;
 }
 
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
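The reworked ixgbe_set_vf_lpe() decides per VF whether receive stays enabled and then flips that VF's bit in the VFRE register bank: VF n lives at register n / 32, bit n % 32. A minimal model of that bookkeeping, with vfre[] standing in for the hardware registers:

#include <stdio.h>
#include <stdint.h>

static void set_vf_rx_enable(uint32_t *vfre, unsigned int vf, int enable)
{
	unsigned int reg_offset = vf / 32;	/* which 32-bit register */
	unsigned int vf_shift = vf % 32;	/* which bit inside it */

	if (enable)
		vfre[reg_offset] |= 1u << vf_shift;
	else
		vfre[reg_offset] &= ~(1u << vf_shift);
}

int main(void)
{
	uint32_t vfre[2] = { 0, 0 };

	set_vf_rx_enable(vfre, 37, 1);	/* VF 37 -> register 1, bit 5 */
	set_vf_rx_enable(vfre, 3, 0);	/* VF 3  -> register 0, bit 3 */
	printf("VFRE[0]=0x%08x VFRE[1]=0x%08x\n", vfre[0], vfre[1]);
	return 0;
}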
@@ -392,35 +447,47 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 }
 
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+			    u16 vid, u16 qos, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
+
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (vid)
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
-				(vid | IXGBE_VMVIR_VLANA_DEFAULT));
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 }
-
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* add PF assigned VLAN or VLAN 0 */
+	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 
 	/* reset offloads to defaults */
-	if (adapter->vfinfo[vf].pf_vlan) {
-		ixgbe_set_vf_vlan(adapter, true,
-				  adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter,
-				(adapter->vfinfo[vf].pf_vlan |
-				 (adapter->vfinfo[vf].pf_qos <<
-				  VLAN_PRIO_SHIFT)), vf);
-		ixgbe_set_vmolr(hw, vf, false);
+	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+	/* set outgoing tags for VFs */
+	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+		ixgbe_clear_vmvir(adapter, vf);
 	} else {
-		ixgbe_set_vf_vlan(adapter, true, 0, vf);
-		ixgbe_set_vmvir(adapter, 0, vf);
-		ixgbe_set_vmolr(hw, vf, true);
+		if (vfinfo->pf_qos || !num_tcs)
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					vfinfo->pf_qos, vf);
+		else
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					adapter->default_up, vf);
+
+		if (vfinfo->spoofchk_enabled)
+			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 	}
 
 	/* reset multicast table array for vf */
@@ -430,6 +497,9 @@
 	ixgbe_set_rx_mode(adapter->netdev);
 
 	hw->mac.ops.clear_rar(hw, rar_entry);
+
+	/* reset VF api back to unknown */
+	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +591,221 @@
 	return 0;
 }
 
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 reg;
+	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+	u32 reg, msgbuf[4];
 	u32 reg_offset, vf_shift;
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+	/* reset the filters for the device */
+	ixgbe_vf_reset_event(adapter, vf);
+
+	/* set vf mac address */
+	ixgbe_set_vf_mac(adapter, vf, vf_mac);
 
 	vf_shift = vf % 32;
 	reg_offset = vf / 32;
 
-	/* enable transmit and receive for vf */
+	/* enable transmit for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
+	reg |= 1 << vf_shift;
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
+	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
+	reg |= 1 << vf_shift;
+	/*
+	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+	 * For more info take a look at ixgbe_set_vf_lpe
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		struct net_device *dev = adapter->netdev;
+		int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef CONFIG_FCOE
+		if (dev->features & NETIF_F_FCOE_MTU)
+			pf_max_frame = max_t(int, pf_max_frame,
+					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+		if (pf_max_frame > ETH_FRAME_LEN)
+			reg &= ~(1 << vf_shift);
+	}
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
+	/* enable VF mailbox for further messages */
+	adapter->vfinfo[vf].clear_to_send = true;
+
 	/* Enable counting of spoofed packets in the SSVPC register */
 	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
 	reg |= (1 << vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
-	ixgbe_vf_reset_event(adapter, vf);
+	/* reply to reset with ack and vf mac address */
+	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+	memcpy(addr, vf_mac, ETH_ALEN);
+
+	/*
+	 * Piggyback the multicast filter type so VF can compute the
+	 * correct vectors
+	 */
+	msgbuf[3] = hw->mac.mc_filter_type;
+	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+	return 0;
+}
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+				 u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+	if (!is_valid_ether_addr(new_mac)) {
+		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+		return -1;
+	}
+
+	if (adapter->vfinfo[vf].pf_set_mac &&
+	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+		   ETH_ALEN)) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set MAC address\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+				 u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+	int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+	int err;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+	if (adapter->vfinfo[vf].pf_vlan || tcs) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set VLAN configuration\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	if (add)
+		adapter->vfinfo[vf].vlan_count++;
+	else if (adapter->vfinfo[vf].vlan_count)
+		adapter->vfinfo[vf].vlan_count--;
+
+	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+	if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+	return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+				    u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+		    IXGBE_VT_MSGINFO_SHIFT;
+	int err;
+
+	if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+		e_warn(drv,
+		       "VF %d requested MACVLAN filter but is administratively denied\n",
+		       vf);
+		return -1;
+	}
+
+	/* A non-zero index indicates the VF is setting a filter */
+	if (index) {
+		if (!is_valid_ether_addr(new_mac)) {
+			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+			return -1;
+		}
+
+		/*
+		 * If the VF is allowed to set MAC filters then turn off
+		 * anti-spoofing to avoid false positives.
+		 */
+		if (adapter->vfinfo[vf].spoofchk_enabled)
+			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+	}
+
+	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+	if (err == -ENOSPC)
+		e_warn(drv,
+		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
+		       vf);
+
+	return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+				  u32 *msgbuf, u32 vf)
+{
+	int api = msgbuf[1];
+
+	switch (api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		adapter->vfinfo[vf].vf_api = api;
+		return 0;
+	default:
+		break;
+	}
+
+	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+	return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+			       u32 *msgbuf, u32 vf)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	unsigned int default_tc = 0;
+	u8 num_tcs = netdev_get_num_tc(dev);
+
+	/* verify the PF is supporting the correct APIs */
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* only allow 1 Tx queue for bandwidth limiting */
+	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+	/* if TCs > 1 determine which TC belongs to default user priority */
+	if (num_tcs > 1)
+		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+	return 0;
 }
 
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +814,6 @@
 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 	struct ixgbe_hw *hw = &adapter->hw;
 	s32 retval;
-	int entries;
-	u16 *hash_list;
-	int add, vid, index;
-	u8 *new_mac;
 
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
@@ -572,39 +829,13 @@
 	/* flush the ack before we write any messages back */
 	IXGBE_WRITE_FLUSH(hw);
 
+	if (msgbuf[0] == IXGBE_VF_RESET)
+		return ixgbe_vf_reset_msg(adapter, vf);
+
 	/*
 	 * until the vf completes a virtual function reset it should not be
 	 * allowed to start any configuration.
 	 */
-
-	if (msgbuf[0] == IXGBE_VF_RESET) {
-		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
-		new_mac = (u8 *)(&msgbuf[1]);
-		e_info(probe, "VF Reset msg received from vf %d\n", vf);
-		adapter->vfinfo[vf].clear_to_send = false;
-		ixgbe_vf_reset_msg(adapter, vf);
-		adapter->vfinfo[vf].clear_to_send = true;
-
-		if (is_valid_ether_addr(new_mac) &&
-		    !adapter->vfinfo[vf].pf_set_mac)
-			ixgbe_set_vf_mac(adapter, vf, vf_mac);
-		else
-			ixgbe_set_vf_mac(adapter,
-				 vf, adapter->vfinfo[vf].vf_mac_addresses);
-
-		/* reply to reset with ack and vf mac address */
-		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-		memcpy(new_mac, vf_mac, ETH_ALEN);
-		/*
-		 * Piggyback the multicast filter type so VF can compute the
-		 * correct vectors
-		 */
-		msgbuf[3] = hw->mac.mc_filter_type;
-		ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
-		return retval;
-	}
-
 	if (!adapter->vfinfo[vf].clear_to_send) {
 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 		ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +844,25 @@
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case IXGBE_VF_SET_MAC_ADDR:
-		new_mac = ((u8 *)(&msgbuf[1]));
-		if (is_valid_ether_addr(new_mac) &&
-		    !adapter->vfinfo[vf].pf_set_mac) {
-			ixgbe_set_vf_mac(adapter, vf, new_mac);
-		} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
-				  new_mac, ETH_ALEN)) {
-			e_warn(drv, "VF %d attempted to override "
-			       "administratively set MAC address\nReload "
-			       "the VF driver to resume operations\n", vf);
-			retval = -1;
-		}
+		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_MULTICAST:
-		entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-		          >> IXGBE_VT_MSGINFO_SHIFT;
-		hash_list = (u16 *)&msgbuf[1];
-		retval = ixgbe_set_vf_multicasts(adapter, entries,
-		                                 hash_list, vf);
-		break;
-	case IXGBE_VF_SET_LPE:
-		ixgbe_set_vf_lpe(adapter, msgbuf);
+		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_VLAN:
-		add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-		      >> IXGBE_VT_MSGINFO_SHIFT;
-		vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
-		if (adapter->vfinfo[vf].pf_vlan) {
-			e_warn(drv, "VF %d attempted to override "
-			       "administratively set VLAN configuration\n"
-			       "Reload the VF driver to resume operations\n",
-			       vf);
-			retval = -1;
-		} else {
-			if (add)
-				adapter->vfinfo[vf].vlan_count++;
-			else if (adapter->vfinfo[vf].vlan_count)
-				adapter->vfinfo[vf].vlan_count--;
-			retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-			if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
-				hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-		}
+		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_SET_LPE:
+		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_MACVLAN:
-		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
-			IXGBE_VT_MSGINFO_SHIFT;
-		if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
-			e_warn(drv, "VF %d requested MACVLAN filter but is "
-				    "administratively denied\n", vf);
-			retval = -1;
-			break;
-		}
-		/*
-		 * If the VF is allowed to set MAC filters then turn off
-		 * anti-spoofing to avoid false positives.  An index
-		 * greater than 0 will indicate the VF is setting a
-		 * macvlan MAC filter.
-		 */
-		if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
-		retval = ixgbe_set_vf_macvlan(adapter, vf, index,
-					      (unsigned char *)(&msgbuf[1]));
-		if (retval == -ENOSPC)
-			e_warn(drv, "VF %d has requested a MACVLAN filter "
-				    "but there is no space for it\n", vf);
+		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
 		break;
 	default:
 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +878,7 @@
 
 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
 
 	return retval;
 }
@@ -783,7 +969,7 @@
 		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
 		if (err)
 			goto out;
-		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+		ixgbe_set_vmvir(adapter, vlan, qos, vf);
 		ixgbe_set_vmolr(hw, vf, false);
 		if (adapter->vfinfo[vf].spoofchk_enabled)
 			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +989,7 @@
 	} else {
 		err = ixgbe_set_vf_vlan(adapter, false,
 					adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter, vlan, vf);
+		ixgbe_clear_vmvir(adapter, vf);
 		ixgbe_set_vmolr(hw, vf, true);
 		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
 		if (adapter->vfinfo[vf].vlan_count)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f33..21915e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP    0x0470
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf..3147795 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
 #define IXGBE_DEV_ID_X540_VF            0x1515
 
 #define IXGBE_VF_IRQ_CLEAR_MASK         7
-#define IXGBE_VF_MAX_TX_QUEUES          1
-#define IXGBE_VF_MAX_RX_QUEUES          1
+#define IXGBE_VF_MAX_TX_QUEUES          8
+#define IXGBE_VF_MAX_RX_QUEUES          8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS	8
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c2..fc0af9a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
 	struct device *dev;
-	struct ixgbevf_adapter *adapter;  /* backlink */
 	void *desc;			/* descriptor ring memory */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
 	unsigned int size;		/* length in bytes */
@@ -75,6 +74,8 @@
 	u64			total_bytes;
 	u64			total_packets;
 	struct u64_stats_sync	syncp;
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
 
 	u16 head;
 	u16 tail;
@@ -89,8 +90,8 @@
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 
-#define MAX_RX_QUEUES 1
-#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
 
 #define IXGBEVF_DEFAULT_TXD   1024
 #define IXGBEVF_DEFAULT_RXD   512
@@ -101,10 +102,10 @@
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_3K    3072
-#define IXGBEVF_RXBUFFER_7K    7168
-#define IXGBEVF_RXBUFFER_15K   15360
-#define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
+#define IXGBEVF_RXBUFFER_2K    2048
+#define IXGBEVF_RXBUFFER_4K    4096
+#define IXGBEVF_RXBUFFER_8K    8192
+#define IXGBEVF_RXBUFFER_10K   10240
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
 
@@ -229,6 +230,7 @@
 	 */
 	u32 flags;
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 1)
 
 	/* OS defined structs */
 	struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad50..f267c00 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@
 
 /* forward decls */
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 					   struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
  * @msix_vector: the vector to map to the corresponding queue
- *
  */
 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 			     u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@
 	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
 		__vlan_hwaccel_put_tag(skb, tag);
 
-	napi_gro_receive(&q_vector->napi, skb);
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+		napi_gro_receive(&q_vector->napi, skb);
+	else
+		netif_rx(skb);
 }
 
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
  **/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
-				       struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@
 	/* if IP and error */
 	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
 	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
 
@@ -317,13 +319,13 @@
 		return;
 
 	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	adapter->hw_csum_rx_good++;
+	ring->hw_csum_rx_good++;
 }
 
 /**
@@ -337,15 +339,16 @@
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbevf_rx_buffer *bi;
-	struct sk_buff *skb;
 	unsigned int i = rx_ring->next_to_use;
 
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-		skb = bi->skb;
-		if (!skb) {
+
+		if (!bi->skb) {
+			struct sk_buff *skb;
+
 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
@@ -353,11 +356,16 @@
 				goto no_buffers;
 			}
 			bi->skb = skb;
-		}
-		if (!bi->dma) {
+
 			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+				dev_kfree_skb(skb);
+				bi->skb = NULL;
+				dev_err(&pdev->dev, "RX DMA map failed\n");
+				break;
+			}
 		}
 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
@@ -370,7 +378,6 @@
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-
 		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
 	}
 }
@@ -454,7 +461,7 @@
 			goto next_desc;
 		}
 
-		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+		ixgbevf_rx_checksum(rx_ring, staterr, skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@
 		}
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
+		/* Workaround hardware that can't do proper VEPA multicast
+		 * source pruning.
+		 */
+		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+		    !(compare_ether_addr(adapter->netdev->dev_addr,
+					eth_hdr(skb)->h_source))) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
 		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
@@ -533,9 +550,11 @@
 	else
 		per_ring_budget = budget;
 
+	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	ixgbevf_for_each_ring(ring, q_vector->rx)
 		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
 						       per_ring_budget);
+	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
@@ -743,7 +762,6 @@
 	return IRQ_HANDLED;
 }
 
-
 /**
  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
  * @irq: unused
@@ -1065,20 +1083,20 @@
 	max_frame += VLAN_HLEN;
 
 	/*
-	 * Make best use of allocation by using all but 1K of a
-	 * power of 2 allocation that will be used for skb->head.
+	 * Allocate buffer sizes that fit well into 32K and
+	 * take into account max frame size of 9.5K
 	 */
 	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
 	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
 		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
-		rx_buf_len = IXGBEVF_RXBUFFER_3K;
-	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
-		rx_buf_len = IXGBEVF_RXBUFFER_7K;
-	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
-		rx_buf_len = IXGBEVF_RXBUFFER_15K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+		rx_buf_len = IXGBEVF_RXBUFFER_2K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+		rx_buf_len = IXGBEVF_RXBUFFER_4K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+		rx_buf_len = IXGBEVF_RXBUFFER_8K;
 	else
-		rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+		rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
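The VF now sizes its receive buffers from a fixed 2K/4K/8K/10K ladder rather than the old "power of two minus 1K" scheme, picking the smallest step that still holds the frame (the device maximum is roughly 9.5K). A standalone sketch of that selection, ignoring the X540 standard-VLAN special case:

#include <stdio.h>

static int pick_rx_buf_len(int max_frame)
{
	static const int sizes[] = { 2048, 4096, 8192, 10240 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]) - 1; i++)
		if (max_frame <= sizes[i])
			return sizes[i];
	return sizes[3];	/* 10K catches the ~9.5K maximum frame */
}

int main(void)
{
	/* max_frame here is MTU plus Ethernet/VLAN/FCS overhead */
	printf("1522 -> %d, 9018 -> %d\n",
	       pick_rx_buf_len(1522), pick_rx_buf_len(9018));
	return 0;
}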
@@ -1128,15 +1146,12 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err;
 
-	if (!hw->mac.ops.set_vfta)
-		return -EOPNOTSUPP;
-
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* add VID to filter table */
 	err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	/* translate error return types so error makes sense */
 	if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1171,12 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err = -EOPNOTSUPP;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* remove VID from filter table */
-	if (hw->mac.ops.set_vfta)
-		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	clear_bit(vid, adapter->active_vlans);
 
@@ -1206,27 +1220,27 @@
 }
 
 /**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
  * @netdev: network interface device structure
  *
  * The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
  **/
 static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* reprogram multicast list */
-	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, netdev);
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
 
 	ixgbevf_write_uc_addr_list(netdev);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1304,8 @@
 		       "not set within the polling period\n", rxr);
 	}
 
-	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
+	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+				adapter->rx_ring[rxr].count - 1);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1349,12 @@
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int api[] = { ixgbe_mbox_api_10,
+	int api[] = { ixgbe_mbox_api_11,
+		      ixgbe_mbox_api_10,
 		      ixgbe_mbox_api_unknown };
 	int err = 0, idx = 0;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	while (api[idx] != ixgbe_mbox_api_unknown) {
 		err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1363,7 @@
 		idx++;
 	}
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1404,14 @@
 
 	ixgbevf_configure_msix(adapter);
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar) {
-		if (is_valid_ether_addr(hw->mac.addr))
-			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
-		else
-			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
-	}
+	if (is_valid_ether_addr(hw->mac.addr))
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	else
+		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
 	ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1426,87 @@
 	mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring *rx_ring;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err, i;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (err)
+		return err;
+
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0].reg_idx = def_q;
+
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* nothing to do if we have the correct number of queues */
+	if (adapter->num_rx_queues == num_rx_queues)
+		return 0;
+
+	/* allocate new rings */
+	rx_ring = kcalloc(num_rx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!rx_ring)
+		return -ENOMEM;
+
+	/* setup ring fields */
+	for (i = 0; i < num_rx_queues; i++) {
+		rx_ring[i].count = adapter->rx_ring_count;
+		rx_ring[i].queue_index = i;
+		rx_ring[i].reg_idx = i;
+		rx_ring[i].dev = &adapter->pdev->dev;
+		rx_ring[i].netdev = adapter->netdev;
+
+		/* allocate resources on the ring */
+		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+		if (err) {
+			while (i) {
+				i--;
+				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+			}
+			kfree(rx_ring);
+			return err;
+		}
+	}
+
+	/* free the existing rings and queues */
+	ixgbevf_free_all_rx_resources(adapter);
+	adapter->num_rx_queues = 0;
+	kfree(adapter->rx_ring);
+
+	/* move new rings into position on the adapter struct */
+	adapter->rx_ring = rx_ring;
+	adapter->num_rx_queues = num_rx_queues;
+
+	/* reset ring to vector mapping */
+	ixgbevf_reset_q_vectors(adapter);
+	ixgbevf_map_rings_to_vectors(adapter);
+
+	return 0;
+}
+
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	ixgbevf_negotiate_api(adapter);
 
+	ixgbevf_reset_queues(adapter);
+
 	ixgbevf_configure(adapter);
 
 	ixgbevf_up_complete(adapter);
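ixgbevf_reset_queues() is where the new GET_QUEUES mailbox data changes the VF's layout: with more than one traffic class the VF needs one Rx queue per TC, and its single Tx ring must move to the default-queue index the PF reports. A small sketch of just that decision, using hypothetical PF values:

#include <stdio.h>

struct vf_queue_cfg {
	unsigned int num_rx_queues;
	unsigned int tx_reg_idx;
};

/* num_tcs and def_q model the values ixgbevf_get_queues() returns */
static struct vf_queue_cfg queues_from_pf(unsigned int num_tcs,
					  unsigned int def_q)
{
	struct vf_queue_cfg cfg = { 1, 0 };

	if (num_tcs > 1) {
		cfg.num_rx_queues = num_tcs;	/* one Rx queue per TC */
		cfg.tx_reg_idx = def_q;		/* Tx follows the default TC */
	}
	return cfg;
}

int main(void)
{
	struct vf_queue_cfg cfg = queues_from_pf(4, 2);	/* hypothetical */

	printf("rx queues %u, tx reg_idx %u\n",
	       cfg.num_rx_queues, cfg.tx_reg_idx);
	return 0;
}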
@@ -1497,7 +1585,6 @@
 		return;
 
 	/* Free all the Tx ring sk_buffs */
-
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1680,6 @@
 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
 		msleep(1);
 
-	/*
-	 * Check if PF is up before re-init.  If not then skip until
-	 * later when the PF is up and ready to service requests from
-	 * the VF via mailbox.  If the VF is up and running then the
-	 * watchdog task will continue to schedule reset tasks until
-	 * the PF is up and running.
-	 */
 	ixgbevf_down(adapter);
 	ixgbevf_up(adapter);
 
@@ -1611,15 +1691,11 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	spin_lock(&adapter->mbx_lock);
-
 	if (hw->mac.ops.reset_hw(hw))
 		hw_dbg(hw, "PF still resetting\n");
 	else
 		hw->mac.ops.init_hw(hw);
 
-	spin_unlock(&adapter->mbx_lock);
-
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
 		       netdev->addr_len);
@@ -1628,10 +1704,11 @@
 	}
 }
 
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
-					 int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+					int vectors)
 {
-	int err, vector_threshold;
+	int err = 0;
+	int vector_threshold;
 
 	/* We'll want at least 2 (vector_threshold):
 	 * 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1724,18 @@
 	while (vectors >= vector_threshold) {
 		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
 				      vectors);
-		if (!err) /* Success in acquiring all requested vectors. */
+		if (!err || err < 0) /* Success or a nasty failure. */
 			break;
-		else if (err < 0)
-			vectors = 0; /* Nasty failure, quit now */
 		else /* err == number of vectors we should try again with */
 			vectors = err;
 	}
 
-	if (vectors < vector_threshold) {
-		/* Can't allocate enough MSI-X interrupts?  Oh well.
-		 * This just means we'll go with either a single MSI
-		 * vector or fall back to legacy interrupts.
-		 */
-		hw_dbg(&adapter->hw,
-		       "Unable to allocate MSI-X interrupts\n");
+	if (vectors < vector_threshold)
+		err = -ENOMEM;
+
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate MSI-X interrupts\n");
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
 	} else {
@@ -1672,6 +1746,8 @@
 		 */
 		adapter->num_msix_vectors = vectors;
 	}
+
+	return err;
 }
 
 /**
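ixgbevf_acquire_msix_vectors() now reports failure to its caller, but the retry loop still leans on pci_enable_msix()'s contract as spelled out in the hunk: zero means every requested vector was granted, a negative value is a hard failure, and a positive value is how many vectors are actually available, so the caller retries with that count until it drops below the threshold. A standalone model of that loop, with try_enable() standing in for the PCI call:

#include <stdio.h>

static int try_enable(int requested, int available)
{
	if (available <= 0)
		return -1;			/* hard failure */
	return requested <= available ? 0 : available;
}

static int acquire_vectors(int vectors, int threshold, int available)
{
	int err = 0;

	while (vectors >= threshold) {
		err = try_enable(vectors, available);
		if (!err || err < 0)		/* success or a nasty failure */
			break;
		vectors = err;			/* retry with what is on offer */
	}

	return (vectors < threshold || err) ? -1 : 0;
}

int main(void)
{
	printf("%d %d\n",
	       acquire_vectors(4, 2, 3),	/* settles on 3 vectors -> 0 */
	       acquire_vectors(4, 2, 1));	/* below threshold -> -1 */
	return 0;
}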
@@ -1717,6 +1793,7 @@
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		adapter->tx_ring[i].count = adapter->tx_ring_count;
 		adapter->tx_ring[i].queue_index = i;
+		/* reg_idx may be remapped later by DCB config */
 		adapter->tx_ring[i].reg_idx = i;
 		adapter->tx_ring[i].dev = &adapter->pdev->dev;
 		adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1851,9 @@
 	for (vector = 0; vector < v_budget; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
-	ixgbevf_acquire_msix_vectors(adapter, v_budget);
+	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+	if (err)
+		goto out;
 
 	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
 	if (err)
@@ -1834,18 +1913,13 @@
  **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-	int q_idx, num_q_vectors;
-	int napi_vectors;
-
-	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	napi_vectors = adapter->num_rx_queues;
+	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
-		if (q_idx < napi_vectors)
-			netif_napi_del(&q_vector->napi);
+		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
 }
@@ -1950,8 +2024,11 @@
 	hw->subsystem_device_id = pdev->subsystem_device;
 
 	hw->mbx.ops.init_params(hw);
-	hw->mac.max_tx_queues = MAX_TX_QUEUES;
-	hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+	/* assume legacy case in which PF would only give VF 2 queues */
+	hw->mac.max_tx_queues = 2;
+	hw->mac.max_rx_queues = 2;
+
 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
 		dev_info(&pdev->dev,
@@ -1966,7 +2043,7 @@
 			goto out;
 		}
 		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
-			adapter->netdev->addr_len);
+		       adapter->netdev->addr_len);
 	}
 
 	/* lock to protect mailbox accesses */
@@ -2016,6 +2093,7 @@
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
 
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
 				adapter->stats.vfgprc);
@@ -2029,6 +2107,15 @@
 				adapter->stats.vfgotc);
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
 				adapter->stats.vfmprc);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->hw_csum_rx_error +=
+			adapter->rx_ring[i].hw_csum_rx_error;
+		adapter->hw_csum_rx_good +=
+			adapter->rx_ring[i].hw_csum_rx_good;
+		adapter->rx_ring[i].hw_csum_rx_error = 0;
+		adapter->rx_ring[i].hw_csum_rx_good = 0;
+	}
 }
 
 /**
@@ -2103,6 +2190,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
+	s32 need_reset;
 
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
@@ -2110,29 +2198,19 @@
 	 * Always check the link on the watchdog because we have
 	 * no LSC interrupt
 	 */
-	if (hw->mac.ops.check_link) {
-		s32 need_reset;
+	spin_lock_bh(&adapter->mbx_lock);
 
-		spin_lock(&adapter->mbx_lock);
+	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 
-		need_reset = hw->mac.ops.check_link(hw, &link_speed,
-						    &link_up, false);
+	spin_unlock_bh(&adapter->mbx_lock);
 
-		spin_unlock(&adapter->mbx_lock);
-
-		if (need_reset) {
-			adapter->link_up = link_up;
-			adapter->link_speed = link_speed;
-			netif_carrier_off(netdev);
-			netif_tx_stop_all_queues(netdev);
-			schedule_work(&adapter->reset_task);
-			goto pf_has_reset;
-		}
-	} else {
-		/* always assume link is up, if no check link
-		 * function */
-		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		link_up = true;
+	if (need_reset) {
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		schedule_work(&adapter->reset_task);
+		goto pf_has_reset;
 	}
 	adapter->link_up = link_up;
 	adapter->link_speed = link_speed;
@@ -2377,6 +2455,63 @@
 						  &adapter->rx_ring[i]);
 }
 
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring *rx_ring;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err, i;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (err)
+		return err;
+
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0].reg_idx = def_q;
+
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* nothing to do if we have the correct number of queues */
+	if (adapter->num_rx_queues == num_rx_queues)
+		return 0;
+
+	/* allocate new rings */
+	rx_ring = kcalloc(num_rx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!rx_ring)
+		return -ENOMEM;
+
+	/* setup ring fields */
+	for (i = 0; i < num_rx_queues; i++) {
+		rx_ring[i].count = adapter->rx_ring_count;
+		rx_ring[i].queue_index = i;
+		rx_ring[i].reg_idx = i;
+		rx_ring[i].dev = &adapter->pdev->dev;
+		rx_ring[i].netdev = adapter->netdev;
+	}
+
+	/* free the existing ring and queues */
+	adapter->num_rx_queues = 0;
+	kfree(adapter->rx_ring);
+
+	/* move new rings into position on the adapter struct */
+	adapter->rx_ring = rx_ring;
+	adapter->num_rx_queues = num_rx_queues;
+
+	return 0;
+}
+
 /**
  * ixgbevf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2413,6 +2548,11 @@
 
 	ixgbevf_negotiate_api(adapter);
 
+	/* setup queue reg_idx and Rx queue count */
+	err = ixgbevf_setup_queues(adapter);
+	if (err)
+		goto err_setup_queues;
+
 	/* allocate transmit descriptors */
 	err = ixgbevf_setup_all_tx_resources(adapter);
 	if (err)
@@ -2451,6 +2591,7 @@
 	ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
 	ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
 	ixgbevf_reset(adapter);
 
 err_setup_reset:
@@ -2562,9 +2703,6 @@
 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct sk_buff *skb, u32 tx_flags)
 {
-
-
-
 	u32 vlan_macip_lens = 0;
 	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
@@ -2678,10 +2816,10 @@
 			tx_buffer_info->dma =
 				skb_frag_dma_map(tx_ring->dev, frag,
 						 offset, size, DMA_TO_DEVICE);
-			tx_buffer_info->mapped_as_page = true;
 			if (dma_mapping_error(tx_ring->dev,
 					      tx_buffer_info->dma))
 				goto dma_error;
+			tx_buffer_info->mapped_as_page = true;
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
@@ -2754,7 +2892,6 @@
 		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
 	}
 
 	/*
@@ -2823,6 +2960,11 @@
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
+	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 
 	tx_ring = &adapter->tx_ring[r_idx];
 
@@ -2902,12 +3044,11 @@
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar)
-		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	return 0;
 }
@@ -2925,8 +3066,15 @@
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
 
-	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+	switch (adapter->hw.api_version) {
+	case ixgbe_mbox_api_11:
 		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+		break;
+	default:
+		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+		break;
+	}
 
 	/* MTU < 68 is an error and causes problems on some kernels */
 	if ((new_mtu < 68) || (max_frame > max_possible_frame))
@@ -3223,10 +3371,6 @@
 	if (err)
 		goto err_sw_init;
 
-	/* pick up the PCI bus settings for reporting later */
-	if (hw->mac.ops.get_bus_info)
-		hw->mac.ops.get_bus_info(hw);
-
 	strcpy(netdev->name, "eth%d");
 
 	err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86..0bc3005 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
 enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
 	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -100,6 +101,15 @@
 #define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
 #define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
 
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
 /* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e..0c94557 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@
 	netdev_for_each_mc_addr(ha, netdev) {
 		if (i == cnt)
 			break;
+		if (is_link_local_ether_addr(ha->addr))
+			continue;
+
 		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 	}
 
@@ -513,6 +516,64 @@
 	return err;
 }
 
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUE;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+		/* in case of unknown state assume we cannot tag frames */
+		if (*num_tcs > hw->mac.max_rx_queues)
+			*num_tcs = 1;
+
+		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
+		/* default to queue 0 on out-of-bounds queue number */
+		if (*default_tc >= hw->mac.max_tx_queues)
+			*default_tc = 0;
+	}
+
+	return err;
+}
+
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.init_hw             = ixgbevf_init_hw_vf,
 	.reset_hw            = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a5..7b1f502 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@
 
 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc);
 #endif /* __IXGBE_VF_H__ */
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 5948972..10d678d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1131,7 +1131,7 @@
 	err = request_irq(dev->irq, pxa168_eth_int_handler,
 			  IRQF_DISABLED, dev->name, dev);
 	if (err) {
-		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+		dev_err(&dev->dev, "can't assign irq\n");
 		return -EAGAIN;
 	}
 	pep->rx_resource_err = 0;
@@ -1201,9 +1201,8 @@
 	 */
 	pxa168_eth_stop(dev);
 	if (pxa168_eth_open(dev)) {
-		dev_printk(KERN_ERR, &dev->dev,
-			   "fatal error on re-opening device after "
-			   "MTU change\n");
+		dev_err(&dev->dev,
+			"fatal error on re-opening device after MTU change\n");
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d8099a7..bcdbc14 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_MELLANOX
 	bool "Mellanox devices"
 	default y
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5f027f9..eb520ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,9 +4,8 @@
 
 config MLX4_EN
 	tristate "Mellanox Technologies 10Gbit Ethernet support"
-	depends on PCI && INET
+	depends on PCI
 	select MLX4_CORE
-	select INET_LRO
 	---help---
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index edd9cb8..2b23ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -870,7 +870,7 @@
 	/* If we haven't received a specific coalescing setting
 	 * (module param), we set the moderation parameters as follows:
 	 * - moder_cnt is set to the number of mtu sized packets to
-	 *   satisfy our coelsing target.
+	 *   satisfy our coalescing target.
 	 * - moder_time is set to a fixed value.
 	 */
 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5aba5ec..f76c967 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -630,7 +630,7 @@
 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
 			    (cqe->checksum == cpu_to_be16(0xffff))) {
 				ring->csum_ok++;
-				/* This packet is eligible for LRO if it is:
+				/* This packet is eligible for GRO if it is:
 				 * - DIX Ethernet (type interpretation)
 				 * - TCP/IP (v4)
 				 * - without IP options
@@ -667,7 +667,7 @@
 					goto next;
 				}
 
-				/* LRO not possible, complete processing here */
+				/* GRO not possible, complete processing here */
 				ip_summed = CHECKSUM_UNNECESSARY;
 			} else {
 				ip_summed = CHECKSUM_NONE;
@@ -710,11 +710,8 @@
 		++cq->mcq.cons_index;
 		index = (cq->mcq.cons_index) & ring->size_mask;
 		cqe = &cq->buf[index];
-		if (++polled == budget) {
-			/* We are here because we reached the NAPI budget -
-			 * flush only pending LRO sessions */
+		if (++polled == budget)
 			goto out;
-		}
 	}
 
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42..d3eba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,8 +95,6 @@
 #define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)
 #define MLX4_EN_ALLOC_ORDER	get_order(MLX4_EN_ALLOC_SIZE)
 
-#define MLX4_EN_MAX_LRO_DESCRIPTORS	32
-
 /* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
  * and 4K allocations) */
 enum {
@@ -126,7 +124,7 @@
 #define MLX4_EN_RX_COAL_TIME	0x10
 
 #define MLX4_EN_TX_COAL_PKTS	16
-#define MLX4_EN_TX_COAL_TIME	0x80
+#define MLX4_EN_TX_COAL_TIME	0x10
 
 #define MLX4_EN_RX_RATE_LOW		400000
 #define MLX4_EN_RX_COAL_TIME_LOW	0
@@ -290,21 +288,6 @@
 	unsigned long csum_none;
 };
 
-
-static inline int mlx4_en_can_lro(__be16 status)
-{
-	return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4	|
-				     MLX4_CQE_STATUS_IPV4F	|
-				     MLX4_CQE_STATUS_IPV6	|
-				     MLX4_CQE_STATUS_IPV4OPT	|
-				     MLX4_CQE_STATUS_TCP	|
-				     MLX4_CQE_STATUS_UDP	|
-				     MLX4_CQE_STATUS_IPOK)) ==
-		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-			    MLX4_CQE_STATUS_IPOK |
-			    MLX4_CQE_STATUS_TCP);
-}
-
 struct mlx4_en_cq {
 	struct mlx4_cq          mcq;
 	struct mlx4_hwq_resources wqres;
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index dccae1d..e62c312 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1249,9 +1249,6 @@
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	int ret;
 
-	if (!is_valid_ether_addr(ndev->dev_addr))
-		return -EADDRNOTAVAIL;
-
 	ks8695_reset(ksp);
 
 	ks8695_update_mac(ksp);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 69e0197..d16ef24 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7243,18 +7243,7 @@
 	.remove		= pcidev_exit
 };
 
-static int __init ksz884x_init_module(void)
-{
-	return pci_register_driver(&pci_device_driver);
-}
-
-static void __exit ksz884x_cleanup_module(void)
-{
-	pci_unregister_driver(&pci_device_driver);
-}
-
-module_init(ksz884x_init_module);
-module_exit(ksz884x_cleanup_module);
+module_pci_driver(pci_device_driver);
 
 MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
 MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index ff26b54..87abb4f 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,7 +32,7 @@
 
 config VXGE
 	tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
-	depends on PCI && INET
+	depends on PCI
 	---help---
 	  This driver supports Exar Corp's X3100 Series 10 GbE PCIe
 	  I/O Virtualized Server Adapter.
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index de50547..c98decb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8239,7 +8239,8 @@
 
 /**
  * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
+ * Description: This function is the cleanup routine for the driver. It
+ * unregisters the driver.
  */
 
 static __exit void s2io_closer(void)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index af8b414..db6e101 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1219,9 +1219,6 @@
 	if (netif_msg_ifup(pldat))
 		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
 
-	if (!is_valid_ether_addr(ndev->dev_addr))
-		return -EADDRNOTAVAIL;
-
 	__lpc_eth_clock_enable(pldat, true);
 
 	/* Reset and initialize */
@@ -1301,6 +1298,7 @@
 	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
 	.ndo_do_ioctl		= lpc_eth_ioctl,
 	.ndo_set_mac_address	= lpc_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 };
 
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 5296cc8..34d05bf 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@
 	depends on PCI
 	select NET_CORE
 	select MII
+	select PTP_1588_CLOCK_PCH
 	---help---
 	  This is a gigabit ethernet driver for EG20T PCH.
 	  EG20T PCH is the platform controller hub that is used in Intel's
@@ -20,19 +21,3 @@
 	  purpose use.
 	  ML7223/ML7831 is companion chip for Intel Atom E6xx series.
 	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-if PCH_GBE
-
-config PCH_PTP
-	bool "PCH PTP clock support"
-	default n
-	depends on EXPERIMENTAL
-	select PPS
-	select PTP_1588_CLOCK
-	select PTP_1588_CLOCK_PCH
-	---help---
-	  Say Y here if you want to use Precision Time Protocol (PTP) in the
-	  driver. PTP is a method to precisely synchronize distributed clocks
-	  over Ethernet networks.
-
-endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index b07311e..7fb7e17 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -649,7 +649,6 @@
 extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
 				       struct pch_gbe_rx_ring *rx_ring);
 extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-#ifdef CONFIG_PCH_PTP
 extern u32 pch_ch_control_read(struct pci_dev *pdev);
 extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
 extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@
 extern u64 pch_rx_snap_read(struct pci_dev *pdev);
 extern u64 pch_tx_snap_read(struct pci_dev *pdev);
 extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
-#endif
 
 /* pch_gbe_param.c */
 extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4c4fe5b..39ab4d0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -21,10 +21,8 @@
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
 #include <linux/module.h>
-#ifdef CONFIG_PCH_PTP
 #include <linux/net_tstamp.h>
 #include <linux/ptp_classify.h>
-#endif
 
 #define DRV_VERSION     "1.01"
 const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@
 
 #define PCH_GBE_INT_DISABLE_ALL		0
 
-#ifdef CONFIG_PCH_PTP
 /* Macros for ieee1588 */
 /* 0x40 Time Synchronization Channel Control Register Bits */
 #define MASTER_MODE   (1<<0)
@@ -113,7 +110,6 @@
 
 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
-#endif
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
@@ -122,7 +118,6 @@
 			       int data);
 static void pch_gbe_set_multi(struct net_device *netdev);
 
-#ifdef CONFIG_PCH_PTP
 static struct sock_filter ptp_filter[] = {
 	PTP_FILTER
 };
@@ -291,7 +286,6 @@
 
 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
-#endif
 
 inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 {
@@ -1244,9 +1238,7 @@
 		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
 		  &hw->reg->TX_DSC_SW_P);
 
-#ifdef CONFIG_PCH_PTP
 	pch_tx_timestamp(adapter, skb);
-#endif
 
 	dev_kfree_skb_any(skb);
 }
@@ -1730,9 +1722,7 @@
 			/* Write meta date of skb */
 			skb_put(skb, length);
 
-#ifdef CONFIG_PCH_PTP
 			pch_rx_timestamp(adapter, skb);
-#endif
 
 			skb->protocol = eth_type_trans(skb, netdev);
 			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2334,10 +2324,8 @@
 
 	pr_debug("cmd : 0x%04x\n", cmd);
 
-#ifdef CONFIG_PCH_PTP
 	if (cmd == SIOCSHWTSTAMP)
 		return hwtstamp_ioctl(netdev, ifr, cmd);
-#endif
 
 	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
 }
@@ -2623,14 +2611,12 @@
 		goto err_free_netdev;
 	}
 
-#ifdef CONFIG_PCH_PTP
 	adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
 					       PCI_DEVFN(12, 4));
 	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
 		pr_err("Bad ptp filter\n");
 		return -EINVAL;
 	}
-#endif
 
 	netdev->netdev_ops = &pch_gbe_netdev_ops;
 	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 10468e7..4ca2c19 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -218,7 +218,7 @@
 			check_sfp_module = netif_running(dev) &&
 				adapter->has_link_events;
 		} else {
-			ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
+			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
 			ecmd->advertising |=
 				(ADVERTISED_TP | ADVERTISED_Autoneg);
 			ecmd->port = PORT_TP;
@@ -381,7 +381,7 @@
 
 static int
 netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
-		      u8 * bytes)
+		      u8 *bytes)
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
 	int offset;
@@ -488,6 +488,8 @@
 	__u32 val;
 	int port = adapter->physical_port;
 
+	pause->autoneg = 0;
+
 	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
 		if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
 			return;
@@ -496,19 +498,19 @@
 		pause->rx_pause = netxen_gb_get_rx_flowctl(val);
 		val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
 		switch (port) {
-			case 0:
-				pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
-				break;
-			case 1:
-				pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
-				break;
-			case 2:
-				pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
-				break;
-			case 3:
-			default:
-				pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
-				break;
+		case 0:
+			pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+			break;
+		case 1:
+			pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+			break;
+		case 2:
+			pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+			break;
+		case 3:
+		default:
+			pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+			break;
 		}
 	} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
 		if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
@@ -532,6 +534,11 @@
 	struct netxen_adapter *adapter = netdev_priv(dev);
 	__u32 val;
 	int port = adapter->physical_port;
+
+	/* not supported */
+	if (pause->autoneg)
+		return -EINVAL;
+
 	/* read mode */
 	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
 		if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
@@ -549,31 +556,31 @@
 		/* set autoneg */
 		val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
 		switch (port) {
-			case 0:
-				if (pause->tx_pause)
-					netxen_gb_unset_gb0_mask(val);
-				else
-					netxen_gb_set_gb0_mask(val);
-				break;
-			case 1:
-				if (pause->tx_pause)
-					netxen_gb_unset_gb1_mask(val);
-				else
-					netxen_gb_set_gb1_mask(val);
-				break;
-			case 2:
-				if (pause->tx_pause)
-					netxen_gb_unset_gb2_mask(val);
-				else
-					netxen_gb_set_gb2_mask(val);
-				break;
-			case 3:
-			default:
-				if (pause->tx_pause)
-					netxen_gb_unset_gb3_mask(val);
-				else
-					netxen_gb_set_gb3_mask(val);
-				break;
+		case 0:
+			if (pause->tx_pause)
+				netxen_gb_unset_gb0_mask(val);
+			else
+				netxen_gb_set_gb0_mask(val);
+			break;
+		case 1:
+			if (pause->tx_pause)
+				netxen_gb_unset_gb1_mask(val);
+			else
+				netxen_gb_set_gb1_mask(val);
+			break;
+		case 2:
+			if (pause->tx_pause)
+				netxen_gb_unset_gb2_mask(val);
+			else
+				netxen_gb_set_gb2_mask(val);
+			break;
+		case 3:
+		default:
+			if (pause->tx_pause)
+				netxen_gb_unset_gb3_mask(val);
+			else
+				netxen_gb_set_gb3_mask(val);
+			break;
 		}
 		NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
 	} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
@@ -636,7 +643,7 @@
 
 static void
 netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
-		     u64 * data)
+		     u64 *data)
 {
 	memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
 	if ((data[0] = netxen_nic_reg_test(dev)))
@@ -647,7 +654,7 @@
 }
 
 static void
-netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	int index;
 
@@ -668,7 +675,7 @@
 
 static void
 netxen_nic_get_ethtool_stats(struct net_device *dev,
-			     struct ethtool_stats *stats, u64 * data)
+			     struct ethtool_stats *stats, u64 *data)
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
 	int index;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 6407d0d..12d1f24 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1920,7 +1920,6 @@
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
-	int retval = 0;
 
 	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
 		netdev_warn(qdev->ndev,
@@ -1935,7 +1934,6 @@
 			   "Frame too short to be legal, frame not sent\n");
 
 		qdev->ndev->stats.tx_errors++;
-		retval = -EIO;
 		goto frame_not_sent;
 	}
 
@@ -1944,7 +1942,6 @@
 			   mac_rsp->transaction_id);
 
 		qdev->ndev->stats.tx_errors++;
-		retval = -EIO;
 		goto invalid_seg_count;
 	}
 
@@ -3958,15 +3955,4 @@
 	.remove = __devexit_p(ql3xxx_remove),
 };
 
-static int __init ql3xxx_init_module(void)
-{
-	return pci_register_driver(&ql3xxx_driver);
-}
-
-static void __exit ql3xxx_exit(void)
-{
-	pci_unregister_driver(&ql3xxx_driver);
-}
-
-module_init(ql3xxx_init_module);
-module_exit(ql3xxx_exit);
+module_pci_driver(ql3xxx_driver);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index ddba83e..c4b8ced 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -5,4 +5,5 @@
 obj-$(CONFIG_QLCNIC) := qlcnic.o
 
 qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
-	qlcnic_ethtool.o qlcnic_ctx.o
+	qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+	qlcnic_sysfs.o qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index eaa1db9..082eecb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -89,16 +89,6 @@
 #define QLCNIC_CT_DEFAULT_RX_BUF_LEN	2048
 #define QLCNIC_LRO_BUFFER_EXTRA		2048
 
-/* Opcodes to be used with the commands */
-#define TX_ETHER_PKT	0x01
-#define TX_TCP_PKT	0x02
-#define TX_UDP_PKT	0x03
-#define TX_IP_PKT	0x04
-#define TX_TCP_LSO	0x05
-#define TX_TCP_LSO6	0x06
-#define TX_TCPV6_PKT	0x0b
-#define TX_UDPV6_PKT	0x0c
-
 /* Tx defines */
 #define QLCNIC_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
@@ -147,28 +137,6 @@
  * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
  * we are doing LSO (above the 1500 size packet) only.
  */
-
-#define FLAGS_VLAN_TAGGED	0x10
-#define FLAGS_VLAN_OOB		0x40
-
-#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
-	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
-#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
-	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
-#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
-	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
-
-#define qlcnic_set_tx_port(_desc, _port) \
-	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
-
-#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
-	((_desc)->flags_opcode |= \
-	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
-
-#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
-	((_desc)->nfrags__length = \
-	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
-
 struct cmd_desc_type0 {
 	u8 tcp_hdr_offset;	/* For LSO only */
 	u8 ip_hdr_offset;	/* For LSO only */
@@ -203,65 +171,6 @@
 	__le64 addr_buffer;
 } __packed;
 
-/* opcode field in status_desc */
-#define QLCNIC_SYN_OFFLOAD	0x03
-#define QLCNIC_RXPKT_DESC  	0x04
-#define QLCNIC_OLD_RXPKT_DESC	0x3f
-#define QLCNIC_RESPONSE_DESC	0x05
-#define QLCNIC_LRO_DESC  	0x12
-
-/* for status field in status_desc */
-#define STATUS_CKSUM_LOOP	0
-#define STATUS_CKSUM_OK		2
-
-/* owner bits of status_desc */
-#define STATUS_OWNER_HOST	(0x1ULL << 56)
-#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
-
-/* Status descriptor:
-   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
-   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
-   53-55 desc_cnt, 56-57 owner, 58-63 opcode
- */
-#define qlcnic_get_sts_port(sts_data)	\
-	((sts_data) & 0x0F)
-#define qlcnic_get_sts_status(sts_data)	\
-	(((sts_data) >> 4) & 0x0F)
-#define qlcnic_get_sts_type(sts_data)	\
-	(((sts_data) >> 8) & 0x0F)
-#define qlcnic_get_sts_totallength(sts_data)	\
-	(((sts_data) >> 12) & 0xFFFF)
-#define qlcnic_get_sts_refhandle(sts_data)	\
-	(((sts_data) >> 28) & 0xFFFF)
-#define qlcnic_get_sts_prot(sts_data)	\
-	(((sts_data) >> 44) & 0x0F)
-#define qlcnic_get_sts_pkt_offset(sts_data)	\
-	(((sts_data) >> 48) & 0x1F)
-#define qlcnic_get_sts_desc_cnt(sts_data)	\
-	(((sts_data) >> 53) & 0x7)
-#define qlcnic_get_sts_opcode(sts_data)	\
-	(((sts_data) >> 58) & 0x03F)
-
-#define qlcnic_get_lro_sts_refhandle(sts_data) 	\
-	((sts_data) & 0x0FFFF)
-#define qlcnic_get_lro_sts_length(sts_data)	\
-	(((sts_data) >> 16) & 0x0FFFF)
-#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
-	(((sts_data) >> 32) & 0x0FF)
-#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
-	(((sts_data) >> 40) & 0x0FF)
-#define qlcnic_get_lro_sts_timestamp(sts_data)	\
-	(((sts_data) >> 48) & 0x1)
-#define qlcnic_get_lro_sts_type(sts_data)	\
-	(((sts_data) >> 49) & 0x7)
-#define qlcnic_get_lro_sts_push_flag(sts_data)		\
-	(((sts_data) >> 52) & 0x1)
-#define qlcnic_get_lro_sts_seq_number(sts_data)		\
-	((sts_data) & 0x0FFFFFFFF)
-#define qlcnic_get_lro_sts_mss(sts_data1)		\
-	((sts_data1 >> 32) & 0x0FFFF)
-
-
 struct status_desc {
 	__le64 status_desc_data[2];
 } __attribute__ ((aligned(16)));
@@ -280,16 +189,16 @@
 #define QLCNIC_UNI_FIRMWARE_IDX_OFF 	29
 
 struct uni_table_desc{
-	u32	findex;
-	u32	num_entries;
-	u32	entry_size;
-	u32	reserved[5];
+	__le32	findex;
+	__le32	num_entries;
+	__le32	entry_size;
+	__le32	reserved[5];
 };
 
 struct uni_data_desc{
-	u32	findex;
-	u32	size;
-	u32	reserved[5];
+	__le32	findex;
+	__le32	size;
+	__le32	reserved[5];
 };
 
 /* Flash Defines and Structures */
@@ -416,19 +325,19 @@
 };
 
 struct qlcnic_dump_template_hdr {
-	__le32	type;
-	__le32	offset;
-	__le32	size;
-	__le32	cap_mask;
-	__le32	num_entries;
-	__le32	version;
-	__le32	timestamp;
-	__le32	checksum;
-	__le32	drv_cap_mask;
-	__le32	sys_info[3];
-	__le32	saved_state[16];
-	__le32	cap_sizes[8];
-	__le32	rsvd[0];
+	u32	type;
+	u32	offset;
+	u32	size;
+	u32	cap_mask;
+	u32	num_entries;
+	u32	version;
+	u32	timestamp;
+	u32	checksum;
+	u32	drv_cap_mask;
+	u32	sys_info[3];
+	u32	saved_state[16];
+	u32	cap_sizes[8];
+	u32	rsvd[0];
 };
 
 struct qlcnic_fw_dump {
@@ -1065,16 +974,16 @@
 
 	spinlock_t tx_clean_lock;
 	spinlock_t mac_learn_lock;
-	__le32 file_prd_off;	/*File fw product offset*/
+	u32 file_prd_off;	/*File fw product offset*/
 	u32 fw_version;
 	const struct firmware *fw;
 };
 
-struct qlcnic_info {
+struct qlcnic_info_le {
 	__le16	pci_func;
-	__le16	op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+	__le16	op_mode;	/* 1 = Priv, 2 = NP, 3 = NP passthru */
 	__le16	phys_port;
-	__le16	switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+	__le16	switch_mode;	/* 0 = disabled, 1 = int, 2 = ext */
 
 	__le32	capabilities;
 	u8	max_mac_filters;
@@ -1088,13 +997,28 @@
 	u8	reserved2[104];
 } __packed;
 
-struct qlcnic_pci_info {
-	__le16	id; /* pci function id */
-	__le16	active; /* 1 = Enabled */
-	__le16	type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
-	__le16	default_port; /* default port number */
+struct qlcnic_info {
+	u16	pci_func;
+	u16	op_mode;
+	u16	phys_port;
+	u16	switch_mode;
+	u32	capabilities;
+	u8	max_mac_filters;
+	u8	reserved1;
+	u16	max_mtu;
+	u16	max_tx_ques;
+	u16	max_rx_ques;
+	u16	min_tx_bw;
+	u16	max_tx_bw;
+};
 
-	__le16	tx_min_bw; /* Multiple of 100mbpc */
+struct qlcnic_pci_info_le {
+	__le16	id;		/* pci function id */
+	__le16	active;		/* 1 = Enabled */
+	__le16	type;		/* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+	__le16	default_port;	/* default port number */
+
+	__le16	tx_min_bw;	/* Multiple of 100mbpc */
 	__le16	tx_max_bw;
 	__le16	reserved1[2];
 
@@ -1102,6 +1026,16 @@
 	u8	reserved2[106];
 } __packed;
 
+struct qlcnic_pci_info {
+	u16	id;
+	u16	active;
+	u16	type;
+	u16	default_port;
+	u16	tx_min_bw;
+	u16	tx_max_bw;
+	u8	mac[ETH_ALEN];
+};
+
 struct qlcnic_npar_info {
 	u16	pvid;
 	u16	min_bw;
@@ -1208,7 +1142,7 @@
 			(VAL1) += (VAL2); \
 } while (0)
 
-struct qlcnic_mac_statistics{
+struct qlcnic_mac_statistics_le {
 	__le64	mac_tx_frames;
 	__le64	mac_tx_bytes;
 	__le64	mac_tx_mcast_pkts;
@@ -1248,7 +1182,45 @@
 	__le64	mac_align_error;
 } __packed;
 
-struct __qlcnic_esw_statistics {
+struct qlcnic_mac_statistics {
+	u64	mac_tx_frames;
+	u64	mac_tx_bytes;
+	u64	mac_tx_mcast_pkts;
+	u64	mac_tx_bcast_pkts;
+	u64	mac_tx_pause_cnt;
+	u64	mac_tx_ctrl_pkt;
+	u64	mac_tx_lt_64b_pkts;
+	u64	mac_tx_lt_127b_pkts;
+	u64	mac_tx_lt_255b_pkts;
+	u64	mac_tx_lt_511b_pkts;
+	u64	mac_tx_lt_1023b_pkts;
+	u64	mac_tx_lt_1518b_pkts;
+	u64	mac_tx_gt_1518b_pkts;
+	u64	rsvd1[3];
+	u64	mac_rx_frames;
+	u64	mac_rx_bytes;
+	u64	mac_rx_mcast_pkts;
+	u64	mac_rx_bcast_pkts;
+	u64	mac_rx_pause_cnt;
+	u64	mac_rx_ctrl_pkt;
+	u64	mac_rx_lt_64b_pkts;
+	u64	mac_rx_lt_127b_pkts;
+	u64	mac_rx_lt_255b_pkts;
+	u64	mac_rx_lt_511b_pkts;
+	u64	mac_rx_lt_1023b_pkts;
+	u64	mac_rx_lt_1518b_pkts;
+	u64	mac_rx_gt_1518b_pkts;
+	u64	rsvd2[3];
+	u64	mac_rx_length_error;
+	u64	mac_rx_length_small;
+	u64	mac_rx_length_large;
+	u64	mac_rx_jabber;
+	u64	mac_rx_dropped;
+	u64	mac_rx_crc_error;
+	u64	mac_align_error;
+};
+
+struct qlcnic_esw_stats_le {
 	__le16 context_id;
 	__le16 version;
 	__le16 size;
@@ -1263,147 +1235,27 @@
 	__le64 rsvd[3];
 } __packed;
 
+struct __qlcnic_esw_statistics {
+	u16	context_id;
+	u16	version;
+	u16	size;
+	u16	unused;
+	u64	unicast_frames;
+	u64	multicast_frames;
+	u64	broadcast_frames;
+	u64	dropped_frames;
+	u64	errors;
+	u64	local_frames;
+	u64	numbytes;
+	u64	rsvd[3];
+};
+
 struct qlcnic_esw_statistics {
 	struct __qlcnic_esw_statistics rx;
 	struct __qlcnic_esw_statistics tx;
 };
 
-struct qlcnic_common_entry_hdr {
-	__le32	type;
-	__le32	offset;
-	__le32	cap_size;
-	u8	mask;
-	u8	rsvd[2];
-	u8	flags;
-} __packed;
-
-struct __crb {
-	__le32	addr;
-	u8	stride;
-	u8	rsvd1[3];
-	__le32	data_size;
-	__le32	no_ops;
-	__le32	rsvd2[4];
-} __packed;
-
-struct __ctrl {
-	__le32	addr;
-	u8	stride;
-	u8	index_a;
-	__le16	timeout;
-	__le32	data_size;
-	__le32	no_ops;
-	u8	opcode;
-	u8	index_v;
-	u8	shl_val;
-	u8	shr_val;
-	__le32	val1;
-	__le32	val2;
-	__le32	val3;
-} __packed;
-
-struct __cache {
-	__le32	addr;
-	__le16	stride;
-	__le16	init_tag_val;
-	__le32	size;
-	__le32	no_ops;
-	__le32	ctrl_addr;
-	__le32	ctrl_val;
-	__le32	read_addr;
-	u8	read_addr_stride;
-	u8	read_addr_num;
-	u8	rsvd1[2];
-} __packed;
-
-struct __ocm {
-	u8	rsvd[8];
-	__le32	size;
-	__le32	no_ops;
-	u8	rsvd1[8];
-	__le32	read_addr;
-	__le32	read_addr_stride;
-} __packed;
-
-struct __mem {
-	u8	rsvd[24];
-	__le32	addr;
-	__le32	size;
-} __packed;
-
-struct __mux {
-	__le32	addr;
-	u8	rsvd[4];
-	__le32	size;
-	__le32	no_ops;
-	__le32	val;
-	__le32	val_stride;
-	__le32	read_addr;
-	u8	rsvd2[4];
-} __packed;
-
-struct __queue {
-	__le32	sel_addr;
-	__le16	stride;
-	u8	rsvd[2];
-	__le32	size;
-	__le32	no_ops;
-	u8	rsvd2[8];
-	__le32	read_addr;
-	u8	read_addr_stride;
-	u8	read_addr_cnt;
-	u8	rsvd3[2];
-} __packed;
-
-struct qlcnic_dump_entry {
-	struct qlcnic_common_entry_hdr hdr;
-	union {
-		struct __crb	crb;
-		struct __cache	cache;
-		struct __ocm	ocm;
-		struct __mem	mem;
-		struct __mux	mux;
-		struct __queue	que;
-		struct __ctrl	ctrl;
-	} region;
-} __packed;
-
-enum op_codes {
-	QLCNIC_DUMP_NOP		= 0,
-	QLCNIC_DUMP_READ_CRB	= 1,
-	QLCNIC_DUMP_READ_MUX	= 2,
-	QLCNIC_DUMP_QUEUE	= 3,
-	QLCNIC_DUMP_BRD_CONFIG	= 4,
-	QLCNIC_DUMP_READ_OCM	= 6,
-	QLCNIC_DUMP_PEG_REG	= 7,
-	QLCNIC_DUMP_L1_DTAG	= 8,
-	QLCNIC_DUMP_L1_ITAG	= 9,
-	QLCNIC_DUMP_L1_DATA	= 11,
-	QLCNIC_DUMP_L1_INST	= 12,
-	QLCNIC_DUMP_L2_DTAG	= 21,
-	QLCNIC_DUMP_L2_ITAG	= 22,
-	QLCNIC_DUMP_L2_DATA	= 23,
-	QLCNIC_DUMP_L2_INST	= 24,
-	QLCNIC_DUMP_READ_ROM	= 71,
-	QLCNIC_DUMP_READ_MEM	= 72,
-	QLCNIC_DUMP_READ_CTRL	= 98,
-	QLCNIC_DUMP_TLHDR	= 99,
-	QLCNIC_DUMP_RDEND	= 255
-};
-
-#define QLCNIC_DUMP_WCRB	BIT_0
-#define QLCNIC_DUMP_RWCRB	BIT_1
-#define QLCNIC_DUMP_ANDCRB	BIT_2
-#define QLCNIC_DUMP_ORCRB	BIT_3
-#define QLCNIC_DUMP_POLLCRB	BIT_4
-#define QLCNIC_DUMP_RD_SAVE	BIT_5
-#define QLCNIC_DUMP_WRT_SAVED	BIT_6
-#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
-#define QLCNIC_DUMP_SKIP	BIT_7
-
-#define QLCNIC_DUMP_MASK_MIN		3
 #define QLCNIC_DUMP_MASK_DEF		0x1f
-#define QLCNIC_DUMP_MASK_MAX		0xff
 #define QLCNIC_FORCE_FW_DUMP_KEY	0xdeadfeed
 #define QLCNIC_ENABLE_FW_DUMP		0xaddfeed
 #define QLCNIC_DISABLE_FW_DUMP		0xbadfeed
@@ -1411,12 +1263,6 @@
 #define QLCNIC_SET_QUIESCENT		0xadd00010
 #define QLCNIC_RESET_QUIESCENT		0xadd00020
 
-struct qlcnic_dump_operations {
-	enum op_codes opcode;
-	u32 (*handler)(struct qlcnic_adapter *,
-			struct qlcnic_dump_entry *, u32 *);
-};
-
 struct _cdrp_cmd {
 	u32 cmd;
 	u32 arg1;
@@ -1474,6 +1320,8 @@
 #define __QLCNIC_MAX_LED_RATE	0xf
 #define __QLCNIC_MAX_LED_STATE	0x2
 
+#define MAX_CTL_CHECK 1000
+
 int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
 int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
 int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
@@ -1530,9 +1378,8 @@
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
-void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_tx_ring *tx_ring);
-void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
+void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
+void qlcnic_fetch_mac(u32, u32, u8, u8 *);
 void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
 void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
 int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
@@ -1571,6 +1418,26 @@
 int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
 extern int qlcnic_config_tso;
 
+int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_napi_del(struct qlcnic_adapter *adapter);
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+			    struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+				      struct qlcnic_esw_func_cfg *);
+
 /*
  * QLOGIC Board information
  */
@@ -1617,6 +1484,21 @@
 				tx_ring->producer;
 }
 
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+	writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+	writel(0x1, sds_ring->crb_intr_mask);
+
+	if (!QLCNIC_IS_MSI_FAMILY(adapter))
+		writel(0xfbff, adapter->tgt_mask_reg);
+}
+
 extern const struct ethtool_ops qlcnic_ethtool_ops;
 extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 2a179d0..bd31104 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -103,7 +103,7 @@
 
 }
 
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
 {
 	uint64_t sum = 0;
 	int count = temp_size / sizeof(uint32_t);
@@ -117,9 +117,9 @@
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
 	int err, i;
-	u16 temp_size;
 	void *tmp_addr;
-	u32 version, csum, *template, *tmp_buf;
+	u32 temp_size, version, csum, *template;
+	__le32 *tmp_buf;
 	struct qlcnic_cmd_args cmd;
 	struct qlcnic_hardware_context *ahw;
 	struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
@@ -163,13 +163,6 @@
 		goto error;
 	}
 	tmp_tmpl = tmp_addr;
-	csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
-	if (csum) {
-		dev_err(&adapter->pdev->dev,
-			"Template header checksum validation failed\n");
-		err = -EIO;
-		goto error;
-	}
 	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
 	if (!ahw->fw_dump.tmpl_hdr) {
 		err = -EIO;
@@ -180,6 +173,14 @@
 	for (i = 0; i < temp_size/sizeof(u32); i++)
 		*template++ = __le32_to_cpu(*tmp_buf++);
 
+	csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
+	if (csum) {
+		dev_err(&adapter->pdev->dev,
+			"Template header checksum validation failed\n");
+		err = -EIO;
+		goto error;
+	}
+
 	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
 	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
 	ahw->fw_dump.enable = 1;
@@ -231,6 +232,7 @@
 	size_t rq_size, rsp_size;
 	u32 cap, reg, val, reg2;
 	int err;
+	u16 temp;
 
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
@@ -267,8 +269,8 @@
 	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
 		cap |= QLCNIC_CAP0_LRO_MSS;
 
-	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
-							 msix_handler);
+	temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+	prq->valid_field_offset = cpu_to_le16(temp);
 	prq->txrx_sds_binding = nsds_rings - 1;
 
 	prq->capabilities[0] = cpu_to_le32(cap);
@@ -671,7 +673,7 @@
 	err = cmd.rsp.cmd;
 
 	if (err == QLCNIC_RCODE_SUCCESS)
-		qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
+		qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
 	else {
 		dev_err(&adapter->pdev->dev,
 			"Failed to get mac address%d\n", err);
@@ -687,10 +689,10 @@
 {
 	int	err;
 	dma_addr_t nic_dma_t;
-	struct qlcnic_info *nic_info;
+	struct qlcnic_info_le *nic_info;
 	void *nic_info_addr;
 	struct qlcnic_cmd_args cmd;
-	size_t	nic_size = sizeof(struct qlcnic_info);
+	size_t	nic_size = sizeof(struct qlcnic_info_le);
 
 	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
 				&nic_dma_t, GFP_KERNEL);
@@ -745,8 +747,8 @@
 	dma_addr_t nic_dma_t;
 	void *nic_info_addr;
 	struct qlcnic_cmd_args cmd;
-	struct qlcnic_info *nic_info;
-	size_t nic_size = sizeof(struct qlcnic_info);
+	struct qlcnic_info_le *nic_info;
+	size_t nic_size = sizeof(struct qlcnic_info_le);
 
 	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
 		return err;
@@ -796,9 +798,9 @@
 	int err = 0, i;
 	struct qlcnic_cmd_args cmd;
 	dma_addr_t pci_info_dma_t;
-	struct qlcnic_pci_info *npar;
+	struct qlcnic_pci_info_le *npar;
 	void *pci_info_addr;
-	size_t npar_size = sizeof(struct qlcnic_pci_info);
+	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
 	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
 
 	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
@@ -877,8 +879,8 @@
 int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
 		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
 
-	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
-	struct __qlcnic_esw_statistics *stats;
+	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
+	struct qlcnic_esw_stats_le *stats;
 	dma_addr_t stats_dma_t;
 	void *stats_addr;
 	u32 arg1;
@@ -939,9 +941,9 @@
 int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
 		struct qlcnic_mac_statistics *mac_stats)
 {
-	struct qlcnic_mac_statistics *stats;
+	struct qlcnic_mac_statistics_le *stats;
 	struct qlcnic_cmd_args cmd;
-	size_t stats_size = sizeof(struct qlcnic_mac_statistics);
+	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
 	dma_addr_t stats_dma_t;
 	void *stats_addr;
 	int err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9e9e78a..4a9425b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1247,7 +1247,8 @@
 			void *buffer)
 {
 	int i, copy_sz;
-	u32 *hdr_ptr, *data;
+	u32 *hdr_ptr;
+	__le32 *data;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 28a6b28..bd5030e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -792,22 +792,6 @@
 #define QLCNIC_FLASH_SEM2_ULK	0x0013C014
 #define QLCNIC_FLASH_LOCK_ID	0x001B2100
 
-#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do {			\
-	writel((addr & 0xFFFF0000), (void *) (bar0 +			\
-		QLCNIC_FW_DUMP_REG1));					\
-	readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1));			\
-	*data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 +		\
-		LSW(addr)));						\
-} while (0)
-
-#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do {			\
-	writel((addr & 0xFFFF0000), (void *) (bar0 +			\
-		QLCNIC_FW_DUMP_REG1));					\
-	readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1));			\
-	writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
-	readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));	\
-} while (0)
-
 /* PCI function operational mode */
 enum {
 	QLCNIC_MGMT_FUNC	= 0,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 2a0c9dc..ff879cd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -350,7 +350,7 @@
 
 	tx_ring->producer = producer;
 
-	qlcnic_update_cmd_producer(adapter, tx_ring);
+	qlcnic_update_cmd_producer(tx_ring);
 
 	__netif_tx_unlock_bh(tx_ring->txq);
 
@@ -540,7 +540,7 @@
 	}
 }
 
-int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
 {
 	struct qlcnic_nic_req req;
 	int rv;
@@ -1061,8 +1061,6 @@
 	mutex_unlock(&adapter->ahw->mem_lock);
 }
 
-#define MAX_CTL_CHECK   1000
-
 int
 qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
 		u64 off, u64 data)
@@ -1320,469 +1318,3 @@
 
 	return rv;
 }
-
-/* FW dump related functions */
-static u32
-qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
-		u32 *buffer)
-{
-	int i;
-	u32 addr, data;
-	struct __crb *crb = &entry->region.crb;
-	void __iomem *base = adapter->ahw->pci_base0;
-
-	addr = crb->addr;
-
-	for (i = 0; i < crb->no_ops; i++) {
-		QLCNIC_RD_DUMP_REG(addr, base, &data);
-		*buffer++ = cpu_to_le32(addr);
-		*buffer++ = cpu_to_le32(data);
-		addr += crb->stride;
-	}
-	return crb->no_ops * 2 * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	int i, k, timeout = 0;
-	void __iomem *base = adapter->ahw->pci_base0;
-	u32 addr, data;
-	u8 opcode, no_ops;
-	struct __ctrl *ctr = &entry->region.ctrl;
-	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-
-	addr = ctr->addr;
-	no_ops = ctr->no_ops;
-
-	for (i = 0; i < no_ops; i++) {
-		k = 0;
-		opcode = 0;
-		for (k = 0; k < 8; k++) {
-			if (!(ctr->opcode & (1 << k)))
-				continue;
-			switch (1 << k) {
-			case QLCNIC_DUMP_WCRB:
-				QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
-				break;
-			case QLCNIC_DUMP_RWCRB:
-				QLCNIC_RD_DUMP_REG(addr, base, &data);
-				QLCNIC_WR_DUMP_REG(addr, base, data);
-				break;
-			case QLCNIC_DUMP_ANDCRB:
-				QLCNIC_RD_DUMP_REG(addr, base, &data);
-				QLCNIC_WR_DUMP_REG(addr, base,
-					(data & ctr->val2));
-				break;
-			case QLCNIC_DUMP_ORCRB:
-				QLCNIC_RD_DUMP_REG(addr, base, &data);
-				QLCNIC_WR_DUMP_REG(addr, base,
-					(data | ctr->val3));
-				break;
-			case QLCNIC_DUMP_POLLCRB:
-				while (timeout <= ctr->timeout) {
-					QLCNIC_RD_DUMP_REG(addr, base, &data);
-					if ((data & ctr->val2) == ctr->val1)
-						break;
-					msleep(1);
-					timeout++;
-				}
-				if (timeout > ctr->timeout) {
-					dev_info(&adapter->pdev->dev,
-					"Timed out, aborting poll CRB\n");
-					return -EINVAL;
-				}
-				break;
-			case QLCNIC_DUMP_RD_SAVE:
-				if (ctr->index_a)
-					addr = t_hdr->saved_state[ctr->index_a];
-				QLCNIC_RD_DUMP_REG(addr, base, &data);
-				t_hdr->saved_state[ctr->index_v] = data;
-				break;
-			case QLCNIC_DUMP_WRT_SAVED:
-				if (ctr->index_v)
-					data = t_hdr->saved_state[ctr->index_v];
-				else
-					data = ctr->val1;
-				if (ctr->index_a)
-					addr = t_hdr->saved_state[ctr->index_a];
-				QLCNIC_WR_DUMP_REG(addr, base, data);
-				break;
-			case QLCNIC_DUMP_MOD_SAVE_ST:
-				data = t_hdr->saved_state[ctr->index_v];
-				data <<= ctr->shl_val;
-				data >>= ctr->shr_val;
-				if (ctr->val2)
-					data &= ctr->val2;
-				data |= ctr->val3;
-				data += ctr->val1;
-				t_hdr->saved_state[ctr->index_v] = data;
-				break;
-			default:
-				dev_info(&adapter->pdev->dev,
-					"Unknown opcode\n");
-				break;
-			}
-		}
-		addr += ctr->stride;
-	}
-	return 0;
-}
-
-static u32
-qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
-	u32 *buffer)
-{
-	int loop;
-	u32 val, data = 0;
-	struct __mux *mux = &entry->region.mux;
-	void __iomem *base = adapter->ahw->pci_base0;
-
-	val = mux->val;
-	for (loop = 0; loop < mux->no_ops; loop++) {
-		QLCNIC_WR_DUMP_REG(mux->addr, base, val);
-		QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
-		*buffer++ = cpu_to_le32(val);
-		*buffer++ = cpu_to_le32(data);
-		val += mux->val_stride;
-	}
-	return 2 * mux->no_ops * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
-	u32 *buffer)
-{
-	int i, loop;
-	u32 cnt, addr, data, que_id = 0;
-	void __iomem *base = adapter->ahw->pci_base0;
-	struct __queue *que = &entry->region.que;
-
-	addr = que->read_addr;
-	cnt = que->read_addr_cnt;
-
-	for (loop = 0; loop < que->no_ops; loop++) {
-		QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
-		addr = que->read_addr;
-		for (i = 0; i < cnt; i++) {
-			QLCNIC_RD_DUMP_REG(addr, base, &data);
-			*buffer++ = cpu_to_le32(data);
-			addr += que->read_addr_stride;
-		}
-		que_id += que->stride;
-	}
-	return que->no_ops * cnt * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
-	u32 *buffer)
-{
-	int i;
-	u32 data;
-	void __iomem *addr;
-	struct __ocm *ocm = &entry->region.ocm;
-
-	addr = adapter->ahw->pci_base0 + ocm->read_addr;
-	for (i = 0; i < ocm->no_ops; i++) {
-		data = readl(addr);
-		*buffer++ = cpu_to_le32(data);
-		addr += ocm->read_addr_stride;
-	}
-	return ocm->no_ops * sizeof(u32);
-}
-
-static u32
-qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
-	u32 *buffer)
-{
-	int i, count = 0;
-	u32 fl_addr, size, val, lck_val, addr;
-	struct __mem *rom = &entry->region.mem;
-	void __iomem *base = adapter->ahw->pci_base0;
-
-	fl_addr = rom->addr;
-	size = rom->size/4;
-lock_try:
-	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
-	if (!lck_val && count < MAX_CTL_CHECK) {
-		msleep(10);
-		count++;
-		goto lock_try;
-	}
-	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
-	for (i = 0; i < size; i++) {
-		addr = fl_addr & 0xFFFF0000;
-		QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
-		addr = LSW(fl_addr) + FLASH_ROM_DATA;
-		QLCNIC_RD_DUMP_REG(addr, base, &val);
-		fl_addr += 4;
-		*buffer++ = cpu_to_le32(val);
-	}
-	readl(base + QLCNIC_FLASH_SEM2_ULK);
-	return rom->size;
-}
-
-static u32
-qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	int i;
-	u32 cnt, val, data, addr;
-	void __iomem *base = adapter->ahw->pci_base0;
-	struct __cache *l1 = &entry->region.cache;
-
-	val = l1->init_tag_val;
-
-	for (i = 0; i < l1->no_ops; i++) {
-		QLCNIC_WR_DUMP_REG(l1->addr, base, val);
-		QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
-		addr = l1->read_addr;
-		cnt = l1->read_addr_num;
-		while (cnt) {
-			QLCNIC_RD_DUMP_REG(addr, base, &data);
-			*buffer++ = cpu_to_le32(data);
-			addr += l1->read_addr_stride;
-			cnt--;
-		}
-		val += l1->stride;
-	}
-	return l1->no_ops * l1->read_addr_num * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	int i;
-	u32 cnt, val, data, addr;
-	u8 poll_mask, poll_to, time_out = 0;
-	void __iomem *base = adapter->ahw->pci_base0;
-	struct __cache *l2 = &entry->region.cache;
-
-	val = l2->init_tag_val;
-	poll_mask = LSB(MSW(l2->ctrl_val));
-	poll_to = MSB(MSW(l2->ctrl_val));
-
-	for (i = 0; i < l2->no_ops; i++) {
-		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
-		if (LSW(l2->ctrl_val))
-			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
-				LSW(l2->ctrl_val));
-		if (!poll_mask)
-			goto skip_poll;
-		do {
-			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
-			if (!(data & poll_mask))
-				break;
-			msleep(1);
-			time_out++;
-		} while (time_out <= poll_to);
-
-		if (time_out > poll_to) {
-			dev_err(&adapter->pdev->dev,
-				"Timeout exceeded in %s, aborting dump\n",
-				__func__);
-			return -EINVAL;
-		}
-skip_poll:
-		addr = l2->read_addr;
-		cnt = l2->read_addr_num;
-		while (cnt) {
-			QLCNIC_RD_DUMP_REG(addr, base, &data);
-			*buffer++ = cpu_to_le32(data);
-			addr += l2->read_addr_stride;
-			cnt--;
-		}
-		val += l2->stride;
-	}
-	return l2->no_ops * l2->read_addr_num * sizeof(u32);
-}
-
-static u32
-qlcnic_read_memory(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	u32 addr, data, test, ret = 0;
-	int i, reg_read;
-	struct __mem *mem = &entry->region.mem;
-	void __iomem *base = adapter->ahw->pci_base0;
-
-	reg_read = mem->size;
-	addr = mem->addr;
-	/* check for data size of multiple of 16 and 16 byte alignment */
-	if ((addr & 0xf) || (reg_read%16)) {
-		dev_info(&adapter->pdev->dev,
-			"Unaligned memory addr:0x%x size:0x%x\n",
-			addr, reg_read);
-		return -EINVAL;
-	}
-
-	mutex_lock(&adapter->ahw->mem_lock);
-
-	while (reg_read != 0) {
-		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
-		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
-		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
-			TA_CTL_ENABLE | TA_CTL_START);
-
-		for (i = 0; i < MAX_CTL_CHECK; i++) {
-			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
-			if (!(test & TA_CTL_BUSY))
-				break;
-		}
-		if (i == MAX_CTL_CHECK) {
-			if (printk_ratelimit()) {
-				dev_err(&adapter->pdev->dev,
-					"failed to read through agent\n");
-				ret = -EINVAL;
-				goto out;
-			}
-		}
-		for (i = 0; i < 4; i++) {
-			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
-			*buffer++ = cpu_to_le32(data);
-		}
-		addr += 16;
-		reg_read -= 16;
-		ret += 16;
-	}
-out:
-	mutex_unlock(&adapter->ahw->mem_lock);
-	return mem->size;
-}
-
-static u32
-qlcnic_dump_nop(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-	return 0;
-}
-
-struct qlcnic_dump_operations fw_dump_ops[] = {
-	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
-	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
-	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
-	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
-	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
-	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
-	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
-	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
-	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
-	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
-	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
-	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
-	u32 size)
-{
-	int ret = 1;
-	if (size != entry->hdr.cap_size) {
-		dev_info(dev,
-		"Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
-		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
-		dev_info(dev, "Aborting further dump capture\n");
-		ret = 0;
-	}
-	return ret;
-}
-
-int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
-{
-	u32 *buffer;
-	char mesg[64];
-	char *msg[] = {mesg, NULL};
-	int i, k, ops_cnt, ops_index, dump_size = 0;
-	u32 entry_offset, dump, no_entries, buf_offset = 0;
-	struct qlcnic_dump_entry *entry;
-	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
-
-	if (fw_dump->clr) {
-		dev_info(&adapter->pdev->dev,
-			"Previous dump not cleared, not capturing dump\n");
-		return -EIO;
-	}
-	/* Calculate the size for dump data area only */
-	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
-		if (i & tmpl_hdr->drv_cap_mask)
-			dump_size += tmpl_hdr->cap_sizes[k];
-	if (!dump_size)
-		return -EIO;
-
-	fw_dump->data = vzalloc(dump_size);
-	if (!fw_dump->data) {
-		dev_info(&adapter->pdev->dev,
-			"Unable to allocate (%d KB) for fw dump\n",
-			dump_size/1024);
-		return -ENOMEM;
-	}
-	buffer = fw_dump->data;
-	fw_dump->size = dump_size;
-	no_entries = tmpl_hdr->num_entries;
-	ops_cnt = ARRAY_SIZE(fw_dump_ops);
-	entry_offset = tmpl_hdr->offset;
-	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
-	tmpl_hdr->sys_info[1] = adapter->fw_version;
-
-	for (i = 0; i < no_entries; i++) {
-		entry = (void *)tmpl_hdr + entry_offset;
-		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
-			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-			entry_offset += entry->hdr.offset;
-			continue;
-		}
-		/* Find the handler for this entry */
-		ops_index = 0;
-		while (ops_index < ops_cnt) {
-			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
-				break;
-			ops_index++;
-		}
-		if (ops_index == ops_cnt) {
-			dev_info(&adapter->pdev->dev,
-				"Invalid entry type %d, exiting dump\n",
-				entry->hdr.type);
-			goto error;
-		}
-		/* Collect dump for this entry */
-		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
-			dump))
-			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-		buf_offset += entry->hdr.cap_size;
-		entry_offset += entry->hdr.offset;
-		buffer = fw_dump->data + buf_offset;
-	}
-	if (dump_size != buf_offset) {
-		dev_info(&adapter->pdev->dev,
-			"Captured(%d) and expected size(%d) do not match\n",
-			buf_offset, dump_size);
-		goto error;
-	} else {
-		fw_dump->clr = 1;
-		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-			adapter->netdev->name);
-		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
-			fw_dump->size);
-		/* Send a udev event to notify availability of FW dump */
-		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-		return 0;
-	}
-error:
-	vfree(fw_dump->data);
-	return -EINVAL;
-}
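For context on the removal above: qlcnic_dump_fw() walks the dump template and hands each entry to a capture routine selected from fw_dump_ops[] by the entry's opcode. A minimal standalone sketch of that dispatch pattern, using simplified hypothetical types rather than the driver's real structures, might look like:

/* Illustrative sketch only: opcode -> handler dispatch over a dump
 * template, in the style of the removed qlcnic_dump_fw() walk.
 * Types, names, and opcode values here are made up for the example. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct dump_entry { uint32_t type; uint32_t size; };

typedef uint32_t (*dump_handler)(const struct dump_entry *e, uint32_t *buf);

static uint32_t dump_nop(const struct dump_entry *e, uint32_t *buf)
{
	(void)e; (void)buf;
	return 0;			/* nothing captured, entry skipped */
}

static uint32_t dump_regs(const struct dump_entry *e, uint32_t *buf)
{
	for (uint32_t i = 0; i < e->size; i++)
		buf[i] = i;		/* stand-in for register reads */
	return e->size * sizeof(uint32_t);
}

static const struct { uint32_t opcode; dump_handler handler; } ops[] = {
	{ 0, dump_nop },
	{ 1, dump_regs },
};

int main(void)
{
	struct dump_entry entries[] = { { 1, 4 }, { 0, 0 } };
	uint32_t buf[16];

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		for (size_t k = 0; k < sizeof(ops) / sizeof(ops[0]); k++) {
			if (ops[k].opcode != entries[i].type)
				continue;
			printf("entry %zu captured %u bytes\n", i,
			       ops[k].handler(&entries[i], buf));
			break;
		}
	}
	return 0;
}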
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 0bcda9c..d8610ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -25,10 +25,6 @@
 
 #define QLCNIC_ADDR_ERROR (0xffffffff)
 
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_rds_ring *rds_ring);
-
 static int
 qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
 
@@ -778,15 +774,15 @@
 static
 struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
 {
-	u32 i;
+	u32 i, entries;
 	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
-	__le32 entries = cpu_to_le32(directory->num_entries);
+	entries = le32_to_cpu(directory->num_entries);
 
 	for (i = 0; i < entries; i++) {
 
-		__le32 offs = cpu_to_le32(directory->findex) +
-				(i * cpu_to_le32(directory->entry_size));
-		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+		u32 offs = le32_to_cpu(directory->findex) +
+			   i * le32_to_cpu(directory->entry_size);
+		u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
 
 		if (tab_type == section)
 			return (struct uni_table_desc *) &unirom[offs];
@@ -802,17 +798,16 @@
 {
 	const u8 *unirom = adapter->fw->data;
 	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
-	__le32 fw_file_size = adapter->fw->size;
-	__le32 entries;
-	__le32 entry_size;
-	__le32 tab_size;
+	u32 entries, entry_size, tab_size, fw_file_size;
+
+	fw_file_size = adapter->fw->size;
 
 	if (fw_file_size < FILEHEADER_SIZE)
 		return -EINVAL;
 
-	entries = cpu_to_le32(directory->num_entries);
-	entry_size = cpu_to_le32(directory->entry_size);
-	tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+	entries = le32_to_cpu(directory->num_entries);
+	entry_size = le32_to_cpu(directory->entry_size);
+	tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
 
 	if (fw_file_size < tab_size)
 		return -EINVAL;
@@ -825,29 +820,29 @@
 {
 	struct uni_table_desc *tab_desc;
 	struct uni_data_desc *descr;
+	u32 offs, tab_size, data_size, idx;
 	const u8 *unirom = adapter->fw->data;
-	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
-				QLCNIC_UNI_BOOTLD_IDX_OFF));
-	__le32 offs;
-	__le32 tab_size;
-	__le32 data_size;
+	__le32 temp;
 
+	temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+		 QLCNIC_UNI_BOOTLD_IDX_OFF);
+	idx = le32_to_cpu(temp);
 	tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
 
 	if (!tab_desc)
 		return -EINVAL;
 
-	tab_size = cpu_to_le32(tab_desc->findex) +
-			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+	tab_size = le32_to_cpu(tab_desc->findex) +
+		   le32_to_cpu(tab_desc->entry_size) * (idx + 1);
 
 	if (adapter->fw->size < tab_size)
 		return -EINVAL;
 
-	offs = cpu_to_le32(tab_desc->findex) +
-		(cpu_to_le32(tab_desc->entry_size) * (idx));
+	offs = le32_to_cpu(tab_desc->findex) +
+	       le32_to_cpu(tab_desc->entry_size) * idx;
 	descr = (struct uni_data_desc *)&unirom[offs];
 
-	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+	data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
 
 	if (adapter->fw->size < data_size)
 		return -EINVAL;
@@ -861,27 +856,27 @@
 	struct uni_table_desc *tab_desc;
 	struct uni_data_desc *descr;
 	const u8 *unirom = adapter->fw->data;
-	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
-				QLCNIC_UNI_FIRMWARE_IDX_OFF));
-	__le32 offs;
-	__le32 tab_size;
-	__le32 data_size;
+	u32 offs, tab_size, data_size, idx;
+	__le32 temp;
 
+	temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+		 QLCNIC_UNI_FIRMWARE_IDX_OFF);
+	idx = le32_to_cpu(temp);
 	tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
 
 	if (!tab_desc)
 		return -EINVAL;
 
-	tab_size = cpu_to_le32(tab_desc->findex) +
-			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+	tab_size = le32_to_cpu(tab_desc->findex) +
+		   le32_to_cpu(tab_desc->entry_size) * (idx + 1);
 
 	if (adapter->fw->size < tab_size)
 		return -EINVAL;
 
-	offs = cpu_to_le32(tab_desc->findex) +
-		(cpu_to_le32(tab_desc->entry_size) * (idx));
+	offs = le32_to_cpu(tab_desc->findex) +
+	       le32_to_cpu(tab_desc->entry_size) * idx;
 	descr = (struct uni_data_desc *)&unirom[offs];
-	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+	data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
 
 	if (adapter->fw->size < data_size)
 		return -EINVAL;
@@ -895,19 +890,17 @@
 	struct uni_table_desc *ptab_descr;
 	const u8 *unirom = adapter->fw->data;
 	int mn_present = qlcnic_has_mn(adapter);
-	__le32 entries;
-	__le32 entry_size;
-	__le32 tab_size;
-	u32 i;
+	u32 entries, entry_size, tab_size, i;
+	__le32 temp;
 
 	ptab_descr = qlcnic_get_table_desc(unirom,
 				QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
 	if (!ptab_descr)
 		return -EINVAL;
 
-	entries = cpu_to_le32(ptab_descr->num_entries);
-	entry_size = cpu_to_le32(ptab_descr->entry_size);
-	tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+	entries = le32_to_cpu(ptab_descr->num_entries);
+	entry_size = le32_to_cpu(ptab_descr->entry_size);
+	tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
 
 	if (adapter->fw->size < tab_size)
 		return -EINVAL;
@@ -915,16 +908,16 @@
 nomn:
 	for (i = 0; i < entries; i++) {
 
-		__le32 flags, file_chiprev, offs;
+		u32 flags, file_chiprev, offs;
 		u8 chiprev = adapter->ahw->revision_id;
 		u32 flagbit;
 
-		offs = cpu_to_le32(ptab_descr->findex) +
-				(i * cpu_to_le32(ptab_descr->entry_size));
-		flags = cpu_to_le32(*((int *)&unirom[offs] +
-						QLCNIC_UNI_FLAGS_OFF));
-		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
-						QLCNIC_UNI_CHIP_REV_OFF));
+		offs = le32_to_cpu(ptab_descr->findex) +
+		       i * le32_to_cpu(ptab_descr->entry_size);
+		temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
+		flags = le32_to_cpu(temp);
+		temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
+		file_chiprev = le32_to_cpu(temp);
 
 		flagbit = mn_present ? 1 : 2;
 
@@ -976,18 +969,20 @@
 			u32 section, u32 idx_offset)
 {
 	const u8 *unirom = adapter->fw->data;
-	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
-								idx_offset));
 	struct uni_table_desc *tab_desc;
-	__le32 offs;
+	u32 offs, idx;
+	__le32 temp;
+
+	temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
+	idx = le32_to_cpu(temp);
 
 	tab_desc = qlcnic_get_table_desc(unirom, section);
 
 	if (tab_desc == NULL)
 		return NULL;
 
-	offs = cpu_to_le32(tab_desc->findex) +
-			(cpu_to_le32(tab_desc->entry_size) * idx);
+	offs = le32_to_cpu(tab_desc->findex) +
+	       le32_to_cpu(tab_desc->entry_size) * idx;
 
 	return (struct uni_data_desc *)&unirom[offs];
 }
@@ -996,11 +991,13 @@
 qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
 {
 	u32 offs = QLCNIC_BOOTLD_START;
+	struct uni_data_desc *data_desc;
+
+	data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
+					 QLCNIC_UNI_BOOTLD_IDX_OFF);
 
 	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
-		offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
-					QLCNIC_UNI_DIR_SECT_BOOTLD,
-					QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+		offs = le32_to_cpu(data_desc->findex);
 
 	return (u8 *)&adapter->fw->data[offs];
 }
@@ -1009,43 +1006,48 @@
 qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
 {
 	u32 offs = QLCNIC_IMAGE_START;
+	struct uni_data_desc *data_desc;
 
+	data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+					 QLCNIC_UNI_FIRMWARE_IDX_OFF);
 	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
-		offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
-					QLCNIC_UNI_DIR_SECT_FW,
-					QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+		offs = le32_to_cpu(data_desc->findex);
 
 	return (u8 *)&adapter->fw->data[offs];
 }
 
-static __le32
-qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
 {
+	struct uni_data_desc *data_desc;
+	const u8 *unirom = adapter->fw->data;
+
+	data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+					 QLCNIC_UNI_FIRMWARE_IDX_OFF);
+
 	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
-		return cpu_to_le32((qlcnic_get_data_desc(adapter,
-					QLCNIC_UNI_DIR_SECT_FW,
-					QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+		return le32_to_cpu(data_desc->size);
 	else
-		return cpu_to_le32(
-			*(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+		return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
 }
 
-static __le32
-qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
 {
 	struct uni_data_desc *fw_data_desc;
 	const struct firmware *fw = adapter->fw;
-	__le32 major, minor, sub;
+	u32 major, minor, sub;
+	__le32 version_offset;
 	const u8 *ver_str;
 	int i, ret;
 
-	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
-		return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+		version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
+		return le32_to_cpu(version_offset);
+	}
 
 	fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
 			QLCNIC_UNI_FIRMWARE_IDX_OFF);
-	ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
-		cpu_to_le32(fw_data_desc->size) - 17;
+	ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
+		  le32_to_cpu(fw_data_desc->size) - 17;
 
 	for (i = 0; i < 12; i++) {
 		if (!strncmp(&ver_str[i], "REV=", 4)) {
@@ -1061,18 +1063,20 @@
 	return 0;
 }
 
-static __le32
-qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
 {
 	const struct firmware *fw = adapter->fw;
-	__le32 bios_ver, prd_off = adapter->file_prd_off;
+	u32 bios_ver, prd_off = adapter->file_prd_off;
+	u8 *version_offset;
+	__le32 temp;
 
-	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
-		return cpu_to_le32(
-			*(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+	if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+		version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
+		return le32_to_cpu(*(__le32 *)version_offset);
+	}
 
-	bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
-				+ QLCNIC_UNI_BIOS_VERSION_OFF));
+	temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
+	bios_ver = le32_to_cpu(temp);
 
 	return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
 }
@@ -1131,7 +1135,7 @@
 int
 qlcnic_load_firmware(struct qlcnic_adapter *adapter)
 {
-	u64 *ptr64;
+	__le64 *ptr64;
 	u32 i, flashaddr, size;
 	const struct firmware *fw = adapter->fw;
 	struct pci_dev *pdev = adapter->pdev;
@@ -1140,15 +1144,15 @@
 			fw_name[adapter->fw_type]);
 
 	if (fw) {
-		__le64 data;
+		u64 data;
 
 		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
 
-		ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+		ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
 		flashaddr = QLCNIC_BOOTLD_START;
 
 		for (i = 0; i < size; i++) {
-			data = cpu_to_le64(ptr64[i]);
+			data = le64_to_cpu(ptr64[i]);
 
 			if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
 				return -EIO;
@@ -1156,13 +1160,13 @@
 			flashaddr += 8;
 		}
 
-		size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+		size = qlcnic_get_fw_size(adapter) / 8;
 
-		ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+		ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
 		flashaddr = QLCNIC_IMAGE_START;
 
 		for (i = 0; i < size; i++) {
-			data = cpu_to_le64(ptr64[i]);
+			data = le64_to_cpu(ptr64[i]);
 
 			if (qlcnic_pci_mem_write_2M(adapter,
 						flashaddr, data))
@@ -1171,9 +1175,9 @@
 			flashaddr += 8;
 		}
 
-		size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+		size = qlcnic_get_fw_size(adapter) % 8;
 		if (size) {
-			data = cpu_to_le64(ptr64[i]);
+			data = le64_to_cpu(ptr64[i]);
 
 			if (qlcnic_pci_mem_write_2M(adapter,
 						flashaddr, data))
@@ -1225,7 +1229,7 @@
 static int
 qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
 {
-	__le32 val;
+	u32 val;
 	u32 ver, bios, min_size;
 	struct pci_dev *pdev = adapter->pdev;
 	const struct firmware *fw = adapter->fw;
@@ -1237,8 +1241,8 @@
 
 		min_size = QLCNIC_UNI_FW_MIN_SIZE;
 	} else {
-		val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
-		if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+		val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+		if (val != QLCNIC_BDINFO_MAGIC)
 			return -EINVAL;
 
 		min_size = QLCNIC_FW_MIN_SIZE;
@@ -1259,7 +1263,7 @@
 
 	val = qlcnic_get_bios_version(adapter);
 	qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
-	if ((__force u32)val != bios) {
+	if (val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
 				fw_name[fw_type]);
 		return -EINVAL;
@@ -1324,633 +1328,3 @@
 	release_firmware(adapter->fw);
 	adapter->fw = NULL;
 }
-
-static void
-qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
-				struct qlcnic_fw_msg *msg)
-{
-	u32 cable_OUI;
-	u16 cable_len;
-	u16 link_speed;
-	u8  link_status, module, duplex, autoneg;
-	u8 lb_status = 0;
-	struct net_device *netdev = adapter->netdev;
-
-	adapter->has_link_events = 1;
-
-	cable_OUI = msg->body[1] & 0xffffffff;
-	cable_len = (msg->body[1] >> 32) & 0xffff;
-	link_speed = (msg->body[1] >> 48) & 0xffff;
-
-	link_status = msg->body[2] & 0xff;
-	duplex = (msg->body[2] >> 16) & 0xff;
-	autoneg = (msg->body[2] >> 24) & 0xff;
-	lb_status = (msg->body[2] >> 32) & 0x3;
-
-	module = (msg->body[2] >> 8) & 0xff;
-	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
-		dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
-				"length %d\n", cable_OUI, cable_len);
-	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
-		dev_info(&netdev->dev, "unsupported cable length %d\n",
-				cable_len);
-
-	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
-	    lb_status == QLCNIC_ELB_MODE))
-		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
-
-	qlcnic_advert_link_change(adapter, link_status);
-
-	if (duplex == LINKEVENT_FULL_DUPLEX)
-		adapter->link_duplex = DUPLEX_FULL;
-	else
-		adapter->link_duplex = DUPLEX_HALF;
-
-	adapter->module_type = module;
-	adapter->link_autoneg = autoneg;
-
-	if (link_status) {
-		adapter->link_speed = link_speed;
-	} else {
-		adapter->link_speed = SPEED_UNKNOWN;
-		adapter->link_duplex = DUPLEX_UNKNOWN;
-	}
-}
-
-static void
-qlcnic_handle_fw_message(int desc_cnt, int index,
-		struct qlcnic_host_sds_ring *sds_ring)
-{
-	struct qlcnic_fw_msg msg;
-	struct status_desc *desc;
-	struct qlcnic_adapter *adapter;
-	struct device *dev;
-	int i = 0, opcode, ret;
-
-	while (desc_cnt > 0 && i < 8) {
-		desc = &sds_ring->desc_head[index];
-		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
-		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
-
-		index = get_next_index(index, sds_ring->num_desc);
-		desc_cnt--;
-	}
-
-	adapter = sds_ring->adapter;
-	dev = &adapter->pdev->dev;
-	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
-
-	switch (opcode) {
-	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
-		qlcnic_handle_linkevent(adapter, &msg);
-		break;
-	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
-		ret = (u32)(msg.body[1]);
-		switch (ret) {
-		case 0:
-			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
-			break;
-		case 1:
-			dev_info(dev, "loopback already in progress\n");
-			adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
-			break;
-		case 2:
-			dev_info(dev, "loopback cable is not connected\n");
-			adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
-			break;
-		default:
-			dev_info(dev, "loopback configure request failed,"
-					" ret %x\n", ret);
-			adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
-			break;
-		}
-		break;
-	default:
-		break;
-	}
-}
-
-static int
-qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_rds_ring *rds_ring,
-		struct qlcnic_rx_buffer *buffer)
-{
-	struct sk_buff *skb;
-	dma_addr_t dma;
-	struct pci_dev *pdev = adapter->pdev;
-
-	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
-	if (!skb) {
-		adapter->stats.skb_alloc_failure++;
-		return -ENOMEM;
-	}
-
-	skb_reserve(skb, NET_IP_ALIGN);
-
-	dma = pci_map_single(pdev, skb->data,
-			rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(pdev, dma)) {
-		adapter->stats.rx_dma_map_error++;
-		dev_kfree_skb_any(skb);
-		return -ENOMEM;
-	}
-
-	buffer->skb = skb;
-	buffer->dma = dma;
-
-	return 0;
-}
-
-static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
-{
-	struct qlcnic_rx_buffer *buffer;
-	struct sk_buff *skb;
-
-	buffer = &rds_ring->rx_buf_arr[index];
-
-	if (unlikely(buffer->skb == NULL)) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
-			PCI_DMA_FROMDEVICE);
-
-	skb = buffer->skb;
-
-	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
-	    (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
-		adapter->stats.csummed++;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		skb_checksum_none_assert(skb);
-	}
-
-	buffer->skb = NULL;
-
-	return skb;
-}
-
-static inline int
-qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
-			u16 *vlan_tag)
-{
-	struct ethhdr *eth_hdr;
-
-	if (!__vlan_get_tag(skb, vlan_tag)) {
-		eth_hdr = (struct ethhdr *) skb->data;
-		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
-		skb_pull(skb, VLAN_HLEN);
-	}
-	if (!adapter->pvid)
-		return 0;
-
-	if (*vlan_tag == adapter->pvid) {
-		/* Outer vlan tag. Packet should follow non-vlan path */
-		*vlan_tag = 0xffff;
-		return 0;
-	}
-	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
-		return 0;
-
-	return -EINVAL;
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_sds_ring *sds_ring,
-		int ring, u64 sts_data0)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-	struct qlcnic_rx_buffer *buffer;
-	struct sk_buff *skb;
-	struct qlcnic_host_rds_ring *rds_ring;
-	int index, length, cksum, pkt_offset;
-	u16 vid = 0xffff;
-
-	if (unlikely(ring >= adapter->max_rds_rings))
-		return NULL;
-
-	rds_ring = &recv_ctx->rds_rings[ring];
-
-	index = qlcnic_get_sts_refhandle(sts_data0);
-	if (unlikely(index >= rds_ring->num_desc))
-		return NULL;
-
-	buffer = &rds_ring->rx_buf_arr[index];
-
-	length = qlcnic_get_sts_totallength(sts_data0);
-	cksum  = qlcnic_get_sts_status(sts_data0);
-	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
-	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
-	if (!skb)
-		return buffer;
-
-	if (length > rds_ring->skb_size)
-		skb_put(skb, rds_ring->skb_size);
-	else
-		skb_put(skb, length);
-
-	if (pkt_offset)
-		skb_pull(skb, pkt_offset);
-
-	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
-		adapter->stats.rxdropped++;
-		dev_kfree_skb(skb);
-		return buffer;
-	}
-
-	skb->protocol = eth_type_trans(skb, netdev);
-
-	if (vid != 0xffff)
-		__vlan_hwaccel_put_tag(skb, vid);
-
-	napi_gro_receive(&sds_ring->napi, skb);
-
-	adapter->stats.rx_pkts++;
-	adapter->stats.rxbytes += length;
-
-	return buffer;
-}
-
-#define QLC_TCP_HDR_SIZE            20
-#define QLC_TCP_TS_OPTION_SIZE      12
-#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_lro(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_sds_ring *sds_ring,
-		int ring, u64 sts_data0, u64 sts_data1)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-	struct qlcnic_rx_buffer *buffer;
-	struct sk_buff *skb;
-	struct qlcnic_host_rds_ring *rds_ring;
-	struct iphdr *iph;
-	struct tcphdr *th;
-	bool push, timestamp;
-	int l2_hdr_offset, l4_hdr_offset;
-	int index;
-	u16 lro_length, length, data_offset;
-	u32 seq_number;
-	u16 vid = 0xffff;
-
-	if (unlikely(ring > adapter->max_rds_rings))
-		return NULL;
-
-	rds_ring = &recv_ctx->rds_rings[ring];
-
-	index = qlcnic_get_lro_sts_refhandle(sts_data0);
-	if (unlikely(index > rds_ring->num_desc))
-		return NULL;
-
-	buffer = &rds_ring->rx_buf_arr[index];
-
-	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
-	lro_length = qlcnic_get_lro_sts_length(sts_data0);
-	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
-	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
-	push = qlcnic_get_lro_sts_push_flag(sts_data0);
-	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
-
-	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
-	if (!skb)
-		return buffer;
-
-	if (timestamp)
-		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
-	else
-		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
-
-	skb_put(skb, lro_length + data_offset);
-
-	skb_pull(skb, l2_hdr_offset);
-
-	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
-		adapter->stats.rxdropped++;
-		dev_kfree_skb(skb);
-		return buffer;
-	}
-
-	skb->protocol = eth_type_trans(skb, netdev);
-
-	iph = (struct iphdr *)skb->data;
-	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
-
-	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
-	iph->tot_len = htons(length);
-	iph->check = 0;
-	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-	th->psh = push;
-	th->seq = htonl(seq_number);
-
-	length = skb->len;
-
-	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
-		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
-
-	if (vid != 0xffff)
-		__vlan_hwaccel_put_tag(skb, vid);
-	netif_receive_skb(skb);
-
-	adapter->stats.lro_pkts++;
-	adapter->stats.lrobytes += length;
-
-	return buffer;
-}
-
-int
-qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
-{
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-	struct list_head *cur;
-	struct status_desc *desc;
-	struct qlcnic_rx_buffer *rxbuf;
-	u64 sts_data0, sts_data1;
-
-	int count = 0;
-	int opcode, ring, desc_cnt;
-	u32 consumer = sds_ring->consumer;
-
-	while (count < max) {
-		desc = &sds_ring->desc_head[consumer];
-		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-		if (!(sts_data0 & STATUS_OWNER_HOST))
-			break;
-
-		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
-		opcode = qlcnic_get_sts_opcode(sts_data0);
-
-		switch (opcode) {
-		case QLCNIC_RXPKT_DESC:
-		case QLCNIC_OLD_RXPKT_DESC:
-		case QLCNIC_SYN_OFFLOAD:
-			ring = qlcnic_get_sts_type(sts_data0);
-			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
-					ring, sts_data0);
-			break;
-		case QLCNIC_LRO_DESC:
-			ring = qlcnic_get_lro_sts_type(sts_data0);
-			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
-			rxbuf = qlcnic_process_lro(adapter, sds_ring,
-					ring, sts_data0, sts_data1);
-			break;
-		case QLCNIC_RESPONSE_DESC:
-			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
-		default:
-			goto skip;
-		}
-
-		WARN_ON(desc_cnt > 1);
-
-		if (likely(rxbuf))
-			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
-		else
-			adapter->stats.null_rxbuf++;
-
-skip:
-		for (; desc_cnt > 0; desc_cnt--) {
-			desc = &sds_ring->desc_head[consumer];
-			desc->status_desc_data[0] =
-				cpu_to_le64(STATUS_OWNER_PHANTOM);
-			consumer = get_next_index(consumer, sds_ring->num_desc);
-		}
-		count++;
-	}
-
-	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-		struct qlcnic_host_rds_ring *rds_ring =
-			&adapter->recv_ctx->rds_rings[ring];
-
-		if (!list_empty(&sds_ring->free_list[ring])) {
-			list_for_each(cur, &sds_ring->free_list[ring]) {
-				rxbuf = list_entry(cur,
-						struct qlcnic_rx_buffer, list);
-				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
-			}
-			spin_lock(&rds_ring->lock);
-			list_splice_tail_init(&sds_ring->free_list[ring],
-						&rds_ring->free_list);
-			spin_unlock(&rds_ring->lock);
-		}
-
-		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
-	}
-
-	if (count) {
-		sds_ring->consumer = consumer;
-		writel(consumer, sds_ring->crb_sts_consumer);
-	}
-
-	return count;
-}
-
-void
-qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
-	struct qlcnic_host_rds_ring *rds_ring)
-{
-	struct rcv_desc *pdesc;
-	struct qlcnic_rx_buffer *buffer;
-	int count = 0;
-	u32 producer;
-	struct list_head *head;
-
-	producer = rds_ring->producer;
-
-	head = &rds_ring->free_list;
-	while (!list_empty(head)) {
-
-		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
-		if (!buffer->skb) {
-			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
-				break;
-		}
-
-		count++;
-		list_del(&buffer->list);
-
-		/* make a rcv descriptor  */
-		pdesc = &rds_ring->desc_head[producer];
-		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-
-		producer = get_next_index(producer, rds_ring->num_desc);
-	}
-
-	if (count) {
-		rds_ring->producer = producer;
-		writel((producer-1) & (rds_ring->num_desc-1),
-				rds_ring->crb_rcv_producer);
-	}
-}
-
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_rds_ring *rds_ring)
-{
-	struct rcv_desc *pdesc;
-	struct qlcnic_rx_buffer *buffer;
-	int  count = 0;
-	uint32_t producer;
-	struct list_head *head;
-
-	if (!spin_trylock(&rds_ring->lock))
-		return;
-
-	producer = rds_ring->producer;
-
-	head = &rds_ring->free_list;
-	while (!list_empty(head)) {
-
-		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
-		if (!buffer->skb) {
-			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
-				break;
-		}
-
-		count++;
-		list_del(&buffer->list);
-
-		/* make a rcv descriptor  */
-		pdesc = &rds_ring->desc_head[producer];
-		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-
-		producer = get_next_index(producer, rds_ring->num_desc);
-	}
-
-	if (count) {
-		rds_ring->producer = producer;
-		writel((producer - 1) & (rds_ring->num_desc - 1),
-				rds_ring->crb_rcv_producer);
-	}
-	spin_unlock(&rds_ring->lock);
-}
-
-static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
-{
-	int i;
-	unsigned char *data = skb->data;
-
-	printk(KERN_INFO "\n");
-	for (i = 0; i < skb->len; i++) {
-		QLCDB(adapter, DRV, "%02x ", data[i]);
-		if ((i & 0x0f) == 8)
-			printk(KERN_INFO "\n");
-	}
-}
-
-void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_sds_ring *sds_ring,
-		int ring, u64 sts_data0)
-{
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-	struct sk_buff *skb;
-	struct qlcnic_host_rds_ring *rds_ring;
-	int index, length, cksum, pkt_offset;
-
-	if (unlikely(ring >= adapter->max_rds_rings))
-		return;
-
-	rds_ring = &recv_ctx->rds_rings[ring];
-
-	index = qlcnic_get_sts_refhandle(sts_data0);
-	length = qlcnic_get_sts_totallength(sts_data0);
-	if (unlikely(index >= rds_ring->num_desc))
-		return;
-
-	cksum  = qlcnic_get_sts_status(sts_data0);
-	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
-	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
-	if (!skb)
-		return;
-
-	if (length > rds_ring->skb_size)
-		skb_put(skb, rds_ring->skb_size);
-	else
-		skb_put(skb, length);
-
-	if (pkt_offset)
-		skb_pull(skb, pkt_offset);
-
-	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
-		adapter->diag_cnt++;
-	else
-		dump_skb(skb, adapter);
-
-	dev_kfree_skb_any(skb);
-	adapter->stats.rx_pkts++;
-	adapter->stats.rxbytes += length;
-
-	return;
-}
-
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-	struct status_desc *desc;
-	u64 sts_data0;
-	int ring, opcode, desc_cnt;
-
-	u32 consumer = sds_ring->consumer;
-
-	desc = &sds_ring->desc_head[consumer];
-	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-	if (!(sts_data0 & STATUS_OWNER_HOST))
-		return;
-
-	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
-	opcode = qlcnic_get_sts_opcode(sts_data0);
-	switch (opcode) {
-	case QLCNIC_RESPONSE_DESC:
-		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
-		break;
-	default:
-		ring = qlcnic_get_sts_type(sts_data0);
-		qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
-		break;
-	}
-
-	for (; desc_cnt > 0; desc_cnt--) {
-		desc = &sds_ring->desc_head[consumer];
-		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
-		consumer = get_next_index(consumer, sds_ring->num_desc);
-	}
-
-	sds_ring->consumer = consumer;
-	writel(consumer, sds_ring->crb_sts_consumer);
-}
-
-void
-qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
-			u8 alt_mac, u8 *mac)
-{
-	u32 mac_low, mac_high;
-	int i;
-
-	mac_low = off1;
-	mac_high = off2;
-
-	if (alt_mac) {
-		mac_low |= (mac_low >> 16) | (mac_high << 16);
-		mac_high >>= 16;
-	}
-
-	for (i = 0; i < 2; i++)
-		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
-	for (i = 2; i < 6; i++)
-		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
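Much of the qlcnic_init.c conversion above changes cpu_to_le32() on firmware-image fields to le32_to_cpu() on __le32-annotated data, so the byte-order conversion runs in the correct direction and sparse can verify it. A small userspace sketch of the same read pattern, with a hypothetical header layout and le32toh() from <endian.h> (glibc) standing in for le32_to_cpu(), might be:

/* Illustrative only: pulling little-endian fields out of a firmware blob.
 * In the kernel the fields would be declared __le32 and converted with
 * le32_to_cpu(); the layout and values below are invented for the demo. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

struct uni_table_hdr {		/* hypothetical, simplified layout */
	uint32_t findex;	/* little-endian in the blob */
	uint32_t num_entries;	/* little-endian in the blob */
	uint32_t entry_size;	/* little-endian in the blob */
};

int main(void)
{
	/* Fake 12-byte "firmware" header: findex=0x20, entries=2, size=8 */
	const uint8_t blob[12] = { 0x20, 0, 0, 0,  2, 0, 0, 0,  8, 0, 0, 0 };
	struct uni_table_hdr hdr;

	memcpy(&hdr, blob, sizeof(hdr));

	/* Convert from the blob's little-endian order to host order; this
	 * mirrors le32_to_cpu() in the hunks above, not cpu_to_le32(). */
	uint32_t findex  = le32toh(hdr.findex);
	uint32_t entries = le32toh(hdr.num_entries);
	uint32_t esize   = le32toh(hdr.entry_size);

	printf("table at 0x%x, %u entries of %u bytes\n",
	       findex, entries, esize);
	return 0;
}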
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 0000000..ba352c1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,1309 @@
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+
+#include "qlcnic.h"
+
+#define QLCNIC_MAC_HASH(MAC)\
+	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
+#define TX_ETHER_PKT	0x01
+#define TX_TCP_PKT	0x02
+#define TX_UDP_PKT	0x03
+#define TX_IP_PKT	0x04
+#define TX_TCP_LSO	0x05
+#define TX_TCP_LSO6	0x06
+#define TX_TCPV6_PKT	0x0b
+#define TX_UDPV6_PKT	0x0c
+#define FLAGS_VLAN_TAGGED	0x10
+#define FLAGS_VLAN_OOB		0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
+	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+	((_desc)->flags_opcode |= \
+	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+	((_desc)->nfrags__length = \
+	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST	(0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data)	\
+	((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data)	\
+	(((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data)	\
+	(((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data)	\
+	(((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data)	\
+	(((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data)	\
+	(((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data)	\
+	(((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data)	\
+	(((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data)	\
+	(((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) 	\
+	((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data)	\
+	(((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
+	(((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
+	(((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data)	\
+	(((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data)	\
+	(((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data)		\
+	(((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data)		\
+	((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1)		\
+	((sts_data1 >> 32) & 0x0FFFF)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD	0x03
+#define QLCNIC_RXPKT_DESC  	0x04
+#define QLCNIC_OLD_RXPKT_DESC	0x3f
+#define QLCNIC_RESPONSE_DESC	0x05
+#define QLCNIC_LRO_DESC  	0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP	0
+#define STATUS_CKSUM_OK		2
+
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+				 u64 uaddr, __le16 vlan_id,
+				 struct qlcnic_host_tx_ring *tx_ring)
+{
+	struct cmd_desc_type0 *hwdesc;
+	struct qlcnic_nic_req *req;
+	struct qlcnic_mac_req *mac_req;
+	struct qlcnic_vlan_req *vlan_req;
+	u32 producer;
+	u64 word;
+
+	producer = tx_ring->producer;
+	hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+	req = (struct qlcnic_nic_req *)hwdesc;
+	memset(req, 0, sizeof(struct qlcnic_nic_req));
+	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+	req->req_hdr = cpu_to_le64(word);
+
+	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+	vlan_req->vlan_id = vlan_id;
+
+	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+	smp_mb();
+}
+
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+			       struct qlcnic_host_tx_ring *tx_ring,
+			       struct cmd_desc_type0 *first_desc,
+			       struct sk_buff *skb)
+{
+	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+	struct qlcnic_filter *fil, *tmp_fil;
+	struct hlist_node *tmp_hnode, *n;
+	struct hlist_head *head;
+	u64 src_addr = 0;
+	__le16 vlan_id = 0;
+	u8 hindex;
+
+	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+		return;
+
+	if (adapter->fhash.fnum >= adapter->fhash.fmax)
+		return;
+
+	/* Only NPAR capable devices support vlan based learning*/
+	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+		vlan_id = first_desc->vlan_TCI;
+	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+	head = &(adapter->fhash.fhead[hindex]);
+
+	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+			    tmp_fil->vlan_id == vlan_id) {
+
+			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+				qlcnic_change_filter(adapter, src_addr, vlan_id,
+						     tx_ring);
+			tmp_fil->ftime = jiffies;
+			return;
+		}
+	}
+
+	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+	if (!fil)
+		return;
+
+	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+	fil->ftime = jiffies;
+	fil->vlan_id = vlan_id;
+	memcpy(fil->faddr, &src_addr, ETH_ALEN);
+
+	spin_lock(&adapter->mac_learn_lock);
+
+	hlist_add_head(&(fil->fnode), head);
+	adapter->fhash.fnum++;
+
+	spin_unlock(&adapter->mac_learn_lock);
+}
+
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+{
+	u8 l4proto, opcode = 0, hdr_len = 0;
+	u16 flags = 0, vlan_tci = 0;
+	int copied, offset, copy_len, size;
+	struct cmd_desc_type0 *hwdesc;
+	struct vlan_ethhdr *vh;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	u16 protocol = ntohs(skb->protocol);
+	u32 producer = tx_ring->producer;
+
+	if (protocol == ETH_P_8021Q) {
+		vh = (struct vlan_ethhdr *)skb->data;
+		flags = FLAGS_VLAN_TAGGED;
+		vlan_tci = ntohs(vh->h_vlan_TCI);
+		protocol = ntohs(vh->h_vlan_encapsulated_proto);
+	} else if (vlan_tx_tag_present(skb)) {
+		flags = FLAGS_VLAN_OOB;
+		vlan_tci = vlan_tx_tag_get(skb);
+	}
+	if (unlikely(adapter->pvid)) {
+		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+			return -EIO;
+		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+			goto set_flags;
+
+		flags = FLAGS_VLAN_OOB;
+		vlan_tci = adapter->pvid;
+	}
+set_flags:
+	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+	if (*(skb->data) & BIT_0) {
+		flags |= BIT_0;
+		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+	}
+	opcode = TX_ETHER_PKT;
+	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+	    skb_shinfo(skb)->gso_size > 0) {
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		first_desc->total_hdr_length = hdr_len;
+		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+		/* For LSO, we need to copy the MAC/IP/TCP headers into
+		* the descriptor ring */
+		copied = 0;
+		offset = 2;
+
+		if (flags & FLAGS_VLAN_OOB) {
+			first_desc->total_hdr_length += VLAN_HLEN;
+			first_desc->tcp_hdr_offset = VLAN_HLEN;
+			first_desc->ip_hdr_offset = VLAN_HLEN;
+
+			/* Only in case of TSO on vlan device */
+			flags |= FLAGS_VLAN_TAGGED;
+
+			/* Create a TSO vlan header template for firmware */
+			hwdesc = &tx_ring->desc_head[producer];
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+			copy_len = min((int)sizeof(struct cmd_desc_type0) -
+				       offset, hdr_len + VLAN_HLEN);
+
+			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+			skb_copy_from_linear_data(skb, vh, 12);
+			vh->h_vlan_proto = htons(ETH_P_8021Q);
+			vh->h_vlan_TCI = htons(vlan_tci);
+
+			skb_copy_from_linear_data_offset(skb, 12,
+							 (char *)vh + 16,
+							 copy_len - 16);
+			copied = copy_len - VLAN_HLEN;
+			offset = 0;
+			producer = get_next_index(producer, tx_ring->num_desc);
+		}
+
+		while (copied < hdr_len) {
+			size = (int)sizeof(struct cmd_desc_type0) - offset;
+			copy_len = min(size, (hdr_len - copied));
+			hwdesc = &tx_ring->desc_head[producer];
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+			skb_copy_from_linear_data_offset(skb, copied,
+							 (char *)hwdesc +
+							 offset, copy_len);
+			copied += copy_len;
+			offset = 0;
+			producer = get_next_index(producer, tx_ring->num_desc);
+		}
+
+		tx_ring->producer = producer;
+		smp_mb();
+		adapter->stats.lso_frames++;
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (protocol == ETH_P_IP) {
+			l4proto = ip_hdr(skb)->protocol;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCP_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDP_PKT;
+		} else if (protocol == ETH_P_IPV6) {
+			l4proto = ipv6_hdr(skb)->nexthdr;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCPV6_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDPV6_PKT;
+		}
+	}
+	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+	first_desc->ip_hdr_offset += skb_network_offset(skb);
+	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+	return 0;
+}
+
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+			     struct qlcnic_cmd_buffer *pbuf)
+{
+	struct qlcnic_skb_frag *nf;
+	struct skb_frag_struct *frag;
+	int i, nr_frags;
+	dma_addr_t map;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	nf = &pbuf->frag_array[0];
+
+	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
+			     PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, map))
+		goto out_err;
+
+	nf->dma = map;
+	nf->length = skb_headlen(skb);
+
+	for (i = 0; i < nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		nf = &pbuf->frag_array[i+1];
+		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, map))
+			goto unwind;
+
+		nf->dma = map;
+		nf->length = skb_frag_size(frag);
+	}
+
+	return 0;
+
+unwind:
+	while (--i >= 0) {
+		nf = &pbuf->frag_array[i+1];
+		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+	}
+
+	nf = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+	return -ENOMEM;
+}
+
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+				 struct qlcnic_cmd_buffer *pbuf)
+{
+	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+	for (i = 0; i < nr_frags; i++) {
+		nf = &pbuf->frag_array[i+1];
+		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+	}
+
+	nf = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+	pbuf->skb = NULL;
+}
+
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+	desc[0] = 0ULL;
+	desc[2] = 0ULL;
+	desc[7] = 0ULL;
+}
+
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	struct qlcnic_cmd_buffer *pbuf;
+	struct qlcnic_skb_frag *buffrag;
+	struct cmd_desc_type0 *hwdesc, *first_desc;
+	struct pci_dev *pdev;
+	struct ethhdr *phdr;
+	int i, k, frag_count, delta = 0;
+	u32 producer, num_txd;
+
+	num_txd = tx_ring->num_desc;
+
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (adapter->flags & QLCNIC_MACSPOOF) {
+		phdr = (struct ethhdr *)skb->data;
+		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+			goto drop_packet;
+	}
+
+	frag_count = skb_shinfo(skb)->nr_frags + 1;
+	/* 14 frags supported for normal packet and
+	 * 32 frags supported for TSO packet
+	 */
+	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (!__pskb_pull_tail(skb, delta))
+			goto drop_packet;
+
+		frag_count = 1 + skb_shinfo(skb)->nr_frags;
+	}
+
+	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+		netif_stop_queue(netdev);
+		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+			netif_start_queue(netdev);
+		} else {
+			adapter->stats.xmit_off++;
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	producer = tx_ring->producer;
+	pbuf = &tx_ring->cmd_buf_arr[producer];
+	pdev = adapter->pdev;
+	first_desc = &tx_ring->desc_head[producer];
+	hwdesc = &tx_ring->desc_head[producer];
+	qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+		adapter->stats.tx_dma_map_error++;
+		goto drop_packet;
+	}
+
+	pbuf->skb = skb;
+	pbuf->frag_count = frag_count;
+
+	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+	qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+	for (i = 0; i < frag_count; i++) {
+		k = i % 4;
+
+		if ((k == 0) && (i > 0)) {
+			/* move to next desc.*/
+			producer = get_next_index(producer, num_txd);
+			hwdesc = &tx_ring->desc_head[producer];
+			qlcnic_clear_cmddesc((u64 *)hwdesc);
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+		}
+
+		buffrag = &pbuf->frag_array[i];
+		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+		switch (k) {
+		case 0:
+			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+			break;
+		case 1:
+			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+			break;
+		case 2:
+			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+			break;
+		case 3:
+			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+			break;
+		}
+	}
+
+	tx_ring->producer = get_next_index(producer, num_txd);
+	smp_mb();
+
+	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+		goto unwind_buff;
+
+	if (adapter->mac_learn)
+		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
+	adapter->stats.txbytes += skb->len;
+	adapter->stats.xmitcalled++;
+
+	qlcnic_update_cmd_producer(tx_ring);
+
+	return NETDEV_TX_OK;
+
+unwind_buff:
+	qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (adapter->ahw->linkup && !linkup) {
+		netdev_info(netdev, "NIC Link is down\n");
+		adapter->ahw->linkup = 0;
+		if (netif_running(netdev)) {
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	} else if (!adapter->ahw->linkup && linkup) {
+		netdev_info(netdev, "NIC Link is up\n");
+		adapter->ahw->linkup = 1;
+		if (netif_running(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		}
+	}
+}
+
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+			       struct qlcnic_host_rds_ring *rds_ring,
+			       struct qlcnic_rx_buffer *buffer)
+{
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct pci_dev *pdev = adapter->pdev;
+
+	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+	if (!skb) {
+		adapter->stats.skb_alloc_failure++;
+		return -ENOMEM;
+	}
+
+	skb_reserve(skb, NET_IP_ALIGN);
+	dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
+			     PCI_DMA_FROMDEVICE);
+
+	if (pci_dma_mapping_error(pdev, dma)) {
+		adapter->stats.rx_dma_map_error++;
+		dev_kfree_skb_any(skb);
+		return -ENOMEM;
+	}
+
+	buffer->skb = skb;
+	buffer->dma = dma;
+
+	return 0;
+}
+
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+					 struct qlcnic_host_rds_ring *rds_ring)
+{
+	struct rcv_desc *pdesc;
+	struct qlcnic_rx_buffer *buffer;
+	int  count = 0;
+	uint32_t producer;
+	struct list_head *head;
+
+	if (!spin_trylock(&rds_ring->lock))
+		return;
+
+	producer = rds_ring->producer;
+	head = &rds_ring->free_list;
+
+	while (!list_empty(head)) {
+		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+		if (!buffer->skb) {
+			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+				break;
+		}
+
+		count++;
+		list_del(&buffer->list);
+
+		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+		producer = get_next_index(producer, rds_ring->num_desc);
+	}
+
+	if (count) {
+		rds_ring->producer = producer;
+		writel((producer - 1) & (rds_ring->num_desc - 1),
+		       rds_ring->crb_rcv_producer);
+	}
+
+	spin_unlock(&rds_ring->lock);
+}
+
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+	u32 sw_consumer, hw_consumer;
+	int i, done, count = 0;
+	struct qlcnic_cmd_buffer *buffer;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_skb_frag *frag;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+	if (!spin_trylock(&adapter->tx_clean_lock))
+		return 1;
+
+	sw_consumer = tx_ring->sw_consumer;
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+	while (sw_consumer != hw_consumer) {
+		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+		if (buffer->skb) {
+			frag = &buffer->frag_array[0];
+			pci_unmap_single(pdev, frag->dma, frag->length,
+					 PCI_DMA_TODEVICE);
+			frag->dma = 0ULL;
+			for (i = 1; i < buffer->frag_count; i++) {
+				frag++;
+				pci_unmap_page(pdev, frag->dma, frag->length,
+					       PCI_DMA_TODEVICE);
+				frag->dma = 0ULL;
+			}
+
+			adapter->stats.xmitfinished++;
+			dev_kfree_skb_any(buffer->skb);
+			buffer->skb = NULL;
+		}
+
+		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+		if (++count >= MAX_STATUS_HANDLE)
+			break;
+	}
+
+	if (count && netif_running(netdev)) {
+		tx_ring->sw_consumer = sw_consumer;
+
+		smp_mb();
+
+		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+				netif_wake_queue(netdev);
+				adapter->stats.xmit_on++;
+			}
+		}
+		adapter->tx_timeo_cnt = 0;
+	}
+	/*
+	 * If everything is freed up to consumer then check if the ring is full
+	 * If the ring is full then check if more needs to be freed and
+	 * schedule the call back again.
+	 *
+	 * This happens when there are 2 CPUs. One could be freeing and the
+	 * other filling it. If the ring is full when we get out of here and
+	 * the card has already interrupted the host then the host can miss the
+	 * interrupt.
+	 *
+	 * There is still a possible race condition and the host could miss an
+	 * interrupt. The card has to take care of this.
+	 */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+	done = (sw_consumer == hw_consumer);
+
+	spin_unlock(&adapter->tx_clean_lock);
+
+	return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_adapter *adapter;
+	int tx_complete, work_done;
+
+	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+	adapter = sds_ring->adapter;
+
+	tx_complete = qlcnic_process_cmd_ring(adapter);
+	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+	if ((work_done < budget) && tx_complete) {
+		napi_complete(&sds_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+			qlcnic_enable_int(sds_ring);
+	}
+
+	return work_done;
+}
+
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_adapter *adapter;
+	int work_done;
+
+	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+	adapter = sds_ring->adapter;
+
+	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+	if (work_done < budget) {
+		napi_complete(&sds_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+			qlcnic_enable_int(sds_ring);
+	}
+
+	return work_done;
+}
+
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+				    struct qlcnic_fw_msg *msg)
+{
+	u32 cable_OUI;
+	u16 cable_len, link_speed;
+	u8  link_status, module, duplex, autoneg, lb_status = 0;
+	struct net_device *netdev = adapter->netdev;
+
+	adapter->has_link_events = 1;
+
+	cable_OUI = msg->body[1] & 0xffffffff;
+	cable_len = (msg->body[1] >> 32) & 0xffff;
+	link_speed = (msg->body[1] >> 48) & 0xffff;
+
+	link_status = msg->body[2] & 0xff;
+	duplex = (msg->body[2] >> 16) & 0xff;
+	autoneg = (msg->body[2] >> 24) & 0xff;
+	lb_status = (msg->body[2] >> 32) & 0x3;
+
+	module = (msg->body[2] >> 8) & 0xff;
+	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+		dev_info(&netdev->dev,
+			 "unsupported cable: OUI 0x%x, length %d\n",
+			 cable_OUI, cable_len);
+	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+		dev_info(&netdev->dev, "unsupported cable length %d\n",
+			 cable_len);
+
+	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+	    lb_status == QLCNIC_ELB_MODE))
+		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+	qlcnic_advert_link_change(adapter, link_status);
+
+	if (duplex == LINKEVENT_FULL_DUPLEX)
+		adapter->link_duplex = DUPLEX_FULL;
+	else
+		adapter->link_duplex = DUPLEX_HALF;
+
+	adapter->module_type = module;
+	adapter->link_autoneg = autoneg;
+
+	if (link_status) {
+		adapter->link_speed = link_speed;
+	} else {
+		adapter->link_speed = SPEED_UNKNOWN;
+		adapter->link_duplex = DUPLEX_UNKNOWN;
+	}
+}
+
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+				     struct qlcnic_host_sds_ring *sds_ring)
+{
+	struct qlcnic_fw_msg msg;
+	struct status_desc *desc;
+	struct qlcnic_adapter *adapter;
+	struct device *dev;
+	int i = 0, opcode, ret;
+
+	while (desc_cnt > 0 && i < 8) {
+		desc = &sds_ring->desc_head[index];
+		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+		index = get_next_index(index, sds_ring->num_desc);
+		desc_cnt--;
+	}
+
+	adapter = sds_ring->adapter;
+	dev = &adapter->pdev->dev;
+	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+	switch (opcode) {
+	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+		qlcnic_handle_linkevent(adapter, &msg);
+		break;
+	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+		ret = (u32)(msg.body[1]);
+		switch (ret) {
+		case 0:
+			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+			break;
+		case 1:
+			dev_info(dev, "loopback already in progress\n");
+			adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+			break;
+		case 2:
+			dev_info(dev, "loopback cable is not connected\n");
+			adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+			break;
+		default:
+			dev_info(dev,
+				 "loopback configure request failed, err %x\n",
+				 ret);
+			adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static struct sk_buff *
+qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+		     struct qlcnic_host_rds_ring *rds_ring, u16 index,
+		     u16 cksum)
+{
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	if (unlikely(buffer->skb == NULL)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+			 PCI_DMA_FROMDEVICE);
+
+	skb = buffer->skb;
+
+	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+		adapter->stats.csummed++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		skb_checksum_none_assert(skb);
+	}
+
+	buffer->skb = NULL;
+
+	return skb;
+}
+
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+					  struct sk_buff *skb, u16 *vlan_tag)
+{
+	struct ethhdr *eth_hdr;
+
+	if (!__vlan_get_tag(skb, vlan_tag)) {
+		eth_hdr = (struct ethhdr *)skb->data;
+		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+		skb_pull(skb, VLAN_HLEN);
+	}
+	if (!adapter->pvid)
+		return 0;
+
+	if (*vlan_tag == adapter->pvid) {
+		/* Outer vlan tag. Packet should follow non-vlan path */
+		*vlan_tag = 0xffff;
+		return 0;
+	}
+	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+		return 0;
+
+	return -EINVAL;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+		   struct qlcnic_host_sds_ring *sds_ring, int ring,
+		   u64 sts_data0)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct qlcnic_host_rds_ring *rds_ring;
+	int index, length, cksum, pkt_offset;
+	u16 vid = 0xffff;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = qlcnic_get_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+	length = qlcnic_get_sts_totallength(sts_data0);
+	cksum  = qlcnic_get_sts_status(sts_data0);
+	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+	if (!skb)
+		return buffer;
+
+	if (length > rds_ring->skb_size)
+		skb_put(skb, rds_ring->skb_size);
+	else
+		skb_put(skb, length);
+
+	if (pkt_offset)
+		skb_pull(skb, pkt_offset);
+
+	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+		adapter->stats.rxdropped++;
+		dev_kfree_skb(skb);
+		return buffer;
+	}
+
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (vid != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vid);
+
+	napi_gro_receive(&sds_ring->napi, skb);
+
+	adapter->stats.rx_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE            20
+#define QLC_TCP_TS_OPTION_SIZE      12
+#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+		   int ring, u64 sts_data0, u64 sts_data1)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct iphdr *iph;
+	struct tcphdr *th;
+	bool push, timestamp;
+	int index, l2_hdr_offset, l4_hdr_offset;
+	u16 lro_length, length, data_offset, vid = 0xffff;
+	u32 seq_number;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = qlcnic_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+	lro_length = qlcnic_get_lro_sts_length(sts_data0);
+	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+	push = qlcnic_get_lro_sts_push_flag(sts_data0);
+	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+	if (!skb)
+		return buffer;
+
+	if (timestamp)
+		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+	else
+		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+	skb_put(skb, lro_length + data_offset);
+	skb_pull(skb, l2_hdr_offset);
+
+	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+		adapter->stats.rxdropped++;
+		dev_kfree_skb(skb);
+		return buffer;
+	}
+
+	skb->protocol = eth_type_trans(skb, netdev);
+	iph = (struct iphdr *)skb->data;
+	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+	iph->tot_len = htons(length);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	th->psh = push;
+	th->seq = htonl(seq_number);
+	length = skb->len;
+
+	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
+	if (vid != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vid);
+	netif_receive_skb(skb);
+
+	adapter->stats.lro_pkts++;
+	adapter->stats.lrobytes += length;
+
+	return buffer;
+}
+
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+	struct list_head *cur;
+	struct status_desc *desc;
+	struct qlcnic_rx_buffer *rxbuf;
+	u64 sts_data0, sts_data1;
+	__le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
+	int opcode, ring, desc_cnt, count = 0;
+	u32 consumer = sds_ring->consumer;
+
+	while (count < max) {
+		desc = &sds_ring->desc_head[consumer];
+		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+		if (!(sts_data0 & STATUS_OWNER_HOST))
+			break;
+
+		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+		opcode = qlcnic_get_sts_opcode(sts_data0);
+
+		switch (opcode) {
+		case QLCNIC_RXPKT_DESC:
+		case QLCNIC_OLD_RXPKT_DESC:
+		case QLCNIC_SYN_OFFLOAD:
+			ring = qlcnic_get_sts_type(sts_data0);
+			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+						   sts_data0);
+			break;
+		case QLCNIC_LRO_DESC:
+			ring = qlcnic_get_lro_sts_type(sts_data0);
+			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+						   sts_data1);
+			break;
+		case QLCNIC_RESPONSE_DESC:
+			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+		default:
+			goto skip;
+		}
+
+		WARN_ON(desc_cnt > 1);
+
+		if (likely(rxbuf))
+			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+		else
+			adapter->stats.null_rxbuf++;
+
+skip:
+		for (; desc_cnt > 0; desc_cnt--) {
+			desc = &sds_ring->desc_head[consumer];
+			desc->status_desc_data[0] = owner_phantom;
+			consumer = get_next_index(consumer, sds_ring->num_desc);
+		}
+		count++;
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &adapter->recv_ctx->rds_rings[ring];
+
+		if (!list_empty(&sds_ring->free_list[ring])) {
+			list_for_each(cur, &sds_ring->free_list[ring]) {
+				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+						   list);
+				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+			}
+			spin_lock(&rds_ring->lock);
+			list_splice_tail_init(&sds_ring->free_list[ring],
+					      &rds_ring->free_list);
+			spin_unlock(&rds_ring->lock);
+		}
+
+		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+	}
+
+	if (count) {
+		sds_ring->consumer = consumer;
+		writel(consumer, sds_ring->crb_sts_consumer);
+	}
+
+	return count;
+}
+
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+			    struct qlcnic_host_rds_ring *rds_ring)
+{
+	struct rcv_desc *pdesc;
+	struct qlcnic_rx_buffer *buffer;
+	int count = 0;
+	u32 producer;
+	struct list_head *head;
+
+	producer = rds_ring->producer;
+	head = &rds_ring->free_list;
+
+	while (!list_empty(head)) {
+
+		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+		if (!buffer->skb) {
+			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+				break;
+		}
+
+		count++;
+		list_del(&buffer->list);
+
+		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+		producer = get_next_index(producer, rds_ring->num_desc);
+	}
+
+	if (count) {
+		rds_ring->producer = producer;
+		writel((producer-1) & (rds_ring->num_desc-1),
+		       rds_ring->crb_rcv_producer);
+	}
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+	int i;
+	unsigned char *data = skb->data;
+
+	pr_info("\n");
+	for (i = 0; i < skb->len; i++) {
+		QLCDB(adapter, DRV, "%02x ", data[i]);
+		if ((i & 0x0f) == 8)
+			pr_info("\n");
+	}
+}
+
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+				    u64 sts_data0)
+{
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct sk_buff *skb;
+	struct qlcnic_host_rds_ring *rds_ring;
+	int index, length, cksum, pkt_offset;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = qlcnic_get_sts_refhandle(sts_data0);
+	length = qlcnic_get_sts_totallength(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return;
+
+	cksum  = qlcnic_get_sts_status(sts_data0);
+	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+	if (!skb)
+		return;
+
+	if (length > rds_ring->skb_size)
+		skb_put(skb, rds_ring->skb_size);
+	else
+		skb_put(skb, length);
+
+	if (pkt_offset)
+		skb_pull(skb, pkt_offset);
+
+	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+		adapter->diag_cnt++;
+	else
+		dump_skb(skb, adapter);
+
+	dev_kfree_skb_any(skb);
+	adapter->stats.rx_pkts++;
+	adapter->stats.rxbytes += length;
+}
+
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+	struct status_desc *desc;
+	u64 sts_data0;
+	int ring, opcode, desc_cnt;
+
+	u32 consumer = sds_ring->consumer;
+
+	desc = &sds_ring->desc_head[consumer];
+	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+	if (!(sts_data0 & STATUS_OWNER_HOST))
+		return;
+
+	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+	opcode = qlcnic_get_sts_opcode(sts_data0);
+	switch (opcode) {
+	case QLCNIC_RESPONSE_DESC:
+		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+		break;
+	default:
+		ring = qlcnic_get_sts_type(sts_data0);
+		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+		break;
+	}
+
+	for (; desc_cnt > 0; desc_cnt--) {
+		desc = &sds_ring->desc_head[consumer];
+		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+		consumer = get_next_index(consumer, sds_ring->num_desc);
+	}
+
+	sds_ring->consumer = consumer;
+	writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
+{
+	u32 mac_low, mac_high;
+	int i;
+
+	mac_low = off1;
+	mac_high = off2;
+
+	if (alt_mac) {
+		mac_low |= (mac_low >> 16) | (mac_high << 16);
+		mac_high >>= 16;
+	}
+
+	for (i = 0; i < 2; i++)
+		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+	for (i = 2; i < 6; i++)
+		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
+
+int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	int ring, max_sds_rings;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+		return -ENOMEM;
+
+	max_sds_rings = adapter->max_sds_rings;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+
+		if (ring == max_sds_rings - 1)
+			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+		else
+			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+				       QLCNIC_NETDEV_WEIGHT*2);
+	}
+
+	return 0;
+}
+
+void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		netif_napi_del(&sds_ring->napi);
+	}
+
+	qlcnic_free_sds_rings(adapter->recv_ctx);
+}
+
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		napi_enable(&sds_ring->napi);
+		qlcnic_enable_int(sds_ring);
+	}
+}
+
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		qlcnic_disable_int(sds_ring);
+		napi_synchronize(&sds_ring->napi);
+		napi_disable(&sds_ring->napi);
+	}
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 24ad17e..1eef0bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -66,17 +66,10 @@
 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 		work_func_t func, int delay);
 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
-static int qlcnic_poll(struct napi_struct *napi, int budget);
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev);
 #endif
 
-static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
-
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -92,8 +85,6 @@
 
 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
-static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
-static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
 				struct qlcnic_esw_func_cfg *);
@@ -115,9 +106,7 @@
 MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
 
 
-inline void
-qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_tx_ring *tx_ring)
+inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
 {
 	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
 }
@@ -132,23 +121,7 @@
 static const
 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
 
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
-	writel(0, sds_ring->crb_intr_mask);
-}
-
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-
-	writel(0x1, sds_ring->crb_intr_mask);
-
-	if (!QLCNIC_IS_MSI_FAMILY(adapter))
-		writel(0xfbff, adapter->tgt_mask_reg);
-}
-
-static int
-qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
 {
 	int size = sizeof(struct qlcnic_host_sds_ring) * count;
 
@@ -157,8 +130,7 @@
 	return recv_ctx->sds_rings == NULL;
 }
 
-static void
-qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 {
 	if (recv_ctx->sds_rings != NULL)
 		kfree(recv_ctx->sds_rings);
@@ -166,80 +138,6 @@
 	recv_ctx->sds_rings = NULL;
 }
 
-static int
-qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
-{
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
-		return -ENOMEM;
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-
-		if (ring == adapter->max_sds_rings - 1)
-			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
-				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
-		else
-			netif_napi_add(netdev, &sds_ring->napi,
-				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
-	}
-
-	return 0;
-}
-
-static void
-qlcnic_napi_del(struct qlcnic_adapter *adapter)
-{
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		netif_napi_del(&sds_ring->napi);
-	}
-
-	qlcnic_free_sds_rings(adapter->recv_ctx);
-}
-
-static void
-qlcnic_napi_enable(struct qlcnic_adapter *adapter)
-{
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-		return;
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		napi_enable(&sds_ring->napi);
-		qlcnic_enable_int(sds_ring);
-	}
-}
-
-static void
-qlcnic_napi_disable(struct qlcnic_adapter *adapter)
-{
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-		return;
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_disable_int(sds_ring);
-		napi_synchronize(&sds_ring->napi);
-		napi_disable(&sds_ring->napi);
-	}
-}
-
 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
 {
 	memset(&adapter->stats, 0, sizeof(adapter->stats));
@@ -724,9 +622,8 @@
 	return err;
 }
 
-static void
-qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
-		struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+			    struct qlcnic_esw_func_cfg *esw_cfg)
 {
 	if (esw_cfg->discard_tagged)
 		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
@@ -757,9 +654,8 @@
 	return 0;
 }
 
-static void
-qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
-		struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+				      struct qlcnic_esw_func_cfg *esw_cfg)
 {
 	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
 				QLCNIC_PROMISC_DISABLED);
@@ -776,8 +672,7 @@
 	qlcnic_set_netdev_features(adapter, esw_cfg);
 }
 
-static int
-qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_esw_func_cfg esw_cfg;
 
@@ -1485,8 +1380,8 @@
 }
 
 static int
-qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
-		struct net_device *netdev, u8 pci_using_dac)
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
+		    int pci_using_dac)
 {
 	int err;
 	struct pci_dev *pdev = adapter->pdev;
@@ -1506,7 +1401,7 @@
 
 	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
 		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-	if (pci_using_dac)
+	if (pci_using_dac == 1)
 		netdev->hw_features |= NETIF_F_HIGHDMA;
 
 	netdev->vlan_features = netdev->hw_features;
@@ -1530,7 +1425,7 @@
 	return 0;
 }
 
-static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
 {
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
 			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
@@ -1564,9 +1459,8 @@
 {
 	struct net_device *netdev = NULL;
 	struct qlcnic_adapter *adapter = NULL;
-	int err;
+	int err, pci_using_dac = -1;
 	uint8_t revision_id;
-	uint8_t pci_using_dac;
 	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
 
 	err = pci_enable_device(pdev);
@@ -1927,428 +1821,6 @@
 	adapter->fhash.fmax = 0;
 }
 
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-		u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
-{
-	struct cmd_desc_type0 *hwdesc;
-	struct qlcnic_nic_req *req;
-	struct qlcnic_mac_req *mac_req;
-	struct qlcnic_vlan_req *vlan_req;
-	u32 producer;
-	u64 word;
-
-	producer = tx_ring->producer;
-	hwdesc = &tx_ring->desc_head[tx_ring->producer];
-
-	req = (struct qlcnic_nic_req *)hwdesc;
-	memset(req, 0, sizeof(struct qlcnic_nic_req));
-	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
-
-	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
-	req->req_hdr = cpu_to_le64(word);
-
-	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
-	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
-
-	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
-	vlan_req->vlan_id = vlan_id;
-
-	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
-	smp_mb();
-}
-
-#define QLCNIC_MAC_HASH(MAC)\
-	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
-static void
-qlcnic_send_filter(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_tx_ring *tx_ring,
-		struct cmd_desc_type0 *first_desc,
-		struct sk_buff *skb)
-{
-	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-	struct qlcnic_filter *fil, *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
-	struct hlist_head *head;
-	u64 src_addr = 0;
-	__le16 vlan_id = 0;
-	u8 hindex;
-
-	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-		return;
-
-	if (adapter->fhash.fnum >= adapter->fhash.fmax)
-		return;
-
-	/* Only NPAR capable devices support vlan based learning*/
-	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
-		vlan_id = first_desc->vlan_TCI;
-	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
-	head = &(adapter->fhash.fhead[hindex]);
-
-	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
-		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-			    tmp_fil->vlan_id == vlan_id) {
-
-			if (jiffies >
-			    (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
-				qlcnic_change_filter(adapter, src_addr, vlan_id,
-								tx_ring);
-			tmp_fil->ftime = jiffies;
-			return;
-		}
-	}
-
-	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
-	if (!fil)
-		return;
-
-	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
-	fil->ftime = jiffies;
-	fil->vlan_id = vlan_id;
-	memcpy(fil->faddr, &src_addr, ETH_ALEN);
-	spin_lock(&adapter->mac_learn_lock);
-	hlist_add_head(&(fil->fnode), head);
-	adapter->fhash.fnum++;
-	spin_unlock(&adapter->mac_learn_lock);
-}
-
-static int
-qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-		struct cmd_desc_type0 *first_desc,
-		struct sk_buff *skb)
-{
-	u8 opcode = 0, hdr_len = 0;
-	u16 flags = 0, vlan_tci = 0;
-	int copied, offset, copy_len;
-	struct cmd_desc_type0 *hwdesc;
-	struct vlan_ethhdr *vh;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-	u16 protocol = ntohs(skb->protocol);
-	u32 producer = tx_ring->producer;
-
-	if (protocol == ETH_P_8021Q) {
-		vh = (struct vlan_ethhdr *)skb->data;
-		flags = FLAGS_VLAN_TAGGED;
-		vlan_tci = vh->h_vlan_TCI;
-		protocol = ntohs(vh->h_vlan_encapsulated_proto);
-	} else if (vlan_tx_tag_present(skb)) {
-		flags = FLAGS_VLAN_OOB;
-		vlan_tci = vlan_tx_tag_get(skb);
-	}
-	if (unlikely(adapter->pvid)) {
-		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
-			return -EIO;
-		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
-			goto set_flags;
-
-		flags = FLAGS_VLAN_OOB;
-		vlan_tci = adapter->pvid;
-	}
-set_flags:
-	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
-	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-	if (*(skb->data) & BIT_0) {
-		flags |= BIT_0;
-		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
-	}
-	opcode = TX_ETHER_PKT;
-	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-			skb_shinfo(skb)->gso_size > 0) {
-
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-
-		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-		first_desc->total_hdr_length = hdr_len;
-
-		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
-
-		/* For LSO, we need to copy the MAC/IP/TCP headers into
-		* the descriptor ring */
-		copied = 0;
-		offset = 2;
-
-		if (flags & FLAGS_VLAN_OOB) {
-			first_desc->total_hdr_length += VLAN_HLEN;
-			first_desc->tcp_hdr_offset = VLAN_HLEN;
-			first_desc->ip_hdr_offset = VLAN_HLEN;
-			/* Only in case of TSO on vlan device */
-			flags |= FLAGS_VLAN_TAGGED;
-
-			/* Create a TSO vlan header template for firmware */
-
-			hwdesc = &tx_ring->desc_head[producer];
-			tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-			copy_len = min((int)sizeof(struct cmd_desc_type0) -
-				offset, hdr_len + VLAN_HLEN);
-
-			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
-			skb_copy_from_linear_data(skb, vh, 12);
-			vh->h_vlan_proto = htons(ETH_P_8021Q);
-			vh->h_vlan_TCI = htons(vlan_tci);
-
-			skb_copy_from_linear_data_offset(skb, 12,
-				(char *)vh + 16, copy_len - 16);
-
-			copied = copy_len - VLAN_HLEN;
-			offset = 0;
-
-			producer = get_next_index(producer, tx_ring->num_desc);
-		}
-
-		while (copied < hdr_len) {
-
-			copy_len = min((int)sizeof(struct cmd_desc_type0) -
-				offset, (hdr_len - copied));
-
-			hwdesc = &tx_ring->desc_head[producer];
-			tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-			skb_copy_from_linear_data_offset(skb, copied,
-				 (char *) hwdesc + offset, copy_len);
-
-			copied += copy_len;
-			offset = 0;
-
-			producer = get_next_index(producer, tx_ring->num_desc);
-		}
-
-		tx_ring->producer = producer;
-		smp_mb();
-		adapter->stats.lso_frames++;
-
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u8 l4proto;
-
-		if (protocol == ETH_P_IP) {
-			l4proto = ip_hdr(skb)->protocol;
-
-			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCP_PKT;
-			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDP_PKT;
-		} else if (protocol == ETH_P_IPV6) {
-			l4proto = ipv6_hdr(skb)->nexthdr;
-
-			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCPV6_PKT;
-			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDPV6_PKT;
-		}
-	}
-	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
-	first_desc->ip_hdr_offset += skb_network_offset(skb);
-	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-	return 0;
-}
-
-static int
-qlcnic_map_tx_skb(struct pci_dev *pdev,
-		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
-{
-	struct qlcnic_skb_frag *nf;
-	struct skb_frag_struct *frag;
-	int i, nr_frags;
-	dma_addr_t map;
-
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	nf = &pbuf->frag_array[0];
-
-	map = pci_map_single(pdev, skb->data,
-			skb_headlen(skb), PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, map))
-		goto out_err;
-
-	nf->dma = map;
-	nf->length = skb_headlen(skb);
-
-	for (i = 0; i < nr_frags; i++) {
-		frag = &skb_shinfo(skb)->frags[i];
-		nf = &pbuf->frag_array[i+1];
-
-		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
-				       DMA_TO_DEVICE);
-		if (dma_mapping_error(&pdev->dev, map))
-			goto unwind;
-
-		nf->dma = map;
-		nf->length = skb_frag_size(frag);
-	}
-
-	return 0;
-
-unwind:
-	while (--i >= 0) {
-		nf = &pbuf->frag_array[i+1];
-		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-	}
-
-	nf = &pbuf->frag_array[0];
-	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-
-out_err:
-	return -ENOMEM;
-}
-
-static void
-qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
-			struct qlcnic_cmd_buffer *pbuf)
-{
-	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
-	int nr_frags = skb_shinfo(skb)->nr_frags;
-	int i;
-
-	for (i = 0; i < nr_frags; i++) {
-		nf = &pbuf->frag_array[i+1];
-		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-	}
-
-	nf = &pbuf->frag_array[0];
-	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-	pbuf->skb = NULL;
-}
-
-static inline void
-qlcnic_clear_cmddesc(u64 *desc)
-{
-	desc[0] = 0ULL;
-	desc[2] = 0ULL;
-	desc[7] = 0ULL;
-}
-
-netdev_tx_t
-qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-	struct qlcnic_cmd_buffer *pbuf;
-	struct qlcnic_skb_frag *buffrag;
-	struct cmd_desc_type0 *hwdesc, *first_desc;
-	struct pci_dev *pdev;
-	struct ethhdr *phdr;
-	int delta = 0;
-	int i, k;
-
-	u32 producer;
-	int frag_count;
-	u32 num_txd = tx_ring->num_desc;
-
-	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-		netif_stop_queue(netdev);
-		return NETDEV_TX_BUSY;
-	}
-
-	if (adapter->flags & QLCNIC_MACSPOOF) {
-		phdr = (struct ethhdr *)skb->data;
-		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
-			goto drop_packet;
-	}
-
-	frag_count = skb_shinfo(skb)->nr_frags + 1;
-	/* 14 frags supported for normal packet and
-	 * 32 frags supported for TSO packet
-	 */
-	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
-
-		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
-			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
-
-		if (!__pskb_pull_tail(skb, delta))
-			goto drop_packet;
-
-		frag_count = 1 + skb_shinfo(skb)->nr_frags;
-	}
-
-	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
-		netif_stop_queue(netdev);
-		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
-			netif_start_queue(netdev);
-		else {
-			adapter->stats.xmit_off++;
-			return NETDEV_TX_BUSY;
-		}
-	}
-
-	producer = tx_ring->producer;
-	pbuf = &tx_ring->cmd_buf_arr[producer];
-
-	pdev = adapter->pdev;
-
-	first_desc = hwdesc = &tx_ring->desc_head[producer];
-	qlcnic_clear_cmddesc((u64 *)hwdesc);
-
-	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
-		adapter->stats.tx_dma_map_error++;
-		goto drop_packet;
-	}
-
-	pbuf->skb = skb;
-	pbuf->frag_count = frag_count;
-
-	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
-	qlcnic_set_tx_port(first_desc, adapter->portnum);
-
-	for (i = 0; i < frag_count; i++) {
-
-		k = i % 4;
-
-		if ((k == 0) && (i > 0)) {
-			/* move to next desc.*/
-			producer = get_next_index(producer, num_txd);
-			hwdesc = &tx_ring->desc_head[producer];
-			qlcnic_clear_cmddesc((u64 *)hwdesc);
-			tx_ring->cmd_buf_arr[producer].skb = NULL;
-		}
-
-		buffrag = &pbuf->frag_array[i];
-
-		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
-		switch (k) {
-		case 0:
-			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
-			break;
-		case 1:
-			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
-			break;
-		case 2:
-			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
-			break;
-		case 3:
-			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
-			break;
-		}
-	}
-
-	tx_ring->producer = get_next_index(producer, num_txd);
-	smp_mb();
-
-	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
-		goto unwind_buff;
-
-	if (adapter->mac_learn)
-		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
-
-	adapter->stats.txbytes += skb->len;
-	adapter->stats.xmitcalled++;
-
-	qlcnic_update_cmd_producer(adapter, tx_ring);
-
-	return NETDEV_TX_OK;
-
-unwind_buff:
-	qlcnic_unmap_buffers(pdev, skb, pbuf);
-drop_packet:
-	adapter->stats.txdropped++;
-	dev_kfree_skb_any(skb);
-	return NETDEV_TX_OK;
-}
-
 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -2385,27 +1857,6 @@
 	return rv;
 }
 
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
-{
-	struct net_device *netdev = adapter->netdev;
-
-	if (adapter->ahw->linkup && !linkup) {
-		netdev_info(netdev, "NIC Link is down\n");
-		adapter->ahw->linkup = 0;
-		if (netif_running(netdev)) {
-			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-		}
-	} else if (!adapter->ahw->linkup && linkup) {
-		netdev_info(netdev, "NIC Link is up\n");
-		adapter->ahw->linkup = 1;
-		if (netif_running(netdev)) {
-			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-		}
-	}
-}
-
 static void qlcnic_tx_timeout(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -2512,122 +1963,6 @@
 	return IRQ_HANDLED;
 }
 
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
-{
-	u32 sw_consumer, hw_consumer;
-	int count = 0, i;
-	struct qlcnic_cmd_buffer *buffer;
-	struct pci_dev *pdev = adapter->pdev;
-	struct net_device *netdev = adapter->netdev;
-	struct qlcnic_skb_frag *frag;
-	int done;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-
-	if (!spin_trylock(&adapter->tx_clean_lock))
-		return 1;
-
-	sw_consumer = tx_ring->sw_consumer;
-	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-
-	while (sw_consumer != hw_consumer) {
-		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
-		if (buffer->skb) {
-			frag = &buffer->frag_array[0];
-			pci_unmap_single(pdev, frag->dma, frag->length,
-					 PCI_DMA_TODEVICE);
-			frag->dma = 0ULL;
-			for (i = 1; i < buffer->frag_count; i++) {
-				frag++;
-				pci_unmap_page(pdev, frag->dma, frag->length,
-					       PCI_DMA_TODEVICE);
-				frag->dma = 0ULL;
-			}
-
-			adapter->stats.xmitfinished++;
-			dev_kfree_skb_any(buffer->skb);
-			buffer->skb = NULL;
-		}
-
-		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
-		if (++count >= MAX_STATUS_HANDLE)
-			break;
-	}
-
-	if (count && netif_running(netdev)) {
-		tx_ring->sw_consumer = sw_consumer;
-
-		smp_mb();
-
-		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-				netif_wake_queue(netdev);
-				adapter->stats.xmit_on++;
-			}
-		}
-		adapter->tx_timeo_cnt = 0;
-	}
-	/*
-	 * If everything is freed up to consumer then check if the ring is full
-	 * If the ring is full then check if more needs to be freed and
-	 * schedule the call back again.
-	 *
-	 * This happens when there are 2 CPUs. One could be freeing and the
-	 * other filling it. If the ring is full when we get out of here and
-	 * the card has already interrupted the host then the host can miss the
-	 * interrupt.
-	 *
-	 * There is still a possible race condition and the host could miss an
-	 * interrupt. The card has to take care of this.
-	 */
-	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-	done = (sw_consumer == hw_consumer);
-	spin_unlock(&adapter->tx_clean_lock);
-
-	return done;
-}
-
-static int qlcnic_poll(struct napi_struct *napi, int budget)
-{
-	struct qlcnic_host_sds_ring *sds_ring =
-		container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-
-	int tx_complete;
-	int work_done;
-
-	tx_complete = qlcnic_process_cmd_ring(adapter);
-
-	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-	if ((work_done < budget) && tx_complete) {
-		napi_complete(&sds_ring->napi);
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_enable_int(sds_ring);
-	}
-
-	return work_done;
-}
-
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
-{
-	struct qlcnic_host_sds_ring *sds_ring =
-		container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-	int work_done;
-
-	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-	if (work_done < budget) {
-		napi_complete(&sds_ring->napi);
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_enable_int(sds_ring);
-	}
-
-	return work_done;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
@@ -3395,93 +2730,6 @@
 	return err;
 }
 
-static int
-qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
-{
-	return -EOPNOTSUPP;
-}
-
-static int
-qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
-{
-	return -EOPNOTSUPP;
-}
-
-static ssize_t
-qlcnic_store_bridged_mode(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	unsigned long new;
-	int ret = -EINVAL;
-
-	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
-		goto err_out;
-
-	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-		goto err_out;
-
-	if (strict_strtoul(buf, 2, &new))
-		goto err_out;
-
-	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
-		ret = len;
-
-err_out:
-	return ret;
-}
-
-static ssize_t
-qlcnic_show_bridged_mode(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	int bridged_mode = 0;
-
-	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-		bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
-
-	return sprintf(buf, "%d\n", bridged_mode);
-}
-
-static struct device_attribute dev_attr_bridged_mode = {
-       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_bridged_mode,
-       .store = qlcnic_store_bridged_mode,
-};
-
-static ssize_t
-qlcnic_store_diag_mode(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	unsigned long new;
-
-	if (strict_strtoul(buf, 2, &new))
-		return -EINVAL;
-
-	if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
-		adapter->flags ^= QLCNIC_DIAG_ENABLED;
-
-	return len;
-}
-
-static ssize_t
-qlcnic_show_diag_mode(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%d\n",
-			!!(adapter->flags & QLCNIC_DIAG_ENABLED));
-}
-
-static struct device_attribute dev_attr_diag_mode = {
-	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
-	.show = qlcnic_show_diag_mode,
-	.store = qlcnic_store_diag_mode,
-};
-
 int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
 {
 	if (!use_msi_x && !use_msi) {
@@ -3532,859 +2780,6 @@
 	return err;
 }
 
-static int
-qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
-			u8 *rate)
-{
-	*rate = LSB(beacon);
-	*state = MSB(beacon);
-
-	QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
-
-	if (!*state) {
-		*rate = __QLCNIC_MAX_LED_RATE;
-		return 0;
-	} else if (*state > __QLCNIC_MAX_LED_STATE)
-		return -EINVAL;
-
-	if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
-		return -EINVAL;
-
-	return 0;
-}
-
-static ssize_t
-qlcnic_store_beacon(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	int max_sds_rings = adapter->max_sds_rings;
-	u16 beacon;
-	u8 b_state, b_rate;
-	int err;
-
-	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
-		dev_warn(dev, "LED test not supported for non "
-				"privilege function\n");
-		return -EOPNOTSUPP;
-	}
-
-	if (len != sizeof(u16))
-		return QL_STATUS_INVALID_PARAM;
-
-	memcpy(&beacon, buf, sizeof(u16));
-	err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
-	if (err)
-		return err;
-
-	if (adapter->ahw->beacon_state == b_state)
-		return len;
-
-	rtnl_lock();
-
-	if (!adapter->ahw->beacon_state)
-		if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
-			rtnl_unlock();
-			return -EBUSY;
-		}
-
-	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
-		err = -EIO;
-		goto out;
-	}
-
-	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-		err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
-		if (err)
-			goto out;
-		set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
-	}
-
-	err = qlcnic_config_led(adapter, b_state, b_rate);
-
-	if (!err) {
-		err = len;
-		adapter->ahw->beacon_state = b_state;
-	}
-
-	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
-		qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
-
- out:
-	if (!adapter->ahw->beacon_state)
-		clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
-	rtnl_unlock();
-
-	return err;
-}
-
-static ssize_t
-qlcnic_show_beacon(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
-}
-
-static struct device_attribute dev_attr_beacon = {
-	.attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
-	.show = qlcnic_show_beacon,
-	.store = qlcnic_store_beacon,
-};
-
-static int
-qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
-		loff_t offset, size_t size)
-{
-	size_t crb_size = 4;
-
-	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-		return -EIO;
-
-	if (offset < QLCNIC_PCI_CRBSPACE) {
-		if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
-					QLCNIC_PCI_CAMQM_END))
-			crb_size = 8;
-		else
-			return -EINVAL;
-	}
-
-	if ((size != crb_size) || (offset & (crb_size-1)))
-		return  -EINVAL;
-
-	return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	u32 data;
-	u64 qmdata;
-	int ret;
-
-	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-	if (ret != 0)
-		return ret;
-
-	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-		qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
-		memcpy(buf, &qmdata, size);
-	} else {
-		data = QLCRD32(adapter, offset);
-		memcpy(buf, &data, size);
-	}
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	u32 data;
-	u64 qmdata;
-	int ret;
-
-	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-	if (ret != 0)
-		return ret;
-
-	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-		memcpy(&qmdata, buf, size);
-		qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
-	} else {
-		memcpy(&data, buf, size);
-		QLCWR32(adapter, offset, data);
-	}
-	return size;
-}
-
-static int
-qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
-		loff_t offset, size_t size)
-{
-	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-		return -EIO;
-
-	if ((size != 8) || (offset & 0x7))
-		return  -EIO;
-
-	return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	u64 data;
-	int ret;
-
-	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-	if (ret != 0)
-		return ret;
-
-	if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
-		return -EIO;
-
-	memcpy(buf, &data, size);
-
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	u64 data;
-	int ret;
-
-	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-	if (ret != 0)
-		return ret;
-
-	memcpy(&data, buf, size);
-
-	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
-		return -EIO;
-
-	return size;
-}
-
-static struct bin_attribute bin_attr_crb = {
-	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_crb,
-	.write = qlcnic_sysfs_write_crb,
-};
-
-static struct bin_attribute bin_attr_mem = {
-	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_mem,
-	.write = qlcnic_sysfs_write_mem,
-};
-
-static int
-validate_pm_config(struct qlcnic_adapter *adapter,
-			struct qlcnic_pm_func_cfg *pm_cfg, int count)
-{
-
-	u8 src_pci_func, s_esw_id, d_esw_id;
-	u8 dest_pci_func;
-	int i;
-
-	for (i = 0; i < count; i++) {
-		src_pci_func = pm_cfg[i].pci_func;
-		dest_pci_func = pm_cfg[i].dest_npar;
-		if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
-				|| dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
-			return QL_STATUS_INVALID_PARAM;
-
-		if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
-			return QL_STATUS_INVALID_PARAM;
-
-		if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
-			return QL_STATUS_INVALID_PARAM;
-
-		s_esw_id = adapter->npars[src_pci_func].phy_port;
-		d_esw_id = adapter->npars[dest_pci_func].phy_port;
-
-		if (s_esw_id != d_esw_id)
-			return QL_STATUS_INVALID_PARAM;
-
-	}
-	return 0;
-
-}
-
-static ssize_t
-qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_pm_func_cfg *pm_cfg;
-	u32 id, action, pci_func;
-	int count, rem, i, ret;
-
-	count	= size / sizeof(struct qlcnic_pm_func_cfg);
-	rem	= size % sizeof(struct qlcnic_pm_func_cfg);
-	if (rem)
-		return QL_STATUS_INVALID_PARAM;
-
-	pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
-
-	ret = validate_pm_config(adapter, pm_cfg, count);
-	if (ret)
-		return ret;
-	for (i = 0; i < count; i++) {
-		pci_func = pm_cfg[i].pci_func;
-		action = !!pm_cfg[i].action;
-		id = adapter->npars[pci_func].phy_port;
-		ret = qlcnic_config_port_mirroring(adapter, id,
-						action, pci_func);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < count; i++) {
-		pci_func = pm_cfg[i].pci_func;
-		id = adapter->npars[pci_func].phy_port;
-		adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
-		adapter->npars[pci_func].dest_npar = id;
-	}
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
-	int i;
-
-	if (size != sizeof(pm_cfg))
-		return QL_STATUS_INVALID_PARAM;
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-			continue;
-		pm_cfg[i].action = adapter->npars[i].enable_pm;
-		pm_cfg[i].dest_npar = 0;
-		pm_cfg[i].pci_func = i;
-	}
-	memcpy(buf, &pm_cfg, size);
-
-	return size;
-}
-
-static int
-validate_esw_config(struct qlcnic_adapter *adapter,
-	struct qlcnic_esw_func_cfg *esw_cfg, int count)
-{
-	u32 op_mode;
-	u8 pci_func;
-	int i;
-
-	op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
-
-	for (i = 0; i < count; i++) {
-		pci_func = esw_cfg[i].pci_func;
-		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-			return QL_STATUS_INVALID_PARAM;
-
-		if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-			if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-				return QL_STATUS_INVALID_PARAM;
-
-		switch (esw_cfg[i].op_mode) {
-		case QLCNIC_PORT_DEFAULTS:
-			if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
-						QLCNIC_NON_PRIV_FUNC) {
-				if (esw_cfg[i].mac_anti_spoof != 0)
-					return QL_STATUS_INVALID_PARAM;
-				if (esw_cfg[i].mac_override != 1)
-					return QL_STATUS_INVALID_PARAM;
-				if (esw_cfg[i].promisc_mode != 1)
-					return QL_STATUS_INVALID_PARAM;
-			}
-			break;
-		case QLCNIC_ADD_VLAN:
-			if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
-				return QL_STATUS_INVALID_PARAM;
-			if (!esw_cfg[i].op_type)
-				return QL_STATUS_INVALID_PARAM;
-			break;
-		case QLCNIC_DEL_VLAN:
-			if (!esw_cfg[i].op_type)
-				return QL_STATUS_INVALID_PARAM;
-			break;
-		default:
-			return QL_STATUS_INVALID_PARAM;
-		}
-	}
-	return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_esw_func_cfg *esw_cfg;
-	struct qlcnic_npar_info *npar;
-	int count, rem, i, ret;
-	u8 pci_func, op_mode = 0;
-
-	count	= size / sizeof(struct qlcnic_esw_func_cfg);
-	rem	= size % sizeof(struct qlcnic_esw_func_cfg);
-	if (rem)
-		return QL_STATUS_INVALID_PARAM;
-
-	esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
-	ret = validate_esw_config(adapter, esw_cfg, count);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < count; i++) {
-		if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-			if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
-				return QL_STATUS_INVALID_PARAM;
-
-		if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
-			continue;
-
-		op_mode = esw_cfg[i].op_mode;
-		qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
-		esw_cfg[i].op_mode = op_mode;
-		esw_cfg[i].pci_func = adapter->ahw->pci_func;
-
-		switch (esw_cfg[i].op_mode) {
-		case QLCNIC_PORT_DEFAULTS:
-			qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
-			break;
-		case QLCNIC_ADD_VLAN:
-			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-			break;
-		case QLCNIC_DEL_VLAN:
-			esw_cfg[i].vlan_id = 0;
-			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-			break;
-		}
-	}
-
-	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-		goto out;
-
-	for (i = 0; i < count; i++) {
-		pci_func = esw_cfg[i].pci_func;
-		npar = &adapter->npars[pci_func];
-		switch (esw_cfg[i].op_mode) {
-		case QLCNIC_PORT_DEFAULTS:
-			npar->promisc_mode = esw_cfg[i].promisc_mode;
-			npar->mac_override = esw_cfg[i].mac_override;
-			npar->offload_flags = esw_cfg[i].offload_flags;
-			npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
-			npar->discard_tagged = esw_cfg[i].discard_tagged;
-			break;
-		case QLCNIC_ADD_VLAN:
-			npar->pvid = esw_cfg[i].vlan_id;
-			break;
-		case QLCNIC_DEL_VLAN:
-			npar->pvid = 0;
-			break;
-		}
-	}
-out:
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
-	u8 i;
-
-	if (size != sizeof(esw_cfg))
-		return QL_STATUS_INVALID_PARAM;
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-			continue;
-		esw_cfg[i].pci_func = i;
-		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
-			return QL_STATUS_INVALID_PARAM;
-	}
-	memcpy(buf, &esw_cfg, size);
-
-	return size;
-}
-
-static int
-validate_npar_config(struct qlcnic_adapter *adapter,
-				struct qlcnic_npar_func_cfg *np_cfg, int count)
-{
-	u8 pci_func, i;
-
-	for (i = 0; i < count; i++) {
-		pci_func = np_cfg[i].pci_func;
-		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-			return QL_STATUS_INVALID_PARAM;
-
-		if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-			return QL_STATUS_INVALID_PARAM;
-
-		if (!IS_VALID_BW(np_cfg[i].min_bw) ||
-		    !IS_VALID_BW(np_cfg[i].max_bw))
-			return QL_STATUS_INVALID_PARAM;
-	}
-	return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_info nic_info;
-	struct qlcnic_npar_func_cfg *np_cfg;
-	int i, count, rem, ret;
-	u8 pci_func;
-
-	count	= size / sizeof(struct qlcnic_npar_func_cfg);
-	rem	= size % sizeof(struct qlcnic_npar_func_cfg);
-	if (rem)
-		return QL_STATUS_INVALID_PARAM;
-
-	np_cfg = (struct qlcnic_npar_func_cfg *) buf;
-	ret = validate_npar_config(adapter, np_cfg, count);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < count ; i++) {
-		pci_func = np_cfg[i].pci_func;
-		ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
-		if (ret)
-			return ret;
-		nic_info.pci_func = pci_func;
-		nic_info.min_tx_bw = np_cfg[i].min_bw;
-		nic_info.max_tx_bw = np_cfg[i].max_bw;
-		ret = qlcnic_set_nic_info(adapter, &nic_info);
-		if (ret)
-			return ret;
-		adapter->npars[i].min_bw = nic_info.min_tx_bw;
-		adapter->npars[i].max_bw = nic_info.max_tx_bw;
-	}
-
-	return size;
-
-}
-static ssize_t
-qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_info nic_info;
-	struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
-	int i, ret;
-
-	if (size != sizeof(np_cfg))
-		return QL_STATUS_INVALID_PARAM;
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-			continue;
-		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
-		if (ret)
-			return ret;
-
-		np_cfg[i].pci_func = i;
-		np_cfg[i].op_mode = (u8)nic_info.op_mode;
-		np_cfg[i].port_num = nic_info.phys_port;
-		np_cfg[i].fw_capab = nic_info.capabilities;
-		np_cfg[i].min_bw = nic_info.min_tx_bw ;
-		np_cfg[i].max_bw = nic_info.max_tx_bw;
-		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
-		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
-	}
-	memcpy(buf, &np_cfg, size);
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_esw_statistics port_stats;
-	int ret;
-
-	if (size != sizeof(struct qlcnic_esw_statistics))
-		return QL_STATUS_INVALID_PARAM;
-
-	if (offset >= QLCNIC_MAX_PCI_FUNC)
-		return QL_STATUS_INVALID_PARAM;
-
-	memset(&port_stats, 0, size);
-	ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-								&port_stats.rx);
-	if (ret)
-		return ret;
-
-	ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-								&port_stats.tx);
-	if (ret)
-		return ret;
-
-	memcpy(buf, &port_stats, size);
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_esw_statistics esw_stats;
-	int ret;
-
-	if (size != sizeof(struct qlcnic_esw_statistics))
-		return QL_STATUS_INVALID_PARAM;
-
-	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-		return QL_STATUS_INVALID_PARAM;
-
-	memset(&esw_stats, 0, size);
-	ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-								&esw_stats.rx);
-	if (ret)
-		return ret;
-
-	ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-								&esw_stats.tx);
-	if (ret)
-		return ret;
-
-	memcpy(buf, &esw_stats, size);
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	int ret;
-
-	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-		return QL_STATUS_INVALID_PARAM;
-
-	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-						QLCNIC_QUERY_RX_COUNTER);
-	if (ret)
-		return ret;
-
-	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-						QLCNIC_QUERY_TX_COUNTER);
-	if (ret)
-		return ret;
-
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	int ret;
-
-	if (offset >= QLCNIC_MAX_PCI_FUNC)
-		return QL_STATUS_INVALID_PARAM;
-
-	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-						QLCNIC_QUERY_RX_COUNTER);
-	if (ret)
-		return ret;
-
-	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-						QLCNIC_QUERY_TX_COUNTER);
-	if (ret)
-		return ret;
-
-	return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
-	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-	struct qlcnic_pci_info *pci_info;
-	int i, ret;
-
-	if (size != sizeof(pci_cfg))
-		return QL_STATUS_INVALID_PARAM;
-
-	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
-	if (!pci_info)
-		return -ENOMEM;
-
-	ret = qlcnic_get_pci_info(adapter, pci_info);
-	if (ret) {
-		kfree(pci_info);
-		return ret;
-	}
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-		pci_cfg[i].pci_func = pci_info[i].id;
-		pci_cfg[i].func_type = pci_info[i].type;
-		pci_cfg[i].port_num = pci_info[i].default_port;
-		pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
-		pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
-		memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
-	}
-	memcpy(buf, &pci_cfg, size);
-	kfree(pci_info);
-	return size;
-}
-static struct bin_attribute bin_attr_npar_config = {
-	.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_npar_config,
-	.write = qlcnic_sysfs_write_npar_config,
-};
-
-static struct bin_attribute bin_attr_pci_config = {
-	.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_pci_config,
-	.write = NULL,
-};
-
-static struct bin_attribute bin_attr_port_stats = {
-	.attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_get_port_stats,
-	.write = qlcnic_sysfs_clear_port_stats,
-};
-
-static struct bin_attribute bin_attr_esw_stats = {
-	.attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_get_esw_stats,
-	.write = qlcnic_sysfs_clear_esw_stats,
-};
-
-static struct bin_attribute bin_attr_esw_config = {
-	.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_esw_config,
-	.write = qlcnic_sysfs_write_esw_config,
-};
-
-static struct bin_attribute bin_attr_pm_config = {
-	.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
-	.size = 0,
-	.read = qlcnic_sysfs_read_pm_config,
-	.write = qlcnic_sysfs_write_pm_config,
-};
-
-static void
-qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
-{
-	struct device *dev = &adapter->pdev->dev;
-
-	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-		if (device_create_file(dev, &dev_attr_bridged_mode))
-			dev_warn(dev,
-				"failed to create bridged_mode sysfs entry\n");
-}
-
-static void
-qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
-{
-	struct device *dev = &adapter->pdev->dev;
-
-	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-		device_remove_file(dev, &dev_attr_bridged_mode);
-}
-
-static void
-qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
-{
-	struct device *dev = &adapter->pdev->dev;
-	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-	if (device_create_bin_file(dev, &bin_attr_port_stats))
-		dev_info(dev, "failed to create port stats sysfs entry");
-
-	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-		return;
-	if (device_create_file(dev, &dev_attr_diag_mode))
-		dev_info(dev, "failed to create diag_mode sysfs entry\n");
-	if (device_create_bin_file(dev, &bin_attr_crb))
-		dev_info(dev, "failed to create crb sysfs entry\n");
-	if (device_create_bin_file(dev, &bin_attr_mem))
-		dev_info(dev, "failed to create mem sysfs entry\n");
-
-	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-		return;
-
-	if (device_create_bin_file(dev, &bin_attr_pci_config))
-		dev_info(dev, "failed to create pci config sysfs entry");
-	if (device_create_file(dev, &dev_attr_beacon))
-		dev_info(dev, "failed to create beacon sysfs entry");
-
-	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-		return;
-	if (device_create_bin_file(dev, &bin_attr_esw_config))
-		dev_info(dev, "failed to create esw config sysfs entry");
-	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-		return;
-	if (device_create_bin_file(dev, &bin_attr_npar_config))
-		dev_info(dev, "failed to create npar config sysfs entry");
-	if (device_create_bin_file(dev, &bin_attr_pm_config))
-		dev_info(dev, "failed to create pm config sysfs entry");
-	if (device_create_bin_file(dev, &bin_attr_esw_stats))
-		dev_info(dev, "failed to create eswitch stats sysfs entry");
-}
-
-static void
-qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
-{
-	struct device *dev = &adapter->pdev->dev;
-	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-	device_remove_bin_file(dev, &bin_attr_port_stats);
-
-	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-		return;
-	device_remove_file(dev, &dev_attr_diag_mode);
-	device_remove_bin_file(dev, &bin_attr_crb);
-	device_remove_bin_file(dev, &bin_attr_mem);
-	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-		return;
-	device_remove_bin_file(dev, &bin_attr_pci_config);
-	device_remove_file(dev, &dev_attr_beacon);
-	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-		return;
-	device_remove_bin_file(dev, &bin_attr_esw_config);
-	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-		return;
-	device_remove_bin_file(dev, &bin_attr_npar_config);
-	device_remove_bin_file(dev, &bin_attr_pm_config);
-	device_remove_bin_file(dev, &bin_attr_esw_stats);
-}
-
 #ifdef CONFIG_INET
 
 #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
@@ -4523,7 +2918,7 @@
 qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
 { }
 #endif
-static const struct pci_error_handlers qlcnic_err_handler = {
+static struct pci_error_handlers qlcnic_err_handler = {
 	.error_detected = qlcnic_io_error_detected,
 	.slot_reset = qlcnic_io_slot_reset,
 	.resume = qlcnic_io_resume,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644
index 0000000..12ff292
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -0,0 +1,629 @@
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#include <net/ip.h>
+
+#define QLCNIC_DUMP_WCRB	BIT_0
+#define QLCNIC_DUMP_RWCRB	BIT_1
+#define QLCNIC_DUMP_ANDCRB	BIT_2
+#define QLCNIC_DUMP_ORCRB	BIT_3
+#define QLCNIC_DUMP_POLLCRB	BIT_4
+#define QLCNIC_DUMP_RD_SAVE	BIT_5
+#define QLCNIC_DUMP_WRT_SAVED	BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
+#define QLCNIC_DUMP_SKIP	BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX	0xff
+
+struct qlcnic_common_entry_hdr {
+	u32     type;
+	u32     offset;
+	u32     cap_size;
+	u8      mask;
+	u8      rsvd[2];
+	u8      flags;
+} __packed;
+
+struct __crb {
+	u32	addr;
+	u8	stride;
+	u8	rsvd1[3];
+	u32	data_size;
+	u32	no_ops;
+	u32	rsvd2[4];
+} __packed;
+
+struct __ctrl {
+	u32	addr;
+	u8	stride;
+	u8	index_a;
+	u16	timeout;
+	u32	data_size;
+	u32	no_ops;
+	u8	opcode;
+	u8	index_v;
+	u8	shl_val;
+	u8	shr_val;
+	u32	val1;
+	u32	val2;
+	u32	val3;
+} __packed;
+
+struct __cache {
+	u32	addr;
+	u16	stride;
+	u16	init_tag_val;
+	u32	size;
+	u32	no_ops;
+	u32	ctrl_addr;
+	u32	ctrl_val;
+	u32	read_addr;
+	u8	read_addr_stride;
+	u8	read_addr_num;
+	u8	rsvd1[2];
+} __packed;
+
+struct __ocm {
+	u8	rsvd[8];
+	u32	size;
+	u32	no_ops;
+	u8	rsvd1[8];
+	u32	read_addr;
+	u32	read_addr_stride;
+} __packed;
+
+struct __mem {
+	u8	rsvd[24];
+	u32	addr;
+	u32	size;
+} __packed;
+
+struct __mux {
+	u32	addr;
+	u8	rsvd[4];
+	u32	size;
+	u32	no_ops;
+	u32	val;
+	u32	val_stride;
+	u32	read_addr;
+	u8	rsvd2[4];
+} __packed;
+
+struct __queue {
+	u32	sel_addr;
+	u16	stride;
+	u8	rsvd[2];
+	u32	size;
+	u32	no_ops;
+	u8	rsvd2[8];
+	u32	read_addr;
+	u8	read_addr_stride;
+	u8	read_addr_cnt;
+	u8	rsvd3[2];
+} __packed;
+
+struct qlcnic_dump_entry {
+	struct qlcnic_common_entry_hdr hdr;
+	union {
+		struct __crb	crb;
+		struct __cache	cache;
+		struct __ocm	ocm;
+		struct __mem	mem;
+		struct __mux	mux;
+		struct __queue	que;
+		struct __ctrl	ctrl;
+	} region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+	QLCNIC_DUMP_NOP		= 0,
+	QLCNIC_DUMP_READ_CRB	= 1,
+	QLCNIC_DUMP_READ_MUX	= 2,
+	QLCNIC_DUMP_QUEUE	= 3,
+	QLCNIC_DUMP_BRD_CONFIG	= 4,
+	QLCNIC_DUMP_READ_OCM	= 6,
+	QLCNIC_DUMP_PEG_REG	= 7,
+	QLCNIC_DUMP_L1_DTAG	= 8,
+	QLCNIC_DUMP_L1_ITAG	= 9,
+	QLCNIC_DUMP_L1_DATA	= 11,
+	QLCNIC_DUMP_L1_INST	= 12,
+	QLCNIC_DUMP_L2_DTAG	= 21,
+	QLCNIC_DUMP_L2_ITAG	= 22,
+	QLCNIC_DUMP_L2_DATA	= 23,
+	QLCNIC_DUMP_L2_INST	= 24,
+	QLCNIC_DUMP_READ_ROM	= 71,
+	QLCNIC_DUMP_READ_MEM	= 72,
+	QLCNIC_DUMP_READ_CTRL	= 98,
+	QLCNIC_DUMP_TLHDR	= 99,
+	QLCNIC_DUMP_RDEND	= 255
+};
+
+struct qlcnic_dump_operations {
+	enum qlcnic_minidump_opcode opcode;
+	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+		       __le32 *);
+};
+
+static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+	u32 dest;
+	void __iomem *window_reg;
+
+	dest = addr & 0xFFFF0000;
+	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+	writel(dest, window_reg);
+	readl(window_reg);
+	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+	*data = readl(window_reg);
+}
+
+static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+	u32 dest;
+	void __iomem *window_reg;
+
+	dest = addr & 0xFFFF0000;
+	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+	writel(dest, window_reg);
+	readl(window_reg);
+	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+	writel(data, window_reg);
+	readl(window_reg);
+}
+
+/* FW dump related functions */
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i;
+	u32 addr, data;
+	struct __crb *crb = &entry->region.crb;
+	void __iomem *base = adapter->ahw->pci_base0;
+
+	addr = crb->addr;
+
+	for (i = 0; i < crb->no_ops; i++) {
+		qlcnic_read_dump_reg(addr, base, &data);
+		*buffer++ = cpu_to_le32(addr);
+		*buffer++ = cpu_to_le32(data);
+		addr += crb->stride;
+	}
+	return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+			    struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i, k, timeout = 0;
+	void __iomem *base = adapter->ahw->pci_base0;
+	u32 addr, data;
+	u8 opcode, no_ops;
+	struct __ctrl *ctr = &entry->region.ctrl;
+	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+
+	addr = ctr->addr;
+	no_ops = ctr->no_ops;
+
+	for (i = 0; i < no_ops; i++) {
+		k = 0;
+		opcode = 0;
+		for (k = 0; k < 8; k++) {
+			if (!(ctr->opcode & (1 << k)))
+				continue;
+			switch (1 << k) {
+			case QLCNIC_DUMP_WCRB:
+				qlcnic_write_dump_reg(addr, base, ctr->val1);
+				break;
+			case QLCNIC_DUMP_RWCRB:
+				qlcnic_read_dump_reg(addr, base, &data);
+				qlcnic_write_dump_reg(addr, base, data);
+				break;
+			case QLCNIC_DUMP_ANDCRB:
+				qlcnic_read_dump_reg(addr, base, &data);
+				qlcnic_write_dump_reg(addr, base,
+						      data & ctr->val2);
+				break;
+			case QLCNIC_DUMP_ORCRB:
+				qlcnic_read_dump_reg(addr, base, &data);
+				qlcnic_write_dump_reg(addr, base,
+						      data | ctr->val3);
+				break;
+			case QLCNIC_DUMP_POLLCRB:
+				while (timeout <= ctr->timeout) {
+					qlcnic_read_dump_reg(addr, base, &data);
+					if ((data & ctr->val2) == ctr->val1)
+						break;
+					msleep(1);
+					timeout++;
+				}
+				if (timeout > ctr->timeout) {
+					dev_info(&adapter->pdev->dev,
+					"Timed out, aborting poll CRB\n");
+					return -EINVAL;
+				}
+				break;
+			case QLCNIC_DUMP_RD_SAVE:
+				if (ctr->index_a)
+					addr = t_hdr->saved_state[ctr->index_a];
+				qlcnic_read_dump_reg(addr, base, &data);
+				t_hdr->saved_state[ctr->index_v] = data;
+				break;
+			case QLCNIC_DUMP_WRT_SAVED:
+				if (ctr->index_v)
+					data = t_hdr->saved_state[ctr->index_v];
+				else
+					data = ctr->val1;
+				if (ctr->index_a)
+					addr = t_hdr->saved_state[ctr->index_a];
+				qlcnic_write_dump_reg(addr, base, data);
+				break;
+			case QLCNIC_DUMP_MOD_SAVE_ST:
+				data = t_hdr->saved_state[ctr->index_v];
+				data <<= ctr->shl_val;
+				data >>= ctr->shr_val;
+				if (ctr->val2)
+					data &= ctr->val2;
+				data |= ctr->val3;
+				data += ctr->val1;
+				t_hdr->saved_state[ctr->index_v] = data;
+				break;
+			default:
+				dev_info(&adapter->pdev->dev,
+					 "Unknown opcode\n");
+				break;
+			}
+		}
+		addr += ctr->stride;
+	}
+	return 0;
+}
+
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int loop;
+	u32 val, data = 0;
+	struct __mux *mux = &entry->region.mux;
+	void __iomem *base = adapter->ahw->pci_base0;
+
+	val = mux->val;
+	for (loop = 0; loop < mux->no_ops; loop++) {
+		qlcnic_write_dump_reg(mux->addr, base, val);
+		qlcnic_read_dump_reg(mux->read_addr, base, &data);
+		*buffer++ = cpu_to_le32(val);
+		*buffer++ = cpu_to_le32(data);
+		val += mux->val_stride;
+	}
+	return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i, loop;
+	u32 cnt, addr, data, que_id = 0;
+	void __iomem *base = adapter->ahw->pci_base0;
+	struct __queue *que = &entry->region.que;
+
+	addr = que->read_addr;
+	cnt = que->read_addr_cnt;
+
+	for (loop = 0; loop < que->no_ops; loop++) {
+		qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+		addr = que->read_addr;
+		for (i = 0; i < cnt; i++) {
+			qlcnic_read_dump_reg(addr, base, &data);
+			*buffer++ = cpu_to_le32(data);
+			addr += que->read_addr_stride;
+		}
+		que_id += que->stride;
+	}
+	return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i;
+	u32 data;
+	void __iomem *addr;
+	struct __ocm *ocm = &entry->region.ocm;
+
+	addr = adapter->ahw->pci_base0 + ocm->read_addr;
+	for (i = 0; i < ocm->no_ops; i++) {
+		data = readl(addr);
+		*buffer++ = cpu_to_le32(data);
+		addr += ocm->read_addr_stride;
+	}
+	return ocm->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i, count = 0;
+	u32 fl_addr, size, val, lck_val, addr;
+	struct __mem *rom = &entry->region.mem;
+	void __iomem *base = adapter->ahw->pci_base0;
+
+	fl_addr = rom->addr;
+	size = rom->size/4;
+lock_try:
+	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+	if (!lck_val && count < MAX_CTL_CHECK) {
+		msleep(10);
+		count++;
+		goto lock_try;
+	}
+	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+	for (i = 0; i < size; i++) {
+		addr = fl_addr & 0xFFFF0000;
+		qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+		addr = LSW(fl_addr) + FLASH_ROM_DATA;
+		qlcnic_read_dump_reg(addr, base, &val);
+		fl_addr += 4;
+		*buffer++ = cpu_to_le32(val);
+	}
+	readl(base + QLCNIC_FLASH_SEM2_ULK);
+	return rom->size;
+}
+
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+				struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i;
+	u32 cnt, val, data, addr;
+	void __iomem *base = adapter->ahw->pci_base0;
+	struct __cache *l1 = &entry->region.cache;
+
+	val = l1->init_tag_val;
+
+	for (i = 0; i < l1->no_ops; i++) {
+		qlcnic_write_dump_reg(l1->addr, base, val);
+		qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+		addr = l1->read_addr;
+		cnt = l1->read_addr_num;
+		while (cnt) {
+			qlcnic_read_dump_reg(addr, base, &data);
+			*buffer++ = cpu_to_le32(data);
+			addr += l1->read_addr_stride;
+			cnt--;
+		}
+		val += l1->stride;
+	}
+	return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+				struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	int i;
+	u32 cnt, val, data, addr;
+	u8 poll_mask, poll_to, time_out = 0;
+	void __iomem *base = adapter->ahw->pci_base0;
+	struct __cache *l2 = &entry->region.cache;
+
+	val = l2->init_tag_val;
+	poll_mask = LSB(MSW(l2->ctrl_val));
+	poll_to = MSB(MSW(l2->ctrl_val));
+
+	for (i = 0; i < l2->no_ops; i++) {
+		qlcnic_write_dump_reg(l2->addr, base, val);
+		if (LSW(l2->ctrl_val))
+			qlcnic_write_dump_reg(l2->ctrl_addr, base,
+					      LSW(l2->ctrl_val));
+		if (!poll_mask)
+			goto skip_poll;
+		do {
+			qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+			if (!(data & poll_mask))
+				break;
+			msleep(1);
+			time_out++;
+		} while (time_out <= poll_to);
+
+		if (time_out > poll_to) {
+			dev_err(&adapter->pdev->dev,
+				"Timeout exceeded in %s, aborting dump\n",
+				__func__);
+			return -EINVAL;
+		}
+skip_poll:
+		addr = l2->read_addr;
+		cnt = l2->read_addr_num;
+		while (cnt) {
+			qlcnic_read_dump_reg(addr, base, &data);
+			*buffer++ = cpu_to_le32(data);
+			addr += l2->read_addr_stride;
+			cnt--;
+		}
+		val += l2->stride;
+	}
+	return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+			      struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	u32 addr, data, test, ret = 0;
+	int i, reg_read;
+	struct __mem *mem = &entry->region.mem;
+	void __iomem *base = adapter->ahw->pci_base0;
+
+	reg_read = mem->size;
+	addr = mem->addr;
+	/* data size must be a multiple of 16 bytes and the address 16-byte aligned */
+	if ((addr & 0xf) || (reg_read%16)) {
+		dev_info(&adapter->pdev->dev,
+			 "Unaligned memory addr:0x%x size:0x%x\n",
+			 addr, reg_read);
+		return -EINVAL;
+	}
+
+	mutex_lock(&adapter->ahw->mem_lock);
+
+	while (reg_read != 0) {
+		qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
+		qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
+		qlcnic_write_dump_reg(MIU_TEST_CTR, base,
+				      TA_CTL_ENABLE | TA_CTL_START);
+
+		for (i = 0; i < MAX_CTL_CHECK; i++) {
+			qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+			if (!(test & TA_CTL_BUSY))
+				break;
+		}
+		if (i == MAX_CTL_CHECK) {
+			if (printk_ratelimit())
+				dev_err(&adapter->pdev->dev,
+					"failed to read through agent\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		for (i = 0; i < 4; i++) {
+			qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
+					     &data);
+			*buffer++ = cpu_to_le32(data);
+		}
+		addr += 16;
+		reg_read -= 16;
+		ret += 16;
+	}
+out:
+	mutex_unlock(&adapter->ahw->mem_lock);
+	return ret;
+}
+
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+			   struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+	return 0;
+}
+
+static const struct qlcnic_dump_operations fw_dump_ops[] = {
+	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
+	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
+	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
+	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
+	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
+	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
+	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
+	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
+	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
+	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
+	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
+	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
+	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
+	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
+	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
+	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
+	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
+	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
+	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
+	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
+};
+
+/* Walk the template and collect dump for each entry in the dump template */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+			u32 size)
+{
+	int ret = 1;
+	if (size != entry->hdr.cap_size) {
+		dev_info(dev,
+			 "Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+			 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+		dev_info(dev, "Aborting further dump capture\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+	__le32 *buffer;
+	char mesg[64];
+	char *msg[] = {mesg, NULL};
+	int i, k, ops_cnt, ops_index, dump_size = 0;
+	u32 entry_offset, dump, no_entries, buf_offset = 0;
+	struct qlcnic_dump_entry *entry;
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+	if (fw_dump->clr) {
+		dev_info(&adapter->pdev->dev,
+			 "Previous dump not cleared, not capturing dump\n");
+		return -EIO;
+	}
+	/* Calculate the size for dump data area only */
+	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+		if (i & tmpl_hdr->drv_cap_mask)
+			dump_size += tmpl_hdr->cap_sizes[k];
+	if (!dump_size)
+		return -EIO;
+
+	fw_dump->data = vzalloc(dump_size);
+	if (!fw_dump->data) {
+		dev_info(&adapter->pdev->dev,
+			 "Unable to allocate (%d KB) for fw dump\n",
+			 dump_size / 1024);
+		return -ENOMEM;
+	}
+	buffer = fw_dump->data;
+	fw_dump->size = dump_size;
+	no_entries = tmpl_hdr->num_entries;
+	ops_cnt = ARRAY_SIZE(fw_dump_ops);
+	entry_offset = tmpl_hdr->offset;
+	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+	tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+	for (i = 0; i < no_entries; i++) {
+		entry = (void *)tmpl_hdr + entry_offset;
+		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+			entry_offset += entry->hdr.offset;
+			continue;
+		}
+		/* Find the handler for this entry */
+		ops_index = 0;
+		while (ops_index < ops_cnt) {
+			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+				break;
+			ops_index++;
+		}
+		if (ops_index == ops_cnt) {
+			dev_info(&adapter->pdev->dev,
+				 "Invalid entry type %d, exiting dump\n",
+				 entry->hdr.type);
+			goto error;
+		}
+		/* Collect dump for this entry */
+		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+						     dump))
+			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+		buf_offset += entry->hdr.cap_size;
+		entry_offset += entry->hdr.offset;
+		buffer = fw_dump->data + buf_offset;
+	}
+	if (dump_size != buf_offset) {
+		dev_info(&adapter->pdev->dev,
+			 "Captured(%d) and expected size(%d) do not match\n",
+			 buf_offset, dump_size);
+		goto error;
+	} else {
+		fw_dump->clr = 1;
+		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+			 adapter->netdev->name);
+		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+			 fw_dump->size);
+		/* Send a udev event to notify availability of FW dump */
+		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+		return 0;
+	}
+error:
+	vfree(fw_dump->data);
+	return -EINVAL;
+}
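
A note on the buffer sizing above (an illustrative sketch, not part of the patch): bits 1..7 of tmpl_hdr->drv_cap_mask select the corresponding tmpl_hdr->cap_sizes[] entries, and only the selected entries contribute to dump_size; bit 0 is never consulted (QLCNIC_DUMP_MASK_MAX is 0xff, as defined above). A standalone C sketch of that accounting, with invented mask and size values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int drv_cap_mask = 0x1f;	/* invented capture mask */
		unsigned int cap_sizes[8] = { 0, 4096, 8192, 0, 16384, 1024, 0, 0 };
		unsigned int dump_size = 0;
		unsigned int i, k;

		/* bit k (k = 1..7) of the mask selects cap_sizes[k] */
		for (i = 2, k = 1; i & 0xff; i <<= 1, k++)
			if (i & drv_cap_mask)
				dump_size += cap_sizes[k];

		printf("dump buffer size: %u bytes\n", dump_size);	/* 28672 here */
		return 0;
	}
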
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644
index 0000000..10a702a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -0,0 +1,962 @@
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+	return -EOPNOTSUPP;
+}
+
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+	return -EOPNOTSUPP;
+}
+
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t len)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long new;
+	int ret = -EINVAL;
+
+	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+		goto err_out;
+
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		goto err_out;
+
+	if (strict_strtoul(buf, 2, &new))
+		goto err_out;
+
+	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+		ret = len;
+
+err_out:
+	return ret;
+}
+
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	int bridged_mode = 0;
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+		bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+	return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t len)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long new;
+
+	if (strict_strtoul(buf, 2, &new))
+		return -EINVAL;
+
+	if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+	return len;
+}
+
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n",
+		       !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+				  u8 *state, u8 *rate)
+{
+	*rate = LSB(beacon);
+	*state = MSB(beacon);
+
+	QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+	if (!*state) {
+		*rate = __QLCNIC_MAX_LED_RATE;
+		return 0;
+	} else if (*state > __QLCNIC_MAX_LED_STATE) {
+		return -EINVAL;
+	}
+
+	if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+		return -EINVAL;
+
+	return 0;
+}
+
+static ssize_t qlcnic_store_beacon(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	int max_sds_rings = adapter->max_sds_rings;
+	u16 beacon;
+	u8 b_state, b_rate;
+	int err;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		dev_warn(dev,
+			 "LED test not supported in non privileged mode\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (len != sizeof(u16))
+		return QL_STATUS_INVALID_PARAM;
+
+	memcpy(&beacon, buf, sizeof(u16));
+	err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+	if (err)
+		return err;
+
+	if (adapter->ahw->beacon_state == b_state)
+		return len;
+
+	rtnl_lock();
+
+	if (!adapter->ahw->beacon_state)
+		if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+			rtnl_unlock();
+			return -EBUSY;
+		}
+
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+		err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+		if (err)
+			goto out;
+		set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+	}
+
+	err = qlcnic_config_led(adapter, b_state, b_rate);
+
+	if (!err) {
+		err = len;
+		adapter->ahw->beacon_state = b_state;
+	}
+
+	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+		qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+
+ out:
+	if (!adapter->ahw->beacon_state)
+		clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+	rtnl_unlock();
+
+	return err;
+}
+
+static ssize_t qlcnic_show_beacon(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+				     loff_t offset, size_t size)
+{
+	size_t crb_size = 4;
+
+	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		return -EIO;
+
+	if (offset < QLCNIC_PCI_CRBSPACE) {
+		if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+				  QLCNIC_PCI_CAMQM_END))
+			crb_size = 8;
+		else
+			return -EINVAL;
+	}
+
+	if ((size != crb_size) || (offset & (crb_size-1)))
+		return  -EINVAL;
+
+	return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *attr, char *buf,
+				     loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	u64 qmdata;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+		qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+		memcpy(buf, &qmdata, size);
+	} else {
+		data = QLCRD32(adapter, offset);
+		memcpy(buf, &data, size);
+	}
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+				      struct bin_attribute *attr, char *buf,
+				      loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	u64 qmdata;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+		memcpy(&qmdata, buf, size);
+		qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+	} else {
+		memcpy(&data, buf, size);
+		QLCWR32(adapter, offset, data);
+	}
+	return size;
+}
+
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+				     loff_t offset, size_t size)
+{
+	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		return -EIO;
+
+	if ((size != 8) || (offset & 0x7))
+		return  -EIO;
+
+	return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *attr, char *buf,
+				     loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+		return -EIO;
+
+	memcpy(buf, &data, size);
+
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+				      struct bin_attribute *attr, char *buf,
+				      loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	memcpy(&data, buf, size);
+
+	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+		return -EIO;
+
+	return size;
+}
+
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+			      struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+	u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		src_pci_func = pm_cfg[i].pci_func;
+		dest_pci_func = pm_cfg[i].dest_npar;
+		if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
+		    dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		s_esw_id = adapter->npars[src_pci_func].phy_port;
+		d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+		if (s_esw_id != d_esw_id)
+			return QL_STATUS_INVALID_PARAM;
+	}
+	return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+					    struct kobject *kobj,
+					    struct bin_attribute *attr,
+					    char *buf, loff_t offset,
+					    size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pm_func_cfg *pm_cfg;
+	u32 id, action, pci_func;
+	int count, rem, i, ret;
+
+	count	= size / sizeof(struct qlcnic_pm_func_cfg);
+	rem	= size % sizeof(struct qlcnic_pm_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+
+	ret = validate_pm_config(adapter, pm_cfg, count);
+	if (ret)
+		return ret;
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		action = !!pm_cfg[i].action;
+		id = adapter->npars[pci_func].phy_port;
+		ret = qlcnic_config_port_mirroring(adapter, id, action,
+						   pci_func);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		id = adapter->npars[pci_func].phy_port;
+		adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
+		adapter->npars[pci_func].dest_npar = id;
+	}
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+					   struct kobject *kobj,
+					   struct bin_attribute *attr,
+					   char *buf, loff_t offset,
+					   size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+	int i;
+
+	if (size != sizeof(pm_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+		pm_cfg[i].action = adapter->npars[i].enable_pm;
+		pm_cfg[i].dest_npar = 0;
+		pm_cfg[i].pci_func = i;
+	}
+	memcpy(buf, &pm_cfg, size);
+
+	return size;
+}
+
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+			       struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+	u32 op_mode;
+	u8 pci_func;
+	int i;
+
+	op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+
+	for (i = 0; i < count; i++) {
+		pci_func = esw_cfg[i].pci_func;
+		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
+			if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+				return QL_STATUS_INVALID_PARAM;
+		}
+
+		switch (esw_cfg[i].op_mode) {
+		case QLCNIC_PORT_DEFAULTS:
+			if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+					    QLCNIC_NON_PRIV_FUNC) {
+				if (esw_cfg[i].mac_anti_spoof != 0)
+					return QL_STATUS_INVALID_PARAM;
+				if (esw_cfg[i].mac_override != 1)
+					return QL_STATUS_INVALID_PARAM;
+				if (esw_cfg[i].promisc_mode != 1)
+					return QL_STATUS_INVALID_PARAM;
+			}
+			break;
+		case QLCNIC_ADD_VLAN:
+			if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+				return QL_STATUS_INVALID_PARAM;
+			if (!esw_cfg[i].op_type)
+				return QL_STATUS_INVALID_PARAM;
+			break;
+		case QLCNIC_DEL_VLAN:
+			if (!esw_cfg[i].op_type)
+				return QL_STATUS_INVALID_PARAM;
+			break;
+		default:
+			return QL_STATUS_INVALID_PARAM;
+		}
+	}
+	return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+					     struct kobject *kobj,
+					     struct bin_attribute *attr,
+					     char *buf, loff_t offset,
+					     size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_func_cfg *esw_cfg;
+	struct qlcnic_npar_info *npar;
+	int count, rem, i, ret;
+	u8 pci_func, op_mode = 0;
+
+	count	= size / sizeof(struct qlcnic_esw_func_cfg);
+	rem	= size % sizeof(struct qlcnic_esw_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+	ret = validate_esw_config(adapter, esw_cfg, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
+			if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+				return QL_STATUS_INVALID_PARAM;
+		}
+
+		if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+			continue;
+
+		op_mode = esw_cfg[i].op_mode;
+		qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+		esw_cfg[i].op_mode = op_mode;
+		esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+		switch (esw_cfg[i].op_mode) {
+		case QLCNIC_PORT_DEFAULTS:
+			qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+			break;
+		case QLCNIC_ADD_VLAN:
+			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+			break;
+		case QLCNIC_DEL_VLAN:
+			esw_cfg[i].vlan_id = 0;
+			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+			break;
+		}
+	}
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		goto out;
+
+	for (i = 0; i < count; i++) {
+		pci_func = esw_cfg[i].pci_func;
+		npar = &adapter->npars[pci_func];
+		switch (esw_cfg[i].op_mode) {
+		case QLCNIC_PORT_DEFAULTS:
+			npar->promisc_mode = esw_cfg[i].promisc_mode;
+			npar->mac_override = esw_cfg[i].mac_override;
+			npar->offload_flags = esw_cfg[i].offload_flags;
+			npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+			npar->discard_tagged = esw_cfg[i].discard_tagged;
+			break;
+		case QLCNIC_ADD_VLAN:
+			npar->pvid = esw_cfg[i].vlan_id;
+			break;
+		case QLCNIC_DEL_VLAN:
+			npar->pvid = 0;
+			break;
+		}
+	}
+out:
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+					    struct kobject *kobj,
+					    struct bin_attribute *attr,
+					    char *buf, loff_t offset,
+					    size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+	u8 i;
+
+	if (size != sizeof(esw_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+		esw_cfg[i].pci_func = i;
+		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+			return QL_STATUS_INVALID_PARAM;
+	}
+	memcpy(buf, &esw_cfg, size);
+
+	return size;
+}
+
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+				struct qlcnic_npar_func_cfg *np_cfg,
+				int count)
+{
+	u8 pci_func, i;
+
+	for (i = 0; i < count; i++) {
+		pci_func = np_cfg[i].pci_func;
+		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+		    !IS_VALID_BW(np_cfg[i].max_bw))
+			return QL_STATUS_INVALID_PARAM;
+	}
+	return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+					      struct kobject *kobj,
+					      struct bin_attribute *attr,
+					      char *buf, loff_t offset,
+					      size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_info nic_info;
+	struct qlcnic_npar_func_cfg *np_cfg;
+	int i, count, rem, ret;
+	u8 pci_func;
+
+	count	= size / sizeof(struct qlcnic_npar_func_cfg);
+	rem	= size % sizeof(struct qlcnic_npar_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+	ret = validate_npar_config(adapter, np_cfg, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count ; i++) {
+		pci_func = np_cfg[i].pci_func;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+		if (ret)
+			return ret;
+		nic_info.pci_func = pci_func;
+		nic_info.min_tx_bw = np_cfg[i].min_bw;
+		nic_info.max_tx_bw = np_cfg[i].max_bw;
+		ret = qlcnic_set_nic_info(adapter, &nic_info);
+		if (ret)
+			return ret;
+		adapter->npars[i].min_bw = nic_info.min_tx_bw;
+		adapter->npars[i].max_bw = nic_info.max_tx_bw;
+	}
+
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+					     struct kobject *kobj,
+					     struct bin_attribute *attr,
+					     char *buf, loff_t offset,
+					     size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_info nic_info;
+	struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+	int i, ret;
+
+	if (size != sizeof(np_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+		if (ret)
+			return ret;
+
+		np_cfg[i].pci_func = i;
+		np_cfg[i].op_mode = (u8)nic_info.op_mode;
+		np_cfg[i].port_num = nic_info.phys_port;
+		np_cfg[i].fw_capab = nic_info.capabilities;
+		np_cfg[i].min_bw = nic_info.min_tx_bw;
+		np_cfg[i].max_bw = nic_info.max_tx_bw;
+		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+	}
+	memcpy(buf, &np_cfg, size);
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+					   struct kobject *kobj,
+					   struct bin_attribute *attr,
+					   char *buf, loff_t offset,
+					   size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_statistics port_stats;
+	int ret;
+
+	if (size != sizeof(struct qlcnic_esw_statistics))
+		return QL_STATUS_INVALID_PARAM;
+
+	if (offset >= QLCNIC_MAX_PCI_FUNC)
+		return QL_STATUS_INVALID_PARAM;
+
+	memset(&port_stats, 0, size);
+	ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+				    &port_stats.rx);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+				    &port_stats.tx);
+	if (ret)
+		return ret;
+
+	memcpy(buf, &port_stats, size);
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+					  struct kobject *kobj,
+					  struct bin_attribute *attr,
+					  char *buf, loff_t offset,
+					  size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_statistics esw_stats;
+	int ret;
+
+	if (size != sizeof(struct qlcnic_esw_statistics))
+		return QL_STATUS_INVALID_PARAM;
+
+	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+		return QL_STATUS_INVALID_PARAM;
+
+	memset(&esw_stats, 0, size);
+	ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+				       &esw_stats.rx);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+				       &esw_stats.tx);
+	if (ret)
+		return ret;
+
+	memcpy(buf, &esw_stats, size);
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+					    struct kobject *kobj,
+					    struct bin_attribute *attr,
+					    char *buf, loff_t offset,
+					    size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	int ret;
+
+	if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+		return QL_STATUS_INVALID_PARAM;
+
+	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+				     QLCNIC_QUERY_RX_COUNTER);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+				     QLCNIC_QUERY_TX_COUNTER);
+	if (ret)
+		return ret;
+
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+					     struct kobject *kobj,
+					     struct bin_attribute *attr,
+					     char *buf, loff_t offset,
+					     size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	int ret;
+
+	if (offset >= QLCNIC_MAX_PCI_FUNC)
+		return QL_STATUS_INVALID_PARAM;
+
+	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+				     QLCNIC_QUERY_RX_COUNTER);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+				     QLCNIC_QUERY_TX_COUNTER);
+	if (ret)
+		return ret;
+
+	return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+					    struct kobject *kobj,
+					    struct bin_attribute *attr,
+					    char *buf, loff_t offset,
+					    size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+	struct qlcnic_pci_info *pci_info;
+	int i, ret;
+
+	if (size != sizeof(pci_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	if (!pci_info)
+		return -ENOMEM;
+
+	ret = qlcnic_get_pci_info(adapter, pci_info);
+	if (ret) {
+		kfree(pci_info);
+		return ret;
+	}
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+		pci_cfg[i].pci_func = pci_info[i].id;
+		pci_cfg[i].func_type = pci_info[i].type;
+		pci_cfg[i].port_num = pci_info[i].default_port;
+		pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+		pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+		memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+	}
+	memcpy(buf, &pci_cfg, size);
+	kfree(pci_info);
+	return size;
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = qlcnic_show_bridged_mode,
+	.store = qlcnic_store_bridged_mode,
+};
+
+static struct device_attribute dev_attr_diag_mode = {
+	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = qlcnic_show_diag_mode,
+	.store = qlcnic_store_diag_mode,
+};
+
+static struct device_attribute dev_attr_beacon = {
+	.attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+	.show = qlcnic_show_beacon,
+	.store = qlcnic_store_beacon,
+};
+
+static struct bin_attribute bin_attr_crb = {
+	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_crb,
+	.write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_mem,
+	.write = qlcnic_sysfs_write_mem,
+};
+
+static struct bin_attribute bin_attr_npar_config = {
+	.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_npar_config,
+	.write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+	.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_pci_config,
+	.write = NULL,
+};
+
+static struct bin_attribute bin_attr_port_stats = {
+	.attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_get_port_stats,
+	.write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+	.attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_get_esw_stats,
+	.write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+	.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_esw_config,
+	.write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+	.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_pm_config,
+	.write = qlcnic_sysfs_write_pm_config,
+};
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+		if (device_create_file(dev, &dev_attr_bridged_mode))
+			dev_warn(dev,
+				 "failed to create bridged_mode sysfs entry\n");
+}
+
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+		device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+	if (device_create_bin_file(dev, &bin_attr_port_stats))
+		dev_info(dev, "failed to create port stats sysfs entry");
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+		return;
+	if (device_create_file(dev, &dev_attr_diag_mode))
+		dev_info(dev, "failed to create diag_mode sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_crb))
+		dev_info(dev, "failed to create crb sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_mem))
+		dev_info(dev, "failed to create mem sysfs entry\n");
+
+	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+		return;
+
+	if (device_create_bin_file(dev, &bin_attr_pci_config))
+		dev_info(dev, "failed to create pci config sysfs entry");
+	if (device_create_file(dev, &dev_attr_beacon))
+		dev_info(dev, "failed to create beacon sysfs entry");
+
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+		return;
+	if (device_create_bin_file(dev, &bin_attr_esw_config))
+		dev_info(dev, "failed to create esw config sysfs entry");
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return;
+	if (device_create_bin_file(dev, &bin_attr_npar_config))
+		dev_info(dev, "failed to create npar config sysfs entry");
+	if (device_create_bin_file(dev, &bin_attr_pm_config))
+		dev_info(dev, "failed to create pm config sysfs entry");
+	if (device_create_bin_file(dev, &bin_attr_esw_stats))
+		dev_info(dev, "failed to create eswitch stats sysfs entry");
+}
+
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+	device_remove_bin_file(dev, &bin_attr_port_stats);
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+		return;
+	device_remove_file(dev, &dev_attr_diag_mode);
+	device_remove_bin_file(dev, &bin_attr_crb);
+	device_remove_bin_file(dev, &bin_attr_mem);
+	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+		return;
+	device_remove_bin_file(dev, &bin_attr_pci_config);
+	device_remove_file(dev, &dev_attr_beacon);
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+		return;
+	device_remove_bin_file(dev, &bin_attr_esw_config);
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return;
+	device_remove_bin_file(dev, &bin_attr_npar_config);
+	device_remove_bin_file(dev, &bin_attr_pm_config);
+	device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
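
For reference (a sketch, not part of the patch): the crb attribute registered above only accepts 4-byte reads at 4-byte-aligned offsets (8-byte reads inside the CAMQM window) and requires diag mode to be enabled first through the diag_mode attribute. A user-space sketch under those assumptions; the device path and register offset below are invented examples:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical adapter; write 1 to its diag_mode attribute first */
		const char *attr = "/sys/bus/pci/devices/0000:03:00.0/crb";
		off_t reg = 0x0600004c;		/* invented 4-byte-aligned CRB offset */
		uint32_t val;
		int fd = open(attr, O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &val, sizeof(val), reg) != (ssize_t)sizeof(val)) {
			close(fd);
			return 1;
		}
		printf("crb[0x%lx] = 0x%08x\n", (long)reg, val);
		close(fd);
		return 0;
	}
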
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 58185b6..10093f0 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -86,7 +86,7 @@
 }
 
 /* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 {
 	int status;
 
@@ -364,7 +364,7 @@
 /* Read the 400 xgmac control/statistics registers
  * skipping unused locations.
  */
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
 					unsigned int other_function)
 {
 	int status = 0;
@@ -405,7 +405,7 @@
 	return status;
 }
 
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
 {
 	int status = 0;
 	int i;
@@ -423,7 +423,7 @@
 	return status;
 }
 
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
 {
 	int i;
 
@@ -434,7 +434,7 @@
 	}
 }
 
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
 {
 	int i, status;
 	u32 value[3];
@@ -471,7 +471,7 @@
 	return status;
 }
 
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
 {
 	int status;
 	u32 value, i;
@@ -496,7 +496,7 @@
 }
 
 /* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
 {
 	u32 i;
 	int status;
@@ -515,7 +515,7 @@
 }
 
 /* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
 				u32 offset, u32 count)
 {
 	int i, status = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b01f83a..6cb96b4 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@
 {
 	unsigned tx_head = cp->tx_head;
 	unsigned tx_tail = cp->tx_tail;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	while (tx_tail != tx_head) {
 		struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@
 				 le32_to_cpu(txd->opts1) & 0xffff,
 				 PCI_DMA_TODEVICE);
 
+		bytes_compl += skb->len;
+		pkts_compl++;
+
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
 				netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@
 
 	cp->tx_tail = tx_tail;
 
+	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 		netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@
 		wmb();
 	}
 	cp->tx_head = entry;
+
+	netdev_sent_queue(dev, skb->len);
 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@
 
 	cp->rx_tail = 0;
 	cp->tx_head = cp->tx_tail = 0;
+
+	netdev_reset_queue(cp->dev);
 }
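
The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added to 8139cp in this file wire its TX path into byte queue limits: bytes are reported when a packet is queued to hardware, credited back when its descriptor completes, and the accounting is reset whenever the ring is reinitialised. A generic sketch of that pattern (not part of the patch; every identifier except the netdev_*_queue() helpers is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... place skb on the hardware TX ring ... */
		netdev_sent_queue(dev, skb->len);	/* bytes handed to hardware */
		return NETDEV_TX_OK;
	}

	static void example_tx_clean(struct net_device *dev)
	{
		unsigned int pkts = 0, bytes = 0;

		/* ... for each completed descriptor: pkts++, bytes += skb->len ... */
		netdev_completed_queue(dev, pkts, bytes);	/* may re-wake the queue */
	}

	static void example_ring_reset(struct net_device *dev)
	{
		/* BQL state must be cleared whenever the TX ring is reinitialised */
		netdev_reset_queue(dev);
	}
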
 
 static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +966,38 @@
 
 static inline void cp_start_hw (struct cp_private *cp)
 {
+	dma_addr_t ring_dma;
+
 	cpw16(CpCmd, cp->cpcmd);
+
+	/*
+	 * These (at least TxRingAddr) need to be configured after the
+	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+	 * (C+ Command Register) recommends that these and more be configured
+	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+	 * it's been observed that the TxRingAddr is actually reset to garbage
+	 * when C+ mode Tx is enabled in CpCmd.
+	 */
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	/*
+	 * Strictly speaking, the datasheet says this should be enabled
+	 * *before* setting the descriptor addresses. But what, then, would
+	 * prevent it from doing DMA to random unconfigured addresses?
+	 * This variant appears to work fine.
+	 */
 	cpw8(Cmd, RxOn | TxOn);
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1008,6 @@
 static void cp_init_hw (struct cp_private *cp)
 {
 	struct net_device *dev = cp->dev;
-	dma_addr_t ring_dma;
 
 	cp_reset_hw(cp);
 
@@ -992,17 +1030,6 @@
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);
 
 	cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1192,6 +1219,7 @@
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
+	cp_enable_irq(cp);
 
 	netif_wake_queue(dev);
 
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e02f04d..9f2d416 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -175,8 +175,7 @@
     unsigned int tx_unit_busy:1;
     unsigned char re_tx,	/* Number of packet retransmissions. */
 		addr_mode,		/* Current Rx filter e.g. promiscuous, etc. */
-		pac_cnt_in_tx_buf,
-		chip_type;
+		pac_cnt_in_tx_buf;
 };
 
 /* This code, written by wwc@super.org, resets the adapter every
@@ -339,7 +338,6 @@
 	write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
 
 	lp = netdev_priv(dev);
-	lp->chip_type = RTL8002;
 	lp->addr_mode = CMR2h_Normal;
 	spin_lock_init(&lp->lock);
 
@@ -852,7 +850,7 @@
  *	Set or clear the multicast filter for this adapter.
  */
 
-static void set_rx_mode_8002(struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
 	long ioaddr = dev->base_addr;
@@ -864,58 +862,6 @@
 	write_reg_high(ioaddr, CMR2, lp->addr_mode);
 }
 
-static void set_rx_mode_8012(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
-	unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
-	int i;
-
-	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
-		new_mode = CMR2h_PROMISC;
-	} else if ((netdev_mc_count(dev) > 1000) ||
-		   (dev->flags & IFF_ALLMULTI)) {
-		/* Too many to filter perfectly -- accept all multicasts. */
-		memset(mc_filter, 0xff, sizeof(mc_filter));
-		new_mode = CMR2h_Normal;
-	} else {
-		struct netdev_hw_addr *ha;
-
-		memset(mc_filter, 0, sizeof(mc_filter));
-		netdev_for_each_mc_addr(ha, dev) {
-			int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
-			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
-		}
-		new_mode = CMR2h_Normal;
-	}
-	lp->addr_mode = new_mode;
-    write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
-    for (i = 0; i < 8; i++)
-		write_reg_byte(ioaddr, i, mc_filter[i]);
-	if (net_debug > 2 || 1) {
-		lp->addr_mode = 1;
-		printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
-			   dev->name, lp->addr_mode);
-		for (i = 0; i < 8; i++)
-			printk(" %2.2x", mc_filter[i]);
-		printk(".\n");
-	}
-
-	write_reg_high(ioaddr, CMR2, lp->addr_mode);
-    write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
-}
-
-static void set_rx_mode(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-
-	if (lp->chip_type == RTL8002)
-		return set_rx_mode_8002(dev);
-	else
-		return set_rx_mode_8012(dev);
-}
-
-
 static int __init atp_init_module(void) {
 	if (debug)					/* Emit version even if no cards detected. */
 		printk(KERN_INFO "%s", version);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642..040b137 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -16,8 +16,6 @@
 #define PAR_STATUS	1
 #define PAR_CONTROL 2
 
-enum chip_type { RTL8002, RTL8012 };
-
 #define Ctrl_LNibRead	0x08	/* LP_PSELECP */
 #define Ctrl_HNibRead	0
 #define Ctrl_LNibWrite	0x08	/* LP_PSELECP */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33..248f883 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -78,24 +78,18 @@
 
 #define MAX_READ_REQUEST_SHIFT	12
 #define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
-#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
 #define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
 
 #define R8169_REGS_SIZE		256
 #define R8169_NAPI_WEIGHT	64
 #define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
 #define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
-#define RX_BUF_SIZE	1536	/* Rx Buffer size */
 #define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
 #define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
 
 #define RTL8169_TX_TIMEOUT	(6*HZ)
 #define RTL8169_PHY_TIMEOUT	(10*HZ)
 
-#define RTL_EEPROM_SIG		cpu_to_le32(0x8129)
-#define RTL_EEPROM_SIG_MASK	cpu_to_le32(0xffff)
-#define RTL_EEPROM_SIG_ADDR	0x0000
-
 /* write/read MMIO register */
 #define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
 #define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
@@ -456,6 +450,7 @@
 #define PWM_EN				(1 << 22)
 #define RXDV_GATED_EN			(1 << 19)
 #define EARLY_TALLY_EN			(1 << 16)
+#define FORCE_CLK			(1 << 15) /* force clock request */
 };
 
 enum rtl_register_content {
@@ -519,6 +514,7 @@
 	PMEnable	= (1 << 0),	/* Power Management Enable */
 
 	/* Config2 register p. 25 */
+	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
 	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
 	PCI_Clock_66MHz = 0x01,
 	PCI_Clock_33MHz = 0x00,
@@ -539,6 +535,7 @@
 	Spi_en		= (1 << 3),
 	LanWake		= (1 << 1),	/* LanWake enable/disable */
 	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
+	ASPM_en		= (1 << 0),	/* ASPM enable */
 
 	/* TBICSR p.28 */
 	TBIReset	= 0x80000000,
@@ -687,6 +684,7 @@
 	RTL_FEATURE_WOL		= (1 << 0),
 	RTL_FEATURE_MSI		= (1 << 1),
 	RTL_FEATURE_GMII	= (1 << 2),
+	RTL_FEATURE_FW_LOADED	= (1 << 3),
 };
 
 struct rtl8169_counters {
@@ -1816,8 +1814,7 @@
 }
 
 
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
-				      struct sk_buff *skb)
+static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
 {
 	return (vlan_tx_tag_present(skb)) ?
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
@@ -2394,8 +2391,10 @@
 	struct rtl_fw *rtl_fw = tp->rtl_fw;
 
 	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
-	if (!IS_ERR_OR_NULL(rtl_fw))
+	if (!IS_ERR_OR_NULL(rtl_fw)) {
 		rtl_phy_write_fw(tp, rtl_fw);
+		tp->features |= RTL_FEATURE_FW_LOADED;
+	}
 }
 
 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2406,6 +2405,31 @@
 		rtl_apply_firmware(tp);
 }
 
+static void r810x_aldps_disable(struct rtl8169_private *tp)
+{
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_writephy(tp, 0x18, 0x0310);
+	msleep(100);
+}
+
+static void r810x_aldps_enable(struct rtl8169_private *tp)
+{
+	if (!(tp->features & RTL_FEATURE_FW_LOADED))
+		return;
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_writephy(tp, 0x18, 0x8310);
+}
+
+static void r8168_aldps_enable_1(struct rtl8169_private *tp)
+{
+	if (!(tp->features & RTL_FEATURE_FW_LOADED))
+		return;
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
+}
+
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -3178,6 +3202,8 @@
 	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
 	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
 	rtl_writephy(tp, 0x1f, 0x0000);
+
+	r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
@@ -3250,6 +3276,8 @@
 	rtl_writephy(tp, 0x05, 0x8b85);
 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
 	rtl_writephy(tp, 0x1f, 0x0000);
+
+	r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3257,6 +3285,8 @@
 	rtl_apply_firmware(tp);
 
 	rtl8168f_hw_phy_config(tp);
+
+	r8168_aldps_enable_1(tp);
 }
 
 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3354,6 +3384,8 @@
 	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
 	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
 	rtl_writephy(tp, 0x1f, 0x0000);
+
+	r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3439,21 +3471,19 @@
 	};
 
 	/* Disable ALDPS before ram code */
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_writephy(tp, 0x18, 0x0310);
-	msleep(100);
+	r810x_aldps_disable(tp);
 
 	rtl_apply_firmware(tp);
 
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+	r810x_aldps_enable(tp);
 }
 
 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
 {
 	/* Disable ALDPS before setting firmware */
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_writephy(tp, 0x18, 0x0310);
-	msleep(20);
+	r810x_aldps_disable(tp);
 
 	rtl_apply_firmware(tp);
 
@@ -3463,6 +3493,8 @@
 	rtl_writephy(tp, 0x10, 0x401f);
 	rtl_writephy(tp, 0x19, 0x7030);
 	rtl_writephy(tp, 0x1f, 0x0000);
+
+	r810x_aldps_enable(tp);
 }
 
 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3475,9 +3507,7 @@
 	};
 
 	/* Disable ALDPS before ram code */
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_writephy(tp, 0x18, 0x0310);
-	msleep(100);
+	r810x_aldps_disable(tp);
 
 	rtl_apply_firmware(tp);
 
@@ -3485,6 +3515,8 @@
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 
 	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+	r810x_aldps_enable(tp);
 }
 
 static void rtl_hw_phy_config(struct net_device *dev)
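The ALDPS hunks above factor the repeated "0x1f/0x0000, 0x18/0x0310, msleep" sequence into r810x_aldps_disable() and gate the re-enable helpers on RTL_FEATURE_FW_LOADED, which rtl_apply_firmware() now sets only when PHY firmware was actually written. A minimal sketch of the resulting per-chip call pattern (the function name below is hypothetical; the helpers are the ones added in this patch):

	static void example_hw_phy_config(struct rtl8169_private *tp)
	{
		/* ALDPS must be off while the ram code is loaded. */
		r810x_aldps_disable(tp);

		/* Sets RTL_FEATURE_FW_LOADED only if firmware was applied. */
		rtl_apply_firmware(tp);

		/* ... chip-specific PHY register writes ... */

		/* Silently does nothing unless the firmware flag is set. */
		r810x_aldps_enable(tp);
	}
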
@@ -5015,8 +5047,6 @@
 
 	RTL_W8(MaxTxPacketSize, EarlySize);
 
-	rtl_disable_clock_request(pdev);
-
 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 
@@ -5025,7 +5055,8 @@
 
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 }
 
 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5050,13 +5081,12 @@
 
 	RTL_W8(MaxTxPacketSize, EarlySize);
 
-	rtl_disable_clock_request(pdev);
-
 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
+	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 }
 
 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5113,8 +5143,10 @@
 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 
 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+	RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
 	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 
 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5330,6 +5362,9 @@
 
 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 }
@@ -5355,6 +5390,9 @@
 
 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
 	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
 
@@ -5376,7 +5414,10 @@
 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
 
-	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+	RTL_W32(MISC,
+		(RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
+	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
 	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
 }
@@ -5774,7 +5815,7 @@
 	tp->tx_skb[entry].len = len;
 	txd->addr = cpu_to_le64(mapping);
 
-	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
 	opts[0] = DescOwn;
 
 	rtl8169_tso_csum(tp, skb, opts);
@@ -6992,15 +7033,4 @@
 	.driver.pm	= RTL8169_PM_OPS,
 };
 
-static int __init rtl8169_init_module(void)
-{
-	return pci_register_driver(&rtl8169_pci_driver);
-}
-
-static void __exit rtl8169_cleanup_module(void)
-{
-	pci_unregister_driver(&rtl8169_pci_driver);
-}
-
-module_init(rtl8169_init_module);
-module_exit(rtl8169_cleanup_module);
+module_pci_driver(rtl8169_pci_driver);
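module_pci_driver() removes the need for the open-coded init/exit pair deleted above; roughly speaking it expands to the equivalent of the code it replaces (a sketch, not the exact macro text):

	/* Approximate expansion of module_pci_driver(rtl8169_pci_driver). */
	static int __init rtl8169_pci_driver_init(void)
	{
		return pci_register_driver(&rtl8169_pci_driver);
	}
	module_init(rtl8169_pci_driver_init);

	static void __exit rtl8169_pci_driver_exit(void)
	{
		pci_unregister_driver(&rtl8169_pci_driver);
	}
	module_exit(rtl8169_pci_driver_exit);
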
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8bfea0..3d70586 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2286,7 +2286,7 @@
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdp->mii_bus->irq[i] = PHY_POLL;
 
-	/* regist mdio bus */
+	/* register mdio bus */
 	ret = mdiobus_register(mdp->mii_bus);
 	if (ret)
 		goto out_free_irq;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 6a40dd0..72a0174 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -399,12 +399,6 @@
 static int
 ether3_open(struct net_device *dev)
 {
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
-			dev->name);
-		return -EINVAL;
-	}
-
 	if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
 		return -EAGAIN;
 
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 25906c1..435b4f1 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,10 +1,11 @@
 config SFC
 	tristate "Solarflare SFC4000/SFC9000-family support"
-	depends on PCI && INET
+	depends on PCI
 	select MDIO
 	select CRC32
 	select I2C
 	select I2C_ALGOBIT
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports 10-gigabit Ethernet cards based on
 	  the Solarflare SFC4000 and SFC9000-family controllers.
@@ -34,10 +35,3 @@
 	  This enables support for the SFC9000 I/O Virtualization
 	  features, allowing accelerated network performance in
 	  virtualized environments.
-config SFC_PTP
-	bool "Solarflare SFC9000-family PTP support"
-	depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
-	default y
-	---help---
-	  This enables support for the Precision Time Protocol (PTP)
-	  on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index e11f2ec..945bf06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,9 +2,8 @@
 			   falcon_xmac.o mcdi_mac.o \
 			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
 			   tenxpress.o txc43128_phy.o falcon_boards.o \
-			   mcdi.o mcdi_phy.o mcdi_mon.o
+			   mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 sfc-$(CONFIG_SFC_SRIOV)	+= siena_sriov.o
-sfc-$(CONFIG_SFC_PTP)	+= ptp.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 576a310..2487f58 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,9 +868,7 @@
 	struct work_struct peer_work;
 #endif
 
-#ifdef CONFIG_SFC_PTP
 	struct efx_ptp_data *ptp_data;
-#endif
 
 	/* The following fields may be written more often */
 
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 438cef1..7a9647a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -252,7 +252,6 @@
 				     bool spoofchk);
 
 struct ethtool_ts_info;
-#ifdef CONFIG_SFC_PTP
 extern void efx_ptp_probe(struct efx_nic *efx);
 extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
 extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@
 extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-#else
-static inline void efx_ptp_probe(struct efx_nic *efx) {}
-static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
-{
-	return -EOPNOTSUPP;
-}
-static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
-				      struct ethtool_ts_info *ts_info)
-{
-	ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
-				    SOF_TIMESTAMPING_RX_SOFTWARE);
-	ts_info->phc_index = -1;
-
-	return 0;
-}
-static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
-	return false;
-}
-static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
-	return NETDEV_TX_OK;
-}
-static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
-#endif
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d15f7a..990f574 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1400,16 +1400,6 @@
 
 	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
-	/*
-	 * Check that the address is valid.  If its not, refuse
-	 * to bring the device up.	 The user must specify an
-	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
-	 */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		PRINTK("%s: no valid ethernet hw addr\n", __func__);
-		return -EINVAL;
-	}
-
 	/* reset the hardware */
 	smc911x_reset(dev);
 
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 318adc9..f516e5a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1474,16 +1474,6 @@
 
 	DBG(2, "%s: %s\n", dev->name, __func__);
 
-	/*
-	 * Check that the address is valid.  If its not, refuse
-	 * to bring the device up.  The user must specify an
-	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
-	 */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		PRINTK("%s: no valid ethernet hw addr\n", __func__);
-		return -EINVAL;
-	}
-
 	/* Setup the default Register Modes */
 	lp->tcr_cur_mode = TCR_DEFAULT;
 	lp->rcr_cur_mode = RCR_DEFAULT;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c53c0f4..cc02562 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1463,11 +1463,6 @@
 		return -EAGAIN;
 	}
 
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
-		return -EADDRNOTAVAIL;
-	}
-
 	/* Reset the LAN911x */
 	if (smsc911x_soft_reset(pdata)) {
 		SMSC_WARN(pdata, hw, "soft reset failed");
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9f44827..1164930 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -54,31 +54,6 @@
 	  By default, the DMA arbitration scheme is based on Round-robin
 	  (rx:tx priority is 1:1).
 
-config STMMAC_TIMER
-	bool "STMMAC Timer optimisation"
-	default n
-	depends on RTC_HCTOSYS_DEVICE
-	---help---
-	  Use an external timer for mitigating the number of network
-	  interrupts. Currently, for SH architectures, it is possible
-	  to use the TMU channel 2 and the SH-RTC device.
-
-choice
-        prompt "Select Timer device"
-        depends on STMMAC_TIMER
-
-config STMMAC_TMU_TIMER
-        bool "TMU channel 2"
-        depends on CPU_SH4
-	---help---
-
-config STMMAC_RTC_TIMER
-        bool "Real time clock"
-        depends on RTC_CLASS
-	---help---
-
-endchoice
-
 choice
 	prompt "Select the DMA TX/RX descriptor operating modes"
 	depends on STMMAC_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index bc965ac..c8e8ea6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,5 +1,4 @@
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
 stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
 stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 719be39..186d148 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -48,6 +48,10 @@
 #define CHIP_DBG(fmt, args...)  do { } while (0)
 #endif
 
+/* Synopsys Core versions */
+#define	DWMAC_CORE_3_40	0x34
+#define	DWMAC_CORE_3_50	0x35
+
 #undef FRAME_FILTER_DEBUG
 /* #define FRAME_FILTER_DEBUG */
 
@@ -81,7 +85,7 @@
 	unsigned long rx_missed_cntr;
 	unsigned long rx_overflow_cntr;
 	unsigned long rx_vlan;
-	/* Tx/Rx IRQ errors */
+	/* Tx/Rx IRQ error info */
 	unsigned long tx_undeflow_irq;
 	unsigned long tx_process_stopped_irq;
 	unsigned long tx_jabber_irq;
@@ -91,18 +95,23 @@
 	unsigned long rx_watchdog_irq;
 	unsigned long tx_early_irq;
 	unsigned long fatal_bus_error_irq;
-	/* Extra info */
+	/* Tx/Rx IRQ Events */
+	unsigned long rx_early_irq;
 	unsigned long threshold;
 	unsigned long tx_pkt_n;
 	unsigned long rx_pkt_n;
-	unsigned long poll_n;
-	unsigned long sched_timer_n;
 	unsigned long normal_irq_n;
+	unsigned long rx_normal_irq_n;
+	unsigned long napi_poll;
+	unsigned long tx_normal_irq_n;
+	unsigned long tx_clean;
+	unsigned long tx_reset_ic_bit;
+	unsigned long irq_receive_pmt_irq_n;
+	/* MMC info */
 	unsigned long mmc_tx_irq_n;
 	unsigned long mmc_rx_irq_n;
 	unsigned long mmc_rx_csum_offload_irq_n;
 	/* EEE */
-	unsigned long irq_receive_pmt_irq_n;
 	unsigned long irq_tx_path_in_lpi_mode_n;
 	unsigned long irq_tx_path_exit_lpi_mode_n;
 	unsigned long irq_rx_path_in_lpi_mode_n;
@@ -162,6 +171,15 @@
 #define DMA_HW_FEAT_ACTPHYIF	0x70000000 /* Active/selected PHY interface */
 #define DEFAULT_DMA_PBL		8
 
+/* Max/Min RI Watchdog Timer count value */
+#define MAX_DMA_RIWT		0xff
+#define MIN_DMA_RIWT		0x20
+/* Tx coalesce parameters */
+#define STMMAC_COAL_TX_TIMER	40000
+#define STMMAC_MAX_COAL_TX_TICK	100000
+#define STMMAC_TX_MAX_FRAMES	256
+#define STMMAC_TX_FRAMES	64
+
 enum rx_frame_status { /* IPC status */
 	good_frame = 0,
 	discard_frame = 1,
@@ -169,10 +187,11 @@
 	llc_snap = 4,
 };
 
-enum tx_dma_irq_status {
-	tx_hard_error = 1,
-	tx_hard_error_bump_tc = 2,
-	handle_tx_rx = 3,
+enum dma_irq_status {
+	tx_hard_error = 0x1,
+	tx_hard_error_bump_tc = 0x2,
+	handle_rx = 0x4,
+	handle_tx = 0x8,
 };
 
 enum core_specific_irq_mask {
@@ -296,6 +315,8 @@
 			      struct stmmac_extra_stats *x);
 	/* If supported then get the optional core features */
 	unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+	/* Program the HW RX Watchdog */
+	void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
 };
 
 struct stmmac_ops {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 0e4cace..7ad56af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -230,8 +230,5 @@
 #define GMAC_MMC_TX_INTR   0x108
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
-/* Synopsys Core versions */
-#define	DWMAC_CORE_3_40	0x34
-
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
 #endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0335000..bf83c03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -174,6 +174,11 @@
 	return readl(ioaddr + DMA_HW_FEATURE);
 }
 
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+}
+
 const struct stmmac_dma_ops dwmac1000_dma_ops = {
 	.init = dwmac1000_dma_init,
 	.dump_regs = dwmac1000_dump_dma_regs,
@@ -187,4 +192,5 @@
 	.stop_rx = dwmac_dma_stop_rx,
 	.dma_interrupt = dwmac_dma_interrupt,
 	.get_hw_feature = dwmac1000_get_hw_feature,
+	.rx_watchdog = dwmac1000_rx_watchdog,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e49c9a0..807f303 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -35,7 +35,10 @@
 #define DMA_CONTROL		0x00001018	/* Ctrl (Operational Mode) */
 #define DMA_INTR_ENA		0x0000101c	/* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR	0x00001020	/* Missed Frame Counter */
-#define DMA_AXI_BUS_MODE       0x00001028      /* AXI Bus Mode */
+/* Rx watchdog register */
+#define DMA_RX_WATCHDOG		0x00001024
+/* AXI Bus Mode */
+#define DMA_AXI_BUS_MODE	0x00001028
 #define DMA_CUR_TX_BUF_ADDR	0x00001050	/* Current Host Tx Buffer */
 #define DMA_CUR_RX_BUF_ADDR	0x00001054	/* Current Host Rx Buffer */
 #define DMA_HW_FEATURE		0x00001058	/* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4e0e18a..491d7e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -204,16 +204,28 @@
 		}
 	}
 	/* TX/RX NORMAL interrupts */
-	if (intr_status & DMA_STATUS_NIS) {
+	if (likely(intr_status & DMA_STATUS_NIS)) {
 		x->normal_irq_n++;
-		if (likely((intr_status & DMA_STATUS_RI) ||
-			 (intr_status & (DMA_STATUS_TI))))
-				ret = handle_tx_rx;
+		if (likely(intr_status & DMA_STATUS_RI)) {
+			u32 value = readl(ioaddr + DMA_INTR_ENA);
+			/* to schedule NAPI on real RIE event. */
+			if (likely(value & DMA_INTR_ENA_RIE)) {
+				x->rx_normal_irq_n++;
+				ret |= handle_rx;
+			}
+		}
+		if (likely(intr_status & DMA_STATUS_TI)) {
+			x->tx_normal_irq_n++;
+			ret |= handle_tx;
+		}
+		if (unlikely(intr_status & DMA_STATUS_ERI))
+			x->rx_early_irq++;
 	}
 	/* Optional hardware blocks, interrupts should be disabled */
 	if (unlikely(intr_status &
 		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
 		pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
 	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
 	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 7d51a65..023a4fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,16 +24,13 @@
 #define __STMMAC_H__
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
-#define DRV_MODULE_VERSION	"March_2012"
+#define DRV_MODULE_VERSION	"Nov_2012"
 
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include <linux/pci.h>
 #include "common.h"
-#ifdef CONFIG_STMMAC_TIMER
-#include "stmmac_timer.h"
-#endif
 
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
@@ -77,9 +74,6 @@
 	spinlock_t tx_lock;
 	int wolopts;
 	int wol_irq;
-#ifdef CONFIG_STMMAC_TIMER
-	struct stmmac_timer *tm;
-#endif
 	struct plat_stmmacenet_data *plat;
 	struct stmmac_counters mmc;
 	struct dma_features dma_cap;
@@ -93,6 +87,12 @@
 	int eee_enabled;
 	int eee_active;
 	int tx_lpi_timer;
+	struct timer_list txtimer;
+	u32 tx_count_frames;
+	u32 tx_coal_frames;
+	u32 tx_coal_timer;
+	int use_riwt;
+	u32 rx_riwt;
 };
 
 extern int phyaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 76fd61a..1372ce2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -76,7 +76,7 @@
 	STMMAC_STAT(rx_missed_cntr),
 	STMMAC_STAT(rx_overflow_cntr),
 	STMMAC_STAT(rx_vlan),
-	/* Tx/Rx IRQ errors */
+	/* Tx/Rx IRQ error info */
 	STMMAC_STAT(tx_undeflow_irq),
 	STMMAC_STAT(tx_process_stopped_irq),
 	STMMAC_STAT(tx_jabber_irq),
@@ -86,18 +86,23 @@
 	STMMAC_STAT(rx_watchdog_irq),
 	STMMAC_STAT(tx_early_irq),
 	STMMAC_STAT(fatal_bus_error_irq),
-	/* Extra info */
+	/* Tx/Rx IRQ Events */
+	STMMAC_STAT(rx_early_irq),
 	STMMAC_STAT(threshold),
 	STMMAC_STAT(tx_pkt_n),
 	STMMAC_STAT(rx_pkt_n),
-	STMMAC_STAT(poll_n),
-	STMMAC_STAT(sched_timer_n),
 	STMMAC_STAT(normal_irq_n),
-	STMMAC_STAT(normal_irq_n),
+	STMMAC_STAT(rx_normal_irq_n),
+	STMMAC_STAT(napi_poll),
+	STMMAC_STAT(tx_normal_irq_n),
+	STMMAC_STAT(tx_clean),
+	STMMAC_STAT(tx_reset_ic_bit),
+	STMMAC_STAT(irq_receive_pmt_irq_n),
+	/* MMC info */
 	STMMAC_STAT(mmc_tx_irq_n),
 	STMMAC_STAT(mmc_rx_irq_n),
 	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
-	STMMAC_STAT(irq_receive_pmt_irq_n),
+	/* EEE */
 	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
 	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
 	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
@@ -519,6 +524,87 @@
 	return phy_ethtool_set_eee(priv->phydev, edata);
 }
 
+static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+{
+	unsigned long clk = clk_get_rate(priv->stmmac_clk);
+
+	if (!clk)
+		return 0;
+
+	return (usec * (clk / 1000000)) / 256;
+}
+
+static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+{
+	unsigned long clk = clk_get_rate(priv->stmmac_clk);
+
+	if (!clk)
+		return 0;
+
+	return (riwt * 256) / (clk / 1000000);
+}
+
+static int stmmac_get_coalesce(struct net_device *dev,
+			       struct ethtool_coalesce *ec)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	ec->tx_coalesce_usecs = priv->tx_coal_timer;
+	ec->tx_max_coalesced_frames = priv->tx_coal_frames;
+
+	if (priv->use_riwt)
+		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+
+	return 0;
+}
+
+static int stmmac_set_coalesce(struct net_device *dev,
+			       struct ethtool_coalesce *ec)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int rx_riwt;
+
+	/* Reject the coalesce parameters that are not supported */
+	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+	    (ec->rx_max_coalesced_frames_high) ||
+	    (ec->tx_max_coalesced_frames_irq) ||
+	    (ec->stats_block_coalesce_usecs) ||
+	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+		return -EOPNOTSUPP;
+
+	if (ec->rx_coalesce_usecs == 0)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs == 0) &&
+	    (ec->tx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
+	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
+		return -EINVAL;
+
+	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
+		return -EINVAL;
+	else if (!priv->use_riwt)
+		return -EOPNOTSUPP;
+
+	/* Only copy relevant parameters, ignore all others. */
+	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
+	priv->tx_coal_timer = ec->tx_coalesce_usecs;
+	priv->rx_riwt = rx_riwt;
+	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+	return 0;
+}
+
 static const struct ethtool_ops stmmac_ethtool_ops = {
 	.begin = stmmac_check_if_running,
 	.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -539,6 +625,8 @@
 	.set_eee = stmmac_ethtool_op_set_eee,
 	.get_sset_count	= stmmac_get_sset_count,
 	.get_ts_info = ethtool_op_get_ts_info,
+	.get_coalesce = stmmac_get_coalesce,
+	.set_coalesce = stmmac_set_coalesce,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
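The conversion helpers above assume the Rx watchdog counts in units of 256 CSR clock cycles. With a hypothetical 62.5 MHz stmmac_clk (truncated to 62 MHz by the integer division), 1024 usec converts to riwt = (1024 * 62) / 256 = 248, which lies inside the [MIN_DMA_RIWT, MAX_DMA_RIWT] window and converts back to exactly 1024 usec. From user space the new hooks map onto the usual ethtool knobs, e.g. "ethtool -C eth0 rx-usecs 1024 tx-usecs 40000 tx-frames 64". A worked round trip under that assumed clock rate:

	/* Round trip of the usec <-> riwt conversion (62.5 MHz clock assumed). */
	unsigned long clk = 62500000;
	u32 usec = 1024;
	u32 riwt = (usec * (clk / 1000000)) / 256;	/* (1024 * 62) / 256 = 248  */
	u32 back = (riwt * 256) / (clk / 1000000);	/* (248 * 256) / 62 = 1024  */
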
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c6cdbc4..542edbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -115,16 +115,6 @@
 module_param(tc, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tc, "DMA threshold control value");
 
-/* Pay attention to tune this parameter; take care of both
- * hardware capability and network stabitily/performance impact.
- * Many tests showed that ~4ms latency seems to be good enough. */
-#ifdef CONFIG_STMMAC_TIMER
-#define DEFAULT_PERIODIC_RATE	256
-static int tmrate = DEFAULT_PERIODIC_RATE;
-module_param(tmrate, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
-#endif
-
 #define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
 static int buf_sz = DMA_BUFFER_SIZE;
 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
@@ -147,6 +137,8 @@
 static void stmmac_exit_fs(void);
 #endif
 
+#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
 /**
  * stmmac_verify_args - verify the driver parameters.
  * Description: it verifies if some wrong parameter is passed to the driver.
@@ -536,12 +528,6 @@
 	else
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
 
-#ifdef CONFIG_STMMAC_TIMER
-	/* Disable interrupts on completion for the reception if timer is on */
-	if (likely(priv->tm->enable))
-		dis_ic = 1;
-#endif
-
 	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
 	    txsize, rxsize, bfsize);
 
@@ -617,6 +603,8 @@
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 
+	if (priv->use_riwt)
+		dis_ic = 1;
 	/* Clear the Rx/Tx descriptors */
 	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
 	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
@@ -704,16 +692,18 @@
 }
 
 /**
- * stmmac_tx:
- * @priv: private driver structure
+ * stmmac_tx_clean:
+ * @priv: private data pointer
  * Description: it reclaims resources after transmission completes.
  */
-static void stmmac_tx(struct stmmac_priv *priv)
+static void stmmac_tx_clean(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
 
 	spin_lock(&priv->tx_lock);
 
+	priv->xstats.tx_clean++;
+
 	while (priv->dirty_tx != priv->cur_tx) {
 		int last;
 		unsigned int entry = priv->dirty_tx % txsize;
@@ -773,69 +763,16 @@
 	spin_unlock(&priv->tx_lock);
 }
 
-static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
 {
-#ifdef CONFIG_STMMAC_TIMER
-	if (likely(priv->tm->enable))
-		priv->tm->timer_start(tmrate);
-	else
-#endif
-		priv->hw->dma->enable_dma_irq(priv->ioaddr);
+	priv->hw->dma->enable_dma_irq(priv->ioaddr);
 }
 
-static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
 {
-#ifdef CONFIG_STMMAC_TIMER
-	if (likely(priv->tm->enable))
-		priv->tm->timer_stop();
-	else
-#endif
-		priv->hw->dma->disable_dma_irq(priv->ioaddr);
+	priv->hw->dma->disable_dma_irq(priv->ioaddr);
 }
 
-static int stmmac_has_work(struct stmmac_priv *priv)
-{
-	unsigned int has_work = 0;
-	int rxret, tx_work = 0;
-
-	rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
-		(priv->cur_rx % priv->dma_rx_size));
-
-	if (priv->dirty_tx != priv->cur_tx)
-		tx_work = 1;
-
-	if (likely(!rxret || tx_work))
-		has_work = 1;
-
-	return has_work;
-}
-
-static inline void _stmmac_schedule(struct stmmac_priv *priv)
-{
-	if (likely(stmmac_has_work(priv))) {
-		stmmac_disable_irq(priv);
-		napi_schedule(&priv->napi);
-	}
-}
-
-#ifdef CONFIG_STMMAC_TIMER
-void stmmac_schedule(struct net_device *dev)
-{
-	struct stmmac_priv *priv = netdev_priv(dev);
-
-	priv->xstats.sched_timer_n++;
-
-	_stmmac_schedule(priv);
-}
-
-static void stmmac_no_timer_started(unsigned int x)
-{;
-};
-
-static void stmmac_no_timer_stopped(void)
-{;
-};
-#endif
 
 /**
  * stmmac_tx_err:
@@ -858,16 +795,18 @@
 	netif_wake_queue(priv->dev);
 }
 
-
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
 	int status;
 
 	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
-	if (likely(status == handle_tx_rx))
-		_stmmac_schedule(priv);
-
-	else if (unlikely(status == tx_hard_error_bump_tc)) {
+	if (likely((status & handle_rx)) || (status & handle_tx)) {
+		if (likely(napi_schedule_prep(&priv->napi))) {
+			stmmac_disable_dma_irq(priv);
+			__napi_schedule(&priv->napi);
+		}
+	}
+	if (unlikely(status & tx_hard_error_bump_tc)) {
 		/* Try to bump up the dma threshold on this failure */
 		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
 			tc += 64;
@@ -983,7 +922,6 @@
 		/* Alternate (enhanced) DESC mode*/
 		priv->dma_cap.enh_desc =
 			(hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
-
 	}
 
 	return hw_cap;
@@ -1025,6 +963,38 @@
 }
 
 /**
+ * stmmac_tx_timer:
+ * @data: data pointer
+ * Description:
+ * This is the timer handler to directly invoke the stmmac_tx_clean.
+ */
+static void stmmac_tx_timer(unsigned long data)
+{
+	struct stmmac_priv *priv = (struct stmmac_priv *)data;
+
+	stmmac_tx_clean(priv);
+}
+
+/**
+ * stmmac_init_tx_coalesce:
+ * @priv: private data structure
+ * Description:
+ * This initializes the transmit coalesce parameters: timer rate,
+ * timer handler and the default threshold used for setting the
+ * interrupt-on-completion bit.
+ */
+static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+{
+	priv->tx_coal_frames = STMMAC_TX_FRAMES;
+	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+	init_timer(&priv->txtimer);
+	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
+	priv->txtimer.data = (unsigned long)priv;
+	priv->txtimer.function = stmmac_tx_timer;
+	add_timer(&priv->txtimer);
+}
+
+/**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
  *  Description:
@@ -1038,23 +1008,6 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-#ifdef CONFIG_STMMAC_TIMER
-	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
-	if (unlikely(priv->tm == NULL))
-		return -ENOMEM;
-
-	priv->tm->freq = tmrate;
-
-	/* Test if the external timer can be actually used.
-	 * In case of failure continue without timer. */
-	if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
-		pr_warning("stmmaceth: cannot attach the external timer.\n");
-		priv->tm->freq = 0;
-		priv->tm->timer_start = stmmac_no_timer_started;
-		priv->tm->timer_stop = stmmac_no_timer_stopped;
-	} else
-		priv->tm->enable = 1;
-#endif
 	clk_prepare_enable(priv->stmmac_clk);
 
 	stmmac_check_ether_addr(priv);
@@ -1141,10 +1094,6 @@
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
-#ifdef CONFIG_STMMAC_TIMER
-	priv->tm->timer_start(tmrate);
-#endif
-
 	/* Dump DMA/MAC registers */
 	if (netif_msg_hw(priv)) {
 		priv->hw->mac->dump_regs(priv->ioaddr);
@@ -1157,6 +1106,13 @@
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
 	priv->eee_enabled = stmmac_eee_init(priv);
 
+	stmmac_init_tx_coalesce(priv);
+
+	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+		priv->rx_riwt = MAX_DMA_RIWT;
+		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+	}
+
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
@@ -1170,9 +1126,6 @@
 	free_irq(dev->irq, dev);
 
 open_error:
-#ifdef CONFIG_STMMAC_TIMER
-	kfree(priv->tm);
-#endif
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
@@ -1203,14 +1156,10 @@
 
 	netif_stop_queue(dev);
 
-#ifdef CONFIG_STMMAC_TIMER
-	/* Stop and release the timer */
-	stmmac_close_ext_timer();
-	if (priv->tm != NULL)
-		kfree(priv->tm);
-#endif
 	napi_disable(&priv->napi);
 
+	del_timer_sync(&priv->txtimer);
+
 	/* Free the IRQ lines */
 	free_irq(dev->irq, dev);
 	if (priv->wol_irq != dev->irq)
@@ -1273,11 +1222,13 @@
 
 #ifdef STMMAC_XMIT_DEBUG
 	if ((skb->len > ETH_FRAME_LEN) || nfrags)
-		pr_info("stmmac xmit:\n"
-		       "\tskb addr %p - len: %d - nopaged_len: %d\n"
-		       "\tn_frags: %d - ip_summed: %d - %s gso\n",
-		       skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
-		       !skb_is_gso(skb) ? "isn't" : "is");
+		pr_debug("stmmac xmit: [entry %d]\n"
+			 "\tskb addr %p - len: %d - nopaged_len: %d\n"
+			 "\tn_frags: %d - ip_summed: %d - %s gso\n"
+			 "\ttx_count_frames %d\n", entry,
+			 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
+			 !skb_is_gso(skb) ? "isn't" : "is",
+			 priv->tx_count_frames);
 #endif
 
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -1287,9 +1238,9 @@
 
 #ifdef STMMAC_XMIT_DEBUG
 	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
-		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
-		       "\t\tn_frags: %d, ip_summed: %d\n",
-		       skb->len, nopaged_len, nfrags, skb->ip_summed);
+		pr_debug("\tskb len: %d, nopaged_len: %d,\n"
+			 "\t\tn_frags: %d, ip_summed: %d\n",
+			 skb->len, nopaged_len, nfrags, skb->ip_summed);
 #endif
 	priv->tx_skbuff[entry] = skb;
 
@@ -1320,16 +1271,24 @@
 		wmb();
 	}
 
-	/* Interrupt on completition only for the latest segment */
+	/* Finalize the latest segment. */
 	priv->hw->desc->close_tx_desc(desc);
 
-#ifdef CONFIG_STMMAC_TIMER
-	/* Clean IC while using timer */
-	if (likely(priv->tm->enable))
-		priv->hw->desc->clear_tx_ic(desc);
-#endif
-
 	wmb();
+	/* According to the coalesce parameter, the IC bit for the latest
+	 * segment may be cleared and the timer re-started to invoke
+	 * stmmac_tx_clean. This approach also takes care of the fragments.
+	 */
+	priv->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames > priv->tx_count_frames) {
+		priv->hw->desc->clear_tx_ic(desc);
+		priv->xstats.tx_reset_ic_bit++;
+		TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
+		       priv->tx_count_frames);
+		mod_timer(&priv->txtimer,
+			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
+	} else
+		priv->tx_count_frames = 0;
 
 	/* To avoid raise condition */
 	priv->hw->desc->set_tx_owner(first);
@@ -1471,14 +1430,12 @@
 #endif
 			skb->protocol = eth_type_trans(skb, priv->dev);
 
-			if (unlikely(!priv->plat->rx_coe)) {
-				/* No RX COE for old mac10/100 devices */
+			if (unlikely(!priv->plat->rx_coe))
 				skb_checksum_none_assert(skb);
-				netif_receive_skb(skb);
-			} else {
+			else
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				napi_gro_receive(&priv->napi, skb);
-			}
+
+			napi_gro_receive(&priv->napi, skb);
 
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
@@ -1500,21 +1457,20 @@
  *  @budget : maximum number of packets that the current CPU can receive from
  *	      all interfaces.
  *  Description :
- *   This function implements the the reception process.
- *   Also it runs the TX completion thread
+ *  This looks at the incoming frames and cleans the Tx resources.
  */
 static int stmmac_poll(struct napi_struct *napi, int budget)
 {
 	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
 	int work_done = 0;
 
-	priv->xstats.poll_n++;
-	stmmac_tx(priv);
-	work_done = stmmac_rx(priv, budget);
+	priv->xstats.napi_poll++;
+	stmmac_tx_clean(priv);
 
+	work_done = stmmac_rx(priv, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		stmmac_enable_irq(priv);
+		stmmac_enable_dma_irq(priv);
 	}
 	return work_done;
 }
@@ -1523,7 +1479,7 @@
  *  stmmac_tx_timeout
  *  @dev : Pointer to net device structure
  *  Description: this function is called when a packet transmission fails to
- *   complete within a reasonable tmrate. The driver will mark the error in the
+ *   complete within a reasonable time. The driver will mark the error in the
  *   netdev structure and arrange for the device to be reset to a sane state
  *   in order to transmit a new packet.
  */
@@ -2050,6 +2006,16 @@
 	if (flow_ctrl)
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
+	/* Rx Watchdog is available in cores newer than 3.40.
+	 * In some cases, for example on buggy HW, this feature
+	 * has to be disabled; this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+		priv->use_riwt = 1;
+		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+	}
+
 	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
 	spin_lock_init(&priv->lock);
@@ -2141,11 +2107,9 @@
 	netif_device_detach(ndev);
 	netif_stop_queue(ndev);
 
-#ifdef CONFIG_STMMAC_TIMER
-	priv->tm->timer_stop();
-	if (likely(priv->tm->enable))
+	if (priv->use_riwt)
 		dis_ic = 1;
-#endif
+
 	napi_disable(&priv->napi);
 
 	/* Stop TX/RX DMA */
@@ -2196,10 +2160,6 @@
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
-#ifdef CONFIG_STMMAC_TIMER
-	if (likely(priv->tm->enable))
-		priv->tm->timer_start(tmrate);
-#endif
 	napi_enable(&priv->napi);
 
 	netif_start_queue(ndev);
@@ -2295,11 +2255,6 @@
 		} else if (!strncmp(opt, "eee_timer:", 6)) {
 			if (kstrtoint(opt + 10, 0, &eee_timer))
 				goto err;
-#ifdef CONFIG_STMMAC_TIMER
-		} else if (!strncmp(opt, "tmrate:", 7)) {
-			if (kstrtoint(opt + 7, 0, &tmrate))
-				goto err;
-#endif
 		}
 	}
 	return 0;
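Putting the stmmac_main.c hunks together, the software Tx mitigation replaces the old external-timer scheme with a plain timer_list: the timer is set up in stmmac_open(), re-armed from the xmit path whenever the IC bit is suppressed, and destroyed in stmmac_release(). A condensed view of that lifecycle, using the legacy timer API this driver is written against:

	/* Condensed Tx coalesce timer lifecycle (sketch of what this patch wires up). */
	init_timer(&priv->txtimer);				/* stmmac_open()    */
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);

	mod_timer(&priv->txtimer,				/* stmmac_xmit()    */
		  STMMAC_COAL_TIMER(priv->tx_coal_timer));

	del_timer_sync(&priv->txtimer);				/* stmmac_release() */
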
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
deleted file mode 100644
index 4ccd4e2..0000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*******************************************************************************
-  STMMAC external timer support.
-
-  Copyright (C) 2007-2009  STMicroelectronics Ltd
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/etherdevice.h>
-#include "stmmac_timer.h"
-
-static void stmmac_timer_handler(void *data)
-{
-	struct net_device *dev = (struct net_device *)data;
-
-	stmmac_schedule(dev);
-}
-
-#define STMMAC_TIMER_MSG(timer, freq) \
-printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
-
-#if defined(CONFIG_STMMAC_RTC_TIMER)
-#include <linux/rtc.h>
-static struct rtc_device *stmmac_rtc;
-static rtc_task_t stmmac_task;
-
-static void stmmac_rtc_start(unsigned int new_freq)
-{
-	rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
-	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
-}
-
-static void stmmac_rtc_stop(void)
-{
-	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
-}
-
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
-{
-	stmmac_task.private_data = dev;
-	stmmac_task.func = stmmac_timer_handler;
-
-	stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-	if (stmmac_rtc == NULL) {
-		pr_err("open rtc device failed\n");
-		return -ENODEV;
-	}
-
-	rtc_irq_register(stmmac_rtc, &stmmac_task);
-
-	/* Periodic mode is not supported */
-	if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
-		pr_err("set periodic failed\n");
-		rtc_irq_unregister(stmmac_rtc, &stmmac_task);
-		rtc_class_close(stmmac_rtc);
-		return -1;
-	}
-
-	STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
-
-	tm->timer_start = stmmac_rtc_start;
-	tm->timer_stop = stmmac_rtc_stop;
-
-	return 0;
-}
-
-int stmmac_close_ext_timer(void)
-{
-	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
-	rtc_irq_unregister(stmmac_rtc, &stmmac_task);
-	rtc_class_close(stmmac_rtc);
-	return 0;
-}
-
-#elif defined(CONFIG_STMMAC_TMU_TIMER)
-#include <linux/clk.h>
-#define TMU_CHANNEL "tmu2_clk"
-static struct clk *timer_clock;
-
-static void stmmac_tmu_start(unsigned int new_freq)
-{
-	clk_set_rate(timer_clock, new_freq);
-	clk_prepare_enable(timer_clock);
-}
-
-static void stmmac_tmu_stop(void)
-{
-	clk_disable_unprepare(timer_clock);
-}
-
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
-{
-	timer_clock = clk_get(NULL, TMU_CHANNEL);
-
-	if (IS_ERR(timer_clock))
-		return -1;
-
-	if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
-		timer_clock = NULL;
-		return -1;
-	}
-
-	STMMAC_TIMER_MSG("TMU2", tm->freq);
-	tm->timer_start = stmmac_tmu_start;
-	tm->timer_stop = stmmac_tmu_stop;
-
-	return 0;
-}
-
-int stmmac_close_ext_timer(void)
-{
-	clk_disable_unprepare(timer_clock);
-	tmu2_unregister_user();
-	clk_put(timer_clock);
-	return 0;
-}
-#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
deleted file mode 100644
index aea9b14..0000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
-  STMMAC external timer Header File.
-
-  Copyright (C) 2007-2009  STMicroelectronics Ltd
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-#ifndef __STMMAC_TIMER_H__
-#define __STMMAC_TIMER_H__
-
-struct stmmac_timer {
-	void (*timer_start) (unsigned int new_freq);
-	void (*timer_stop) (void);
-	unsigned int freq;
-	unsigned int enable;
-};
-
-/* Open the HW timer device and return 0 in case of success */
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
-/* Stop the timer and release it */
-int stmmac_close_ext_timer(void);
-/* Function used for scheduling task within the stmmac */
-void stmmac_schedule(struct net_device *dev);
-
-#if defined(CONFIG_STMMAC_TMU_TIMER)
-extern int tmu2_register_user(void *fnt, void *data);
-extern void tmu2_unregister_user(void);
-#endif
-
-#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 2c41894..4426151 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -60,6 +60,15 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called cpsw.
 
+config TI_CPTS
+	boolean "TI Common Platform Time Sync (CPTS) Support"
+	depends on TI_CPSW
+	select PTP_1588_CLOCK
+	---help---
+	  This driver supports the Common Platform Time Sync unit of
+	  the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
+	  and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+
 config TLAN
 	tristate "TI ThunderLAN support"
 	depends on (PCI || EISA)
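Because TI_CPTS selects PTP_1588_CLOCK, enabling hardware time stamping only requires turning on the CPSW driver and the new option; a typical .config fragment might look like:

	CONFIG_TI_CPSW=y
	CONFIG_TI_CPTS=y
	# CONFIG_PTP_1588_CLOCK is selected automatically
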
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 91bd8bb..c65148e 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,4 +8,4 @@
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o
+ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df55e240..c9714e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -24,6 +24,7 @@
 #include <linux/if_ether.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
 #include <linux/phy.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
@@ -35,6 +36,7 @@
 #include <linux/platform_data/cpsw.h>
 
 #include "cpsw_ale.h"
+#include "cpts.h"
 #include "davinci_cpdma.h"
 
 #define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
@@ -70,10 +72,37 @@
 		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
 } while (0)
 
+#define ALE_ALL_PORTS		0x7
+
 #define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
 #define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
 #define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
 
+#define CPSW_VERSION_1		0x19010a
+#define CPSW_VERSION_2		0x19010c
+
+#define HOST_PORT_NUM		0
+#define SLIVER_SIZE		0x40
+
+#define CPSW1_HOST_PORT_OFFSET	0x028
+#define CPSW1_SLAVE_OFFSET	0x050
+#define CPSW1_SLAVE_SIZE	0x040
+#define CPSW1_CPDMA_OFFSET	0x100
+#define CPSW1_STATERAM_OFFSET	0x200
+#define CPSW1_CPTS_OFFSET	0x500
+#define CPSW1_ALE_OFFSET	0x600
+#define CPSW1_SLIVER_OFFSET	0x700
+
+#define CPSW2_HOST_PORT_OFFSET	0x108
+#define CPSW2_SLAVE_OFFSET	0x200
+#define CPSW2_SLAVE_SIZE	0x100
+#define CPSW2_CPDMA_OFFSET	0x800
+#define CPSW2_STATERAM_OFFSET	0xa00
+#define CPSW2_CPTS_OFFSET	0xc00
+#define CPSW2_ALE_OFFSET	0xd00
+#define CPSW2_SLIVER_OFFSET	0xd80
+#define CPSW2_BD_OFFSET		0x2000
+
 #define CPDMA_RXTHRESH		0x0c0
 #define CPDMA_RXFREE		0x0e0
 #define CPDMA_TXHDP		0x00
@@ -81,21 +110,6 @@
 #define CPDMA_TXCP		0x40
 #define CPDMA_RXCP		0x60
 
-#define cpsw_dma_regs(base, offset)		\
-	(void __iomem *)((base) + (offset))
-#define cpsw_dma_rxthresh(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
-#define cpsw_dma_rxfree(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_RXFREE)
-#define cpsw_dma_txhdp(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_TXHDP)
-#define cpsw_dma_rxhdp(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_RXHDP)
-#define cpsw_dma_txcp(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_TXCP)
-#define cpsw_dma_rxcp(base, offset)		\
-	(void __iomem *)((base) + (offset) + CPDMA_RXCP)
-
 #define CPSW_POLL_WEIGHT	64
 #define CPSW_MIN_PACKET_SIZE	60
 #define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)
@@ -129,7 +143,7 @@
 module_param(rx_packet_max, int, 0);
 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
 
-struct cpsw_ss_regs {
+struct cpsw_wr_regs {
 	u32	id_ver;
 	u32	soft_reset;
 	u32	control;
@@ -140,26 +154,98 @@
 	u32	misc_en;
 };
 
-struct cpsw_regs {
+struct cpsw_ss_regs {
 	u32	id_ver;
 	u32	control;
 	u32	soft_reset;
 	u32	stat_port_en;
 	u32	ptype;
+	u32	soft_idle;
+	u32	thru_rate;
+	u32	gap_thresh;
+	u32	tx_start_wds;
+	u32	flow_control;
+	u32	vlan_ltype;
+	u32	ts_ltype;
+	u32	dlr_ltype;
 };
 
-struct cpsw_slave_regs {
-	u32	max_blks;
-	u32	blk_cnt;
-	u32	flow_thresh;
-	u32	port_vlan;
-	u32	tx_pri_map;
-	u32	ts_ctl;
-	u32	ts_seq_ltype;
-	u32	ts_vlan;
-	u32	sa_lo;
-	u32	sa_hi;
-};
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL       0x00 /* Control Register */
+#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
+#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
+#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
+#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
+#define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
+#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
+#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
+
+#define CTRL_TS_BITS \
+	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
+	 TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
+#define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK      (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN		BIT(0)
+#define CPSW_V1_TS_TX_EN		BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS		16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT	16
 
 struct cpsw_host_regs {
 	u32	max_blks;
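The CPSW2_CONTROL and CPSW2_TS_SEQ_MTYPE definitions above are what the CPTS hardware-timestamping path programs per slave port. A minimal sketch of switching a CPSW_VERSION_2 port to Rx timestamping of PTP event messages, using the slave_read()/slave_write() accessors introduced further down in this file (the sequence-id offset of 30 is illustrative):

	/* Sketch: Rx PTP timestamping on a CPSW_VERSION_2 slave port. */
	u32 ctrl  = slave_read(slave, CPSW2_CONTROL);
	u32 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	ctrl &= ~CTRL_ALL_TS_MASK;	/* clear any previous time-sync setup      */
	ctrl |= CTRL_RX_TS_BITS;	/* dest port/addr match, Annex D, RX_EN    */

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
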
@@ -185,7 +271,7 @@
 };
 
 struct cpsw_slave {
-	struct cpsw_slave_regs __iomem	*regs;
+	void __iomem			*regs;
 	struct cpsw_sliver_regs __iomem	*sliver;
 	int				slave_num;
 	u32				mac_control;
@@ -193,19 +279,30 @@
 	struct phy_device		*phy;
 };
 
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+	return __raw_readl(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+	__raw_writel(val, slave->regs + offset);
+}
+
 struct cpsw_priv {
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
 	struct resource			*cpsw_res;
-	struct resource			*cpsw_ss_res;
+	struct resource			*cpsw_wr_res;
 	struct napi_struct		napi;
 	struct device			*dev;
 	struct cpsw_platform_data	data;
-	struct cpsw_regs __iomem	*regs;
-	struct cpsw_ss_regs __iomem	*ss_regs;
+	struct cpsw_ss_regs __iomem	*regs;
+	struct cpsw_wr_regs __iomem	*wr_regs;
 	struct cpsw_host_regs __iomem	*host_port_regs;
 	u32				msg_enable;
+	u32				version;
 	struct net_device_stats		stats;
 	int				rx_packet_max;
 	int				host_port;
@@ -218,6 +315,7 @@
 	/* snapshot of IRQ numbers */
 	u32 irqs_table[4];
 	u32 num_irqs;
+	struct cpts cpts;
 };
 
 #define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +326,34 @@
 			(func)((priv)->slaves + idx, ##arg);	\
 	} while (0)
 
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	if (ndev->flags & IFF_PROMISC) {
+		/* Enable promiscuous mode */
+		dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+		return;
+	}
+
+	/* Clear all mcast from ALE */
+	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+
+	if (!netdev_mc_empty(ndev)) {
+		struct netdev_hw_addr *ha;
+
+		/* program multicast address list into ALE register */
+		netdev_for_each_mc_addr(ha, ndev) {
+			cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
+				ALE_ALL_PORTS << priv->host_port, 0, 0);
+		}
+	}
+}
+
 static void cpsw_intr_enable(struct cpsw_priv *priv)
 {
-	__raw_writel(0xFF, &priv->ss_regs->tx_en);
-	__raw_writel(0xFF, &priv->ss_regs->rx_en);
+	__raw_writel(0xFF, &priv->wr_regs->tx_en);
+	__raw_writel(0xFF, &priv->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(priv->dma, true);
 	return;
@@ -239,8 +361,8 @@
 
 static void cpsw_intr_disable(struct cpsw_priv *priv)
 {
-	__raw_writel(0, &priv->ss_regs->tx_en);
-	__raw_writel(0, &priv->ss_regs->rx_en);
+	__raw_writel(0, &priv->wr_regs->tx_en);
+	__raw_writel(0, &priv->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(priv->dma, false);
 	return;
@@ -254,6 +376,7 @@
 
 	if (unlikely(netif_queue_stopped(ndev)))
 		netif_start_queue(ndev);
+	cpts_tx_timestamp(&priv->cpts, skb);
 	priv->stats.tx_packets++;
 	priv->stats.tx_bytes += len;
 	dev_kfree_skb_any(skb);
@@ -274,6 +397,7 @@
 	}
 	if (likely(status >= 0)) {
 		skb_put(skb, len);
+		cpts_rx_timestamp(&priv->cpts, skb);
 		skb->protocol = eth_type_trans(skb, ndev);
 		netif_receive_skb(skb);
 		priv->stats.rx_bytes += len;
@@ -359,8 +483,8 @@
 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
 			       struct cpsw_priv *priv)
 {
-	__raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
-	__raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
+	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
 }
 
 static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +570,15 @@
 
 	/* setup priority mapping */
 	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
-	__raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);
+
+	switch (priv->version) {
+	case CPSW_VERSION_1:
+		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		break;
+	case CPSW_VERSION_2:
+		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		break;
+	}
 
 	/* setup max packet size, and mac address */
 	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -505,7 +637,7 @@
 
 	pm_runtime_get_sync(&priv->pdev->dev);
 
-	reg = __raw_readl(&priv->regs->id_ver);
+	reg = priv->version;
 
 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
 		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -566,12 +698,12 @@
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
-	cpsw_intr_disable(priv);
-	cpdma_ctlr_int_ctrl(priv->dma, false);
-	cpdma_ctlr_stop(priv->dma);
 	netif_stop_queue(priv->ndev);
 	napi_disable(&priv->napi);
 	netif_carrier_off(priv->ndev);
+	cpsw_intr_disable(priv);
+	cpdma_ctlr_int_ctrl(priv->dma, false);
+	cpdma_ctlr_stop(priv->dma);
 	cpsw_ale_stop(priv->ale);
 	for_each_slave(priv, cpsw_slave_stop, priv);
 	pm_runtime_put_sync(&priv->pdev->dev);
@@ -592,6 +724,11 @@
 		return NETDEV_TX_OK;
 	}
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	skb_tx_timestamp(skb);
+
 	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
 				skb->len, GFP_KERNEL);
 	if (unlikely(ret != 0)) {
@@ -629,6 +766,129 @@
 		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
 }
 
+#ifdef CONFIG_TI_CPTS
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+	struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+	u32 ts_en, seq_id;
+
+	if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+		slave_write(slave, 0, CPSW1_TS_CTL);
+		return;
+	}
+
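+	/* 30 is the byte offset of the sequenceId field in the PTP header */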
+	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+	if (priv->cpts.tx_enable)
+		ts_en |= CPSW_V1_TS_TX_EN;
+
+	if (priv->cpts.rx_enable)
+		ts_en |= CPSW_V1_TS_RX_EN;
+
+	slave_write(slave, ts_en, CPSW1_TS_CTL);
+	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+	struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+	u32 ctrl, mtype;
+
+	ctrl = slave_read(slave, CPSW2_CONTROL);
+	ctrl &= ~CTRL_ALL_TS_MASK;
+
+	if (priv->cpts.tx_enable)
+		ctrl |= CTRL_TX_TS_BITS;
+
+	if (priv->cpts.rx_enable)
+		ctrl |= CTRL_RX_TS_BITS;
+
+	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+	slave_write(slave, ctrl, CPSW2_CONTROL);
+	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+}
+
+static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	struct cpsw_priv *priv = netdev_priv(dev);
+	struct cpts *cpts = &priv->cpts;
+	struct hwtstamp_config cfg;
+
+	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (cfg.flags)
+		return -EINVAL;
+
+	switch (cfg.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		cpts->tx_enable = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		cpts->tx_enable = 1;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (cfg.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		cpts->rx_enable = 0;
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		return -ERANGE;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		cpts->rx_enable = 1;
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (priv->version) {
+	case CPSW_VERSION_1:
+		cpsw_hwtstamp_v1(priv);
+		break;
+	case CPSW_VERSION_2:
+		cpsw_hwtstamp_v2(priv);
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	if (!netif_running(dev))
+		return -EINVAL;
+
+#ifdef CONFIG_TI_CPTS
+	if (cmd == SIOCSHWTSTAMP)
+		return cpsw_hwtstamp_ioctl(dev, req);
+#endif
+	return -ENOTSUPP;
+}
+
 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +929,12 @@
 	.ndo_stop		= cpsw_ndo_stop,
 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
 	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
+	.ndo_do_ioctl		= cpsw_ndo_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
 	.ndo_get_stats		= cpsw_ndo_get_stats,
+	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
 #endif
@@ -699,22 +961,56 @@
 	priv->msg_enable = value;
 }
 
+static int cpsw_get_ts_info(struct net_device *ndev,
+			    struct ethtool_ts_info *info)
+{
+#ifdef CONFIG_TI_CPTS
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = priv->cpts.phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+#else
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+	info->tx_types = 0;
+	info->rx_filters = 0;
+#endif
+	return 0;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_drvinfo	= cpsw_get_drvinfo,
 	.get_msglevel	= cpsw_get_msglevel,
 	.set_msglevel	= cpsw_set_msglevel,
 	.get_link	= ethtool_op_get_link,
+	.get_ts_info	= cpsw_get_ts_info,
 };
 
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
 {
 	void __iomem		*regs = priv->regs;
 	int			slave_num = slave->slave_num;
 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
 
 	slave->data	= data;
-	slave->regs	= regs + data->slave_reg_ofs;
-	slave->sliver	= regs + data->sliver_reg_ofs;
+	slave->regs	= regs + slave_reg_ofs;
+	slave->sliver	= regs + sliver_reg_ofs;
 }
 
 static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -734,6 +1030,27 @@
 	}
 	data->slaves = prop;
 
+	if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
+		pr_err("Missing cpts_active_slave property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->cpts_active_slave = prop;
+
+	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
+		pr_err("Missing cpts_clock_mult property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->cpts_clock_mult = prop;
+
+	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
+		pr_err("Missing cpts_clock_shift property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->cpts_clock_shift = prop;
+
 	data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
 				   data->slaves, GFP_KERNEL);
 	if (!data->slave_data) {
@@ -741,8 +1058,6 @@
 		return -EINVAL;
 	}
 
-	data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
-
 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
 		pr_err("Missing cpdma_channels property in the DT.\n");
 		ret = -EINVAL;
@@ -750,34 +1065,6 @@
 	}
 	data->channels = prop;
 
-	if (of_property_read_u32(node, "host_port_no", &prop)) {
-		pr_err("Missing host_port_no property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->host_port_num = prop;
-
-	if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
-		pr_err("Missing cpdma_reg_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->cpdma_reg_ofs = prop;
-
-	if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
-		pr_err("Missing cpdma_sram_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->cpdma_sram_ofs = prop;
-
-	if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
-		pr_err("Missing ale_reg_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->ale_reg_ofs = prop;
-
 	if (of_property_read_u32(node, "ale_entries", &prop)) {
 		pr_err("Missing ale_entries property in the DT.\n");
 		ret = -EINVAL;
@@ -785,27 +1072,6 @@
 	}
 	data->ale_entries = prop;
 
-	if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
-		pr_err("Missing host_port_reg_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->host_port_reg_ofs = prop;
-
-	if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
-		pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->hw_stats_reg_ofs = prop;
-
-	if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
-		pr_err("Missing bd_ram_ofs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	data->bd_ram_ofs = prop;
-
 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
 		pr_err("Missing bd_ram_size property in the DT.\n");
 		ret = -EINVAL;
@@ -827,33 +1093,34 @@
 	}
 	data->mac_control = prop;
 
-	for_each_child_of_node(node, slave_node) {
-		struct cpsw_slave_data *slave_data = data->slave_data + i;
-		const char *phy_id = NULL;
-		const void *mac_addr = NULL;
+	/*
+	 * Populate all the child nodes here...
+	 */
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	/* We do not want to force this, as the node may not have any children */
+	if (ret)
+		pr_warn("Doesn't have any child node\n");
 
-		if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+	for_each_node_by_name(slave_node, "slave") {
+		struct cpsw_slave_data *slave_data = data->slave_data + i;
+		const void *mac_addr = NULL;
+		u32 phyid;
+		int lenp;
+		const __be32 *parp;
+		struct device_node *mdio_node;
+		struct platform_device *mdio;
+
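+		/*
+		 * The "phy_id" property is expected to hold a <phandle addr>
+		 * pair, for example phy_id = <&davinci_mdio 0>; (illustrative
+		 * binding: a phandle to the MDIO node plus the PHY address).
+		 */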
+		parp = of_get_property(slave_node, "phy_id", &lenp);
+		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			pr_err("Missing slave[%d] phy_id property\n", i);
 			ret = -EINVAL;
 			goto error_ret;
 		}
-		slave_data->phy_id = phy_id;
-
-		if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
-			pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
-			ret = -EINVAL;
-			goto error_ret;
-		}
-		slave_data->slave_reg_ofs = prop;
-
-		if (of_property_read_u32(slave_node, "sliver_reg_ofs",
-					 &prop)) {
-			pr_err("Missing slave[%d] sliver_reg_ofs property\n",
-				i);
-			ret = -EINVAL;
-			goto error_ret;
-		}
-		slave_data->sliver_reg_ofs = prop;
+		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+		phyid = be32_to_cpup(parp+1);
+		mdio = of_find_device_by_node(mdio_node);
+		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+			 PHY_ID_FMT, mdio->name, phyid);
 
 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr)
@@ -876,8 +1143,9 @@
 	struct cpsw_priv		*priv;
 	struct cpdma_params		dma_params;
 	struct cpsw_ale_params		ale_params;
-	void __iomem			*regs;
+	void __iomem			*ss_regs, *wr_regs;
 	struct resource			*res;
+	u32 slave_offset, sliver_offset, slave_size;
 	int ret = 0, i, k = 0;
 
 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
@@ -895,6 +1163,11 @@
 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 	priv->rx_packet_max = max(rx_packet_max, 128);
 
+	/*
+	 * This may be required here for child devices.
+	 */
+	pm_runtime_enable(&pdev->dev);
+
 	if (cpsw_probe_dt(&priv->data, pdev)) {
 		pr_err("cpsw: platform data missing\n");
 		ret = -ENODEV;
@@ -921,7 +1194,6 @@
 	for (i = 0; i < data->slaves; i++)
 		priv->slaves[i].slave_num = i;
 
-	pm_runtime_enable(&pdev->dev);
 	priv->clk = clk_get(&pdev->dev, "fck");
 	if (IS_ERR(priv->clk)) {
 		dev_err(&pdev->dev, "fck is not found\n");
@@ -935,63 +1207,86 @@
 		ret = -ENOENT;
 		goto clean_clk_ret;
 	}
-
 	if (!request_mem_region(priv->cpsw_res->start,
 				resource_size(priv->cpsw_res), ndev->name)) {
 		dev_err(priv->dev, "failed request i/o region\n");
 		ret = -ENXIO;
 		goto clean_clk_ret;
 	}
-
-	regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
-	if (!regs) {
+	ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
+	if (!ss_regs) {
 		dev_err(priv->dev, "unable to map i/o region\n");
 		goto clean_cpsw_iores_ret;
 	}
-	priv->regs = regs;
-	priv->host_port = data->host_port_num;
-	priv->host_port_regs = regs + data->host_port_reg_ofs;
+	priv->regs = ss_regs;
+	priv->version = __raw_readl(&priv->regs->id_ver);
+	priv->host_port = HOST_PORT_NUM;
 
-	priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!priv->cpsw_ss_res) {
+	priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!priv->cpsw_wr_res) {
 		dev_err(priv->dev, "error getting i/o resource\n");
 		ret = -ENOENT;
-		goto clean_clk_ret;
+		goto clean_iomap_ret;
 	}
-
-	if (!request_mem_region(priv->cpsw_ss_res->start,
-			resource_size(priv->cpsw_ss_res), ndev->name)) {
+	if (!request_mem_region(priv->cpsw_wr_res->start,
+			resource_size(priv->cpsw_wr_res), ndev->name)) {
 		dev_err(priv->dev, "failed request i/o region\n");
 		ret = -ENXIO;
-		goto clean_clk_ret;
+		goto clean_iomap_ret;
 	}
-
-	regs = ioremap(priv->cpsw_ss_res->start,
-				resource_size(priv->cpsw_ss_res));
-	if (!regs) {
+	wr_regs = ioremap(priv->cpsw_wr_res->start,
+				resource_size(priv->cpsw_wr_res));
+	if (!wr_regs) {
 		dev_err(priv->dev, "unable to map i/o region\n");
-		goto clean_cpsw_ss_iores_ret;
+		goto clean_cpsw_wr_iores_ret;
 	}
-	priv->ss_regs = regs;
-
-	for_each_slave(priv, cpsw_slave_init, priv);
+	priv->wr_regs = wr_regs;
 
 	memset(&dma_params, 0, sizeof(dma_params));
+	memset(&ale_params, 0, sizeof(ale_params));
+
+	switch (priv->version) {
+	case CPSW_VERSION_1:
+		priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+		priv->cpts.reg       = ss_regs + CPSW1_CPTS_OFFSET;
+		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
+		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
+		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
+		slave_offset         = CPSW1_SLAVE_OFFSET;
+		slave_size           = CPSW1_SLAVE_SIZE;
+		sliver_offset        = CPSW1_SLIVER_OFFSET;
+		dma_params.desc_mem_phys = 0;
+		break;
+	case CPSW_VERSION_2:
+		priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+		priv->cpts.reg       = ss_regs + CPSW2_CPTS_OFFSET;
+		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
+		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
+		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
+		slave_offset         = CPSW2_SLAVE_OFFSET;
+		slave_size           = CPSW2_SLAVE_SIZE;
+		sliver_offset        = CPSW2_SLIVER_OFFSET;
+		dma_params.desc_mem_phys =
+			(u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+		break;
+	default:
+		dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
+		ret = -ENODEV;
+		goto clean_cpsw_wr_iores_ret;
+	}
+	for (i = 0; i < priv->data.slaves; i++) {
+		struct cpsw_slave *slave = &priv->slaves[i];
+		cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
+		slave_offset  += slave_size;
+		sliver_offset += SLIVER_SIZE;
+	}
+
 	dma_params.dev		= &pdev->dev;
-	dma_params.dmaregs	= cpsw_dma_regs((u32)priv->regs,
-						data->cpdma_reg_ofs);
-	dma_params.rxthresh	= cpsw_dma_rxthresh((u32)priv->regs,
-						    data->cpdma_reg_ofs);
-	dma_params.rxfree	= cpsw_dma_rxfree((u32)priv->regs,
-						  data->cpdma_reg_ofs);
-	dma_params.txhdp	= cpsw_dma_txhdp((u32)priv->regs,
-						 data->cpdma_sram_ofs);
-	dma_params.rxhdp	= cpsw_dma_rxhdp((u32)priv->regs,
-						 data->cpdma_sram_ofs);
-	dma_params.txcp		= cpsw_dma_txcp((u32)priv->regs,
-						data->cpdma_sram_ofs);
-	dma_params.rxcp		= cpsw_dma_rxcp((u32)priv->regs,
-						data->cpdma_sram_ofs);
+	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
+	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
+	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
+	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
+	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
 
 	dma_params.num_chan		= data->channels;
 	dma_params.has_soft_reset	= true;
@@ -999,16 +1294,13 @@
 	dma_params.desc_mem_size	= data->bd_ram_size;
 	dma_params.desc_align		= 16;
 	dma_params.has_ext_regs		= true;
-	dma_params.desc_mem_phys        = data->no_bd_ram ? 0 :
-			(u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
-	dma_params.desc_hw_addr         = data->hw_ram_addr ?
-			data->hw_ram_addr : dma_params.desc_mem_phys ;
+	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
 
 	priv->dma = cpdma_ctlr_create(&dma_params);
 	if (!priv->dma) {
 		dev_err(priv->dev, "error initializing dma\n");
 		ret = -ENOMEM;
-		goto clean_iomap_ret;
+		goto clean_wr_iomap_ret;
 	}
 
 	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -1022,10 +1314,7 @@
 		goto clean_dma_ret;
 	}
 
-	memset(&ale_params, 0, sizeof(ale_params));
 	ale_params.dev			= &ndev->dev;
-	ale_params.ale_regs		= (void *)((u32)priv->regs) +
-						((u32)data->ale_reg_ofs);
 	ale_params.ale_ageout		= ale_ageout;
 	ale_params.ale_entries		= data->ale_entries;
 	ale_params.ale_ports		= data->slaves;
@@ -1072,6 +1361,10 @@
 		goto clean_irq_ret;
 	}
 
+	if (cpts_register(&pdev->dev, &priv->cpts,
+			  data->cpts_clock_mult, data->cpts_clock_shift))
+		dev_err(priv->dev, "error registering cpts device\n");
+
 	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
 		  priv->cpsw_res->start, ndev->irq);
 
@@ -1085,11 +1378,13 @@
 	cpdma_chan_destroy(priv->txch);
 	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
+clean_wr_iomap_ret:
+	iounmap(priv->wr_regs);
+clean_cpsw_wr_iores_ret:
+	release_mem_region(priv->cpsw_wr_res->start,
+			   resource_size(priv->cpsw_wr_res));
 clean_iomap_ret:
 	iounmap(priv->regs);
-clean_cpsw_ss_iores_ret:
-	release_mem_region(priv->cpsw_ss_res->start,
-			   resource_size(priv->cpsw_ss_res));
 clean_cpsw_iores_ret:
 	release_mem_region(priv->cpsw_res->start,
 			   resource_size(priv->cpsw_res));
@@ -1111,6 +1406,7 @@
 	pr_info("removing device");
 	platform_set_drvdata(pdev, NULL);
 
+	cpts_unregister(&priv->cpts);
 	free_irq(ndev->irq, priv);
 	cpsw_ale_destroy(priv->ale);
 	cpdma_chan_destroy(priv->txch);
@@ -1119,8 +1415,9 @@
 	iounmap(priv->regs);
 	release_mem_region(priv->cpsw_res->start,
 			   resource_size(priv->cpsw_res));
-	release_mem_region(priv->cpsw_ss_res->start,
-			   resource_size(priv->cpsw_ss_res));
+	iounmap(priv->wr_regs);
+	release_mem_region(priv->cpsw_wr_res->start,
+			   resource_size(priv->cpsw_wr_res));
 	pm_runtime_disable(&pdev->dev);
 	clk_put(priv->clk);
 	kfree(priv->slaves);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ca0d48a..0e9ccc2 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/stat.h>
 #include <linux/sysfs.h>
+#include <linux/etherdevice.h>
 
 #include "cpsw_ale.h"
 
@@ -211,10 +212,34 @@
 	mask &= ~port_mask;
 
 	/* free if only remaining port is host port */
-	if (mask == BIT(ale->params.ale_ports))
-		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-	else
+	if (mask)
 		cpsw_ale_set_port_mask(ale_entry, mask);
+	else
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int ret, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		ret = cpsw_ale_get_entry_type(ale_entry);
+		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+			continue;
+
+		if (cpsw_ale_get_mcast(ale_entry)) {
+			u8 addr[6];
+
+			cpsw_ale_get_addr(ale_entry, addr);
+			if (!is_broadcast_ether_addr(addr))
+				cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+		}
+
+		cpsw_ale_write(ale, idx, ale_entry);
+	}
+	return 0;
 }
 
 static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a95b37be..2bd09cb 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,7 @@
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
 int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 0000000..3377667
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,427 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <plat/clock.h>
+
+#include "cpts.h"
+
+#ifdef CONFIG_TI_CPTS
+
+static struct sock_filter ptp_filter[] = {
+	PTP_FILTER
+};
+
+#define cpts_read32(c, r)	__raw_readl(&c->reg->r)
+#define cpts_write32(c, v, r)	__raw_writel(v, &c->reg->r)
+
+static int event_expired(struct cpts_event *event)
+{
+	return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+	u32 r = cpts_read32(cpts, intstat_raw);
+
+	if (r & TS_PEND_RAW) {
+		*high = cpts_read32(cpts, event_high);
+		*low  = cpts_read32(cpts, event_low);
+		cpts_write32(cpts, EVENT_POP, event_pop);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Returns zero if a matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+	int i, type = -1;
+	u32 hi, lo;
+	struct cpts_event *event;
+
+	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+		if (cpts_fifo_pop(cpts, &hi, &lo))
+			break;
+		if (list_empty(&cpts->pool)) {
+			pr_err("cpts: event pool is empty\n");
+			return -1;
+		}
+		event = list_first_entry(&cpts->pool, struct cpts_event, list);
+		event->tmo = jiffies + 2;
+		event->high = hi;
+		event->low = lo;
+		type = event_type(event);
+		switch (type) {
+		case CPTS_EV_PUSH:
+		case CPTS_EV_RX:
+		case CPTS_EV_TX:
+			list_del_init(&event->list);
+			list_add_tail(&event->list, &cpts->events);
+			break;
+		case CPTS_EV_ROLL:
+		case CPTS_EV_HALF:
+		case CPTS_EV_HW:
+			break;
+		default:
+			pr_err("cpts: unknown event type\n");
+			break;
+		}
+		if (type == match)
+			break;
+	}
+	return type == match ? 0 : -1;
+}
+
+static cycle_t cpts_systim_read(const struct cyclecounter *cc)
+{
+	u64 val = 0;
+	struct cpts_event *event;
+	struct list_head *this, *next;
+	struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+	cpts_write32(cpts, TS_PUSH, ts_push);
+	if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
+		pr_err("cpts: unable to obtain a time stamp\n");
+
+	list_for_each_safe(this, next, &cpts->events) {
+		event = list_entry(this, struct cpts_event, list);
+		if (event_type(event) == CPTS_EV_PUSH) {
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			val = event->low;
+			break;
+		}
+	}
+
+	return val;
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 adj;
+	u32 diff, mult;
+	int neg_adj = 0;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
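+	/*
+	 * Scale the nominal multiplier by ppb parts per billion:
+	 * diff = cc_mult * |ppb| / 1e9, then cc.mult = cc_mult +/- diff.
+	 */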
+	mult = cpts->cc_mult;
+	adj = mult;
+	adj *= ppb;
+	diff = div_u64(adj, 1000000000ULL);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+
+	timecounter_read(&cpts->tc);
+
+	cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
+
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	s64 now;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	now = timecounter_read(&cpts->tc);
+	now += delta;
+	timecounter_init(&cpts->tc, &cpts->cc, now);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
+
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	ns = timecounter_read(&cpts->tc);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+			    const struct timespec *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	timecounter_init(&cpts->tc, &cpts->cc, ns);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+			   struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info cpts_info = {
+	.owner		= THIS_MODULE,
+	.name		= "CPTS timer",
+	.max_adj	= 1000000,
+	.n_ext_ts	= 0,
+	.pps		= 0,
+	.adjfreq	= cpts_ptp_adjfreq,
+	.adjtime	= cpts_ptp_adjtime,
+	.gettime	= cpts_ptp_gettime,
+	.settime	= cpts_ptp_settime,
+	.enable		= cpts_ptp_enable,
+};
+
+static void cpts_overflow_check(struct work_struct *work)
+{
+	struct timespec ts;
+	struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
+
+	cpts_write32(cpts, CPTS_EN, control);
+	cpts_write32(cpts, TS_PEND_EN, int_enable);
+	cpts_ptp_gettime(&cpts->info, &ts);
+	pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+}
+
+#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
+
+static void cpts_clk_init(struct cpts *cpts)
+{
+	cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+	if (IS_ERR(cpts->refclk)) {
+		pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+		cpts->refclk = NULL;
+		return;
+	}
+	clk_enable(cpts->refclk);
+	cpts->freq = cpts->refclk->recalc(cpts->refclk);
+}
+
+static void cpts_clk_release(struct cpts *cpts)
+{
+	clk_disable(cpts->refclk);
+	clk_put(cpts->refclk);
+}
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+		      u16 ts_seqid, u8 ts_msgtype)
+{
+	u16 *seqid;
+	unsigned int offset;
+	u8 *msgtype, *data = skb->data;
+
+	switch (ptp_class) {
+	case PTP_CLASS_V1_IPV4:
+	case PTP_CLASS_V2_IPV4:
+		offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+		break;
+	case PTP_CLASS_V1_IPV6:
+	case PTP_CLASS_V2_IPV6:
+		offset = OFF_PTP6;
+		break;
+	case PTP_CLASS_V2_L2:
+		offset = ETH_HLEN;
+		break;
+	case PTP_CLASS_V2_VLAN:
+		offset = ETH_HLEN + VLAN_HLEN;
+		break;
+	default:
+		return 0;
+	}
+
+	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+		return 0;
+
+	if (unlikely(ptp_class & PTP_CLASS_V1))
+		msgtype = data + offset + OFF_PTP_CONTROL;
+	else
+		msgtype = data + offset;
+
+	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+	return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+}
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+{
+	u64 ns = 0;
+	struct cpts_event *event;
+	struct list_head *this, *next;
+	unsigned int class = sk_run_filter(skb, ptp_filter);
+	unsigned long flags;
+	u16 seqid;
+	u8 mtype;
+
+	if (class == PTP_CLASS_NONE)
+		return 0;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	cpts_fifo_read(cpts, CPTS_EV_PUSH);
+	list_for_each_safe(this, next, &cpts->events) {
+		event = list_entry(this, struct cpts_event, list);
+		if (event_expired(event)) {
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			continue;
+		}
+		mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+		seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+		if (ev_type == event_type(event) &&
+		    cpts_match(skb, class, seqid, mtype)) {
+			ns = timecounter_cyc2time(&cpts->tc, event->low);
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	u64 ns;
+	struct skb_shared_hwtstamps *ssh;
+
+	if (!cpts->rx_enable)
+		return;
+	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+	if (!ns)
+		return;
+	ssh = skb_hwtstamps(skb);
+	memset(ssh, 0, sizeof(*ssh));
+	ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	u64 ns;
+	struct skb_shared_hwtstamps ssh;
+
+	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+		return;
+	ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
+	if (!ns)
+		return;
+	memset(&ssh, 0, sizeof(ssh));
+	ssh.hwtstamp = ns_to_ktime(ns);
+	skb_tstamp_tx(skb, &ssh);
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+int cpts_register(struct device *dev, struct cpts *cpts,
+		  u32 mult, u32 shift)
+{
+#ifdef CONFIG_TI_CPTS
+	int err, i;
+	unsigned long flags;
+
+	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+		pr_err("cpts: bad ptp filter\n");
+		return -EINVAL;
+	}
+	cpts->info = cpts_info;
+	cpts->clock = ptp_clock_register(&cpts->info, dev);
+	if (IS_ERR(cpts->clock)) {
+		err = PTR_ERR(cpts->clock);
+		cpts->clock = NULL;
+		return err;
+	}
+	spin_lock_init(&cpts->lock);
+
+	cpts->cc.read = cpts_systim_read;
+	cpts->cc.mask = CLOCKSOURCE_MASK(32);
+	cpts->cc_mult = mult;
+	cpts->cc.mult = mult;
+	cpts->cc.shift = shift;
+
+	INIT_LIST_HEAD(&cpts->events);
+	INIT_LIST_HEAD(&cpts->pool);
+	for (i = 0; i < CPTS_MAX_EVENTS; i++)
+		list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+	cpts_clk_init(cpts);
+	cpts_write32(cpts, CPTS_EN, control);
+	cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+
+	cpts->phc_index = ptp_clock_index(cpts->clock);
+#endif
+	return 0;
+}
+
+void cpts_unregister(struct cpts *cpts)
+{
+#ifdef CONFIG_TI_CPTS
+	if (cpts->clock) {
+		ptp_clock_unregister(cpts->clock);
+		cancel_delayed_work_sync(&cpts->overflow_work);
+	}
+	if (cpts->refclk)
+		cpts_clk_release(cpts);
+#endif
+}
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 0000000..e1bba3a
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,146 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+
+struct cpsw_cpts {
+	u32 idver;                /* Identification and version */
+	u32 control;              /* Time sync control */
+	u32 res1;
+	u32 ts_push;              /* Time stamp event push */
+	u32 ts_load_val;          /* Time stamp load value */
+	u32 ts_load_en;           /* Time stamp load enable */
+	u32 res2[2];
+	u32 intstat_raw;          /* Time sync interrupt status raw */
+	u32 intstat_masked;       /* Time sync interrupt status masked */
+	u32 int_enable;           /* Time sync interrupt enable */
+	u32 res3;
+	u32 event_pop;            /* Event interrupt pop */
+	u32 event_low;            /* 32 Bit Event Time Stamp */
+	u32 event_high;           /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT       (16)    /* TX Identification Value */
+#define TX_IDENT_MASK        (0xffff)
+#define RTL_VER_SHIFT        (11)    /* RTL Version Value */
+#define RTL_VER_MASK         (0x1f)
+#define MAJOR_VER_SHIFT      (8)     /* Major Version Value */
+#define MAJOR_VER_MASK       (0x7)
+#define MINOR_VER_SHIFT      (0)     /* Minor Version Value */
+#define MINOR_VER_MASK       (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN       (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN       (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN       (1<<9)  /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN       (1<<8)  /* Hardware push 1 enable */
+#define INT_TEST             (1<<1)  /* Interrupt Test */
+#define CPTS_EN              (1<<0)  /* Time Sync Enable */
+
+/*
+ * Definitions for the single bit registers:
+ * TS_PUSH TS_LOAD_EN  INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH             (1<<0)  /* Time stamp event push */
+#define TS_LOAD_EN          (1<<0)  /* Time Stamp Load */
+#define TS_PEND_RAW         (1<<0)  /* int read (before enable) */
+#define TS_PEND             (1<<0)  /* masked interrupt read (after enable) */
+#define TS_PEND_EN          (1<<0)  /* masked interrupt enable */
+#define EVENT_POP           (1<<0)  /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT    (24)    /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK     (0x1f)
+#define EVENT_TYPE_SHIFT     (20)    /* Time sync event type */
+#define EVENT_TYPE_MASK      (0xf)
+#define MESSAGE_TYPE_SHIFT   (16)    /* PTP message type */
+#define MESSAGE_TYPE_MASK    (0xf)
+#define SEQUENCE_ID_SHIFT    (0)     /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK     (0xffff)
+
+enum {
+	CPTS_EV_PUSH, /* Time Stamp Push Event */
+	CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+	CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+	CPTS_EV_HW,   /* Hardware Time Stamp Push Event */
+	CPTS_EV_RX,   /* Ethernet Receive Event */
+	CPTS_EV_TX,   /* Ethernet Transmit Event */
+};
+
+/* This covers any input clock up to about 500 MHz. */
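+/*
+ * A 32-bit counter running at 500 MHz wraps after 2^32 / 5e8 ~= 8.6 seconds,
+ * so checking at least once every 8 seconds (HZ * 8 jiffies) guarantees the
+ * timecounter never misses a rollover.
+ */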
+#define CPTS_OVERFLOW_PERIOD (HZ * 8)
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+	struct list_head list;
+	unsigned long tmo;
+	u32 high;
+	u32 low;
+};
+
+struct cpts {
+	struct cpsw_cpts __iomem *reg;
+	int tx_enable;
+	int rx_enable;
+#ifdef CONFIG_TI_CPTS
+	struct ptp_clock_info info;
+	struct ptp_clock *clock;
+	spinlock_t lock; /* protects time registers */
+	u32 cc_mult; /* for the nominal frequency */
+	struct cyclecounter cc;
+	struct timecounter tc;
+	struct delayed_work overflow_work;
+	int phc_index;
+	struct clk *refclk;
+	unsigned long freq;
+	struct list_head events;
+	struct list_head pool;
+	struct cpts_event pool_data[CPTS_MAX_EVENTS];
+#endif
+};
+
+#ifdef CONFIG_TI_CPTS
+extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+#else
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+#endif
+
+extern int cpts_register(struct device *dev, struct cpts *cpts,
+			 u32 mult, u32 shift);
+extern void cpts_unregister(struct cpts *cpts);
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 51a96db..ae74280 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -465,7 +465,7 @@
 	u32 ctrl;
 
 	spin_lock(&data->lock);
-	pm_runtime_put_sync(data->dev);
+	pm_runtime_get_sync(data->dev);
 
 	/* restart the scan state machine */
 	ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 961c832..72b775f 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -452,7 +452,7 @@
 	if (rsn)
 		*buf++ = WLAN_EID_RSN;
 	else
-		*buf++ = WLAN_EID_GENERIC;
+		*buf++ = WLAN_EID_VENDOR_SPECIFIC;
 
 	/* length field; set later */
 	buf++;
@@ -540,7 +540,7 @@
 			break;
 
 		switch (item_id) {
-		case WLAN_EID_GENERIC:
+		case WLAN_EID_VENDOR_SPECIFIC:
 			if ((OUI_LEN + 1 <= item_len) &&
 			    !memcmp(pos, wpa_oui, OUI_LEN) &&
 			    pos[OUI_LEN] == 0x01) {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2c08bf6..7daf92e 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -580,8 +580,6 @@
 	struct w5100_priv *priv = netdev_priv(ndev);
 
 	netif_info(priv, ifup, ndev, "enabling\n");
-	if (!is_valid_ether_addr(ndev->dev_addr))
-		return -EINVAL;
 	w5100_hw_start(priv);
 	napi_enable(&priv->napi);
 	netif_start_queue(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 88943d9..bd9eec6 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -500,8 +500,6 @@
 	struct w5300_priv *priv = netdev_priv(ndev);
 
 	netif_info(priv, ifup, ndev, "enabling\n");
-	if (!is_valid_ether_addr(ndev->dev_addr))
-		return -EINVAL;
 	w5300_hw_start(priv);
 	napi_enable(&priv->napi);
 	netif_start_queue(ndev);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 928148c..7fdeb52 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -363,11 +363,6 @@
 
 	rndis_pkt = &msg->msg.pkt;
 
-	/*
-	 * FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
-	 * netvsc packet (ie TotalDataBufferLength != MessageLength)
-	 */
-
 	/* Remove the rndis header and pass it back up the stack */
 	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4b746d9..945360a 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -33,11 +33,7 @@
 
 #define DRIVER_NAME "sh_irda"
 
-#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
-#define __IRDARAM_LEN	0x13FF
-#else
 #define __IRDARAM_LEN	0x1039
-#endif
 
 #define IRTMR		0x1F00 /* Transfer mode */
 #define IRCFR		0x1F02 /* Configuration */
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index b332112..6989ebe 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -56,6 +56,10 @@
 module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
 MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
 
+static bool oops_only = false;
+module_param(oops_only, bool, 0600);
+MODULE_PARM_DESC(oops_only, "Only log oops messages");
+
 #ifndef	MODULE
 static int __init option_setup(char *opt)
 {
@@ -683,6 +687,8 @@
 	struct netconsole_target *nt;
 	const char *tmp;
 
+	if (oops_only && !oops_in_progress)
+		return;
 	/* Avoid taking lock and disabling interrupts unnecessarily */
 	if (list_empty(&target_list))
 		return;
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 81c7bc0..383e833 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,18 +150,24 @@
 	.name		= "Davicom DM9161E",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
 	.read_status	= genphy_read_status,
+	.ack_interrupt	= dm9161_ack_interrupt,
+	.config_intr	= dm9161_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= 0x0181b8a0,
 	.name		= "Davicom DM9161A",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
 	.read_status	= genphy_read_status,
+	.ack_interrupt	= dm9161_ack_interrupt,
+	.config_intr	= dm9161_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= 0x00181b80,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c1ef300..044b532 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -431,10 +431,24 @@
 
 #endif /* CONFIG_PM */
 
+static ssize_t
+phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct phy_device *phydev = to_phy_device(dev);
+
+	return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+}
+
+static struct device_attribute mdio_dev_attrs[] = {
+	__ATTR_RO(phy_id),
+	__ATTR_NULL
+};
+
 struct bus_type mdio_bus_type = {
 	.name		= "mdio_bus",
 	.match		= mdio_bus_match,
 	.pm		= MDIO_BUS_PM_OPS,
+	.dev_attrs	= mdio_dev_attrs,
 };
 EXPORT_SYMBOL(mdio_bus_type);
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2165d5f..b983596 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -127,6 +127,39 @@
 	return 0;
 }
 
+#define KSZ8873MLL_GLOBAL_CONTROL_4	0x06
+#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX	(1 << 6)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED	(1 << 4)
+static int ksz8873mll_read_status(struct phy_device *phydev)
+{
+	int regval;
+
+	/* dummy read */
+	regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
+
+	regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
+
+	if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX)
+		phydev->duplex = DUPLEX_HALF;
+	else
+		phydev->duplex = DUPLEX_FULL;
+
+	if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_SPEED)
+		phydev->speed = SPEED_10;
+	else
+		phydev->speed = SPEED_100;
+
+	phydev->link = 1;
+	phydev->pause = phydev->asym_pause = 0;
+
+	return 0;
+}
+
+static int ksz8873mll_config_aneg(struct phy_device *phydev)
+{
+	return 0;
+}
+
 static struct phy_driver ksphy_driver[] = {
 {
 	.phy_id		= PHY_ID_KS8737,
@@ -204,6 +237,16 @@
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= ksz9021_config_intr,
 	.driver		= { .owner = THIS_MODULE, },
+}, {
+	.phy_id		= PHY_ID_KSZ8873MLL,
+	.phy_id_mask	= 0x00fffff0,
+	.name		= "Micrel KSZ8873MLL Switch",
+	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= ksz8873mll_config_aneg,
+	.read_status	= ksz8873mll_read_status,
+	.driver		= { .owner = THIS_MODULE, },
 } };
 
 static int __init ksphy_init(void)
@@ -232,6 +275,7 @@
 	{ PHY_ID_KSZ8021, 0x00ffffff },
 	{ PHY_ID_KSZ8041, 0x00fffff0 },
 	{ PHY_ID_KSZ8051, 0x00fffff0 },
+	{ PHY_ID_KSZ8873MLL, 0x00fffff0 },
 	{ }
 };
 
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 88e3991..16dceed 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -56,37 +56,54 @@
 	return smsc_phy_ack_interrupt (phydev);
 }
 
-static int lan87xx_config_init(struct phy_device *phydev)
-{
-	/*
-	 * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
-	 * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
-	 * to a bug on the chip.
-	 *
-	 * When the system is powered on with the network cable being
-	 * disconnected all the way until after ifconfig ethX up is
-	 * issued for the LAN port with this PHY, connecting the cable
-	 * afterwards does not cause LINK change detection, while the
-	 * expected behavior is the Link UP being detected.
-	 */
-	int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
-	if (rc < 0)
-		return rc;
-
-	rc &= ~MII_LAN83C185_EDPWRDOWN;
-
-	rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
-	if (rc < 0)
-		return rc;
-
-	return smsc_phy_ack_interrupt(phydev);
-}
-
 static int lan911x_config_init(struct phy_device *phydev)
 {
 	return smsc_phy_ack_interrupt(phydev);
 }
 
+/*
+ * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
+ * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
+ * does not send the pulses within this interval, the PHY will remain powered
+ * down.
+ *
+ * This workaround will manually toggle the PHY on/off upon calls to read_status
+ * in order to generate link test pulses if the link is down.  If a link partner
+ * is present, it will respond to the pulses, which will cause the ENERGYON bit
+ * to be set and will cause the EDPD mode to be exited.
+ */
+static int lan87xx_read_status(struct phy_device *phydev)
+{
+	int err = genphy_read_status(phydev);
+
+	if (!phydev->link) {
+		/* Disable EDPD to wake up PHY */
+		int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+		if (rc < 0)
+			return rc;
+
+		rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+			       rc & ~MII_LAN83C185_EDPWRDOWN);
+		if (rc < 0)
+			return rc;
+
+		/* Sleep 64 ms to allow ~5 link test pulses to be sent */
+		msleep(64);
+
+		/* Re-enable EDPD */
+		rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+		if (rc < 0)
+			return rc;
+
+		rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+			       rc | MII_LAN83C185_EDPWRDOWN);
+		if (rc < 0)
+			return rc;
+	}
+
+	return err;
+}
+
 static struct phy_driver smsc_phy_driver[] = {
 {
 	.phy_id		= 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -187,8 +204,8 @@
 
 	/* basic functions */
 	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
-	.config_init	= lan87xx_config_init,
+	.read_status	= lan87xx_read_status,
+	.config_intr	= smsc_phy_config_intr,
 
 	/* IRQ related */
 	.ack_interrupt	= smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb3f5ce..0b2706a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1034,7 +1034,7 @@
 	return err;
 }
 
-struct rtnl_link_stats64*
+static struct rtnl_link_stats64*
 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 {
 	struct ppp *ppp = netdev_priv(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0873cdc..607a3a5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -68,7 +68,6 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
-#include <net/cls_cgroup.h>
 
 #include <asm/uaccess.h>
 
@@ -110,16 +109,56 @@
 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
+/* 1024 is probably a high enough limit: modern hypervisors seem to support on
+ * the order of 100-200 CPUs, so this leaves us some breathing space if we want
+ * to match a queue per guest CPU.
+ */
+#define MAX_TAP_QUEUES 1024
+
+#define TUN_FLOW_EXPIRE (3 * HZ)
+
+/* A tun_file connects an open character device to a tuntap netdevice. It
+ * also contains all socket related structures (except sock_fprog and tap_filter)
+ * to serve as one transmit queue for the tuntap device. The sock_fprog and
+ * tap_filter are kept in tun_struct since they are used for filtering on the
+ * netdevice, not on a specific queue (at least I didn't see the requirement for
+ * this).
+ *
+ * RCU usage:
+ * The tun_file and tun_struct are loosely coupled, the pointer from one to the
+ * other can only be read while rcu_read_lock or rtnl_lock is held.
+ */
 struct tun_file {
-	atomic_t count;
-	struct tun_struct *tun;
+	struct sock sk;
+	struct socket socket;
+	struct socket_wq wq;
+	struct tun_struct __rcu *tun;
 	struct net *net;
+	struct fasync_struct *fasync;
+	/* only used for fasync */
+	unsigned int flags;
+	u16 queue_index;
 };
 
-struct tun_sock;
+struct tun_flow_entry {
+	struct hlist_node hash_link;
+	struct rcu_head rcu;
+	struct tun_struct *tun;
 
+	u32 rxhash;
+	int queue_index;
+	unsigned long updated;
+};
+
+#define TUN_NUM_FLOW_ENTRIES 1024
+
+/* Since the socket was moved to tun_file, to preserve the behavior of a
+ * persistent device, the socket filter, sndbuf and vnet header size are
+ * restored when a file is attached to a persistent device.
+ */
 struct tun_struct {
-	struct tun_file		*tfile;
+	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
+	unsigned int            numqueues;
 	unsigned int 		flags;
 	kuid_t			owner;
 	kgid_t			group;
@@ -128,27 +167,297 @@
 	netdev_features_t	set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 			  NETIF_F_TSO6|NETIF_F_UFO)
-	struct fasync_struct	*fasync;
-
-	struct tap_filter       txflt;
-	struct socket		socket;
-	struct socket_wq	wq;
 
 	int			vnet_hdr_sz;
-
+	int			sndbuf;
+	struct tap_filter	txflt;
+	struct sock_fprog	fprog;
+	/* protected by rtnl lock */
+	bool			filter_attached;
 #ifdef TUN_DEBUG
 	int debug;
 #endif
+	spinlock_t lock;
+	struct kmem_cache *flow_cache;
+	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
+	struct timer_list flow_gc_timer;
+	unsigned long ageing_time;
 };
 
-struct tun_sock {
-	struct sock		sk;
-	struct tun_struct	*tun;
-};
-
-static inline struct tun_sock *tun_sk(struct sock *sk)
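+/* Mask the rxhash down to TUN_NUM_FLOW_ENTRIES (1024) hash buckets. */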
+static inline u32 tun_hashfn(u32 rxhash)
 {
-	return container_of(sk, struct tun_sock, sk);
+	return rxhash & 0x3ff;
+}
+
+static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
+{
+	struct tun_flow_entry *e;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(e, n, head, hash_link) {
+		if (e->rxhash == rxhash)
+			return e;
+	}
+	return NULL;
+}
+
+static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
+					      struct hlist_head *head,
+					      u32 rxhash, u16 queue_index)
+{
+	struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
+						    GFP_ATOMIC);
+	if (e) {
+		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
+			  rxhash, queue_index);
+		e->updated = jiffies;
+		e->rxhash = rxhash;
+		e->queue_index = queue_index;
+		e->tun = tun;
+		hlist_add_head_rcu(&e->hash_link, head);
+	}
+	return e;
+}
+
+static void tun_flow_free(struct rcu_head *head)
+{
+	struct tun_flow_entry *e
+		= container_of(head, struct tun_flow_entry, rcu);
+	kmem_cache_free(e->tun->flow_cache, e);
+}
+
+static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
+{
+	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
+		  e->rxhash, e->queue_index);
+	hlist_del_rcu(&e->hash_link);
+	call_rcu(&e->rcu, tun_flow_free);
+}
+
+static void tun_flow_flush(struct tun_struct *tun)
+{
+	int i;
+
+	spin_lock_bh(&tun->lock);
+	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+		struct tun_flow_entry *e;
+		struct hlist_node *h, *n;
+
+		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+			tun_flow_delete(tun, e);
+	}
+	spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
+{
+	int i;
+
+	spin_lock_bh(&tun->lock);
+	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+		struct tun_flow_entry *e;
+		struct hlist_node *h, *n;
+
+		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+			if (e->queue_index == queue_index)
+				tun_flow_delete(tun, e);
+		}
+	}
+	spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_cleanup(unsigned long data)
+{
+	struct tun_struct *tun = (struct tun_struct *)data;
+	unsigned long delay = tun->ageing_time;
+	unsigned long next_timer = jiffies + delay;
+	unsigned long count = 0;
+	int i;
+
+	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
+
+	spin_lock_bh(&tun->lock);
+	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+		struct tun_flow_entry *e;
+		struct hlist_node *h, *n;
+
+		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+			unsigned long this_timer;
+			count++;
+			this_timer = e->updated + delay;
+			if (time_before_eq(this_timer, jiffies))
+				tun_flow_delete(tun, e);
+			else if (time_before(this_timer, next_timer))
+				next_timer = this_timer;
+		}
+	}
+
+	if (count)
+		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
+	spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
+			    u16 queue_index)
+{
+	struct hlist_head *head;
+	struct tun_flow_entry *e;
+	unsigned long delay = tun->ageing_time;
+	u32 rxhash = skb_get_rxhash(skb);
+
+	if (!rxhash)
+		return;
+	else
+		head = &tun->flows[tun_hashfn(rxhash)];
+
+	rcu_read_lock();
+
+	if (tun->numqueues == 1)
+		goto unlock;
+
+	e = tun_flow_find(head, rxhash);
+	if (likely(e)) {
+		/* TODO: keep queueing to old queue until it's empty? */
+		e->queue_index = queue_index;
+		e->updated = jiffies;
+	} else {
+		spin_lock_bh(&tun->lock);
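+		/* Recheck under the lock in case another CPU just added it */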
+		if (!tun_flow_find(head, rxhash))
+			tun_flow_create(tun, head, rxhash, queue_index);
+
+		if (!timer_pending(&tun->flow_gc_timer))
+			mod_timer(&tun->flow_gc_timer,
+				  round_jiffies_up(jiffies + delay));
+		spin_unlock_bh(&tun->lock);
+	}
+
+unlock:
+	rcu_read_unlock();
+}
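
The slow path in tun_flow_update() above is the usual lockless-lookup-then-locked-recheck idiom: the first tun_flow_find() runs under rcu_read_lock() only, and the lookup is repeated after taking tun->lock so that two CPUs racing on the same rxhash cannot both create the entry. A minimal user-space sketch of the same idiom follows; the names are illustrative, a plain mutex stands in for the spinlock, and the lockless read is only safe in the driver because of RCU:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	unsigned int key;
	struct entry *next;
};

static struct entry *table_head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *table_find(unsigned int key)
{
	struct entry *e;

	for (e = table_head; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/* Fast path: look up without the lock (the driver does this under RCU;
 * plain C would need atomics to make it safe, so treat this as a sketch).
 * Slow path: take the lock and look again before inserting, so two
 * concurrent callers with the same key cannot create a duplicate entry.
 */
static struct entry *table_find_or_create(unsigned int key)
{
	struct entry *e = table_find(key);

	if (e)
		return e;

	pthread_mutex_lock(&table_lock);
	e = table_find(key);		/* re-check under the lock */
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			e->key = key;
			e->next = table_head;
			table_head = e;	/* the driver publishes with hlist_add_head_rcu() */
		}
	}
	pthread_mutex_unlock(&table_lock);
	return e;
}

int main(void)
{
	struct entry *a = table_find_or_create(42);
	struct entry *b = table_find_or_create(42);

	return (a && a == b) ? 0 : 1;	/* second lookup must hit the same entry */
}
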
+
+/* We try to identify a flow through its rxhash first. The reason that
+ * we do not check the rxq no. is that some cards (e.g. 82599) choose
+ * the rxq based on the txq where the last packet of the flow was sent.
+ * As the userspace application moves between processors, we may get a
+ * different rxq no. here. If we could not get the rxhash, then we hope
+ * the rxq no. may help here.
+ */
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_flow_entry *e;
+	u32 txq = 0;
+	u32 numqueues = 0;
+
+	rcu_read_lock();
+	numqueues = tun->numqueues;
+
+	txq = skb_get_rxhash(skb);
+	if (txq) {
+		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+		if (e)
+			txq = e->queue_index;
+		else
+			/* use multiply and shift instead of expensive divide */
+			txq = ((u64)txq * numqueues) >> 32;
+	} else if (likely(skb_rx_queue_recorded(skb))) {
+		txq = skb_get_rx_queue(skb);
+		while (unlikely(txq >= numqueues))
+			txq -= numqueues;
+	}
+
+	rcu_read_unlock();
+	return txq;
+}
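
When tun_select_queue() above has a hash but no matching flow entry, it spreads the flow across queues by treating the 32-bit hash as a fraction of 2^32 and scaling it by the queue count, which avoids a per-packet divide. A small stand-alone sketch of that multiply-and-shift mapping (illustrative names, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash to a queue index in [0, nqueues) without a divide:
 * treat the hash as a fraction of 2^32 and scale it by nqueues.
 */
static uint16_t hash_to_queue(uint32_t hash, uint32_t nqueues)
{
	return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
}

int main(void)
{
	uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("hash 0x%08x -> queue %u of 4\n",
		       (unsigned int)hashes[i], hash_to_queue(hashes[i], 4));
	return 0;
}
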
+
+static inline bool tun_not_capable(struct tun_struct *tun)
+{
+	const struct cred *cred = current_cred();
+	struct net *net = dev_net(tun->dev);
+
+	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+		!ns_capable(net->user_ns, CAP_NET_ADMIN);
+}
+
+static void tun_set_real_num_queues(struct tun_struct *tun)
+{
+	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
+	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
+}
+
+static void __tun_detach(struct tun_file *tfile, bool clean)
+{
+	struct tun_file *ntfile;
+	struct tun_struct *tun;
+	struct net_device *dev;
+
+	tun = rcu_dereference_protected(tfile->tun,
+					lockdep_rtnl_is_held());
+	if (tun) {
+		u16 index = tfile->queue_index;
+		BUG_ON(index >= tun->numqueues);
+		dev = tun->dev;
+
+		rcu_assign_pointer(tun->tfiles[index],
+				   tun->tfiles[tun->numqueues - 1]);
+		rcu_assign_pointer(tfile->tun, NULL);
+		ntfile = rcu_dereference_protected(tun->tfiles[index],
+						   lockdep_rtnl_is_held());
+		ntfile->queue_index = index;
+
+		--tun->numqueues;
+		sock_put(&tfile->sk);
+
+		synchronize_net();
+		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
+		/* Drop read queue */
+		skb_queue_purge(&tfile->sk.sk_receive_queue);
+		tun_set_real_num_queues(tun);
+
+		if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
+			if (dev->reg_state == NETREG_REGISTERED)
+				unregister_netdevice(dev);
+	}
+
+	if (clean) {
+		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
+				 &tfile->socket.flags));
+		sk_release_kernel(&tfile->sk);
+	}
+}
+
+static void tun_detach(struct tun_file *tfile, bool clean)
+{
+	rtnl_lock();
+	__tun_detach(tfile, clean);
+	rtnl_unlock();
+}
+
+static void tun_detach_all(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_file *tfile;
+	int i, n = tun->numqueues;
+
+	for (i = 0; i < n; i++) {
+		tfile = rcu_dereference_protected(tun->tfiles[i],
+						  lockdep_rtnl_is_held());
+		BUG_ON(!tfile);
+		wake_up_all(&tfile->wq.wait);
+		rcu_assign_pointer(tfile->tun, NULL);
+		--tun->numqueues;
+	}
+	BUG_ON(tun->numqueues != 0);
+
+	synchronize_net();
+	for (i = 0; i < n; i++) {
+		tfile = rcu_dereference_protected(tun->tfiles[i],
+						  lockdep_rtnl_is_held());
+		/* Drop read queue */
+		skb_queue_purge(&tfile->sk.sk_receive_queue);
+		sock_put(&tfile->sk);
+	}
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -156,60 +465,54 @@
 	struct tun_file *tfile = file->private_data;
 	int err;
 
-	ASSERT_RTNL();
-
-	netif_tx_lock_bh(tun->dev);
-
 	err = -EINVAL;
-	if (tfile->tun)
+	if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
 		goto out;
 
 	err = -EBUSY;
-	if (tun->tfile)
+	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+		goto out;
+
+	err = -E2BIG;
+	if (tun->numqueues == MAX_TAP_QUEUES)
 		goto out;
 
 	err = 0;
-	tfile->tun = tun;
-	tun->tfile = tfile;
-	tun->socket.file = file;
-	netif_carrier_on(tun->dev);
-	dev_hold(tun->dev);
-	sock_hold(tun->socket.sk);
-	atomic_inc(&tfile->count);
+
+	/* Re-attach the filter to the persistent device */
+	if (tun->filter_attached == true) {
+		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		if (!err)
+			goto out;
+	}
+	tfile->queue_index = tun->numqueues;
+	rcu_assign_pointer(tfile->tun, tun);
+	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+	sock_hold(&tfile->sk);
+	tun->numqueues++;
+
+	tun_set_real_num_queues(tun);
+
+	if (tun->numqueues == 1)
+		netif_carrier_on(tun->dev);
+
+	/* device is allowed to go away first, so no need to hold extra
+	 * refcnt.
+	 */
 
 out:
-	netif_tx_unlock_bh(tun->dev);
 	return err;
 }
 
-static void __tun_detach(struct tun_struct *tun)
-{
-	/* Detach from net device */
-	netif_tx_lock_bh(tun->dev);
-	netif_carrier_off(tun->dev);
-	tun->tfile = NULL;
-	netif_tx_unlock_bh(tun->dev);
-
-	/* Drop read queue */
-	skb_queue_purge(&tun->socket.sk->sk_receive_queue);
-
-	/* Drop the extra count on the net device */
-	dev_put(tun->dev);
-}
-
-static void tun_detach(struct tun_struct *tun)
-{
-	rtnl_lock();
-	__tun_detach(tun);
-	rtnl_unlock();
-}
-
 static struct tun_struct *__tun_get(struct tun_file *tfile)
 {
-	struct tun_struct *tun = NULL;
+	struct tun_struct *tun;
 
-	if (atomic_inc_not_zero(&tfile->count))
-		tun = tfile->tun;
+	rcu_read_lock();
+	tun = rcu_dereference(tfile->tun);
+	if (tun)
+		dev_hold(tun->dev);
+	rcu_read_unlock();
 
 	return tun;
 }
@@ -221,10 +524,7 @@
 
 static void tun_put(struct tun_struct *tun)
 {
-	struct tun_file *tfile = tun->tfile;
-
-	if (atomic_dec_and_test(&tfile->count))
-		tun_detach(tfile->tun);
+	dev_put(tun->dev);
 }
 
 /* TAP filtering */
@@ -344,38 +644,20 @@
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
-	struct tun_struct *tun = netdev_priv(dev);
-	struct tun_file *tfile = tun->tfile;
-
-	/* Inform the methods they need to stop using the dev.
-	 */
-	if (tfile) {
-		wake_up_all(&tun->wq.wait);
-		if (atomic_dec_and_test(&tfile->count))
-			__tun_detach(tun);
-	}
-}
-
-static void tun_free_netdev(struct net_device *dev)
-{
-	struct tun_struct *tun = netdev_priv(dev);
-
-	BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
-
-	sk_release_kernel(tun->socket.sk);
+	tun_detach_all(dev);
 }
 
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
 /* Net device close. */
 static int tun_net_close(struct net_device *dev)
 {
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
@@ -383,12 +665,19 @@
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
+	int txq = skb->queue_mapping;
+	struct tun_file *tfile;
+
+	rcu_read_lock();
+	tfile = rcu_dereference(tun->tfiles[txq]);
+
+	/* Drop packet if interface is not attached */
+	if (txq >= tun->numqueues)
+		goto drop;
 
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
-	/* Drop packet if interface is not attached */
-	if (!tun->tfile)
-		goto drop;
+	BUG_ON(!tfile);
 
 	/* Drop if the filter does not like it.
 	 * This is a noop if the filter is disabled.
@@ -396,15 +685,19 @@
 	if (!check_filter(&tun->txflt, skb))
 		goto drop;
 
-	if (tun->socket.sk->sk_filter &&
-	    sk_filter(tun->socket.sk, skb))
+	if (tfile->socket.sk->sk_filter &&
+	    sk_filter(tfile->socket.sk, skb))
 		goto drop;
 
-	if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
+	/* Limit the number of packets queued by dividing the txq length by the
+	 * number of queues.
+	 */
+	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
+			  >= dev->tx_queue_len / tun->numqueues) {
 		if (!(tun->flags & TUN_ONE_QUEUE)) {
 			/* Normal queueing mode. */
 			/* Packet scheduler handles dropping of further packets. */
-			netif_stop_queue(dev);
+			netif_stop_subqueue(dev, txq);
 
 			/* We won't see all dropped packets individually, so overrun
 			 * error is more appropriate. */
@@ -423,18 +716,22 @@
 	skb_orphan(skb);
 
 	/* Enqueue packet */
-	skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
 
 	/* Notify and wake up reader process */
-	if (tun->flags & TUN_FASYNC)
-		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
-	wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
+	if (tfile->flags & TUN_FASYNC)
+		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
 				   POLLRDNORM | POLLRDBAND);
+
+	rcu_read_unlock();
 	return NETDEV_TX_OK;
 
 drop:
 	dev->stats.tx_dropped++;
+	skb_tx_error(skb);
 	kfree_skb(skb);
+	rcu_read_unlock();
 	return NETDEV_TX_OK;
 }
 
@@ -490,6 +787,7 @@
 	.ndo_start_xmit		= tun_net_xmit,
 	.ndo_change_mtu		= tun_net_change_mtu,
 	.ndo_fix_features	= tun_net_fix_features,
+	.ndo_select_queue	= tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tun_poll_controller,
 #endif
@@ -505,11 +803,43 @@
 	.ndo_set_rx_mode	= tun_net_mclist,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_select_queue	= tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tun_poll_controller,
 #endif
 };
 
+static int tun_flow_init(struct tun_struct *tun)
+{
+	int i;
+
+	tun->flow_cache = kmem_cache_create("tun_flow_cache",
+					    sizeof(struct tun_flow_entry), 0, 0,
+					    NULL);
+	if (!tun->flow_cache)
+		return -ENOMEM;
+
+	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
+		INIT_HLIST_HEAD(&tun->flows[i]);
+
+	tun->ageing_time = TUN_FLOW_EXPIRE;
+	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+	mod_timer(&tun->flow_gc_timer,
+		  round_jiffies_up(jiffies + tun->ageing_time));
+
+	return 0;
+}
+
+static void tun_flow_uninit(struct tun_struct *tun)
+{
+	del_timer_sync(&tun->flow_gc_timer);
+	tun_flow_flush(tun);
+
+	/* Wait for completion of call_rcu()'s */
+	rcu_barrier();
+	kmem_cache_destroy(tun->flow_cache);
+}
+
 /* Initialize net device. */
 static void tun_net_init(struct net_device *dev)
 {
@@ -546,7 +876,7 @@
 /* Character device part */
 
 /* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 {
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun = __tun_get(tfile);
@@ -556,11 +886,11 @@
 	if (!tun)
 		return POLLERR;
 
-	sk = tun->socket.sk;
+	sk = tfile->socket.sk;
 
 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
-	poll_wait(file, &tun->wq.wait, wait);
+	poll_wait(file, &tfile->wq.wait, wait);
 
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
@@ -579,16 +909,14 @@
 
 /* prepad is the amount to reserve at front.  len is length after that.
  * linear is a hint as to how much to copy (usually headers). */
-static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
 				     size_t prepad, size_t len,
 				     size_t linear, int noblock)
 {
-	struct sock *sk = tun->socket.sk;
+	struct sock *sk = tfile->socket.sk;
 	struct sk_buff *skb;
 	int err;
 
-	sock_update_classid(sk);
-
 	/* Under a page?  Don't bother with paged skb. */
 	if (prepad + len < PAGE_SIZE || !linear)
 		linear = len;
@@ -685,9 +1013,9 @@
 }
 
 /* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
-			    const struct iovec *iv, size_t total_len,
-			    size_t count, int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+			    void *msg_control, const struct iovec *iv,
+			    size_t total_len, size_t count, int noblock)
 {
 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
 	struct sk_buff *skb;
@@ -757,7 +1085,7 @@
 	} else
 		copylen = len;
 
-	skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
+	skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
 	if (IS_ERR(skb)) {
 		if (PTR_ERR(skb) != -EAGAIN)
 			tun->dev->stats.rx_dropped++;
@@ -854,6 +1182,7 @@
 	tun->dev->stats.rx_packets++;
 	tun->dev->stats.rx_bytes += len;
 
+	tun_flow_update(tun, skb, tfile->queue_index);
 	return total_len;
 }
 
@@ -862,6 +1191,7 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct tun_struct *tun = tun_get(file);
+	struct tun_file *tfile = file->private_data;
 	ssize_t result;
 
 	if (!tun)
@@ -869,8 +1199,8 @@
 
 	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
 
-	result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
-			      file->f_flags & O_NONBLOCK);
+	result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
+			      count, file->f_flags & O_NONBLOCK);
 
 	tun_put(tun);
 	return result;
@@ -878,6 +1208,7 @@
 
 /* Put packet to the user space buffer */
 static ssize_t tun_put_user(struct tun_struct *tun,
+			    struct tun_file *tfile,
 			    struct sk_buff *skb,
 			    const struct iovec *iv, int len)
 {
@@ -957,7 +1288,7 @@
 	return total;
 }
 
-static ssize_t tun_do_read(struct tun_struct *tun,
+static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 			   struct kiocb *iocb, const struct iovec *iv,
 			   ssize_t len, int noblock)
 {
@@ -965,15 +1296,15 @@
 	struct sk_buff *skb;
 	ssize_t ret = 0;
 
-	tun_debug(KERN_INFO, tun, "tun_chr_read\n");
+	tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
 	if (unlikely(!noblock))
-		add_wait_queue(&tun->wq.wait, &wait);
+		add_wait_queue(&tfile->wq.wait, &wait);
 	while (len) {
 		current->state = TASK_INTERRUPTIBLE;
 
 		/* Read frames from the queue */
-		if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
+		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
 			if (noblock) {
 				ret = -EAGAIN;
 				break;
@@ -991,16 +1322,16 @@
 			schedule();
 			continue;
 		}
-		netif_wake_queue(tun->dev);
+		netif_wake_subqueue(tun->dev, tfile->queue_index);
 
-		ret = tun_put_user(tun, skb, iv, len);
+		ret = tun_put_user(tun, tfile, skb, iv, len);
 		kfree_skb(skb);
 		break;
 	}
 
 	current->state = TASK_RUNNING;
 	if (unlikely(!noblock))
-		remove_wait_queue(&tun->wq.wait, &wait);
+		remove_wait_queue(&tfile->wq.wait, &wait);
 
 	return ret;
 }
@@ -1021,13 +1352,22 @@
 		goto out;
 	}
 
-	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+	ret = tun_do_read(tun, tfile, iocb, iv, len,
+			  file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 out:
 	tun_put(tun);
 	return ret;
 }
 
+static void tun_free_netdev(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+
+	tun_flow_uninit(tun);
+	free_netdev(dev);
+}
+
 static void tun_setup(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
@@ -1056,7 +1396,7 @@
 
 static void tun_sock_write_space(struct sock *sk)
 {
-	struct tun_struct *tun;
+	struct tun_file *tfile;
 	wait_queue_head_t *wqueue;
 
 	if (!sock_writeable(sk))
@@ -1070,37 +1410,46 @@
 		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
-	tun = tun_sk(sk)->tun;
-	kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
-}
-
-static void tun_sock_destruct(struct sock *sk)
-{
-	free_netdev(tun_sk(sk)->tun->dev);
+	tfile = container_of(sk, struct tun_file, sk);
+	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
 }
 
 static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *m, size_t total_len)
 {
-	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
-	return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
-			    m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+	int ret;
+	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+	struct tun_struct *tun = __tun_get(tfile);
+
+	if (!tun)
+		return -EBADFD;
+	ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
+			   m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+	tun_put(tun);
+	return ret;
 }
 
+
 static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *m, size_t total_len,
 		       int flags)
 {
-	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+	struct tun_struct *tun = __tun_get(tfile);
 	int ret;
+
+	if (!tun)
+		return -EBADFD;
+
 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 		return -EINVAL;
-	ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
 			  flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;
 		ret = flags & MSG_TRUNC ? ret : total_len;
 	}
+	tun_put(tun);
 	return ret;
 }
 
@@ -1121,7 +1470,7 @@
 static struct proto tun_proto = {
 	.name		= "tun",
 	.owner		= THIS_MODULE,
-	.obj_size	= sizeof(struct tun_sock),
+	.obj_size	= sizeof(struct tun_file),
 };
 
 static int tun_flags(struct tun_struct *tun)
@@ -1142,6 +1491,9 @@
 	if (tun->flags & TUN_VNET_HDR)
 		flags |= IFF_VNET_HDR;
 
+	if (tun->flags & TUN_TAP_MQ)
+		flags |= IFF_MULTI_QUEUE;
+
 	return flags;
 }
 
@@ -1178,15 +1530,13 @@
 
 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 {
-	struct sock *sk;
 	struct tun_struct *tun;
+	struct tun_file *tfile = file->private_data;
 	struct net_device *dev;
 	int err;
 
 	dev = __dev_get_by_name(net, ifr->ifr_name);
 	if (dev) {
-		const struct cred *cred = current_cred();
-
 		if (ifr->ifr_flags & IFF_TUN_EXCL)
 			return -EBUSY;
 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -1196,11 +1546,9 @@
 		else
 			return -EINVAL;
 
-		if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
-		     (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
-		    !capable(CAP_NET_ADMIN))
+		if (tun_not_capable(tun))
 			return -EPERM;
-		err = security_tun_dev_attach(tun->socket.sk);
+		err = security_tun_dev_attach(tfile->socket.sk);
 		if (err < 0)
 			return err;
 
@@ -1212,7 +1560,7 @@
 		char *name;
 		unsigned long flags = 0;
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 		err = security_tun_dev_create();
 		if (err < 0)
@@ -1233,8 +1581,9 @@
 		if (*ifr->ifr_name)
 			name = ifr->ifr_name;
 
-		dev = alloc_netdev(sizeof(struct tun_struct), name,
-				   tun_setup);
+		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
+				       tun_setup,
+				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
 		if (!dev)
 			return -ENOMEM;
 
@@ -1246,46 +1595,35 @@
 		tun->flags = flags;
 		tun->txflt.count = 0;
 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-		set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
 
-		err = -ENOMEM;
-		sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
-		if (!sk)
-			goto err_free_dev;
+		tun->filter_attached = false;
+		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 
-		sk_change_net(sk, net);
-		tun->socket.wq = &tun->wq;
-		init_waitqueue_head(&tun->wq.wait);
-		tun->socket.ops = &tun_socket_ops;
-		sock_init_data(&tun->socket, sk);
-		sk->sk_write_space = tun_sock_write_space;
-		sk->sk_sndbuf = INT_MAX;
-		sock_set_flag(sk, SOCK_ZEROCOPY);
+		spin_lock_init(&tun->lock);
 
-		tun_sk(sk)->tun = tun;
-
-		security_tun_dev_post_create(sk);
+		security_tun_dev_post_create(&tfile->sk);
 
 		tun_net_init(dev);
 
+		if (tun_flow_init(tun))
+			goto err_free_dev;
+
 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
 			TUN_USER_FEATURES;
 		dev->features = dev->hw_features;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
-			goto err_free_sk;
+			goto err_free_dev;
 
 		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_group))
 			pr_err("Failed to create tun sysfs files\n");
 
-		sk->sk_destruct = tun_sock_destruct;
-
 		err = tun_attach(tun, file);
 		if (err < 0)
-			goto failed;
+			goto err_free_dev;
 	}
 
 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
@@ -1305,24 +1643,26 @@
 	else
 		tun->flags &= ~TUN_VNET_HDR;
 
+	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
+		tun->flags |= TUN_TAP_MQ;
+	else
+		tun->flags &= ~TUN_TAP_MQ;
+
 	/* Make sure persistent devices do not get stuck in
 	 * xoff state.
 	 */
 	if (netif_running(tun->dev))
-		netif_wake_queue(tun->dev);
+		netif_tx_wake_all_queues(tun->dev);
 
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
- err_free_sk:
-	tun_free_netdev(dev);
  err_free_dev:
 	free_netdev(dev);
- failed:
 	return err;
 }
 
-static int tun_get_iff(struct net *net, struct tun_struct *tun,
+static void tun_get_iff(struct net *net, struct tun_struct *tun,
 		       struct ifreq *ifr)
 {
 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -1331,7 +1671,6 @@
 
 	ifr->ifr_flags = tun_flags(tun);
 
-	return 0;
 }
 
 /* This is like a cut-down ethtool ops, except done via tun fd so no
@@ -1373,13 +1712,91 @@
 	return 0;
 }
 
+static void tun_detach_filter(struct tun_struct *tun, int n)
+{
+	int i;
+	struct tun_file *tfile;
+
+	for (i = 0; i < n; i++) {
+		tfile = rcu_dereference_protected(tun->tfiles[i],
+						  lockdep_rtnl_is_held());
+		sk_detach_filter(tfile->socket.sk);
+	}
+
+	tun->filter_attached = false;
+}
+
+static int tun_attach_filter(struct tun_struct *tun)
+{
+	int i, ret = 0;
+	struct tun_file *tfile;
+
+	for (i = 0; i < tun->numqueues; i++) {
+		tfile = rcu_dereference_protected(tun->tfiles[i],
+						  lockdep_rtnl_is_held());
+		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		if (ret) {
+			tun_detach_filter(tun, i);
+			return ret;
+		}
+	}
+
+	tun->filter_attached = true;
+	return ret;
+}
+
+static void tun_set_sndbuf(struct tun_struct *tun)
+{
+	struct tun_file *tfile;
+	int i;
+
+	for (i = 0; i < tun->numqueues; i++) {
+		tfile = rcu_dereference_protected(tun->tfiles[i],
+						lockdep_rtnl_is_held());
+		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
+	}
+}
+
+static int tun_set_queue(struct file *file, struct ifreq *ifr)
+{
+	struct tun_file *tfile = file->private_data;
+	struct tun_struct *tun;
+	struct net_device *dev;
+	int ret = 0;
+
+	rtnl_lock();
+
+	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
+		dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
+		if (!dev) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		tun = netdev_priv(dev);
+		if (dev->netdev_ops != &tap_netdev_ops &&
+			dev->netdev_ops != &tun_netdev_ops)
+			ret = -EINVAL;
+		else if (tun_not_capable(tun))
+			ret = -EPERM;
+		else
+			ret = tun_attach(tun, file);
+	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
+		__tun_detach(tfile, false);
+	else
+		ret = -EINVAL;
+
+unlock:
+	rtnl_unlock();
+	return ret;
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg, int ifreq_len)
 {
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun;
 	void __user* argp = (void __user*)arg;
-	struct sock_fprog fprog;
 	struct ifreq ifr;
 	kuid_t owner;
 	kgid_t group;
@@ -1387,7 +1804,7 @@
 	int vnet_hdr_sz;
 	int ret;
 
-	if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
 		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
 	} else {
@@ -1398,10 +1815,12 @@
 		 * This is needed because we never checked for invalid flags on
 		 * TUNSETIFF. */
 		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
-				IFF_VNET_HDR,
+				IFF_VNET_HDR | IFF_MULTI_QUEUE,
 				(unsigned int __user*)argp);
-	}
+	} else if (cmd == TUNSETQUEUE)
+		return tun_set_queue(file, &ifr);
 
+	ret = 0;
 	rtnl_lock();
 
 	tun = __tun_get(tfile);
@@ -1422,14 +1841,12 @@
 	if (!tun)
 		goto unlock;
 
-	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
+	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
 
 	ret = 0;
 	switch (cmd) {
 	case TUNGETIFF:
-		ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
-		if (ret)
-			break;
+		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
 
 		if (copy_to_user(argp, &ifr, ifreq_len))
 			ret = -EFAULT;
@@ -1444,11 +1861,16 @@
 		break;
 
 	case TUNSETPERSIST:
-		/* Disable/Enable persist mode */
-		if (arg)
+		/* Disable/Enable persist mode. Keep an extra reference to the
+		 * module to prevent the module from being unloaded.
+		 */
+		if (arg) {
 			tun->flags |= TUN_PERSIST;
-		else
+			__module_get(THIS_MODULE);
+		} else {
 			tun->flags &= ~TUN_PERSIST;
+			module_put(THIS_MODULE);
+		}
 
 		tun_debug(KERN_INFO, tun, "persist %s\n",
 			  arg ? "enabled" : "disabled");
@@ -1462,7 +1884,7 @@
 			break;
 		}
 		tun->owner = owner;
-		tun_debug(KERN_INFO, tun, "owner set to %d\n",
+		tun_debug(KERN_INFO, tun, "owner set to %u\n",
 			  from_kuid(&init_user_ns, tun->owner));
 		break;
 
@@ -1474,7 +1896,7 @@
 			break;
 		}
 		tun->group = group;
-		tun_debug(KERN_INFO, tun, "group set to %d\n",
+		tun_debug(KERN_INFO, tun, "group set to %u\n",
 			  from_kgid(&init_user_ns, tun->group));
 		break;
 
@@ -1526,7 +1948,7 @@
 		break;
 
 	case TUNGETSNDBUF:
-		sndbuf = tun->socket.sk->sk_sndbuf;
+		sndbuf = tfile->socket.sk->sk_sndbuf;
 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
 			ret = -EFAULT;
 		break;
@@ -1537,7 +1959,8 @@
 			break;
 		}
 
-		tun->socket.sk->sk_sndbuf = sndbuf;
+		tun->sndbuf = sndbuf;
+		tun_set_sndbuf(tun);
 		break;
 
 	case TUNGETVNETHDRSZ:
@@ -1565,10 +1988,10 @@
 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
 			break;
 		ret = -EFAULT;
-		if (copy_from_user(&fprog, argp, sizeof(fprog)))
+		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
 			break;
 
-		ret = sk_attach_filter(&fprog, tun->socket.sk);
+		ret = tun_attach_filter(tun);
 		break;
 
 	case TUNDETACHFILTER:
@@ -1576,7 +1999,8 @@
 		ret = -EINVAL;
 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
 			break;
-		ret = sk_detach_filter(tun->socket.sk);
+		ret = 0;
+		tun_detach_filter(tun, tun->numqueues);
 		break;
 
 	default:
@@ -1628,27 +2052,21 @@
 
 static int tun_chr_fasync(int fd, struct file *file, int on)
 {
-	struct tun_struct *tun = tun_get(file);
+	struct tun_file *tfile = file->private_data;
 	int ret;
 
-	if (!tun)
-		return -EBADFD;
-
-	tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
-
-	if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
+	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
 		goto out;
 
 	if (on) {
 		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
 		if (ret)
 			goto out;
-		tun->flags |= TUN_FASYNC;
+		tfile->flags |= TUN_FASYNC;
 	} else
-		tun->flags &= ~TUN_FASYNC;
+		tfile->flags &= ~TUN_FASYNC;
 	ret = 0;
 out:
-	tun_put(tun);
 	return ret;
 }
 
@@ -1658,44 +2076,39 @@
 
 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
-	tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
+					    &tun_proto);
 	if (!tfile)
 		return -ENOMEM;
-	atomic_set(&tfile->count, 0);
-	tfile->tun = NULL;
+	rcu_assign_pointer(tfile->tun, NULL);
 	tfile->net = get_net(current->nsproxy->net_ns);
+	tfile->flags = 0;
+
+	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
+	init_waitqueue_head(&tfile->wq.wait);
+
+	tfile->socket.file = file;
+	tfile->socket.ops = &tun_socket_ops;
+
+	sock_init_data(&tfile->socket, &tfile->sk);
+	sk_change_net(&tfile->sk, tfile->net);
+
+	tfile->sk.sk_write_space = tun_sock_write_space;
+	tfile->sk.sk_sndbuf = INT_MAX;
+
 	file->private_data = tfile;
+	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+
 	return 0;
 }
 
 static int tun_chr_close(struct inode *inode, struct file *file)
 {
 	struct tun_file *tfile = file->private_data;
-	struct tun_struct *tun;
+	struct net *net = tfile->net;
 
-	tun = __tun_get(tfile);
-	if (tun) {
-		struct net_device *dev = tun->dev;
-
-		tun_debug(KERN_INFO, tun, "tun_chr_close\n");
-
-		__tun_detach(tun);
-
-		/* If desirable, unregister the netdevice. */
-		if (!(tun->flags & TUN_PERSIST)) {
-			rtnl_lock();
-			if (dev->reg_state == NETREG_REGISTERED)
-				unregister_netdevice(dev);
-			rtnl_unlock();
-		}
-	}
-
-	tun = tfile->tun;
-	if (tun)
-		sock_put(tun->socket.sk);
-
-	put_net(tfile->net);
-	kfree(tfile);
+	tun_detach(tfile, true);
+	put_net(net);
 
 	return 0;
 }
@@ -1822,14 +2235,13 @@
  * holding a reference to the file for as long as the socket is in use. */
 struct socket *tun_get_socket(struct file *file)
 {
-	struct tun_struct *tun;
+	struct tun_file *tfile;
 	if (file->f_op != &tun_fops)
 		return ERR_PTR(-EINVAL);
-	tun = tun_get(file);
-	if (!tun)
+	tfile = file->private_data;
+	if (!tfile)
 		return ERR_PTR(-EBADFD);
-	tun_put(tun);
-	return &tun->socket;
+	return &tfile->socket;
 }
 EXPORT_SYMBOL_GPL(tun_get_socket);
 
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c1ae769..ef97621 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -219,6 +219,24 @@
 	    * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
 	    * Ericsson F5521gw Mobile Broadband Module
 
+config USB_NET_CDC_MBIM
+	tristate "CDC MBIM support"
+	depends on USB_USBNET
+	select USB_WDM
+	select USB_NET_CDC_NCM
+	help
+	  This driver provides support for CDC MBIM (Mobile Broadband
+	  Interface Model) devices. The CDC MBIM specification is
+	  available from <http://www.usb.org/>.
+
+	  MBIM devices require configuration using the management
+	  protocol defined by the MBIM specification.  This driver
+	  provides unfiltered access to the MBIM control channel
+	  through the associated /dev/cdc-wdmx character device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cdc_mbim.
+
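
As a minimal illustration of the control channel mentioned above, the sketch below opens an assumed /dev/cdc-wdm0 node and waits for a single control message; real management traffic must follow the OPEN/CLOSE and command sequences defined by the MBIM specification, which this sketch does not implement:

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[4096];
	ssize_t n;
	int fd = open("/dev/cdc-wdm0", O_RDWR);	/* assumed example node name */

	if (fd < 0) {
		perror("open /dev/cdc-wdm0");
		return 1;
	}
	/* blocks until the function sends a control message; real management
	 * must first issue the MBIM OPEN sequence defined by the specification
	 */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("received a %zd byte control message\n", n);
	close(fd);
	return 0;
}
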
 config USB_NET_DM9601
 	tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
 	depends on USB_USBNET
@@ -230,6 +248,8 @@
 config USB_NET_SMSC75XX
 	tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
 	depends on USB_USBNET
+	select BITREVERSE
+	select CRC16
 	select CRC32
 	help
 	  This option adds support for SMSC LAN75XX based USB 2.0
@@ -238,6 +258,8 @@
 config USB_NET_SMSC95XX
 	tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
 	depends on USB_USBNET
+	select BITREVERSE
+	select CRC16
 	select CRC32
 	help
 	  This option adds support for SMSC LAN95XX based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index bf06300..4786913 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -31,4 +31,5 @@
 obj-$(CONFIG_USB_NET_CDC_NCM)	+= cdc_ncm.o
 obj-$(CONFIG_USB_VL600)		+= lg-vl600.o
 obj-$(CONFIG_USB_NET_QMI_WWAN)	+= qmi_wwan.o
+obj-$(CONFIG_USB_NET_CDC_MBIM)	+= cdc_mbim.o
 
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 774d9ce..50d1673 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -25,121 +25,30 @@
 int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
 		  u16 size, void *data)
 {
-	void *buf;
-	int err = -ENOMEM;
+	int ret;
+	ret = usbnet_read_cmd(dev, cmd,
+			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       value, index, data, size);
 
-	netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
-		   cmd, value, index, size);
-
-	buf = kmalloc(size, GFP_KERNEL);
-	if (!buf)
-		goto out;
-
-	err = usb_control_msg(
-		dev->udev,
-		usb_rcvctrlpipe(dev->udev, 0),
-		cmd,
-		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		value,
-		index,
-		buf,
-		size,
-		USB_CTRL_GET_TIMEOUT);
-	if (err == size)
-		memcpy(data, buf, size);
-	else if (err >= 0)
-		err = -EINVAL;
-	kfree(buf);
-
-out:
-	return err;
+	if (ret != size && ret >= 0)
+		return -EINVAL;
+	return ret;
 }
 
 int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
 		   u16 size, void *data)
 {
-	void *buf = NULL;
-	int err = -ENOMEM;
-
-	netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
-		   cmd, value, index, size);
-
-	if (data) {
-		buf = kmemdup(data, size, GFP_KERNEL);
-		if (!buf)
-			goto out;
-	}
-
-	err = usb_control_msg(
-		dev->udev,
-		usb_sndctrlpipe(dev->udev, 0),
-		cmd,
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		value,
-		index,
-		buf,
-		size,
-		USB_CTRL_SET_TIMEOUT);
-	kfree(buf);
-
-out:
-	return err;
-}
-
-static void asix_async_cmd_callback(struct urb *urb)
-{
-	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
-	int status = urb->status;
-
-	if (status < 0)
-		printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
-			status);
-
-	kfree(req);
-	usb_free_urb(urb);
+	return usbnet_write_cmd(dev, cmd,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				value, index, data, size);
 }
 
 void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
 			  u16 size, void *data)
 {
-	struct usb_ctrlrequest *req;
-	int status;
-	struct urb *urb;
-
-	netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
-		   cmd, value, index, size);
-
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
-		return;
-	}
-
-	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-	if (!req) {
-		netdev_err(dev->net, "Failed to allocate memory for control request\n");
-		usb_free_urb(urb);
-		return;
-	}
-
-	req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-	req->bRequest = cmd;
-	req->wValue = cpu_to_le16(value);
-	req->wIndex = cpu_to_le16(index);
-	req->wLength = cpu_to_le16(size);
-
-	usb_fill_control_urb(urb, dev->udev,
-			     usb_sndctrlpipe(dev->udev, 0),
-			     (void *)req, data, size,
-			     asix_async_cmd_callback, req);
-
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status < 0) {
-		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
-			   status);
-		kfree(req);
-		usb_free_urb(urb);
-	}
+	usbnet_write_cmd_async(dev, cmd,
+			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       value, index, data, size);
 }
 
 int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 33ab824..7a6e758 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -64,6 +64,16 @@
 	}
 }
 
+static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
+{
+	if (is_valid_ether_addr(addr)) {
+		memcpy(dev->net->dev_addr, addr, ETH_ALEN);
+	} else {
+		netdev_info(dev->net, "invalid hw address, using random\n");
+		eth_hw_addr_random(dev->net);
+	}
+}
+
 /* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
 static u32 asix_get_phyid(struct usbnet *dev)
 {
@@ -225,7 +235,8 @@
 			   ret);
 		goto out;
 	}
-	memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+	asix_set_netdev_dev_addr(dev, buf);
 
 	/* Initialize MII structure */
 	dev->mii.dev = dev->net;
@@ -423,7 +434,8 @@
 		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
 		return ret;
 	}
-	memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+	asix_set_netdev_dev_addr(dev, buf);
 
 	/* Initialize MII structure */
 	dev->mii.dev = dev->net;
@@ -777,7 +789,8 @@
 		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
 		return ret;
 	}
-	memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+	asix_set_netdev_dev_addr(dev, buf);
 
 	/* Initialize MII structure */
 	dev->mii.dev = dev->net;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
new file mode 100644
index 0000000..42f51c7
--- /dev/null
+++ b/drivers/net/usb/cdc_mbim.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2012  Smith Micro Software, Inc.
+ * Copyright (c) 2012  Bjørn Mork <bjorn@mork.no>
+ *
+ * This driver is based on and reuse most of cdc_ncm, which is
+ * Copyright (C) ST-Ericsson 2010-2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* driver specific data - must match cdc_ncm usage */
+struct cdc_mbim_state {
+	struct cdc_ncm_ctx *ctx;
+	atomic_t pmcount;
+	struct usb_driver *subdriver;
+	struct usb_interface *control;
+	struct usb_interface *data;
+};
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int cdc_mbim_manage_power(struct usbnet *dev, int on)
+{
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	int rv = 0;
+
+	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+	if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+		/* need autopm_get/put here to ensure the usbcore sees the new value */
+		rv = usb_autopm_get_interface(dev->intf);
+		if (rv < 0)
+			goto err;
+		dev->intf->needs_remote_wakeup = on;
+		usb_autopm_put_interface(dev->intf);
+	}
+err:
+	return rv;
+}
+
+static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+
+	/* can be called while disconnecting */
+	if (!dev)
+		return 0;
+
+	return cdc_mbim_manage_power(dev, status);
+}
+
+
+static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct cdc_ncm_ctx *ctx;
+	struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+	int ret = -ENODEV;
+	u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
+	struct cdc_mbim_state *info = (void *)&dev->data;
+
+	/* see if interface supports MBIM alternate setting */
+	if (intf->num_altsetting == 2) {
+		if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+			usb_set_interface(dev->udev,
+					  intf->cur_altsetting->desc.bInterfaceNumber,
+					  CDC_NCM_COMM_ALTSETTING_MBIM);
+		data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+	}
+
+	/* Probably NCM, defer for cdc_ncm_bind */
+	if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+		goto err;
+
+	ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+	if (ret)
+		goto err;
+
+	ctx = info->ctx;
+
+	/* The MBIM descriptor and the status endpoint are required */
+	if (ctx->mbim_desc && dev->status)
+		subdriver = usb_cdc_wdm_register(ctx->control,
+						 &dev->status->desc,
+						 le16_to_cpu(ctx->mbim_desc->wMaxControlMessage),
+						 cdc_mbim_wdm_manage_power);
+	if (IS_ERR(subdriver)) {
+		ret = PTR_ERR(subdriver);
+		cdc_ncm_unbind(dev, intf);
+		goto err;
+	}
+
+	/* can't let usbnet use the interrupt endpoint */
+	dev->status = NULL;
+	info->subdriver = subdriver;
+
+	/* MBIM cannot do ARP */
+	dev->net->flags |= IFF_NOARP;
+
+	/* no need to put the VLAN tci in the packet headers */
+	dev->net->features |= NETIF_F_HW_VLAN_TX;
+err:
+	return ret;
+}
+
+static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	struct cdc_ncm_ctx *ctx = info->ctx;
+
+	/* disconnect subdriver from control interface */
+	if (info->subdriver && info->subdriver->disconnect)
+		info->subdriver->disconnect(ctx->control);
+	info->subdriver = NULL;
+
+	/* let NCM unbind clean up both control and data interface */
+	cdc_ncm_unbind(dev, intf);
+}
+
+
+static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+	struct sk_buff *skb_out;
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	struct cdc_ncm_ctx *ctx = info->ctx;
+	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+	u16 tci = 0;
+	u8 *c;
+
+	if (!ctx)
+		goto error;
+
+	if (skb) {
+		if (skb->len <= ETH_HLEN)
+			goto error;
+
+		/* mapping VLANs to MBIM sessions:
+		 *   no tag     => IPS session <0>
+		 *   1 - 255    => IPS session <vlanid>
+		 *   256 - 511  => DSS session <vlanid - 256>
+		 *   512 - 4095 => unsupported, drop
+		 */
+		vlan_get_tag(skb, &tci);
+
+		switch (tci & 0x0f00) {
+		case 0x0000: /* VLAN ID 0 - 255 */
+			/* verify that datagram is IPv4 or IPv6 */
+			skb_reset_mac_header(skb);
+			switch (eth_hdr(skb)->h_proto) {
+			case htons(ETH_P_IP):
+			case htons(ETH_P_IPV6):
+				break;
+			default:
+				goto error;
+			}
+			c = (u8 *)&sign;
+			c[3] = tci;
+			break;
+		case 0x0100: /* VLAN ID 256 - 511 */
+			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
+			c = (u8 *)&sign;
+			c[3] = tci;
+			break;
+		default:
+			netif_err(dev, tx_err, dev->net,
+				  "unsupported tci=0x%04x\n", tci);
+			goto error;
+		}
+		skb_pull(skb, ETH_HLEN);
+	}
+
+	spin_lock_bh(&ctx->mtx);
+	skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+	spin_unlock_bh(&ctx->mtx);
+	return skb_out;
+
+error:
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	return NULL;
+}
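
The VLAN-to-session mapping described in the comment above can also be written as a small stand-alone helper. The sketch below uses assumed names and types rather than the driver's, and only mirrors the 0 / 1-255 / 256-511 ranges; IDs 512-4095 are unsupported and dropped, as in cdc_mbim_tx_fixup():

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the VLAN-id to MBIM session mapping (assumed names, not the
 * driver's definitions):
 *   0        -> IPS session 0
 *   1..255   -> IPS session <vid>
 *   256..511 -> DSS session <vid - 256>
 *   512..    -> unsupported, dropped
 */
struct mbim_session {
	bool dss;	/* false = IPS (IP), true = DSS (device service stream) */
	uint8_t id;
};

static int vlan_to_session(uint16_t vid, struct mbim_session *s)
{
	if (vid < 256) {
		s->dss = false;
		s->id = (uint8_t)vid;
		return 0;
	}
	if (vid < 512) {
		s->dss = true;
		s->id = (uint8_t)(vid - 256);
		return 0;
	}
	return -1;
}

int main(void)
{
	uint16_t vids[] = { 0, 3, 256, 300, 600 };
	unsigned int i;

	for (i = 0; i < 5; i++) {
		struct mbim_session s;

		if (vlan_to_session(vids[i], &s))
			printf("vid %u: unsupported\n", vids[i]);
		else
			printf("vid %u: %s session %u\n", vids[i],
			       s.dss ? "DSS" : "IPS", s.id);
	}
	return 0;
}
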
+
+static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
+{
+	__be16 proto = htons(ETH_P_802_3);
+	struct sk_buff *skb = NULL;
+
+	if (tci < 256) { /* IPS session? */
+		if (len < sizeof(struct iphdr))
+			goto err;
+
+		switch (*buf & 0xf0) {
+		case 0x40:
+			proto = htons(ETH_P_IP);
+			break;
+		case 0x60:
+			proto = htons(ETH_P_IPV6);
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	skb = netdev_alloc_skb_ip_align(dev->net,  len + ETH_HLEN);
+	if (!skb)
+		goto err;
+
+	/* add an ethernet header */
+	skb_put(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	eth_hdr(skb)->h_proto = proto;
+	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+
+	/* add datagram */
+	memcpy(skb_put(skb, len), buf, len);
+
+	/* map MBIM session to VLAN */
+	if (tci)
+		vlan_put_tag(skb, tci);
+err:
+	return skb;
+}
+
+static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+	struct sk_buff *skb;
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	struct cdc_ncm_ctx *ctx = info->ctx;
+	int len;
+	int nframes;
+	int x;
+	int offset;
+	struct usb_cdc_ncm_ndp16 *ndp16;
+	struct usb_cdc_ncm_dpe16 *dpe16;
+	int ndpoffset;
+	int loopcount = 50; /* arbitrary max preventing infinite loop */
+	u8 *c;
+	u16 tci;
+
+	ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+	if (ndpoffset < 0)
+		goto error;
+
+next_ndp:
+	nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+	if (nframes < 0)
+		goto error;
+
+	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+	switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) {
+	case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
+		c = (u8 *)&ndp16->dwSignature;
+		tci = c[3];
+		break;
+	case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
+		c = (u8 *)&ndp16->dwSignature;
+		tci = c[3] + 256;
+		break;
+	default:
+		netif_dbg(dev, rx_err, dev->net,
+			  "unsupported NDP signature <0x%08x>\n",
+			  le32_to_cpu(ndp16->dwSignature));
+		goto err_ndp;
+
+	}
+
+	dpe16 = ndp16->dpe16;
+	for (x = 0; x < nframes; x++, dpe16++) {
+		offset = le16_to_cpu(dpe16->wDatagramIndex);
+		len = le16_to_cpu(dpe16->wDatagramLength);
+
+		/*
+		 * CDC NCM ch. 3.7
+		 * All entries after first NULL entry are to be ignored
+		 */
+		if ((offset == 0) || (len == 0)) {
+			if (!x)
+				goto err_ndp; /* empty NTB */
+			break;
+		}
+
+		/* sanity checking */
+		if (((offset + len) > skb_in->len) || (len > ctx->rx_max)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+				  x, offset, len, skb_in);
+			if (!x)
+				goto err_ndp;
+			break;
+		} else {
+			skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
+			if (!skb)
+				goto error;
+			usbnet_skb_return(dev, skb);
+		}
+	}
+err_ndp:
+	/* are there more NDPs to process? */
+	ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+	if (ndpoffset && loopcount--)
+		goto next_ndp;
+
+	return 1;
+error:
+	return 0;
+}
+
+static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	int ret = 0;
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	struct cdc_ncm_ctx *ctx = info->ctx;
+
+	if (ctx == NULL) {
+		ret = -1;
+		goto error;
+	}
+
+	ret = usbnet_suspend(intf, message);
+	if (ret < 0)
+		goto error;
+
+	if (intf == ctx->control && info->subdriver && info->subdriver->suspend)
+		ret = info->subdriver->suspend(intf, message);
+	if (ret < 0)
+		usbnet_resume(intf);
+
+error:
+	return ret;
+}
+
+static int cdc_mbim_resume(struct usb_interface *intf)
+{
+	int  ret = 0;
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct cdc_mbim_state *info = (void *)&dev->data;
+	struct cdc_ncm_ctx *ctx = info->ctx;
+	bool callsub = (intf == ctx->control && info->subdriver && info->subdriver->resume);
+
+	if (callsub)
+		ret = info->subdriver->resume(intf);
+	if (ret < 0)
+		goto err;
+	ret = usbnet_resume(intf);
+	if (ret < 0 && callsub && info->subdriver->suspend)
+		info->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+	return ret;
+}
+
+static const struct driver_info cdc_mbim_info = {
+	.description = "CDC MBIM",
+	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+	.bind = cdc_mbim_bind,
+	.unbind = cdc_mbim_unbind,
+	.manage_power = cdc_mbim_manage_power,
+	.rx_fixup = cdc_mbim_rx_fixup,
+	.tx_fixup = cdc_mbim_tx_fixup,
+};
+
+static const struct usb_device_id mbim_devs[] = {
+	/* This duplicate NCM entry is intentional. MBIM devices can
+	 * be disguised as NCM by default, and this is necessary to
+	 * allow us to bind the correct driver_info to such devices.
+	 *
+	 * bind() will sort this out for us, selecting the correct
+	 * entry and rejecting the other.
+	 */
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info,
+	},
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info,
+	},
+	{
+	},
+};
+MODULE_DEVICE_TABLE(usb, mbim_devs);
+
+static struct usb_driver cdc_mbim_driver = {
+	.name = "cdc_mbim",
+	.id_table = mbim_devs,
+	.probe = usbnet_probe,
+	.disconnect = usbnet_disconnect,
+	.suspend = cdc_mbim_suspend,
+	.resume = cdc_mbim_resume,
+	.reset_resume =	cdc_mbim_resume,
+	.supports_autosuspend = 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(cdc_mbim_driver);
+
+MODULE_AUTHOR("Greg Suarez <gsuarez@smithmicro.com>");
+MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
+MODULE_DESCRIPTION("USB CDC MBIM host driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 74fab1a..d38bc20 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -51,90 +51,10 @@
 #include <linux/atomic.h>
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
+#include <linux/usb/cdc_ncm.h>
 
 #define	DRIVER_VERSION				"14-Mar-2012"
 
-/* CDC NCM subclass 3.2.1 */
-#define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
-
-/* Maximum NTB length */
-#define	CDC_NCM_NTB_MAX_SIZE_TX			32768	/* bytes */
-#define	CDC_NCM_NTB_MAX_SIZE_RX			32768	/* bytes */
-
-/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
-#define	CDC_NCM_MIN_DATAGRAM_SIZE		1514	/* bytes */
-
-#define	CDC_NCM_MIN_TX_PKT			512	/* bytes */
-
-/* Default value for MaxDatagramSize */
-#define	CDC_NCM_MAX_DATAGRAM_SIZE		8192	/* bytes */
-
-/*
- * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
- * the last NULL entry.
- */
-#define	CDC_NCM_DPT_DATAGRAMS_MAX		40
-
-/* Restart the timer, if amount of datagrams is less than given value */
-#define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
-#define	CDC_NCM_TIMER_PENDING_CNT		2
-#define CDC_NCM_TIMER_INTERVAL			(400UL * NSEC_PER_USEC)
-
-/* The following macro defines the minimum header space */
-#define	CDC_NCM_MIN_HDR_SIZE \
-	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
-	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-
-struct cdc_ncm_data {
-	struct usb_cdc_ncm_nth16 nth16;
-	struct usb_cdc_ncm_ndp16 ndp16;
-	struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
-};
-
-struct cdc_ncm_ctx {
-	struct cdc_ncm_data tx_ncm;
-	struct usb_cdc_ncm_ntb_parameters ncm_parm;
-	struct hrtimer tx_timer;
-	struct tasklet_struct bh;
-
-	const struct usb_cdc_ncm_desc *func_desc;
-	const struct usb_cdc_header_desc *header_desc;
-	const struct usb_cdc_union_desc *union_desc;
-	const struct usb_cdc_ether_desc *ether_desc;
-
-	struct net_device *netdev;
-	struct usb_device *udev;
-	struct usb_host_endpoint *in_ep;
-	struct usb_host_endpoint *out_ep;
-	struct usb_host_endpoint *status_ep;
-	struct usb_interface *intf;
-	struct usb_interface *control;
-	struct usb_interface *data;
-
-	struct sk_buff *tx_curr_skb;
-	struct sk_buff *tx_rem_skb;
-
-	spinlock_t mtx;
-	atomic_t stop;
-
-	u32 tx_timer_pending;
-	u32 tx_curr_offset;
-	u32 tx_curr_last_offset;
-	u32 tx_curr_frame_num;
-	u32 rx_speed;
-	u32 tx_speed;
-	u32 rx_max;
-	u32 tx_max;
-	u32 max_datagram_size;
-	u16 tx_max_datagrams;
-	u16 tx_remainder;
-	u16 tx_modulus;
-	u16 tx_ndp_modulus;
-	u16 tx_seq;
-	u16 rx_seq;
-	u16 connected;
-};
-
 static void cdc_ncm_txpath_bh(unsigned long param);
 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -158,17 +78,19 @@
 	u8 flags;
 	u8 iface_no;
 	int err;
+	int eth_hlen;
 	u16 ntb_fmt_supported;
+	u32 min_dgram_size;
+	u32 min_hdr_size;
+	struct usbnet *dev = netdev_priv(ctx->netdev);
 
 	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
-	err = usb_control_msg(ctx->udev,
-				usb_rcvctrlpipe(ctx->udev, 0),
-				USB_CDC_GET_NTB_PARAMETERS,
-				USB_TYPE_CLASS | USB_DIR_IN
-				 | USB_RECIP_INTERFACE,
-				0, iface_no, &ctx->ncm_parm,
-				sizeof(ctx->ncm_parm), 10000);
+	err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
+			      USB_TYPE_CLASS | USB_DIR_IN
+			      |USB_RECIP_INTERFACE,
+			      0, iface_no, &ctx->ncm_parm,
+			      sizeof(ctx->ncm_parm));
 	if (err < 0) {
 		pr_debug("failed GET_NTB_PARAMETERS\n");
 		return 1;
@@ -184,10 +106,19 @@
 	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
 	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
-	if (ctx->func_desc != NULL)
+	eth_hlen = ETH_HLEN;
+	min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+	min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
+	if (ctx->mbim_desc != NULL) {
+		flags = ctx->mbim_desc->bmNetworkCapabilities;
+		eth_hlen = 0;
+		min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
+		min_hdr_size = 0;
+	} else if (ctx->func_desc != NULL) {
 		flags = ctx->func_desc->bmNetworkCapabilities;
-	else
+	} else {
 		flags = 0;
+	}
 
 	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
 		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
@@ -215,49 +146,19 @@
 
 	/* inform device about NTB input size changes */
 	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+		__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
 
-		if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
-			struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
-
-			ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
-			if (!ndp_in_sz) {
-				err = -ENOMEM;
-				goto size_err;
-			}
-
-			err = usb_control_msg(ctx->udev,
-					usb_sndctrlpipe(ctx->udev, 0),
-					USB_CDC_SET_NTB_INPUT_SIZE,
-					USB_TYPE_CLASS | USB_DIR_OUT
-					 | USB_RECIP_INTERFACE,
-					0, iface_no, ndp_in_sz, 8, 1000);
-			kfree(ndp_in_sz);
-		} else {
-			__le32 *dwNtbInMaxSize;
-			dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
-					GFP_KERNEL);
-			if (!dwNtbInMaxSize) {
-				err = -ENOMEM;
-				goto size_err;
-			}
-			*dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
-
-			err = usb_control_msg(ctx->udev,
-					usb_sndctrlpipe(ctx->udev, 0),
-					USB_CDC_SET_NTB_INPUT_SIZE,
-					USB_TYPE_CLASS | USB_DIR_OUT
-					 | USB_RECIP_INTERFACE,
-					0, iface_no, dwNtbInMaxSize, 4, 1000);
-			kfree(dwNtbInMaxSize);
-		}
-size_err:
+		err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+				       USB_TYPE_CLASS | USB_DIR_OUT
+				       | USB_RECIP_INTERFACE,
+				       0, iface_no, &dwNtbInMaxSize, 4);
 		if (err < 0)
 			pr_debug("Setting NTB Input Size failed\n");
 	}
 
 	/* verify maximum size of transmitted NTB in bytes */
 	if ((ctx->tx_max <
-	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+	    (min_hdr_size + min_dgram_size)) ||
 	    (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
 		pr_debug("Using default maximum transmit length=%d\n",
 						CDC_NCM_NTB_MAX_SIZE_TX);
@@ -299,93 +200,85 @@
 	}
 
 	/* adjust TX-remainder according to NCM specification. */
-	ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
-						(ctx->tx_modulus - 1));
+	ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+			     (ctx->tx_modulus - 1));
 
 	/* additional configuration */
 
 	/* set CRC Mode */
 	if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
-		err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
-				USB_CDC_SET_CRC_MODE,
-				USB_TYPE_CLASS | USB_DIR_OUT
-				 | USB_RECIP_INTERFACE,
-				USB_CDC_NCM_CRC_NOT_APPENDED,
-				iface_no, NULL, 0, 1000);
+		err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+				       USB_TYPE_CLASS | USB_DIR_OUT
+				       | USB_RECIP_INTERFACE,
+				       USB_CDC_NCM_CRC_NOT_APPENDED,
+				       iface_no, NULL, 0);
 		if (err < 0)
 			pr_debug("Setting CRC mode off failed\n");
 	}
 
 	/* set NTB format, if both formats are supported */
 	if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
-		err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
-				USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
-				 | USB_DIR_OUT | USB_RECIP_INTERFACE,
-				USB_CDC_NCM_NTB16_FORMAT,
-				iface_no, NULL, 0, 1000);
+		err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+				       USB_TYPE_CLASS | USB_DIR_OUT
+				       | USB_RECIP_INTERFACE,
+				       USB_CDC_NCM_NTB16_FORMAT,
+				       iface_no, NULL, 0);
 		if (err < 0)
 			pr_debug("Setting NTB format to 16-bit failed\n");
 	}
 
-	ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+	ctx->max_datagram_size = min_dgram_size;
 
 	/* set Max Datagram Size (MTU) */
 	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
-		__le16 *max_datagram_size;
-		u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
-		max_datagram_size = kzalloc(sizeof(*max_datagram_size),
-				GFP_KERNEL);
-		if (!max_datagram_size) {
-			err = -ENOMEM;
+		__le16 max_datagram_size;
+		u16 eth_max_sz;
+		if (ctx->ether_desc != NULL)
+			eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+		else if (ctx->mbim_desc != NULL)
+			eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+		else
 			goto max_dgram_err;
-		}
 
-		err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
-				USB_CDC_GET_MAX_DATAGRAM_SIZE,
-				USB_TYPE_CLASS | USB_DIR_IN
-				 | USB_RECIP_INTERFACE,
-				0, iface_no, max_datagram_size,
-				2, 1000);
+		err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+				      USB_TYPE_CLASS | USB_DIR_IN
+				      | USB_RECIP_INTERFACE,
+				      0, iface_no, &max_datagram_size, 2);
 		if (err < 0) {
 			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
-						CDC_NCM_MIN_DATAGRAM_SIZE);
+				 min_dgram_size);
 		} else {
 			ctx->max_datagram_size =
-				le16_to_cpu(*max_datagram_size);
+				le16_to_cpu(max_datagram_size);
 			/* Check Eth descriptor value */
 			if (ctx->max_datagram_size > eth_max_sz)
 					ctx->max_datagram_size = eth_max_sz;
 
 			if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-				ctx->max_datagram_size =
-						CDC_NCM_MAX_DATAGRAM_SIZE;
+				ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
 
-			if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-				ctx->max_datagram_size =
-					CDC_NCM_MIN_DATAGRAM_SIZE;
+			if (ctx->max_datagram_size < min_dgram_size)
+				ctx->max_datagram_size = min_dgram_size;
 
 			/* if value changed, update device */
 			if (ctx->max_datagram_size !=
-					le16_to_cpu(*max_datagram_size)) {
-				err = usb_control_msg(ctx->udev,
-						usb_sndctrlpipe(ctx->udev, 0),
+					le16_to_cpu(max_datagram_size)) {
+				err = usbnet_write_cmd(dev,
 						USB_CDC_SET_MAX_DATAGRAM_SIZE,
 						USB_TYPE_CLASS | USB_DIR_OUT
 						 | USB_RECIP_INTERFACE,
 						0,
-						iface_no, max_datagram_size,
-						2, 1000);
+						iface_no, &max_datagram_size,
+						2);
 				if (err < 0)
 					pr_debug("SET_MAX_DGRAM_SIZE failed\n");
 			}
 		}
-		kfree(max_datagram_size);
 	}
 
 max_dgram_err:
-	if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
-		ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+	if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
+		ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
 
 	return 0;
 }
@@ -451,7 +344,7 @@
 	.nway_reset = usbnet_nway_reset,
 };
 
-static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
 {
 	struct cdc_ncm_ctx *ctx;
 	struct usb_driver *driver;
@@ -525,6 +418,13 @@
 			ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
 			break;
 
+		case USB_CDC_MBIM_TYPE:
+			if (buf[0] < sizeof(*(ctx->mbim_desc)))
+				break;
+
+			ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
+			break;
+
 		default:
 			break;
 		}
@@ -537,7 +437,7 @@
 
 	/* check if we got everything */
 	if ((ctx->control == NULL) || (ctx->data == NULL) ||
-	    (ctx->ether_desc == NULL) || (ctx->control != intf))
+	    ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
 		goto error;
 
 	/* claim data interface, if different from control */
@@ -559,7 +459,7 @@
 		goto error2;
 
 	/* configure data interface */
-	temp = usb_set_interface(dev->udev, iface_no, 1);
+	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
 	if (temp)
 		goto error2;
 
@@ -576,11 +476,13 @@
 	usb_set_intfdata(ctx->control, dev);
 	usb_set_intfdata(ctx->intf, dev);
 
-	temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
-	if (temp)
-		goto error2;
+	if (ctx->ether_desc) {
+		temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+		if (temp)
+			goto error2;
+		dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+	}
 
-	dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
 
 	dev->in = usb_rcvbulkpipe(dev->udev,
 		ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
@@ -589,13 +491,6 @@
 	dev->status = ctx->status_ep;
 	dev->rx_urb_size = ctx->rx_max;
 
-	/*
-	 * We should get an event when network connection is "connected" or
-	 * "disconnected". Set network connection in "disconnected" state
-	 * (carrier is OFF) during attach, so the IP network stack does not
-	 * start IPv6 negotiation and more.
-	 */
-	netif_carrier_off(dev->net);
 	ctx->tx_speed = ctx->rx_speed = 0;
 	return 0;
 
@@ -609,8 +504,9 @@
 	dev_info(&dev->udev->dev, "bind() failure\n");
 	return -ENODEV;
 }
+EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
 
-static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
 	struct usb_driver *driver = driver_of(intf);
@@ -644,52 +540,121 @@
 	usb_set_intfdata(ctx->intf, NULL);
 	cdc_ncm_free(ctx);
 }
+EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
 
-static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-	if (first >= max)
-		return;
-	if (first >= end)
-		return;
-	if (end > max)
-		end = max;
-	memset(ptr + first, 0, end - first);
+	int ret;
+
+	/* The MBIM spec defines an NCM compatible default altsetting,
+	 * which we may have matched:
+	 *
+	 *  "Functions that implement both NCM 1.0 and MBIM (an
+	 *   “NCM/MBIM function”) according to this recommendation
+	 *   shall provide two alternate settings for the
+	 *   Communication Interface.  Alternate setting 0, and the
+	 *   associated class and endpoint descriptors, shall be
+	 *   constructed according to the rules given for the
+	 *   Communication Interface in section 5 of [USBNCM10].
+	 *   Alternate setting 1, and the associated class and
+	 *   endpoint descriptors, shall be constructed according to
+	 *   the rules given in section 6 (USB Device Model) of this
+	 *   specification."
+	 *
+	 * Do not bind to such interfaces, allowing cdc_mbim to handle
+	 * them
+	 */
+#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
+	if ((intf->num_altsetting == 2) &&
+	    !usb_set_interface(dev->udev,
+			       intf->cur_altsetting->desc.bInterfaceNumber,
+			       CDC_NCM_COMM_ALTSETTING_MBIM) &&
+	    cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+		return -ENODEV;
+#endif
+
+	/* NCM data altsetting is always 1 */
+	ret = cdc_ncm_bind_common(dev, intf, 1);
+
+	/*
+	 * We should get an event when network connection is "connected" or
+	 * "disconnected". Set network connection in "disconnected" state
+	 * (carrier is OFF) during attach, so the IP network stack does not
+	 * start IPv6 negotiation and more.
+	 */
+	netif_carrier_off(dev->net);
+	return ret;
 }
 
-static struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
 {
+	size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
+
+	if (skb->len + align > max)
+		align = max - skb->len;
+	if (align && skb_tailroom(skb) >= align)
+		memset(skb_put(skb, align), 0, align);
+}
+
+/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
+ * allocating a new one within skb
+ */
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+	struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
+	struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
+	size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+
+	/* follow the chain of NDPs, looking for a match */
+	while (ndpoffset) {
+		ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
+		if (ndp16->dwSignature == sign)
+			return ndp16;
+		ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+	}
+
+	/* align new NDP */
+	cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+
+	/* verify that there is room for the NDP and the datagram (reserve) */
+	if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+		return NULL;
+
+	/* link to it */
+	if (ndp16)
+		ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
+	else
+		nth16->wNdpIndex = cpu_to_le16(skb->len);
+
+	/* push a new empty NDP */
+	ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+	ndp16->dwSignature = sign;
+	ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
+	return ndp16;
+}
+
+struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+{
+	struct usb_cdc_ncm_nth16 *nth16;
+	struct usb_cdc_ncm_ndp16 *ndp16;
 	struct sk_buff *skb_out;
-	u32 rem;
-	u32 offset;
-	u32 last_offset;
-	u16 n = 0, index;
+	u16 n = 0, index, ndplen;
 	u8 ready2send = 0;
 
 	/* if there is a remaining skb, it gets priority */
-	if (skb != NULL)
+	if (skb != NULL) {
 		swap(skb, ctx->tx_rem_skb);
-	else
+		swap(sign, ctx->tx_rem_sign);
+	} else {
 		ready2send = 1;
-
-	/*
-	 * +----------------+
-	 * | skb_out        |
-	 * +----------------+
-	 *           ^ offset
-	 *        ^ last_offset
-	 */
+	}
 
 	/* check if we are resuming an OUT skb */
-	if (ctx->tx_curr_skb != NULL) {
-		/* pop variables */
-		skb_out = ctx->tx_curr_skb;
-		offset = ctx->tx_curr_offset;
-		last_offset = ctx->tx_curr_last_offset;
-		n = ctx->tx_curr_frame_num;
+	skb_out = ctx->tx_curr_skb;
 
-	} else {
-		/* reset variables */
+	/* allocate a new OUT skb */
+	if (!skb_out) {
 		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
 		if (skb_out == NULL) {
 			if (skb != NULL) {
@@ -698,35 +663,21 @@
 			}
 			goto exit_no_skb;
 		}
+		/* fill out the initial 16-bit NTB header */
+		nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
+		nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+		nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+		nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
 
-		/* make room for NTH and NDP */
-		offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
-					ctx->tx_ndp_modulus) +
-					sizeof(struct usb_cdc_ncm_ndp16) +
-					(ctx->tx_max_datagrams + 1) *
-					sizeof(struct usb_cdc_ncm_dpe16);
-
-		/* store last valid offset before alignment */
-		last_offset = offset;
-		/* align first Datagram offset correctly */
-		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
-		/* zero buffer till the first IP datagram */
-		cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
-		n = 0;
+		/* count total number of frames in this NTB */
 		ctx->tx_curr_frame_num = 0;
 	}
 
-	for (; n < ctx->tx_max_datagrams; n++) {
-		/* check if end of transmit buffer is reached */
-		if (offset >= ctx->tx_max) {
-			ready2send = 1;
-			break;
-		}
-		/* compute maximum buffer size */
-		rem = ctx->tx_max - offset;
-
+	for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
+		/* send any remaining skb first */
 		if (skb == NULL) {
 			skb = ctx->tx_rem_skb;
+			sign = ctx->tx_rem_sign;
 			ctx->tx_rem_skb = NULL;
 
 			/* check for end of skb */
@@ -734,7 +685,14 @@
 				break;
 		}
 
-		if (skb->len > rem) {
+		/* get the appropriate NDP for this skb */
+		ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+
+		/* align beginning of next frame */
+		cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
+
+		/* check if we had enough room left for both NDP and frame */
+		if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
 			if (n == 0) {
 				/* won't fit, MTU problem? */
 				dev_kfree_skb_any(skb);
@@ -747,31 +705,30 @@
 					ctx->netdev->stats.tx_dropped++;
 				}
 				ctx->tx_rem_skb = skb;
+				ctx->tx_rem_sign = sign;
 				skb = NULL;
 				ready2send = 1;
 			}
 			break;
 		}
 
-		memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
+		/* calculate frame number within this NDP */
+		ndplen = le16_to_cpu(ndp16->wLength);
+		index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
 
-		ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
-		ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
-
-		/* update offset */
-		offset += skb->len;
-
-		/* store last valid offset before alignment */
-		last_offset = offset;
-
-		/* align offset correctly */
-		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
-
-		/* zero padding */
-		cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
-								ctx->tx_max);
+		/* OK, add this skb */
+		ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+		ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+		ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+		memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
 		dev_kfree_skb_any(skb);
 		skb = NULL;
+
+		/* send now if this NDP is full */
+		if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
+			ready2send = 1;
+			break;
+		}
 	}
 
 	/* free up any dangling skb */
@@ -787,16 +744,12 @@
 		/* wait for more frames */
 		/* push variables */
 		ctx->tx_curr_skb = skb_out;
-		ctx->tx_curr_offset = offset;
-		ctx->tx_curr_last_offset = last_offset;
 		goto exit_no_skb;
 
 	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
 		/* wait for more frames */
 		/* push variables */
 		ctx->tx_curr_skb = skb_out;
-		ctx->tx_curr_offset = offset;
-		ctx->tx_curr_last_offset = last_offset;
 		/* set the pending count */
 		if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
 			ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
@@ -807,75 +760,24 @@
 		/* variables will be reset at next call */
 	}
 
-	/* check for overflow */
-	if (last_offset > ctx->tx_max)
-		last_offset = ctx->tx_max;
-
-	/* revert offset */
-	offset = last_offset;
-
 	/*
 	 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
 	 * we send buffers as it is. If we get more data, it would be more
 	 * efficient for USB HS mobile device with DMA engine to receive a full
 	 * size NTB, than canceling DMA transfer and receiving a short packet.
 	 */
-	if (offset > CDC_NCM_MIN_TX_PKT)
-		offset = ctx->tx_max;
+	if (skb_out->len > CDC_NCM_MIN_TX_PKT)
+		/* final zero padding */
+		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
 
-	/* final zero padding */
-	cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
+	/* do we need to prevent a ZLP? */
+	if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
+	    (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+		*skb_put(skb_out, 1) = 0;	/* force short packet */
 
-	/* store last offset */
-	last_offset = offset;
-
-	if (((last_offset < ctx->tx_max) && ((last_offset %
-			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
-	    (((last_offset == ctx->tx_max) && ((ctx->tx_max %
-		le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
-		(ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
-		/* force short packet */
-		*(((u8 *)skb_out->data) + last_offset) = 0;
-		last_offset++;
-	}
-
-	/* zero the rest of the DPEs plus the last NULL entry */
-	for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
-		ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
-		ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
-	}
-
-	/* fill out 16-bit NTB header */
-	ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
-	ctx->tx_ncm.nth16.wHeaderLength =
-					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
-	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
-	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
-	index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
-	ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
-
-	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
-	ctx->tx_seq++;
-
-	/* fill out 16-bit NDP table */
-	ctx->tx_ncm.ndp16.dwSignature =
-				cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
-	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
-					sizeof(struct usb_cdc_ncm_dpe16));
-	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
-	ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
-
-	memcpy(((u8 *)skb_out->data) + index,
-						&(ctx->tx_ncm.ndp16),
-						sizeof(ctx->tx_ncm.ndp16));
-
-	memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
-					&(ctx->tx_ncm.dpe16),
-					(ctx->tx_curr_frame_num + 1) *
-					sizeof(struct usb_cdc_ncm_dpe16));
-
-	/* set frame length */
-	skb_put(skb_out, last_offset);
+	/* set final frame length */
+	nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+	nth16->wBlockLength = cpu_to_le16(skb_out->len);
 
 	/* return skb */
 	ctx->tx_curr_skb = NULL;
@@ -888,6 +790,7 @@
 		cdc_ncm_tx_timeout_start(ctx);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame);
 
 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
 {
@@ -922,6 +825,8 @@
 		netif_tx_lock_bh(ctx->netdev);
 		usbnet_start_xmit(NULL, ctx->netdev);
 		netif_tx_unlock_bh(ctx->netdev);
+	} else {
+		spin_unlock_bh(&ctx->mtx);
 	}
 }
 
@@ -942,7 +847,7 @@
 		goto error;
 
 	spin_lock_bh(&ctx->mtx);
-	skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+	skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
 	spin_unlock_bh(&ctx->mtx);
 	return skb_out;
 
@@ -953,17 +858,12 @@
 	return NULL;
 }
 
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+/* verify NTB header and return offset of first NDP, or negative error */
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
 {
-	struct sk_buff *skb;
-	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-	int len;
-	int nframes;
-	int x;
-	int offset;
 	struct usb_cdc_ncm_nth16 *nth16;
-	struct usb_cdc_ncm_ndp16 *ndp16;
-	struct usb_cdc_ncm_dpe16 *dpe16;
+	int len;
+	int ret = -EINVAL;
 
 	if (ctx == NULL)
 		goto error;
@@ -997,20 +897,23 @@
 	}
 	ctx->rx_seq = le16_to_cpu(nth16->wSequence);
 
-	len = le16_to_cpu(nth16->wNdpIndex);
-	if ((len + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
-		pr_debug("invalid DPT16 index <%u>\n",
-					le16_to_cpu(nth16->wNdpIndex));
+	ret = le16_to_cpu(nth16->wNdpIndex);
+error:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
+
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
+{
+	struct usb_cdc_ncm_ndp16 *ndp16;
+	int ret = -EINVAL;
+
+	if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
+		pr_debug("invalid NDP offset <%u>\n", ndpoffset);
 		goto error;
 	}
-
-	ndp16 = (struct usb_cdc_ncm_ndp16 *)(((u8 *)skb_in->data) + len);
-
-	if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
-		pr_debug("invalid DPT16 signature <%u>\n",
-					le32_to_cpu(ndp16->dwSignature));
-		goto error;
-	}
+	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
 
 	if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
 		pr_debug("invalid DPT16 length <%u>\n",
@@ -1018,20 +921,52 @@
 		goto error;
 	}
 
-	nframes = ((le16_to_cpu(ndp16->wLength) -
+	ret = ((le16_to_cpu(ndp16->wLength) -
 					sizeof(struct usb_cdc_ncm_ndp16)) /
 					sizeof(struct usb_cdc_ncm_dpe16));
-	nframes--; /* we process NDP entries except for the last one */
+	ret--; /* we process NDP entries except for the last one */
 
-	len += sizeof(struct usb_cdc_ncm_ndp16);
-
-	if ((len + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) >
+	if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
 								skb_in->len) {
-		pr_debug("Invalid nframes = %d\n", nframes);
-		goto error;
+		pr_debug("Invalid nframes = %d\n", ret);
+		ret = -EINVAL;
 	}
 
-	dpe16 = (struct usb_cdc_ncm_dpe16 *)(((u8 *)skb_in->data) + len);
+error:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+	struct sk_buff *skb;
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	int len;
+	int nframes;
+	int x;
+	int offset;
+	struct usb_cdc_ncm_ndp16 *ndp16;
+	struct usb_cdc_ncm_dpe16 *dpe16;
+	int ndpoffset;
+	int loopcount = 50; /* arbitrary max preventing infinite loop */
+
+	ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+	if (ndpoffset < 0)
+		goto error;
+
+next_ndp:
+	nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+	if (nframes < 0)
+		goto error;
+
+	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+	if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+		pr_debug("invalid DPT16 signature <%u>\n",
+			 le32_to_cpu(ndp16->dwSignature));
+		goto err_ndp;
+	}
+	dpe16 = ndp16->dpe16;
 
 	for (x = 0; x < nframes; x++, dpe16++) {
 		offset = le16_to_cpu(dpe16->wDatagramIndex);
@@ -1043,7 +978,7 @@
 		 */
 		if ((offset == 0) || (len == 0)) {
 			if (!x)
-				goto error; /* empty NTB */
+				goto err_ndp; /* empty NTB */
 			break;
 		}
 
@@ -1054,7 +989,7 @@
 					"offset[%u]=%u, length=%u, skb=%p\n",
 					x, offset, len, skb_in);
 			if (!x)
-				goto error;
+				goto err_ndp;
 			break;
 
 		} else {
@@ -1067,6 +1002,12 @@
 			usbnet_skb_return(dev, skb);
 		}
 	}
+err_ndp:
+	/* are there more NDPs to process? */
+	ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+	if (ndpoffset && loopcount--)
+		goto next_ndp;
+
 	return 1;
 error:
 	return 0;
@@ -1131,7 +1072,7 @@
 		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
 		 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
 		 */
-		ctx->connected = event->wValue;
+		ctx->connected = le16_to_cpu(event->wValue);
 
 		printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
 			" %sconnected\n",
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e0433ce..3f554c1 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -56,27 +56,12 @@
 
 static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
 {
-	void *buf;
-	int err = -ENOMEM;
-
-	netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
-
-	buf = kmalloc(length, GFP_KERNEL);
-	if (!buf)
-		goto out;
-
-	err = usb_control_msg(dev->udev,
-			      usb_rcvctrlpipe(dev->udev, 0),
-			      DM_READ_REGS,
-			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			      0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
-	if (err == length)
-		memcpy(data, buf, length);
-	else if (err >= 0)
+	int err;
+	err = usbnet_read_cmd(dev, DM_READ_REGS,
+			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       0, reg, data, length);
+	if (err != length && err >= 0)
 		err = -EINVAL;
-	kfree(buf);
-
- out:
 	return err;
 }
 
@@ -87,91 +72,29 @@
 
 static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
 {
-	void *buf = NULL;
-	int err = -ENOMEM;
+	int err;
+	err = usbnet_write_cmd(dev, DM_WRITE_REGS,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0, reg, data, length);
 
-	netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
-
-	if (data) {
-		buf = kmemdup(data, length, GFP_KERNEL);
-		if (!buf)
-			goto out;
-	}
-
-	err = usb_control_msg(dev->udev,
-			      usb_sndctrlpipe(dev->udev, 0),
-			      DM_WRITE_REGS,
-			      USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
-			      0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
-	kfree(buf);
 	if (err >= 0 && err < length)
 		err = -EINVAL;
- out:
 	return err;
 }
 
 static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
 {
-	netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
-		   reg, value);
-	return usb_control_msg(dev->udev,
-			       usb_sndctrlpipe(dev->udev, 0),
-			       DM_WRITE_REG,
-			       USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
-			       value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-static void dm_write_async_callback(struct urb *urb)
-{
-	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
-	int status = urb->status;
-
-	if (status < 0)
-		printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
-		       status);
-
-	kfree(req);
-	usb_free_urb(urb);
+	return usbnet_write_cmd(dev, DM_WRITE_REG,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				value, reg, NULL, 0);
 }
 
 static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
 				  u16 length, void *data)
 {
-	struct usb_ctrlrequest *req;
-	struct urb *urb;
-	int status;
-
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
-		return;
-	}
-
-	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-	if (!req) {
-		netdev_err(dev->net, "Failed to allocate memory for control request\n");
-		usb_free_urb(urb);
-		return;
-	}
-
-	req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-	req->bRequest = length ? DM_WRITE_REGS : DM_WRITE_REG;
-	req->wValue = cpu_to_le16(value);
-	req->wIndex = cpu_to_le16(reg);
-	req->wLength = cpu_to_le16(length);
-
-	usb_fill_control_urb(urb, dev->udev,
-			     usb_sndctrlpipe(dev->udev, 0),
-			     (void *)req, data, length,
-			     dm_write_async_callback, req);
-
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status < 0) {
-		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
-			   status);
-		kfree(req);
-		usb_free_urb(urb);
-	}
+	usbnet_write_cmd_async(dev, length ? DM_WRITE_REGS : DM_WRITE_REG,
+			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       value, reg, data, length);
 }
 
 static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 8de6417..ace9e74 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -116,23 +116,8 @@
 	return skb;
 }
 
-static void int51x1_async_cmd_callback(struct urb *urb)
-{
-	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
-	int status = urb->status;
-
-	if (status < 0)
-		dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
-
-	kfree(req);
-	usb_free_urb(urb);
-}
-
 static void int51x1_set_multicast(struct net_device *netdev)
 {
-	struct usb_ctrlrequest *req;
-	int status;
-	struct urb *urb;
 	struct usbnet *dev = netdev_priv(netdev);
 	u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
 
@@ -149,40 +134,9 @@
 		netdev_dbg(dev->net, "receive own packets only\n");
 	}
 
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		netdev_warn(dev->net, "Error allocating URB\n");
-		return;
-	}
-
-	req = kmalloc(sizeof(*req), GFP_ATOMIC);
-	if (!req) {
-		netdev_warn(dev->net, "Error allocating control msg\n");
-		goto out;
-	}
-
-	req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
-	req->bRequest = SET_ETHERNET_PACKET_FILTER;
-	req->wValue = cpu_to_le16(filter);
-	req->wIndex = 0;
-	req->wLength = 0;
-
-	usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		(void *)req, NULL, 0,
-		int51x1_async_cmd_callback,
-		(void *)req);
-
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status < 0) {
-		netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
-			    status);
-		goto out1;
-	}
-	return;
-out1:
-	kfree(req);
-out:
-	usb_free_urb(urb);
+	usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER,
+			       USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			       filter, 0, NULL, 0);
 }
 
 static const struct net_device_ops int51x1_netdev_ops = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index cc7e720..3f3f566 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -124,93 +124,20 @@
 
 static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
 {
-	struct usb_device *xdev = dev->udev;
-	int ret;
-	void *buffer;
-
-	buffer = kmalloc(size, GFP_NOIO);
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
-			      MCS7830_RD_BMREQ, 0x0000, index, buffer,
-			      size, MCS7830_CTRL_TIMEOUT);
-	memcpy(data, buffer, size);
-	kfree(buffer);
-
-	return ret;
+	return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
+				0x0000, index, data, size);
 }
 
 static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
 {
-	struct usb_device *xdev = dev->udev;
-	int ret;
-	void *buffer;
-
-	buffer = kmemdup(data, size, GFP_NOIO);
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
-			      MCS7830_WR_BMREQ, 0x0000, index, buffer,
-			      size, MCS7830_CTRL_TIMEOUT);
-	kfree(buffer);
-	return ret;
-}
-
-static void mcs7830_async_cmd_callback(struct urb *urb)
-{
-	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
-	int status = urb->status;
-
-	if (status < 0)
-		printk(KERN_DEBUG "%s() failed with %d\n",
-		       __func__, status);
-
-	kfree(req);
-	usb_free_urb(urb);
+	return usbnet_write_cmd(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+				0x0000, index, data, size);
 }
 
 static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
 {
-	struct usb_ctrlrequest *req;
-	int ret;
-	struct urb *urb;
-
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		dev_dbg(&dev->udev->dev,
-			"Error allocating URB in write_cmd_async!\n");
-		return;
-	}
-
-	req = kmalloc(sizeof *req, GFP_ATOMIC);
-	if (!req) {
-		dev_err(&dev->udev->dev,
-			"Failed to allocate memory for control request\n");
-		goto out;
-	}
-	req->bRequestType = MCS7830_WR_BMREQ;
-	req->bRequest = MCS7830_WR_BREQ;
-	req->wValue = 0;
-	req->wIndex = cpu_to_le16(index);
-	req->wLength = cpu_to_le16(size);
-
-	usb_fill_control_urb(urb, dev->udev,
-			     usb_sndctrlpipe(dev->udev, 0),
-			     (void *)req, data, size,
-			     mcs7830_async_cmd_callback, req);
-
-	ret = usb_submit_urb(urb, GFP_ATOMIC);
-	if (ret < 0) {
-		dev_err(&dev->udev->dev,
-			"Error submitting the control message: ret=%d\n", ret);
-		goto out;
-	}
-	return;
-out:
-	kfree(req);
-	usb_free_urb(urb);
+	usbnet_write_cmd_async(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+				0x0000, index, data, size);
 }
 
 static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index c062a3e..93e0716 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -109,13 +109,11 @@
 static int
 nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
 {
-	int status = usb_control_msg(dev->udev,
-		usb_rcvctrlpipe(dev->udev, 0),
-		req,
-		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		0, regnum,
-		retval_ptr, sizeof *retval_ptr,
-		USB_CTRL_GET_TIMEOUT);
+	int status = usbnet_read_cmd(dev, req,
+				     USB_DIR_IN | USB_TYPE_VENDOR |
+				     USB_RECIP_DEVICE,
+				     0, regnum, retval_ptr,
+				     sizeof *retval_ptr);
 	if (status > 0)
 		status = 0;
 	if (!status)
@@ -133,13 +131,9 @@
 static void
 nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
 {
-	usb_control_msg(dev->udev,
-		usb_sndctrlpipe(dev->udev, 0),
-		req,
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		value, regnum,
-		NULL, 0,			// data is in setup packet
-		USB_CTRL_SET_TIMEOUT);
+	usbnet_write_cmd(dev, req,
+			 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			 value, regnum, NULL, 0);
 }
 
 static inline void
@@ -288,37 +282,34 @@
 static int net1080_reset(struct usbnet *dev)
 {
 	u16		usbctl, status, ttl;
-	u16		*vp = kmalloc(sizeof (u16), GFP_KERNEL);
+	u16		vp;
 	int		retval;
 
-	if (!vp)
-		return -ENOMEM;
-
 	// nc_dump_registers(dev);
 
-	if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
+	if ((retval = nc_register_read(dev, REG_STATUS, &vp)) < 0) {
 		netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
 			   dev->udev->bus->bus_name, dev->udev->devpath, retval);
 		goto done;
 	}
-	status = *vp;
+	status = vp;
 	nc_dump_status(dev, status);
 
-	if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
+	if ((retval = nc_register_read(dev, REG_USBCTL, &vp)) < 0) {
 		netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
 		goto done;
 	}
-	usbctl = *vp;
+	usbctl = vp;
 	nc_dump_usbctl(dev, usbctl);
 
 	nc_register_write(dev, REG_USBCTL,
 			USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
 
-	if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
+	if ((retval = nc_register_read(dev, REG_TTL, &vp)) < 0) {
 		netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
 		goto done;
 	}
-	ttl = *vp;
+	ttl = vp;
 	// nc_dump_ttl(dev, ttl);
 
 	nc_register_write(dev, REG_TTL,
@@ -331,7 +322,6 @@
 	retval = 0;
 
 done:
-	kfree(vp);
 	return retval;
 }
 
@@ -339,13 +329,10 @@
 {
 	int			retval;
 	u16			status;
-	u16			*vp = kmalloc(sizeof (u16), GFP_KERNEL);
+	u16			vp;
 
-	if (!vp)
-		return -ENOMEM;
-	retval = nc_register_read(dev, REG_STATUS, vp);
-	status = *vp;
-	kfree(vp);
+	retval = nc_register_read(dev, REG_STATUS, &vp);
+	status = vp;
 	if (retval != 0) {
 		netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
 		return retval;
@@ -355,59 +342,22 @@
 	return 0;
 }
 
-static void nc_flush_complete(struct urb *urb)
-{
-	kfree(urb->context);
-	usb_free_urb(urb);
-}
-
 static void nc_ensure_sync(struct usbnet *dev)
 {
-	dev->frame_errors++;
-	if (dev->frame_errors > 5) {
-		struct urb		*urb;
-		struct usb_ctrlrequest	*req;
-		int			status;
+	if (++dev->frame_errors <= 5)
+		return;
 
-		/* Send a flush */
-		urb = usb_alloc_urb(0, GFP_ATOMIC);
-		if (!urb)
-			return;
+	if (usbnet_write_cmd_async(dev, REQUEST_REGISTER,
+					USB_DIR_OUT | USB_TYPE_VENDOR |
+					USB_RECIP_DEVICE,
+					USBCTL_FLUSH_THIS |
+					USBCTL_FLUSH_OTHER,
+					REG_USBCTL, NULL, 0))
+		return;
 
-		req = kmalloc(sizeof *req, GFP_ATOMIC);
-		if (!req) {
-			usb_free_urb(urb);
-			return;
-		}
-
-		req->bRequestType = USB_DIR_OUT
-			| USB_TYPE_VENDOR
-			| USB_RECIP_DEVICE;
-		req->bRequest = REQUEST_REGISTER;
-		req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
-				| USBCTL_FLUSH_OTHER);
-		req->wIndex = cpu_to_le16(REG_USBCTL);
-		req->wLength = cpu_to_le16(0);
-
-		/* queue an async control request, we don't need
-		 * to do anything when it finishes except clean up.
-		 */
-		usb_fill_control_urb(urb, dev->udev,
-			usb_sndctrlpipe(dev->udev, 0),
-			(unsigned char *) req,
-			NULL, 0,
-			nc_flush_complete, req);
-		status = usb_submit_urb(urb, GFP_ATOMIC);
-		if (status) {
-			kfree(req);
-			usb_free_urb(urb);
-			return;
-		}
-
-		netif_dbg(dev, rx_err, dev->net,
-			  "flush net1080; too many framing errors\n");
-		dev->frame_errors = 0;
-	}
+	netif_dbg(dev, rx_err, dev->net,
+		  "flush net1080; too many framing errors\n");
+	dev->frame_errors = 0;
 }
 
 static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 4584b9a..0fcc8e6 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -71,13 +71,10 @@
 static inline int
 pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
 {
-	return usb_control_msg(dev->udev,
-		usb_rcvctrlpipe(dev->udev, 0),
-		req,
-		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		val, index,
-		NULL, 0,
-		USB_CTRL_GET_TIMEOUT);
+	return usbnet_read_cmd(dev, req,
+				USB_DIR_IN | USB_TYPE_VENDOR |
+				USB_RECIP_DEVICE,
+				val, index, NULL, 0);
 }
 
 static inline int
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c27d277..18dd425 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -311,10 +311,9 @@
 	struct sierra_net_data *priv = sierra_net_get_private(dev);
 	int  status;
 
-	status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-			USB_CDC_SEND_ENCAPSULATED_COMMAND,
-			USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,	0,
-			priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+	status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND,
+				  USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+				  0, priv->ifnum, cmd, cmdlen);
 
 	if (status != cmdlen && status != -ENODEV)
 		netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
@@ -340,7 +339,7 @@
 	dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
 	priv->tx_hdr_template[0] = 0x3F;
 	priv->tx_hdr_template[1] = ctx_ix;
-	*((u16 *)&priv->tx_hdr_template[2]) =
+	*((__be16 *)&priv->tx_hdr_template[2]) =
 		cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
 }
 
@@ -632,32 +631,22 @@
 static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
 {
 	int result = 0;
-	u16 *attrdata;
+	__le16 attrdata;
 
-	attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
-	if (!attrdata)
-		return -ENOMEM;
+	result = usbnet_read_cmd(dev,
+				/* _u8 vendor specific request */
+				SWI_USB_REQUEST_GET_FW_ATTR,
+				USB_DIR_IN | USB_TYPE_VENDOR,	/* __u8 request type */
+				0x0000,		/* __u16 value not used */
+				0x0000,		/* __u16 index  not used */
+				&attrdata,	/* char *data */
+				sizeof(attrdata)	/* __u16 size */
+				);
 
-	result = usb_control_msg(
-			dev->udev,
-			usb_rcvctrlpipe(dev->udev, 0),
-			/* _u8 vendor specific request */
-			SWI_USB_REQUEST_GET_FW_ATTR,
-			USB_DIR_IN | USB_TYPE_VENDOR,	/* __u8 request type */
-			0x0000,		/* __u16 value not used */
-			0x0000,		/* __u16 index  not used */
-			attrdata,	/* char *data */
-			sizeof(*attrdata),		/* __u16 size */
-			USB_CTRL_SET_TIMEOUT);	/* int timeout */
-
-	if (result < 0) {
-		kfree(attrdata);
+	if (result < 0)
 		return -EIO;
-	}
 
-	*datap = le16_to_cpu(*attrdata);
-
-	kfree(attrdata);
+	*datap = le16_to_cpu(attrdata);
 	return result;
 }
 
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index b77ae76..1823806 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -26,6 +26,8 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
@@ -52,7 +54,15 @@
 #define USB_PRODUCT_ID_LAN7500		(0x7500)
 #define USB_PRODUCT_ID_LAN7505		(0x7505)
 #define RXW_PADDING			2
-#define SUPPORTED_WAKE			(WAKE_MAGIC)
+#define SUPPORTED_WAKE			(WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
+					 WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
+
+#define SUSPEND_SUSPEND0		(0x01)
+#define SUSPEND_SUSPEND1		(0x02)
+#define SUSPEND_SUSPEND2		(0x04)
+#define SUSPEND_SUSPEND3		(0x08)
+#define SUSPEND_ALLMODES		(SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
+					 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
 
 #define check_warn(ret, fmt, args...) \
 	({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -71,6 +81,7 @@
 	struct mutex dataport_mutex;
 	spinlock_t rfe_ctl_lock;
 	struct work_struct set_multicast;
+	u8 suspend_flags;
 };
 
 struct usb_context {
@@ -82,96 +93,96 @@
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
-static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
-					  u32 *data)
+static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+					    u32 *data, int in_pm)
 {
-	u32 *buf = kmalloc(4, GFP_KERNEL);
+	u32 buf;
 	int ret;
+	int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
 
 	BUG_ON(!dev);
 
-	if (!buf)
-		return -ENOMEM;
+	if (!in_pm)
+		fn = usbnet_read_cmd;
+	else
+		fn = usbnet_read_cmd_nopm;
 
-	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
-		USB_VENDOR_REQUEST_READ_REGISTER,
-		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
-
+	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		 0, index, &buf, 4);
 	if (unlikely(ret < 0))
-		netdev_warn(dev->net,
-			"Failed to read reg index 0x%08x: %d", index, ret);
+		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+			    index, ret);
 
-	le32_to_cpus(buf);
-	*data = *buf;
-	kfree(buf);
+	le32_to_cpus(&buf);
+	*data = buf;
 
 	return ret;
 }
 
+static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index,
+					     u32 data, int in_pm)
+{
+	u32 buf;
+	int ret;
+	int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+	BUG_ON(!dev);
+
+	if (!in_pm)
+		fn = usbnet_write_cmd;
+	else
+		fn = usbnet_write_cmd_nopm;
+
+	buf = data;
+	cpu_to_le32s(&buf);
+
+	ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		 0, index, &buf, 4);
+	if (unlikely(ret < 0))
+		netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
+			    index, ret);
+
+	return ret;
+}
+
+static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index,
+					       u32 *data)
+{
+	return __smsc75xx_read_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index,
+						u32 data)
+{
+	return __smsc75xx_write_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
+					  u32 *data)
+{
+	return __smsc75xx_read_reg(dev, index, data, 0);
+}
+
 static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
 					   u32 data)
 {
-	u32 *buf = kmalloc(4, GFP_KERNEL);
-	int ret;
-
-	BUG_ON(!dev);
-
-	if (!buf)
-		return -ENOMEM;
-
-	*buf = data;
-	cpu_to_le32s(buf);
-
-	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_VENDOR_REQUEST_WRITE_REGISTER,
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
-
-	if (unlikely(ret < 0))
-		netdev_warn(dev->net,
-			"Failed to write reg index 0x%08x: %d", index, ret);
-
-	kfree(buf);
-
-	return ret;
-}
-
-static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
-{
-	if (WARN_ON_ONCE(!dev))
-		return -EINVAL;
-
-	cpu_to_le32s(&feature);
-
-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
-		USB_CTRL_SET_TIMEOUT);
-}
-
-static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
-{
-	if (WARN_ON_ONCE(!dev))
-		return -EINVAL;
-
-	cpu_to_le32s(&feature);
-
-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
-		USB_CTRL_SET_TIMEOUT);
+	return __smsc75xx_write_reg(dev, index, data, 0);
 }
 
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
-static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
+static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev,
+						     int in_pm)
 {
 	unsigned long start_time = jiffies;
 	u32 val;
 	int ret;
 
 	do {
-		ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
-		check_warn_return(ret, "Error reading MII_ACCESS");
+		ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm);
+		check_warn_return(ret, "Error reading MII_ACCESS\n");
 
 		if (!(val & MII_ACCESS_BUSY))
 			return 0;
@@ -180,7 +191,8 @@
 	return -EIO;
 }
 
-static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+				int in_pm)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	u32 val, addr;
@@ -189,8 +201,8 @@
 	mutex_lock(&dev->phy_mutex);
 
 	/* confirm MII not busy */
-	ret = smsc75xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read");
+	ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read\n");
 
 	/* set the address, index & direction (read from PHY) */
 	phy_id &= dev->mii.phy_id_mask;
@@ -198,14 +210,14 @@
 	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
 		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
 		| MII_ACCESS_READ | MII_ACCESS_BUSY;
-	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
-	check_warn_goto_done(ret, "Error writing MII_ACCESS");
+	ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_ACCESS\n");
 
-	ret = smsc75xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
+	ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "Timed out reading MII reg %02X\n", idx);
 
-	ret = smsc75xx_read_reg(dev, MII_DATA, &val);
-	check_warn_goto_done(ret, "Error reading MII_DATA");
+	ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm);
+	check_warn_goto_done(ret, "Error reading MII_DATA\n");
 
 	ret = (u16)(val & 0xFFFF);
 
@@ -214,8 +226,8 @@
 	return ret;
 }
 
-static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
-				int regval)
+static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id,
+				  int idx, int regval, int in_pm)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	u32 val, addr;
@@ -224,12 +236,12 @@
 	mutex_lock(&dev->phy_mutex);
 
 	/* confirm MII not busy */
-	ret = smsc75xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write");
+	ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write\n");
 
 	val = regval;
-	ret = smsc75xx_write_reg(dev, MII_DATA, val);
-	check_warn_goto_done(ret, "Error writing MII_DATA");
+	ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_DATA\n");
 
 	/* set the address, index & direction (write to PHY) */
 	phy_id &= dev->mii.phy_id_mask;
@@ -237,16 +249,39 @@
 	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
 		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
 		| MII_ACCESS_WRITE | MII_ACCESS_BUSY;
-	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
-	check_warn_goto_done(ret, "Error writing MII_ACCESS");
+	ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_ACCESS\n");
 
-	ret = smsc75xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+	ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "Timed out writing MII reg %02X\n", idx);
 
 done:
 	mutex_unlock(&dev->phy_mutex);
 }
 
+static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
+				   int idx)
+{
+	return __smsc75xx_mdio_read(netdev, phy_id, idx, 1);
+}
+
+static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
+				     int idx, int regval)
+{
+	__smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1);
+}
+
+static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+	return __smsc75xx_mdio_read(netdev, phy_id, idx, 0);
+}
+
+static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+				int regval)
+{
+	__smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0);
+}
+
 static int smsc75xx_wait_eeprom(struct usbnet *dev)
 {
 	unsigned long start_time = jiffies;
@@ -255,7 +290,7 @@
 
 	do {
 		ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
-		check_warn_return(ret, "Error reading E2P_CMD");
+		check_warn_return(ret, "Error reading E2P_CMD\n");
 
 		if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
 			break;
@@ -263,7 +298,7 @@
 	} while (!time_after(jiffies, start_time + HZ));
 
 	if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
-		netdev_warn(dev->net, "EEPROM read operation timeout");
+		netdev_warn(dev->net, "EEPROM read operation timeout\n");
 		return -EIO;
 	}
 
@@ -278,7 +313,7 @@
 
 	do {
 		ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
-		check_warn_return(ret, "Error reading E2P_CMD");
+		check_warn_return(ret, "Error reading E2P_CMD\n");
 
 		if (!(val & E2P_CMD_BUSY))
 			return 0;
@@ -286,7 +321,7 @@
 		udelay(40);
 	} while (!time_after(jiffies, start_time + HZ));
 
-	netdev_warn(dev->net, "EEPROM is busy");
+	netdev_warn(dev->net, "EEPROM is busy\n");
 	return -EIO;
 }
 
@@ -306,14 +341,14 @@
 	for (i = 0; i < length; i++) {
 		val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
 		ret = smsc75xx_write_reg(dev, E2P_CMD, val);
-		check_warn_return(ret, "Error writing E2P_CMD");
+		check_warn_return(ret, "Error writing E2P_CMD\n");
 
 		ret = smsc75xx_wait_eeprom(dev);
 		if (ret < 0)
 			return ret;
 
 		ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
-		check_warn_return(ret, "Error reading E2P_DATA");
+		check_warn_return(ret, "Error reading E2P_DATA\n");
 
 		data[i] = val & 0xFF;
 		offset++;
@@ -338,7 +373,7 @@
 	/* Issue write/erase enable command */
 	val = E2P_CMD_BUSY | E2P_CMD_EWEN;
 	ret = smsc75xx_write_reg(dev, E2P_CMD, val);
-	check_warn_return(ret, "Error writing E2P_CMD");
+	check_warn_return(ret, "Error writing E2P_CMD\n");
 
 	ret = smsc75xx_wait_eeprom(dev);
 	if (ret < 0)
@@ -349,12 +384,12 @@
 		/* Fill data register */
 		val = data[i];
 		ret = smsc75xx_write_reg(dev, E2P_DATA, val);
-		check_warn_return(ret, "Error writing E2P_DATA");
+		check_warn_return(ret, "Error writing E2P_DATA\n");
 
 		/* Send "write" command */
 		val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
 		ret = smsc75xx_write_reg(dev, E2P_CMD, val);
-		check_warn_return(ret, "Error writing E2P_CMD");
+		check_warn_return(ret, "Error writing E2P_CMD\n");
 
 		ret = smsc75xx_wait_eeprom(dev);
 		if (ret < 0)
@@ -373,7 +408,7 @@
 	for (i = 0; i < 100; i++) {
 		u32 dp_sel;
 		ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
-		check_warn_return(ret, "Error reading DP_SEL");
+		check_warn_return(ret, "Error reading DP_SEL\n");
 
 		if (dp_sel & DP_SEL_DPRDY)
 			return 0;
@@ -381,7 +416,7 @@
 		udelay(40);
 	}
 
-	netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out");
+	netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n");
 
 	return -EIO;
 }
@@ -396,28 +431,28 @@
 	mutex_lock(&pdata->dataport_mutex);
 
 	ret = smsc75xx_dataport_wait_not_busy(dev);
-	check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry");
+	check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry\n");
 
 	ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
-	check_warn_goto_done(ret, "Error reading DP_SEL");
+	check_warn_goto_done(ret, "Error reading DP_SEL\n");
 
 	dp_sel &= ~DP_SEL_RSEL;
 	dp_sel |= ram_select;
 	ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
-	check_warn_goto_done(ret, "Error writing DP_SEL");
+	check_warn_goto_done(ret, "Error writing DP_SEL\n");
 
 	for (i = 0; i < length; i++) {
 		ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
-		check_warn_goto_done(ret, "Error writing DP_ADDR");
+		check_warn_goto_done(ret, "Error writing DP_ADDR\n");
 
 		ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
-		check_warn_goto_done(ret, "Error writing DP_DATA");
+		check_warn_goto_done(ret, "Error writing DP_DATA\n");
 
 		ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
-		check_warn_goto_done(ret, "Error writing DP_CMD");
+		check_warn_goto_done(ret, "Error writing DP_CMD\n");
 
 		ret = smsc75xx_dataport_wait_not_busy(dev);
-		check_warn_goto_done(ret, "smsc75xx_dataport_write timeout");
+		check_warn_goto_done(ret, "smsc75xx_dataport_write timeout\n");
 	}
 
 done:
@@ -438,14 +473,14 @@
 	struct usbnet *dev = pdata->dev;
 	int ret;
 
-	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x",
-		pdata->rfe_ctl);
+	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+		  pdata->rfe_ctl);
 
 	smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
 		DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	check_warn(ret, "Error writing RFE_CRL");
+	check_warn(ret, "Error writing RFE_CTL\n");
 }
 
 static void smsc75xx_set_multicast(struct net_device *netdev)
@@ -465,15 +500,15 @@
 		pdata->multicast_hash_table[i] = 0;
 
 	if (dev->net->flags & IFF_PROMISC) {
-		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
 		pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
 	} else if (dev->net->flags & IFF_ALLMULTI) {
-		netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
+		netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
 		pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
 	} else if (!netdev_mc_empty(dev->net)) {
 		struct netdev_hw_addr *ha;
 
-		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+		netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n");
 
 		pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
 
@@ -483,7 +518,7 @@
 				(1 << (bitnum % 32));
 		}
 	} else {
-		netif_dbg(dev, drv, dev->net, "receive own packets only");
+		netif_dbg(dev, drv, dev->net, "receive own packets only\n");
 		pdata->rfe_ctl |= RFE_CTL_DPF;
 	}
 
@@ -511,18 +546,18 @@
 		if (cap & FLOW_CTRL_RX)
 			flow |= FLOW_RX_FCEN;
 
-		netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
-			(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
-			(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+		netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
+			  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+			  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
 	} else {
-		netif_dbg(dev, link, dev->net, "half duplex");
+		netif_dbg(dev, link, dev->net, "half duplex\n");
 	}
 
 	ret = smsc75xx_write_reg(dev, FLOW, flow);
-	check_warn_return(ret, "Error writing FLOW");
+	check_warn_return(ret, "Error writing FLOW\n");
 
 	ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
-	check_warn_return(ret, "Error writing FCT_FLOW");
+	check_warn_return(ret, "Error writing FCT_FLOW\n");
 
 	return 0;
 }
@@ -539,16 +574,15 @@
 		PHY_INT_SRC_CLEAR_ALL);
 
 	ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
-	check_warn_return(ret, "Error writing INT_STS");
+	check_warn_return(ret, "Error writing INT_STS\n");
 
 	mii_check_media(mii, 1, 1);
 	mii_ethtool_gset(&dev->mii, &ecmd);
 	lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
 	rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
 
-	netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x"
-		  " rmtadv: %04x", ethtool_cmd_speed(&ecmd),
-		  ecmd.duplex, lcladv, rmtadv);
+	netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
+		  ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
 
 	return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
 }
@@ -558,21 +592,21 @@
 	u32 intdata;
 
 	if (urb->actual_length != 4) {
-		netdev_warn(dev->net,
-			"unexpected urb length %d", urb->actual_length);
+		netdev_warn(dev->net, "unexpected urb length %d\n",
+			    urb->actual_length);
 		return;
 	}
 
 	memcpy(&intdata, urb->transfer_buffer, 4);
 	le32_to_cpus(&intdata);
 
-	netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata);
+	netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
 
 	if (intdata & INT_ENP_PHY_INT)
 		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
 	else
-		netdev_warn(dev->net,
-			"unexpected interrupt, intdata=0x%08X", intdata);
+		netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
+			    intdata);
 }
 
 static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
@@ -596,8 +630,8 @@
 	struct usbnet *dev = netdev_priv(netdev);
 
 	if (ee->magic != LAN75XX_EEPROM_MAGIC) {
-		netdev_warn(dev->net,
-			"EEPROM: magic value mismatch: 0x%x", ee->magic);
+		netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n",
+			    ee->magic);
 		return -EINVAL;
 	}
 
@@ -619,8 +653,13 @@
 {
 	struct usbnet *dev = netdev_priv(net);
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	int ret;
 
 	pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+
+	ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+	check_warn_return(ret, "device_set_wakeup_enable error %d\n", ret);
+
 	return 0;
 }
 
@@ -657,14 +696,14 @@
 		if (is_valid_ether_addr(dev->net->dev_addr)) {
 			/* eeprom values are valid so use them */
 			netif_dbg(dev, ifup, dev->net,
-				"MAC address read from EEPROM");
+				  "MAC address read from EEPROM\n");
 			return;
 		}
 	}
 
 	/* no eeprom, or eeprom values are invalid. generate random MAC */
 	eth_hw_addr_random(dev->net);
-	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr");
+	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
 
 static int smsc75xx_set_mac_address(struct usbnet *dev)
@@ -674,17 +713,17 @@
 	u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;
 
 	int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
-	check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret);
+	check_warn_return(ret, "Failed to write RX_ADDRH: %d\n", ret);
 
 	ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
-	check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret);
+	check_warn_return(ret, "Failed to write RX_ADDRL: %d\n", ret);
 
 	addr_hi |= ADDR_FILTX_FB_VALID;
 	ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
-	check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret);
+	check_warn_return(ret, "Failed to write ADDR_FILTX: %d\n", ret);
 
 	ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
-	check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret);
+	check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d\n", ret);
 
 	return 0;
 }
@@ -708,12 +747,12 @@
 	do {
 		msleep(10);
 		bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
-		check_warn_return(bmcr, "Error reading MII_BMCR");
+		check_warn_return(bmcr, "Error reading MII_BMCR\n");
 		timeout++;
 	} while ((bmcr & BMCR_RESET) && (timeout < 100));
 
 	if (timeout >= 100) {
-		netdev_warn(dev->net, "timeout on PHY Reset");
+		netdev_warn(dev->net, "timeout on PHY Reset\n");
 		return -EIO;
 	}
 
@@ -725,14 +764,14 @@
 
 	/* read and write to clear phy interrupt status */
 	ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
-	check_warn_return(ret, "Error reading PHY_INT_SRC");
+	check_warn_return(ret, "Error reading PHY_INT_SRC\n");
 	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
 
 	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
 		PHY_INT_MASK_DEFAULT);
 	mii_nway_restart(&dev->mii);
 
-	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+	netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
 	return 0;
 }
 
@@ -743,14 +782,14 @@
 	bool rxenabled;
 
 	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
-	check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+	check_warn_return(ret, "Failed to read MAC_RX: %d\n", ret);
 
 	rxenabled = ((buf & MAC_RX_RXEN) != 0);
 
 	if (rxenabled) {
 		buf &= ~MAC_RX_RXEN;
 		ret = smsc75xx_write_reg(dev, MAC_RX, buf);
-		check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+		check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret);
 	}
 
 	/* add 4 to size for FCS */
@@ -758,12 +797,12 @@
 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);
 
 	ret = smsc75xx_write_reg(dev, MAC_RX, buf);
-	check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+	check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret);
 
 	if (rxenabled) {
 		buf |= MAC_RX_RXEN;
 		ret = smsc75xx_write_reg(dev, MAC_RX, buf);
-		check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+		check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret);
 	}
 
 	return 0;
@@ -774,7 +813,7 @@
 	struct usbnet *dev = netdev_priv(netdev);
 
 	int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
-	check_warn_return(ret, "Failed to set mac rx frame length");
+	check_warn_return(ret, "Failed to set mac rx frame length\n");
 
 	return usbnet_change_mtu(netdev, new_mtu);
 }
@@ -799,19 +838,22 @@
 	/* it's racing here! */
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	check_warn_return(ret, "Error writing RFE_CTL");
+	check_warn_return(ret, "Error writing RFE_CTL\n");
 
 	return 0;
 }
 
-static int smsc75xx_wait_ready(struct usbnet *dev)
+static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
 {
 	int timeout = 0;
 
 	do {
 		u32 buf;
-		int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
-		check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+		int ret;
+
+		ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm);
+
+		check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret);
 
 		if (buf & PMT_CTL_DEV_RDY)
 			return 0;
@@ -820,7 +862,7 @@
 		timeout++;
 	} while (timeout < 100);
 
-	netdev_warn(dev->net, "timeout waiting for device ready");
+	netdev_warn(dev->net, "timeout waiting for device ready\n");
 	return -EIO;
 }
 
@@ -830,79 +872,81 @@
 	u32 buf;
 	int ret = 0, timeout;
 
-	netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
+	netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n");
 
-	ret = smsc75xx_wait_ready(dev);
-	check_warn_return(ret, "device not ready in smsc75xx_reset");
+	ret = smsc75xx_wait_ready(dev, 0);
+	check_warn_return(ret, "device not ready in smsc75xx_reset\n");
 
 	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+	check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
 	buf |= HW_CFG_LRST;
 
 	ret = smsc75xx_write_reg(dev, HW_CFG, buf);
-	check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+	check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
 
 	timeout = 0;
 	do {
 		msleep(10);
 		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+		check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 		timeout++;
 	} while ((buf & HW_CFG_LRST) && (timeout < 100));
 
 	if (timeout >= 100) {
-		netdev_warn(dev->net, "timeout on completion of Lite Reset");
+		netdev_warn(dev->net, "timeout on completion of Lite Reset\n");
 		return -EIO;
 	}
 
-	netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY");
+	netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n");
 
 	ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
-	check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret);
 
 	buf |= PMT_CTL_PHY_RST;
 
 	ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
-	check_warn_return(ret, "Failed to write PMT_CTL: %d", ret);
+	check_warn_return(ret, "Failed to write PMT_CTL: %d\n", ret);
 
 	timeout = 0;
 	do {
 		msleep(10);
 		ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
-		check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+		check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret);
 		timeout++;
 	} while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
 
 	if (timeout >= 100) {
-		netdev_warn(dev->net, "timeout waiting for PHY Reset");
+		netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
 		return -EIO;
 	}
 
-	netif_dbg(dev, ifup, dev->net, "PHY reset complete");
+	netif_dbg(dev, ifup, dev->net, "PHY reset complete\n");
 
 	smsc75xx_init_mac_address(dev);
 
 	ret = smsc75xx_set_mac_address(dev);
-	check_warn_return(ret, "Failed to set mac address");
+	check_warn_return(ret, "Failed to set mac address\n");
 
-	netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr);
+	netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
+		  dev->net->dev_addr);
 
 	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+	check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
+		  buf);
 
 	buf |= HW_CFG_BIR;
 
 	ret = smsc75xx_write_reg(dev, HW_CFG, buf);
-	check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+	check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+	check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after "
-			"writing HW_CFG_BIR: 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n",
+		  buf);
 
 	if (!turbo_mode) {
 		buf = 0;
@@ -915,99 +959,102 @@
 		dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
 	}
 
-	netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld",
-		(ulong)dev->rx_urb_size);
+	netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
+		  (ulong)dev->rx_urb_size);
 
 	ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
-	check_warn_return(ret, "Failed to write BURST_CAP: %d", ret);
+	check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
-	check_warn_return(ret, "Failed to read BURST_CAP: %d", ret);
+	check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
 
 	netif_dbg(dev, ifup, dev->net,
-		"Read Value from BURST_CAP after writing: 0x%08x", buf);
+		  "Read Value from BURST_CAP after writing: 0x%08x\n", buf);
 
 	ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
-	check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret);
+	check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
-	check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret);
+	check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
 
 	netif_dbg(dev, ifup, dev->net,
-		"Read Value from BULK_IN_DLY after writing: 0x%08x", buf);
+		  "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf);
 
 	if (turbo_mode) {
 		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+		check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
 
 		buf |= (HW_CFG_MEF | HW_CFG_BCE);
 
 		ret = smsc75xx_write_reg(dev, HW_CFG, buf);
-		check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+		check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
 
 		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
-		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+		check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
 	}
 
 	/* set FIFO sizes */
 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
 	ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
-	check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret);
+	check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf);
 
 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
 	ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
-	check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret);
+	check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf);
 
 	ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
-	check_warn_return(ret, "Failed to write INT_STS: %d", ret);
+	check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, ID_REV, &buf);
-	check_warn_return(ret, "Failed to read ID_REV: %d", ret);
+	check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf);
 
 	ret = smsc75xx_read_reg(dev, E2P_CMD, &buf);
-	check_warn_return(ret, "Failed to read E2P_CMD: %d", ret);
+	check_warn_return(ret, "Failed to read E2P_CMD: %d\n", ret);
 
 	/* only set default GPIO/LED settings if no EEPROM is detected */
 	if (!(buf & E2P_CMD_LOADED)) {
 		ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
-		check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
+		check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d\n",
+				  ret);
 
 		buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
 		buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
 
 		ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
-		check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
+		check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n",
+				  ret);
 	}
 
 	ret = smsc75xx_write_reg(dev, FLOW, 0);
-	check_warn_return(ret, "Failed to write FLOW: %d", ret);
+	check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
 
 	ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
-	check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret);
+	check_warn_return(ret, "Failed to write FCT_FLOW: %d\n", ret);
 
 	/* Don't need rfe_ctl_lock during initialisation */
 	ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
-	check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read RFE_CTL: %d\n", ret);
 
 	pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	check_warn_return(ret, "Failed to write RFE_CTL: %d", ret);
+	check_warn_return(ret, "Failed to write RFE_CTL: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
-	check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read RFE_CTL: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
+	netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n",
+		  pdata->rfe_ctl);
 
 	/* Enable or disable checksum offload engines */
 	smsc75xx_set_features(dev->net, dev->net->features);
@@ -1015,69 +1062,69 @@
 	smsc75xx_set_multicast(dev->net);
 
 	ret = smsc75xx_phy_initialize(dev);
-	check_warn_return(ret, "Failed to initialize PHY: %d", ret);
+	check_warn_return(ret, "Failed to initialize PHY: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
-	check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
 
 	/* enable PHY interrupts */
 	buf |= INT_ENP_PHY_INT;
 
 	ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
-	check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
+	check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
 
 	/* allow mac to detect speed and duplex from phy */
 	ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
-	check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
+	check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
 
 	buf |= (MAC_CR_ADD | MAC_CR_ASD);
 	ret = smsc75xx_write_reg(dev, MAC_CR, buf);
-	check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
+	check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
 
 	ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
-	check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
+	check_warn_return(ret, "Failed to read MAC_TX: %d\n", ret);
 
 	buf |= MAC_TX_TXEN;
 
 	ret = smsc75xx_write_reg(dev, MAC_TX, buf);
-	check_warn_return(ret, "Failed to write MAC_TX: %d", ret);
+	check_warn_return(ret, "Failed to write MAC_TX: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf);
 
 	ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
-	check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read FCT_TX_CTL: %d\n", ret);
 
 	buf |= FCT_TX_CTL_EN;
 
 	ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
-	check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret);
+	check_warn_return(ret, "Failed to write FCT_TX_CTL: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
 
 	ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
-	check_warn_return(ret, "Failed to set max rx frame length");
+	check_warn_return(ret, "Failed to set max rx frame length\n");
 
 	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
-	check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+	check_warn_return(ret, "Failed to read MAC_RX: %d\n", ret);
 
 	buf |= MAC_RX_RXEN;
 
 	ret = smsc75xx_write_reg(dev, MAC_RX, buf);
-	check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+	check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf);
 
 	ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
-	check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret);
+	check_warn_return(ret, "Failed to read FCT_RX_CTL: %d\n", ret);
 
 	buf |= FCT_RX_CTL_EN;
 
 	ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
-	check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret);
+	check_warn_return(ret, "Failed to write FCT_RX_CTL: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf);
+	netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf);
 
-	netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0");
+	netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n");
 	return 0;
 }
 
@@ -1102,14 +1149,14 @@
 	printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
 
 	ret = usbnet_get_endpoints(dev, intf);
-	check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret);
+	check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
 
 	dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
 		GFP_KERNEL);
 
 	pdata = (struct smsc75xx_priv *)(dev->data[0]);
 	if (!pdata) {
-		netdev_warn(dev->net, "Unable to allocate smsc75xx_priv");
+		netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n");
 		return -ENOMEM;
 	}
 
@@ -1134,6 +1181,7 @@
 
 	/* Init all registers */
 	ret = smsc75xx_reset(dev);
+	check_warn_return(ret, "smsc75xx_reset error %d\n", ret);
 
 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
 	dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
@@ -1147,172 +1195,476 @@
 {
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
 	if (pdata) {
-		netif_dbg(dev, ifdown, dev->net, "free pdata");
+		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
 		pdata = NULL;
 		dev->data[0] = 0;
 	}
 }
 
-static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
+static u16 smsc_crc(const u8 *buffer, size_t len)
 {
-	struct usbnet *dev = usb_get_intfdata(intf);
-	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	return bitrev16(crc16(0xFFFF, buffer, len));
+}
+
+static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg,
+			       u32 wuf_mask1)
+{
+	int cfg_base = WUF_CFGX + filter * 4;
+	int mask_base = WUF_MASKX + filter * 16;
 	int ret;
+
+	ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg);
+	check_warn_return(ret, "Error writing WUF_CFGX\n");
+
+	ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1);
+	check_warn_return(ret, "Error writing WUF_MASKX\n");
+
+	ret = smsc75xx_write_reg(dev, mask_base + 4, 0);
+	check_warn_return(ret, "Error writing WUF_MASKX\n");
+
+	ret = smsc75xx_write_reg(dev, mask_base + 8, 0);
+	check_warn_return(ret, "Error writing WUF_MASKX\n");
+
+	ret = smsc75xx_write_reg(dev, mask_base + 12, 0);
+	check_warn_return(ret, "Error writing WUF_MASKX\n");
+
+	return 0;
+}
+
+static int smsc75xx_enter_suspend0(struct usbnet *dev)
+{
+	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
 	u32 val;
+	int ret;
 
-	ret = usbnet_suspend(intf, message);
-	check_warn_return(ret, "usbnet_suspend error");
+	ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+	check_warn_return(ret, "Error reading PMT_CTL\n");
 
-	/* if no wol options set, enter lowest power SUSPEND2 mode */
-	if (!(pdata->wolopts & SUPPORTED_WAKE)) {
-		netdev_info(dev->net, "entering SUSPEND2 mode");
+	val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
+	val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
 
-		/* disable energy detect (link up) & wake up events */
-		ret = smsc75xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
 
-		val &= ~(WUCSR_MPEN | WUCSR_WUEN);
+	pdata->suspend_flags |= SUSPEND_SUSPEND0;
 
-		ret = smsc75xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
+	return 0;
+}
 
-		ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-		check_warn_return(ret, "Error reading PMT_CTL");
+static int smsc75xx_enter_suspend1(struct usbnet *dev)
+{
+	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	u32 val;
+	int ret;
 
-		val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
+	ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+	check_warn_return(ret, "Error reading PMT_CTL\n");
 
-		ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-		check_warn_return(ret, "Error writing PMT_CTL");
+	val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+	val |= PMT_CTL_SUS_MODE_1;
 
-		/* enter suspend2 mode */
-		ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-		check_warn_return(ret, "Error reading PMT_CTL");
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
 
-		val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
-		val |= PMT_CTL_SUS_MODE_2;
+	/* clear wol status, enable energy detection */
+	val &= ~PMT_CTL_WUPS;
+	val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
 
-		ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-		check_warn_return(ret, "Error writing PMT_CTL");
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
 
-		return 0;
+	pdata->suspend_flags |= SUSPEND_SUSPEND1;
+
+	return 0;
+}
+
+static int smsc75xx_enter_suspend2(struct usbnet *dev)
+{
+	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	u32 val;
+	int ret;
+
+	ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+	check_warn_return(ret, "Error reading PMT_CTL\n");
+
+	val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+	val |= PMT_CTL_SUS_MODE_2;
+
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
+
+	pdata->suspend_flags |= SUSPEND_SUSPEND2;
+
+	return 0;
+}
+
+static int smsc75xx_enter_suspend3(struct usbnet *dev)
+{
+	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	u32 val;
+	int ret;
+
+	ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val);
+	check_warn_return(ret, "Error reading FCT_RX_CTL\n");
+
+	if (val & FCT_RX_CTL_RXUSED) {
+		netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n");
+		return -EBUSY;
 	}
 
-	if (pdata->wolopts & WAKE_MAGIC) {
-		/* clear any pending magic packet status */
-		ret = smsc75xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
+	ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+	check_warn_return(ret, "Error reading PMT_CTL\n");
 
-		val |= WUCSR_MPR;
+	val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+	val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN;
 
-		ret = smsc75xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
-	}
-
-	/* enable/disable magic packup wake */
-	ret = smsc75xx_read_reg(dev, WUCSR, &val);
-	check_warn_return(ret, "Error reading WUCSR");
-
-	if (pdata->wolopts & WAKE_MAGIC) {
-		netdev_info(dev->net, "enabling magic packet wakeup");
-		val |= WUCSR_MPEN;
-	} else {
-		netdev_info(dev->net, "disabling magic packet wakeup");
-		val &= ~WUCSR_MPEN;
-	}
-
-	ret = smsc75xx_write_reg(dev, WUCSR, val);
-	check_warn_return(ret, "Error writing WUCSR");
-
-	/* enable wol wakeup source */
-	ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-	check_warn_return(ret, "Error reading PMT_CTL");
-
-	val |= PMT_CTL_WOL_EN;
-
-	ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-	check_warn_return(ret, "Error writing PMT_CTL");
-
-	/* enable receiver */
-	ret = smsc75xx_read_reg(dev, MAC_RX, &val);
-	check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
-
-	val |= MAC_RX_RXEN;
-
-	ret = smsc75xx_write_reg(dev, MAC_RX, val);
-	check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
-
-	/* some wol options are enabled, so enter SUSPEND0 */
-	netdev_info(dev->net, "entering SUSPEND0 mode");
-
-	ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-	check_warn_return(ret, "Error reading PMT_CTL");
-
-	val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
-	val |= PMT_CTL_SUS_MODE_0;
-
-	ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-	check_warn_return(ret, "Error writing PMT_CTL");
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
 
 	/* clear wol status */
 	val &= ~PMT_CTL_WUPS;
 	val |= PMT_CTL_WUPS_WOL;
-	ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-	check_warn_return(ret, "Error writing PMT_CTL");
 
-	/* read back PMT_CTL */
-	ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-	check_warn_return(ret, "Error reading PMT_CTL");
+	ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+	check_warn_return(ret, "Error writing PMT_CTL\n");
 
-	smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+	pdata->suspend_flags |= SUSPEND_SUSPEND3;
 
 	return 0;
 }
 
+static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
+{
+	struct mii_if_info *mii = &dev->mii;
+	int ret;
+
+	netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
+
+	/* read to clear */
+	ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+	check_warn_return(ret, "Error reading PHY_INT_SRC\n");
+
+	/* enable interrupt source */
+	ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+	check_warn_return(ret, "Error reading PHY_INT_MASK\n");
+
+	ret |= mask;
+
+	smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+
+	return 0;
+}
+
+static int smsc75xx_link_ok_nopm(struct usbnet *dev)
+{
+	struct mii_if_info *mii = &dev->mii;
+	int ret;
+
+	/* first, a dummy read, needed to latch some MII phys */
+	ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+	check_warn_return(ret, "Error reading MII_BMSR\n");
+
+	ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+	check_warn_return(ret, "Error reading MII_BMSR\n");
+
+	return !!(ret & BMSR_LSTATUS);
+}
+
+static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up)
+{
+	int ret;
+
+	if (!netif_running(dev->net)) {
+		/* interface is ifconfig down so fully power down hw */
+		netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
+		return smsc75xx_enter_suspend2(dev);
+	}
+
+	if (!link_up) {
+		/* link is down so enter EDPD mode */
+		netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
+
+		/* enable PHY wakeup events in case a cable is attached */
+		ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+			PHY_INT_MASK_ANEG_COMP);
+		check_warn_return(ret, "error enabling PHY wakeup ints\n");
+
+		netdev_info(dev->net, "entering SUSPEND1 mode\n");
+		return smsc75xx_enter_suspend1(dev);
+	}
+
+	/* enable PHY wakeup events so we can remote wakeup if cable is pulled */
+	ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+		PHY_INT_MASK_LINK_DOWN);
+	check_warn_return(ret, "error enabling PHY wakeup ints\n");
+
+	netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
+	return smsc75xx_enter_suspend3(dev);
+}
+
+static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	u32 val, link_up;
+	int ret;
+
+	ret = usbnet_suspend(intf, message);
+	check_warn_goto_done(ret, "usbnet_suspend error\n");
+
+	if (pdata->suspend_flags) {
+		netdev_warn(dev->net, "error during last resume\n");
+		pdata->suspend_flags = 0;
+	}
+
+	/* determine if link is up using only _nopm functions */
+	link_up = smsc75xx_link_ok_nopm(dev);
+
+	if (message.event == PM_EVENT_AUTO_SUSPEND) {
+		ret = smsc75xx_autosuspend(dev, link_up);
+		goto done;
+	}
+
+	/* if we get this far we're not autosuspending */
+	/* if no wol options set, or if link is down and we're not waking on
+	 * PHY activity, enter lowest power SUSPEND2 mode
+	 */
+	if (!(pdata->wolopts & SUPPORTED_WAKE) ||
+		!(link_up || (pdata->wolopts & WAKE_PHY))) {
+		netdev_info(dev->net, "entering SUSPEND2 mode\n");
+
+		/* disable energy detect (link up) & wake up events */
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val &= ~(WUCSR_MPEN | WUCSR_WUEN);
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+
+		ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+		check_warn_goto_done(ret, "Error reading PMT_CTL\n");
+
+		val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
+
+		ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+		check_warn_goto_done(ret, "Error writing PMT_CTL\n");
+
+		ret = smsc75xx_enter_suspend2(dev);
+		goto done;
+	}
+
+	if (pdata->wolopts & WAKE_PHY) {
+		ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+			(PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN));
+		check_warn_goto_done(ret, "error enabling PHY wakeup ints\n");
+
+		/* if link is down then configure EDPD and enter SUSPEND1,
+		 * otherwise enter SUSPEND0 below
+		 */
+		if (!link_up) {
+			struct mii_if_info *mii = &dev->mii;
+			netdev_info(dev->net, "entering SUSPEND1 mode\n");
+
+			/* enable energy detect power-down mode */
+			ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id,
+				PHY_MODE_CTRL_STS);
+			check_warn_goto_done(ret, "Error reading PHY_MODE_CTRL_STS\n");
+
+			ret |= MODE_CTRL_STS_EDPWRDOWN;
+
+			smsc75xx_mdio_write_nopm(dev->net, mii->phy_id,
+				PHY_MODE_CTRL_STS, ret);
+
+			/* enter SUSPEND1 mode */
+			ret = smsc75xx_enter_suspend1(dev);
+			goto done;
+		}
+	}
+
+	if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) {
+		int i, filter = 0;
+
+		/* disable all filters */
+		for (i = 0; i < WUF_NUM; i++) {
+			ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0);
+			check_warn_goto_done(ret, "Error writing WUF_CFGX\n");
+		}
+
+		if (pdata->wolopts & WAKE_MCAST) {
+			const u8 mcast[] = {0x01, 0x00, 0x5E};
+			netdev_info(dev->net, "enabling multicast detection\n");
+
+			val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST
+				| smsc_crc(mcast, 3);
+			ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007);
+			check_warn_goto_done(ret, "Error writing wakeup filter\n");
+		}
+
+		if (pdata->wolopts & WAKE_ARP) {
+			const u8 arp[] = {0x08, 0x06};
+			netdev_info(dev->net, "enabling ARP detection\n");
+
+			val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16)
+				| smsc_crc(arp, 2);
+			ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003);
+			check_warn_goto_done(ret, "Error writing wakeup filter\n");
+		}
+
+		/* clear any pending pattern match packet status */
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_WUFR;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+
+		netdev_info(dev->net, "enabling packet match detection\n");
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_WUEN;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+	} else {
+		netdev_info(dev->net, "disabling packet match detection\n");
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val &= ~WUCSR_WUEN;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+	}
+
+	/* disable magic, bcast & unicast wakeup sources */
+	ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+	check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+	val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN);
+
+	ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+	check_warn_goto_done(ret, "Error writing WUCSR\n");
+
+	if (pdata->wolopts & WAKE_PHY) {
+		netdev_info(dev->net, "enabling PHY wakeup\n");
+
+		ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+		check_warn_goto_done(ret, "Error reading PMT_CTL\n");
+
+		/* clear wol status, enable energy detection */
+		val &= ~PMT_CTL_WUPS;
+		val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
+
+		ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+		check_warn_goto_done(ret, "Error writing PMT_CTL\n");
+	}
+
+	if (pdata->wolopts & WAKE_MAGIC) {
+		netdev_info(dev->net, "enabling magic packet wakeup\n");
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		/* clear any pending magic packet status */
+		val |= WUCSR_MPR | WUCSR_MPEN;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+	}
+
+	if (pdata->wolopts & WAKE_BCAST) {
+		netdev_info(dev->net, "enabling broadcast detection\n");
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_BCAST_FR | WUCSR_BCST_EN;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+	}
+
+	if (pdata->wolopts & WAKE_UCAST) {
+		netdev_info(dev->net, "enabling unicast detection\n");
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_goto_done(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_WUFR | WUCSR_PFDA_EN;
+
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_goto_done(ret, "Error writing WUCSR\n");
+	}
+
+	/* enable receiver so wakeup frames can be received */
+	ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val);
+	check_warn_goto_done(ret, "Failed to read MAC_RX: %d\n", ret);
+
+	val |= MAC_RX_RXEN;
+
+	ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val);
+	check_warn_goto_done(ret, "Failed to write MAC_RX: %d\n", ret);
+
+	/* some wol options are enabled, so enter SUSPEND0 */
+	netdev_info(dev->net, "entering SUSPEND0 mode\n");
+	ret = smsc75xx_enter_suspend0(dev);
+
+done:
+	if (ret)
+		usbnet_resume(intf);
+	return ret;
+}
+
 static int smsc75xx_resume(struct usb_interface *intf)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+	u8 suspend_flags = pdata->suspend_flags;
 	int ret;
 	u32 val;
 
-	if (pdata->wolopts & WAKE_MAGIC) {
-		netdev_info(dev->net, "resuming from SUSPEND0");
+	netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
 
-		smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+	/* do this first to ensure it's cleared even in error case */
+	pdata->suspend_flags = 0;
 
-		/* Disable magic packup wake */
-		ret = smsc75xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
+	if (suspend_flags & SUSPEND_ALLMODES) {
+		/* Disable wakeup sources */
+		ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_return(ret, "Error reading WUCSR\n");
 
-		val &= ~WUCSR_MPEN;
+		val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN
+			| WUCSR_BCST_EN);
 
-		ret = smsc75xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
+		ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_return(ret, "Error writing WUCSR\n");
 
 		/* clear wake-up status */
-		ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-		check_warn_return(ret, "Error reading PMT_CTL");
+		ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+		check_warn_return(ret, "Error reading PMT_CTL\n");
 
 		val &= ~PMT_CTL_WOL_EN;
 		val |= PMT_CTL_WUPS;
 
-		ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-		check_warn_return(ret, "Error writing PMT_CTL");
-	} else {
-		netdev_info(dev->net, "resuming from SUSPEND2");
+		ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+		check_warn_return(ret, "Error writing PMT_CTL\n");
+	}
 
-		ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
-		check_warn_return(ret, "Error reading PMT_CTL");
+	if (suspend_flags & SUSPEND_SUSPEND2) {
+		netdev_info(dev->net, "resuming from SUSPEND2\n");
+
+		ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+		check_warn_return(ret, "Error reading PMT_CTL\n");
 
 		val |= PMT_CTL_PHY_PWRUP;
 
-		ret = smsc75xx_write_reg(dev, PMT_CTL, val);
-		check_warn_return(ret, "Error writing PMT_CTL");
+		ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+		check_warn_return(ret, "Error writing PMT_CTL\n");
 	}
 
-	ret = smsc75xx_wait_ready(dev);
-	check_warn_return(ret, "device not ready in smsc75xx_resume");
+	ret = smsc75xx_wait_ready(dev, 1);
+	check_warn_return(ret, "device not ready in smsc75xx_resume\n");
 
 	return usbnet_resume(intf);
 }
@@ -1352,7 +1704,7 @@
 
 		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
 			netif_dbg(dev, rx_err, dev->net,
-				"Error rx_cmd_a=0x%08x", rx_cmd_a);
+				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
 			dev->net->stats.rx_errors++;
 			dev->net->stats.rx_dropped++;
 
@@ -1364,7 +1716,8 @@
 			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
 			if (unlikely(size > (ETH_FRAME_LEN + 12))) {
 				netif_dbg(dev, rx_err, dev->net,
-					"size err rx_cmd_a=0x%08x", rx_cmd_a);
+					  "size err rx_cmd_a=0x%08x\n",
+					  rx_cmd_a);
 				return 0;
 			}
 
@@ -1381,7 +1734,7 @@
 
 			ax_skb = skb_clone(skb, GFP_ATOMIC);
 			if (unlikely(!ax_skb)) {
-				netdev_warn(dev->net, "Error allocating skb");
+				netdev_warn(dev->net, "Error allocating skb\n");
 				return 0;
 			}
 
@@ -1406,7 +1759,7 @@
 	}
 
 	if (unlikely(skb->len < 0)) {
-		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+		netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
 		return 0;
 	}
 
@@ -1454,6 +1807,12 @@
 	return skb;
 }
 
+static int smsc75xx_manage_power(struct usbnet *dev, int on)
+{
+	dev->intf->needs_remote_wakeup = on;
+	return 0;
+}
+
 static const struct driver_info smsc75xx_info = {
 	.description	= "smsc75xx USB 2.0 Gigabit Ethernet",
 	.bind		= smsc75xx_bind,
@@ -1463,6 +1822,7 @@
 	.rx_fixup	= smsc75xx_rx_fixup,
 	.tx_fixup	= smsc75xx_tx_fixup,
 	.status		= smsc75xx_status,
+	.manage_power	= smsc75xx_manage_power,
 	.flags		= FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
 };
 
@@ -1490,6 +1850,7 @@
 	.reset_resume	= smsc75xx_resume,
 	.disconnect	= usbnet_disconnect,
 	.disable_hub_initiated_lpm = 1,
+	.supports_autosuspend = 1,
 };
 
 module_usb_driver(smsc75xx_driver);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 362cb8c..79d495d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -26,6 +26,8 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
@@ -46,7 +48,12 @@
 #define SMSC95XX_INTERNAL_PHY_ID	(1)
 #define SMSC95XX_TX_OVERHEAD		(8)
 #define SMSC95XX_TX_OVERHEAD_CSUM	(12)
-#define SUPPORTED_WAKE			(WAKE_MAGIC)
+#define SUPPORTED_WAKE			(WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
+					 WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
+
+#define FEATURE_8_WAKEUP_FILTERS	(0x01)
+#define FEATURE_PHY_NLP_CROSSOVER	(0x02)
+#define FEATURE_AUTOSUSPEND		(0x04)
 
 #define check_warn(ret, fmt, args...) \
 	({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -63,80 +70,98 @@
 	u32 hash_lo;
 	u32 wolopts;
 	spinlock_t mac_cr_lock;
-};
-
-struct usb_context {
-	struct usb_ctrlrequest req;
-	struct usbnet *dev;
+	u8 features;
 };
 
 static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
-					  u32 *data)
+static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
+					    u32 *data, int in_pm)
 {
-	u32 *buf = kmalloc(4, GFP_KERNEL);
+	u32 buf;
 	int ret;
+	int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
 
 	BUG_ON(!dev);
 
-	if (!buf)
-		return -ENOMEM;
+	if (!in_pm)
+		fn = usbnet_read_cmd;
+	else
+		fn = usbnet_read_cmd_nopm;
 
-	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
-		USB_VENDOR_REQUEST_READ_REGISTER,
-		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
-
+	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		 0, index, &buf, 4);
 	if (unlikely(ret < 0))
-		netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
+		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+			    index, ret);
 
-	le32_to_cpus(buf);
-	*data = *buf;
-	kfree(buf);
+	le32_to_cpus(&buf);
+	*data = buf;
 
 	return ret;
 }
 
+static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
+					     u32 data, int in_pm)
+{
+	u32 buf;
+	int ret;
+	int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+	BUG_ON(!dev);
+
+	if (!in_pm)
+		fn = usbnet_write_cmd;
+	else
+		fn = usbnet_write_cmd_nopm;
+
+	buf = data;
+	cpu_to_le32s(&buf);
+
+	ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		 0, index, &buf, 4);
+	if (unlikely(ret < 0))
+		netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
+			    index, ret);
+
+	return ret;
+}
+
+static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
+					       u32 *data)
+{
+	return __smsc95xx_read_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
+						u32 data)
+{
+	return __smsc95xx_write_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+					  u32 *data)
+{
+	return __smsc95xx_read_reg(dev, index, data, 0);
+}
+
 static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
 					   u32 data)
 {
-	u32 *buf = kmalloc(4, GFP_KERNEL);
-	int ret;
-
-	BUG_ON(!dev);
-
-	if (!buf)
-		return -ENOMEM;
-
-	*buf = data;
-	cpu_to_le32s(buf);
-
-	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_VENDOR_REQUEST_WRITE_REGISTER,
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-		00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
-
-	if (unlikely(ret < 0))
-		netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
-
-	kfree(buf);
-
-	return ret;
+	return __smsc95xx_write_reg(dev, index, data, 0);
 }
-
 static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
 {
 	if (WARN_ON_ONCE(!dev))
 		return -EINVAL;
 
-	cpu_to_le32s(&feature);
-
-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
-		USB_CTRL_SET_TIMEOUT);
+	return usbnet_write_cmd_nopm(dev, USB_REQ_SET_FEATURE,
+				     USB_RECIP_DEVICE, feature, 0,
+				     NULL, 0);
 }
 
 static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
@@ -144,24 +169,23 @@
 	if (WARN_ON_ONCE(!dev))
 		return -EINVAL;
 
-	cpu_to_le32s(&feature);
-
-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
-		USB_CTRL_SET_TIMEOUT);
+	return usbnet_write_cmd_nopm(dev, USB_REQ_CLEAR_FEATURE,
+				     USB_RECIP_DEVICE, feature,
+				     0, NULL, 0);
 }
 
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
-static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
+static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
+						     int in_pm)
 {
 	unsigned long start_time = jiffies;
 	u32 val;
 	int ret;
 
 	do {
-		ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
-		check_warn_return(ret, "Error reading MII_ACCESS");
+		ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
+		check_warn_return(ret, "Error reading MII_ACCESS\n");
 		if (!(val & MII_BUSY_))
 			return 0;
 	} while (!time_after(jiffies, start_time + HZ));
@@ -169,7 +193,8 @@
 	return -EIO;
 }
 
-static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+				int in_pm)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	u32 val, addr;
@@ -178,21 +203,21 @@
 	mutex_lock(&dev->phy_mutex);
 
 	/* confirm MII not busy */
-	ret = smsc95xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
+	ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read\n");
 
 	/* set the address, index & direction (read from PHY) */
 	phy_id &= dev->mii.phy_id_mask;
 	idx &= dev->mii.reg_num_mask;
 	addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
-	ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
-	check_warn_goto_done(ret, "Error writing MII_ADDR");
+	ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_ADDR\n");
 
-	ret = smsc95xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
+	ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "Timed out reading MII reg %02X\n", idx);
 
-	ret = smsc95xx_read_reg(dev, MII_DATA, &val);
-	check_warn_goto_done(ret, "Error reading MII_DATA");
+	ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
+	check_warn_goto_done(ret, "Error reading MII_DATA\n");
 
 	ret = (u16)(val & 0xFFFF);
 
@@ -201,8 +226,8 @@
 	return ret;
 }
 
-static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
-				int regval)
+static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
+				  int idx, int regval, int in_pm)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	u32 val, addr;
@@ -211,27 +236,50 @@
 	mutex_lock(&dev->phy_mutex);
 
 	/* confirm MII not busy */
-	ret = smsc95xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
+	ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write\n");
 
 	val = regval;
-	ret = smsc95xx_write_reg(dev, MII_DATA, val);
-	check_warn_goto_done(ret, "Error writing MII_DATA");
+	ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_DATA\n");
 
 	/* set the address, index & direction (write to PHY) */
 	phy_id &= dev->mii.phy_id_mask;
 	idx &= dev->mii.reg_num_mask;
 	addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
-	ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
-	check_warn_goto_done(ret, "Error writing MII_ADDR");
+	ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+	check_warn_goto_done(ret, "Error writing MII_ADDR\n");
 
-	ret = smsc95xx_phy_wait_not_busy(dev);
-	check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+	ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+	check_warn_goto_done(ret, "Timed out writing MII reg %02X\n", idx);
 
 done:
 	mutex_unlock(&dev->phy_mutex);
 }
 
+static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
+				   int idx)
+{
+	return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
+}
+
+static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
+				     int idx, int regval)
+{
+	__smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
+}
+
+static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+	return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
+}
+
+static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+				int regval)
+{
+	__smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
+}
+
 static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
 {
 	unsigned long start_time = jiffies;
@@ -240,7 +288,7 @@
 
 	do {
 		ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
-		check_warn_return(ret, "Error reading E2P_CMD");
+		check_warn_return(ret, "Error reading E2P_CMD\n");
 		if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
 			break;
 		udelay(40);
@@ -262,7 +310,7 @@
 
 	do {
 		ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
-		check_warn_return(ret, "Error reading E2P_CMD");
+		check_warn_return(ret, "Error reading E2P_CMD\n");
 
 		if (!(val & E2P_CMD_BUSY_))
 			return 0;
@@ -290,14 +338,14 @@
 	for (i = 0; i < length; i++) {
 		val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
 		ret = smsc95xx_write_reg(dev, E2P_CMD, val);
-		check_warn_return(ret, "Error writing E2P_CMD");
+		check_warn_return(ret, "Error writing E2P_CMD\n");
 
 		ret = smsc95xx_wait_eeprom(dev);
 		if (ret < 0)
 			return ret;
 
 		ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
-		check_warn_return(ret, "Error reading E2P_DATA");
+		check_warn_return(ret, "Error reading E2P_DATA\n");
 
 		data[i] = val & 0xFF;
 		offset++;
@@ -322,7 +370,7 @@
 	/* Issue write/erase enable command */
 	val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
 	ret = smsc95xx_write_reg(dev, E2P_CMD, val);
-	check_warn_return(ret, "Error writing E2P_DATA");
+	check_warn_return(ret, "Error writing E2P_DATA\n");
 
 	ret = smsc95xx_wait_eeprom(dev);
 	if (ret < 0)
@@ -333,12 +381,12 @@
 		/* Fill data register */
 		val = data[i];
 		ret = smsc95xx_write_reg(dev, E2P_DATA, val);
-		check_warn_return(ret, "Error writing E2P_DATA");
+		check_warn_return(ret, "Error writing E2P_DATA\n");
 
 		/* Send "write" command */
 		val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
 		ret = smsc95xx_write_reg(dev, E2P_CMD, val);
-		check_warn_return(ret, "Error writing E2P_CMD");
+		check_warn_return(ret, "Error writing E2P_CMD\n");
 
 		ret = smsc95xx_wait_eeprom(dev);
 		if (ret < 0)
@@ -350,60 +398,20 @@
 	return 0;
 }
 
-static void smsc95xx_async_cmd_callback(struct urb *urb)
-{
-	struct usb_context *usb_context = urb->context;
-	struct usbnet *dev = usb_context->dev;
-	int status = urb->status;
-
-	check_warn(status, "async callback failed with %d\n", status);
-
-	kfree(usb_context);
-	usb_free_urb(urb);
-}
-
 static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
 						 u32 *data)
 {
-	struct usb_context *usb_context;
-	int status;
-	struct urb *urb;
 	const u16 size = 4;
+	int ret;
 
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		netdev_warn(dev->net, "Error allocating URB\n");
-		return -ENOMEM;
-	}
-
-	usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
-	if (usb_context == NULL) {
-		netdev_warn(dev->net, "Error allocating control msg\n");
-		usb_free_urb(urb);
-		return -ENOMEM;
-	}
-
-	usb_context->req.bRequestType =
-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-	usb_context->req.bRequest = USB_VENDOR_REQUEST_WRITE_REGISTER;
-	usb_context->req.wValue = 00;
-	usb_context->req.wIndex = cpu_to_le16(index);
-	usb_context->req.wLength = cpu_to_le16(size);
-
-	usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
-		(void *)&usb_context->req, data, size,
-		smsc95xx_async_cmd_callback,
-		(void *)usb_context);
-
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status < 0) {
-		netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
-			    status);
-		kfree(usb_context);
-		usb_free_urb(urb);
-	}
-
-	return status;
+	ret = usbnet_write_cmd_async(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
+				     USB_DIR_OUT | USB_TYPE_VENDOR |
+				     USB_RECIP_DEVICE,
+				     0, index, data, size);
+	if (ret < 0)
+		netdev_warn(dev->net, "Error writing async cmd, sts=%d\n",
+			    ret);
+	return ret;
 }
 
 /* returns hash bit number for given MAC address
@@ -461,13 +469,13 @@
 
 	/* Initiate async writes, as we can't wait for completion here */
 	ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
-	check_warn(ret, "failed to initiate async write to HASHH");
+	check_warn(ret, "failed to initiate async write to HASHH\n");
 
 	ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
-	check_warn(ret, "failed to initiate async write to HASHL");
+	check_warn(ret, "failed to initiate async write to HASHL\n");
 
 	ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
-	check_warn(ret, "failed to initiate async write to MAC_CR");
+	check_warn(ret, "failed to initiate async write to MAC_CR\n");
 }
 
 static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
@@ -476,7 +484,7 @@
 	u32 flow, afc_cfg = 0;
 
 	int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
-	check_warn_return(ret, "Error reading AFC_CFG");
+	check_warn_return(ret, "Error reading AFC_CFG\n");
 
 	if (duplex == DUPLEX_FULL) {
 		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -501,10 +509,10 @@
 	}
 
 	ret = smsc95xx_write_reg(dev, FLOW, flow);
-	check_warn_return(ret, "Error writing FLOW");
+	check_warn_return(ret, "Error writing FLOW\n");
 
 	ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
-	check_warn_return(ret, "Error writing AFC_CFG");
+	check_warn_return(ret, "Error writing AFC_CFG\n");
 
 	return 0;
 }
@@ -520,10 +528,10 @@
 
 	/* clear interrupt status */
 	ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
-	check_warn_return(ret, "Error reading PHY_INT_SRC");
+	check_warn_return(ret, "Error reading PHY_INT_SRC\n");
 
 	ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
-	check_warn_return(ret, "Error writing INT_STS");
+	check_warn_return(ret, "Error writing INT_STS\n");
 
 	mii_check_media(mii, 1, 1);
 	mii_ethtool_gset(&dev->mii, &ecmd);
@@ -545,10 +553,10 @@
 	spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
 	ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
-	check_warn_return(ret, "Error writing MAC_CR");
+	check_warn_return(ret, "Error writing MAC_CR\n");
 
 	ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
-	check_warn_return(ret, "Error updating PHY flow control");
+	check_warn_return(ret, "Error updating PHY flow control\n");
 
 	return 0;
 }
@@ -765,7 +773,7 @@
 }
 
 /* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
 {
 	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 	unsigned long flags;
@@ -775,7 +783,7 @@
 	pdata->mac_cr |= MAC_CR_RXEN_;
 	spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-	ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+	ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
 	check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
 
 	return 0;
@@ -813,7 +821,7 @@
 
 	/* read to clear */
 	ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
-	check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
+	check_warn_return(ret, "Failed to read PHY_INT_SRC during init\n");
 
 	smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
 		PHY_INT_MASK_DEFAULT_);
@@ -867,14 +875,14 @@
 	if (ret < 0)
 		return ret;
 
-	netif_dbg(dev, ifup, dev->net,
-		  "MAC Address: %pM\n", dev->net->dev_addr);
+	netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
+		  dev->net->dev_addr);
 
 	ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
 	check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net,
-		  "Read Value from HW_CFG : 0x%08x\n", read_buf);
+	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
+		  read_buf);
 
 	read_buf |= HW_CFG_BIR_;
 
@@ -898,8 +906,8 @@
 		dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
 	}
 
-	netif_dbg(dev, ifup, dev->net,
-		  "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
+	netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
+		  (ulong)dev->rx_urb_size);
 
 	ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
 	check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
@@ -924,8 +932,8 @@
 	ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
 	check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
-	netif_dbg(dev, ifup, dev->net,
-		  "Read Value from HW_CFG: 0x%08x\n", read_buf);
+	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n",
+		  read_buf);
 
 	if (turbo_mode)
 		read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -975,12 +983,12 @@
 
 	/* Enable or disable checksum offload engines */
 	ret = smsc95xx_set_features(dev->net, dev->net->features);
-	check_warn_return(ret, "Failed to set checksum offload features");
+	check_warn_return(ret, "Failed to set checksum offload features\n");
 
 	smsc95xx_set_multicast(dev->net);
 
 	ret = smsc95xx_phy_initialize(dev);
-	check_warn_return(ret, "Failed to init PHY");
+	check_warn_return(ret, "Failed to init PHY\n");
 
 	ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
 	check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
@@ -992,10 +1000,10 @@
 	check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
 
 	ret = smsc95xx_start_tx_path(dev);
-	check_warn_return(ret, "Failed to start TX path");
+	check_warn_return(ret, "Failed to start TX path\n");
 
-	ret = smsc95xx_start_rx_path(dev);
-	check_warn_return(ret, "Failed to start RX path");
+	ret = smsc95xx_start_rx_path(dev, 0);
+	check_warn_return(ret, "Failed to start RX path\n");
 
 	netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
 	return 0;
@@ -1017,6 +1025,7 @@
 static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct smsc95xx_priv *pdata = NULL;
+	u32 val;
 	int ret;
 
 	printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
@@ -1047,6 +1056,19 @@
 	/* Init all registers */
 	ret = smsc95xx_reset(dev);
 
+	/* detect device revision as different features may be available */
+	ret = smsc95xx_read_reg(dev, ID_REV, &val);
+	check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
+	val >>= 16;
+
+	if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
+	    (val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
+		pdata->features = (FEATURE_8_WAKEUP_FILTERS |
+			FEATURE_PHY_NLP_CROSSOVER |
+			FEATURE_AUTOSUSPEND);
+	else if (val == ID_REV_CHIP_ID_9512_)
+		pdata->features = FEATURE_8_WAKEUP_FILTERS;
+
 	dev->net->netdev_ops = &smsc95xx_netdev_ops;
 	dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
 	dev->net->flags |= IFF_MULTICAST;
@@ -1066,115 +1088,351 @@
 	}
 }
 
-static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
+static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
 {
-	struct usbnet *dev = usb_get_intfdata(intf);
-	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+	return bitrev16(crc16(0xFFFF, buffer, len)) << ((filter % 2) * 16);
+}
+
+static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
+{
+	struct mii_if_info *mii = &dev->mii;
 	int ret;
+
+	netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
+
+	/* read to clear */
+	ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+	check_warn_return(ret, "Error reading PHY_INT_SRC\n");
+
+	/* enable interrupt source */
+	ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+	check_warn_return(ret, "Error reading PHY_INT_MASK\n");
+
+	ret |= mask;
+
+	smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+
+	return 0;
+}
+
+static int smsc95xx_link_ok_nopm(struct usbnet *dev)
+{
+	struct mii_if_info *mii = &dev->mii;
+	int ret;
+
+	/* first, a dummy read, needed to latch some MII phys */
+	ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+	check_warn_return(ret, "Error reading MII_BMSR\n");
+
+	ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+	check_warn_return(ret, "Error reading MII_BMSR\n");
+
+	return !!(ret & BMSR_LSTATUS);
+}
+
+static int smsc95xx_enter_suspend0(struct usbnet *dev)
+{
+	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 	u32 val;
+	int ret;
 
-	ret = usbnet_suspend(intf, message);
-	check_warn_return(ret, "usbnet_suspend error");
-
-	/* if no wol options set, enter lowest power SUSPEND2 mode */
-	if (!(pdata->wolopts & SUPPORTED_WAKE)) {
-		netdev_info(dev->net, "entering SUSPEND2 mode");
-
-		/* disable energy detect (link up) & wake up events */
-		ret = smsc95xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
-
-		val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
-
-		ret = smsc95xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
-
-		ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-		check_warn_return(ret, "Error reading PM_CTRL");
-
-		val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
-
-		ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-		check_warn_return(ret, "Error writing PM_CTRL");
-
-		/* enter suspend2 mode */
-		ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-		check_warn_return(ret, "Error reading PM_CTRL");
-
-		val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
-		val |= PM_CTL_SUS_MODE_2;
-
-		ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-		check_warn_return(ret, "Error writing PM_CTRL");
-
-		return 0;
-	}
-
-	if (pdata->wolopts & WAKE_MAGIC) {
-		/* clear any pending magic packet status */
-		ret = smsc95xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
-
-		val |= WUCSR_MPR_;
-
-		ret = smsc95xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
-	}
-
-	/* enable/disable magic packup wake */
-	ret = smsc95xx_read_reg(dev, WUCSR, &val);
-	check_warn_return(ret, "Error reading WUCSR");
-
-	if (pdata->wolopts & WAKE_MAGIC) {
-		netdev_info(dev->net, "enabling magic packet wakeup");
-		val |= WUCSR_MPEN_;
-	} else {
-		netdev_info(dev->net, "disabling magic packet wakeup");
-		val &= ~WUCSR_MPEN_;
-	}
-
-	ret = smsc95xx_write_reg(dev, WUCSR, val);
-	check_warn_return(ret, "Error writing WUCSR");
-
-	/* enable wol wakeup source */
-	ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-	check_warn_return(ret, "Error reading PM_CTRL");
-
-	val |= PM_CTL_WOL_EN_;
-
-	ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-	check_warn_return(ret, "Error writing PM_CTRL");
-
-	/* enable receiver */
-	smsc95xx_start_rx_path(dev);
-
-	/* some wol options are enabled, so enter SUSPEND0 */
-	netdev_info(dev->net, "entering SUSPEND0 mode");
-
-	ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-	check_warn_return(ret, "Error reading PM_CTRL");
+	ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+	check_warn_return(ret, "Error reading PM_CTRL\n");
 
 	val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
 	val |= PM_CTL_SUS_MODE_0;
 
-	ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-	check_warn_return(ret, "Error writing PM_CTRL");
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
 
 	/* clear wol status */
 	val &= ~PM_CTL_WUPS_;
 	val |= PM_CTL_WUPS_WOL_;
-	ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-	check_warn_return(ret, "Error writing PM_CTRL");
+
+	/* enable energy detection */
+	if (pdata->wolopts & WAKE_PHY)
+		val |= PM_CTL_WUPS_ED_;
+
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
 
 	/* read back PM_CTRL */
-	ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-	check_warn_return(ret, "Error reading PM_CTRL");
+	ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+	check_warn_return(ret, "Error reading PM_CTRL\n");
 
 	smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
 
 	return 0;
 }
 
+static int smsc95xx_enter_suspend1(struct usbnet *dev)
+{
+	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+	struct mii_if_info *mii = &dev->mii;
+	u32 val;
+	int ret;
+
+	/* reconfigure link pulse detection timing for
+	 * compatibility with non-standard link partners
+	 */
+	if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
+		smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
+			PHY_EDPD_CONFIG_DEFAULT);
+
+	/* enable energy detect power-down mode */
+	ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
+	check_warn_return(ret, "Error reading PHY_MODE_CTRL_STS\n");
+
+	ret |= MODE_CTRL_STS_EDPWRDOWN_;
+
+	smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
+
+	/* enter SUSPEND1 mode */
+	ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+	check_warn_return(ret, "Error reading PM_CTRL\n");
+
+	val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+	val |= PM_CTL_SUS_MODE_1;
+
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
+
+	/* clear wol status, enable energy detection */
+	val &= ~PM_CTL_WUPS_;
+	val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
+
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
+
+	smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+	return 0;
+}
+
+static int smsc95xx_enter_suspend2(struct usbnet *dev)
+{
+	u32 val;
+	int ret;
+
+	ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+	check_warn_return(ret, "Error reading PM_CTRL\n");
+
+	val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+	val |= PM_CTL_SUS_MODE_2;
+
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
+
+	return 0;
+}
+
+static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+	u32 val, link_up;
+	int ret;
+
+	ret = usbnet_suspend(intf, message);
+	check_warn_return(ret, "usbnet_suspend error\n");
+
+	/* determine if link is up using only _nopm functions */
+	link_up = smsc95xx_link_ok_nopm(dev);
+
+	/* if no wol options set, or if link is down and we're not waking on
+	 * PHY activity, enter lowest power SUSPEND2 mode
+	 */
+	if (!(pdata->wolopts & SUPPORTED_WAKE) ||
+		!(link_up || (pdata->wolopts & WAKE_PHY))) {
+		netdev_info(dev->net, "entering SUSPEND2 mode\n");
+
+		/* disable energy detect (link up) & wake up events */
+		ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_return(ret, "Error reading WUCSR\n");
+
+		val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
+
+		ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_return(ret, "Error writing WUCSR\n");
+
+		ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+		check_warn_return(ret, "Error reading PM_CTRL\n");
+
+		val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
+
+		ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+		check_warn_return(ret, "Error writing PM_CTRL\n");
+
+		return smsc95xx_enter_suspend2(dev);
+	}
+
+	if (pdata->wolopts & WAKE_PHY) {
+		ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+			(PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_));
+		check_warn_return(ret, "error enabling PHY wakeup ints\n");
+
+		/* if link is down then configure EDPD and enter SUSPEND1,
+		 * otherwise enter SUSPEND0 below
+		 */
+		if (!link_up) {
+			netdev_info(dev->net, "entering SUSPEND1 mode\n");
+			return smsc95xx_enter_suspend1(dev);
+		}
+	}
+
+	if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+		u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL);
+		u32 command[2];
+		u32 offset[2];
+		u32 crc[4];
+		int wuff_filter_count =
+			(pdata->features & FEATURE_8_WAKEUP_FILTERS) ?
+			LAN9500A_WUFF_NUM : LAN9500_WUFF_NUM;
+		int i, filter = 0;
+
+		if (!filter_mask)
+			return -ENOMEM;
+
+		memset(command, 0, sizeof(command));
+		memset(offset, 0, sizeof(offset));
+		memset(crc, 0, sizeof(crc));
+
+		if (pdata->wolopts & WAKE_BCAST) {
+			const u8 bcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+			netdev_info(dev->net, "enabling broadcast detection\n");
+			filter_mask[filter * 4] = 0x003F;
+			filter_mask[filter * 4 + 1] = 0x00;
+			filter_mask[filter * 4 + 2] = 0x00;
+			filter_mask[filter * 4 + 3] = 0x00;
+			command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+			offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+			crc[filter/2] |= smsc_crc(bcast, 6, filter);
+			filter++;
+		}
+
+		if (pdata->wolopts & WAKE_MCAST) {
+			const u8 mcast[] = {0x01, 0x00, 0x5E};
+			netdev_info(dev->net, "enabling multicast detection\n");
+			filter_mask[filter * 4] = 0x0007;
+			filter_mask[filter * 4 + 1] = 0x00;
+			filter_mask[filter * 4 + 2] = 0x00;
+			filter_mask[filter * 4 + 3] = 0x00;
+			command[filter/4] |= 0x09UL << ((filter % 4) * 8);
+			offset[filter/4] |= 0x00  << ((filter % 4) * 8);
+			crc[filter/2] |= smsc_crc(mcast, 3, filter);
+			filter++;
+		}
+
+		if (pdata->wolopts & WAKE_ARP) {
+			const u8 arp[] = {0x08, 0x06};
+			netdev_info(dev->net, "enabling ARP detection\n");
+			filter_mask[filter * 4] = 0x0003;
+			filter_mask[filter * 4 + 1] = 0x00;
+			filter_mask[filter * 4 + 2] = 0x00;
+			filter_mask[filter * 4 + 3] = 0x00;
+			command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+			offset[filter/4] |= 0x0C << ((filter % 4) * 8);
+			crc[filter/2] |= smsc_crc(arp, 2, filter);
+			filter++;
+		}
+
+		if (pdata->wolopts & WAKE_UCAST) {
+			netdev_info(dev->net, "enabling unicast detection\n");
+			filter_mask[filter * 4] = 0x003F;
+			filter_mask[filter * 4 + 1] = 0x00;
+			filter_mask[filter * 4 + 2] = 0x00;
+			filter_mask[filter * 4 + 3] = 0x00;
+			command[filter/4] |= 0x01UL << ((filter % 4) * 8);
+			offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+			crc[filter/2] |= smsc_crc(dev->net->dev_addr, ETH_ALEN, filter);
+			filter++;
+		}
+
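+		/* The wakeup filter table is loaded by writing WUFF
+		 * repeatedly: first the byte-mask words, then the command,
+		 * offset and CRC words for all wuff_filter_count filters.
+		 */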
+		for (i = 0; i < (wuff_filter_count * 4); i++) {
+			ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+			if (ret < 0)
+				kfree(filter_mask);
+			check_warn_return(ret, "Error writing WUFF\n");
+		}
+		kfree(filter_mask);
+
+		for (i = 0; i < (wuff_filter_count / 4); i++) {
+			ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+			check_warn_return(ret, "Error writing WUFF\n");
+		}
+
+		for (i = 0; i < (wuff_filter_count / 4); i++) {
+			ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+			check_warn_return(ret, "Error writing WUFF\n");
+		}
+
+		for (i = 0; i < (wuff_filter_count / 2); i++) {
+			ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+			check_warn_return(ret, "Error writing WUFF\n");
+		}
+
+		/* clear any pending pattern match packet status */
+		ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_return(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_WUFR_;
+
+		ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_return(ret, "Error writing WUCSR\n");
+	}
+
+	if (pdata->wolopts & WAKE_MAGIC) {
+		/* clear any pending magic packet status */
+		ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_return(ret, "Error reading WUCSR\n");
+
+		val |= WUCSR_MPR_;
+
+		ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_return(ret, "Error writing WUCSR\n");
+	}
+
+	/* enable/disable wakeup sources */
+	ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+	check_warn_return(ret, "Error reading WUCSR\n");
+
+	if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+		netdev_info(dev->net, "enabling pattern match wakeup\n");
+		val |= WUCSR_WAKE_EN_;
+	} else {
+		netdev_info(dev->net, "disabling pattern match wakeup\n");
+		val &= ~WUCSR_WAKE_EN_;
+	}
+
+	if (pdata->wolopts & WAKE_MAGIC) {
+		netdev_info(dev->net, "enabling magic packet wakeup\n");
+		val |= WUCSR_MPEN_;
+	} else {
+		netdev_info(dev->net, "disabling magic packet wakeup\n");
+		val &= ~WUCSR_MPEN_;
+	}
+
+	ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+	check_warn_return(ret, "Error writing WUCSR\n");
+
+	/* enable wol wakeup source */
+	ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+	check_warn_return(ret, "Error reading PM_CTRL\n");
+
+	val |= PM_CTL_WOL_EN_;
+
+	/* phy energy detect wakeup source */
+	if (pdata->wolopts & WAKE_PHY)
+		val |= PM_CTL_ED_EN_;
+
+	ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+	check_warn_return(ret, "Error writing PM_CTRL\n");
+
+	/* enable receiver to enable frame reception */
+	smsc95xx_start_rx_path(dev, 1);
+
+	/* some wol options are enabled, so enter SUSPEND0 */
+	netdev_info(dev->net, "entering SUSPEND0 mode\n");
+	return smsc95xx_enter_suspend0(dev);
+}
+
 static int smsc95xx_resume(struct usb_interface *intf)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
@@ -1184,31 +1442,31 @@
 
 	BUG_ON(!dev);
 
-	if (pdata->wolopts & WAKE_MAGIC) {
+	if (pdata->wolopts) {
 		smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
 
-		/* Disable magic packup wake */
-		ret = smsc95xx_read_reg(dev, WUCSR, &val);
-		check_warn_return(ret, "Error reading WUCSR");
+		/* clear wake-up sources */
+		ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+		check_warn_return(ret, "Error reading WUCSR\n");
 
-		val &= ~WUCSR_MPEN_;
+		val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
 
-		ret = smsc95xx_write_reg(dev, WUCSR, val);
-		check_warn_return(ret, "Error writing WUCSR");
+		ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+		check_warn_return(ret, "Error writing WUCSR\n");
 
 		/* clear wake-up status */
-		ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
-		check_warn_return(ret, "Error reading PM_CTRL");
+		ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+		check_warn_return(ret, "Error reading PM_CTRL\n");
 
 		val &= ~PM_CTL_WOL_EN_;
 		val |= PM_CTL_WUPS_;
 
-		ret = smsc95xx_write_reg(dev, PM_CTRL, val);
-		check_warn_return(ret, "Error writing PM_CTRL");
+		ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+		check_warn_return(ret, "Error writing PM_CTRL\n");
 	}
 
-	return usbnet_resume(intf);
-	check_warn_return(ret, "usbnet_resume error");
+	ret = usbnet_resume(intf);
+	check_warn_return(ret, "usbnet_resume error\n");
 
 	return 0;
 }
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 2ff9815..f360ee3 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -53,6 +53,11 @@
 #define ID_REV_CHIP_ID_MASK_		(0xFFFF0000)
 #define ID_REV_CHIP_REV_MASK_		(0x0000FFFF)
 #define ID_REV_CHIP_ID_9500_		(0x9500)
+#define ID_REV_CHIP_ID_9500A_		(0x9E00)
+#define ID_REV_CHIP_ID_9512_		(0xEC00)
+#define ID_REV_CHIP_ID_9530_		(0x9530)
+#define ID_REV_CHIP_ID_89530_		(0x9E08)
+#define ID_REV_CHIP_ID_9730_		(0x9730)
 
 #define INT_STS				(0x08)
 #define INT_STS_TX_STOP_		(0x00020000)
@@ -203,8 +208,11 @@
 #define VLAN2				(0x124)
 
 #define WUFF				(0x128)
+#define LAN9500_WUFF_NUM		(4)
+#define LAN9500A_WUFF_NUM		(8)
 
 #define WUCSR				(0x12C)
+#define WUCSR_WFF_PTR_RST_		(0x80000000)
 #define WUCSR_GUE_			(0x00000200)
 #define WUCSR_WUFR_			(0x00000040)
 #define WUCSR_MPR_			(0x00000020)
@@ -218,6 +226,23 @@
 
 /* Vendor-specific PHY Definitions */
 
+/* EDPD NLP / crossover time configuration (LAN9500A only) */
+#define PHY_EDPD_CONFIG			(16)
+#define PHY_EDPD_CONFIG_TX_NLP_EN_	((u16)0x8000)
+#define PHY_EDPD_CONFIG_TX_NLP_1000_	((u16)0x0000)
+#define PHY_EDPD_CONFIG_TX_NLP_768_	((u16)0x2000)
+#define PHY_EDPD_CONFIG_TX_NLP_512_	((u16)0x4000)
+#define PHY_EDPD_CONFIG_TX_NLP_256_	((u16)0x6000)
+#define PHY_EDPD_CONFIG_RX_1_NLP_	((u16)0x1000)
+#define PHY_EDPD_CONFIG_RX_NLP_64_	((u16)0x0000)
+#define PHY_EDPD_CONFIG_RX_NLP_256_	((u16)0x0400)
+#define PHY_EDPD_CONFIG_RX_NLP_512_	((u16)0x0800)
+#define PHY_EDPD_CONFIG_RX_NLP_1000_	((u16)0x0C00)
+#define PHY_EDPD_CONFIG_EXT_CROSSOVER_	((u16)0x0001)
+#define PHY_EDPD_CONFIG_DEFAULT		(PHY_EDPD_CONFIG_TX_NLP_EN_ | \
+					 PHY_EDPD_CONFIG_TX_NLP_768_ | \
+					 PHY_EDPD_CONFIG_RX_1_NLP_)
+
 /* Mode Control/Status Register */
 #define PHY_MODE_CTRL_STS		(17)
 #define MODE_CTRL_STS_EDPWRDOWN_	((u16)0x2000)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index edb81ed..c04110b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1616,6 +1616,202 @@
 EXPORT_SYMBOL(usbnet_device_suggests_idle);
 
 /*-------------------------------------------------------------------------*/
+static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+			     u16 value, u16 index, void *data, u16 size)
+{
+	void *buf = NULL;
+	int err = -ENOMEM;
+
+	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
+		   " value=0x%04x index=0x%04x size=%d\n",
+		   cmd, reqtype, value, index, size);
+
+	if (data) {
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf)
+			goto out;
+	}
+
+	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+			      cmd, reqtype, value, index, buf, size,
+			      USB_CTRL_GET_TIMEOUT);
+	if (err > 0 && err <= size)
+		memcpy(data, buf, err);
+	kfree(buf);
+out:
+	return err;
+}
+
+static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+			      u16 value, u16 index, const void *data,
+			      u16 size)
+{
+	void *buf = NULL;
+	int err = -ENOMEM;
+
+	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
+		   " value=0x%04x index=0x%04x size=%d\n",
+		   cmd, reqtype, value, index, size);
+
+	if (data) {
+		buf = kmemdup(data, size, GFP_KERNEL);
+		if (!buf)
+			goto out;
+	}
+
+	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+			      cmd, reqtype, value, index, buf, size,
+			      USB_CTRL_SET_TIMEOUT);
+	kfree(buf);
+
+out:
+	return err;
+}
+
+/*
+ * This function must not be called from a suspend/resume callback,
+ * otherwise a deadlock will occur.
+ */
+int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, void *data, u16 size)
+{
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return -ENODEV;
+	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+				data, size);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd);
+
+/*
+ * This function must not be called from a suspend/resume callback,
+ * otherwise a deadlock will occur.
+ */
+int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+		     u16 value, u16 index, const void *data, u16 size)
+{
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return -ENODEV;
+	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+				 data, size);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd);
+
+/*
+ * This function can safely be called from a suspend/resume callback and
+ * generally should only be called from there.
+ */
+int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+			  u16 value, u16 index, void *data, u16 size)
+{
+	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+				 data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
+
+/*
+ * This function can safely be called from a suspend/resume callback and
+ * generally should only be called from there.
+ */
+int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+			  u16 value, u16 index, const void *data,
+			  u16 size)
+{
+	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+				  data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
+
+static void usbnet_async_cmd_cb(struct urb *urb)
+{
+	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+	int status = urb->status;
+
+	if (status < 0)
+		dev_dbg(&urb->dev->dev, "%s failed with %d",
+			__func__, status);
+
+	kfree(req);
+	usb_free_urb(urb);
+}
+
+/*
+ * The caller must make sure that the device cannot be put into suspend
+ * state until the control URB completes.
+ */
+int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
+			   u16 value, u16 index, const void *data, u16 size)
+{
+	struct usb_ctrlrequest *req = NULL;
+	struct urb *urb;
+	int err = -ENOMEM;
+	void *buf = NULL;
+
+	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
+		   " value=0x%04x index=0x%04x size=%d\n",
+		   cmd, reqtype, value, index, size);
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		netdev_err(dev->net, "Error allocating URB in"
+			   " %s!\n", __func__);
+		goto fail;
+	}
+
+	if (data) {
+		buf = kmemdup(data, size, GFP_ATOMIC);
+		if (!buf) {
+			netdev_err(dev->net, "Error allocating buffer"
+				   " in %s!\n", __func__);
+			goto fail_free;
+		}
+	}
+
+	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+	if (!req) {
+		netdev_err(dev->net, "Failed to allocate memory for %s\n",
+			   __func__);
+		goto fail_free_buf;
+	}
+
+	req->bRequestType = reqtype;
+	req->bRequest = cmd;
+	req->wValue = cpu_to_le16(value);
+	req->wIndex = cpu_to_le16(index);
+	req->wLength = cpu_to_le16(size);
+
+	usb_fill_control_urb(urb, dev->udev,
+			     usb_sndctrlpipe(dev->udev, 0),
+			     (void *)req, buf, size,
+			     usbnet_async_cmd_cb, req);
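+	/* let the USB core free the data buffer when the URB is released */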
+	urb->transfer_flags |= URB_FREE_BUFFER;
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err < 0) {
+		netdev_err(dev->net, "Error submitting the control"
+			   " message: status=%d\n", err);
+		goto fail_free;
+	}
+	return 0;
+
+fail_free_buf:
+	kfree(buf);
+fail_free:
+	kfree(req);
+	usb_free_urb(urb);
+fail:
+	return err;
+
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
+/*-------------------------------------------------------------------------*/
 
 static int __init usbnet_init(void)
 {
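An illustrative aside, not part of the patch: a minimal sketch of how a usbnet
minidriver might wrap the helpers added above.  The register layout and the
USB_VENDOR_REQUEST_READ_REGISTER request value are hypothetical placeholders;
the point is that the _nopm variants are the ones to call from a suspend or
resume path, since the regular helpers take an autopm reference and would
deadlock there.

#include <linux/usb/usbnet.h>

/* hypothetical vendor-specific "read register" request */
#define USB_VENDOR_REQUEST_READ_REGISTER	0xa1

static int example_read_reg(struct usbnet *dev, u16 index, u32 *data,
			    bool in_pm)
{
	u8 reqtype = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	int ret;

	if (in_pm)
		ret = usbnet_read_cmd_nopm(dev, USB_VENDOR_REQUEST_READ_REGISTER,
					   reqtype, 0, index, data,
					   sizeof(*data));
	else
		ret = usbnet_read_cmd(dev, USB_VENDOR_REQUEST_READ_REGISTER,
				      reqtype, 0, index, data, sizeof(*data));

	if (ret < 0)
		netdev_warn(dev->net, "failed to read reg %d: %d\n", index, ret);
	else
		le32_to_cpus(data);

	return ret;
}

A suspend or resume handler would pass in_pm = true; normal runtime paths use
the autopm-aware variant.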
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e522ff7..24f6b27 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -264,6 +264,7 @@
 	ether_setup(dev);
 
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
 	dev->netdev_ops = &veth_netdev_ops;
 	dev->ethtool_ops = &veth_ethtool_ops;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbf8b06..26c502e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -212,8 +212,7 @@
 	 * the case of a broken device.
 	 */
 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
-		if (net_ratelimit())
-			pr_debug("%s: too much data\n", skb->dev->name);
+		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
 		dev_kfree_skb(skb);
 		return NULL;
 	}
@@ -333,9 +332,8 @@
 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 			break;
 		default:
-			if (net_ratelimit())
-				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->hdr.gso_type);
+			net_warn_ratelimited("%s: bad gso type %u.\n",
+					     dev->name, hdr->hdr.gso_type);
 			goto frame_err;
 		}
 
@@ -344,9 +342,7 @@
 
 		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
 		if (skb_shinfo(skb)->gso_size == 0) {
-			if (net_ratelimit())
-				printk(KERN_WARNING "%s: zero gso size.\n",
-				       dev->name);
+			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
 			goto frame_err;
 		}
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0ae1bcc..e4a192b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1094,10 +1094,10 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-		BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
-		return vmxnet3_tq_xmit(skb,
-				       &adapter->tx_queue[skb->queue_mapping],
-				       adapter, netdev);
+	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+	return vmxnet3_tq_xmit(skb,
+			       &adapter->tx_queue[skb->queue_mapping],
+			       adapter, netdev);
 }
 
 
@@ -1243,8 +1243,8 @@
 			skb_reserve(new_skb, NET_IP_ALIGN);
 			rbi->skb = new_skb;
 			rbi->dma_addr = pci_map_single(adapter->pdev,
-					rbi->skb->data, rbi->len,
-					PCI_DMA_FROMDEVICE);
+						       rbi->skb->data, rbi->len,
+						       PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 
@@ -1331,14 +1331,14 @@
 		/* if needed, update the register */
 		if (unlikely(rq->shared->updateRxProd)) {
 			VMXNET3_WRITE_BAR0_REG(adapter,
-				rxprod_reg[ring_idx] + rq->qid * 8,
-				ring->next2fill);
+					       rxprod_reg[ring_idx] + rq->qid * 8,
+					       ring->next2fill);
 			rq->uncommitted[ring_idx] = 0;
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
 		vmxnet3_getRxComp(rcd,
-		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
 	return num_rxd;
@@ -1922,7 +1922,7 @@
 		free_irq(adapter->pdev->irq, adapter->netdev);
 		break;
 	default:
-		BUG_ON(true);
+		BUG();
 	}
 }
 
@@ -2949,11 +2949,11 @@
 
 	spin_lock_init(&adapter->cmd_lock);
 	adapter->shared = pci_alloc_consistent(adapter->pdev,
-			  sizeof(struct Vmxnet3_DriverShared),
-			  &adapter->shared_pa);
+					       sizeof(struct Vmxnet3_DriverShared),
+					       &adapter->shared_pa);
 	if (!adapter->shared) {
 		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
+		       pci_name(pdev));
 		err = -ENOMEM;
 		goto err_alloc_shared;
 	}
@@ -2964,16 +2964,16 @@
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
 	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
-			     &adapter->queue_desc_pa);
+						  &adapter->queue_desc_pa);
 
 	if (!adapter->tqd_start) {
 		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
+		       pci_name(pdev));
 		err = -ENOMEM;
 		goto err_alloc_queue_desc;
 	}
 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
-							adapter->num_tx_queues);
+							    adapter->num_tx_queues);
 
 	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
 	if (adapter->pm_conf == NULL) {
@@ -3019,7 +3019,7 @@
 
 	adapter->dev_number = atomic_read(&devices_found);
 
-	 adapter->share_intr = irq_share_mode;
+	adapter->share_intr = irq_share_mode;
 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
 	    adapter->num_tx_queues != adapter->num_rx_queues)
 		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
@@ -3065,7 +3065,7 @@
 
 	if (err) {
 		printk(KERN_ERR "Failed to register adapter %s\n",
-			pci_name(pdev));
+		       pci_name(pdev));
 		goto err_register;
 	}
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8b5c619..ce77b8b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -29,6 +29,8 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/hash.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
 #include <net/ip.h>
 #include <net/icmp.h>
 #include <net/udp.h>
@@ -110,18 +112,23 @@
 	__u16		  port_max;
 	__u8		  tos;		/* TOS override */
 	__u8		  ttl;
-	bool		  learn;
+	u32		  flags;	/* VXLAN_F_* below */
 
 	unsigned long	  age_interval;
 	struct timer_list age_timer;
 	spinlock_t	  hash_lock;
 	unsigned int	  addrcnt;
 	unsigned int	  addrmax;
-	unsigned int	  addrexceeded;
 
 	struct hlist_head fdb_head[FDB_HASH_SIZE];
 };
 
+#define VXLAN_F_LEARN	0x01
+#define VXLAN_F_PROXY	0x02
+#define VXLAN_F_RSC	0x04
+#define VXLAN_F_L2MISS	0x08
+#define VXLAN_F_L3MISS	0x10
+
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 
@@ -155,6 +162,7 @@
 	struct nda_cacheinfo ci;
 	struct nlmsghdr *nlh;
 	struct ndmsg *ndm;
+	bool send_ip, send_eth;
 
 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
 	if (nlh == NULL)
@@ -162,16 +170,24 @@
 
 	ndm = nlmsg_data(nlh);
 	memset(ndm, 0, sizeof(*ndm));
-	ndm->ndm_family	= AF_BRIDGE;
+
+	send_eth = send_ip = true;
+
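+	/* miss notifications (RTM_GETNEIGH) carry only the fields that are
+	 * actually known; an all-zero address is simply omitted
+	 */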
+	if (type == RTM_GETNEIGH) {
+		ndm->ndm_family	= AF_INET;
+		send_ip = fdb->remote_ip != 0;
+		send_eth = !is_zero_ether_addr(fdb->eth_addr);
+	} else
+		ndm->ndm_family	= AF_BRIDGE;
 	ndm->ndm_state = fdb->state;
 	ndm->ndm_ifindex = vxlan->dev->ifindex;
 	ndm->ndm_flags = NTF_SELF;
 	ndm->ndm_type = NDA_DST;
 
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
 		goto nla_put_failure;
 
-	if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+	if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
 		goto nla_put_failure;
 
 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
@@ -223,6 +239,29 @@
 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
 }
 
+static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_fdb f;
+
+	memset(&f, 0, sizeof f);
+	f.state = NUD_STALE;
+	f.remote_ip = ipa; /* goes to NDA_DST */
+
+	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+}
+
+static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
+{
+	struct vxlan_fdb	f;
+
+	memset(&f, 0, sizeof f);
+	f.state = NUD_STALE;
+	memcpy(f.eth_addr, eth_addr, ETH_ALEN);
+
+	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+}
+
 /* Hash Ethernet address */
 static u32 eth_hash(const unsigned char *addr)
 {
@@ -552,6 +591,8 @@
 		goto drop;
 	}
 
+	skb_reset_mac_header(skb);
+
 	/* Re-examine inner Ethernet packet */
 	oip = ip_hdr(skb);
 	skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -561,7 +602,7 @@
 			       vxlan->dev->dev_addr) == 0)
 		goto drop;
 
-	if (vxlan->learn)
+	if (vxlan->flags & VXLAN_F_LEARN)
 		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
 
 	__skb_tunnel_rx(skb, vxlan->dev);
@@ -600,6 +641,117 @@
 	return 0;
 }
 
+static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+{
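+	/* "Proxy ARP": answer ARP requests for known hosts from the local
+	 * neighbour table instead of flooding them across the tunnel.  When
+	 * VXLAN_F_L3MISS is set, unresolved targets trigger an RTM_GETNEIGH
+	 * miss notification so a control daemon can fill in the entry.
+	 */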
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct arphdr *parp;
+	u8 *arpptr, *sha;
+	__be32 sip, tip;
+	struct neighbour *n;
+
+	if (dev->flags & IFF_NOARP)
+		goto out;
+
+	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
+		dev->stats.tx_dropped++;
+		goto out;
+	}
+	parp = arp_hdr(skb);
+
+	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
+	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
+	    parp->ar_pro != htons(ETH_P_IP) ||
+	    parp->ar_op != htons(ARPOP_REQUEST) ||
+	    parp->ar_hln != dev->addr_len ||
+	    parp->ar_pln != 4)
+		goto out;
+	arpptr = (u8 *)parp + sizeof(struct arphdr);
+	sha = arpptr;
+	arpptr += dev->addr_len;	/* sha */
+	memcpy(&sip, arpptr, sizeof(sip));
+	arpptr += sizeof(sip);
+	arpptr += dev->addr_len;	/* tha */
+	memcpy(&tip, arpptr, sizeof(tip));
+
+	if (ipv4_is_loopback(tip) ||
+	    ipv4_is_multicast(tip))
+		goto out;
+
+	n = neigh_lookup(&arp_tbl, &tip, dev);
+
+	if (n) {
+		struct vxlan_dev *vxlan = netdev_priv(dev);
+		struct vxlan_fdb *f;
+		struct sk_buff	*reply;
+
+		if (!(n->nud_state & NUD_CONNECTED)) {
+			neigh_release(n);
+			goto out;
+		}
+
+		f = vxlan_find_mac(vxlan, n->ha);
+		if (f && f->remote_ip == 0) {
+			/* bridge-local neighbor */
+			neigh_release(n);
+			goto out;
+		}
+
+		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+				n->ha, sha);
+
+		neigh_release(n);
+
+		skb_reset_mac_header(reply);
+		__skb_pull(reply, skb_network_offset(reply));
+		reply->ip_summed = CHECKSUM_UNNECESSARY;
+		reply->pkt_type = PACKET_HOST;
+
+		if (netif_rx_ni(reply) == NET_RX_DROP)
+			dev->stats.rx_dropped++;
+	} else if (vxlan->flags & VXLAN_F_L3MISS)
+		vxlan_ip_miss(dev, tip);
+out:
+	consume_skb(skb);
+	return NETDEV_TX_OK;
+}
+
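+/* Route short-circuit (RSC): if the neighbour entry for the inner IPv4
+ * destination resolves to a different MAC address than the frame's current
+ * destination, rewrite the Ethernet header so the frame is delivered
+ * straight to that host instead of detouring through the gateway.
+ */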
+static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct neighbour *n;
+	struct iphdr *pip;
+
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return false;
+
+	n = NULL;
+	switch (ntohs(eth_hdr(skb)->h_proto)) {
+	case ETH_P_IP:
+		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+			return false;
+		pip = ip_hdr(skb);
+		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
+		break;
+	default:
+		return false;
+	}
+
+	if (n) {
+		bool diff;
+
+		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
+		if (diff) {
+			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+				dev->addr_len);
+			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
+		}
+		neigh_release(n);
+		return diff;
+	} else if (vxlan->flags & VXLAN_F_L3MISS)
+		vxlan_ip_miss(dev, pip->daddr);
+	return false;
+}
+
 /* Extract dsfield from inner protocol */
 static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
 				   const struct sk_buff *skb)
@@ -622,22 +774,6 @@
 	return INET_ECN_encapsulate(tos, inner);
 }
 
-static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
-{
-	const struct ethhdr *eth = (struct ethhdr *) skb->data;
-	const struct vxlan_fdb *f;
-
-	if (is_multicast_ether_addr(eth->h_dest))
-		return vxlan->gaddr;
-
-	f = vxlan_find_mac(vxlan, eth->h_dest);
-	if (f)
-		return f->remote_ip;
-	else
-		return vxlan->gaddr;
-
-}
-
 static void vxlan_sock_free(struct sk_buff *skb)
 {
 	sock_put(skb->sk);
@@ -684,6 +820,7 @@
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct rtable *rt;
 	const struct iphdr *old_iph;
+	struct ethhdr *eth;
 	struct iphdr *iph;
 	struct vxlanhdr *vxh;
 	struct udphdr *uh;
@@ -694,10 +831,50 @@
 	__be16 df = 0;
 	__u8 tos, ttl;
 	int err;
+	bool did_rsc = false;
+	const struct vxlan_fdb *f;
 
-	dst = vxlan_find_dst(vxlan, skb);
-	if (!dst)
+	skb_reset_mac_header(skb);
+	eth = eth_hdr(skb);
+
+	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
+		return arp_reduce(dev, skb);
+	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
+		did_rsc = route_shortcircuit(dev, skb);
+
+	f = vxlan_find_mac(vxlan, eth->h_dest);
+	if (f == NULL) {
+		did_rsc = false;
+		dst = vxlan->gaddr;
+		if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
+		    !is_multicast_ether_addr(eth->h_dest))
+			vxlan_fdb_miss(vxlan, eth->h_dest);
+	} else
+		dst = f->remote_ip;
+
+	if (!dst) {
+		if (did_rsc) {
+			__skb_pull(skb, skb_network_offset(skb));
+			skb->ip_summed = CHECKSUM_NONE;
+			skb->pkt_type = PACKET_HOST;
+
+			/* short-circuited back to local bridge */
+			if (netif_rx(skb) == NET_RX_SUCCESS) {
+				struct vxlan_stats *stats =
+						this_cpu_ptr(vxlan->stats);
+
+				u64_stats_update_begin(&stats->syncp);
+				stats->tx_packets++;
+				stats->tx_bytes += pkt_len;
+				u64_stats_update_end(&stats->syncp);
+			} else {
+				dev->stats.tx_errors++;
+				dev->stats.tx_aborted_errors++;
+			}
+			return NETDEV_TX_OK;
+		}
 		goto drop;
+	}
 
 	/* Need space for new headers (invalidates iph ptr) */
 	if (skb_cow_head(skb, VXLAN_HEADROOM))
@@ -769,7 +946,7 @@
 
 	vxlan_set_owner(dev, skb);
 
-	/* See __IPTUNNEL_XMIT */
+	/* See iptunnel_xmit() */
 	skb->ip_summed = CHECKSUM_NONE;
 	ip_select_ident(iph, &rt->dst, NULL);
 
@@ -1020,6 +1197,10 @@
 	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
 	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
 	[IFLA_VXLAN_PORT_RANGE] = { .len  = sizeof(struct ifla_vxlan_port_range) },
+	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1111,14 +1292,29 @@
 	if (data[IFLA_VXLAN_TOS])
 		vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
 
+	if (data[IFLA_VXLAN_TTL])
+		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
 	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
-		vxlan->learn = true;
+		vxlan->flags |= VXLAN_F_LEARN;
 
 	if (data[IFLA_VXLAN_AGEING])
 		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
 	else
 		vxlan->age_interval = FDB_AGE_DEFAULT;
 
+	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
+		vxlan->flags |= VXLAN_F_PROXY;
+
+	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
+		vxlan->flags |= VXLAN_F_RSC;
+
+	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
+		vxlan->flags |= VXLAN_F_L2MISS;
+
+	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
+		vxlan->flags |= VXLAN_F_L3MISS;
+
 	if (data[IFLA_VXLAN_LIMIT])
 		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
 
@@ -1155,6 +1351,10 @@
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
 		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -1183,7 +1383,15 @@
 
 	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
 	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
-	    nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
+	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
+			!!(vxlan->flags & VXLAN_F_LEARN)) ||
+	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
+			!!(vxlan->flags & VXLAN_F_PROXY)) ||
+	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
+	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
+			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
+	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
+			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
 	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
 	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
 		goto nla_put_failure;
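For context, an illustrative aside (not part of the patch): with l2miss/l3miss
enabled the driver reports unresolved MAC and IP lookups as RTM_GETNEIGH
messages multicast to the RTNLGRP_NEIGH group, so an external control daemon
can install the missing forwarding entries.  A minimal userspace listener
sketch, with the message parsing and FDB programming left out:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl sa;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_NEIGH;	/* neighbour events, incl. miss notifications */

	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;

		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len))
			if (nh->nlmsg_type == RTM_GETNEIGH)
				printf("miss notification, %u bytes\n",
				       nh->nlmsg_len);
	}

	return 0;
}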
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index eac709b..df70248 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -52,9 +52,9 @@
 
 quiet_cmd_build_wanxlfw = BLD FW  $@
       cmd_build_wanxlfw = \
-	$(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
+	$(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
 	$(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
-	hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x  ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
+	hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x  ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
 	rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
 
 $(obj)/wanxlfw.inc:	$(src)/wanxlfw.S
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
index 73aae2b..21565d5 100644
--- a/drivers/net/wan/wanxlfw.S
+++ b/drivers/net/wan/wanxlfw.S
@@ -35,6 +35,7 @@
 */
 
 #include <linux/hdlc.h>
+#include <linux/hdlc/ioctl.h>
 #include "wanxl.h"
 
 /* memory addresses and offsets */
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3cd05a71..57f7db1 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -7433,7 +7433,7 @@
 					num_null_ies++;
 				break;
 
-			case WLAN_EID_GENERIC:
+			case WLAN_EID_VENDOR_SPECIFIC:
 				if (ie[1] >= 4 &&
 				    ie[2] == 0x00 &&
 				    ie[3] == 0x50 &&
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b9ddf2..77fa428 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -379,7 +379,7 @@
 		 manifest_sync_timeout);
 
 	if (!size) {
-		dev_printk(KERN_ERR, &udev->dev, "FW buffer length invalid!\n");
+		dev_err(&udev->dev, "FW buffer length invalid!\n");
 		return -EINVAL;
 	}
 
@@ -391,8 +391,8 @@
 		if (need_dfu_state) {
 			ret = at76_dfu_get_state(udev, &dfu_state);
 			if (ret < 0) {
-				dev_printk(KERN_ERR, &udev->dev,
-					   "cannot get DFU state: %d\n", ret);
+				dev_err(&udev->dev,
+					"cannot get DFU state: %d\n", ret);
 				goto exit;
 			}
 			need_dfu_state = 0;
@@ -407,9 +407,9 @@
 				dfu_timeout = at76_get_timeout(&dfu_stat_buf);
 				need_dfu_state = 0;
 			} else
-				dev_printk(KERN_ERR, &udev->dev,
-					   "at76_dfu_get_status returned %d\n",
-					   ret);
+				dev_err(&udev->dev,
+					"at76_dfu_get_status returned %d\n",
+					ret);
 			break;
 
 		case STATE_DFU_DOWNLOAD_BUSY:
@@ -438,9 +438,9 @@
 			blockno++;
 
 			if (ret != bsize)
-				dev_printk(KERN_ERR, &udev->dev,
-					   "at76_load_int_fw_block "
-					   "returned %d\n", ret);
+				dev_err(&udev->dev,
+					"at76_load_int_fw_block returned %d\n",
+					ret);
 			need_dfu_state = 1;
 			break;
 
@@ -1255,8 +1255,7 @@
 	at76_dbg(DBG_DEVSTART, "opmode %d", op_mode);
 
 	if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
-		dev_printk(KERN_ERR, &udev->dev, "unexpected opmode %d\n",
-			   op_mode);
+		dev_err(&udev->dev, "unexpected opmode %d\n", op_mode);
 		return -EINVAL;
 	}
 
@@ -1275,9 +1274,9 @@
 			 size, bsize, blockno);
 		ret = at76_load_ext_fw_block(udev, blockno, block, bsize);
 		if (ret != bsize) {
-			dev_printk(KERN_ERR, &udev->dev,
-				   "loading %dth firmware block failed: %d\n",
-				   blockno, ret);
+			dev_err(&udev->dev,
+				"loading %dth firmware block failed: %d\n",
+				blockno, ret);
 			goto exit;
 		}
 		buf += bsize;
@@ -1293,8 +1292,8 @@
 exit:
 	kfree(block);
 	if (ret < 0)
-		dev_printk(KERN_ERR, &udev->dev,
-			   "downloading external firmware failed: %d\n", ret);
+		dev_err(&udev->dev,
+			"downloading external firmware failed: %d\n", ret);
 	return ret;
 }
 
@@ -1308,8 +1307,8 @@
 				   need_remap ? 0 : 2 * HZ);
 
 	if (ret < 0) {
-		dev_printk(KERN_ERR, &udev->dev,
-			   "downloading internal fw failed with %d\n", ret);
+		dev_err(&udev->dev,
+			"downloading internal fw failed with %d\n", ret);
 		goto exit;
 	}
 
@@ -1319,8 +1318,8 @@
 	if (need_remap) {
 		ret = at76_remap(udev);
 		if (ret < 0) {
-			dev_printk(KERN_ERR, &udev->dev,
-				   "sending REMAP failed with %d\n", ret);
+			dev_err(&udev->dev,
+				"sending REMAP failed with %d\n", ret);
 			goto exit;
 		}
 	}
@@ -1555,11 +1554,10 @@
 	at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
 	ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
 	if (ret < 0) {
-		dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
-			   fwe->fwname);
-		dev_printk(KERN_ERR, &udev->dev,
-			   "you may need to download the firmware from "
-			   "http://developer.berlios.de/projects/at76c503a/\n");
+		dev_err(&udev->dev, "firmware %s not found!\n",
+			fwe->fwname);
+		dev_err(&udev->dev,
+			"you may need to download the firmware from http://developer.berlios.de/projects/at76c503a/\n");
 		goto exit;
 	}
 
@@ -1567,17 +1565,17 @@
 	fwh = (struct at76_fw_header *)(fwe->fw->data);
 
 	if (fwe->fw->size <= sizeof(*fwh)) {
-		dev_printk(KERN_ERR, &udev->dev,
-			   "firmware is too short (0x%zx)\n", fwe->fw->size);
+		dev_err(&udev->dev,
+			"firmware is too short (0x%zx)\n", fwe->fw->size);
 		goto exit;
 	}
 
 	/* CRC currently not checked */
 	fwe->board_type = le32_to_cpu(fwh->board_type);
 	if (fwe->board_type != board_type) {
-		dev_printk(KERN_ERR, &udev->dev,
-			   "board type mismatch, requested %u, got %u\n",
-			   board_type, fwe->board_type);
+		dev_err(&udev->dev,
+			"board type mismatch, requested %u, got %u\n",
+			board_type, fwe->board_type);
 		goto exit;
 	}
 
@@ -2150,8 +2148,7 @@
 	}
 
 	if (!ep_in || !ep_out) {
-		dev_printk(KERN_ERR, &interface->dev,
-			   "bulk endpoints missing\n");
+		dev_err(&interface->dev, "bulk endpoints missing\n");
 		return -ENXIO;
 	}
 
@@ -2161,15 +2158,14 @@
 	priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!priv->rx_urb || !priv->tx_urb) {
-		dev_printk(KERN_ERR, &interface->dev, "cannot allocate URB\n");
+		dev_err(&interface->dev, "cannot allocate URB\n");
 		return -ENOMEM;
 	}
 
 	buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
 	priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
 	if (!priv->bulk_out_buffer) {
-		dev_printk(KERN_ERR, &interface->dev,
-			   "cannot allocate output buffer\n");
+		dev_err(&interface->dev, "cannot allocate output buffer\n");
 		return -ENOMEM;
 	}
 
@@ -2230,8 +2226,7 @@
 	/* MAC address */
 	ret = at76_get_hw_config(priv);
 	if (ret < 0) {
-		dev_printk(KERN_ERR, &interface->dev,
-			   "cannot get MAC address\n");
+		dev_err(&interface->dev, "cannot get MAC address\n");
 		goto exit;
 	}
 
@@ -2358,8 +2353,8 @@
 	   we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
 
 	if (op_mode == OPMODE_HW_CONFIG_MODE) {
-		dev_printk(KERN_ERR, &interface->dev,
-			   "cannot handle a device in HW_CONFIG_MODE\n");
+		dev_err(&interface->dev,
+			"cannot handle a device in HW_CONFIG_MODE\n");
 		ret = -EBUSY;
 		goto error;
 	}
@@ -2371,9 +2366,9 @@
 			   "downloading internal firmware\n");
 		ret = at76_load_internal_fw(udev, fwe);
 		if (ret < 0) {
-			dev_printk(KERN_ERR, &interface->dev,
-				   "error %d downloading internal firmware\n",
-				   ret);
+			dev_err(&interface->dev,
+				"error %d downloading internal firmware\n",
+				ret);
 			goto error;
 		}
 		usb_put_dev(udev);
@@ -2408,8 +2403,8 @@
 		/* Re-check firmware version */
 		ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
 		if (ret < 0) {
-			dev_printk(KERN_ERR, &interface->dev,
-				   "error %d getting firmware version\n", ret);
+			dev_err(&interface->dev,
+				"error %d getting firmware version\n", ret);
 			goto error;
 		}
 	}
@@ -2449,7 +2444,7 @@
 
 	wiphy_info(priv->hw->wiphy, "disconnecting\n");
 	at76_delete_device(priv);
-	dev_printk(KERN_INFO, &interface->dev, "disconnected\n");
+	dev_info(&interface->dev, "disconnected\n");
 }
 
 /* Structure for registering this driver with the USB subsystem */
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 0960224..c25dcf1 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -26,5 +26,6 @@
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
 source "drivers/net/wireless/ath/ath6kl/Kconfig"
+source "drivers/net/wireless/ath/ar5523/Kconfig"
 
 endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d716b74..1e18621 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_ATH9K_HW)		+= ath9k/
 obj-$(CONFIG_CARL9170)		+= carl9170/
 obj-$(CONFIG_ATH6KL)		+= ath6kl/
+obj-$(CONFIG_AR5523)		+= ar5523/
 
 obj-$(CONFIG_ATH_COMMON)	+= ath.o
 
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 0000000..11d99ee
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,7 @@
+config AR5523
+       tristate "Atheros AR5523 wireless driver support"
+       depends on MAC80211 && USB
+       select FW_LOADER
+       ---help---
+         This module adds support for AR5523-based USB dongles such as D-Link
+         DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 0000000..ebf7f3b
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AR5523)   := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 0000000..7157f7d
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1798 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This driver is based on the uath driver written by Damien Bergamini for
+ * OpenBSD, who did black-box analysis of the Windows binary driver to find
+ * out how the hardware works.  It contains a lot of magic numbers because of
+ * that and only has minimal functionality.
+ */
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "ar5523.h"
+#include "ar5523_hw.h"
+
+/*
+ * Various supported device vendors/products.
+ * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
+ */
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar);
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
+
+static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
+			      struct ar5523_tx_cmd *cmd)
+{
+	int dlen, olen;
+	__be32 *rp;
+
+	dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);
+
+	if (dlen < 0) {
+		WARN_ON(1);
+		goto out;
+	}
+
+	ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
+		   dlen);
+
+	rp = (__be32 *)(hdr + 1);
+	if (dlen >= sizeof(u32)) {
+		olen = be32_to_cpu(rp[0]);
+		dlen -= sizeof(u32);
+		if (olen == 0) {
+			/* convention is 0 =>'s one word */
+			olen = sizeof(u32);
+		}
+	} else
+		olen = 0;
+
+	if (cmd->odata) {
+		if (cmd->olen < olen) {
+			ar5523_err(ar, "olen too small %d < %d\n",
+				   cmd->olen, olen);
+			cmd->olen = 0;
+			cmd->res = -EOVERFLOW;
+		} else {
+			cmd->olen = olen;
+			memcpy(cmd->odata, &rp[1], olen);
+			cmd->res = 0;
+		}
+	}
+
+out:
+	complete(&cmd->done);
+}
+
+static void ar5523_cmd_rx_cb(struct urb *urb)
+{
+	struct ar5523 *ar = urb->context;
+	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+	struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
+	int dlen;
+	u32 code, hdrlen;
+
+	if (urb->status) {
+		if (urb->status != -ESHUTDOWN)
+			ar5523_err(ar, "RX USB error %d.\n", urb->status);
+		goto skip;
+	}
+
+	if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
+		ar5523_err(ar, "RX USB transfer too short.\n");
+		goto skip;
+	}
+
+	ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
+		   be32_to_cpu(hdr->code) & 0xff, hdr->priv);
+
+	code = be32_to_cpu(hdr->code);
+	hdrlen = be32_to_cpu(hdr->len);
+
+	switch (code & 0xff) {
+	default:
+		/* reply to a read command */
+		if (hdr->priv != AR5523_CMD_ID) {
+			ar5523_err(ar, "Unexpected command id: %02x\n",
+				   code & 0xff);
+			goto skip;
+		}
+		ar5523_read_reply(ar, hdr, cmd);
+		break;
+
+	case WDCMSG_DEVICE_AVAIL:
+		ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
+		cmd->res = 0;
+		cmd->olen = 0;
+		complete(&cmd->done);
+		break;
+
+	case WDCMSG_SEND_COMPLETE:
+		ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
+			atomic_read(&ar->tx_nr_pending));
+		if (!test_bit(AR5523_HW_UP, &ar->flags))
+			ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
+		else {
+			mod_timer(&ar->tx_wd_timer,
+				  jiffies + AR5523_TX_WD_TIMEOUT);
+			ar5523_data_tx_pkt_put(ar);
+
+		}
+		break;
+
+	case WDCMSG_TARGET_START:
+		/* This command returns a bogus id so it needs special
+		   handling */
+		dlen = hdrlen - sizeof(*hdr);
+		if (dlen != (int)sizeof(u32)) {
+			ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
+			return;
+		}
+		memcpy(cmd->odata, hdr + 1, sizeof(u32));
+		cmd->olen = sizeof(u32);
+		cmd->res = 0;
+		complete(&cmd->done);
+		break;
+
+	case WDCMSG_STATS_UPDATE:
+		ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
+		break;
+	}
+
+skip:
+	ar5523_submit_rx_cmd(ar);
+}
+
+static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
+{
+	ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!ar->rx_cmd_urb)
+		return -ENOMEM;
+
+	ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+					    GFP_KERNEL,
+					    &ar->rx_cmd_urb->transfer_dma);
+	if (!ar->rx_cmd_buf) {
+		usb_free_urb(ar->rx_cmd_urb);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
+{
+	usb_kill_urb(ar->rx_cmd_urb);
+}
+
+static void ar5523_free_rx_cmd(struct ar5523 *ar)
+{
+	usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+			  ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
+	usb_free_urb(ar->rx_cmd_urb);
+}
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar)
+{
+	int error;
+
+	usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
+			  ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
+			  AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
+	ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
+	if (error) {
+		if (error != -ENODEV)
+			ar5523_err(ar, "error %d when submitting rx urb\n",
+				   error);
+		return error;
+	}
+	return 0;
+}
+
+/*
+ * Command submitted cb
+ */
+static void ar5523_cmd_tx_cb(struct urb *urb)
+{
+	struct ar5523_tx_cmd *cmd = urb->context;
+	struct ar5523 *ar = cmd->ar;
+
+	if (urb->status) {
+		ar5523_err(ar, "Failed to TX command. Status = %d\n",
+			   urb->status);
+		cmd->res = urb->status;
+		complete(&cmd->done);
+		return;
+	}
+
+	if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
+		cmd->res = 0;
+		complete(&cmd->done);
+	}
+}
+
+static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+		      int ilen, void *odata, int olen, int flags)
+{
+	struct ar5523_cmd_hdr *hdr;
+	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+	int xferlen, error;
+
+	/* always bulk-out a multiple of 4 bytes */
+	xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
+
+	hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
+	memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
+	hdr->len  = cpu_to_be32(xferlen);
+	hdr->code = cpu_to_be32(code);
+	hdr->priv = AR5523_CMD_ID;
+
+	if (flags & AR5523_CMD_FLAG_MAGIC)
+		hdr->magic = cpu_to_be32(1 << 24);
+	memcpy(hdr + 1, idata, ilen);
+
+	cmd->odata = odata;
+	cmd->olen = olen;
+	cmd->flags = flags;
+
+	ar5523_dbg(ar, "do cmd %02x\n", code);
+
+	usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
+			  cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
+	cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
+	if (error) {
+		ar5523_err(ar, "could not send command 0x%x, error=%d\n",
+			   code, error);
+		return error;
+	}
+
+	if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+		cmd->odata = NULL;
+		ar5523_err(ar, "timeout waiting for command %02x reply\n",
+			   code);
+		cmd->res = -ETIMEDOUT;
+	}
+	return cmd->res;
+}
+
+static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
+			    int len, int flags)
+{
+	flags &= ~AR5523_CMD_FLAG_READ;
+	return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
+}
+
+static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
+			   int ilen, void *odata, int olen, int flags)
+{
+	flags |= AR5523_CMD_FLAG_READ;
+	return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
+}
+
+static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
+{
+	struct ar5523_write_mac write;
+	int error;
+
+	write.reg = cpu_to_be32(reg);
+	write.len = cpu_to_be32(0);	/* 0 = single write */
+	*(__be32 *)write.data = cpu_to_be32(val);
+
+	error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+				 3 * sizeof(u32), 0);
+	if (error != 0)
+		ar5523_err(ar, "could not write register 0x%02x\n", reg);
+	return error;
+}
+
+static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
+			       int len)
+{
+	struct ar5523_write_mac write;
+	int error;
+
+	write.reg = cpu_to_be32(reg);
+	write.len = cpu_to_be32(len);
+	memcpy(write.data, data, len);
+
+	/* properly handle the case where len is zero (reset) */
+	error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+	    (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
+	if (error != 0)
+		ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
+			   len, reg);
+	return error;
+}
+
+static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
+			     int olen)
+{
+	int error;
+	__be32 which_be;
+
+	which_be = cpu_to_be32(which);
+	error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
+	    &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
+	if (error != 0)
+		ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which);
+	return error;
+}
+
+static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
+{
+	int error;
+	__be32 cap_be, val_be;
+
+	cap_be = cpu_to_be32(cap);
+	error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be,
+				sizeof(cap_be), &val_be, sizeof(__be32),
+				AR5523_CMD_FLAG_MAGIC);
+	if (error != 0) {
+		ar5523_err(ar, "could not read capability %u\n", cap);
+		return error;
+	}
+	*val = be32_to_cpu(val_be);
+	return error;
+}
+
+static int ar5523_get_devcap(struct ar5523 *ar)
+{
+#define	GETCAP(x) do {				\
+	error = ar5523_get_capability(ar, x, &cap);		\
+	if (error != 0)					\
+		return error;				\
+	ar5523_info(ar, "Cap: "			\
+	    "%s=0x%08x\n", #x, cap);	\
+} while (0)
+	int error;
+	u32 cap;
+
+	/* collect device capabilities */
+	GETCAP(CAP_TARGET_VERSION);
+	GETCAP(CAP_TARGET_REVISION);
+	GETCAP(CAP_MAC_VERSION);
+	GETCAP(CAP_MAC_REVISION);
+	GETCAP(CAP_PHY_REVISION);
+	GETCAP(CAP_ANALOG_5GHz_REVISION);
+	GETCAP(CAP_ANALOG_2GHz_REVISION);
+
+	GETCAP(CAP_REG_DOMAIN);
+	GETCAP(CAP_REG_CAP_BITS);
+	GETCAP(CAP_WIRELESS_MODES);
+	GETCAP(CAP_CHAN_SPREAD_SUPPORT);
+	GETCAP(CAP_COMPRESS_SUPPORT);
+	GETCAP(CAP_BURST_SUPPORT);
+	GETCAP(CAP_FAST_FRAMES_SUPPORT);
+	GETCAP(CAP_CHAP_TUNING_SUPPORT);
+	GETCAP(CAP_TURBOG_SUPPORT);
+	GETCAP(CAP_TURBO_PRIME_SUPPORT);
+	GETCAP(CAP_DEVICE_TYPE);
+	GETCAP(CAP_WME_SUPPORT);
+	GETCAP(CAP_TOTAL_QUEUES);
+	GETCAP(CAP_CONNECTION_ID_MAX);
+
+	GETCAP(CAP_LOW_5GHZ_CHAN);
+	GETCAP(CAP_HIGH_5GHZ_CHAN);
+	GETCAP(CAP_LOW_2GHZ_CHAN);
+	GETCAP(CAP_HIGH_2GHZ_CHAN);
+	GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
+	GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
+
+	GETCAP(CAP_CIPHER_AES_CCM);
+	GETCAP(CAP_CIPHER_TKIP);
+	GETCAP(CAP_MIC_TKIP);
+	return 0;
+}
+
+static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
+{
+	struct ar5523_cmd_ledsteady led;
+
+	led.lednum = cpu_to_be32(lednum);
+	led.ledmode = cpu_to_be32(ledmode);
+
+	ar5523_dbg(ar, "set %s led %s (steady)\n",
+		   (lednum == UATH_LED_LINK) ? "link" : "activity",
+		   ledmode ? "on" : "off");
+	return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
+				 0);
+}
+
+static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
+{
+	struct ar5523_cmd_rx_filter rxfilter;
+
+	rxfilter.bits = cpu_to_be32(bits);
+	rxfilter.op = cpu_to_be32(op);
+
+	ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
+	return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
+				 sizeof(rxfilter), 0);
+}
+
+static int ar5523_reset_tx_queues(struct ar5523 *ar)
+{
+	__be32 qid = cpu_to_be32(0);
+
+	ar5523_dbg(ar, "resetting Tx queue\n");
+	return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
+				 &qid, sizeof(qid), 0);
+}
+
+static int ar5523_set_chan(struct ar5523 *ar)
+{
+	struct ieee80211_conf *conf = &ar->hw->conf;
+
+	struct ar5523_cmd_reset reset;
+
+	memset(&reset, 0, sizeof(reset));
+	reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
+	reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
+	reset.freq = cpu_to_be32(conf->channel->center_freq);
+	reset.maxrdpower = cpu_to_be32(50);	/* XXX */
+	reset.channelchange = cpu_to_be32(1);
+	reset.keeprccontent = cpu_to_be32(0);
+
+	ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
+		   be32_to_cpu(reset.flags),
+		   conf->channel->center_freq);
+	return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
+}
+
+static int ar5523_queue_init(struct ar5523 *ar)
+{
+	struct ar5523_cmd_txq_setup qinfo;
+
+	ar5523_dbg(ar, "setting up Tx queue\n");
+	qinfo.qid	     = cpu_to_be32(0);
+	qinfo.len	     = cpu_to_be32(sizeof(qinfo.attr));
+	qinfo.attr.priority  = cpu_to_be32(0);	/* XXX */
+	qinfo.attr.aifs	     = cpu_to_be32(3);
+	qinfo.attr.logcwmin  = cpu_to_be32(4);
+	qinfo.attr.logcwmax  = cpu_to_be32(10);
+	qinfo.attr.bursttime = cpu_to_be32(0);
+	qinfo.attr.mode	     = cpu_to_be32(0);
+	qinfo.attr.qflags    = cpu_to_be32(1);	/* XXX? */
+	return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
+				 sizeof(qinfo), 0);
+}
+
+static int ar5523_switch_chan(struct ar5523 *ar)
+{
+	int error;
+
+	error = ar5523_set_chan(ar);
+	if (error) {
+		ar5523_err(ar, "could not set chan, error %d\n", error);
+		goto out_err;
+	}
+
+	/* reset Tx rings */
+	error = ar5523_reset_tx_queues(ar);
+	if (error) {
+		ar5523_err(ar, "could not reset Tx queues, error %d\n",
+			   error);
+		goto out_err;
+	}
+	/* set Tx rings WME properties */
+	error = ar5523_queue_init(ar);
+	if (error)
+		ar5523_err(ar, "could not init wme, error %d\n", error);
+
+out_err:
+	return error;
+}
+
+static void ar5523_rx_data_put(struct ar5523 *ar,
+				struct ar5523_rx_data *data)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+	list_move(&data->list, &ar->rx_data_free);
+	spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+}
+
+static void ar5523_data_rx_cb(struct urb *urb)
+{
+	struct ar5523_rx_data *data = urb->context;
+	struct ar5523 *ar = data->ar;
+	struct ar5523_rx_desc *desc;
+	struct ar5523_chunk *chunk;
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_rx_status *rx_status;
+	u32 rxlen;
+	int usblen = urb->actual_length;
+	int hdrlen, pad;
+
+	ar5523_dbg(ar, "%s\n", __func__);
+	/* sync/async unlink faults aren't errors */
+	if (urb->status) {
+		if (urb->status != -ESHUTDOWN)
+			ar5523_err(ar, "%s: USB err: %d\n", __func__,
+				   urb->status);
+		goto skip;
+	}
+
+	if (usblen < AR5523_MIN_RXBUFSZ) {
+		ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
+		goto skip;
+	}
+
+	chunk = (struct ar5523_chunk *) data->skb->data;
+
+	if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
+		chunk->seqnum != 0) {
+		ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
+			   chunk->seqnum, chunk->flags,
+			   be16_to_cpu(chunk->length));
+		goto skip;
+	}
+
+	/* Rx descriptor is located at the end, 32-bit aligned */
+	desc = (struct ar5523_rx_desc *)
+		(data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
+
+	rxlen = be32_to_cpu(desc->len);
+	if (rxlen > ar->rxbufsz) {
+		ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
+			   be32_to_cpu(desc->len));
+		goto skip;
+	}
+
+	if (!rxlen) {
+		ar5523_dbg(ar, "RX: rxlen is 0\n");
+		goto skip;
+	}
+
+	if (be32_to_cpu(desc->status) != 0) {
+		ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
+			   be32_to_cpu(desc->status), be32_to_cpu(desc->len));
+		goto skip;
+	}
+
+	skb_reserve(data->skb, sizeof(*chunk));
+	skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
+
+	hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
+	if (!IS_ALIGNED(hdrlen, 4)) {
+		ar5523_dbg(ar, "eek, alignment workaround activated\n");
+		pad = ALIGN(hdrlen, 4) - hdrlen;
+		memmove(data->skb->data + pad, data->skb->data, hdrlen);
+		skb_pull(data->skb, pad);
+		skb_put(data->skb, pad);
+	}
+
+	rx_status = IEEE80211_SKB_RXCB(data->skb);
+	memset(rx_status, 0, sizeof(*rx_status));
+	rx_status->freq = be32_to_cpu(desc->channel);
+	rx_status->band = hw->conf.channel->band;
+	rx_status->signal = -95 + be32_to_cpu(desc->rssi);
+
+	ieee80211_rx_irqsafe(hw, data->skb);
+	data->skb = NULL;
+
+skip:
+	if (data->skb) {
+		dev_kfree_skb_irq(data->skb);
+		data->skb = NULL;
+	}
+
+	ar5523_rx_data_put(ar, data);
+	if (atomic_inc_return(&ar->rx_data_free_cnt) >=
+	    AR5523_RX_DATA_REFILL_COUNT &&
+	    test_bit(AR5523_HW_UP, &ar->flags))
+		queue_work(ar->wq, &ar->rx_refill_work);
+}
+
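+/*
+ * Re-arm the RX path: for each buffer on the free list allocate a fresh
+ * skb, point the buffer's bulk URB at it and submit it, moving the buffer
+ * to the used list.  Stops when the free list is empty or a submission
+ * fails.
+ */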
+static void ar5523_rx_refill_work(struct work_struct *work)
+{
+	struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
+	struct ar5523_rx_data *data;
+	unsigned long flags;
+	int error;
+
+	ar5523_dbg(ar, "%s\n", __func__);
+	do {
+		spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+
+		if (!list_empty(&ar->rx_data_free))
+			data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+		else
+			data = NULL;
+		spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+		if (!data)
+			goto done;
+
+		data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
+		if (!data->skb) {
+			ar5523_err(ar, "could not allocate rx skbuff\n");
+			return;
+		}
+
+		usb_fill_bulk_urb(data->urb, ar->dev,
+				  ar5523_data_rx_pipe(ar->dev), data->skb->data,
+				  ar->rxbufsz, ar5523_data_rx_cb, data);
+
+		spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+		list_move(&data->list, &ar->rx_data_used);
+		spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+		atomic_dec(&ar->rx_data_free_cnt);
+
+		error = usb_submit_urb(data->urb, GFP_KERNEL);
+		if (error) {
+			kfree_skb(data->skb);
+			if (error != -ENODEV)
+				ar5523_err(ar, "Err sending rx data urb %d\n",
+					   error);
+			ar5523_rx_data_put(ar, data);
+			atomic_inc(&ar->rx_data_free_cnt);
+			return;
+		}
+
+	} while (true);
+done:
+	return;
+}
+
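+/* Kill all in-flight RX URBs and return their buffers to the free list. */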
+static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
+{
+	struct ar5523_rx_data *data;
+	unsigned long flags;
+
+	do {
+		spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+		if (!list_empty(&ar->rx_data_used))
+			data = (struct ar5523_rx_data *) ar->rx_data_used.next;
+		else
+			data = NULL;
+		spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+		if (!data)
+			break;
+
+		usb_kill_urb(data->urb);
+		list_move(&data->list, &ar->rx_data_free);
+		atomic_inc(&ar->rx_data_free_cnt);
+	} while (data);
+}
+
+static void ar5523_free_rx_bufs(struct ar5523 *ar)
+{
+	struct ar5523_rx_data *data;
+
+	ar5523_cancel_rx_bufs(ar);
+	while (!list_empty(&ar->rx_data_free)) {
+		data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+		list_del(&data->list);
+		usb_free_urb(data->urb);
+	}
+}
+
+static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
+{
+	int i;
+
+	for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
+		struct ar5523_rx_data *data = &ar->rx_data[i];
+
+		data->ar = ar;
+		data->urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!data->urb) {
+			ar5523_err(ar, "could not allocate rx data urb\n");
+			goto err;
+		}
+		list_add_tail(&data->list, &ar->rx_data_free);
+		atomic_inc(&ar->rx_data_free_cnt);
+	}
+	return 0;
+
+err:
+	ar5523_free_rx_bufs(ar);
+	return -ENOMEM;
+}
+
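+/*
+ * Per-frame TX bookkeeping: once nothing is pending any more, stop the TX
+ * watchdog and wake up waiters in ar5523_flush_tx(); restart the mac80211
+ * queues when the backlog has drained below the restart threshold.
+ */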
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
+{
+	atomic_dec(&ar->tx_nr_total);
+	if (!atomic_dec_return(&ar->tx_nr_pending)) {
+		del_timer(&ar->tx_wd_timer);
+		wake_up(&ar->tx_flush_waitq);
+	}
+
+	if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
+		ar5523_dbg(ar, "restart tx queue\n");
+		ieee80211_wake_queues(ar->hw);
+	}
+}
+
+static void ar5523_data_tx_cb(struct urb *urb)
+{
+	struct sk_buff *skb = urb->context;
+	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+	struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+				       txi->driver_data;
+	struct ar5523 *ar = data->ar;
+	unsigned long flags;
+
+	ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
+
+	spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+	list_del(&data->list);
+	spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+	if (urb->status) {
+		ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
+		ar5523_data_tx_pkt_put(ar);
+		ieee80211_free_txskb(ar->hw, skb);
+	} else {
+		skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
+		ieee80211_tx_status_irqsafe(ar->hw, skb);
+	}
+	usb_free_urb(urb);
+}
+
+static void ar5523_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+	struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+					txi->driver_data;
+	struct ar5523 *ar = hw->priv;
+	unsigned long flags;
+
+	ar5523_dbg(ar, "tx called\n");
+	if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
+		ar5523_dbg(ar, "tx queue full\n");
+		ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
+			   atomic_read(&ar->tx_nr_total),
+			   atomic_read(&ar->tx_nr_pending));
+		ieee80211_stop_queues(hw);
+	}
+
+	data->skb = skb;
+
+	spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+	list_add_tail(&data->list, &ar->tx_queue_pending);
+	spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+	ieee80211_queue_work(ar->hw, &ar->tx_work);
+}
+
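+/*
+ * Drain the pending TX list: prepend the chunk and TX descriptor headers
+ * to each frame and submit it to the data TX endpoint as a bulk URB.
+ */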
+static void ar5523_tx_work_locked(struct ar5523 *ar)
+{
+	struct ar5523_tx_data *data;
+	struct ar5523_tx_desc *desc;
+	struct ar5523_chunk *chunk;
+	struct ieee80211_tx_info *txi;
+	struct urb *urb;
+	struct sk_buff *skb;
+	int error = 0, paylen;
+	u32 txqid;
+	unsigned long flags;
+
+	BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
+		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+	ar5523_dbg(ar, "%s\n", __func__);
+	do {
+		spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+		if (!list_empty(&ar->tx_queue_pending)) {
+			data = (struct ar5523_tx_data *)
+				ar->tx_queue_pending.next;
+			list_del(&data->list);
+		} else
+			data = NULL;
+		spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+		if (!data)
+			break;
+
+		skb = data->skb;
+		txqid = 0;
+		txi = IEEE80211_SKB_CB(skb);
+		paylen = skb->len;
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			ar5523_err(ar, "Failed to allocate TX urb\n");
+			ieee80211_free_txskb(ar->hw, skb);
+			continue;
+		}
+
+		data->ar = ar;
+		data->urb = urb;
+
+		desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
+		chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
+
+		chunk->seqnum = 0;
+		chunk->flags = UATH_CFLAGS_FINAL;
+		chunk->length = cpu_to_be16(skb->len);
+
+		desc->msglen = cpu_to_be32(skb->len);
+		desc->msgid  = AR5523_DATA_ID;
+		desc->buflen = cpu_to_be32(paylen);
+		desc->type   = cpu_to_be32(WDCMSG_SEND);
+		desc->flags  = cpu_to_be32(UATH_TX_NOTIFY);
+
+		if (test_bit(AR5523_CONNECTED, &ar->flags))
+			desc->connid = cpu_to_be32(AR5523_ID_BSS);
+		else
+			desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
+
+		if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
+			txqid |= UATH_TXQID_MINRATE;
+
+		desc->txqid = cpu_to_be32(txqid);
+
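+		/* Bulk OUT transfers that are an exact multiple of the
+		 * endpoint size are terminated with a zero-length packet.
+		 */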
+		urb->transfer_flags = URB_ZERO_PACKET;
+		usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
+				  skb->data, skb->len, ar5523_data_tx_cb, skb);
+
+		spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+		list_add_tail(&data->list, &ar->tx_queue_submitted);
+		spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+		mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
+		atomic_inc(&ar->tx_nr_pending);
+
+		ar5523_dbg(ar, "TX Frame (%d pending)\n",
+			   atomic_read(&ar->tx_nr_pending));
+		error = usb_submit_urb(urb, GFP_KERNEL);
+		if (error) {
+			ar5523_err(ar, "error %d when submitting tx urb\n",
+				   error);
+			spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+			list_del(&data->list);
+			spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+			atomic_dec(&ar->tx_nr_pending);
+			ar5523_data_tx_pkt_put(ar);
+			usb_free_urb(urb);
+			ieee80211_free_txskb(ar->hw, skb);
+		}
+	} while (true);
+}
+
+static void ar5523_tx_work(struct work_struct *work)
+{
+	struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
+
+	ar5523_dbg(ar, "%s\n", __func__);
+	mutex_lock(&ar->mutex);
+	ar5523_tx_work_locked(ar);
+	mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_tx_wd_timer(unsigned long arg)
+{
+	struct ar5523 *ar = (struct ar5523 *) arg;
+
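+	/* Timer callbacks run in softirq context and the reset command
+	 * sleeps, so defer the recovery to a work item.
+	 */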
+	ar5523_dbg(ar, "TX watchdog timer triggered\n");
+	ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
+}
+
+static void ar5523_tx_wd_work(struct work_struct *work)
+{
+	struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
+
+	/* Occasionally the TX queues stop responding. The only way to
+	 * recover seems to be to reset the dongle.
+	 */
+
+	mutex_lock(&ar->mutex);
+	ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
+		   atomic_read(&ar->tx_nr_total),
+		   atomic_read(&ar->tx_nr_pending));
+
+	ar5523_err(ar, "Will restart dongle.\n");
+	ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
+	mutex_unlock(&ar->mutex);
+}
+
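+/*
+ * Push out anything still queued and wait, with a timeout, for all
+ * pending frames to complete.
+ */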
+static void ar5523_flush_tx(struct ar5523 *ar)
+{
+	ar5523_tx_work_locked(ar);
+
+	/* Don't waste time trying to flush if USB is disconnected */
+	if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
+		return;
+	if (!wait_event_timeout(ar->tx_flush_waitq,
+	    !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
+		ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
+			   atomic_read(&ar->tx_nr_total),
+			   atomic_read(&ar->tx_nr_pending));
+}
+
+static void ar5523_free_tx_cmd(struct ar5523 *ar)
+{
+	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+	usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, cmd->buf_tx,
+			  cmd->urb_tx->transfer_dma);
+	usb_free_urb(cmd->urb_tx);
+}
+
+static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
+{
+	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+	cmd->ar = ar;
+	init_completion(&cmd->done);
+
+	cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+	if (!cmd->urb_tx) {
+		ar5523_err(ar, "could not allocate urb\n");
+		return -ENOMEM;
+	}
+	cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
+					 GFP_KERNEL,
+					 &cmd->urb_tx->transfer_dma);
+	if (!cmd->buf_tx) {
+		usb_free_urb(cmd->urb_tx);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * This function is called periodically (every second) when associated to
+ * query device statistics.
+ */
+static void ar5523_stat_work(struct work_struct *work)
+{
+	struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
+	int error;
+
+	ar5523_dbg(ar, "%s\n", __func__);
+	mutex_lock(&ar->mutex);
+
+	/*
+	 * Send request for statistics asynchronously once a second. This
+	 * seems to be important. Throughput is a lot better if this is done.
+	 */
+	error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
+	if (error)
+		ar5523_err(ar, "could not query stats, error %d\n", error);
+	mutex_unlock(&ar->mutex);
+	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
+}
+
+/*
+ * Interface routines to the mac80211 stack.
+ */
+static int ar5523_start(struct ieee80211_hw *hw)
+{
+	struct ar5523 *ar = hw->priv;
+	int error;
+	__be32 val;
+
+	ar5523_dbg(ar, "start called\n");
+
+	mutex_lock(&ar->mutex);
+	val = cpu_to_be32(0);
+	ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);
+
+	/* set MAC address */
+	ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
+			    ETH_ALEN);
+
+	/* XXX honor net80211 state */
+	ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
+	ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
+	ar5523_config(ar, CFG_ABOLT, 0x0000003f);
+	ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);
+
+	ar5523_config(ar, CFG_SERVICE_TYPE, 1);
+	ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
+	ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
+	ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
+	ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
+	ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
+	ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
+	ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
+	ar5523_config(ar, CFG_MODE_CTS, 0x00000002);
+
+	error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
+	    &val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
+	if (error) {
+		ar5523_dbg(ar, "could not start target, error %d\n", error);
+		goto err;
+	}
+	ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
+		   be32_to_cpu(val));
+
+	ar5523_switch_chan(ar);
+
+	val = cpu_to_be32(TARGET_DEVICE_AWAKE);
+	ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
+	/* XXX? check */
+	ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
+
+	set_bit(AR5523_HW_UP, &ar->flags);
+	queue_work(ar->wq, &ar->rx_refill_work);
+
+	/* enable Rx */
+	ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+	ar5523_set_rxfilter(ar,
+			    UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+			    UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
+			    UATH_FILTER_OP_SET);
+
+	ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
+	ar5523_dbg(ar, "start OK\n");
+
+err:
+	mutex_unlock(&ar->mutex);
+	return error;
+}
+
+static void ar5523_stop(struct ieee80211_hw *hw)
+{
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "stop called\n");
+
+	cancel_delayed_work_sync(&ar->stat_work);
+	mutex_lock(&ar->mutex);
+	clear_bit(AR5523_HW_UP, &ar->flags);
+
+	ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+	ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);
+
+	ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
+
+	del_timer_sync(&ar->tx_wd_timer);
+	cancel_work_sync(&ar->tx_wd_work);
+	cancel_work_sync(&ar->rx_refill_work);
+	ar5523_cancel_rx_bufs(ar);
+	mutex_unlock(&ar->mutex);
+}
+
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct ar5523 *ar = hw->priv;
+	int ret;
+
+	ar5523_dbg(ar, "set_rts_threshold called\n");
+	mutex_lock(&ar->mutex);
+
+	ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
+
+	mutex_unlock(&ar->mutex);
+	return ret;
+}
+
+static void ar5523_flush(struct ieee80211_hw *hw, bool drop)
+{
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "flush called\n");
+	ar5523_flush_tx(ar);
+}
+
+static int ar5523_add_interface(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif)
+{
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "add interface called\n");
+
+	if (ar->vif) {
+		ar5523_dbg(ar, "invalid add_interface\n");
+		return -EOPNOTSUPP;
+	}
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		ar->vif = vif;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static void ar5523_remove_interface(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif)
+{
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "remove interface called\n");
+	ar->vif = NULL;
+}
+
+static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+{
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "config called\n");
+	mutex_lock(&ar->mutex);
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ar5523_dbg(ar, "Do channel switch\n");
+		ar5523_flush_tx(ar);
+		ar5523_switch_chan(ar);
+	}
+	mutex_unlock(&ar->mutex);
+	return 0;
+}
+
+static int ar5523_get_wlan_mode(struct ar5523 *ar,
+				struct ieee80211_bss_conf *bss_conf)
+{
+	struct ieee80211_supported_band *band;
+	int bit;
+	struct ieee80211_sta *sta;
+	u32 sta_rate_set;
+
+	band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
+	sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+	if (!sta) {
+		ar5523_info(ar, "STA not found!\n");
+		return WLAN_MODE_11b;
+	}
+	sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+
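+	/* If the peer supports any OFDM rate (6-54 Mbit/s) treat it as an
+	 * 11g station, otherwise fall back to 11b.
+	 */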
+	for (bit = 0; bit < band->n_bitrates; bit++) {
+		if (sta_rate_set & 1) {
+			int rate = band->bitrates[bit].bitrate;
+			switch (rate) {
+			case 60:
+			case 90:
+			case 120:
+			case 180:
+			case 240:
+			case 360:
+			case 480:
+			case 540:
+				return WLAN_MODE_11g;
+			}
+		}
+		sta_rate_set >>= 1;
+	}
+	return WLAN_MODE_11b;
+}
+
+static void ar5523_create_rateset(struct ar5523 *ar,
+				  struct ieee80211_bss_conf *bss_conf,
+				  struct ar5523_cmd_rateset *rs,
+				  bool basic)
+{
+	struct ieee80211_supported_band *band;
+	struct ieee80211_sta *sta;
+	int bit, i = 0;
+	u32 sta_rate_set, basic_rate_set;
+
+	sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+	basic_rate_set = bss_conf->basic_rates;
+	if (!sta) {
+		ar5523_info(ar, "STA not found. Cannot set rates\n");
+		sta_rate_set = bss_conf->basic_rates;
+	} else
+		sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+
+	ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
+
+	band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
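+	/* Walk the band's bitrate table; when building a basic rateset,
+	 * rates in the BSS basic rate set are flagged with the high bit,
+	 * as in a Supported Rates IE.
+	 */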
+	for (bit = 0; bit < band->n_bitrates; bit++) {
+		BUG_ON(i >= AR5523_MAX_NRATES);
+		ar5523_dbg(ar, "Considering rate %d : %d\n",
+			   band->bitrates[bit].hw_value, sta_rate_set & 1);
+		if (sta_rate_set & 1) {
+			rs->set[i] = band->bitrates[bit].hw_value;
+			if (basic_rate_set & 1 && basic)
+				rs->set[i] |= 0x80;
+			i++;
+		}
+		sta_rate_set >>= 1;
+		basic_rate_set >>= 1;
+	}
+
+	rs->length = i;
+}
+
+static int ar5523_set_basic_rates(struct ar5523 *ar,
+				  struct ieee80211_bss_conf *bss)
+{
+	struct ar5523_cmd_rates rates;
+
+	memset(&rates, 0, sizeof(rates));
+	rates.connid = cpu_to_be32(2);		/* XXX */
+	rates.size   = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+	ar5523_create_rateset(ar, bss, &rates.rateset, true);
+
+	return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
+				sizeof(rates), 0);
+}
+
+static int ar5523_create_connection(struct ar5523 *ar,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_bss_conf *bss)
+{
+	struct ar5523_cmd_create_connection create;
+	int wlan_mode;
+
+	memset(&create, 0, sizeof(create));
+	create.connid = cpu_to_be32(2);
+	create.bssid = cpu_to_be32(0);
+	/* XXX packed or not?  */
+	create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+
+	ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
+
+	wlan_mode = ar5523_get_wlan_mode(ar, bss);
+	create.connattr.wlanmode = cpu_to_be32(wlan_mode);
+
+	return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
+				sizeof(create), 0);
+}
+
+static int ar5523_write_associd(struct ar5523 *ar,
+				struct ieee80211_bss_conf *bss)
+{
+	struct ar5523_cmd_set_associd associd;
+
+	memset(&associd, 0, sizeof(associd));
+	associd.defaultrateix = cpu_to_be32(0);	/* XXX */
+	associd.associd = cpu_to_be32(bss->aid);
+	associd.timoffset = cpu_to_be32(0x3b);	/* XXX */
+	memcpy(associd.bssid, bss->bssid, ETH_ALEN);
+	return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
+				sizeof(associd), 0);
+}
+
+static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_bss_conf *bss,
+				    u32 changed)
+{
+	struct ar5523 *ar = hw->priv;
+	int error;
+
+	ar5523_dbg(ar, "bss_info_changed called\n");
+	mutex_lock(&ar->mutex);
+
+	if (!(changed & BSS_CHANGED_ASSOC))
+		goto out_unlock;
+
+	if (bss->assoc) {
+		error = ar5523_create_connection(ar, vif, bss);
+		if (error) {
+			ar5523_err(ar, "could not create connection\n");
+			goto out_unlock;
+		}
+
+		error = ar5523_set_basic_rates(ar, bss);
+		if (error) {
+			ar5523_err(ar, "could not set negotiated rate set\n");
+			goto out_unlock;
+		}
+
+		error = ar5523_write_associd(ar, bss);
+		if (error) {
+			ar5523_err(ar, "could not set association\n");
+			goto out_unlock;
+		}
+
+		/* turn link LED on */
+		ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
+		set_bit(AR5523_CONNECTED, &ar->flags);
+		ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
+
+	} else {
+		cancel_delayed_work(&ar->stat_work);
+		clear_bit(AR5523_CONNECTED, &ar->flags);
+		ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+	}
+
+out_unlock:
+	mutex_unlock(&ar->mutex);
+}
+
+#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
+				  FIF_ALLMULTI | \
+				  FIF_FCSFAIL | \
+				  FIF_OTHER_BSS)
+
+static void ar5523_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *total_flags,
+				    u64 multicast)
+{
+	struct ar5523 *ar = hw->priv;
+	u32 filter = 0;
+
+	ar5523_dbg(ar, "configure_filter called\n");
+	mutex_lock(&ar->mutex);
+	ar5523_flush_tx(ar);
+
+	*total_flags &= AR5523_SUPPORTED_FILTERS;
+
+	/* The filters seem strange. UATH_FILTER_RX_BCAST and
+	 * UATH_FILTER_RX_MCAST do not result in those frames being RXed.
+	 * The only way I have found to get [mb]cast frames seems to be
+	 * to set UATH_FILTER_RX_PROM. */
+	filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+		  UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
+		  UATH_FILTER_RX_PROM;
+
+	ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+	ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);
+
+	mutex_unlock(&ar->mutex);
+}
+
+static const struct ieee80211_ops ar5523_ops = {
+	.start			= ar5523_start,
+	.stop			= ar5523_stop,
+	.tx			= ar5523_tx,
+	.set_rts_threshold	= ar5523_set_rts_threshold,
+	.add_interface		= ar5523_add_interface,
+	.remove_interface	= ar5523_remove_interface,
+	.config			= ar5523_hwconfig,
+	.bss_info_changed	= ar5523_bss_info_changed,
+	.configure_filter	= ar5523_configure_filter,
+	.flush			= ar5523_flush,
+};
+
+static int ar5523_host_available(struct ar5523 *ar)
+{
+	struct ar5523_cmd_host_available setup;
+
+	/* inform target the host is available */
+	setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
+	setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
+	setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
+	setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
+	return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
+			       &setup, sizeof(setup), NULL, 0, 0);
+}
+
+static int ar5523_get_devstatus(struct ar5523 *ar)
+{
+	u8 macaddr[ETH_ALEN];
+	int error;
+
+	/* retrieve MAC address */
+	error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
+	if (error) {
+		ar5523_err(ar, "could not read MAC address\n");
+		return error;
+	}
+
+	SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
+
+	error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
+	    &ar->serial[0], sizeof(ar->serial));
+	if (error) {
+		ar5523_err(ar, "could not read device serial number\n");
+		return error;
+	}
+	return 0;
+}
+
+#define AR5523_SANE_RXBUFSZ 2000
+
+static int ar5523_get_max_rxsz(struct ar5523 *ar)
+{
+	int error;
+	__be32 rxsize;
+
+	/* Get max rx size */
+	error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
+				  sizeof(rxsize));
+	if (error != 0) {
+		ar5523_err(ar, "could not read max RX size\n");
+		return error;
+	}
+
+	ar->rxbufsz = be32_to_cpu(rxsize);
+
+	if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
+		ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
+			   AR5523_SANE_RXBUFSZ);
+		ar->rxbufsz = AR5523_SANE_RXBUFSZ;
+	}
+
+	ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
+	return 0;
+}
+
+/*
+ * This is copied from rtl818x, but we should probably move this
+ * to common code as in OpenBSD.
+ */
+static const struct ieee80211_rate ar5523_rates[] = {
+	{ .bitrate = 10, .hw_value = 2, },
+	{ .bitrate = 20, .hw_value = 4 },
+	{ .bitrate = 55, .hw_value = 11, },
+	{ .bitrate = 110, .hw_value = 22, },
+	{ .bitrate = 60, .hw_value = 12, },
+	{ .bitrate = 90, .hw_value = 18, },
+	{ .bitrate = 120, .hw_value = 24, },
+	{ .bitrate = 180, .hw_value = 36, },
+	{ .bitrate = 240, .hw_value = 48, },
+	{ .bitrate = 360, .hw_value = 72, },
+	{ .bitrate = 480, .hw_value = 96, },
+	{ .bitrate = 540, .hw_value = 108, },
+};
+
+static const struct ieee80211_channel ar5523_channels[] = {
+	{ .center_freq = 2412 },
+	{ .center_freq = 2417 },
+	{ .center_freq = 2422 },
+	{ .center_freq = 2427 },
+	{ .center_freq = 2432 },
+	{ .center_freq = 2437 },
+	{ .center_freq = 2442 },
+	{ .center_freq = 2447 },
+	{ .center_freq = 2452 },
+	{ .center_freq = 2457 },
+	{ .center_freq = 2462 },
+	{ .center_freq = 2467 },
+	{ .center_freq = 2472 },
+	{ .center_freq = 2484 },
+};
+
+static int ar5523_init_modes(struct ar5523 *ar)
+{
+	BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
+	BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
+
+	memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
+	memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
+
+	ar->band.band = IEEE80211_BAND_2GHZ;
+	ar->band.channels = ar->channels;
+	ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
+	ar->band.bitrates = ar->rates;
+	ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
+	ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+	return 0;
+}
+
+/*
+ * Load the MIPS R4000 microcode into the device.  Once the image is loaded,
+ * the device will detach itself from the bus and reattach later with a new
+ * product Id (a la ezusb).
+ */
+static int ar5523_load_firmware(struct usb_device *dev)
+{
+	struct ar5523_fwblock *txblock, *rxblock;
+	const struct firmware *fw;
+	void *fwbuf;
+	int len, offset;
+	int foolen; /* XXX(hch): handle short transfers */
+	int error = -ENXIO;
+
+	if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
+		dev_err(&dev->dev, "no firmware found: %s\n",
+			AR5523_FIRMWARE_FILE);
+		return -ENOENT;
+	}
+
+	txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+	if (!txblock)
+		goto out;
+
+	rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
+	if (!rxblock)
+		goto out_free_txblock;
+
+	fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
+	if (!fwbuf)
+		goto out_free_rxblock;
+
+	memset(txblock, 0, sizeof(struct ar5523_fwblock));
+	txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
+	txblock->total = cpu_to_be32(fw->size);
+
+	offset = 0;
+	len = fw->size;
+	while (len > 0) {
+		int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);
+
+		txblock->remain = cpu_to_be32(len - mlen);
+		txblock->len = cpu_to_be32(mlen);
+
+		/* send firmware block meta-data */
+		error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
+				     txblock, sizeof(*txblock), &foolen,
+				     AR5523_CMD_TIMEOUT);
+		if (error) {
+			dev_err(&dev->dev,
+				"could not send firmware block info\n");
+			goto out_free_fwbuf;
+		}
+
+		/* send firmware block data */
+		memcpy(fwbuf, fw->data + offset, mlen);
+		error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
+				     fwbuf, mlen, &foolen,
+				     AR5523_DATA_TIMEOUT);
+		if (error) {
+			dev_err(&dev->dev,
+				"could not send firmware block data\n");
+			goto out_free_fwbuf;
+		}
+
+		/* wait for ack from firmware */
+		error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
+				     rxblock, sizeof(*rxblock), &foolen,
+				     AR5523_CMD_TIMEOUT);
+		if (error) {
+			dev_err(&dev->dev,
+				"could not read firmware answer\n");
+			goto out_free_fwbuf;
+		}
+
+		len -= mlen;
+		offset += mlen;
+	}
+
+	/*
+	 * Set the error to -ENXIO to make sure we continue probing for
+	 * a driver.
+	 */
+	error = -ENXIO;
+
+ out_free_fwbuf:
+	kfree(fwbuf);
+ out_free_rxblock:
+	kfree(rxblock);
+ out_free_txblock:
+	kfree(txblock);
+ out:
+	release_firmware(fw);
+	return error;
+}
+
+static int ar5523_probe(struct usb_interface *intf,
+			const struct usb_device_id *id)
+{
+	struct usb_device *dev = interface_to_usbdev(intf);
+	struct ieee80211_hw *hw;
+	struct ar5523 *ar;
+	int error = -ENOMEM;
+
+	/*
+	 * Load firmware if the device requires it.  This will return
+	 * -ENXIO on success and we'll get called back after the USB
+	 * id changes to indicate that the firmware is present.
+	 */
+	if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
+		return ar5523_load_firmware(dev);
+
+	hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
+	if (!hw)
+		goto out;
+	SET_IEEE80211_DEV(hw, &intf->dev);
+
+	ar = hw->priv;
+	ar->hw = hw;
+	ar->dev = dev;
+	mutex_init(&ar->mutex);
+
+	INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
+	init_timer(&ar->tx_wd_timer);
+	setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
+	INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
+	INIT_WORK(&ar->tx_work, ar5523_tx_work);
+	INIT_LIST_HEAD(&ar->tx_queue_pending);
+	INIT_LIST_HEAD(&ar->tx_queue_submitted);
+	spin_lock_init(&ar->tx_data_list_lock);
+	atomic_set(&ar->tx_nr_total, 0);
+	atomic_set(&ar->tx_nr_pending, 0);
+	init_waitqueue_head(&ar->tx_flush_waitq);
+
+	atomic_set(&ar->rx_data_free_cnt, 0);
+	INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
+	INIT_LIST_HEAD(&ar->rx_data_free);
+	INIT_LIST_HEAD(&ar->rx_data_used);
+	spin_lock_init(&ar->rx_data_list_lock);
+
+	ar->wq = create_singlethread_workqueue("ar5523");
+	if (!ar->wq) {
+		ar5523_err(ar, "Could not create wq\n");
+		goto out_free_ar;
+	}
+
+	error = ar5523_alloc_rx_bufs(ar);
+	if (error) {
+		ar5523_err(ar, "Could not allocate rx buffers\n");
+		goto out_free_wq;
+	}
+
+	error = ar5523_alloc_rx_cmd(ar);
+	if (error) {
+		ar5523_err(ar, "Could not allocate rx command buffers\n");
+		goto out_free_rx_bufs;
+	}
+
+	error = ar5523_alloc_tx_cmd(ar);
+	if (error) {
+		ar5523_err(ar, "Could not allocate tx command buffers\n");
+		goto out_free_rx_cmd;
+	}
+
+	error = ar5523_submit_rx_cmd(ar);
+	if (error) {
+		ar5523_err(ar, "Failed to submit rx cmd\n");
+		goto out_free_tx_cmd;
+	}
+
+	/*
+	 * We're now ready to send/receive firmware commands.
+	 */
+	error = ar5523_host_available(ar);
+	if (error) {
+		ar5523_err(ar, "could not initialize adapter\n");
+		goto out_cancel_rx_cmd;
+	}
+
+	error = ar5523_get_max_rxsz(ar);
+	if (error) {
+		ar5523_err(ar, "could not get caps from adapter\n");
+		goto out_cancel_rx_cmd;
+	}
+
+	error = ar5523_get_devcap(ar);
+	if (error) {
+		ar5523_err(ar, "could not get caps from adapter\n");
+		goto out_cancel_rx_cmd;
+	}
+
+	error = ar5523_get_devstatus(ar);
+	if (error != 0) {
+		ar5523_err(ar, "could not get device status\n");
+		goto out_cancel_rx_cmd;
+	}
+
+	ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
+			(id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
+
+	ar->vif = NULL;
+	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+		    IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_HAS_RATE_CONTROL;
+	hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
+				sizeof(struct ar5523_chunk);
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+	hw->queues = 1;
+
+	error = ar5523_init_modes(ar);
+	if (error)
+		goto out_cancel_rx_cmd;
+
+	usb_set_intfdata(intf, hw);
+
+	error = ieee80211_register_hw(hw);
+	if (error) {
+		ar5523_err(ar, "could not register device\n");
+		goto out_cancel_rx_cmd;
+	}
+
+	ar5523_info(ar, "Found and initialized AR5523 device\n");
+	return 0;
+
+out_cancel_rx_cmd:
+	ar5523_cancel_rx_cmd(ar);
+out_free_tx_cmd:
+	ar5523_free_tx_cmd(ar);
+out_free_rx_cmd:
+	ar5523_free_rx_cmd(ar);
+out_free_rx_bufs:
+	ar5523_free_rx_bufs(ar);
+out_free_wq:
+	destroy_workqueue(ar->wq);
+out_free_ar:
+	ieee80211_free_hw(hw);
+out:
+	return error;
+}
+
+static void ar5523_disconnect(struct usb_interface *intf)
+{
+	struct ieee80211_hw *hw = usb_get_intfdata(intf);
+	struct ar5523 *ar = hw->priv;
+
+	ar5523_dbg(ar, "detaching\n");
+	set_bit(AR5523_USB_DISCONNECTED, &ar->flags);
+
+	ieee80211_unregister_hw(hw);
+
+	ar5523_cancel_rx_cmd(ar);
+	ar5523_free_tx_cmd(ar);
+	ar5523_free_rx_cmd(ar);
+	ar5523_free_rx_bufs(ar);
+
+	destroy_workqueue(ar->wq);
+
+	ieee80211_free_hw(hw);
+	usb_set_intfdata(intf, NULL);
+}
+
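+/*
+ * Each entry expands to two USB ids: the bare product id, handled as a
+ * fully running device, and product id + 1, tagged AR5523_FLAG_PRE_FIRMWARE
+ * so that probe only uploads the firmware.  The _UX variant additionally
+ * sets AR5523_FLAG_ABG for dual-band hardware.
+ */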
+#define AR5523_DEVICE_UG(vendor, device) \
+	{ USB_DEVICE((vendor), (device)) }, \
+	{ USB_DEVICE((vendor), (device) + 1), \
+		.driver_info = AR5523_FLAG_PRE_FIRMWARE }
+#define AR5523_DEVICE_UX(vendor, device) \
+	{ USB_DEVICE((vendor), (device)), \
+		.driver_info = AR5523_FLAG_ABG }, \
+	{ USB_DEVICE((vendor), (device) + 1), \
+		.driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }
+
+static struct usb_device_id ar5523_id_table[] = {
+	AR5523_DEVICE_UG(0x168c, 0x0001),	/* Atheros / AR5523 */
+	AR5523_DEVICE_UG(0x0cf3, 0x0001),	/* Atheros2 / AR5523_1 */
+	AR5523_DEVICE_UG(0x0cf3, 0x0003),	/* Atheros2 / AR5523_2 */
+	AR5523_DEVICE_UX(0x0cf3, 0x0005),	/* Atheros2 / AR5523_3 */
+	AR5523_DEVICE_UG(0x0d8e, 0x7801),	/* Conceptronic / AR5523_1 */
+	AR5523_DEVICE_UX(0x0d8e, 0x7811),	/* Conceptronic / AR5523_2 */
+	AR5523_DEVICE_UX(0x2001, 0x3a00),	/* Dlink / DWLAG132 */
+	AR5523_DEVICE_UG(0x2001, 0x3a02),	/* Dlink / DWLG132 */
+	AR5523_DEVICE_UX(0x2001, 0x3a04),	/* Dlink / DWLAG122 */
+	AR5523_DEVICE_UG(0x1690, 0x0712),	/* Gigaset / AR5523 */
+	AR5523_DEVICE_UG(0x1690, 0x0710),	/* Gigaset / SMCWUSBTG */
+	AR5523_DEVICE_UG(0x129b, 0x160c),	/* Gigaset / USB stick 108
+						   (CyberTAN Technology) */
+	AR5523_DEVICE_UG(0x16ab, 0x7801),	/* Globalsun / AR5523_1 */
+	AR5523_DEVICE_UX(0x16ab, 0x7811),	/* Globalsun / AR5523_2 */
+	AR5523_DEVICE_UG(0x0d8e, 0x7802),	/* Globalsun / AR5523_3 */
+	AR5523_DEVICE_UX(0x0846, 0x4300),	/* Netgear / WG111U */
+	AR5523_DEVICE_UG(0x0846, 0x4250),	/* Netgear / WG111T */
+	AR5523_DEVICE_UG(0x0846, 0x5f00),	/* Netgear / WPN111 */
+	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / AR5523_1 */
+	AR5523_DEVICE_UX(0x157e, 0x3205),	/* Umedia / AR5523_2 */
+	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / TEW444UBEU */
+	AR5523_DEVICE_UG(0x1435, 0x0826),	/* Wistronneweb / AR5523_1 */
+	AR5523_DEVICE_UX(0x1435, 0x0828),	/* Wistronneweb / AR5523_2 */
+	AR5523_DEVICE_UG(0x0cde, 0x0012),	/* Zcom / AR5523 */
+	AR5523_DEVICE_UG(0x1385, 0x4250),	/* Netgear3 / WG111T (2) */
+	AR5523_DEVICE_UG(0x1385, 0x5f00),	/* Netgear / WPN111 */
+	AR5523_DEVICE_UG(0x1385, 0x5f02),	/* Netgear / WPN111 */
+	{ }
+};
+MODULE_DEVICE_TABLE(usb, ar5523_id_table);
+
+static struct usb_driver ar5523_driver = {
+	.name		= "ar5523",
+	.id_table	= ar5523_id_table,
+	.probe		= ar5523_probe,
+	.disconnect	= ar5523_disconnect,
+};
+
+module_usb_driver(ar5523_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 0000000..00c6fd3
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
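+/* Flags carried in the driver_info field of the USB device id table. */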
+#define AR5523_FLAG_PRE_FIRMWARE	(1 << 0)
+#define AR5523_FLAG_ABG			(1 << 1)
+
+#define AR5523_FIRMWARE_FILE	"ar5523.bin"
+
+#define AR5523_CMD_TX_PIPE	0x01
+#define	AR5523_DATA_TX_PIPE	0x02
+#define	AR5523_CMD_RX_PIPE	0x81
+#define	AR5523_DATA_RX_PIPE	0x82
+
+#define ar5523_cmd_tx_pipe(dev) \
+	usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
+#define ar5523_data_tx_pipe(dev) \
+	usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
+#define ar5523_cmd_rx_pipe(dev) \
+	usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
+#define ar5523_data_rx_pipe(dev) \
+	usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
+
+#define	AR5523_DATA_TIMEOUT	10000
+#define	AR5523_CMD_TIMEOUT	1000
+
+#define AR5523_TX_DATA_COUNT		8
+#define AR5523_TX_DATA_RESTART_COUNT	2
+#define AR5523_RX_DATA_COUNT		16
+#define AR5523_RX_DATA_REFILL_COUNT	8
+
+#define AR5523_CMD_ID	1
+#define AR5523_DATA_ID	2
+
+#define AR5523_TX_WD_TIMEOUT	(HZ * 2)
+#define AR5523_FLUSH_TIMEOUT	(HZ * 3)
+
+enum AR5523_flags {
+	AR5523_HW_UP,
+	AR5523_USB_DISCONNECTED,
+	AR5523_CONNECTED
+};
+
+struct ar5523_tx_cmd {
+	struct ar5523		*ar;
+	struct urb		*urb_tx;
+	void			*buf_tx;
+	void			*odata;
+	int			olen;
+	int			flags;
+	int			res;
+	struct completion	done;
+};
+
+/* This struct is placed in tx_info->driver_data. It must not be larger
+ * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */
+struct ar5523_tx_data {
+	struct list_head	list;
+	struct ar5523		*ar;
+	struct sk_buff		*skb;
+	struct urb		*urb;
+};
+
+struct ar5523_rx_data {
+	struct	list_head	list;
+	struct ar5523		*ar;
+	struct urb		*urb;
+	struct sk_buff		*skb;
+};
+
+struct ar5523 {
+	struct usb_device	*dev;
+	struct ieee80211_hw	*hw;
+
+	unsigned long		flags;
+	struct mutex		mutex;
+	struct workqueue_struct *wq;
+
+	struct ar5523_tx_cmd	tx_cmd;
+
+	struct delayed_work	stat_work;
+
+	struct timer_list	tx_wd_timer;
+	struct work_struct	tx_wd_work;
+	struct work_struct	tx_work;
+	struct list_head	tx_queue_pending;
+	struct list_head	tx_queue_submitted;
+	spinlock_t		tx_data_list_lock;
+	wait_queue_head_t	tx_flush_waitq;
+
+	/* Queued + Submitted TX frames */
+	atomic_t		tx_nr_total;
+
+	/* Submitted TX frames */
+	atomic_t		tx_nr_pending;
+
+	void			*rx_cmd_buf;
+	struct urb		*rx_cmd_urb;
+
+	struct ar5523_rx_data	rx_data[AR5523_RX_DATA_COUNT];
+	spinlock_t		rx_data_list_lock;
+	struct list_head	rx_data_free;
+	struct list_head	rx_data_used;
+	atomic_t		rx_data_free_cnt;
+
+	struct work_struct	rx_refill_work;
+
+	unsigned int		rxbufsz;
+	u8			serial[16];
+
+	struct ieee80211_channel channels[14];
+	struct ieee80211_rate	rates[12];
+	struct ieee80211_supported_band band;
+	struct ieee80211_vif	*vif;
+};
+
+/* flags for sending firmware commands */
+#define AR5523_CMD_FLAG_READ	(1 << 1)
+#define AR5523_CMD_FLAG_MAGIC	(1 << 2)
+
+#define ar5523_dbg(ar, format, arg...) \
+	dev_dbg(&(ar)->dev->dev, format, ## arg)
+
+/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
+ * fail. Instead of dealing with them in every possible place just suppress
+ * any messages on USB disconnect.
+ */
+#define ar5523_err(ar, format, arg...) \
+do { \
+	if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
+		dev_err(&(ar)->dev->dev, format, ## arg); \
+	} \
+} while (0)
+#define ar5523_info(ar, format, arg...)	\
+	dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 0000000..0fe2c80
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* all fields are big endian */
+struct ar5523_fwblock {
+	__be32		flags;
+#define AR5523_WRITE_BLOCK	(1 << 4)
+
+	__be32	len;
+#define AR5523_MAX_FWBLOCK_SIZE	2048
+
+	__be32		total;
+	__be32		remain;
+	__be32		rxtotal;
+	__be32		pad[123];
+} __packed;
+
+#define AR5523_MAX_RXCMDSZ	1024
+#define AR5523_MAX_TXCMDSZ	1024
+
+struct ar5523_cmd_hdr {
+	__be32		len;
+	__be32		code;
+/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
+/* messages from Host -> Target */
+#define	WDCMSG_HOST_AVAILABLE		0x01
+#define WDCMSG_BIND			0x02
+#define WDCMSG_TARGET_RESET		0x03
+#define WDCMSG_TARGET_GET_CAPABILITY	0x04
+#define WDCMSG_TARGET_SET_CONFIG	0x05
+#define WDCMSG_TARGET_GET_STATUS	0x06
+#define WDCMSG_TARGET_GET_STATS		0x07
+#define WDCMSG_TARGET_START		0x08
+#define WDCMSG_TARGET_STOP		0x09
+#define WDCMSG_TARGET_ENABLE		0x0a
+#define WDCMSG_TARGET_DISABLE		0x0b
+#define	WDCMSG_CREATE_CONNECTION	0x0c
+#define WDCMSG_UPDATE_CONNECT_ATTR	0x0d
+#define	WDCMSG_DELETE_CONNECT		0x0e
+#define	WDCMSG_SEND			0x0f
+#define WDCMSG_FLUSH			0x10
+/* messages from Target -> Host */
+#define	WDCMSG_STATS_UPDATE		0x11
+#define	WDCMSG_BMISS			0x12
+#define	WDCMSG_DEVICE_AVAIL		0x13
+#define	WDCMSG_SEND_COMPLETE		0x14
+#define	WDCMSG_DATA_AVAIL		0x15
+#define	WDCMSG_SET_PWR_MODE		0x16
+#define	WDCMSG_BMISS_ACK		0x17
+#define	WDCMSG_SET_LED_STEADY		0x18
+#define	WDCMSG_SET_LED_BLINK		0x19
+/* more messages */
+#define	WDCMSG_SETUP_BEACON_DESC	0x1a
+#define	WDCMSG_BEACON_INIT		0x1b
+#define	WDCMSG_RESET_KEY_CACHE		0x1c
+#define	WDCMSG_RESET_KEY_CACHE_ENTRY	0x1d
+#define	WDCMSG_SET_KEY_CACHE_ENTRY	0x1e
+#define	WDCMSG_SET_DECOMP_MASK		0x1f
+#define	WDCMSG_SET_REGULATORY_DOMAIN	0x20
+#define	WDCMSG_SET_LED_STATE		0x21
+#define	WDCMSG_WRITE_ASSOCID		0x22
+#define	WDCMSG_SET_STA_BEACON_TIMERS	0x23
+#define	WDCMSG_GET_TSF			0x24
+#define	WDCMSG_RESET_TSF		0x25
+#define	WDCMSG_SET_ADHOC_MODE		0x26
+#define	WDCMSG_SET_BASIC_RATE		0x27
+#define	WDCMSG_MIB_CONTROL		0x28
+#define	WDCMSG_GET_CHANNEL_DATA		0x29
+#define	WDCMSG_GET_CUR_RSSI		0x2a
+#define	WDCMSG_SET_ANTENNA_SWITCH	0x2b
+#define	WDCMSG_USE_SHORT_SLOT_TIME	0x2f
+#define	WDCMSG_SET_POWER_MODE		0x30
+#define	WDCMSG_SETUP_PSPOLL_DESC	0x31
+#define	WDCMSG_SET_RX_MULTICAST_FILTER	0x32
+#define	WDCMSG_RX_FILTER		0x33
+#define	WDCMSG_PER_CALIBRATION		0x34
+#define	WDCMSG_RESET			0x35
+#define	WDCMSG_DISABLE			0x36
+#define	WDCMSG_PHY_DISABLE		0x37
+#define	WDCMSG_SET_TX_POWER_LIMIT	0x38
+#define	WDCMSG_SET_TX_QUEUE_PARAMS	0x39
+#define	WDCMSG_SETUP_TX_QUEUE		0x3a
+#define	WDCMSG_RELEASE_TX_QUEUE		0x3b
+#define	WDCMSG_SET_DEFAULT_KEY		0x43
+
+	__u32		priv;	/* driver private data,
+				   don't care about endianness */
+	__be32		magic;
+	__be32		reserved2[4];
+};
+
+struct ar5523_cmd_host_available {
+	__be32	sw_ver_major;
+	__be32	sw_ver_minor;
+	__be32	sw_ver_patch;
+	__be32	sw_ver_build;
+} __packed;
+
+#define	ATH_SW_VER_MAJOR	1
+#define	ATH_SW_VER_MINOR	5
+#define	ATH_SW_VER_PATCH	0
+#define	ATH_SW_VER_BUILD	9999
+
+struct ar5523_chunk {
+	u8		seqnum;		/* sequence number for ordering */
+	u8		flags;
+#define	UATH_CFLAGS_FINAL	0x01	/* final chunk of a msg */
+#define	UATH_CFLAGS_RXMSG	0x02	/* chunk contains rx completion */
+#define	UATH_CFLAGS_DEBUG	0x04	/* for debugging */
+	__be16		length;		/* chunk size in bytes */
+	/* chunk data follows */
+} __packed;
+
+/*
+ * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
+ */
+struct ar5523_rx_desc {
+	__be32	len;		/* msg length including header */
+	__be32	code;		/* WDCMSG_DATA_AVAIL */
+	__be32	gennum;		/* generation number */
+	__be32	status;		/* start of RECEIVE_INFO */
+#define	UATH_STATUS_OK			0
+#define	UATH_STATUS_STOP_IN_PROGRESS	1
+#define	UATH_STATUS_CRC_ERR		2
+#define	UATH_STATUS_PHY_ERR		3
+#define	UATH_STATUS_DECRYPT_CRC_ERR	4
+#define	UATH_STATUS_DECRYPT_MIC_ERR	5
+#define	UATH_STATUS_DECOMP_ERR		6
+#define	UATH_STATUS_KEY_ERR		7
+#define	UATH_STATUS_ERR			8
+	__be32	tstamp_low;	/* low-order 32-bits of rx timestamp */
+	__be32	tstamp_high;	/* high-order 32-bits of rx timestamp */
+	__be32	framelen;	/* frame length */
+	__be32	rate;		/* rx rate code */
+	__be32	antenna;
+	__be32	rssi;
+	__be32	channel;
+	__be32	phyerror;
+	__be32	connix;		/* key table ix for bss traffic */
+	__be32	decrypterror;
+	__be32	keycachemiss;
+	__be32	pad;		/* XXX? */
+} __packed;
+
+struct ar5523_tx_desc {
+	__be32	msglen;
+	u32	msgid;		/* msg id (supplied by host) */
+	__be32	type;		/* opcode: WDMSG_SEND or WDCMSG_FLUSH */
+	__be32	txqid;		/* tx queue id and flags */
+#define	UATH_TXQID_MASK		0x0f
+#define	UATH_TXQID_MINRATE	0x10	/* use min tx rate */
+#define	UATH_TXQID_FF		0x20	/* content is fast frame */
+	__be32	connid;		/* tx connection id */
+#define UATH_ID_INVALID	0xffffffff	/* for sending prior to connection */
+	__be32	flags;		/* non-zero if response desired */
+#define UATH_TX_NOTIFY	(1 << 24)	/* f/w will send a UATH_NOTIF_TX */
+	__be32	buflen;		/* payload length */
+} __packed;
+
+
+#define AR5523_ID_BSS		2
+#define AR5523_ID_BROADCAST	0xffffffff
+
+/* structure for command UATH_CMD_WRITE_MAC */
+struct ar5523_write_mac {
+	__be32	reg;
+	__be32	len;
+	u8		data[32];
+} __packed;
+
+struct ar5523_cmd_rateset {
+	__u8		length;
+#define AR5523_MAX_NRATES	32
+	__u8		set[AR5523_MAX_NRATES];
+};
+
+struct ar5523_cmd_set_associd {		/* AR5523_WRITE_ASSOCID */
+	__be32	defaultrateix;
+	__be32	associd;
+	__be32	timoffset;
+	__be32	turboprime;
+	__u8	bssid[6];
+} __packed;
+
+/* structure for command WDCMSG_RESET */
+struct ar5523_cmd_reset {
+	__be32	flags;		/* channel flags */
+#define	UATH_CHAN_TURBO	0x0100
+#define	UATH_CHAN_CCK	0x0200
+#define	UATH_CHAN_OFDM	0x0400
+#define	UATH_CHAN_2GHZ	0x1000
+#define	UATH_CHAN_5GHZ	0x2000
+	__be32	freq;		/* channel frequency */
+	__be32	maxrdpower;
+	__be32	cfgctl;
+	__be32	twiceantennareduction;
+	__be32	channelchange;
+	__be32	keeprccontent;
+} __packed;
+
+/* structure for command WDCMSG_SET_BASIC_RATE */
+struct ar5523_cmd_rates {
+	__be32	connid;
+	__be32	keeprccontent;
+	__be32	size;
+	struct ar5523_cmd_rateset rateset;
+} __packed;
+
+enum {
+	WLAN_MODE_NONE = 0,
+	WLAN_MODE_11b,
+	WLAN_MODE_11a,
+	WLAN_MODE_11g,
+	WLAN_MODE_11a_TURBO,
+	WLAN_MODE_11g_TURBO,
+	WLAN_MODE_11a_TURBO_PRIME,
+	WLAN_MODE_11g_TURBO_PRIME,
+	WLAN_MODE_11a_XR,
+	WLAN_MODE_11g_XR,
+};
+
+struct ar5523_cmd_connection_attr {
+	__be32	longpreambleonly;
+	struct ar5523_cmd_rateset	rateset;
+	__be32	wlanmode;
+} __packed;
+
+/* structure for command AR5523_CREATE_CONNECTION */
+struct ar5523_cmd_create_connection {
+	__be32	connid;
+	__be32	bssid;
+	__be32	size;
+	struct ar5523_cmd_connection_attr	connattr;
+} __packed;
+
+struct ar5523_cmd_ledsteady {		/* WDCMSG_SET_LED_STEADY */
+	__be32	lednum;
+#define UATH_LED_LINK		0
+#define UATH_LED_ACTIVITY	1
+	__be32	ledmode;
+#define UATH_LED_OFF	0
+#define UATH_LED_ON	1
+} __packed;
+
+struct ar5523_cmd_ledblink {		/* WDCMSG_SET_LED_BLINK */
+	__be32	lednum;
+	__be32	ledmode;
+	__be32	blinkrate;
+	__be32	slowmode;
+} __packed;
+
+struct ar5523_cmd_ledstate {		/* WDCMSG_SET_LED_STATE */
+	__be32	connected;
+} __packed;
+
+struct ar5523_cmd_txq_attr {
+	__be32	priority;
+	__be32	aifs;
+	__be32	logcwmin;
+	__be32	logcwmax;
+	__be32	bursttime;
+	__be32	mode;
+	__be32	qflags;
+} __packed;
+
+struct ar5523_cmd_txq_setup {		/* WDCMSG_SETUP_TX_QUEUE */
+	__be32	qid;
+	__be32	len;
+	struct ar5523_cmd_txq_attr attr;
+} __packed;
+
+struct ar5523_cmd_rx_filter {		/* WDCMSG_RX_FILTER */
+	__be32	bits;
+#define UATH_FILTER_RX_UCAST		0x00000001
+#define UATH_FILTER_RX_MCAST		0x00000002
+#define UATH_FILTER_RX_BCAST		0x00000004
+#define UATH_FILTER_RX_CONTROL		0x00000008
+#define UATH_FILTER_RX_BEACON		0x00000010	/* beacon frames */
+#define UATH_FILTER_RX_PROM		0x00000020	/* promiscuous mode */
+#define UATH_FILTER_RX_PHY_ERR		0x00000040	/* phy errors */
+#define UATH_FILTER_RX_PHY_RADAR	0x00000080	/* radar phy errors */
+#define UATH_FILTER_RX_XR_POOL		0x00000400	/* XR group polls */
+#define UATH_FILTER_RX_PROBE_REQ	0x00000800
+	__be32	op;
+#define UATH_FILTER_OP_INIT		0x0
+#define UATH_FILTER_OP_SET		0x1
+#define UATH_FILTER_OP_CLEAR		0x2
+#define UATH_FILTER_OP_TEMP		0x3
+#define UATH_FILTER_OP_RESTORE		0x4
+} __packed;
+
+enum {
+	CFG_NONE,			/* Sentinel to indicate "no config" */
+	CFG_REG_DOMAIN,			/* Regulatory Domain */
+	CFG_RATE_CONTROL_ENABLE,
+	CFG_DEF_XMIT_DATA_RATE,		/* NB: if rate control is not enabled */
+	CFG_HW_TX_RETRIES,
+	CFG_SW_TX_RETRIES,
+	CFG_SLOW_CLOCK_ENABLE,
+	CFG_COMP_PROC,
+	CFG_USER_RTS_THRESHOLD,
+	CFG_XR2NORM_RATE_THRESHOLD,
+	CFG_XRMODE_SWITCH_COUNT,
+	CFG_PROTECTION_TYPE,
+	CFG_BURST_SEQ_THRESHOLD,
+	CFG_ABOLT,
+	CFG_IQ_LOG_COUNT_MAX,
+	CFG_MODE_CTS,
+	CFG_WME_ENABLED,
+	CFG_GPRS_CBR_PERIOD,
+	CFG_SERVICE_TYPE,
+	/* MAC Address to use.  Overrides EEPROM */
+	CFG_MAC_ADDR,
+	CFG_DEBUG_EAR,
+	CFG_INIT_REGS,
+	/* An ID for use in error & debug messages */
+	CFG_DEBUG_ID,
+	CFG_COMP_WIN_SZ,
+	CFG_DIVERSITY_CTL,
+	CFG_TP_SCALE,
+	CFG_TPC_HALF_DBM5,
+	CFG_TPC_HALF_DBM2,
+	CFG_OVERRD_TX_POWER,
+	CFG_USE_32KHZ_CLOCK,
+	CFG_GMODE_PROTECTION,
+	CFG_GMODE_PROTECT_RATE_INDEX,
+	CFG_GMODE_NON_ERP_PREAMBLE,
+	CFG_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+	/* Sentinel to indicate "no capability" */
+	CAP_NONE,
+	CAP_ALL,			/* ALL capabilities */
+	CAP_TARGET_VERSION,
+	CAP_TARGET_REVISION,
+	CAP_MAC_VERSION,
+	CAP_MAC_REVISION,
+	CAP_PHY_REVISION,
+	CAP_ANALOG_5GHz_REVISION,
+	CAP_ANALOG_2GHz_REVISION,
+	/* Target supports WDC message debug features */
+	CAP_DEBUG_WDCMSG_SUPPORT,
+
+	CAP_REG_DOMAIN,
+	CAP_COUNTRY_CODE,
+	CAP_REG_CAP_BITS,
+
+	CAP_WIRELESS_MODES,
+	CAP_CHAN_SPREAD_SUPPORT,
+	CAP_SLEEP_AFTER_BEACON_BROKEN,
+	CAP_COMPRESS_SUPPORT,
+	CAP_BURST_SUPPORT,
+	CAP_FAST_FRAMES_SUPPORT,
+	CAP_CHAP_TUNING_SUPPORT,
+	CAP_TURBOG_SUPPORT,
+	CAP_TURBO_PRIME_SUPPORT,
+	CAP_DEVICE_TYPE,
+	CAP_XR_SUPPORT,
+	CAP_WME_SUPPORT,
+	CAP_TOTAL_QUEUES,
+	CAP_CONNECTION_ID_MAX,		/* Should absorb CAP_KEY_CACHE_SIZE */
+
+	CAP_LOW_5GHZ_CHAN,
+	CAP_HIGH_5GHZ_CHAN,
+	CAP_LOW_2GHZ_CHAN,
+	CAP_HIGH_2GHZ_CHAN,
+
+	CAP_MIC_AES_CCM,
+	CAP_MIC_CKIP,
+	CAP_MIC_TKIP,
+	CAP_MIC_TKIP_WME,
+	CAP_CIPHER_AES_CCM,
+	CAP_CIPHER_CKIP,
+	CAP_CIPHER_TKIP,
+
+	CAP_TWICE_ANTENNAGAIN_5G,
+	CAP_TWICE_ANTENNAGAIN_2G,
+};
+
+enum {
+	ST_NONE,                    /* Sentinel to indicate "no status" */
+	ST_ALL,
+	ST_SERVICE_TYPE,
+	ST_WLAN_MODE,
+	ST_FREQ,
+	ST_BAND,
+	ST_LAST_RSSI,
+	ST_PS_FRAMES_DROPPED,
+	ST_CACHED_DEF_ANT,
+	ST_COUNT_OTHER_RX_ANT,
+	ST_USE_FAST_DIVERSITY,
+	ST_MAC_ADDR,
+	ST_RX_GENERATION_NUM,
+	ST_TX_QUEUE_DEPTH,
+	ST_SERIAL_NUMBER,
+	ST_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+	TARGET_DEVICE_AWAKE,
+	TARGET_DEVICE_SLEEP,
+	TARGET_DEVICE_PWRDN,
+	TARGET_DEVICE_PWRSAVE,
+	TARGET_DEVICE_SUSPEND,
+	TARGET_DEVICE_RESUME,
+};
+
+/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
+#define IEEE80211_2ADDR_LEN	16
+
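+/* Smallest RX transfer we accept: one 32-bit word, a two-address 802.11
+ * header and the trailing RX descriptor, rounded up to a 4-byte boundary.
+ */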
+#define AR5523_MIN_RXBUFSZ				\
+	(((sizeof(__be32) + IEEE80211_2ADDR_LEN +	\
+	   sizeof(struct ar5523_rx_desc)) + 3) & ~3)
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index aec33cc..8e8bcc7a 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -236,17 +236,4 @@
 	},
 };
 
-static int __init
-ath5k_ahb_init(void)
-{
-	return platform_driver_register(&ath_ahb_driver);
-}
-
-static void __exit
-ath5k_ahb_exit(void)
-{
-	platform_driver_unregister(&ath_ahb_driver);
-}
-
-module_init(ath5k_ahb_init);
-module_exit(ath5k_ahb_exit);
+module_platform_driver(ath_ahb_driver);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f31cfa..2fd5bab 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -511,8 +511,9 @@
 		ath5k_vif_iter(&iter_data, vif->addr, vif);
 
 	/* Get list of all active MAC addresses */
-	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
-						   &iter_data);
+	ieee80211_iterate_active_interfaces_atomic(
+		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath5k_vif_iter, &iter_data);
 	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
 
 	ah->opmode = iter_data.opmode;
@@ -1348,7 +1349,7 @@
 	 * right now, so it's not too bad...
 	 */
 	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
-	rxs->flag |= RX_FLAG_MACTIME_MPDU;
+	rxs->flag |= RX_FLAG_MACTIME_START;
 
 	rxs->freq = ah->curchan->center_freq;
 	rxs->band = ah->curchan->band;
@@ -3045,8 +3046,9 @@
 	iter_data.need_set_hw_addr = false;
 	iter_data.found_active = true;
 
-	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
-						   &iter_data);
+	ieee80211_iterate_active_interfaces_atomic(
+		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath5k_vif_iter, &iter_data);
 	return iter_data.any_assoc;
 }
 
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 7a28538..1ea8c87 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -452,8 +452,9 @@
 	iter_data.hw_macaddr = NULL;
 	iter_data.n_stas = 0;
 	iter_data.need_set_hw_addr = false;
-	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
-						   &iter_data);
+	ieee80211_iterate_active_interfaces_atomic(
+		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath5k_vif_iter, &iter_data);
 
 	/* Set up RX Filter */
 	if (iter_data.n_stas > 1) {
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 0c2dd47..4084b10 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -789,9 +789,9 @@
 		 * (I don't think it supports 44MHz) */
 		/* On 2425 initvals TURBO_SHORT is not present */
 		if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
-			turbo = AR5K_PHY_TURBO_MODE |
-				(ah->ah_radio == AR5K_RF2425) ? 0 :
-				AR5K_PHY_TURBO_SHORT;
+			turbo = AR5K_PHY_TURBO_MODE;
+			if (ah->ah_radio != AR5K_RF2425)
+				turbo |= AR5K_PHY_TURBO_SHORT;
 		} else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
 			if (ah->ah_radio == AR5K_RF5413) {
 				mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index d755a5e..26c4b72 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,3 +30,12 @@
 	depends on ATH6KL
 	---help---
 	  Enables debug support
+
+config ATH6KL_REGDOMAIN
+	bool "Atheros ath6kl regdomain support"
+	depends on ATH6KL
+	depends on CFG80211_CERTIFICATION_ONUS
+	---help---
+	  Enabling this makes it possible to change the regdomain in
+	  the firmware. This can only be enabled if regulatory requirements
+	  are taken into account.
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8cae888..cab0ec0 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -34,6 +34,7 @@
 ath6kl_core-y += txrx.o
 ath6kl_core-y += wmi.o
 ath6kl_core-y += core.o
+ath6kl_core-y += recovery.o
 ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 
 obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7089f81..5516a8c 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -147,15 +147,15 @@
 {
 	struct ath6kl *ar = vif->ar;
 
-	if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+	if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags))
 		return false;
 
 	del_timer_sync(&vif->sched_scan_timer);
 
-	ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
-					   ATH6KL_HOST_MODE_AWAKE);
+	if (ar->state == ATH6KL_STATE_RECOVERY)
+		return true;
 
-	ar->state = ATH6KL_STATE_ON;
+	ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false);
 
 	return true;
 }
@@ -301,7 +301,7 @@
 
 static bool ath6kl_is_wpa_ie(const u8 *pos)
 {
-	return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+	return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
 		pos[2] == 0x00 && pos[3] == 0x50 &&
 		pos[4] == 0xf2 && pos[5] == 0x01;
 }
@@ -369,17 +369,13 @@
 {
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
 		*nw_type = INFRA_NETWORK;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		*nw_type = ADHOC_NETWORK;
 		break;
 	case NL80211_IFTYPE_AP:
-		*nw_type = AP_NETWORK;
-		break;
-	case NL80211_IFTYPE_P2P_CLIENT:
-		*nw_type = INFRA_NETWORK;
-		break;
 	case NL80211_IFTYPE_P2P_GO:
 		*nw_type = AP_NETWORK;
 		break;
@@ -1031,30 +1027,15 @@
 
 	vif->scan_req = request;
 
-	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
-		     ar->fw_capabilities)) {
-		/*
-		 * If capable of doing P2P mgmt operations using
-		 * station interface, send additional information like
-		 * supported rates to advertise and xmit rates for
-		 * probe requests
-		 */
-		ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
-						WMI_LONG_SCAN, force_fg_scan,
-						false, 0,
-						ATH6KL_FG_SCAN_INTERVAL,
-						n_channels, channels,
-						request->no_cck,
-						request->rates);
-	} else {
-		ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
-						WMI_LONG_SCAN, force_fg_scan,
-						false, 0,
-						ATH6KL_FG_SCAN_INTERVAL,
-						n_channels, channels);
-	}
+	ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+				       WMI_LONG_SCAN, force_fg_scan,
+				       false, 0,
+				       ATH6KL_FG_SCAN_INTERVAL,
+				       n_channels, channels,
+				       request->no_cck,
+				       request->rates);
 	if (ret) {
-		ath6kl_err("wmi_startscan_cmd failed\n");
+		ath6kl_err("failed to start scan: %d\n", ret);
 		vif->scan_req = NULL;
 	}
 
@@ -1093,15 +1074,18 @@
 void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
 				      enum wmi_phy_mode mode)
 {
-	enum nl80211_channel_type type;
+	struct cfg80211_chan_def chandef;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 		   "channel switch notify nw_type %d freq %d mode %d\n",
 		   vif->nw_type, freq, mode);
 
-	type = (mode == WMI_11G_HT20) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT;
+	cfg80211_chandef_create(&chandef,
+				ieee80211_get_channel(vif->ar->wiphy, freq),
+				(mode == WMI_11G_HT20) ?
+					NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
 
-	cfg80211_ch_switch_notify(vif->ndev, freq, type);
+	cfg80211_ch_switch_notify(vif->ndev, &chandef);
 }
 
 static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -1384,11 +1368,8 @@
 	return 0;
 }
 
-/*
- * The type nl80211_tx_power_setting replaces the following
- * data type from 2.6.36 onwards
-*/
 static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+				       struct wireless_dev *wdev,
 				       enum nl80211_tx_power_setting type,
 				       int mbm)
 {
@@ -1423,7 +1404,9 @@
 	return 0;
 }
 
-static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
+				       struct wireless_dev *wdev,
+				       int *dbm)
 {
 	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
 	struct ath6kl_vif *vif;
@@ -1614,8 +1597,8 @@
 	vif->ssid_len = ibss_param->ssid_len;
 	memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
 
-	if (ibss_param->channel)
-		vif->ch_hint = ibss_param->channel->center_freq;
+	if (ibss_param->chandef.chan)
+		vif->ch_hint = ibss_param->chandef.chan->center_freq;
 
 	if (ibss_param->channel_fixed) {
 		/*
@@ -1889,7 +1872,7 @@
 			  struct cfg80211_wowlan *wow, u32 *filter)
 {
 	int ret, pos;
-	u8 mask[WOW_MASK_SIZE];
+	u8 mask[WOW_PATTERN_SIZE];
 	u16 i;
 
 	/* Configure the patterns that we received from the user. */
@@ -2107,33 +2090,16 @@
 	return ret;
 }
 
-static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
+				  struct cfg80211_wowlan *wow, u32 *filter)
 {
+	struct ath6kl *ar = vif->ar;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
-	struct ath6kl_vif *vif;
 	int ret;
-	u32 filter = 0;
 	u16 i, bmiss_time;
-	u8 index = 0;
 	__be32 ips[MAX_IP_ADDRS];
-
-	/* The FW currently can't support multi-vif WoW properly. */
-	if (ar->num_vif > 1)
-		return -EIO;
-
-	vif = ath6kl_vif_first(ar);
-	if (!vif)
-		return -EIO;
-
-	if (!ath6kl_cfg80211_ready(vif))
-		return -EIO;
-
-	if (!test_bit(CONNECTED, &vif->flags))
-		return -ENOTCONN;
-
-	if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
-		return -EINVAL;
+	u8 index = 0;
 
 	if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
 	    test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
@@ -2155,7 +2121,7 @@
 	 * the user.
 	 */
 	if (wow)
-		ret = ath6kl_wow_usr(ar, vif, wow, &filter);
+		ret = ath6kl_wow_usr(ar, vif, wow, filter);
 	else if (vif->nw_type == AP_NETWORK)
 		ret = ath6kl_wow_ap(ar, vif);
 	else
@@ -2190,12 +2156,10 @@
 			return ret;
 	}
 
-	ar->state = ATH6KL_STATE_SUSPENDING;
-
 	/* Setup own IP addr for ARP agent. */
 	in_dev = __in_dev_get_rtnl(vif->ndev);
 	if (!in_dev)
-		goto skip_arp;
+		return 0;
 
 	ifa = in_dev->ifa_list;
 	memset(&ips, 0, sizeof(ips));
@@ -2218,41 +2182,61 @@
 		return ret;
 	}
 
-skip_arp:
-	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+	return ret;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+	struct ath6kl_vif *first_vif, *vif;
+	int ret = 0;
+	u32 filter = 0;
+	bool connected = false;
+
+	/* enter / leave wow suspend on first vif always */
+	first_vif = ath6kl_vif_first(ar);
+	if (WARN_ON(unlikely(!first_vif)) ||
+	    !ath6kl_cfg80211_ready(first_vif))
+		return -EIO;
+
+	if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
+		return -EINVAL;
+
+	/* install filters for each connected vif */
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry(vif, &ar->vif_list, list) {
+		if (!test_bit(CONNECTED, &vif->flags) ||
+		    !ath6kl_cfg80211_ready(vif))
+			continue;
+		connected = true;
+
+		ret = ath6kl_wow_suspend_vif(vif, wow, &filter);
+		if (ret)
+			break;
+	}
+	spin_unlock_bh(&ar->list_lock);
+
+	if (!connected)
+		return -ENOTCONN;
+	else if (ret)
+		return ret;
+
+	ar->state = ATH6KL_STATE_SUSPENDING;
+
+	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx,
 					  ATH6KL_WOW_MODE_ENABLE,
 					  filter,
 					  WOW_HOST_REQ_DELAY);
 	if (ret)
 		return ret;
 
-	ret = ath6kl_cfg80211_host_sleep(ar, vif);
-	if (ret)
-		return ret;
-
-	return 0;
+	return ath6kl_cfg80211_host_sleep(ar, first_vif);
 }
 
-static int ath6kl_wow_resume(struct ath6kl *ar)
+static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif)
 {
-	struct ath6kl_vif *vif;
+	struct ath6kl *ar = vif->ar;
 	int ret;
 
-	vif = ath6kl_vif_first(ar);
-	if (!vif)
-		return -EIO;
-
-	ar->state = ATH6KL_STATE_RESUMING;
-
-	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
-						 ATH6KL_HOST_MODE_AWAKE);
-	if (ret) {
-		ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
-			    ret);
-		ar->state = ATH6KL_STATE_WOW;
-		return ret;
-	}
-
 	if (vif->nw_type != AP_NETWORK) {
 		ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
 						0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
@@ -2270,13 +2254,11 @@
 			return ret;
 	}
 
-	ar->state = ATH6KL_STATE_ON;
-
 	if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
 	    test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
 		     ar->fw_capabilities)) {
 		ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
-					vif->fw_vif_idx, true);
+						  vif->fw_vif_idx, true);
 		if (ret)
 			return ret;
 	}
@@ -2286,6 +2268,48 @@
 	return 0;
 }
 
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+	struct ath6kl_vif *vif;
+	int ret;
+
+	vif = ath6kl_vif_first(ar);
+	if (WARN_ON(unlikely(!vif)) ||
+	    !ath6kl_cfg80211_ready(vif))
+		return -EIO;
+
+	ar->state = ATH6KL_STATE_RESUMING;
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_AWAKE);
+	if (ret) {
+		ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry(vif, &ar->vif_list, list) {
+		if (!test_bit(CONNECTED, &vif->flags) ||
+		    !ath6kl_cfg80211_ready(vif))
+			continue;
+		ret = ath6kl_wow_resume_vif(vif);
+		if (ret)
+			break;
+	}
+	spin_unlock_bh(&ar->list_lock);
+
+	if (ret)
+		goto cleanup;
+
+	ar->state = ATH6KL_STATE_ON;
+	return 0;
+
+cleanup:
+	ar->state = ATH6KL_STATE_WOW;
+	return ret;
+}
+
 static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
 {
 	struct ath6kl_vif *vif;
@@ -2422,13 +2446,6 @@
 
 		break;
 
-	case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
-		/*
-		 * Nothing needed for schedule scan, firmware is already in
-		 * wow mode and sleeping most of the time.
-		 */
-		break;
-
 	default:
 		break;
 	}
@@ -2476,9 +2493,6 @@
 		}
 		break;
 
-	case ATH6KL_STATE_SCHED_SCAN:
-		break;
-
 	default:
 		break;
 	}
@@ -2495,14 +2509,23 @@
 {
 	struct ath6kl *ar = wiphy_priv(wiphy);
 
+	ath6kl_recovery_suspend(ar);
+
 	return ath6kl_hif_suspend(ar, wow);
 }
 
 static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
 {
 	struct ath6kl *ar = wiphy_priv(wiphy);
+	int err;
 
-	return ath6kl_hif_resume(ar);
+	err = ath6kl_hif_resume(ar);
+	if (err)
+		return err;
+
+	ath6kl_recovery_resume(ar);
+
+	return 0;
 }
 
 /*
@@ -2739,6 +2762,7 @@
 	int res;
 	int i, ret;
 	u16 rsn_capab = 0;
+	int inactivity_timeout = 0;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
 
@@ -2857,7 +2881,7 @@
 	p.ssid_len = vif->ssid_len;
 	memcpy(p.ssid, vif->ssid, vif->ssid_len);
 	p.dot11_auth_mode = vif->dot11_auth_mode;
-	p.ch = cpu_to_le16(info->channel->center_freq);
+	p.ch = cpu_to_le16(info->chandef.chan->center_freq);
 
 	/* Enable uAPSD support by default */
 	res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2875,14 +2899,22 @@
 	}
 
 	if (info->inactivity_timeout) {
+
+		inactivity_timeout = info->inactivity_timeout;
+
+		if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
+			inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
+							  60);
+
 		res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
-						  info->inactivity_timeout);
+						  inactivity_timeout);
 		if (res < 0)
 			return res;
 	}
 
-	if (ath6kl_set_htcap(vif, info->channel->band,
-			     info->channel_type != NL80211_CHAN_NO_HT))
+	if (ath6kl_set_htcap(vif, info->chandef.chan->band,
+			     cfg80211_get_chandef_type(&info->chandef)
+					!= NL80211_CHAN_NO_HT))
 		return -EIO;
 
 	/*
@@ -2898,6 +2930,7 @@
 					    WLAN_EID_RSN, WMI_RSN_IE_CAPB,
 					    (const u8 *) &rsn_capab,
 					    sizeof(rsn_capab));
+		vif->rsn_capab = rsn_capab;
 		if (res < 0)
 			return res;
 	}
@@ -2977,7 +3010,6 @@
 static int ath6kl_remain_on_channel(struct wiphy *wiphy,
 				    struct wireless_dev *wdev,
 				    struct ieee80211_channel *chan,
-				    enum nl80211_channel_type channel_type,
 				    unsigned int duration,
 				    u64 *cookie)
 {
@@ -3136,10 +3168,8 @@
 
 static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			  struct ieee80211_channel *chan, bool offchan,
-			  enum nl80211_channel_type channel_type,
-			  bool channel_type_valid, unsigned int wait,
-			  const u8 *buf, size_t len, bool no_cck,
-			  bool dont_wait_for_ack, u64 *cookie)
+			  unsigned int wait, const u8 *buf, size_t len,
+			  bool no_cck, bool dont_wait_for_ack, u64 *cookie)
 {
 	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
 	struct ath6kl *ar = ath6kl_priv(vif->ndev);
@@ -3211,7 +3241,7 @@
 	struct ath6kl *ar = ath6kl_priv(dev);
 	struct ath6kl_vif *vif = netdev_priv(dev);
 	u16 interval;
-	int ret;
+	int ret, rssi_thold;
 
 	if (ar->state != ATH6KL_STATE_ON)
 		return -EIO;
@@ -3219,10 +3249,6 @@
 	if (vif->sme_state != SME_DISCONNECTED)
 		return -EBUSY;
 
-	/* The FW currently can't support multi-vif WoW properly. */
-	if (ar->num_vif > 1)
-		return -EIO;
-
 	ath6kl_cfg80211_scan_complete_event(vif, true);
 
 	ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
@@ -3244,6 +3270,23 @@
 			return ret;
 	}
 
+	if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+		     ar->fw_capabilities)) {
+		if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+			rssi_thold = 0;
+		else if (request->rssi_thold < -127)
+			rssi_thold = -127;
+		else
+			rssi_thold = request->rssi_thold;
+
+		ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
+						     rssi_thold);
+		if (ret) {
+			ath6kl_err("failed to set RSSI threshold for scan\n");
+			return ret;
+		}
+	}
+
 	/* fw uses seconds, also make sure that it's >0 */
 	interval = max_t(u16, 1, request->interval / 1000);
 
@@ -3251,15 +3294,6 @@
 				  interval, interval,
 				  vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
 
-	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
-					  ATH6KL_WOW_MODE_ENABLE,
-					  WOW_FILTER_SSID,
-					  WOW_HOST_REQ_DELAY);
-	if (ret) {
-		ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
-		return ret;
-	}
-
 	/* this also clears IE in fw if it's not set */
 	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
 				       WMI_FRAME_PROBE_REQ,
@@ -3270,17 +3304,13 @@
 		return ret;
 	}
 
-	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
-						 ATH6KL_HOST_MODE_ASLEEP);
-	if (ret) {
-		ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
-			    ret);
+	ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true);
+	if (ret)
 		return ret;
-	}
 
-	ar->state = ATH6KL_STATE_SCHED_SCAN;
+	set_bit(SCHED_SCANNING, &vif->flags);
 
-	return ret;
+	return 0;
 }
 
 static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
@@ -3309,6 +3339,27 @@
 					   mask);
 }
 
+static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy,
+					  struct net_device *dev,
+					  u32 rate, u32 pkts, u32 intvl)
+{
+	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
+
+	if (vif->nw_type != INFRA_NETWORK ||
+	    !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	if (vif->sme_state != SME_CONNECTED)
+		return -ENOTCONN;
+
+	/* save this since the firmware won't report the interval */
+	vif->txe_intvl = intvl;
+
+	return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx,
+					 rate, pkts, intvl);
+}
+
 static const struct ieee80211_txrx_stypes
 ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 	[NL80211_IFTYPE_STATION] = {
@@ -3375,6 +3426,7 @@
 	.sched_scan_start = ath6kl_cfg80211_sscan_start,
 	.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
 	.set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
+	.set_cqm_txe_config = ath6kl_cfg80211_set_txe_config,
 };
 
 void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3395,16 +3447,22 @@
 		break;
 	}
 
-	if (test_bit(CONNECTED, &vif->flags) ||
-	    test_bit(CONNECT_PEND, &vif->flags))
+	if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+	    (test_bit(CONNECTED, &vif->flags) ||
+	    test_bit(CONNECT_PEND, &vif->flags)))
 		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
 
 	vif->sme_state = SME_DISCONNECTED;
 	clear_bit(CONNECTED, &vif->flags);
 	clear_bit(CONNECT_PEND, &vif->flags);
 
+	/* Stop netdev queues, needed during recovery */
+	netif_stop_queue(vif->ndev);
+	netif_carrier_off(vif->ndev);
+
 	/* disable scanning */
-	if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+	if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+	    ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
 				      0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
 		ath6kl_warn("failed to disable scan during stop\n");
 
@@ -3416,7 +3474,7 @@
 	struct ath6kl_vif *vif;
 
 	vif = ath6kl_vif_first(ar);
-	if (!vif) {
+	if (!vif && ar->state != ATH6KL_STATE_RECOVERY) {
 		/* save the current power mode before enabling power save */
 		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
 
@@ -3434,6 +3492,56 @@
 		ath6kl_cfg80211_stop(vif);
 }
 
+static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
+				      struct regulatory_request *request)
+{
+	struct ath6kl *ar = wiphy_priv(wiphy);
+	u32 rates[IEEE80211_NUM_BANDS];
+	int ret, i;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+		   "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+		   request->alpha2[0], request->alpha2[1],
+		   request->intersect ? " intersect" : "",
+		   request->processed ? " processed" : "",
+		   request->initiator, request->user_reg_hint_type);
+
+	/*
+	 * As firmware is not able to intersect regdoms, we can only listen to
+	 * cellular hints.
+	 */
+	if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
+		return -EOPNOTSUPP;
+
+	ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
+	if (ret) {
+		ath6kl_err("failed to set regdomain: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Firmware will apply the regdomain change only after a scan is
+	 * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been
+	 * changed.
+	 */
+
+	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+		if (wiphy->bands[i])
+			rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+
+
+	ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false,
+				       false, 0, ATH6KL_FG_SCAN_INTERVAL,
+				       0, NULL, false, rates);
+	if (ret) {
+		ath6kl_err("failed to start scan for a regdomain change: %d\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
 {
 	vif->aggr_cntxt = aggr_init(vif);
@@ -3506,9 +3614,13 @@
 	vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
 
 	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
-	if (fw_vif_idx != 0)
+	if (fw_vif_idx != 0) {
 		ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
 				     0x2;
+		if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+			     ar->fw_capabilities))
+			ndev->dev_addr[4] ^= 0x80;
+	}
 
 	init_netdev(ndev);
 
@@ -3562,6 +3674,12 @@
 					  BIT(NL80211_IFTYPE_P2P_CLIENT);
 	}
 
+	if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+	    test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
+		wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
+		ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
+	}
+
 	/* max num of ssids that can be probed during scanning */
 	wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
 
@@ -3607,7 +3725,7 @@
 		ath6kl_band_5ghz.ht_cap.ht_supported = false;
 	}
 
-	if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
+	if (ar->hw.flags & ATH6KL_HW_64BIT_RATES) {
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
@@ -3646,12 +3764,12 @@
 			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
 			    WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
 
-	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
 		ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 
 	if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
 		     ar->fw_capabilities))
-		ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
+		ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
 
 	ar->wiphy->probe_resp_offload =
 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 780f777..e5e70f3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -22,7 +22,6 @@
 	ATH6KL_CFG_SUSPEND_DEEPSLEEP,
 	ATH6KL_CFG_SUSPEND_CUTPOWER,
 	ATH6KL_CFG_SUSPEND_WOW,
-	ATH6KL_CFG_SUSPEND_SCHED_SCAN,
 };
 
 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 82c4dd2..4b46adb 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -33,6 +33,8 @@
 static unsigned int uart_debug;
 static unsigned int ath6kl_p2p;
 static unsigned int testmode;
+static unsigned int recovery_enable;
+static unsigned int heart_beat_poll;
 
 module_param(debug_mask, uint, 0644);
 module_param(suspend_mode, uint, 0644);
@@ -40,6 +42,12 @@
 module_param(uart_debug, uint, 0644);
 module_param(ath6kl_p2p, uint, 0644);
 module_param(testmode, uint, 0644);
+module_param(recovery_enable, uint, 0644);
+module_param(heart_beat_poll, uint, 0644);
+MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
+MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic "  \
+		 "polling. This also specifies the polling interval in "  \
+		 "msecs. Set recovery_enable for this to be effective");
 
 void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
 {
@@ -202,6 +210,17 @@
 	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
 		   __func__, wdev->netdev->name, wdev->netdev, ar);
 
+	ar->fw_recovery.enable = !!recovery_enable;
+	if (!ar->fw_recovery.enable)
+		return ret;
+
+	if (heart_beat_poll &&
+	    test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+		     ar->fw_capabilities))
+		ar->fw_recovery.hb_poll = heart_beat_poll;
+
+	ath6kl_recovery_init(ar);
+
 	return ret;
 
 err_rxbuf_cleanup:
@@ -291,6 +310,8 @@
 {
 	ath6kl_hif_power_off(ar);
 
+	ath6kl_recovery_cleanup(ar);
+
 	destroy_workqueue(ar->ath6kl_wq);
 
 	if (ar->htc_target)
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index cec49a3..189d8fa 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -115,6 +115,27 @@
 	 */
 	ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
 
+	/* Firmware supports filtering BSS results by RSSI */
+	ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+
+	/* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */
+	ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+
+	/* Firmware supports TX error rate notification */
+	ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+
+	/* supports WMI_SET_REGDOMAIN_CMDID command */
+	ATH6KL_FW_CAPABILITY_REGDOMAIN,
+
+	/* Firmware supports sched scan decoupled from host sleep */
+	ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2,
+
+	/*
+	 * Firmware capability for hang detection through heart beat
+	 * challenge messages.
+	 */
+	ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+
 	/* this needs to be last */
 	ATH6KL_FW_CAPABILITY_MAX,
 };
@@ -128,11 +149,15 @@
 };
 
 enum ath6kl_hw_flags {
-	ATH6KL_HW_FLAG_64BIT_RATES	= BIT(0),
+	ATH6KL_HW_64BIT_RATES		= BIT(0),
+	ATH6KL_HW_AP_INACTIVITY_MINS	= BIT(1),
+	ATH6KL_HW_MAP_LP_ENDPOINT	= BIT(2),
+	ATH6KL_HW_SDIO_CRC_ERROR_WAR	= BIT(3),
 };
 
 #define ATH6KL_FW_API2_FILE "fw-2.bin"
 #define ATH6KL_FW_API3_FILE "fw-3.bin"
+#define ATH6KL_FW_API4_FILE "fw-4.bin"
 
 /* AR6003 1.0 definitions */
 #define AR6003_HW_1_0_VERSION                 0x300002ba
@@ -186,6 +211,13 @@
 #define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
 	AR6004_HW_1_2_FW_DIR "/bdata.bin"
 
+/* AR6004 1.3 definitions */
+#define AR6004_HW_1_3_VERSION			0x31c8088a
+#define AR6004_HW_1_3_FW_DIR			"ath6k/AR6004/hw1.3"
+#define AR6004_HW_1_3_FIRMWARE_FILE		"fw.ram.bin"
+#define AR6004_HW_1_3_BOARD_DATA_FILE		"ath6k/AR6004/hw1.3/bdata.bin"
+#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE	"ath6k/AR6004/hw1.3/bdata.bin"
+
 /* Per STA data, used in AP mode */
 #define STA_PS_AWAKE		BIT(0)
 #define	STA_PS_SLEEP		BIT(1)
@@ -536,6 +568,7 @@
 	HOST_SLEEP_MODE_CMD_PROCESSED,
 	NETDEV_MCAST_ALL_ON,
 	NETDEV_MCAST_ALL_OFF,
+	SCHED_SCANNING,
 };
 
 struct ath6kl_vif {
@@ -580,11 +613,13 @@
 	u16 assoc_bss_beacon_int;
 	u16 listen_intvl_t;
 	u16 bmiss_time_t;
+	u32 txe_intvl;
 	u16 bg_scan_period;
 	u8 assoc_bss_dtim_period;
 	struct net_device_stats net_stats;
 	struct target_stats target_stats;
 	struct wmi_connect_cmd profile;
+	u16 rsn_capab;
 
 	struct list_head mc_filter;
 };
@@ -609,6 +644,7 @@
 	SKIP_SCAN,
 	ROAM_TBL_PEND,
 	FIRST_BOOT,
+	RECOVERY_CLEANUP,
 };
 
 enum ath6kl_state {
@@ -619,7 +655,16 @@
 	ATH6KL_STATE_DEEPSLEEP,
 	ATH6KL_STATE_CUTPOWER,
 	ATH6KL_STATE_WOW,
-	ATH6KL_STATE_SCHED_SCAN,
+	ATH6KL_STATE_RECOVERY,
+};
+
+/* Fw error recovery */
+#define ATH6KL_HB_RESP_MISS_THRES	5
+
+enum ath6kl_fw_err {
+	ATH6KL_FW_ASSERT,
+	ATH6KL_FW_HB_RESP_FAILURE,
+	ATH6KL_FW_EP_FULL,
 };
 
 struct ath6kl {
@@ -679,6 +724,7 @@
 	struct ath6kl_req_key ap_mode_bkey;
 	struct sk_buff_head mcastpsq;
 	u32 want_ch_switch;
+	u16 last_ch;
 
 	/*
 	 * FIXME: protects access to mcastpsq but is actually useless as
@@ -764,6 +810,17 @@
 
 	bool wiphy_registered;
 
+	struct ath6kl_fw_recovery {
+		struct work_struct recovery_work;
+		unsigned long err_reason;
+		unsigned long hb_poll;
+		struct timer_list hb_timer;
+		u32 seq_num;
+		bool hb_pending;
+		u8 hb_misscnt;
+		bool enable;
+	} fw_recovery;
+
 #ifdef CONFIG_ATH6KL_DEBUG
 	struct {
 		struct sk_buff_head fwlog_queue;
@@ -899,4 +956,12 @@
 void ath6kl_core_cleanup(struct ath6kl *ar);
 void ath6kl_core_destroy(struct ath6kl *ar);
 
+/* Fw error recovery */
+void ath6kl_init_hw_restart(struct ath6kl *ar);
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
+void ath6kl_recovery_init(struct ath6kl *ar);
+void ath6kl_recovery_cleanup(struct ath6kl *ar);
+void ath6kl_recovery_suspend(struct ath6kl *ar);
+void ath6kl_recovery_resume(struct ath6kl *ar);
 #endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 49639d8..f97cd4e 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -44,6 +44,7 @@
 	ATH6KL_DBG_SUSPEND	= BIT(20),
 	ATH6KL_DBG_USB		= BIT(21),
 	ATH6KL_DBG_USB_BULK	= BIT(22),
+	ATH6KL_DBG_RECOVERY	= BIT(23),
 	ATH6KL_DBG_ANY	        = 0xffffffff  /* enable all logs */
 };
 
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 68ed6c2..a6b6144 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -136,6 +136,7 @@
 
 	ath6kl_hif_dump_fw_crash(dev->ar);
 	ath6kl_read_fwlogs(dev->ar);
+	ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
 
 	return ret;
 }
@@ -338,8 +339,7 @@
 	status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
 				     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
 
-	if (status)
-		WARN_ON(1);
+	WARN_ON(status);
 
 	return status;
 }
@@ -383,8 +383,7 @@
 	status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
 				     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
 
-	if (status)
-		WARN_ON(1);
+	WARN_ON(status);
 
 	return status;
 }
@@ -695,11 +694,6 @@
 	ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
 		   dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
 
-	/* usb doesn't support enabling interrupts */
-	/* FIXME: remove check once USB support is implemented */
-	if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB)
-		return 0;
-
 	status = ath6kl_hif_disable_intrs(dev);
 
 fail_setup:
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index cd0e1ba..fbb78df 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -2492,7 +2492,8 @@
 		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
 	}
 
-	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
+	if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
+			 assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
 		status = -ENOMEM;
 		goto fail_tx;
 	}
@@ -2655,12 +2656,6 @@
 	struct htc_service_connect_resp resp;
 	int status;
 
-	/* FIXME: remove once USB support is implemented */
-	if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
-		ath6kl_err("HTC doesn't support USB yet. Patience!\n");
-		return -EOPNOTSUPP;
-	}
-
 	/* we should be getting 1 control message that the target is ready */
 	packet = htc_wait_for_ctrl_msg(target);
 
@@ -2890,9 +2885,7 @@
 {
 	struct htc_packet *packet, *tmp_packet;
 
-	/* FIXME: remove check once USB support is implemented */
-	if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
-		ath6kl_hif_cleanup_scatter(target->dev->ar);
+	ath6kl_hif_cleanup_scatter(target->dev->ar);
 
 	list_for_each_entry_safe(packet, tmp_packet,
 				 &target->free_ctrl_txbuf, list) {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index f9626c7..ba6bd49 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -374,9 +374,8 @@
 				packet = list_first_entry(txq,
 							  struct htc_packet,
 							  list);
-				list_del(&packet->list);
-				/* insert into local queue */
-				list_add_tail(&packet->list, &send_queue);
+				/* move to local queue */
+				list_move_tail(&packet->list, &send_queue);
 			}
 
 			/*
@@ -399,11 +398,10 @@
 					 * for cleanup */
 				} else {
 					/* callback wants to keep this packet,
-					 * remove from caller's queue */
-					list_del(&packet->list);
-					/* put it in the send queue */
-					list_add_tail(&packet->list,
-						      &send_queue);
+					 * move from caller's queue to the send
+					 * queue */
+					list_move_tail(&packet->list,
+						       &send_queue);
 				}
 
 			}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f90b5db..f21fa32 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,7 +42,7 @@
 		.reserved_ram_size		= 6912,
 		.refclk_hz			= 26000000,
 		.uarttx_pin			= 8,
-		.flags				= 0,
+		.flags				= ATH6KL_HW_SDIO_CRC_ERROR_WAR,
 
 		/* hw2.0 needs override address hardcoded */
 		.app_start_override_addr	= 0x944C00,
@@ -68,7 +68,7 @@
 		.refclk_hz			= 26000000,
 		.uarttx_pin			= 8,
 		.testscript_addr		= 0x57ef74,
-		.flags				= 0,
+		.flags				= ATH6KL_HW_SDIO_CRC_ERROR_WAR,
 
 		.fw = {
 			.dir		= AR6003_HW_2_1_1_FW_DIR,
@@ -93,7 +93,8 @@
 		.board_addr			= 0x433900,
 		.refclk_hz			= 26000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_FLAG_64BIT_RATES,
+		.flags				= ATH6KL_HW_64BIT_RATES |
+						  ATH6KL_HW_AP_INACTIVITY_MINS,
 
 		.fw = {
 			.dir		= AR6004_HW_1_0_FW_DIR,
@@ -113,8 +114,8 @@
 		.board_addr			= 0x43d400,
 		.refclk_hz			= 40000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_FLAG_64BIT_RATES,
-
+		.flags				= ATH6KL_HW_64BIT_RATES |
+						  ATH6KL_HW_AP_INACTIVITY_MINS,
 		.fw = {
 			.dir		= AR6004_HW_1_1_FW_DIR,
 			.fw		= AR6004_HW_1_1_FIRMWARE_FILE,
@@ -133,7 +134,8 @@
 		.board_addr			= 0x435c00,
 		.refclk_hz			= 40000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_FLAG_64BIT_RATES,
+		.flags				= ATH6KL_HW_64BIT_RATES |
+						  ATH6KL_HW_AP_INACTIVITY_MINS,
 
 		.fw = {
 			.dir		= AR6004_HW_1_2_FW_DIR,
@@ -142,6 +144,28 @@
 		.fw_board		= AR6004_HW_1_2_BOARD_DATA_FILE,
 		.fw_default_board	= AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
 	},
+	{
+		.id				= AR6004_HW_1_3_VERSION,
+		.name				= "ar6004 hw 1.3",
+		.dataset_patch_addr		= 0x437860,
+		.app_load_addr			= 0x1234,
+		.board_ext_data_addr		= 0x437000,
+		.reserved_ram_size		= 7168,
+		.board_addr			= 0x436400,
+		.refclk_hz                      = 40000000,
+		.uarttx_pin                     = 11,
+		.flags				= ATH6KL_HW_64BIT_RATES |
+						  ATH6KL_HW_AP_INACTIVITY_MINS |
+						  ATH6KL_HW_MAP_LP_ENDPOINT,
+
+		.fw = {
+			.dir            = AR6004_HW_1_3_FW_DIR,
+			.fw             = AR6004_HW_1_3_FIRMWARE_FILE,
+		},
+
+		.fw_board               = AR6004_HW_1_3_BOARD_DATA_FILE,
+		.fw_default_board       = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
+	},
 };
 
 /*
@@ -337,7 +361,7 @@
 	if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
 		return -EIO;
 
-	/* connect to Video service, map this to to HI PRI */
+	/* connect to Video service, map this to HI PRI */
 	connect.svc_id = WMI_DATA_VI_SVC;
 	if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
 		return -EIO;
@@ -1088,6 +1112,12 @@
 	if (ret)
 		return ret;
 
+	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
+	if (ret == 0) {
+		ar->fw_api = 4;
+		goto out;
+	}
+
 	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
 	if (ret == 0) {
 		ar->fw_api = 3;
@@ -1401,8 +1431,7 @@
 		return status;
 
 	/* WAR to avoid SDIO CRC err */
-	if (ar->version.target_ver == AR6003_HW_2_0_VERSION ||
-	    ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
+	if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) {
 		ath6kl_err("temporary war to avoid sdio crc error\n");
 
 		param = 0x28;
@@ -1520,7 +1549,7 @@
 	return NULL;
 }
 
-int ath6kl_init_hw_start(struct ath6kl *ar)
+static int __ath6kl_init_hw_start(struct ath6kl *ar)
 {
 	long timeleft;
 	int ret, i;
@@ -1616,8 +1645,6 @@
 			goto err_htc_stop;
 	}
 
-	ar->state = ATH6KL_STATE_ON;
-
 	return 0;
 
 err_htc_stop:
@@ -1630,7 +1657,18 @@
 	return ret;
 }
 
-int ath6kl_init_hw_stop(struct ath6kl *ar)
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+	int err;
+
+	err = __ath6kl_init_hw_start(ar);
+	if (err)
+		return err;
+	ar->state = ATH6KL_STATE_ON;
+	return 0;
+}
+
+static int __ath6kl_init_hw_stop(struct ath6kl *ar)
 {
 	int ret;
 
@@ -1646,11 +1684,37 @@
 	if (ret)
 		ath6kl_warn("failed to power off hif: %d\n", ret);
 
-	ar->state = ATH6KL_STATE_OFF;
-
 	return 0;
 }
 
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+	int err;
+
+	err = __ath6kl_init_hw_stop(ar);
+	if (err)
+		return err;
+	ar->state = ATH6KL_STATE_OFF;
+	return 0;
+}
+
+void ath6kl_init_hw_restart(struct ath6kl *ar)
+{
+	clear_bit(WMI_READY, &ar->flag);
+
+	ath6kl_cfg80211_stop_all(ar);
+
+	if (__ath6kl_init_hw_stop(ar)) {
+		ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n");
+		return;
+	}
+
+	if (__ath6kl_init_hw_start(ar)) {
+		ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n");
+		return;
+	}
+}
+
 /* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
 void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
 {
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index c189e28..bd50b6b 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -293,13 +293,17 @@
 	}
 
 	address = TARG_VTOP(ar->target_type, debug_hdr_addr);
-	ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+	ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+	if (ret)
+		goto out;
 
 	address = TARG_VTOP(ar->target_type,
 			    le32_to_cpu(debug_hdr.dbuf_addr));
 	firstbuf = address;
 	dropped = le32_to_cpu(debug_hdr.dropped);
-	ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+	ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+	if (ret)
+		goto out;
 
 	loop = 100;
 
@@ -322,7 +326,8 @@
 
 		address = TARG_VTOP(ar->target_type,
 				    le32_to_cpu(debug_buf.next));
-		ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+		ret = ath6kl_diag_read(ar, address, &debug_buf,
+				       sizeof(debug_buf));
 		if (ret)
 			goto out;
 
@@ -436,12 +441,9 @@
 		break;
 	}
 
-	if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) {
-		ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+	if (ar->last_ch != channel)
 		/* we actually don't know the phymode, default to HT20 */
-		ath6kl_cfg80211_ch_switch_notify(vif, channel,
-						 WMI_11G_HT20);
-	}
+		ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20);
 
 	ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
 	set_bit(CONNECTED, &vif->flags);
@@ -606,6 +608,18 @@
 
 	switch (vif->nw_type) {
 	case AP_NETWORK:
+		/*
+		 * reconfigure any saved RSN IE capabilities in the beacon /
+		 * probe response to stay in sync with the supplicant.
+		 */
+		if (vif->rsn_capab &&
+		    test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+			     ar->fw_capabilities))
+			ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
+					      WLAN_EID_RSN, WMI_RSN_IE_CAPB,
+					      (const u8 *) &vif->rsn_capab,
+					      sizeof(vif->rsn_capab));
+
 		return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
 						    &vif->profile);
 	default:
@@ -628,6 +642,9 @@
 		if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
 			res = ath6kl_commit_ch_switch(vif, channel);
 
+		/* if channel switch failed, oh well we tried */
+		ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+
 		if (res)
 			ath6kl_err("channel switch failed nw_type %d res %d\n",
 				   vif->nw_type, res);
@@ -981,8 +998,25 @@
 	if (vif->nw_type == AP_NETWORK) {
 		/* disconnect due to other STA vif switching channels */
 		if (reason == BSS_DISCONNECTED &&
-		    prot_reason_status == WMI_AP_REASON_STA_ROAM)
+		    prot_reason_status == WMI_AP_REASON_STA_ROAM) {
 			ar->want_ch_switch |= 1 << vif->fw_vif_idx;
+			/* bail back to this channel if STA vif fails connect */
+			ar->last_ch = le16_to_cpu(vif->profile.ch);
+		}
+
+		if (prot_reason_status == WMI_AP_REASON_MAX_STA) {
+			/* send max client reached notification to user space */
+			cfg80211_conn_failed(vif->ndev, bssid,
+					     NL80211_CONN_FAIL_MAX_CLIENTS,
+					     GFP_KERNEL);
+		}
+
+		if (prot_reason_status == WMI_AP_REASON_ACL) {
+			/* send blocked client notification to user space */
+			cfg80211_conn_failed(vif->ndev, bssid,
+					     NL80211_CONN_FAIL_BLOCKED_CLIENT,
+					     GFP_KERNEL);
+		}
 
 		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
 			return;
@@ -1041,6 +1075,9 @@
 		}
 	}
 
+	/* restart disconnected concurrent vifs waiting for new channel */
+	ath6kl_check_ch_switch(ar, ar->last_ch);
+
 	/* update connect & link status atomically */
 	spin_lock_bh(&vif->if_lock);
 	clear_bit(CONNECTED, &vif->flags);
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
new file mode 100644
index 0000000..3a8d5e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+static void ath6kl_recovery_work(struct work_struct *work)
+{
+	struct ath6kl *ar = container_of(work, struct ath6kl,
+					 fw_recovery.recovery_work);
+
+	ar->state = ATH6KL_STATE_RECOVERY;
+
+	del_timer_sync(&ar->fw_recovery.hb_timer);
+
+	ath6kl_init_hw_restart(ar);
+
+	ar->state = ATH6KL_STATE_ON;
+	clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+	ar->fw_recovery.err_reason = 0;
+
+	if (ar->fw_recovery.hb_poll)
+		mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+			  msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason)
+{
+	if (!ar->fw_recovery.enable)
+		return;
+
+	ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Fw error detected, reason:%d\n",
+		   reason);
+
+	set_bit(reason, &ar->fw_recovery.err_reason);
+
+	if (!test_bit(RECOVERY_CLEANUP, &ar->flag) &&
+	    ar->state != ATH6KL_STATE_RECOVERY)
+		queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
+{
+	if (cookie == ar->fw_recovery.seq_num)
+		ar->fw_recovery.hb_pending = false;
+}
+
+static void ath6kl_recovery_hb_timer(unsigned long data)
+{
+	struct ath6kl *ar = (struct ath6kl *) data;
+	int err;
+
+	if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
+	    (ar->state == ATH6KL_STATE_RECOVERY))
+		return;
+
+	if (ar->fw_recovery.hb_pending)
+		ar->fw_recovery.hb_misscnt++;
+	else
+		ar->fw_recovery.hb_misscnt = 0;
+
+	if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) {
+		ar->fw_recovery.hb_misscnt = 0;
+		ar->fw_recovery.seq_num = 0;
+		ar->fw_recovery.hb_pending = false;
+		ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE);
+		return;
+	}
+
+	ar->fw_recovery.seq_num++;
+	ar->fw_recovery.hb_pending = true;
+
+	err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi,
+						ar->fw_recovery.seq_num, 0);
+	if (err)
+		ath6kl_warn("Failed to send hb challenge request, err:%d\n",
+			    err);
+
+	mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+		  msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_init(struct ath6kl *ar)
+{
+	struct ath6kl_fw_recovery *recovery = &ar->fw_recovery;
+
+	clear_bit(RECOVERY_CLEANUP, &ar->flag);
+	INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work);
+	recovery->seq_num = 0;
+	recovery->hb_misscnt = 0;
+	ar->fw_recovery.hb_pending = false;
+	ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer;
+	ar->fw_recovery.hb_timer.data = (unsigned long) ar;
+	init_timer_deferrable(&ar->fw_recovery.hb_timer);
+
+	if (ar->fw_recovery.hb_poll)
+		mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+			  msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_cleanup(struct ath6kl *ar)
+{
+	if (!ar->fw_recovery.enable)
+		return;
+
+	set_bit(RECOVERY_CLEANUP, &ar->flag);
+
+	del_timer_sync(&ar->fw_recovery.hb_timer);
+	cancel_work_sync(&ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_suspend(struct ath6kl *ar)
+{
+	if (!ar->fw_recovery.enable)
+		return;
+
+	ath6kl_recovery_cleanup(ar);
+
+	if (!ar->fw_recovery.err_reason)
+		return;
+
+	/* Process pending fw error detection */
+	ar->fw_recovery.err_reason = 0;
+	WARN_ON(ar->state != ATH6KL_STATE_ON);
+	ar->state = ATH6KL_STATE_RECOVERY;
+	ath6kl_init_hw_restart(ar);
+	ar->state = ATH6KL_STATE_ON;
+}
+
+void ath6kl_recovery_resume(struct ath6kl *ar)
+{
+	if (!ar->fw_recovery.enable)
+		return;
+
+	clear_bit(RECOVERY_CLEANUP, &ar->flag);
+
+	if (!ar->fw_recovery.hb_poll)
+		return;
+
+	ar->fw_recovery.hb_pending = false;
+	ar->fw_recovery.seq_num = 0;
+	ar->fw_recovery.hb_misscnt = 0;
+	mod_timer(&ar->fw_recovery.hb_timer,
+		  jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 05b9540..d111980 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -709,7 +709,7 @@
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct htc_target *target = ar->htc_target;
-	int ret;
+	int ret = 0;
 	bool virt_scat = false;
 
 	if (ar_sdio->scatter_enabled)
@@ -844,22 +844,6 @@
 	bool try_deepsleep = false;
 	int ret;
 
-	if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
-		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
-
-		ret = ath6kl_set_sdio_pm_caps(ar);
-		if (ret)
-			goto cut_pwr;
-
-		ret =  ath6kl_cfg80211_suspend(ar,
-					       ATH6KL_CFG_SUSPEND_SCHED_SCAN,
-					       NULL);
-		if (ret)
-			goto cut_pwr;
-
-		return 0;
-	}
-
 	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
 	    (!ar->suspend_mode && wow)) {
 
@@ -942,14 +926,14 @@
 	case ATH6KL_STATE_WOW:
 		break;
 
-	case ATH6KL_STATE_SCHED_SCAN:
-		break;
-
 	case ATH6KL_STATE_SUSPENDING:
 		break;
 
 	case ATH6KL_STATE_RESUMING:
 		break;
+
+	case ATH6KL_STATE_RECOVERY:
+		break;
 	}
 
 	ath6kl_cfg80211_resume(ar);
@@ -1462,3 +1446,6 @@
 MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
 MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
 MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 7dfa0fd..78b3692 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -288,8 +288,16 @@
 	int status = 0;
 	struct ath6kl_cookie *cookie = NULL;
 
-	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
+	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
+		dev_kfree_skb(skb);
 		return -EACCES;
+	}
+
+	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
+			 eid >= ENDPOINT_MAX)) {
+		status = -EINVAL;
+		goto fail_ctrl_tx;
+	}
 
 	spin_lock_bh(&ar->lock);
 
@@ -591,6 +599,7 @@
 		 */
 		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
 		ath6kl_err("wmi ctrl ep is full\n");
+		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
 		return action;
 	}
 
@@ -695,22 +704,31 @@
 					  list);
 		list_del(&packet->list);
 
+		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
+				 packet->endpoint >= ENDPOINT_MAX))
+			continue;
+
 		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
-		if (!ath6kl_cookie)
-			goto fatal;
+		if (WARN_ON_ONCE(!ath6kl_cookie))
+			continue;
 
 		status = packet->status;
 		skb = ath6kl_cookie->skb;
 		eid = packet->endpoint;
 		map_no = ath6kl_cookie->map_no;
 
-		if (!skb || !skb->data)
-			goto fatal;
+		if (WARN_ON_ONCE(!skb || !skb->data)) {
+			dev_kfree_skb(skb);
+			ath6kl_free_cookie(ar, ath6kl_cookie);
+			continue;
+		}
 
 		__skb_queue_tail(&skb_queue, skb);
 
-		if (!status && (packet->act_len != skb->len))
-			goto fatal;
+		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
+			ath6kl_free_cookie(ar, ath6kl_cookie);
+			continue;
+		}
 
 		ar->tx_pending[eid]--;
 
@@ -792,11 +810,6 @@
 		wake_up(&ar->event_wq);
 
 	return;
-
-fatal:
-	WARN_ON(1);
-	spin_unlock_bh(&ar->lock);
-	return;
 }
 
 void ath6kl_tx_data_cleanup(struct ath6kl *ar)
@@ -885,8 +898,11 @@
 			break;
 
 		packet = (struct htc_packet *) skb->head;
-		if (!IS_ALIGNED((unsigned long) skb->data, 4))
+		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+			size_t len = skb_headlen(skb);
 			skb->data = PTR_ALIGN(skb->data - 4, 4);
+			skb_set_tail_pointer(skb, len);
+		}
 		set_htc_rxpkt_info(packet, skb, skb->data,
 				   ATH6KL_BUFFER_SIZE, endpoint);
 		packet->skb = skb;
@@ -908,8 +924,11 @@
 			return;
 
 		packet = (struct htc_packet *) skb->head;
-		if (!IS_ALIGNED((unsigned long) skb->data, 4))
+		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+			size_t len = skb_headlen(skb);
 			skb->data = PTR_ALIGN(skb->data - 4, 4);
+			skb_set_tail_pointer(skb, len);
+		}
 		set_htc_rxpkt_info(packet, skb, skb->data,
 				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
 		packet->skb = skb;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 3740c3d..62bcc0d 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -185,9 +185,10 @@
 	for (i = 0; i < urb_cnt; i++) {
 		urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
 				      GFP_KERNEL);
-		if (urb_context == NULL)
-			/* FIXME: set status to -ENOMEM */
-			break;
+		if (urb_context == NULL) {
+			status = -ENOMEM;
+			goto fail_alloc_pipe_resources;
+		}
 
 		urb_context->pipe = pipe;
 
@@ -204,6 +205,7 @@
 		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
 		   pipe->urb_alloc);
 
+fail_alloc_pipe_resources:
 	return status;
 }
 
@@ -803,7 +805,11 @@
 		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
 		break;
 	case WMI_DATA_VI_SVC:
-		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+
+		if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		else
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
 		/*
 		* Disable rxdata2 directly, it will be enabled
 		* if FW enable rxdata2
@@ -811,7 +817,11 @@
 		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
 		break;
 	case WMI_DATA_VO_SVC:
-		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
+
+		if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		else
+			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
 		/*
 		* Disable rxdata2 directly, it will be enabled
 		* if FW enable rxdata2
@@ -1196,7 +1206,14 @@
 
 static int ath6kl_usb_init(void)
 {
-	usb_register(&ath6kl_usb_driver);
+	int ret;
+
+	ret = usb_register(&ath6kl_usb_driver);
+	if (ret) {
+		ath6kl_err("usb registration failed: %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -1220,3 +1237,6 @@
 MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE);
 MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
 MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index c30ab4b..998f8b0 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -474,7 +474,7 @@
 		return -EINVAL;
 	}
 	id = vif->last_roc_id;
-	cfg80211_ready_on_channel(&vif->wdev, id, chan, NL80211_CHAN_NO_HT,
+	cfg80211_ready_on_channel(&vif->wdev, id, chan,
 				  dur, GFP_ATOMIC);
 
 	return 0;
@@ -513,8 +513,7 @@
 	else
 		id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
 	vif->last_cancel_roc_id = 0;
-	cfg80211_remain_on_channel_expired(&vif->wdev, id, chan,
-					   NL80211_CHAN_NO_HT, GFP_ATOMIC);
+	cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC);
 
 	return 0;
 }
@@ -936,8 +935,12 @@
 
 		regpair = ath6kl_get_regpair((u16) reg_code);
 		country = ath6kl_regd_find_country_by_rd((u16) reg_code);
-		ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
-			   regpair->regDmnEnum);
+		if (regpair)
+			ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
+				   regpair->regDmnEnum);
+		else
+			ath6kl_warn("Regpair not found reg_code 0x%0x\n",
+				    reg_code);
 	}
 
 	if (country && wmi->parent_dev->wiphy_registered) {
@@ -1116,7 +1119,7 @@
 	 * the timer would not ever fire if the scan interval is short
 	 * enough.
 	 */
-	if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+	if (test_bit(SCHED_SCANNING, &vif->flags) &&
 	    !timer_pending(&vif->sched_scan_timer)) {
 		mod_timer(&vif->sched_scan_timer, jiffies +
 			  msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
@@ -1170,6 +1173,9 @@
 		rate = RATE_AUTO;
 	} else {
 		index = reply->rate_index & 0x7f;
+		if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
+			return -EINVAL;
+
 		sgi = (reply->rate_index & 0x80) ? 1 : 0;
 		rate = wmi_rate_tbl[index][sgi];
 	}
@@ -1531,6 +1537,68 @@
 	return 0;
 }
 
+static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
+					  struct ath6kl_vif *vif)
+{
+	struct wmi_txe_notify_event *ev;
+	u32 rate, pkts;
+
+	if (len < sizeof(*ev))
+		return -EINVAL;
+
+	if (vif->sme_state != SME_CONNECTED)
+		return -ENOTCONN;
+
+	ev = (struct wmi_txe_notify_event *) datap;
+	rate = le32_to_cpu(ev->rate);
+	pkts = le32_to_cpu(ev->pkts);
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n",
+		   vif->bssid, rate, pkts, vif->txe_intvl);
+
+	cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
+				rate, vif->txe_intvl, GFP_KERNEL);
+
+	return 0;
+}
+
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+			      u32 rate, u32 pkts, u32 intvl)
+{
+	struct sk_buff *skb;
+	struct wmi_txe_notify_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_txe_notify_cmd *) skb->data;
+	cmd->rate = cpu_to_le32(rate);
+	cmd->pkts = cpu_to_le32(pkts);
+	cmd->intvl = cpu_to_le32(intvl);
+
+	return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi)
+{
+	struct sk_buff *skb;
+	struct wmi_set_rssi_filter_cmd *cmd;
+	int ret;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
+	cmd->rssi = rssi;
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
+}
+
 static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
 			struct wmi_snr_threshold_params_cmd *snr_cmd)
 {
@@ -1677,8 +1745,11 @@
 	int ret;
 	u16 info1;
 
-	if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
+	if (WARN_ON(skb == NULL ||
+		    (if_idx > (wmi->parent_dev->vif_max - 1)))) {
+		dev_kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
 		   cmd_id, skb->len, sync_flag);
@@ -1833,6 +1904,59 @@
 	return ret;
 }
 
+/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
+ * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
+ * mgmt operations using station interface.
+ */
+static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+				    enum wmi_scan_type scan_type,
+				    u32 force_fgscan, u32 is_legacy,
+				    u32 home_dwell_time,
+				    u32 force_scan_interval,
+				    s8 num_chan, u16 *ch_list)
+{
+	struct sk_buff *skb;
+	struct wmi_start_scan_cmd *sc;
+	s8 size;
+	int i, ret;
+
+	size = sizeof(struct wmi_start_scan_cmd);
+
+	if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+		return -EINVAL;
+
+	if (num_chan > WMI_MAX_CHANNELS)
+		return -EINVAL;
+
+	if (num_chan)
+		size += sizeof(u16) * (num_chan - 1);
+
+	skb = ath6kl_wmi_get_new_buf(size);
+	if (!skb)
+		return -ENOMEM;
+
+	sc = (struct wmi_start_scan_cmd *) skb->data;
+	sc->scan_type = scan_type;
+	sc->force_fg_scan = cpu_to_le32(force_fgscan);
+	sc->is_legacy = cpu_to_le32(is_legacy);
+	sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+	sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+	sc->num_ch = num_chan;
+
+	for (i = 0; i < num_chan; i++)
+		sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
+				  NO_SYNC_WMIFLAG);
+
+	return ret;
+}
+
+/*
+ * beginscan supports (compared to old startscan) P2P mgmt operations using
+ * station interface, sending additional information like supported rates to
+ * advertise and xmit rates for probe requests
+ */
 int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
 			     enum wmi_scan_type scan_type,
 			     u32 force_fgscan, u32 is_legacy,
@@ -1848,6 +1972,15 @@
 	int num_rates;
 	u32 ratemask;
 
+	if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+		      ar->fw_capabilities)) {
+		return ath6kl_wmi_startscan_cmd(wmi, if_idx,
+						scan_type, force_fgscan,
+						is_legacy, home_dwell_time,
+						force_scan_interval,
+						num_chan, ch_list);
+	}
+
 	size = sizeof(struct wmi_begin_scan_cmd);
 
 	if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
@@ -1900,50 +2033,24 @@
 	return ret;
 }
 
-/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
- * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
- * mgmt operations using station interface.
- */
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
-			     enum wmi_scan_type scan_type,
-			     u32 force_fgscan, u32 is_legacy,
-			     u32 home_dwell_time, u32 force_scan_interval,
-			     s8 num_chan, u16 *ch_list)
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable)
 {
 	struct sk_buff *skb;
-	struct wmi_start_scan_cmd *sc;
-	s8 size;
-	int i, ret;
+	struct wmi_enable_sched_scan_cmd *sc;
+	int ret;
 
-	size = sizeof(struct wmi_start_scan_cmd);
-
-	if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
-		return -EINVAL;
-
-	if (num_chan > WMI_MAX_CHANNELS)
-		return -EINVAL;
-
-	if (num_chan)
-		size += sizeof(u16) * (num_chan - 1);
-
-	skb = ath6kl_wmi_get_new_buf(size);
+	skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
 	if (!skb)
 		return -ENOMEM;
 
-	sc = (struct wmi_start_scan_cmd *) skb->data;
-	sc->scan_type = scan_type;
-	sc->force_fg_scan = cpu_to_le32(force_fgscan);
-	sc->is_legacy = cpu_to_le32(is_legacy);
-	sc->home_dwell_time = cpu_to_le32(home_dwell_time);
-	sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
-	sc->num_ch = num_chan;
+	ath6kl_dbg(ATH6KL_DBG_WMI, "%s scheduled scan on vif %d\n",
+		   enable ? "enabling" : "disabling", if_idx);
+	sc = (struct wmi_enable_sched_scan_cmd *) skb->data;
+	sc->enable = enable ? 1 : 0;
 
-	for (i = 0; i < num_chan; i++)
-		sc->ch_list[i] = cpu_to_le16(ch_list[i]);
-
-	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+				  WMI_ENABLE_SCHED_SCAN_CMDID,
 				  NO_SYNC_WMIFLAG);
-
 	return ret;
 }
 
@@ -2275,8 +2382,10 @@
 	struct wmi_data_hdr *data_hdr;
 	int ret;
 
-	if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
+	if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) {
+		dev_kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	skb_push(skb, sizeof(struct wmi_data_hdr));
 
@@ -2313,10 +2422,8 @@
 	spin_unlock_bh(&wmi->lock);
 
 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
-	if (!skb) {
-		ret = -ENOMEM;
-		goto free_skb;
-	}
+	if (!skb)
+		return -ENOMEM;
 
 	cmd = (struct wmi_sync_cmd *) skb->data;
 
@@ -2339,7 +2446,7 @@
 	 * then do not send the Synchronize cmd on the control ep
 	 */
 	if (ret)
-		goto free_skb;
+		goto free_cmd_skb;
 
 	/*
 	 * Send sync cmd followed by sync data messages on all
@@ -2349,15 +2456,12 @@
 				  NO_SYNC_WMIFLAG);
 
 	if (ret)
-		goto free_skb;
-
-	/* cmd buffer sent, we no longer own it */
-	skb = NULL;
+		goto free_data_skb;
 
 	for (index = 0; index < num_pri_streams; index++) {
 
 		if (WARN_ON(!data_sync_bufs[index].skb))
-			break;
+			goto free_data_skb;
 
 		ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
 					       data_sync_bufs[index].
@@ -2366,17 +2470,20 @@
 		    ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
 					      ep_id, if_idx);
 
-		if (ret)
-			break;
-
 		data_sync_bufs[index].skb = NULL;
+
+		if (ret)
+			goto free_data_skb;
 	}
 
-free_skb:
+	return 0;
+
+free_cmd_skb:
 	/* free up any resources left over (possibly due to an error) */
 	if (skb)
 		dev_kfree_skb(skb);
 
+free_data_skb:
 	for (index = 0; index < num_pri_streams; index++) {
 		if (data_sync_bufs[index].skb != NULL) {
 			dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
@@ -2618,11 +2725,13 @@
 {
 	struct sk_buff *skb;
 	int ret, mode, band;
-	u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+	u64 mcsrate, ratemask[ATH6KL_NUM_BANDS];
 	struct wmi_set_tx_select_rates64_cmd *cmd;
 
 	memset(&ratemask, 0, sizeof(ratemask));
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+	/* only check 2.4 and 5 GHz bands, skip the rest */
+	for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
 		/* copy legacy rate mask */
 		ratemask[band] = mask->control[band].legacy;
 		if (band == IEEE80211_BAND_5GHZ)
@@ -2668,11 +2777,13 @@
 {
 	struct sk_buff *skb;
 	int ret, mode, band;
-	u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+	u32 mcsrate, ratemask[ATH6KL_NUM_BANDS];
 	struct wmi_set_tx_select_rates32_cmd *cmd;
 
 	memset(&ratemask, 0, sizeof(ratemask));
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+	/* only check 2.4 and 5 GHz bands, skip the rest */
+	for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
 		/* copy legacy rate mask */
 		ratemask[band] = mask->control[band].legacy;
 		if (band == IEEE80211_BAND_5GHZ)
@@ -2716,7 +2827,7 @@
 {
 	struct ath6kl *ar = wmi->parent_dev;
 
-	if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
+	if (ar->hw.flags & ATH6KL_HW_64BIT_RATES)
 		return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
 	else
 		return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
@@ -3139,12 +3250,40 @@
 	return ret;
 }
 
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2)
+{
+	struct sk_buff *skb;
+	struct wmi_set_regdomain_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_regdomain_cmd *) skb->data;
+	memcpy(cmd->iso_name, alpha2, 2);
+
+	return ath6kl_wmi_cmd_send(wmi, 0, skb,
+				   WMI_SET_REGDOMAIN_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
 s32 ath6kl_wmi_get_rate(s8 rate_index)
 {
+	u8 sgi = 0;
+
 	if (rate_index == RATE_AUTO)
 		return 0;
 
-	return wmi_rate_tbl[(u32) rate_index][0];
+	/* SGI is stored as the MSB of the rate_index */
+	if (rate_index & RATE_INDEX_MSB) {
+		rate_index &= RATE_INDEX_WITHOUT_SGI_MASK;
+		sgi = 1;
+	}
+
+	if (WARN_ON(rate_index > RATE_MCS_7_40))
+		rate_index = RATE_MCS_7_40;
+
+	return wmi_rate_tbl[(u32) rate_index][sgi];
 }
 
 static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
@@ -3634,6 +3773,19 @@
 				   NO_SYNC_WMIFLAG);
 }
 
+static void ath6kl_wmi_hb_challenge_resp_event(struct wmi *wmi, u8 *datap,
+					       int len)
+{
+	struct wmix_hb_challenge_resp_cmd *cmd;
+
+	if (len < sizeof(struct wmix_hb_challenge_resp_cmd))
+		return;
+
+	cmd = (struct wmix_hb_challenge_resp_cmd *) datap;
+	ath6kl_recovery_hb_event(wmi->parent_dev,
+				 le32_to_cpu(cmd->cookie));
+}
+
 static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
 {
 	struct wmix_cmd_hdr *cmd;
@@ -3658,6 +3810,7 @@
 	switch (id) {
 	case WMIX_HB_CHALLENGE_RESP_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n");
+		ath6kl_wmi_hb_challenge_resp_event(wmi, datap, len);
 		break;
 	case WMIX_DBGLOG_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len);
@@ -3750,6 +3903,9 @@
 	case WMI_RX_ACTION_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
 		return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
+	case WMI_TXE_NOTIFY_EVENTID:
+		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TXE_NOTIFY_EVENTID\n");
+		return ath6kl_wmi_txe_notify_event_rx(wmi, datap, len, vif);
 	default:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
 		return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 43339ac..98b1755 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -48,7 +48,7 @@
 
 #define A_BAND_24GHZ           0
 #define A_BAND_5GHZ            1
-#define A_NUM_BANDS            2
+#define ATH6KL_NUM_BANDS       2
 
 /* in ms */
 #define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
@@ -628,6 +628,20 @@
 	WMI_SET_MCASTRATE,
 
 	WMI_STA_BMISS_ENHANCE_CMDID,
+
+	WMI_SET_REGDOMAIN_CMDID,
+
+	WMI_SET_RSSI_FILTER_CMDID,
+
+	WMI_SET_KEEP_ALIVE_EXT,
+
+	WMI_VOICE_DETECTION_ENABLE_CMDID,
+
+	WMI_SET_TXE_NOTIFY_CMDID,
+
+	WMI_SET_RECOVERY_TEST_PARAMETER_CMDID, /* 0xf094 */
+
+	WMI_ENABLE_SCHED_SCAN_CMDID,
 };
 
 enum wmi_mgmt_frame_type {
@@ -843,7 +857,7 @@
 	u8 scan_type;
 
 	/* Supported rates to advertise in the probe request frames */
-	struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+	struct wmi_supp_rates supp_rates[ATH6KL_NUM_BANDS];
 
 	/* how many channels follow */
 	u8 num_ch;
@@ -941,6 +955,11 @@
 	__le32 max_dfsch_act_time;
 } __packed;
 
+/* WMI_ENABLE_SCHED_SCAN_CMDID */
+struct wmi_enable_sched_scan_cmd {
+	u8 enable;
+} __packed;
+
 /* WMI_SET_BSS_FILTER_CMDID */
 enum wmi_bss_filter {
 	/* no beacons forwarded */
@@ -1032,6 +1051,11 @@
 	u8 enable;
 } __packed;
 
+struct wmi_set_regdomain_cmd {
+	u8 length;
+	u8 iso_name[2];
+} __packed;
+
 /* WMI_SET_POWER_MODE_CMDID */
 enum wmi_power_mode {
 	REC_POWER = 0x01,
@@ -1276,6 +1300,11 @@
 	u8 reserved[3];
 } __packed;
 
+/* Don't report BSSs with signal (RSSI) below this threshold */
+struct wmi_set_rssi_filter_cmd {
+	s8 rssi;
+} __packed;
+
 enum wmi_preamble_policy {
 	WMI_IGNORE_BARKER_IN_ERP = 0,
 	WMI_FOLLOW_BARKER_IN_ERP,
@@ -1455,6 +1484,20 @@
 	WMI_P2P_CAPABILITIES_EVENTID,
 	WMI_RX_ACTION_EVENTID,
 	WMI_P2P_INFO_EVENTID,
+
+	/* WPS Events */
+	WMI_WPS_GET_STATUS_EVENTID,
+	WMI_WPS_PROFILE_EVENTID,
+
+	/* more P2P events */
+	WMI_NOA_INFO_EVENTID,
+	WMI_OPPPS_INFO_EVENTID,
+	WMI_PORT_STATUS_EVENTID,
+
+	/* 802.11w */
+	WMI_GET_RSN_CAP_EVENTID,
+
+	WMI_TXE_NOTIFY_EVENTID,
 };
 
 struct wmi_ready_event_2 {
@@ -1749,6 +1792,9 @@
 	a_sle32 ucast_rate;
 } __packed;
 
+#define RATE_INDEX_WITHOUT_SGI_MASK     0x7f
+#define RATE_INDEX_MSB     0x80
+
 struct tkip_ccmp_stats {
 	__le32 tkip_local_mic_fail;
 	__le32 tkip_cnter_measures_invoked;
@@ -2019,7 +2065,6 @@
 
 #define WOW_MAX_FILTERS_PER_LIST 4
 #define WOW_PATTERN_SIZE	 64
-#define WOW_MASK_SIZE		 64
 
 #define MAC_MAX_FILTERS_PER_LIST 4
 
@@ -2028,7 +2073,7 @@
 	u8 wow_filter_id;
 	u8 wow_filter_size;
 	u8 wow_filter_offset;
-	u8 wow_filter_mask[WOW_MASK_SIZE];
+	u8 wow_filter_mask[WOW_PATTERN_SIZE];
 	u8 wow_filter_pattern[WOW_PATTERN_SIZE];
 } __packed;
 
@@ -2087,6 +2132,19 @@
 	__le16 filter_id;
 } __packed;
 
+/* WMI_SET_TXE_NOTIFY_CMDID */
+struct wmi_txe_notify_cmd {
+	__le32 rate;
+	__le32 pkts;
+	__le32 intvl;
+} __packed;
+
+/* WMI_TXE_NOTIFY_EVENTID */
+struct wmi_txe_notify_event {
+	__le32 rate;
+	__le32 pkts;
+} __packed;
+
 /* WMI_SET_AKMP_PARAMS_CMD */
 
 struct wmi_pmkid {
@@ -2505,11 +2563,6 @@
 int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
 			     u16 channel);
 int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
-			     enum wmi_scan_type scan_type,
-			     u32 force_fgscan, u32 is_legacy,
-			     u32 home_dwell_time, u32 force_scan_interval,
-			     s8 num_chan, u16 *ch_list);
 
 int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
 			     enum wmi_scan_type scan_type,
@@ -2517,6 +2570,7 @@
 			     u32 home_dwell_time, u32 force_scan_interval,
 			     s8 num_chan, u16 *ch_list, u32 no_cck,
 			     u32 *rates);
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable);
 
 int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
 			      u16 fg_end_sec, u16 bg_sec,
@@ -2592,6 +2646,7 @@
 				   const u8 *mask);
 int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
 				   u16 list_id, u16 filter_id);
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
 int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
 int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
 int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
@@ -2600,6 +2655,9 @@
 int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
 					u8 *filter, bool add_filter);
 int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+			      u32 rate, u32 pkts, u32 intvl);
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2);
 
 /* AP mode uAPSD */
 int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
@@ -2658,6 +2716,8 @@
 
 void ath6kl_wmi_sscan_timer(unsigned long ptr);
 
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+
 struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
 void *ath6kl_wmi_init(struct ath6kl *devt);
 void ath6kl_wmi_shutdown(struct wmi *wmi);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 84b558d..8b0d8dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -276,6 +276,11 @@
 				offset_array[i],
 				REG_READ(ah, offset_array[i]));
 
+			if (AR_SREV_9565(ah) &&
+			    (iCoff == 63 || qCoff == 63 ||
+			     iCoff == -63 || qCoff == -63))
+				return;
+
 			REG_RMW_FIELD(ah, offset_array[i],
 				      AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
 				      iCoff);
@@ -886,6 +891,74 @@
 		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
 }
 
+static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
+{
+	int offset[8], total = 0, test;
+	int agc_out, i;
+
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+		      AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
+	if (is_2g)
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+			      AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
+	else
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+			      AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+		      AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+		      AR_PHY_65NM_RXTX2_RXON, 0x0);
+
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+		      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+		      AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
+	if (is_2g)
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+	else
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+
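+	/*
+	 * Successive approximation: try each CALDAC bit from MSB to LSB,
+	 * keeping the bit only when AGC_OUT reads back zero.
+	 */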
+	for (i = 6; i > 0; i--) {
+		offset[i] = BIT(i - 1);
+		test = total + offset[i];
+
+		if (is_2g)
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+				      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+				      test);
+		else
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+				      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+				      test);
+		udelay(100);
+		agc_out = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+					 AR_PHY_65NM_RXRF_AGC_AGC_OUT);
+		offset[i] = (agc_out) ? 0 : 1;
+		total += (offset[i] << (i - 1));
+	}
+
+	if (is_2g)
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, total);
+	else
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
+
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+		      AR_PHY_65NM_RXTX2_RXON_OVR, 0);
+	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
+}
+
 static bool ar9003_hw_init_cal(struct ath_hw *ah,
 			       struct ath9k_channel *chan)
 {
@@ -984,6 +1057,14 @@
 		status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
 				       AR_PHY_AGC_CONTROL_CAL,
 				       0, AH_WAIT_TIMEOUT);
+		if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+				if (!(ah->rxchainmask & (1 << i)))
+					continue;
+				ar9003_hw_manual_peak_cal(ah, i,
+							  IS_CHAN_2GHZ(chan));
+			}
+		}
 	}
 
 	if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bbe505..c86cb640 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -18,6 +18,7 @@
 #include "hw.h"
 #include "ar9003_phy.h"
 #include "ar9003_eeprom.h"
+#include "ar9003_mci.h"
 
 #define COMP_HDR_LEN 4
 #define COMP_CKSUM_LEN 2
@@ -41,7 +42,6 @@
 static int ar9003_hw_power_interpolate(int32_t x,
 				       int32_t *px, int32_t *py, u_int16_t np);
 
-
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
@@ -2989,7 +2989,7 @@
 	case EEP_PAPRD:
 		if (AR_SREV_9462(ah))
 			return false;
-		if (!ah->config.enable_paprd);
+		if (!ah->config.enable_paprd)
 			return false;
 		return !!(pBase->featureEnable & BIT(5));
 	case EEP_CHAIN_MASK_REDUCE:
@@ -3601,7 +3601,7 @@
 	 *   7:4 R/W  SWITCH_TABLE_COM_SPDT_WLAN_IDLE
 	 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
 	 */
-	if (AR_SREV_9462_20_OR_LATER(ah)) {
+	if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
 		value = ar9003_switch_com_spdt_get(ah, is2ghz);
 		REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
 				AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -5037,16 +5037,28 @@
 		case CTL_5GHT20:
 		case CTL_2GHT20:
 			for (i = ALL_TARGET_HT20_0_8_16;
-			     i <= ALL_TARGET_HT20_23; i++)
+			     i <= ALL_TARGET_HT20_23; i++) {
 				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
 						       minCtlPower);
+				if (ath9k_hw_mci_is_enabled(ah))
+					pPwrArray[i] =
+						(u8)min((u16)pPwrArray[i],
+						ar9003_mci_get_max_txpower(ah,
+							pCtlMode[ctlMode]));
+			}
 			break;
 		case CTL_5GHT40:
 		case CTL_2GHT40:
 			for (i = ALL_TARGET_HT40_0_8_16;
-			     i <= ALL_TARGET_HT40_23; i++)
+			     i <= ALL_TARGET_HT40_23; i++) {
 				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
 						       minCtlPower);
+				if (ath9k_hw_mci_is_enabled(ah))
+					pPwrArray[i] =
+						(u8)min((u16)pPwrArray[i],
+						ar9003_mci_get_max_txpower(ah,
+							pCtlMode[ctlMode]));
+			}
 			break;
 		default:
 			break;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1a36fa2..74fd397 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -35,12 +35,6 @@
  */
 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 {
-#define AR9462_BB_CTX_COEFJ(x)	\
-		ar9462_##x##_baseband_core_txfir_coeff_japan_2484
-
-#define AR9462_BBC_TXIFR_COEFFJ \
-		ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
 	if (AR_SREV_9330_11(ah)) {
 		/* mac */
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -70,6 +64,10 @@
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 				ar9331_modes_lowest_ob_db_tx_gain_1p1);
 
+		/* Japan 2484 MHz CCK */
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9331_1p1_baseband_core_txfir_coeff_japan_2484);
+
 		/* additional clock settings */
 		if (ah->is_clk_25mhz)
 			INIT_INI_ARRAY(&ah->iniAdditional,
@@ -106,6 +104,10 @@
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 				ar9331_modes_lowest_ob_db_tx_gain_1p2);
 
+		/* Japan 2484 MHz CCK */
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9331_1p2_baseband_core_txfir_coeff_japan_2484);
+
 		/* additional clock settings */
 		if (ah->is_clk_25mhz)
 			INIT_INI_ARRAY(&ah->iniAdditional,
@@ -180,6 +182,10 @@
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 				ar9485_modes_lowest_ob_db_tx_gain_1_1);
 
+		/* Japan 2484 MHz CCK */
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
+
 		/* Load PCIE SERDES settings from INI */
 
 		/* Awake Setting */
@@ -219,19 +225,17 @@
 
 		/* Awake -> Sleep Setting */
 		INIT_INI_ARRAY(&ah->iniPcieSerdes,
-			       ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+			       ar9462_pciephy_clkreq_disable_L1_2p0);
 		/* Sleep -> Awake Setting */
 		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-			       ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+			       ar9462_pciephy_clkreq_disable_L1_2p0);
 
 		/* Fast clock modal settings */
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
 				ar9462_modes_fast_clock_2p0);
 
 		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
-				AR9462_BB_CTX_COEFJ(2p0));
-
-		INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ);
+			       ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
 	} else if (AR_SREV_9550(ah)) {
 		/* mac */
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -328,9 +332,9 @@
 			       ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
 
 		INIT_INI_ARRAY(&ah->iniPcieSerdes,
-			       ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+			       ar9565_1p0_pciephy_clkreq_disable_L1);
 		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-			       ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+			       ar9565_1p0_pciephy_clkreq_disable_L1);
 
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
 				ar9565_1p0_modes_fast_clock);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 44c202c..8dd0692 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,7 +714,6 @@
 
 	return true;
 }
-EXPORT_SYMBOL(ar9003_mci_start_reset);
 
 int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 			 struct ath9k_hw_cal_data *caldata)
@@ -750,6 +749,9 @@
 
 	mci_hw->bt_state = MCI_BT_AWAKE;
 
+	REG_CLR_BIT(ah, AR_PHY_TIMING4,
+		    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
 	if (caldata) {
 		caldata->done_txiqcal_once = false;
 		caldata->done_txclcal_once = false;
@@ -759,6 +761,9 @@
 	if (!ath9k_hw_init_cal(ah, chan))
 		return -EIO;
 
+	REG_SET_BIT(ah, AR_PHY_TIMING4,
+		    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
 exit:
 	ar9003_mci_enable_interrupt(ah);
 	return 0;
@@ -799,6 +804,9 @@
 	REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
 		      AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
 
+	if (AR_SREV_9565(ah))
+		REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);
+
 	if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
 		thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
 		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
@@ -818,7 +826,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
-	u32 regval;
+	u32 regval, i;
 
 	ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
 		is_full_sleep, is_2g);
@@ -847,11 +855,18 @@
 		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
 		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
 		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
-		 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-		 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
 		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
 		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
 		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+	if (AR_SREV_9565(ah)) {
+		regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+			  SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+			      AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+	} else {
+		regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+			  SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+	}
 
 	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
 
@@ -865,9 +880,24 @@
 	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
 		      AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
 
-	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
 	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
+	/* Set the time out to 3.125ms (5 BT slots) */
+	REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);
+
+	/* concurrent tx priority */
+	if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
+		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+			      AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
+		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+			      AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
+		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+			      AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
+		for (i = 0; i < 8; i++)
+			REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
+	}
+
 	regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
 	REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
 	REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
@@ -910,6 +940,9 @@
 	mci->ready = true;
 	ar9003_mci_prep_interface(ah);
 
+	if (AR_SREV_9565(ah))
+		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+			      AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
 	if (en_int)
 		ar9003_mci_enable_interrupt(ah);
 
@@ -1028,7 +1061,9 @@
 
 		if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
 			ar9003_mci_osla_setup(ah, true);
-		REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
+
+		if (AR_SREV_9462(ah))
+			REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
 	} else {
 		ar9003_mci_send_lna_take(ah, true);
 		udelay(5);
@@ -1170,7 +1205,7 @@
 u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
 {
 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
-	u32 value = 0;
+	u32 value = 0, tsf;
 	u8 query_type;
 
 	switch (state_type) {
@@ -1228,6 +1263,14 @@
 		ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
 		break;
 	case MCI_STATE_RECOVER_RX:
+		tsf = ath9k_hw_gettsf32(ah);
+		if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
+			ath_dbg(ath9k_hw_common(ah), MCI,
+				"(MCI) ignore Rx recovery\n");
+			break;
+		}
+		ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");
+		mci->last_recovery = tsf;
 		ar9003_mci_prep_interface(ah);
 		mci->query_bt = true;
 		mci->need_flush_btinfo = true;
@@ -1426,3 +1469,17 @@
 	ar9003_mci_send_coex_wlan_channels(ah, true);
 }
 EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
+
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+	if (!ah->btcoex_hw.mci.concur_tx)
+		goto out;
+
+	if (ctlmode == CTL_2GHT20)
+		return ATH_BTCOEX_HT20_MAX_TXPOWER;
+	else if (ctlmode == CTL_2GHT40)
+		return ATH_BTCOEX_HT40_MAX_TXPOWER;
+
+out:
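+	/* -1 is 0xffff as a u16, i.e. effectively no cap for the min() callers */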
+	return -1;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 2a2d018..66d7ab9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -18,6 +18,7 @@
 #define AR9003_MCI_H
 
 #define MCI_FLAG_DISABLE_TIMESTAMP      0x00000001      /* Disable time stamp */
+#define MCI_RECOVERY_DUR_TSF		(100 * 1000)    /* 100 ms */
 
 /* Default remote BT device MCI COEX version */
 #define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT  3
@@ -125,6 +126,7 @@
 	MCI_GPM_COEX_PROFILE_HID,
 	MCI_GPM_COEX_PROFILE_BNEP,
 	MCI_GPM_COEX_PROFILE_VOICE,
+	MCI_GPM_COEX_PROFILE_A2DPVO,
 	MCI_GPM_COEX_PROFILE_MAX
 };
 
@@ -196,7 +198,6 @@
 	MCI_STATE_SEND_WLAN_COEX_VERSION,
 	MCI_STATE_SEND_VERSION_QUERY,
 	MCI_STATE_SEND_STATUS_QUERY,
-	MCI_STATE_SET_CONCUR_TX_PRI,
 	MCI_STATE_RECOVER_RX,
 	MCI_STATE_NEED_FTP_STOMP,
 	MCI_STATE_DEBUG,
@@ -278,6 +279,7 @@
 void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
 void ar9003_mci_set_power_awake(struct ath_hw *ah);
 void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode);
 
 #else
 
@@ -324,6 +326,10 @@
 static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
 {
 }
+static inline u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+	return -1;
+}
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 759f5f5..ce19c09 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -784,7 +784,7 @@
 	REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
 
 	if (chan->channel == 2484)
-		ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
+		ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
 
 	if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
 		REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 9a48e3d..4c3d06d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -32,6 +32,7 @@
 #define AR_PHY_SPUR_REG     (AR_CHAN_BASE + 0x1c)
 #define AR_PHY_RX_IQCAL_CORR_B0    (AR_CHAN_BASE + 0xdc)
 #define AR_PHY_TX_IQCAL_CONTROL_3  (AR_CHAN_BASE + 0xb0)
+#define AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT 16
 
 #define AR_PHY_TIMING11_SPUR_FREQ_SD    0x3FF00000
 #define AR_PHY_TIMING11_SPUR_FREQ_SD_S  20
@@ -697,13 +698,6 @@
 #define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT   0x0000ff00
 #define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
 
-#define AR_PHY_65NM_CH0_RXTX1       0x16100
-#define AR_PHY_65NM_CH0_RXTX2       0x16104
-#define AR_PHY_65NM_CH1_RXTX1       0x16500
-#define AR_PHY_65NM_CH1_RXTX2       0x16504
-#define AR_PHY_65NM_CH2_RXTX1       0x16900
-#define AR_PHY_65NM_CH2_RXTX2       0x16904
-
 #define AR_CH0_TOP2		(AR_SREV_9300(ah) ? 0x1628c : \
 					(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
 #define AR_CH0_TOP2_XPABIASLVL		0xf000
@@ -1285,4 +1279,43 @@
 #define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD          0xFC000000
 #define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S        26
 
+/* Manual Peak detector calibration */
+#define AR_PHY_65NM_BASE                               0x16000
+#define AR_PHY_65NM_RXRF_GAINSTAGES(i)                 (AR_PHY_65NM_BASE + \
+							(i * 0x400) + 0x8)
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE        0x80000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE_S      31
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC        0x00000002
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC_S      1
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR     0x70000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_S   28
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR     0x03800000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_S   23
+
+#define AR_PHY_65NM_RXTX2(i)                           (AR_PHY_65NM_BASE + \
+							(i * 0x400) + 0x104)
+#define AR_PHY_65NM_RXTX2_RXON_OVR                     0x00001000
+#define AR_PHY_65NM_RXTX2_RXON_OVR_S                   12
+#define AR_PHY_65NM_RXTX2_RXON                         0x00000800
+#define AR_PHY_65NM_RXTX2_RXON_S                       11
+
+#define AR_PHY_65NM_RXRF_AGC(i)                        (AR_PHY_65NM_BASE + \
+							(i * 0x400) + 0xc)
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE              0x80000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE_S            31
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR                0x40000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR_S              30
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR               0x20000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR_S             29
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR           0x1E000000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR_S         25
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR           0x00078000
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR_S         15
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR          0x01F80000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR_S        19
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR          0x00007e00
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR_S        9
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT                   0x00000004
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S                 2
+
 #endif  /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 58f30f6..ccc42a7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -78,7 +78,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index fb4497f..a3710f3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -18,7 +18,7 @@
 #ifndef INITVALS_9485_H
 #define INITVALS_9485_H
 
-/* AR9485 1.0 */
+/* AR9485 1.1 */
 
 #define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
 
@@ -31,6 +31,11 @@
 
 static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
 	/* Addr      allmodes  */
+	{0x00009e00, 0x037216a0},
+	{0x00009e04, 0x00182020},
+	{0x00009e18, 0x00000000},
+	{0x00009e2c, 0x00004121},
+	{0x00009e44, 0x02282324},
 	{0x0000a000, 0x00060005},
 	{0x0000a004, 0x00810080},
 	{0x0000a008, 0x00830082},
@@ -164,6 +169,11 @@
 static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
 	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
 	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -198,6 +208,22 @@
 	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
 	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
 	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
 	{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -234,9 +260,193 @@
 	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
 };
 
-#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+	{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+	{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+	{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+	{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+	{0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+	{0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+	{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+	{0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+	{0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+	{0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
 
-#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+	{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+	{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+	{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+	{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+	{0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+	{0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+	{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+	{0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+	{0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+	{0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
 
 #define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
 
@@ -245,19 +455,19 @@
 	{0x0000a580, 0x00000000},
 	{0x0000a584, 0x00000000},
 	{0x0000a588, 0x00000000},
-	{0x0000a58c, 0x00000000},
-	{0x0000a590, 0x00000000},
-	{0x0000a594, 0x00000000},
-	{0x0000a598, 0x00000000},
-	{0x0000a59c, 0x00000000},
-	{0x0000a5a0, 0x00000000},
-	{0x0000a5a4, 0x00000000},
-	{0x0000a5a8, 0x00000000},
-	{0x0000a5ac, 0x00000000},
-	{0x0000a5b0, 0x00000000},
-	{0x0000a5b4, 0x00000000},
-	{0x0000a5b8, 0x00000000},
-	{0x0000a5bc, 0x00000000},
+	{0x0000a58c, 0x01804000},
+	{0x0000a590, 0x02808a02},
+	{0x0000a594, 0x0340ca02},
+	{0x0000a598, 0x0340cd03},
+	{0x0000a59c, 0x0340cd03},
+	{0x0000a5a0, 0x06415304},
+	{0x0000a5a4, 0x04c11905},
+	{0x0000a5a8, 0x06415905},
+	{0x0000a5ac, 0x06415905},
+	{0x0000a5b0, 0x06415905},
+	{0x0000a5b4, 0x06415905},
+	{0x0000a5b8, 0x06415905},
+	{0x0000a5bc, 0x06415905},
 };
 
 static const u32 ar9485_1_1_radio_core[][2] = {
@@ -340,7 +550,7 @@
 	{0x00009880, 0x201fff00},
 	{0x00009884, 0x00001042},
 	{0x000098a4, 0x00200400},
-	{0x000098b0, 0x52440bbe},
+	{0x000098b0, 0x32840bbe},
 	{0x000098d0, 0x004b6a8e},
 	{0x000098d4, 0x00000820},
 	{0x000098dc, 0x00000000},
@@ -362,7 +572,7 @@
 	{0x00009d18, 0x00000000},
 	{0x00009d1c, 0x00000000},
 	{0x00009e08, 0x0038233c},
-	{0x00009e24, 0x9927b515},
+	{0x00009e24, 0x992bb515},
 	{0x00009e28, 0x12ef0200},
 	{0x00009e30, 0x06336f77},
 	{0x00009e34, 0x6af6532f},
@@ -427,7 +637,7 @@
 	{0x0000a408, 0x0e79e5c6},
 	{0x0000a40c, 0x00820820},
 	{0x0000a414, 0x1ce739cf},
-	{0x0000a418, 0x2d0019ce},
+	{0x0000a418, 0x2d0021ce},
 	{0x0000a41c, 0x1ce739ce},
 	{0x0000a420, 0x000001ce},
 	{0x0000a424, 0x1ce739ce},
@@ -443,8 +653,8 @@
 	{0x0000a44c, 0x00000001},
 	{0x0000a450, 0x00010000},
 	{0x0000a5c4, 0xbfad9d74},
-	{0x0000a5c8, 0x0048060a},
-	{0x0000a5cc, 0x00000637},
+	{0x0000a5c8, 0x00480605},
+	{0x0000a5cc, 0x00002e37},
 	{0x0000a760, 0x03020100},
 	{0x0000a764, 0x09080504},
 	{0x0000a768, 0x0d0c0b0a},
@@ -464,17 +674,22 @@
 
 static const u32 ar9485_common_rx_gain_1_1[][2] = {
 	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x01800082},
-	{0x0000a014, 0x01820181},
-	{0x0000a018, 0x01840183},
-	{0x0000a01c, 0x01880185},
-	{0x0000a020, 0x018a0189},
-	{0x0000a024, 0x02850284},
-	{0x0000a028, 0x02890288},
+	{0x00009e00, 0x03721b20},
+	{0x00009e04, 0x00082020},
+	{0x00009e18, 0x0300501e},
+	{0x00009e2c, 0x00002e21},
+	{0x00009e44, 0x02182324},
+	{0x0000a000, 0x00060005},
+	{0x0000a004, 0x00810080},
+	{0x0000a008, 0x00830082},
+	{0x0000a00c, 0x00850084},
+	{0x0000a010, 0x01820181},
+	{0x0000a014, 0x01840183},
+	{0x0000a018, 0x01880185},
+	{0x0000a01c, 0x018a0189},
+	{0x0000a020, 0x02850284},
+	{0x0000a024, 0x02890288},
+	{0x0000a028, 0x028b028a},
 	{0x0000a02c, 0x03850384},
 	{0x0000a030, 0x03890388},
 	{0x0000a034, 0x038b038a},
@@ -496,15 +711,15 @@
 	{0x0000a074, 0x00000000},
 	{0x0000a078, 0x00000000},
 	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x28282828},
-	{0x0000a084, 0x28282828},
-	{0x0000a088, 0x28282828},
-	{0x0000a08c, 0x28282828},
-	{0x0000a090, 0x28282828},
-	{0x0000a094, 0x21212128},
-	{0x0000a098, 0x171c1c1c},
-	{0x0000a09c, 0x02020212},
-	{0x0000a0a0, 0x00000202},
+	{0x0000a080, 0x18181818},
+	{0x0000a084, 0x18181818},
+	{0x0000a088, 0x18181818},
+	{0x0000a08c, 0x18181818},
+	{0x0000a090, 0x18181818},
+	{0x0000a094, 0x18181818},
+	{0x0000a098, 0x17181818},
+	{0x0000a09c, 0x02020b0b},
+	{0x0000a0a0, 0x02020202},
 	{0x0000a0a4, 0x00000000},
 	{0x0000a0a8, 0x00000000},
 	{0x0000a0ac, 0x00000000},
@@ -512,22 +727,22 @@
 	{0x0000a0b4, 0x00000000},
 	{0x0000a0b8, 0x00000000},
 	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x111f1100},
-	{0x0000a0c8, 0x111d111e},
-	{0x0000a0cc, 0x111b111c},
-	{0x0000a0d0, 0x22032204},
-	{0x0000a0d4, 0x22012202},
-	{0x0000a0d8, 0x221f2200},
-	{0x0000a0dc, 0x221d221e},
-	{0x0000a0e0, 0x33013302},
-	{0x0000a0e4, 0x331f3300},
-	{0x0000a0e8, 0x4402331e},
-	{0x0000a0ec, 0x44004401},
-	{0x0000a0f0, 0x441e441f},
-	{0x0000a0f4, 0x55015502},
-	{0x0000a0f8, 0x551f5500},
-	{0x0000a0fc, 0x6602551e},
+	{0x0000a0c0, 0x22072208},
+	{0x0000a0c4, 0x22052206},
+	{0x0000a0c8, 0x22032204},
+	{0x0000a0cc, 0x22012202},
+	{0x0000a0d0, 0x221f2200},
+	{0x0000a0d4, 0x221d221e},
+	{0x0000a0d8, 0x33023303},
+	{0x0000a0dc, 0x33003301},
+	{0x0000a0e0, 0x331e331f},
+	{0x0000a0e4, 0x4402331d},
+	{0x0000a0e8, 0x44004401},
+	{0x0000a0ec, 0x441e441f},
+	{0x0000a0f0, 0x55025503},
+	{0x0000a0f4, 0x55005501},
+	{0x0000a0f8, 0x551e551f},
+	{0x0000a0fc, 0x6602551d},
 	{0x0000a100, 0x66006601},
 	{0x0000a104, 0x661e661f},
 	{0x0000a108, 0x7703661d},
@@ -636,17 +851,12 @@
 	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
 	{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
-	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
-	{0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
 	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
 	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
-	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
-	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
-	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
 	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
-	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
 	{0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -850,4 +1060,6 @@
 	{0x000083d0, 0x000301ff},
 };
 
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
 #endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 843e79f..0c2ac0c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -768,9 +768,9 @@
 	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 };
 
-static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes  */
-	{0x00018c00, 0x18212ede},
+	{0x00018c00, 0x18213ede},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0003780c},
 };
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index dfe6a47..80bab1b 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -129,10 +129,10 @@
 #define ATH_TXMAXTRY            13
 
 #define TID_TO_WME_AC(_tid)				\
-	((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :	\
-	 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK :	\
-	 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI :	\
-	 WME_AC_VO)
+	((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE :	\
+	 (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK :	\
+	 (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI :	\
+	 IEEE80211_AC_VO)
 
 #define ATH_AGGR_DELIM_SZ          4
 #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
@@ -259,13 +259,10 @@
 };
 
 struct ath_node {
-#ifdef CONFIG_ATH9K_DEBUGFS
-	struct list_head list; /* for sc->nodes */
-#endif
 	struct ieee80211_sta *sta; /* station struct we're part of */
 	struct ieee80211_vif *vif; /* interface with which we're associated */
 	struct ath_atx_tid tid[WME_NUM_TID];
-	struct ath_atx_ac ac[WME_NUM_AC];
+	struct ath_atx_ac ac[IEEE80211_NUM_ACS];
 	int ps_key;
 
 	u16 maxampdu;
@@ -299,9 +296,9 @@
 	struct list_head txbuf;
 	struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
 	struct ath_descdma txdma;
-	struct ath_txq *txq_map[WME_NUM_AC];
-	u32 txq_max_pending[WME_NUM_AC];
-	u16 max_aggr_framelen[WME_NUM_AC][4][32];
+	struct ath_txq *txq_map[IEEE80211_NUM_ACS];
+	u32 txq_max_pending[IEEE80211_NUM_ACS];
+	u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
 };
 
 struct ath_rx_edma {
@@ -437,6 +434,7 @@
 #define ATH_LONG_CALINTERVAL_INT  1000    /* 1000 ms */
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
+#define ATH_ANI_MAX_SKIP_COUNT  10
 
 #define ATH_PAPRD_TIMEOUT	100 /* msecs */
 #define ATH_PLL_WORK_INTERVAL   100
@@ -460,6 +458,12 @@
 /* BTCOEX */
 /**********/
 
+#define ATH_DUMP_BTCOEX(_s, _val)				\
+	do {							\
+		len += snprintf(buf + len, size - len,		\
+				"%20s : %10d\n", _s, (_val));	\
+	} while (0)
+
 enum bt_op_flags {
 	BT_OP_PRIORITY_DETECTED,
 	BT_OP_SCAN,
@@ -478,8 +482,10 @@
 	u32 btscan_no_stomp; /* in usec */
 	u32 duty_cycle;
 	u32 bt_wait_time;
+	int rssi_count;
 	struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
 	struct ath_mci_profile mci;
+	u8 stomp_audio;
 };
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -492,6 +498,7 @@
 void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size);
 #else
 static inline int ath9k_init_btcoex(struct ath_softc *sc)
 {
@@ -518,6 +525,10 @@
 static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
 {
 }
+static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+	return 0;
+}
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
 struct ath9k_wow_pattern {
@@ -642,6 +653,7 @@
 #define PS_WAIT_FOR_PSPOLL_DATA   BIT(2)
 #define PS_WAIT_FOR_TX_ACK        BIT(3)
 #define PS_BEACON_SYNC            BIT(4)
+#define PS_WAIT_FOR_ANI           BIT(5)
 
 struct ath_rate_table;
 
@@ -708,9 +720,6 @@
 
 #ifdef CONFIG_ATH9K_DEBUGFS
 	struct ath9k_debug debug;
-	spinlock_t nodes_lock;
-	struct list_head nodes; /* basically, stations */
-	unsigned int tx_complete_poll_work_seen;
 #endif
 	struct ath_beacon_config cur_beacon_conf;
 	struct delayed_work tx_complete_work;
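
The TID_TO_WME_AC() macro above now maps 802.1D TIDs straight onto mac80211's access-category constants instead of the driver-private WME_AC_* values. A minimal sketch of the same mapping, assuming mac80211's usual numbering (IEEE80211_AC_VO = 0, VI = 1, BE = 2, BK = 3); the enum below is a local stand-in for those constants:

/* Minimal sketch of the TID -> access-category table behind
 * TID_TO_WME_AC(); TIDs 6 and 7 fall through to the voice category,
 * matching the macro's final branch. */
enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

static int tid_to_ac(int tid)
{
	static const int map[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE,	/* TIDs 0-3 */
		AC_VI, AC_VI, AC_VO, AC_VO,	/* TIDs 4-7 */
	};

	return map[tid & 7];
}
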
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1b48414..531fffd 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -46,7 +46,7 @@
 		qi.tqi_cwmax = 0;
 	} else {
 		/* Adhoc mode; important thing is to use 2x cwmin. */
-		txq = sc->tx.txq_map[WME_AC_BE];
+		txq = sc->tx.txq_map[IEEE80211_AC_BE];
 		ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
 		qi.tqi_aifs = qi_be.tqi_aifs;
 		if (ah->slottime == ATH9K_SLOT_TIME_20)
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 419e9a3..9963b0b 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -49,6 +49,7 @@
 	{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
 	{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
 	{ 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+	{ 0xffffff01, 0xffffffff, 0xffffff01, 0xffffffff }, /* STOMP_AUDIO */
 };
 
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
@@ -195,7 +196,7 @@
 	ah->btcoex_hw.mci.need_flush_btinfo = false;
 	ah->btcoex_hw.mci.wlan_cal_seq = 0;
 	ah->btcoex_hw.mci.wlan_cal_done = 0;
-	ah->btcoex_hw.mci.config = 0x2201;
+	ah->btcoex_hw.mci.config = (AR_SREV_9462(ah)) ? 0x2201 : 0xa4c1;
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
 
@@ -218,27 +219,45 @@
 				enum ath_stomp_type stomp_type)
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+	u8 txprio_shift[] = { 24, 16, 16, 0 }; /* tx priority weight */
+	bool concur_tx = (mci_hw->concur_tx && btcoex_hw->tx_prio[stomp_type]);
+	const u32 *weight = ar9003_wlan_weights[stomp_type];
+	int i;
 
-	if (AR_SREV_9300_20_OR_LATER(ah)) {
-		const u32 *weight = ar9003_wlan_weights[stomp_type];
-		int i;
-
-		if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-			if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
-			    btcoex_hw->mci.stomp_ftp)
-				stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
-			weight = mci_wlan_weights[stomp_type];
-		}
-
-		for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
-			btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
-			btcoex_hw->wlan_weight[i] = weight[i];
-		}
-	} else {
+	if (!AR_SREV_9300_20_OR_LATER(ah)) {
 		btcoex_hw->bt_coex_weights =
 			SM(bt_weight, AR_BTCOEX_BT_WGHT) |
 			SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+		return;
 	}
+
+	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+		enum ath_stomp_type stype =
+			((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+			 btcoex_hw->mci.stomp_ftp) ?
+			ATH_BTCOEX_STOMP_LOW_FTP : stomp_type;
+		weight = mci_wlan_weights[stype];
+	}
+
+	for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+		btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+		btcoex_hw->wlan_weight[i] = weight[i];
+		if (concur_tx && i) {
+			btcoex_hw->wlan_weight[i] &=
+				~(0xff << txprio_shift[i-1]);
+			btcoex_hw->wlan_weight[i] |=
+				(btcoex_hw->tx_prio[stomp_type] <<
+				 txprio_shift[i-1]);
+		}
+	}
+	/* Last WLAN weight has to be adjusted wrt tx priority */
+	if (concur_tx) {
+		btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
+		btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
+						      << txprio_shift[i-1]);
+	}
+
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
@@ -385,3 +404,13 @@
 	}
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
+
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio)
+{
+	struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+	int i;
+
+	for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+		btcoex->tx_prio[i] = stomp_txprio[i];
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_concur_txprio);
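
When concurrent tx is active, ath9k_hw_btcoex_set_weight() above splices the 8-bit tx priority for the current stomp type into the WLAN weight words at the bit offsets listed in txprio_shift[]. A small standalone sketch of that byte replacement:

#include <stdint.h>

/* Replace one priority byte inside a 32-bit coex weight word: clear the
 * old byte at the given bit offset, then OR in the new priority. */
static uint32_t splice_txprio(uint32_t weight, uint8_t prio, unsigned int shift)
{
	weight &= ~(0xffu << shift);
	weight |= (uint32_t)prio << shift;
	return weight;
}

/* e.g. splice_txprio(0x3b3b3b01, 0x52, 16) == 0x3b523b01 */
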
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 385197ad7..6de26ea 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -39,6 +39,9 @@
 #define ATH_BTCOEX_RX_WAIT_TIME       100
 #define ATH_BTCOEX_STOMP_FTP_THRESH   5
 
+#define ATH_BTCOEX_HT20_MAX_TXPOWER   0x14
+#define ATH_BTCOEX_HT40_MAX_TXPOWER   0x10
+
 #define AR9300_NUM_BT_WEIGHTS   4
 #define AR9300_NUM_WLAN_WEIGHTS 4
 /* Defines the BT AR_BT_COEX_WGHT used */
@@ -47,6 +50,7 @@
 	ATH_BTCOEX_STOMP_LOW,
 	ATH_BTCOEX_STOMP_NONE,
 	ATH_BTCOEX_STOMP_LOW_FTP,
+	ATH_BTCOEX_STOMP_AUDIO,
 	ATH_BTCOEX_STOMP_MAX
 };
 
@@ -84,6 +88,8 @@
 	u8 bt_ver_minor;
 	u8 bt_state;
 	u8 stomp_ftp;
+	bool concur_tx;
+	u32 last_recovery;
 };
 
 struct ath_btcoex_hw {
@@ -98,6 +104,7 @@
 	u32 bt_coex_mode2; 	/* Register setting for AR_BT_COEX_MODE2 */
 	u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
 	u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
+	u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
 };
 
 void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
@@ -112,5 +119,6 @@
 void ath9k_hw_btcoex_disable(struct ath_hw *ah);
 void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
 			      enum ath_stomp_type stomp_type);
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio);
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e5cceb0..f3448a0 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -410,6 +410,7 @@
 
 	ah->caldata->channel = chan->channel;
 	ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
+	ah->caldata->chanmode = chan->chanmode;
 	h = ah->caldata->nfCalHist;
 	default_nf = ath9k_hw_get_default_nf(ah, chan);
 	for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ad14fec..76b5439 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -28,13 +28,6 @@
 #define WME_MAX_BA              WME_BA_BMP_SIZE
 #define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
-/* These must match mac80211 skb queue mapping numbers */
-#define WME_AC_VO   0
-#define WME_AC_VI   1
-#define WME_AC_BE   2
-#define WME_AC_BK   3
-#define WME_NUM_AC  4
-
 #define ATH_RSSI_DUMMY_MARKER   0x127
 #define ATH_RSSI_LPF_LEN 		10
 #define RSSI_LPF_THRESHOLD		-20
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6727b56..939308c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -512,62 +512,19 @@
 	.llseek = default_llseek,
 };
 
-#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
-#define PR(str, elem)							\
-	do {								\
-		len += snprintf(buf + len, size - len,			\
-				"%s%13u%11u%10u%10u\n", str,		\
-		sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
-		sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
-		sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
-		sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
-		if (len >= size)			  \
-			goto done;			  \
-} while(0)
-
-#define PRX(str, elem)							\
-do {									\
-	len += snprintf(buf + len, size - len,				\
-			"%s%13u%11u%10u%10u\n", str,			\
-			(unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem),	\
-			(unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem),	\
-			(unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem),	\
-			(unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem));	\
-	if (len >= size)						\
-		goto done;						\
-} while(0)
-
-#define PRQLE(str, elem)						\
-do {									\
-	len += snprintf(buf + len, size - len,				\
-			"%s%13i%11i%10i%10i\n", str,			\
-			list_empty(&sc->tx.txq_map[WME_AC_BE]->elem),	\
-			list_empty(&sc->tx.txq_map[WME_AC_BK]->elem),	\
-			list_empty(&sc->tx.txq_map[WME_AC_VI]->elem),	\
-			list_empty(&sc->tx.txq_map[WME_AC_VO]->elem));	\
-	if (len >= size)						\
-		goto done;						\
-} while (0)
-
 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
 			      size_t count, loff_t *ppos)
 {
 	struct ath_softc *sc = file->private_data;
 	char *buf;
-	unsigned int len = 0, size = 8000;
-	int i;
+	unsigned int len = 0, size = 2048;
 	ssize_t retval = 0;
-	char tmp[32];
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += sprintf(buf, "Num-Tx-Queues: %i  tx-queues-setup: 0x%x"
-		       " poll-work-seen: %u\n"
-		       "%30s %10s%10s%10s\n\n",
-		       ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
-		       sc->tx_complete_poll_work_seen,
+	len += sprintf(buf, "%30s %10s%10s%10s\n\n",
 		       "BE", "BK", "VI", "VO");
 
 	PR("MPDUs Queued:    ", queued);
@@ -587,62 +544,11 @@
 	PR("DELIM Underrun:  ", delim_underrun);
 	PR("TX-Pkts-All:     ", tx_pkts_all);
 	PR("TX-Bytes-All:    ", tx_bytes_all);
-	PR("hw-put-tx-buf:   ", puttxbuf);
-	PR("hw-tx-start:     ", txstart);
-	PR("hw-tx-proc-desc: ", txprocdesc);
+	PR("HW-put-tx-buf:   ", puttxbuf);
+	PR("HW-tx-start:     ", txstart);
+	PR("HW-tx-proc-desc: ", txprocdesc);
 	PR("TX-Failed:       ", txfailed);
-	len += snprintf(buf + len, size - len,
-			"%s%11p%11p%10p%10p\n", "txq-memory-address:",
-			sc->tx.txq_map[WME_AC_BE],
-			sc->tx.txq_map[WME_AC_BK],
-			sc->tx.txq_map[WME_AC_VI],
-			sc->tx.txq_map[WME_AC_VO]);
-	if (len >= size)
-		goto done;
 
-	PRX("axq-qnum:        ", axq_qnum);
-	PRX("axq-depth:       ", axq_depth);
-	PRX("axq-ampdu_depth: ", axq_ampdu_depth);
-	PRX("axq-stopped      ", stopped);
-	PRX("tx-in-progress   ", axq_tx_inprogress);
-	PRX("pending-frames   ", pending_frames);
-	PRX("txq_headidx:     ", txq_headidx);
-	PRX("txq_tailidx:     ", txq_headidx);
-
-	PRQLE("axq_q empty:       ", axq_q);
-	PRQLE("axq_acq empty:     ", axq_acq);
-	for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
-		snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
-		PRQLE(tmp, txq_fifo[i]);
-	}
-
-	/* Print out more detailed queue-info */
-	for (i = 0; i <= WME_AC_BK; i++) {
-		struct ath_txq *txq = &(sc->tx.txq[i]);
-		struct ath_atx_ac *ac;
-		struct ath_atx_tid *tid;
-		if (len >= size)
-			goto done;
-		spin_lock_bh(&txq->axq_lock);
-		if (!list_empty(&txq->axq_acq)) {
-			ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
-					      list);
-			len += snprintf(buf + len, size - len,
-					"txq[%i] first-ac: %p sched: %i\n",
-					i, ac, ac->sched);
-			if (list_empty(&ac->tid_q) || (len >= size))
-				goto done_for;
-			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
-					       list);
-			len += snprintf(buf + len, size - len,
-					" first-tid: %p sched: %i paused: %i\n",
-					tid, tid->sched, tid->paused);
-		}
-	done_for:
-		spin_unlock_bh(&txq->axq_lock);
-	}
-
-done:
 	if (len > size)
 		len = size;
 
@@ -652,62 +558,41 @@
 	return retval;
 }
 
-static ssize_t read_file_stations(struct file *file, char __user *user_buf,
-				  size_t count, loff_t *ppos)
+static ssize_t read_file_queues(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
 {
 	struct ath_softc *sc = file->private_data;
+	struct ath_txq *txq;
 	char *buf;
-	unsigned int len = 0, size = 64000;
-	struct ath_node *an = NULL;
+	unsigned int len = 0, size = 1024;
 	ssize_t retval = 0;
-	int q;
+	int i;
+	char *qname[4] = {"VO", "VI", "BE", "BK"};
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"Stations:\n"
-			" tid: addr sched paused buf_q-empty an ac baw\n"
-			" ac: addr sched tid_q-empty txq\n");
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = sc->tx.txq_map[i];
+		len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
 
-	spin_lock(&sc->nodes_lock);
-	list_for_each_entry(an, &sc->nodes, list) {
-		unsigned short ma = an->maxampdu;
-		if (ma == 0)
-			ma = 65535; /* see ath_lookup_rate */
-		len += snprintf(buf + len, size - len,
-				"iface: %pM  sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
-				an->vif->addr, an->sta->addr, ma,
-				(unsigned int)(an->mpdudensity));
-		if (len >= size)
-			goto done;
+		ath_txq_lock(sc, txq);
 
-		for (q = 0; q < WME_NUM_TID; q++) {
-			struct ath_atx_tid *tid = &(an->tid[q]);
-			len += snprintf(buf + len, size - len,
-					" tid: %p %s %s %i %p %p %hu\n",
-					tid, tid->sched ? "sched" : "idle",
-					tid->paused ? "paused" : "running",
-					skb_queue_empty(&tid->buf_q),
-					tid->an, tid->ac, tid->baw_size);
-			if (len >= size)
-				goto done;
-		}
+		len += snprintf(buf + len, size - len, "%s: %d ",
+				"qnum", txq->axq_qnum);
+		len += snprintf(buf + len, size - len, "%s: %2d ",
+				"qdepth", txq->axq_depth);
+		len += snprintf(buf + len, size - len, "%s: %2d ",
+				"ampdu-depth", txq->axq_ampdu_depth);
+		len += snprintf(buf + len, size - len, "%s: %3d ",
+				"pending", txq->pending_frames);
+		len += snprintf(buf + len, size - len, "%s: %d\n",
+				"stopped", txq->stopped);
 
-		for (q = 0; q < WME_NUM_AC; q++) {
-			struct ath_atx_ac *ac = &(an->ac[q]);
-			len += snprintf(buf + len, size - len,
-					" ac: %p %s %i %p\n",
-					ac, ac->sched ? "sched" : "idle",
-					list_empty(&ac->tid_q), ac->txq);
-			if (len >= size)
-				goto done;
-		}
+		ath_txq_unlock(sc, txq);
 	}
 
-done:
-	spin_unlock(&sc->nodes_lock);
 	if (len > size)
 		len = size;
 
@@ -837,6 +722,9 @@
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"%17s: %2d\n", "PLL RX Hang",
 			sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"%17s: %2d\n", "MCI Reset",
+			sc->debug.stats.reset[RESET_TYPE_MCI]);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -919,8 +807,8 @@
 	.llseek = default_llseek,
 };
 
-static const struct file_operations fops_stations = {
-	.read = read_file_stations,
+static const struct file_operations fops_queues = {
+	.read = read_file_queues,
 	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
@@ -1586,6 +1474,41 @@
 
 #endif
 
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	u32 len = 0, size = 1500;
+	char *buf;
+	size_t retval;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (!sc->sc_ah->common.btcoex_enabled) {
+		len = snprintf(buf, size, "%s\n",
+			       "BTCOEX is disabled");
+		goto exit;
+	}
+
+	len = ath9k_dump_btcoex(sc, buf, size);
+exit:
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_btcoex = {
+	.read = read_file_btcoex,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+#endif
+
 int ath9k_init_debug(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1609,16 +1532,16 @@
 			    &fops_interrupt);
 	debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_xmit);
+	debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc,
+			    &fops_queues);
 	debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
-			   &sc->tx.txq_max_pending[WME_AC_BK]);
+			   &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
 	debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
-			   &sc->tx.txq_max_pending[WME_AC_BE]);
+			   &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
 	debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
-			   &sc->tx.txq_max_pending[WME_AC_VI]);
+			   &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
 	debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
-			   &sc->tx.txq_max_pending[WME_AC_VO]);
-	debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
-			    &fops_stations);
+			   &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
 	debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_misc);
 	debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
@@ -1658,6 +1581,9 @@
 			   sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
 	debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
 			    sc->debug.debugfs_phy, sc, &fops_ant_diversity);
-
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+	debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
+			    &fops_btcoex);
+#endif
 	return 0;
 }
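
read_file_queues() and read_file_btcoex() above follow the same debugfs idiom: build the whole report in a scratch buffer with snprintf(), then hand it to simple_read_from_buffer(). A stripped-down sketch of the idiom, with an illustrative counter standing in for the real statistics:

/* Sketch only: mirrors the buffer handling of the handlers above. */
static ssize_t read_file_example(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	unsigned int len = 0, size = 1024;
	ssize_t retval;
	char *buf;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	len += snprintf(buf + len, size - len, "%20s : %10u\n",
			"Example counter", 42U);

	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}

The matching file_operations wire .read to the handler and use simple_open and default_llseek, as fops_queues and fops_btcoex do above.
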
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 2ed9785..f9bee18 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,6 +41,7 @@
 	RESET_TYPE_PLL_HANG,
 	RESET_TYPE_MAC_HANG,
 	RESET_TYPE_BEACON_STUCK,
+	RESET_TYPE_MCI,
 	__RESET_TYPE_MAX
 };
 
@@ -178,6 +179,21 @@
 	u32 txfailed;
 };
 
+/*
+ * Various utility macros to print TX/Queue counters.
+ */
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
+#define TXSTATS sc->debug.stats.txstats
+#define PR(str, elem)							\
+	do {								\
+		len += snprintf(buf + len, size - len,			\
+				"%s%13u%11u%10u%10u\n", str,		\
+				TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,	\
+				TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,	\
+				TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,	\
+				TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+	} while(0)
+
 #define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
 
 /**
@@ -226,7 +242,7 @@
 
 struct ath_stats {
 	struct ath_interrupt_stats istats;
-	struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+	struct ath_tx_stats txstats[IEEE80211_NUM_ACS];
 	struct ath_rx_stats rxstats;
 	struct ath_dfs_stats dfs_stats;
 	u32 reset[__RESET_TYPE_MAX];
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index ea2a6cf..24877b0 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -42,10 +42,15 @@
 #define MIN_PPB_THRESH	50
 #define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
 #define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
+/* percentage of pulse width tolerance */
+#define WIDTH_TOLERANCE 5
+#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
+#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
 
 #define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)	\
 {								\
-	ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE),	\
+	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),		\
+	(PRF2PRI(PMAX) - PRI_TOLERANCE),			\
 	(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,	\
 	PPB_THRESH(PPB), PRI_TOLERANCE,				\
 }
@@ -274,7 +279,7 @@
 
 static struct dfs_pattern_detector default_dpd = {
 	.exit		= dpd_exit,
-	.set_domain	= dpd_set_domain,
+	.set_dfs_domain	= dpd_set_domain,
 	.add_pulse	= dpd_add_pulse,
 	.region		= NL80211_DFS_UNSET,
 };
@@ -291,10 +296,11 @@
 	*dpd = default_dpd;
 	INIT_LIST_HEAD(&dpd->channel_detectors);
 
-	if (dpd->set_domain(dpd, region))
+	if (dpd->set_dfs_domain(dpd, region))
 		return dpd;
 
 	pr_err("Could not set DFS domain to %d. ", region);
+	kfree(dpd);
 	return NULL;
 }
 EXPORT_SYMBOL(dfs_pattern_detector_init);
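
The new WIDTH_LOWER()/WIDTH_UPPER() macros widen each ETSI pattern's accepted pulse-width range by the 5% tolerance, and the trailing "+ 50" turns the integer division into round-to-nearest. A short worked example:

/* With WIDTH_TOLERANCE == 5, a nominal width of 30 (in the detector's
 * width units) is accepted as 29..32:
 *
 *	WIDTH_LOWER(30) == (30 *  95 + 50) / 100 == 29
 *	WIDTH_UPPER(30) == (30 * 105 + 50) / 100 == 32
 */
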
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
index fd0328a..cda52f3 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -62,7 +62,7 @@
 /**
  * struct dfs_pattern_detector - DFS pattern detector
  * @exit(): destructor
- * @set_domain(): set DFS domain, resets detector lines upon domain changes
+ * @set_dfs_domain(): set DFS domain, resets detector lines upon domain changes
  * @add_pulse(): add radar pulse to detector, returns true on detection
  * @region: active DFS region, NL80211_DFS_UNSET until set
  * @num_radar_types: number of different radar types
@@ -72,7 +72,7 @@
  */
 struct dfs_pattern_detector {
 	void (*exit)(struct dfs_pattern_detector *dpd);
-	bool (*set_domain)(struct dfs_pattern_detector *dpd,
+	bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd,
 			   enum nl80211_dfs_regions region);
 	bool (*add_pulse)(struct dfs_pattern_detector *dpd,
 			  struct pulse_event *pe);
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index d9ed141..4b412aa 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -187,34 +187,12 @@
 	}
 }
 
-/*
- * This is the master bt coex timer which runs for every
- * 45ms, bt traffic will be given priority during 55% of this
- * period while wlan gets remaining 45%
- */
-static void ath_btcoex_period_timer(unsigned long data)
+static void ath_mci_ftp_adjust(struct ath_softc *sc)
 {
-	struct ath_softc *sc = (struct ath_softc *) data;
-	struct ath_hw *ah = sc->sc_ah;
 	struct ath_btcoex *btcoex = &sc->btcoex;
 	struct ath_mci_profile *mci = &btcoex->mci;
-	u32 timer_period;
-	bool is_btscan;
-	unsigned long flags;
+	struct ath_hw *ah = sc->sc_ah;
 
-	spin_lock_irqsave(&sc->sc_pm_lock, flags);
-	if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
-		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
-		goto skip_hw_wakeup;
-	}
-	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
-
-	ath9k_ps_wakeup(sc);
-	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
-		ath_detect_bt_priority(sc);
-	is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
-
-	btcoex->bt_wait_time += btcoex->btcoex_period;
 	if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
 		if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
 		    (mci->num_pan || mci->num_other_acl))
@@ -225,13 +203,58 @@
 		btcoex->bt_wait_time = 0;
 		sc->rx.num_pkts = 0;
 	}
+}
+
+/*
+ * This is the master bt coex timer which runs for every
+ * 45ms, bt traffic will be given priority during 55% of this
+ * period while wlan gets remaining 45%
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+	struct ath_softc *sc = (struct ath_softc *) data;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	enum ath_stomp_type stomp_type;
+	u32 timer_period;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sc_pm_lock, flags);
+	if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+		btcoex->bt_wait_time += btcoex->btcoex_period;
+		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+		goto skip_hw_wakeup;
+	}
+	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+	ath9k_mci_update_rssi(sc);
+
+	ath9k_ps_wakeup(sc);
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+		ath_detect_bt_priority(sc);
+
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+		ath_mci_ftp_adjust(sc);
 
 	spin_lock_bh(&btcoex->btcoex_lock);
 
-	ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
-			      btcoex->bt_stomp_type);
+	stomp_type = btcoex->bt_stomp_type;
+	timer_period = btcoex->btcoex_no_stomp;
 
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) {
+		if (test_bit(BT_OP_SCAN, &btcoex->op_flags)) {
+			stomp_type = ATH_BTCOEX_STOMP_ALL;
+			timer_period = btcoex->btscan_no_stomp;
+		}
+	} else if (btcoex->stomp_audio >= 5) {
+		stomp_type = ATH_BTCOEX_STOMP_AUDIO;
+		btcoex->stomp_audio = 0;
+	}
+
+	ath9k_hw_btcoex_bt_stomp(ah, stomp_type);
 	ath9k_hw_btcoex_enable(ah);
+
 	spin_unlock_bh(&btcoex->btcoex_lock);
 
 	/*
@@ -243,17 +266,16 @@
 		if (btcoex->hw_timer_enabled)
 			ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
 
-		timer_period = is_btscan ? btcoex->btscan_no_stomp :
-					   btcoex->btcoex_no_stomp;
 		ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
 				      timer_period * 10);
 		btcoex->hw_timer_enabled = true;
 	}
 
 	ath9k_ps_restore(sc);
+
 skip_hw_wakeup:
-	timer_period = btcoex->btcoex_period;
-	mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
+	mod_timer(&btcoex->period_timer,
+		  jiffies + msecs_to_jiffies(btcoex->btcoex_period));
 }
 
 /*
@@ -273,9 +295,10 @@
 	spin_lock_bh(&btcoex->btcoex_lock);
 
 	if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
-	    test_bit(BT_OP_SCAN, &btcoex->op_flags))
+	    (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI) &&
+	     test_bit(BT_OP_SCAN, &btcoex->op_flags)))
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
-	 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+	else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
 
 	ath9k_hw_btcoex_enable(ah);
@@ -451,7 +474,7 @@
 		r = ath_init_btcoex_timer(sc);
 		if (r)
 			return -1;
-		txq = sc->tx.txq_map[WME_AC_BE];
+		txq = sc->tx.txq_map[IEEE80211_AC_BE];
 		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 		if (ath9k_hw_mci_is_enabled(ah)) {
@@ -474,4 +497,71 @@
 	return 0;
 }
 
+static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	u32 len = 0;
+	int i;
+
+	ATH_DUMP_BTCOEX("Total BT profiles", NUM_PROF(mci));
+	ATH_DUMP_BTCOEX("MGMT", mci->num_mgmt);
+	ATH_DUMP_BTCOEX("SCO", mci->num_sco);
+	ATH_DUMP_BTCOEX("A2DP", mci->num_a2dp);
+	ATH_DUMP_BTCOEX("HID", mci->num_hid);
+	ATH_DUMP_BTCOEX("PAN", mci->num_pan);
+	ATH_DUMP_BTCOEX("ACL", mci->num_other_acl);
+	ATH_DUMP_BTCOEX("BDR", mci->num_bdr);
+	ATH_DUMP_BTCOEX("Aggr. Limit", mci->aggr_limit);
+	ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+	ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+	ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+	ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+	ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
+	ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
+
+	len += snprintf(buf + len, size - len, "BT Weights: ");
+	for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+		len += snprintf(buf + len, size - len, "%08x ",
+				btcoex_hw->bt_weight[i]);
+	len += snprintf(buf + len, size - len, "\n");
+	len += snprintf(buf + len, size - len, "WLAN Weights: ");
+	for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+		len += snprintf(buf + len, size - len, "%08x ",
+				btcoex_hw->wlan_weight[i]);
+	len += snprintf(buf + len, size - len, "\n");
+	len += snprintf(buf + len, size - len, "Tx Priorities: ");
+	for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+		len += snprintf(buf + len, size - len, "%08x ",
+				btcoex_hw->tx_prio[i]);
+
+	len += snprintf(buf + len, size - len, "\n");
+
+	return len;
+}
+
+static int ath9k_dump_legacy_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	u32 len = 0;
+
+	ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+	ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+	ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+	ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+
+	return len;
+}
+
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+	if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+		return ath9k_dump_mci_btcoex(sc, buf, size);
+	else
+		return ath9k_dump_legacy_btcoex(sc, buf, size);
+}
+
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
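
The timer comment above describes a 45 ms coex period split 55/45 between BT and WLAN. A rough sketch of that arithmetic; the names and units here are illustrative, not the driver's actual fields:

/* Rough sketch of the duty-cycle split described in the comment above. */
unsigned int period_ms  = 45;				/* coex period       */
unsigned int duty_cycle = 55;				/* BT share, percent */
unsigned int wlan_ms    = period_ms * (100 - duty_cycle) / 100;
/* wlan_ms == 20: WLAN keeps roughly 20 ms of every 45 ms period */
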
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index b30596f..96bfb180 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -331,7 +331,7 @@
 	u32 skb_success;
 	u32 skb_failed;
 	u32 cab_queued;
-	u32 queue_stats[WME_NUM_AC];
+	u32 queue_stats[IEEE80211_NUM_ACS];
 };
 
 struct ath_rx_stats {
@@ -493,7 +493,7 @@
 
 	int beaconq;
 	int cabq;
-	int hwq_map[WME_NUM_AC];
+	int hwq_map[IEEE80211_NUM_ACS];
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	struct ath_btcoex btcoex;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f42d2eb..d0ce1f5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -33,7 +33,7 @@
 		qi.tqi_cwmin = 0;
 		qi.tqi_cwmax = 0;
 	} else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
-		int qnum = priv->hwq_map[WME_AC_BE];
+		int qnum = priv->hwq_map[IEEE80211_AC_BE];
 
 		ath9k_hw_get_txq_props(ah, qnum, &qi_be);
 
@@ -587,9 +587,9 @@
 	    (priv->num_sta_vif > 1) &&
 	    (vif->type == NL80211_IFTYPE_STATION)) {
 		beacon_configured = false;
-		ieee80211_iterate_active_interfaces_atomic(priv->hw,
-							   ath9k_htc_beacon_iter,
-							   &beacon_configured);
+		ieee80211_iterate_active_interfaces_atomic(
+			priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			ath9k_htc_beacon_iter, &beacon_configured);
 
 		if (beacon_configured) {
 			ath_dbg(common, CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 3035deb..87110de5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -218,16 +218,16 @@
 
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"%20s : %10u\n", "BE queued",
-			priv->debug.tx_stats.queue_stats[WME_AC_BE]);
+			priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"%20s : %10u\n", "BK queued",
-			priv->debug.tx_stats.queue_stats[WME_AC_BK]);
+			priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"%20s : %10u\n", "VI queued",
-			priv->debug.tx_stats.queue_stats[WME_AC_VI]);
+			priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"%20s : %10u\n", "VO queued",
-			priv->debug.tx_stats.queue_stats[WME_AC_VO]);
+			priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 0eacfc1..105582d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -207,7 +207,7 @@
 		priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 		ath9k_hw_btcoex_init_3wire(priv->ah);
 		ath_htc_init_btcoex_work(priv);
-		qnum = priv->hwq_map[WME_AC_BE];
+		qnum = priv->hwq_map[IEEE80211_AC_BE];
 		ath9k_hw_init_btcoex_hw(priv->ah, qnum);
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d98255e..05d5ba6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -549,20 +549,20 @@
 		goto err;
 	}
 
-	if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
+	if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BE)) {
 		ath_err(common, "Unable to setup xmit queue for BE traffic\n");
 		goto err;
 	}
 
-	if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
+	if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BK)) {
 		ath_err(common, "Unable to setup xmit queue for BK traffic\n");
 		goto err;
 	}
-	if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
+	if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VI)) {
 		ath_err(common, "Unable to setup xmit queue for VI traffic\n");
 		goto err;
 	}
-	if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
+	if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VO)) {
 		ath_err(common, "Unable to setup xmit queue for VO traffic\n");
 		goto err;
 	}
@@ -694,6 +694,20 @@
 	return ret;
 }
 
+static const struct ieee80211_iface_limit if_limits[] = {
+	{ .max = 2,	.types = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) },
+	{ .max = 2,	.types = BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+	.limits = if_limits,
+	.n_limits = ARRAY_SIZE(if_limits),
+	.max_interfaces = 2,
+	.num_different_channels = 1,
+};
+
 static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
 			       struct ieee80211_hw *hw)
 {
@@ -716,6 +730,9 @@
 		BIT(NL80211_IFTYPE_P2P_GO) |
 		BIT(NL80211_IFTYPE_P2P_CLIENT);
 
+	hw->wiphy->iface_combinations = &if_comb;
+	hw->wiphy->n_iface_combinations = 1;
+
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ca78e33..9c07a8f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,8 +127,9 @@
 	priv->rearm_ani = false;
 	priv->reconfig_beacon = false;
 
-	ieee80211_iterate_active_interfaces_atomic(priv->hw,
-						   ath9k_htc_vif_iter, priv);
+	ieee80211_iterate_active_interfaces_atomic(
+		priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath9k_htc_vif_iter, priv);
 	if (priv->rearm_ani)
 		ath9k_htc_start_ani(priv);
 
@@ -165,8 +166,9 @@
 		ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
 
 	/* Get list of all active MAC addresses */
-	ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
-						   &iter_data);
+	ieee80211_iterate_active_interfaces_atomic(
+		priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath9k_htc_bssid_iter, &iter_data);
 
 	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
 	ath_hw_setbssidmask(common);
@@ -1036,26 +1038,6 @@
 
 	mutex_lock(&priv->mutex);
 
-	if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
-		mutex_unlock(&priv->mutex);
-		return -ENOBUFS;
-	}
-
-	if (priv->num_ibss_vif ||
-	    (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
-		ath_err(common, "IBSS coexistence with other modes is not allowed\n");
-		mutex_unlock(&priv->mutex);
-		return -ENOBUFS;
-	}
-
-	if (((vif->type == NL80211_IFTYPE_AP) ||
-	     (vif->type == NL80211_IFTYPE_ADHOC)) &&
-	    ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
-		ath_err(common, "Max. number of beaconing interfaces reached\n");
-		mutex_unlock(&priv->mutex);
-		return -ENOBUFS;
-	}
-
 	ath9k_htc_ps_wakeup(priv);
 	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
 	memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1164,8 +1146,9 @@
 	 */
 	if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
 		priv->rearm_ani = false;
-		ieee80211_iterate_active_interfaces_atomic(priv->hw,
-						   ath9k_htc_vif_iter, priv);
+		ieee80211_iterate_active_interfaces_atomic(
+			priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			ath9k_htc_vif_iter, priv);
 		if (!priv->rearm_ani)
 			ath9k_htc_stop_ani(priv);
 	}
@@ -1366,7 +1349,7 @@
 	struct ath9k_tx_queue_info qi;
 	int ret = 0, qnum;
 
-	if (queue >= WME_NUM_AC)
+	if (queue >= IEEE80211_NUM_ACS)
 		return 0;
 
 	mutex_lock(&priv->mutex);
@@ -1393,7 +1376,7 @@
 	}
 
 	if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
-	    (qnum == priv->hwq_map[WME_AC_BE]))
+	    (qnum == priv->hwq_map[IEEE80211_AC_BE]))
 		    ath9k_htc_beaconq_config(priv);
 out:
 	ath9k_htc_ps_restore(priv);
@@ -1486,8 +1469,9 @@
 static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
 {
 	if (priv->num_sta_assoc_vif == 1) {
-		ieee80211_iterate_active_interfaces_atomic(priv->hw,
-							   ath9k_htc_bss_iter, priv);
+		ieee80211_iterate_active_interfaces_atomic(
+			priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			ath9k_htc_bss_iter, priv);
 		ath9k_htc_set_bssid(priv);
 	}
 }
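
The call sites above move to the form of ieee80211_iterate_active_interfaces_atomic() that takes an iter_flags argument, passing IEEE80211_IFACE_ITER_RESUME_ALL. The iterator callback keeps its usual shape; a minimal sketch with illustrative names:

/* Sketch of an iterator callback and the updated call; names are
 * illustrative.  The callback signature (data, mac, vif) is unchanged,
 * only the extra flags argument at the call site is new. */
static void example_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	int *n_active = data;

	(*n_active)++;		/* e.g. count active interfaces */
}

/* call site:
 *	int n_active = 0;
 *
 *	ieee80211_iterate_active_interfaces_atomic(
 *		priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 *		example_vif_iter, &n_active);
 */
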
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 06cdcb7..28cd50e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -21,10 +21,10 @@
 /******/
 
 static const int subtype_txq_to_hwq[] = {
-	[WME_AC_BE] = ATH_TXQ_AC_BE,
-	[WME_AC_BK] = ATH_TXQ_AC_BK,
-	[WME_AC_VI] = ATH_TXQ_AC_VI,
-	[WME_AC_VO] = ATH_TXQ_AC_VO,
+	[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+	[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+	[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+	[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
 };
 
 #define ATH9K_HTC_INIT_TXQ(subtype) do {			\
@@ -41,15 +41,15 @@
 {
 	switch (queue) {
 	case 0:
-		return hwq_map[WME_AC_VO];
+		return hwq_map[IEEE80211_AC_VO];
 	case 1:
-		return hwq_map[WME_AC_VI];
+		return hwq_map[IEEE80211_AC_VI];
 	case 2:
-		return hwq_map[WME_AC_BE];
+		return hwq_map[IEEE80211_AC_BE];
 	case 3:
-		return hwq_map[WME_AC_BK];
+		return hwq_map[IEEE80211_AC_BK];
 	default:
-		return hwq_map[WME_AC_BE];
+		return hwq_map[IEEE80211_AC_BE];
 	}
 }
 
@@ -106,20 +106,20 @@
 
 	switch (qnum) {
 	case 0:
-		TX_QSTAT_INC(WME_AC_VO);
+		TX_QSTAT_INC(IEEE80211_AC_VO);
 		epid = priv->data_vo_ep;
 		break;
 	case 1:
-		TX_QSTAT_INC(WME_AC_VI);
+		TX_QSTAT_INC(IEEE80211_AC_VI);
 		epid = priv->data_vi_ep;
 		break;
 	case 2:
-		TX_QSTAT_INC(WME_AC_BE);
+		TX_QSTAT_INC(IEEE80211_AC_BE);
 		epid = priv->data_be_ep;
 		break;
 	case 3:
 	default:
-		TX_QSTAT_INC(WME_AC_BK);
+		TX_QSTAT_INC(IEEE80211_AC_BK);
 		epid = priv->data_bk_ep;
 		break;
 	}
@@ -1082,7 +1082,7 @@
 	rx_status->freq = hw->conf.channel->center_freq;
 	rx_status->signal =  rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
 	rx_status->antenna = rxbuf->rxstatus.rs_antenna;
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 
 	return true;
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1829b44..e06bcec 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2153,9 +2153,6 @@
 		    AR_RTC_FORCE_WAKE_EN);
 	udelay(50);
 
-	if (ath9k_hw_mci_is_enabled(ah))
-		ar9003_mci_set_power_awake(ah);
-
 	for (i = POWER_UP_TIME / 50; i > 0; i--) {
 		val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
 		if (val == AR_RTC_STATUS_ON)
@@ -2171,6 +2168,9 @@
 		return false;
 	}
 
+	if (ath9k_hw_mci_is_enabled(ah))
+		ar9003_mci_set_power_awake(ah);
+
 	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
 
 	return true;
@@ -2561,11 +2561,6 @@
 			pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
 	}
 
-	if (AR_SREV_9485_10(ah)) {
-		pCap->pcie_lcr_extsync_en = true;
-		pCap->pcie_lcr_offset = 0x80;
-	}
-
 	if (ath9k_hw_dfs_tested(ah))
 		pCap->hw_caps |= ATH9K_HW_CAP_DFS;
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbc1b7a..3636dab 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -273,8 +273,6 @@
 	u8 rx_status_len;
 	u8 tx_desc_len;
 	u8 txs_len;
-	u16 pcie_lcr_offset;
-	bool pcie_lcr_extsync_en;
 };
 
 struct ath9k_ops_config {
@@ -401,6 +399,7 @@
 struct ath9k_hw_cal_data {
 	u16 channel;
 	u32 channelFlags;
+	u32 chanmode;
 	int32_t CalValid;
 	int8_t iCoff;
 	int8_t qCoff;
@@ -834,6 +833,7 @@
 	int coarse_low[5];
 	int firpwr[5];
 	enum ath9k_ani_cmd ani_function;
+	u32 ani_skip_count;
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	struct ath_btcoex_hw btcoex_hw;
@@ -875,7 +875,6 @@
 	struct ar5416IniArray iniModesTxGain;
 	struct ar5416IniArray iniCckfirNormal;
 	struct ar5416IniArray iniCckfirJapan2484;
-	struct ar5416IniArray ini_japan2484;
 	struct ar5416IniArray iniModes_9271_ANI_reg;
 	struct ar5416IniArray ini_radio_post_sys2ant;
 
@@ -928,7 +927,6 @@
 	void (*read_cachesize)(struct ath_common *common, int *csz);
 	bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
 	void (*bt_coex_prep)(struct ath_common *common);
-	void (*extn_synch_en)(struct ath_common *common);
 	void (*aspm_init)(struct ath_common *common);
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fad3ccd..80cae53 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -435,7 +435,7 @@
 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
 	ath_cabq_update(sc);
 
-	for (i = 0; i < WME_NUM_AC; i++) {
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
 		sc->tx.txq_map[i]->mac80211_qnum = i;
 		sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
@@ -563,10 +563,6 @@
 	spin_lock_init(&sc->sc_serial_rw);
 	spin_lock_init(&sc->sc_pm_lock);
 	mutex_init(&sc->mutex);
-#ifdef CONFIG_ATH9K_DEBUGFS
-	spin_lock_init(&sc->nodes_lock);
-	INIT_LIST_HEAD(&sc->nodes);
-#endif
 #ifdef CONFIG_ATH9K_MAC_DEBUG
 	spin_lock_init(&sc->debug.samp_lock);
 #endif
@@ -687,6 +683,7 @@
 	.n_limits = ARRAY_SIZE(if_limits),
 	.max_interfaces = 2048,
 	.num_different_channels = 1,
+	.beacon_int_infra_match = true,
 };
 
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7b88b9c..fc6b075 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -27,9 +27,6 @@
 	struct ath_txq *txq;
 	int i;
 	bool needreset = false;
-#ifdef CONFIG_ATH9K_DEBUGFS
-	sc->tx_complete_poll_work_seen++;
-#endif
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i)) {
@@ -211,7 +208,7 @@
 	int time_left;
 
 	memset(&txctl, 0, sizeof(txctl));
-	txctl.txq = sc->tx.txq_map[WME_AC_BE];
+	txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
 
 	memset(tx_info, 0, sizeof(*tx_info));
 	tx_info->band = hw->conf.channel->band;
@@ -350,8 +347,18 @@
 		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
 
 	/* Only calibrate if awake */
-	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
+		if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
+			spin_lock_irqsave(&sc->sc_pm_lock, flags);
+			sc->ps_flags |= PS_WAIT_FOR_ANI;
+			spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+		}
 		goto set_timer;
+	}
+	ah->ani_skip_count = 0;
+	spin_lock_irqsave(&sc->sc_pm_lock, flags);
+	sc->ps_flags &= ~PS_WAIT_FOR_ANI;
+	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
 	ath9k_ps_wakeup(sc);
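
The ANI path above now counts how many consecutive passes were skipped because the chip was asleep; once ATH_ANI_MAX_SKIP_COUNT of them pile up, PS_WAIT_FOR_ANI keeps the chip awake so the next pass can run, and both the counter and the flag are cleared as soon as a pass completes while awake. A self-contained sketch of that accounting, with illustrative names:

#include <stdbool.h>

#define ANI_MAX_SKIP_COUNT 10	/* mirrors ATH_ANI_MAX_SKIP_COUNT */

/* Returns true when power save should be blocked so ANI can run. */
static bool ani_note_pass(unsigned int *skip_count, bool chip_awake)
{
	if (!chip_awake)
		return ++(*skip_count) >= ANI_MAX_SKIP_COUNT;

	*skip_count = 0;	/* the pass ran while awake: clear backlog */
	return false;
}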
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dd45edf..0653dbc 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -131,7 +131,8 @@
 		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
 				     PS_WAIT_FOR_CAB |
 				     PS_WAIT_FOR_PSPOLL_DATA |
-				     PS_WAIT_FOR_TX_ACK))) {
+				     PS_WAIT_FOR_TX_ACK |
+				     PS_WAIT_FOR_ANI))) {
 		mode = ATH9K_PM_NETWORK_SLEEP;
 		if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
 			ath9k_btcoex_stop_gen_timer(sc);
@@ -292,6 +293,10 @@
 		goto out;
 	}
 
+	if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
+	    (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+		ath9k_mci_set_txpower(sc, true, false);
+
 	if (!ath_complete_reset(sc, true))
 		r = -EIO;
 
@@ -326,11 +331,6 @@
 	u8 density;
 	an = (struct ath_node *)sta->drv_priv;
 
-#ifdef CONFIG_ATH9K_DEBUGFS
-	spin_lock(&sc->nodes_lock);
-	list_add(&an->list, &sc->nodes);
-	spin_unlock(&sc->nodes_lock);
-#endif
 	an->sta = sta;
 	an->vif = vif;
 
@@ -347,13 +347,6 @@
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 
-#ifdef CONFIG_ATH9K_DEBUGFS
-	spin_lock(&sc->nodes_lock);
-	list_del(&an->list);
-	spin_unlock(&sc->nodes_lock);
-	an->sta = NULL;
-#endif
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		ath_tx_node_cleanup(sc, an);
 }
@@ -489,17 +482,6 @@
 	if (status & SCHED_INTR)
 		sched = true;
 
-#ifdef CONFIG_PM_SLEEP
-	if (status & ATH9K_INT_BMISS) {
-		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
-			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
-			atomic_inc(&sc->wow_got_bmiss_intr);
-			atomic_dec(&sc->wow_sleep_proc_intr);
-		}
-	ath_dbg(common, INTERRUPT, "beacon miss interrupt\n");
-	}
-#endif
-
 	/*
 	 * If a FATAL or RXORN interrupt is received, we have to reset the
 	 * chip immediately.
@@ -518,7 +500,15 @@
 
 		goto chip_reset;
 	}
-
+#ifdef CONFIG_PM_SLEEP
+	if (status & ATH9K_INT_BMISS) {
+		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
+			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
+			atomic_inc(&sc->wow_got_bmiss_intr);
+			atomic_dec(&sc->wow_sleep_proc_intr);
+		}
+	}
+#endif
 	if (status & ATH9K_INT_SWBA)
 		tasklet_schedule(&sc->bcon_tasklet);
 
@@ -681,9 +671,6 @@
 
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
-	if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
-		common->bus_ops->extn_synch_en(common);
-
 	mutex_unlock(&sc->mutex);
 
 	ath9k_ps_restore(sc);
@@ -919,8 +906,9 @@
 		ath9k_vif_iter(iter_data, vif->addr, vif);
 
 	/* Get list of all active MAC addresses */
-	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
-						   iter_data);
+	ieee80211_iterate_active_interfaces_atomic(
+		sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		ath9k_vif_iter, iter_data);
 }
 
 /* Called with sc->mutex held. */
@@ -970,8 +958,9 @@
 	if (ah->opmode == NL80211_IFTYPE_STATION &&
 	    old_opmode == NL80211_IFTYPE_AP &&
 	    test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
-		ieee80211_iterate_active_interfaces_atomic(sc->hw,
-						   ath9k_sta_vif_iter, sc);
+		ieee80211_iterate_active_interfaces_atomic(
+			sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			ath9k_sta_vif_iter, sc);
 	}
 }
 
@@ -1324,7 +1313,7 @@
 	struct ath9k_tx_queue_info qi;
 	int ret = 0;
 
-	if (queue >= WME_NUM_AC)
+	if (queue >= IEEE80211_NUM_ACS)
 		return 0;
 
 	txq = sc->tx.txq_map[queue];
@@ -1449,6 +1438,9 @@
 	sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
 	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
+	if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+		ath9k_mci_update_wlan_channels(sc, false);
+
 	ath_dbg(common, CONFIG,
 		"Primary Station interface: %pM, BSSID: %pM\n",
 		vif->addr, common->curbssid);
@@ -1497,14 +1489,17 @@
 				clear_bit(SC_OP_BEACONS, &sc->sc_flags);
 		}
 
-		ieee80211_iterate_active_interfaces_atomic(sc->hw,
-						   ath9k_bss_assoc_iter, sc);
+		ieee80211_iterate_active_interfaces_atomic(
+			sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			ath9k_bss_assoc_iter, sc);
 
 		if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
 		    ah->opmode == NL80211_IFTYPE_STATION) {
 			memset(common->curbssid, 0, ETH_ALEN);
 			common->curaid = 0;
 			ath9k_hw_write_associd(sc->sc_ah);
+			if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+				ath9k_mci_update_wlan_channels(sc, true);
 		}
 	}
 
@@ -1946,13 +1941,12 @@
 	return 0;
 }
 
-#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
 #define AWDATA(elem)							\
 	do {								\
-		data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
-		data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
-		data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
-		data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
+		data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].elem; \
+		data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].elem; \
+		data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].elem; \
+		data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].elem; \
 	} while (0)
 
 #define AWDATA_RX(elem)						\
@@ -1967,14 +1961,14 @@
 	struct ath_softc *sc = hw->priv;
 	int i = 0;
 
-	data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
-	data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
-		     sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
+	data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
+	data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
+		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
 	AWDATA_RX(rx_pkts_all);
 	AWDATA_RX(rx_bytes_all);
 
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index ec2d7c8..706378e 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,6 +43,7 @@
 				struct ath_mci_profile_info *info)
 {
 	struct ath_mci_profile_info *entry;
+	u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
 
 	if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
 	    (info->type == MCI_GPM_COEX_PROFILE_VOICE))
@@ -59,6 +60,12 @@
 	memcpy(entry, info, 10);
 	INC_PROF(mci, info);
 	list_add_tail(&entry->list, &mci->info);
+	if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
+		if (info->voice_type < sizeof(voice_priority))
+			mci->voice_priority = voice_priority[info->voice_type];
+		else
+			mci->voice_priority = 110;
+	}
 
 	return true;
 }
@@ -150,7 +157,7 @@
 			 * For single PAN/FTP profile, allocate 35% for BT
 			 * to improve WLAN throughput.
 			 */
-			btcoex->duty_cycle = 35;
+			btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
 			btcoex->btcoex_period = 53;
 			ath_dbg(common, MCI,
 				"Single PAN/FTP bt period %d ms dutycycle %d\n",
@@ -200,23 +207,6 @@
 	ath9k_btcoex_timer_resume(sc);
 }
 
-static void ath_mci_wait_btcal_done(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-
-	/* Stop tx & rx */
-	ieee80211_stop_queues(sc->hw);
-	ath_stoprecv(sc);
-	ath_drain_all_txq(sc, false);
-
-	/* Wait for cal done */
-	ar9003_mci_start_reset(ah, ah->curchan);
-
-	/* Resume tx & rx */
-	ath_startrecv(sc);
-	ieee80211_wake_queues(sc->hw);
-}
-
 static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
 {
 	struct ath_hw *ah = sc->sc_ah;
@@ -228,7 +218,7 @@
 	case MCI_GPM_BT_CAL_REQ:
 		if (mci_hw->bt_state == MCI_BT_AWAKE) {
 			mci_hw->bt_state = MCI_BT_CAL_START;
-			ath_mci_wait_btcal_done(sc);
+			ath9k_queue_reset(sc, RESET_TYPE_MCI);
 		}
 		ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
 		break;
@@ -250,6 +240,57 @@
 	ath_mci_update_scheme(sc);
 }
 
+static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
+{
+	if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
+		stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
+
+	if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
+		stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
+
+	if ((cur_txprio > ATH_MCI_HI_PRIO) &&
+	    (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
+		stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
+}
+
+static void ath_mci_set_concur_txprio(struct ath_softc *sc)
+{
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+	u8 stomp_txprio[] = { 0, 0, 0, 0 }; /* all, low, none, low_ftp */
+
+	if (mci->num_mgmt) {
+		stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
+		if (!mci->num_pan && !mci->num_other_acl)
+			stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
+				ATH_MCI_INQUIRY_PRIO;
+	} else {
+		u8 prof_prio[] = { 50, 90, 94, 52 };/* RFCOMM, A2DP, HID, PAN */
+
+		stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
+		stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
+
+		if (mci->num_sco)
+			ath_mci_update_stomp_txprio(mci->voice_priority,
+						    stomp_txprio);
+		if (mci->num_other_acl)
+			ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
+		if (mci->num_a2dp)
+			ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
+		if (mci->num_hid)
+			ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
+		if (mci->num_pan)
+			ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
+
+		if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
+			stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
+
+		if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
+			stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
+	}
+	ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
+}
+
 static u8 ath_mci_process_profile(struct ath_softc *sc,
 				  struct ath_mci_profile_info *info)
 {
@@ -281,6 +322,7 @@
 	} else
 		ath_mci_del_profile(common, mci, entry);
 
+	ath_mci_set_concur_txprio(sc);
 	return 1;
 }
 
@@ -314,6 +356,7 @@
 			mci->num_mgmt++;
 	} while (++i < ATH_MCI_MAX_PROFILE);
 
+	ath_mci_set_concur_txprio(sc);
 	if (old_num_mgmt != mci->num_mgmt)
 		return 1;
 
@@ -518,6 +561,8 @@
 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
 
 		while (more_data == MCI_GPM_MORE) {
+			if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+				return;
 
 			pgpm = mci->gpm_buf.bf_addr;
 			offset = ar9003_mci_get_next_gpm_offset(ah, false,
@@ -600,3 +645,130 @@
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
 		sc->sc_ah->imask |= ATH9K_INT_MCI;
 }
+
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	struct ath9k_channel *chan = ah->curchan;
+	u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+	int i;
+	s16 chan_start, chan_end;
+	u16 wlan_chan;
+
+	if (!chan || !IS_CHAN_2GHZ(chan))
+		return;
+
+	if (allow_all)
+		goto send_wlan_chan;
+
+	wlan_chan = chan->channel - 2402;
+
+	chan_start = wlan_chan - 10;
+	chan_end = wlan_chan + 10;
+
+	if (chan->chanmode == CHANNEL_G_HT40PLUS)
+		chan_end += 20;
+	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+		chan_start -= 20;
+
+	/* adjust side band */
+	chan_start -= 7;
+	chan_end += 7;
+
+	if (chan_start <= 0)
+		chan_start = 0;
+	if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
+		chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
+
+	ath_dbg(ath9k_hw_common(ah), MCI,
+		"WLAN current channel %d mask BT channel %d - %d\n",
+		wlan_chan, chan_start, chan_end);
+
+	for (i = chan_start; i < chan_end; i++)
+		MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
+
+send_wlan_chan:
+	/* update and send wlan channels info to BT */
+	for (i = 0; i < 4; i++)
+		mci->wlan_channels[i] = channelmap[i];
+	ar9003_mci_send_wlan_channels(ah);
+	ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
+}
+
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+			   bool concur_tx)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+	bool old_concur_tx = mci_hw->concur_tx;
+
+	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
+		mci_hw->concur_tx = false;
+		return;
+	}
+
+	if (!IS_CHAN_2GHZ(ah->curchan))
+		return;
+
+	if (setchannel) {
+		struct ath9k_hw_cal_data *caldata = &sc->caldata;
+		if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+		    (ah->curchan->channel > caldata->channel) &&
+		    (ah->curchan->channel <= caldata->channel + 20))
+			return;
+		if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+		    (ah->curchan->channel < caldata->channel) &&
+		    (ah->curchan->channel >= caldata->channel - 20))
+			return;
+		mci_hw->concur_tx = false;
+	} else
+		mci_hw->concur_tx = concur_tx;
+
+	if (old_concur_tx != mci_hw->concur_tx)
+		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+}
+
+static void ath9k_mci_stomp_audio(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+
+	if (!mci->num_sco && !mci->num_a2dp)
+		return;
+
+	if (ah->stats.avgbrssi > 25) {
+		btcoex->stomp_audio = 0;
+		return;
+	}
+
+	btcoex->stomp_audio++;
+}
+void ath9k_mci_update_rssi(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+
+	ath9k_mci_stomp_audio(sc);
+
+	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
+		return;
+
+	if (ah->stats.avgbrssi >= 40) {
+		if (btcoex->rssi_count < 0)
+			btcoex->rssi_count = 0;
+		if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
+			btcoex->rssi_count = 0;
+			ath9k_mci_set_txpower(sc, false, true);
+		}
+	} else {
+		if (btcoex->rssi_count > 0)
+			btcoex->rssi_count = 0;
+		if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
+			btcoex->rssi_count = 0;
+			ath9k_mci_set_txpower(sc, false, false);
+		}
+	}
+}
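
ath9k_mci_update_wlan_channels() above blanks the BT channels that overlap the current WLAN channel out of the 79-entry map before it is sent to the BT side. A worked example of the window arithmetic for 2.4 GHz channel 6 (2437 MHz, HT20), where 2402 MHz corresponds to BT channel 0:

#include <stdio.h>

int main(void)
{
	int freq = 2437;		/* WLAN channel 6, HT20                     */
	int bt_chan = freq - 2402;	/* centre as a BT channel index: 35         */
	int start = bt_chan - 10 - 7;	/* 10 MHz half-width + 7 MHz side band: 18  */
	int end   = bt_chan + 10 + 7;	/* 52; HT40 extends one side by another 20  */

	if (start < 0)
		start = 0;
	if (end > 78)			/* clamp to the 79-channel map              */
		end = 78;

	printf("mask BT channels %d..%d\n", start, end);
	return 0;
}
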
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index fc14eea..0695883 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -32,6 +32,27 @@
 #define ATH_MCI_MAX_PROFILE		(ATH_MCI_MAX_ACL_PROFILE +\
 					 ATH_MCI_MAX_SCO_PROFILE)
 
+#define ATH_MCI_INQUIRY_PRIO         62
+#define ATH_MCI_HI_PRIO              60
+#define ATH_MCI_NUM_BT_CHANNELS      79
+#define ATH_MCI_CONCUR_TX_SWITCH      5
+
+#define MCI_GPM_SET_CHANNEL_BIT(_p_gpm, _bt_chan)			  \
+	do {								  \
+		if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) {		  \
+			*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+				(_bt_chan / 8)) |= (1 << (_bt_chan & 7)); \
+		}							  \
+	} while (0)
+
+#define MCI_GPM_CLR_CHANNEL_BIT(_p_gpm, _bt_chan)			  \
+	do {								  \
+		if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) {		  \
+			*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+				(_bt_chan / 8)) &= ~(1 << (_bt_chan & 7));\
+		}							  \
+	} while (0)
+
 #define INC_PROF(_mci, _info) do {		 \
 		switch (_info->type) {		 \
 		case MCI_GPM_COEX_PROFILE_RFCOMM:\
@@ -49,6 +70,7 @@
 			_mci->num_pan++;	 \
 			break;			 \
 		case MCI_GPM_COEX_PROFILE_VOICE: \
+		case MCI_GPM_COEX_PROFILE_A2DPVO:\
 			_mci->num_sco++;	 \
 			break;			 \
 		default:			 \
@@ -73,6 +95,7 @@
 			_mci->num_pan--;	 \
 			break;			 \
 		case MCI_GPM_COEX_PROFILE_VOICE: \
+		case MCI_GPM_COEX_PROFILE_A2DPVO:\
 			_mci->num_sco--;	 \
 			break;			 \
 		default:			 \
@@ -113,6 +136,7 @@
 	u8 num_pan;
 	u8 num_other_acl;
 	u8 num_bdr;
+	u8 voice_priority;
 };
 
 struct ath_mci_buf {
@@ -130,13 +154,25 @@
 int ath_mci_setup(struct ath_softc *sc);
 void ath_mci_cleanup(struct ath_softc *sc);
 void ath_mci_intr(struct ath_softc *sc);
+void ath9k_mci_update_rssi(struct ath_softc *sc);
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 void ath_mci_enable(struct ath_softc *sc);
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all);
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+			   bool concur_tx);
 #else
 static inline void ath_mci_enable(struct ath_softc *sc)
 {
 }
+static inline void ath9k_mci_update_wlan_channels(struct ath_softc *sc,
+						  bool allow_all)
+{
+}
+static inline void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+					 bool concur_tx)
+{
+}
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
 #endif /* MCI_H*/
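
The MCI_GPM_SET_CHANNEL_BIT/MCI_GPM_CLR_CHANNEL_BIT macros added above index a per-BT-channel bitmap byte-wise: channel n lands in byte n / 8 at bit n & 7, and out-of-range channels are silently ignored. The standalone sketch below shows the same arithmetic over a 79-channel map; the helper names and plain buffer are illustrative, whereas the real macros poke the channel map inside a GPM message at MCI_GPM_COEX_B_CHANNEL_MAP.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_BT_CHANNELS 79

/* Set a channel bit: byte chan / 8, bit chan & 7; ignore bogus channels. */
static void chan_set(uint8_t *map, unsigned int chan)
{
	if (chan < NUM_BT_CHANNELS)
		map[chan / 8] |= 1 << (chan & 7);
}

static void chan_clr(uint8_t *map, unsigned int chan)
{
	if (chan < NUM_BT_CHANNELS)
		map[chan / 8] &= ~(1 << (chan & 7));
}

int main(void)
{
	uint8_t map[(NUM_BT_CHANNELS + 7) / 8];	/* 79 channels -> 10 bytes */

	memset(map, 0, sizeof(map));
	chan_set(map, 0);	/* byte 0, bit 0 */
	chan_set(map, 39);	/* byte 4, bit 7 */
	chan_clr(map, 0);
	printf("map[0]=0x%02x map[4]=0x%02x\n",
	       (unsigned int)map[0], (unsigned int)map[4]);
	return 0;
}

The expected output is map[0]=0x00 map[4]=0x80, i.e. only channel 39 remains marked.
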
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f088f4b..9553203 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,17 +96,6 @@
 	return true;
 }
 
-static void ath_pci_extn_synch_enable(struct ath_common *common)
-{
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
-	struct pci_dev *pdev = to_pci_dev(sc->dev);
-	u8 lnkctl;
-
-	pci_read_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, &lnkctl);
-	lnkctl |= PCI_EXP_LNKCTL_ES;
-	pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
-}
-
 /* Need to be called after we discover btcoex capabilities */
 static void ath_pci_aspm_init(struct ath_common *common)
 {
@@ -153,7 +142,6 @@
 	.ath_bus_type = ATH_PCI,
 	.read_cachesize = ath_pci_read_cachesize,
 	.eeprom_read = ath_pci_eeprom_read,
-	.extn_synch_en = ath_pci_extn_synch_enable,
 	.aspm_init = ath_pci_aspm_init,
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 27ed80b5..714558d 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -982,16 +982,6 @@
 	}
 }
 
-static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
-				   int xretries, int retries, u8 per)
-{
-	struct ath_rc_stats *stats = &rc->rcstats[rix];
-
-	stats->xretries += xretries;
-	stats->retries += retries;
-	stats->per = per;
-}
-
 static void ath_rc_update_ht(struct ath_softc *sc,
 			     struct ath_rate_priv *ath_rc_priv,
 			     struct ieee80211_tx_info *tx_info,
@@ -1065,14 +1055,6 @@
 
 }
 
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-	struct ath_rc_stats *stats;
-
-	stats = &rc->rcstats[final_rate];
-	stats->success++;
-}
-
 static void ath_rc_tx_status(struct ath_softc *sc,
 			     struct ath_rate_priv *ath_rc_priv,
 			     struct sk_buff *skb)
@@ -1350,7 +1332,25 @@
 	}
 }
 
-#ifdef CONFIG_ATH9K_DEBUGFS
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+
+void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+	struct ath_rc_stats *stats;
+
+	stats = &rc->rcstats[final_rate];
+	stats->success++;
+}
+
+void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+			    int xretries, int retries, u8 per)
+{
+	struct ath_rc_stats *stats = &rc->rcstats[rix];
+
+	stats->xretries += xretries;
+	stats->retries += retries;
+	stats->per = per;
+}
 
 static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -1428,10 +1428,17 @@
 				     struct dentry *dir)
 {
 	struct ath_rate_priv *rc = priv_sta;
-	debugfs_create_file("rc_stats", S_IRUGO, dir, rc, &fops_rcstat);
+	rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
+						  dir, rc, &fops_rcstat);
 }
 
-#endif /* CONFIG_ATH9K_DEBUGFS */
+static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
+{
+	struct ath_rate_priv *rc = priv_sta;
+	debugfs_remove(rc->debugfs_rcstats);
+}
+
+#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
 
 static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
@@ -1476,8 +1483,10 @@
 	.free = ath_rate_free,
 	.alloc_sta = ath_rate_alloc_sta,
 	.free_sta = ath_rate_free_sta,
-#ifdef CONFIG_ATH9K_DEBUGFS
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
 	.add_sta_debugfs = ath_rate_add_sta_debugfs,
+	.remove_sta_debugfs = ath_rate_remove_sta_debugfs,
 #endif
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 268e67d..267dbfc 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -211,10 +211,26 @@
 	struct ath_rateset neg_ht_rates;
 	const struct ath_rate_table *rate_table;
 
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
 	struct dentry *debugfs_rcstats;
 	struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
+#endif
 };
 
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
+void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+			    int xretries, int retries, u8 per);
+#else
+static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+}
+static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+					  int xretries, int retries, u8 per)
+{
+}
+#endif
+
 #ifdef CONFIG_ATH9K_RATE_CONTROL
 int ath_rate_control_register(void);
 void ath_rate_control_unregister(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 83d16e7..6aafbb7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -976,7 +976,7 @@
 	rx_status->freq = hw->conf.channel->center_freq;
 	rx_status->signal = ah->noise + rx_stats->rs_rssi;
 	rx_status->antenna = rx_stats->rs_antenna;
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 	if (rx_stats->rs_moreaggr)
 		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
@@ -1105,7 +1105,10 @@
 		else
 			rs.is_mybeacon = false;
 
-		sc->rx.num_pkts++;
+		if (ieee80211_is_data_present(hdr->frame_control) &&
+		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
+			sc->rx.num_pkts++;
+
 		ath_debug_stat_rx(sc, &rs);
 
 		/*
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4e6760f..ad3c82c 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -907,10 +907,6 @@
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
 	((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
 
-#define AR_SREV_9462_20_OR_LATER(_ah) \
-	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
-	((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
-
 #define AR_SREV_9565(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
 
@@ -2315,6 +2311,8 @@
 #define AR_BTCOEX_MAX_TXPWR(_x)				(0x18c0 + ((_x) << 2))
 #define AR_BTCOEX_WL_LNA				0x1940
 #define AR_BTCOEX_RFGAIN_CTRL				0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT			0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S			0
 
 #define AR_BTCOEX_CTRL2					0x1948
 #define AR_BTCOEX_CTRL2_TXPWR_THRESH			0x0007F800
@@ -2360,4 +2358,11 @@
 #define AR_GLB_SWREG_DISCONT_MODE         0x2002c
 #define AR_GLB_SWREG_DISCONT_EN_BT_WLAN   0x3
 
+#define AR_MCI_MISC                    0x1a74
+#define AR_MCI_MISC_HW_FIX_EN          0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S        0
+#define AR_MCI_DBG_CNT_CTRL            0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE     0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S   0
+
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index a483d51..9f85630 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -118,7 +118,7 @@
 		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
 	data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
 
-	if (AR_SREV_9462_20_OR_LATER(ah)) {
+	if (AR_SREV_9462_20(ah)) {
 		/* AR9462 2.0 has an extra descriptor word (time based
 		 * discard) compared to other chips */
 		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 741918a..3413094 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1354,10 +1354,10 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath9k_tx_queue_info qi;
 	static const int subtype_txq_to_hwq[] = {
-		[WME_AC_BE] = ATH_TXQ_AC_BE,
-		[WME_AC_BK] = ATH_TXQ_AC_BK,
-		[WME_AC_VI] = ATH_TXQ_AC_VI,
-		[WME_AC_VO] = ATH_TXQ_AC_VO,
+		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
 	};
 	int axq_qnum, i;
 
@@ -2319,6 +2319,8 @@
 
 		ath_txq_lock(sc, txq);
 
+		TX_STAT_INC(txq->axq_qnum, txprocdesc);
+
 		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
 			ath_txq_unlock(sc, txq);
 			return;
@@ -2464,7 +2466,7 @@
 	}
 
 	for (acno = 0, ac = &an->ac[acno];
-	     acno < WME_NUM_AC; acno++, ac++) {
+	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
 		ac->sched    = false;
 		ac->txq = sc->tx.txq_map[acno];
 		INIT_LIST_HEAD(&ac->tid_q);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 24ac287..aaebecd 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -28,11 +28,6 @@
 #include "fwcmd.h"
 #include "version.h"
 
-#define MAKE_STR(symbol) #symbol
-#define TO_STR(symbol) MAKE_STR(symbol)
-#define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER)
-MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT);
-
 static const u8 otus_magic[4] = { OTUS_MAGIC };
 
 static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index e3b1b6e..24d75ab 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -343,7 +343,24 @@
 			break;
 		}
 	} else {
-		mac_addr = NULL;
+		/*
+		 * Enable monitor mode
+		 *
+		 * rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
+		 * sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
+		 *
+		 * When the hardware is in SNIFFER_PROMISC mode,
+		 * it generates spurious ACKs for every incoming
+		 * frame. This confuses every peer in the
+		 * vicinity and the network throughput will suffer
+		 * badly.
+		 *
+		 * Hence, the hardware is put into station
+		 * mode and only the rx filters are disabled.
+		 */
+		cam_mode |= AR9170_MAC_CAM_STA;
+		rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+		mac_addr = common->macaddr;
 		bssid = NULL;
 	}
 	rcu_read_unlock();
@@ -355,8 +372,6 @@
 		enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
 
 	if (ar->sniffer_enabled) {
-		rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
-		sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
 		enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
 	}
 
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index a0b7230..6d22382 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -164,9 +164,6 @@
 	struct carl9170_rsp *cmd = buf;
 	struct ieee80211_vif *vif;
 
-	if (carl9170_check_sequence(ar, cmd->hdr.seq))
-		return;
-
 	if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
 		if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
 			carl9170_cmd_callback(ar, len, buf);
@@ -663,6 +660,35 @@
 	return false;
 }
 
+static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
+				struct ieee80211_rx_status *status)
+{
+	struct sk_buff *skb;
+
+	/* (driver) frame trap handler
+	 *
+	 * Because power-saving mode handling has to be implemented by
+	 * the driver/firmware, we have to check each incoming beacon
+	 * from the associated AP for new data addressed to us (either
+	 * broadcast/multicast or unicast) and react quickly.
+	 *
+	 * So, if you want to add additional frame trap handlers,
+	 * this would be the perfect place!
+	 */
+
+	carl9170_ps_beacon(ar, buf, len);
+
+	carl9170_ba_check(ar, buf, len);
+
+	skb = carl9170_rx_copy_data(buf, len);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
+	ieee80211_rx(ar->hw, skb);
+	return 0;
+}
+
 /*
  * If the frame alignment is right (or the kernel has
  * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
@@ -672,14 +698,12 @@
  * mode, and we need to observe the proper ordering,
  * this is non-trivial.
  */
-
-static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
 {
 	struct ar9170_rx_head *head;
 	struct ar9170_rx_macstatus *mac;
 	struct ar9170_rx_phystatus *phy = NULL;
 	struct ieee80211_rx_status status;
-	struct sk_buff *skb;
 	int mpdu_len;
 	u8 mac_status;
 
@@ -791,18 +815,10 @@
 	if (phy)
 		carl9170_rx_phy_status(ar, phy, &status);
 
-	carl9170_ps_beacon(ar, buf, mpdu_len);
-
-	carl9170_ba_check(ar, buf, mpdu_len);
-
-	skb = carl9170_rx_copy_data(buf, mpdu_len);
-	if (!skb)
+	if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
 		goto drop;
 
-	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
-	ieee80211_rx(ar->hw, skb);
 	return;
-
 drop:
 	ar->rx_dropped++;
 }
@@ -820,6 +836,9 @@
 		if (unlikely(i > resplen))
 			break;
 
+		if (carl9170_check_sequence(ar, cmd->hdr.seq))
+			break;
+
 		carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
 	}
 
@@ -851,7 +870,7 @@
 	if (i == 12)
 		carl9170_rx_untie_cmds(ar, buf, len);
 	else
-		carl9170_handle_mpdu(ar, buf, len);
+		carl9170_rx_untie_data(ar, buf, len);
 }
 
 static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 888152c..307bc0d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -295,6 +295,13 @@
 		goto resubmit;
 	}
 
+	/*
+	 * While the carl9170 firmware does not use this EP, the
+	 * firmware loader in the EEPROM unfortunately does.
+	 * Therefore we need to be ready to handle out-of-band
+	 * responses and traps in case the firmware crashed and
+	 * the loader took over again.
+	 */
 	carl9170_handle_command_response(ar, urb->transfer_buffer,
 					 urb->actual_length);
 
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 19befb3..39e8a59 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -20,8 +20,8 @@
 #include "ath.h"
 #include "reg.h"
 
-#define REG_READ	(common->ops->read)
-#define REG_WRITE	(common->ops->write)
+#define REG_READ			(common->ops->read)
+#define REG_WRITE(_ah, _reg, _val)	(common->ops->write)(_ah, _val, _reg)
 
 /**
  * ath_hw_set_bssid_mask - filter out bssids we listen
@@ -119,8 +119,8 @@
 {
 	void *ah = common->ah;
 
-	REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
-	REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
+	REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
+	REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
 }
 EXPORT_SYMBOL(ath_hw_setbssidmask);
 
@@ -139,7 +139,7 @@
 	void *ah = common->ah;
 
 	/* freeze */
-	REG_WRITE(ah, AR_MIBC_FMC, AR_MIBC);
+	REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
 
 	/* read */
 	cycles = REG_READ(ah, AR_CCCNT);
@@ -148,13 +148,13 @@
 	tx = REG_READ(ah, AR_TFCNT);
 
 	/* clear */
-	REG_WRITE(ah, 0, AR_CCCNT);
-	REG_WRITE(ah, 0, AR_RFCNT);
-	REG_WRITE(ah, 0, AR_RCCNT);
-	REG_WRITE(ah, 0, AR_TFCNT);
+	REG_WRITE(ah, AR_CCCNT, 0);
+	REG_WRITE(ah, AR_RFCNT, 0);
+	REG_WRITE(ah, AR_RCCNT, 0);
+	REG_WRITE(ah, AR_TFCNT, 0);
 
 	/* unfreeze */
-	REG_WRITE(ah, 0, AR_MIBC);
+	REG_WRITE(ah, AR_MIBC, 0);
 
 	/* update all cycle counters here */
 	common->cc_ani.cycles += cycles;
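
The reworked REG_WRITE macro at the top of the ath/hw.c hunk lets call sites use the conventional (ah, register, value) order while the underlying common->ops->write op still takes the value before the register; that is why every call in this file has its last two arguments swapped in the same patch. A tiny self-contained illustration of the wrapper trick follows; bus_write here is a stand-in for the bus op, not the ath layer's type.

#include <stdio.h>

/* Stand-in for the bus write op: the value comes before the register. */
static void bus_write(void *ah, unsigned int val, unsigned int reg)
{
	(void)ah;
	printf("write 0x%08x -> reg 0x%04x\n", val, reg);
}

/* Wrapper: callers pass (ah, reg, val); the macro swaps the last two
 * arguments so the op still receives (ah, val, reg). */
#define REG_WRITE(_ah, _reg, _val)	bus_write(_ah, _val, _reg)

int main(void)
{
	REG_WRITE(NULL, 0x8080, 0x1);	/* natural register-then-value order */
	return 0;
}
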
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c5a99c8..ddd6a4f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4652,7 +4652,7 @@
 	switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
 	case B43_BUS_BCMA:
-		bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci,
+		bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
 				      dev->dev->bdev, true);
 		break;
 #endif
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 136510e..8cb206a 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -796,7 +796,7 @@
 		status.mactime += mactime;
 		if (low_mactime_now <= mactime)
 			status.mactime -= 0x10000;
-		status.flag |= RX_FLAG_MACTIME_MPDU;
+		status.flag |= RX_FLAG_MACTIME_START;
 	}
 
 	chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index b8ffea6..849a28c 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -557,7 +557,7 @@
 		status.mactime += mactime;
 		if (low_mactime_now <= mactime)
 			status.mactime -= 0x10000;
-		status.flag |= RX_FLAG_MACTIME_MPDU;
+		status.flag |= RX_FLAG_MACTIME_START;
 	}
 
 	chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index c9d811e..1d92d87 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,13 +55,16 @@
 	  IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
 	  use the driver for an USB wireless card.
 
-config BRCMISCAN
-	bool "Broadcom I-Scan (OBSOLETE)"
-	depends on BRCMFMAC
+config BRCM_TRACING
+	bool "Broadcom device tracing"
+	depends on BRCMSMAC || BRCMFMAC
 	---help---
-	  This option enables the I-Scan method. By default fullmac uses the
-	  new E-Scan method which uses less memory in firmware and gives no
-	  limitation on the number of scan results.
+	  If you say Y here, the Broadcom wireless drivers will register
+	  with ftrace to dump event information into the trace ringbuffer.
+	  Tracing can be enabled at runtime to aid in debugging wireless
+	  issues. This option adds a small amount of overhead when tracing
+	  is disabled. If unsure, say Y to allow developers to better help
+	  you when wireless problems occur.
 
 config BRCMDBG
 	bool "Broadcom driver debug functions"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 9d5170b..1a6661a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,8 @@
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
 brcmfmac-objs += \
 		wl_cfg80211.o \
+		fwil.o \
+		fweh.o \
 		dhd_cdc.o \
 		dhd_common.o \
 		dhd_linux.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3b2c4c2..334ddab 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -42,7 +42,8 @@
 #ifdef CONFIG_BRCMFMAC_SDIO_OOB
 static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
 {
-	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id);
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
 	brcmf_dbg(INTR, "oob intr triggered\n");
 
@@ -71,7 +72,7 @@
 	brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq);
 	ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
 			  sdiodev->irq_flags, "brcmf_oob_intr",
-			  &sdiodev->func[1]->card->dev);
+			  &sdiodev->func[1]->dev);
 	if (ret != 0)
 		return ret;
 	spin_lock_init(&sdiodev->irq_en_lock);
@@ -84,6 +85,8 @@
 		return ret;
 	sdiodev->irq_wake = true;
 
+	sdio_claim_host(sdiodev->func[1]);
+
 	/* must configure SDIO_CCCR_IENx to enable irq */
 	data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
 	data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
@@ -95,6 +98,8 @@
 		data |= SDIO_SEPINT_ACT_HI;
 	brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
+	sdio_release_host(sdiodev->func[1]);
+
 	return 0;
 }
 
@@ -102,14 +107,16 @@
 {
 	brcmf_dbg(TRACE, "Entering\n");
 
+	sdio_claim_host(sdiodev->func[1]);
 	brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
 	brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+	sdio_release_host(sdiodev->func[1]);
 
 	if (sdiodev->irq_wake) {
 		disable_irq_wake(sdiodev->irq);
 		sdiodev->irq_wake = false;
 	}
-	free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev);
+	free_irq(sdiodev->irq, &sdiodev->func[1]->dev);
 	sdiodev->irq_en = false;
 
 	return 0;
@@ -117,7 +124,8 @@
 #else		/* CONFIG_BRCMFMAC_SDIO_OOB */
 static void brcmf_sdio_irqhandler(struct sdio_func *func)
 {
-	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
 	brcmf_dbg(INTR, "ib intr triggered\n");
 
@@ -249,9 +257,7 @@
 	int retval;
 
 	brcmf_dbg(INFO, "addr:0x%08x\n", addr);
-	sdio_claim_host(sdiodev->func[1]);
 	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
-	sdio_release_host(sdiodev->func[1]);
 	brcmf_dbg(INFO, "data:0x%02x\n", data);
 
 	if (ret)
@@ -266,9 +272,7 @@
 	int retval;
 
 	brcmf_dbg(INFO, "addr:0x%08x\n", addr);
-	sdio_claim_host(sdiodev->func[1]);
 	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
-	sdio_release_host(sdiodev->func[1]);
 	brcmf_dbg(INFO, "data:0x%08x\n", data);
 
 	if (ret)
@@ -283,9 +287,7 @@
 	int retval;
 
 	brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
-	sdio_claim_host(sdiodev->func[1]);
 	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
-	sdio_release_host(sdiodev->func[1]);
 
 	if (ret)
 		*ret = retval;
@@ -297,9 +299,7 @@
 	int retval;
 
 	brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
-	sdio_claim_host(sdiodev->func[1]);
 	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
-	sdio_release_host(sdiodev->func[1]);
 
 	if (ret)
 		*ret = retval;
@@ -364,8 +364,6 @@
 	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
 		  fn, addr, pkt->len);
 
-	sdio_claim_host(sdiodev->func[1]);
-
 	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
 	err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
 	if (err)
@@ -376,8 +374,6 @@
 					 fn, addr, pkt);
 
 done:
-	sdio_release_host(sdiodev->func[1]);
-
 	return err;
 }
 
@@ -391,8 +387,6 @@
 	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
 		  fn, addr, pktq->qlen);
 
-	sdio_claim_host(sdiodev->func[1]);
-
 	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
 	err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
 	if (err)
@@ -403,8 +397,6 @@
 					pktq);
 
 done:
-	sdio_release_host(sdiodev->func[1]);
-
 	return err;
 }
 
@@ -446,8 +438,6 @@
 	if (flags & SDIO_REQ_ASYNC)
 		return -ENOTSUPP;
 
-	sdio_claim_host(sdiodev->func[1]);
-
 	if (bar0 != sdiodev->sbwad) {
 		err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
 		if (err)
@@ -467,8 +457,6 @@
 					 addr, pkt);
 
 done:
-	sdio_release_host(sdiodev->func[1]);
-
 	return err;
 }
 
@@ -510,10 +498,8 @@
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* issue abort cmd52 command through F0 */
-	sdio_claim_host(sdiodev->func[1]);
 	brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
 				 SDIO_CCCR_ABORT, &t_func);
-	sdio_release_host(sdiodev->func[1]);
 
 	brcmf_dbg(TRACE, "Exit\n");
 	return 0;
@@ -530,9 +516,6 @@
 
 	regs = SI_ENUM_BASE;
 
-	/* Report the BAR, to fix if needed */
-	sdiodev->sbwad = SI_ENUM_BASE;
-
 	/* try to attach to the target device */
 	sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
 	if (!sdiodev->bus) {
@@ -551,6 +534,8 @@
 
 int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
 {
+	sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
 	if (sdiodev->bus) {
 		brcmf_sdbrcm_disconnect(sdiodev->bus);
 		sdiodev->bus = NULL;
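
Several of the bcmsdh.c hunks above remove per-accessor sdio_claim_host()/sdio_release_host() pairs; after this change the host is claimed once around a whole sequence of register accesses (see the OOB interrupt setup hunk) rather than once per access. The sketch below shows the same lock-coarsening idea with trivial stand-ins for the claim/release calls; it illustrates the pattern only and is not the driver's code.

#include <assert.h>
#include <stdio.h>

static int host_claimed;
static unsigned int regs[4];

/* Stand-ins for sdio_claim_host()/sdio_release_host(). */
static void claim_host(void)   { host_claimed = 1; }
static void release_host(void) { host_claimed = 0; }

/* Low-level accessor: does no claiming of its own, the caller must already
 * hold the host claim (this mirrors the reworked register helpers). */
static void reg_write(unsigned int reg, unsigned int val)
{
	assert(host_claimed);
	regs[reg] = val;
}

/* Caller claims the host once around the whole transaction instead of
 * claiming and releasing it inside every register access. */
static void configure(void)
{
	claim_host();
	reg_write(0, 0x1);
	reg_write(1, 0x2);
	reg_write(2, 0x3);
	release_host();
}

int main(void)
{
	configure();
	printf("regs: %u %u %u\n", regs[0], regs[1], regs[2]);
	return 0;
}
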
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3247d5..a800502 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -372,9 +372,7 @@
 	}
 
 	/* Enable Function 1 */
-	sdio_claim_host(sdiodev->func[1]);
 	err_ret = sdio_enable_func(sdiodev->func[1]);
-	sdio_release_host(sdiodev->func[1]);
 	if (err_ret)
 		brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret);
 
@@ -393,16 +391,14 @@
 	sdiodev->num_funcs = 2;
 
 	sdio_claim_host(sdiodev->func[1]);
+
 	err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
-	sdio_release_host(sdiodev->func[1]);
 	if (err_ret) {
 		brcmf_dbg(ERROR, "Failed to set F1 blocksize\n");
 		goto out;
 	}
 
-	sdio_claim_host(sdiodev->func[2]);
 	err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
-	sdio_release_host(sdiodev->func[2]);
 	if (err_ret) {
 		brcmf_dbg(ERROR, "Failed to set F2 blocksize\n");
 		goto out;
@@ -411,6 +407,7 @@
 	brcmf_sdioh_enablefuncs(sdiodev);
 
 out:
+	sdio_release_host(sdiodev->func[1]);
 	brcmf_dbg(TRACE, "Done\n");
 	return err_ret;
 }
@@ -459,95 +456,106 @@
 #endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
 
 static int brcmf_ops_sdio_probe(struct sdio_func *func,
-			      const struct sdio_device_id *id)
+				const struct sdio_device_id *id)
 {
-	int ret = 0;
+	int err;
 	struct brcmf_sdio_dev *sdiodev;
 	struct brcmf_bus *bus_if;
 
 	brcmf_dbg(TRACE, "Enter\n");
-	brcmf_dbg(TRACE, "func->class=%x\n", func->class);
-	brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
-	brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device);
-	brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num);
+	brcmf_dbg(TRACE, "Class=%x\n", func->class);
+	brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(TRACE, "Function#: %d\n", func->num);
 
-	if (func->num == 1) {
-		if (dev_get_drvdata(&func->card->dev)) {
-			brcmf_dbg(ERROR, "card private drvdata occupied\n");
-			return -ENXIO;
-		}
-		bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
-		if (!bus_if)
-			return -ENOMEM;
-		sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
-		if (!sdiodev) {
-			kfree(bus_if);
-			return -ENOMEM;
-		}
-		sdiodev->func[0] = func;
-		sdiodev->func[1] = func;
-		sdiodev->bus_if = bus_if;
-		bus_if->bus_priv.sdio = sdiodev;
-		bus_if->type = SDIO_BUS;
-		bus_if->align = BRCMF_SDALIGN;
-		dev_set_drvdata(&func->card->dev, sdiodev);
+	/* Consume func num 1 but don't do anything with it. */
+	if (func->num == 1)
+		return 0;
 
-		atomic_set(&sdiodev->suspend, false);
-		init_waitqueue_head(&sdiodev->request_byte_wait);
-		init_waitqueue_head(&sdiodev->request_word_wait);
-		init_waitqueue_head(&sdiodev->request_chain_wait);
-		init_waitqueue_head(&sdiodev->request_buffer_wait);
+	/* Ignore anything but func 2 */
+	if (func->num != 2)
+		return -ENODEV;
+
+	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+	if (!bus_if)
+		return -ENOMEM;
+	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+	if (!sdiodev) {
+		kfree(bus_if);
+		return -ENOMEM;
 	}
 
-	if (func->num == 2) {
-		sdiodev = dev_get_drvdata(&func->card->dev);
-		if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
-			return -ENODEV;
+	sdiodev->func[0] = func->card->sdio_func[0];
+	sdiodev->func[1] = func->card->sdio_func[0];
+	sdiodev->func[2] = func;
 
-		ret = brcmf_sdio_getintrcfg(sdiodev);
-		if (ret)
-			return ret;
-		sdiodev->func[2] = func;
+	sdiodev->bus_if = bus_if;
+	bus_if->bus_priv.sdio = sdiodev;
+	bus_if->align = BRCMF_SDALIGN;
+	dev_set_drvdata(&func->dev, bus_if);
+	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+	sdiodev->dev = &sdiodev->func[1]->dev;
 
-		bus_if = sdiodev->bus_if;
-		sdiodev->dev = &func->dev;
-		dev_set_drvdata(&func->dev, bus_if);
+	atomic_set(&sdiodev->suspend, false);
+	init_waitqueue_head(&sdiodev->request_byte_wait);
+	init_waitqueue_head(&sdiodev->request_word_wait);
+	init_waitqueue_head(&sdiodev->request_chain_wait);
+	init_waitqueue_head(&sdiodev->request_buffer_wait);
+	err = brcmf_sdio_getintrcfg(sdiodev);
+	if (err)
+		goto fail;
 
-		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
-		ret = brcmf_sdio_probe(sdiodev);
+	brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
+	err = brcmf_sdio_probe(sdiodev);
+	if (err) {
+		brcmf_dbg(ERROR, "F2 error, probe failed %d...\n", err);
+		goto fail;
 	}
+	brcmf_dbg(TRACE, "F2 init completed...\n");
+	return 0;
 
-	return ret;
+fail:
+	dev_set_drvdata(&func->dev, NULL);
+	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+	kfree(sdiodev);
+	kfree(bus_if);
+	return err;
 }
 
 static void brcmf_ops_sdio_remove(struct sdio_func *func)
 {
 	struct brcmf_bus *bus_if;
 	struct brcmf_sdio_dev *sdiodev;
-	brcmf_dbg(TRACE, "Enter\n");
-	brcmf_dbg(INFO, "func->class=%x\n", func->class);
-	brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor);
-	brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device);
-	brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
 
-	if (func->num == 2) {
-		bus_if = dev_get_drvdata(&func->dev);
+	brcmf_dbg(TRACE, "Enter\n");
+	brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(TRACE, "Function: %d\n", func->num);
+
+	if (func->num != 1 && func->num != 2)
+		return;
+
+	bus_if = dev_get_drvdata(&func->dev);
+	if (bus_if) {
 		sdiodev = bus_if->bus_priv.sdio;
-		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
 		brcmf_sdio_remove(sdiodev);
-		dev_set_drvdata(&func->card->dev, NULL);
-		dev_set_drvdata(&func->dev, NULL);
+
+		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
 		kfree(bus_if);
 		kfree(sdiodev);
 	}
+
+	brcmf_dbg(TRACE, "Exit\n");
 }
 
 #ifdef CONFIG_PM_SLEEP
 static int brcmf_sdio_suspend(struct device *dev)
 {
 	mmc_pm_flag_t sdio_flags;
-	struct sdio_func *func = dev_to_sdio_func(dev);
-	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	int ret = 0;
 
 	brcmf_dbg(TRACE, "\n");
@@ -573,8 +581,8 @@
 
 static int brcmf_sdio_resume(struct device *dev)
 {
-	struct sdio_func *func = dev_to_sdio_func(dev);
-	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
 	brcmf_sdio_wdtmr_enable(sdiodev, true);
 	atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 17e7ae7..24bc4e3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -23,6 +23,8 @@
 
 #define BRCMF_VERSION_STR		"4.218.248.5"
 
+#include "fweh.h"
+
 /*******************************************************************************
  * IO codes that are interpreted by dongle firmware
  ******************************************************************************/
@@ -38,8 +40,11 @@
 #define BRCMF_C_GET_SSID			25
 #define BRCMF_C_SET_SSID			26
 #define BRCMF_C_GET_CHANNEL			29
+#define BRCMF_C_SET_CHANNEL			30
 #define BRCMF_C_GET_SRL				31
+#define BRCMF_C_SET_SRL				32
 #define BRCMF_C_GET_LRL				33
+#define BRCMF_C_SET_LRL				34
 #define BRCMF_C_GET_RADIO			37
 #define BRCMF_C_SET_RADIO			38
 #define BRCMF_C_GET_PHYTYPE			39
@@ -58,6 +63,7 @@
 #define BRCMF_C_SET_COUNTRY			84
 #define BRCMF_C_GET_PM				85
 #define BRCMF_C_SET_PM				86
+#define BRCMF_C_GET_CURR_RATESET		114
 #define BRCMF_C_GET_AP				117
 #define BRCMF_C_SET_AP				118
 #define BRCMF_C_GET_RSSI			127
@@ -65,6 +71,7 @@
 #define BRCMF_C_SET_WSEC			134
 #define BRCMF_C_GET_PHY_NOISE			135
 #define BRCMF_C_GET_BSS_INFO			136
+#define BRCMF_C_GET_PHYLIST			180
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
 #define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON	201
@@ -100,29 +107,8 @@
 #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
 #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
 
-#define BRCMF_SCAN_ACTION_START      1
-#define BRCMF_SCAN_ACTION_CONTINUE   2
-#define WL_SCAN_ACTION_ABORT      3
-
-#define BRCMF_ISCAN_REQ_VERSION 1
-
-/* brcmf_iscan_results status values */
-#define BRCMF_SCAN_RESULTS_SUCCESS	0
-#define BRCMF_SCAN_RESULTS_PARTIAL	1
-#define BRCMF_SCAN_RESULTS_PENDING	2
-#define BRCMF_SCAN_RESULTS_ABORTED	3
-#define BRCMF_SCAN_RESULTS_NO_MEM	4
-
-/* Indicates this key is using soft encrypt */
-#define WL_SOFT_KEY	(1 << 0)
 /* primary (ie tx) key */
 #define BRCMF_PRIMARY_KEY	(1 << 1)
-/* Reserved for backward compat */
-#define WL_KF_RES_4	(1 << 4)
-/* Reserved for backward compat */
-#define WL_KF_RES_5	(1 << 5)
-/* Indicates a group key for a IBSS PEER */
-#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)
 
 /* For supporting multiple interfaces */
 #define BRCMF_MAX_IFS	16
@@ -130,10 +116,6 @@
 #define DOT11_BSSTYPE_ANY			2
 #define DOT11_MAX_DEFAULT_KEYS	4
 
-#define BRCMF_EVENT_MSG_LINK		0x01
-#define BRCMF_EVENT_MSG_FLUSHTXQ	0x02
-#define BRCMF_EVENT_MSG_GROUP		0x04
-
 #define BRCMF_ESCAN_REQ_VERSION 1
 
 #define WLC_BSS_RSSI_ON_CHANNEL		0x0002
@@ -141,108 +123,6 @@
 #define BRCMF_MAXRATES_IN_SET		16	/* max # of rates in rateset */
 #define BRCMF_STA_ASSOC			0x10		/* Associated */
 
-struct brcmf_event_msg {
-	__be16 version;
-	__be16 flags;
-	__be32 event_type;
-	__be32 status;
-	__be32 reason;
-	__be32 auth_type;
-	__be32 datalen;
-	u8 addr[ETH_ALEN];
-	char ifname[IFNAMSIZ];
-	u8 ifidx;
-	u8 bsscfgidx;
-} __packed;
-
-struct brcm_ethhdr {
-	u16 subtype;
-	u16 length;
-	u8 version;
-	u8 oui[3];
-	u16 usr_subtype;
-} __packed;
-
-struct brcmf_event {
-	struct ethhdr eth;
-	struct brcm_ethhdr hdr;
-	struct brcmf_event_msg msg;
-} __packed;
-
-/* event codes sent by the dongle to this driver */
-#define BRCMF_E_SET_SSID			0
-#define BRCMF_E_JOIN				1
-#define BRCMF_E_START				2
-#define BRCMF_E_AUTH				3
-#define BRCMF_E_AUTH_IND			4
-#define BRCMF_E_DEAUTH				5
-#define BRCMF_E_DEAUTH_IND			6
-#define BRCMF_E_ASSOC				7
-#define BRCMF_E_ASSOC_IND			8
-#define BRCMF_E_REASSOC				9
-#define BRCMF_E_REASSOC_IND			10
-#define BRCMF_E_DISASSOC			11
-#define BRCMF_E_DISASSOC_IND			12
-#define BRCMF_E_QUIET_START			13
-#define BRCMF_E_QUIET_END			14
-#define BRCMF_E_BEACON_RX			15
-#define BRCMF_E_LINK				16
-#define BRCMF_E_MIC_ERROR			17
-#define BRCMF_E_NDIS_LINK			18
-#define BRCMF_E_ROAM				19
-#define BRCMF_E_TXFAIL				20
-#define BRCMF_E_PMKID_CACHE			21
-#define BRCMF_E_RETROGRADE_TSF			22
-#define BRCMF_E_PRUNE				23
-#define BRCMF_E_AUTOAUTH			24
-#define BRCMF_E_EAPOL_MSG			25
-#define BRCMF_E_SCAN_COMPLETE			26
-#define BRCMF_E_ADDTS_IND			27
-#define BRCMF_E_DELTS_IND			28
-#define BRCMF_E_BCNSENT_IND			29
-#define BRCMF_E_BCNRX_MSG			30
-#define BRCMF_E_BCNLOST_MSG			31
-#define BRCMF_E_ROAM_PREP			32
-#define BRCMF_E_PFN_NET_FOUND			33
-#define BRCMF_E_PFN_NET_LOST			34
-#define BRCMF_E_RESET_COMPLETE			35
-#define BRCMF_E_JOIN_START			36
-#define BRCMF_E_ROAM_START			37
-#define BRCMF_E_ASSOC_START			38
-#define BRCMF_E_IBSS_ASSOC			39
-#define BRCMF_E_RADIO				40
-#define BRCMF_E_PSM_WATCHDOG			41
-#define BRCMF_E_PROBREQ_MSG			44
-#define BRCMF_E_SCAN_CONFIRM_IND		45
-#define BRCMF_E_PSK_SUP				46
-#define BRCMF_E_COUNTRY_CODE_CHANGED		47
-#define	BRCMF_E_EXCEEDED_MEDIUM_TIME		48
-#define BRCMF_E_ICV_ERROR			49
-#define BRCMF_E_UNICAST_DECODE_ERROR		50
-#define BRCMF_E_MULTICAST_DECODE_ERROR		51
-#define BRCMF_E_TRACE				52
-#define BRCMF_E_IF				54
-#define BRCMF_E_RSSI				56
-#define BRCMF_E_PFN_SCAN_COMPLETE		57
-#define BRCMF_E_EXTLOG_MSG			58
-#define BRCMF_E_ACTION_FRAME			59
-#define BRCMF_E_ACTION_FRAME_COMPLETE		60
-#define BRCMF_E_PRE_ASSOC_IND			61
-#define BRCMF_E_PRE_REASSOC_IND			62
-#define BRCMF_E_CHANNEL_ADOPTED			63
-#define BRCMF_E_AP_STARTED			64
-#define BRCMF_E_DFS_AP_STOP			65
-#define BRCMF_E_DFS_AP_RESUME			66
-#define BRCMF_E_RESERVED1			67
-#define BRCMF_E_RESERVED2			68
-#define BRCMF_E_ESCAN_RESULT			69
-#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE	70
-#define BRCMF_E_DCS_REQUEST			73
-
-#define BRCMF_E_FIFO_CREDIT_MAP			74
-
-#define BRCMF_E_LAST				75
-
 #define BRCMF_E_STATUS_SUCCESS			0
 #define BRCMF_E_STATUS_FAIL			1
 #define BRCMF_E_STATUS_TIMEOUT			2
@@ -318,6 +198,12 @@
 #define BRCMF_E_LINK_ASSOC_REC			3
 #define BRCMF_E_LINK_BSSCFG_DIS			4
 
+/* Small, medium and maximum buffer sizes for dcmd
+ */
+#define BRCMF_DCMD_SMLEN	256
+#define BRCMF_DCMD_MEDLEN	1536
+#define BRCMF_DCMD_MAXLEN	8192
+
 /* Pattern matching filter. Specifies an offset within received packets to
  * start matching, the pattern to match, the size of the pattern, and a bitmask
  * that indicates which bits within the pattern should be matched.
@@ -397,7 +283,7 @@
 	/* # rates in this set */
 	__le32 count;
 	/* rates in 500kbps units w/hi bit set if basic */
-	u8 rates[WL_NUMRATES];
+	u8 rates[BRCMF_MAXRATES_IN_SET];
 };
 
 struct brcmf_ssid {
@@ -446,14 +332,6 @@
 	__le16 channel_list[1];	/* list of chanspecs */
 };
 
-/* incremental scan struct */
-struct brcmf_iscan_params_le {
-	__le32 version;
-	__le16 action;
-	__le16 scan_duration;
-	struct brcmf_scan_params_le params_le;
-};
-
 struct brcmf_scan_results {
 	u32 buflen;
 	u32 version;
@@ -461,12 +339,6 @@
 	struct brcmf_bss_info_le bss_info_le[];
 };
 
-struct brcmf_scan_results_le {
-	__le32 buflen;
-	__le32 version;
-	__le32 count;
-};
-
 struct brcmf_escan_params_le {
 	__le32 version;
 	__le16 action;
@@ -502,23 +374,6 @@
 	struct brcmf_assoc_params_le params_le;
 };
 
-/* incremental scan results struct */
-struct brcmf_iscan_results {
-	union {
-		u32 status;
-		__le32 status_le;
-	};
-	union {
-		struct brcmf_scan_results results;
-		struct brcmf_scan_results_le results_le;
-	};
-};
-
-/* size of brcmf_iscan_results not including variable length array */
-#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
-	(sizeof(struct brcmf_scan_results) + \
-	 offsetof(struct brcmf_iscan_results, results))
-
 struct brcmf_wsec_key {
 	u32 index;		/* key index */
 	u32 len;		/* key length */
@@ -623,7 +478,6 @@
 	u8 wme_dp;		/* wme discard priority */
 
 	/* Dongle media info */
-	bool iswl;		/* Dongle-resident driver is wl */
 	unsigned long drv_version;	/* Version of dongle-resident driver */
 	u8 mac[ETH_ALEN];		/* MAC address obtained from dongle */
 
@@ -651,26 +505,26 @@
 	int in_suspend;		/* flag set to 1 when early suspend called */
 	int dtim_skip;		/* dtim skip , default 0 means wake each dtim */
 
-	/* Pkt filter defination */
-	char *pktfilter[100];
-	int pktfilter_count;
-
-	u8 country_code[BRCM_CNTRY_BUF_SZ];
-	char eventmask[BRCMF_EVENTING_MASK_LEN];
-
 	struct brcmf_if *iflist[BRCMF_MAX_IFS];
 
 	struct mutex proto_block;
+	unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
 
-	struct work_struct setmacaddr_work;
-	struct work_struct multicast_work;
 	u8 macvalue[ETH_ALEN];
 	atomic_t pend_8021x_cnt;
+	wait_queue_head_t pend_8021x_wait;
+
+	struct brcmf_fweh_info fweh;
 #ifdef DEBUG
 	struct dentry *dbgfs_dir;
 #endif
 };
 
+struct bcmevent_name {
+	uint event;
+	const char *name;
+};
+
 struct brcmf_if_event {
 	u8 ifidx;
 	u8 action;
@@ -678,47 +532,54 @@
 	u8 bssidx;
 };
 
-struct bcmevent_name {
-	uint event;
-	const char *name;
+/* forward declaration */
+struct brcmf_cfg80211_vif;
+
+/**
+ * struct brcmf_if - interface control information.
+ *
+ * @drvr: points to device related information.
+ * @vif: points to cfg80211 specific interface information.
+ * @ndev: associated network device.
+ * @stats: interface specific network statistics.
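+ * @setmacaddr_work: worker object for setting the mac address.
+ * @multicast_work: worker object for updating the multicast list.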
+ * @idx: interface index in device firmware.
+ * @bssidx: index of bss associated with this interface.
+ * @mac_addr: assigned mac address.
+ */
+struct brcmf_if {
+	struct brcmf_pub *drvr;
+	struct brcmf_cfg80211_vif *vif;
+	struct net_device *ndev;
+	struct net_device_stats stats;
+	struct work_struct setmacaddr_work;
+	struct work_struct multicast_work;
+	int idx;
+	s32 bssidx;
+	u8 mac_addr[ETH_ALEN];
 };
 
+static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	return ifp->bssidx;
+}
+
 extern const struct bcmevent_name bcmevent_names[];
 
-extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
-			  char *buf, uint len);
-extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
-				   char *buf, uint buflen, s32 bssidx);
-
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
-extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
-
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
 /* Query dongle */
 extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 				       uint cmd, void *buf, uint len);
+extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+				    void *buf, uint len);
 
-#ifdef DEBUG
-extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
-#endif				/* DEBUG */
-
-extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
-extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
-			      void *pktdata, struct brcmf_event_msg *,
-			      void **data_ptr);
-
+extern int brcmf_net_attach(struct brcmf_if *ifp);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
+				     s32 bssidx, char *name, u8 *mac_addr);
 extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
 
-extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
-extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
-					     int enable, int master_mode);
-
-#define	BRCMF_DCMD_SMLEN	256	/* "small" cmd buffer required */
-#define BRCMF_DCMD_MEDLEN	1536	/* "med" cmd buffer required */
-#define	BRCMF_DCMD_MAXLEN	8192	/* max length cmd buffer required */
-
 #endif				/* _BRCMF_H_ */
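
The brcmf_ndev_bssidx() helper added above relies on the standard netdev private-data layout: struct brcmf_if is allocated in the same block as the net_device and reached through netdev_priv(). The userspace-sized sketch below shows that layout trick; both structs are stand-ins, and the real netdev_priv() additionally aligns the private area, which is omitted here.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct net_device; the real one is far larger. */
struct net_device {
	char name[16];
	/* the private area follows the structure in the same allocation */
};

struct brcmf_if_sketch {
	int bssidx;
};

/* alloc_netdev()-style allocation: device plus private data in one block. */
static struct net_device *alloc_dev(size_t priv_size)
{
	return calloc(1, sizeof(struct net_device) + priv_size);
}

/* netdev_priv()-style accessor: the private data sits right after the
 * net_device structure. */
static void *dev_priv(struct net_device *ndev)
{
	return ndev + 1;
}

int main(void)
{
	struct net_device *ndev = alloc_dev(sizeof(struct brcmf_if_sketch));
	struct brcmf_if_sketch *ifp;

	if (!ndev)
		return 1;
	snprintf(ndev->name, sizeof(ndev->name), "wlan0");
	ifp = dev_priv(ndev);
	ifp->bssidx = 2;
	printf("%s bssidx=%d\n", ndev->name, ifp->bssidx);
	free(ndev);
	return 0;
}
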
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 9b8ee19..b8f2487 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -45,7 +45,6 @@
 
 /* interface structure between common and bus layer */
 struct brcmf_bus {
-	u8 type;		/* bus type */
 	union {
 		struct brcmf_sdio_dev *sdio;
 		struct brcmf_usbdev *usb;
@@ -85,7 +84,7 @@
 			 struct sk_buff *pkt, int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct device *dev, int ifidx,
+extern void brcmf_rx_frame(struct device *dev, u8 ifidx,
 			   struct sk_buff_head *rxlist);
 static inline void brcmf_rx_packet(struct device *dev, int ifidx,
 				   struct sk_buff *pkt)
@@ -111,9 +110,6 @@
 
 extern int brcmf_bus_start(struct device *dev);
 
-extern int brcmf_add_if(struct device *dev, int ifidx,
-			char *name, u8 *mac_addr);
-
 #ifdef CONFIG_BRCMFMAC_SDIO
 extern void brcmf_sdio_exit(void);
 extern void brcmf_sdio_init(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a5c15ca..87536d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -23,8 +23,6 @@
 
 #include <linux/types.h>
 #include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <defs.h>
 
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
@@ -277,76 +275,6 @@
 	return ret;
 }
 
-int
-brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
-		  int len)
-{
-	struct brcmf_proto *prot = drvr->prot;
-	int ret = -1;
-
-	if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
-		brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
-		return ret;
-	}
-	mutex_lock(&drvr->proto_block);
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	if (len > BRCMF_DCMD_MAXLEN)
-		goto done;
-
-	if (prot->pending == true) {
-		brcmf_dbg(TRACE, "CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
-			  dcmd->cmd, (unsigned long)dcmd->cmd, prot->lastcmd,
-			  (unsigned long)prot->lastcmd);
-		if (dcmd->cmd == BRCMF_C_SET_VAR ||
-		    dcmd->cmd == BRCMF_C_GET_VAR)
-			brcmf_dbg(TRACE, "iovar cmd=%s\n", (char *)dcmd->buf);
-
-		goto done;
-	}
-
-	prot->pending = true;
-	prot->lastcmd = dcmd->cmd;
-	if (dcmd->set)
-		ret = brcmf_proto_cdc_set_dcmd(drvr, ifidx, dcmd->cmd,
-						   dcmd->buf, len);
-	else {
-		ret = brcmf_proto_cdc_query_dcmd(drvr, ifidx, dcmd->cmd,
-						     dcmd->buf, len);
-		if (ret > 0)
-			dcmd->used = ret -
-					sizeof(struct brcmf_proto_cdc_dcmd);
-	}
-
-	if (ret >= 0)
-		ret = 0;
-	else {
-		struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
-		/* len == needed when set/query fails from dongle */
-		dcmd->needed = le32_to_cpu(msg->len);
-	}
-
-	/* Intercept the wme_dp dongle cmd here */
-	if (!ret && dcmd->cmd == BRCMF_C_SET_VAR &&
-	    !strcmp(dcmd->buf, "wme_dp")) {
-		int slen;
-		__le32 val = 0;
-
-		slen = strlen("wme_dp") + 1;
-		if (len >= (int)(slen + sizeof(int)))
-			memcpy(&val, (char *)dcmd->buf + slen, sizeof(int));
-		drvr->wme_dp = (u8) le32_to_cpu(val);
-	}
-
-	prot->pending = false;
-
-done:
-	mutex_unlock(&drvr->proto_block);
-
-	return ret;
-}
-
 static bool pkt_sum_needed(struct sk_buff *skb)
 {
 	return skb->ip_summed == CHECKSUM_PARTIAL;
@@ -458,35 +386,6 @@
 	drvr->prot = NULL;
 }
 
-int brcmf_proto_init(struct brcmf_pub *drvr)
-{
-	int ret = 0;
-	char buf[128];
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	mutex_lock(&drvr->proto_block);
-
-	/* Get the device MAC address */
-	strcpy(buf, "cur_etheraddr");
-	ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
-					  buf, sizeof(buf));
-	if (ret < 0) {
-		mutex_unlock(&drvr->proto_block);
-		return ret;
-	}
-	memcpy(drvr->mac, buf, ETH_ALEN);
-
-	mutex_unlock(&drvr->proto_block);
-
-	ret = brcmf_c_preinit_dcmds(drvr);
-
-	/* Always assumes wl for now */
-	drvr->iswl = true;
-
-	return ret;
-}
-
 void brcmf_proto_stop(struct brcmf_pub *drvr)
 {
 	/* Nothing to do for CDC */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 15c5db5..eee7175 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -18,28 +18,21 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/sched.h>
 #include <linux/netdevice.h>
-#include <asm/unaligned.h>
-#include <defs.h>
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "dhd.h"
 #include "dhd_bus.h"
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
+#include "fwil.h"
 
-#define BRCM_OUI			"\x00\x10\x18"
-#define DOT11_OUI_LEN			3
-#define BCMILCP_BCM_SUBTYPE_EVENT	1
-#define PKTFILTER_BUF_SIZE		2048
+#define PKTFILTER_BUF_SIZE		128
 #define BRCMF_ARPOL_MODE		0xb	/* agent|snoop|peer_autoreply */
-
-#define MSGTRACE_VERSION	1
-
-#define BRCMF_PKT_FILTER_FIXED_LEN	offsetof(struct brcmf_pkt_filter_le, u)
-#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN	\
-	offsetof(struct brcmf_pkt_filter_pattern_le, mask_and_pattern)
+#define BRCMF_DEFAULT_BCN_TIMEOUT	3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME	40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME	40
+#define BRCMF_DEFAULT_PACKET_FILTER	"100 0 0 0 0x01 0x00"
 
 #ifdef DEBUG
 static const char brcmf_version[] =
@@ -50,89 +43,6 @@
 	"Dongle Host Driver, version " BRCMF_VERSION_STR;
 #endif
 
-/* Message trace header */
-struct msgtrace_hdr {
-	u8 version;
-	u8 spare;
-	__be16 len;		/* Len of the trace */
-	__be32 seqnum;		/* Sequence number of message. Useful
-				 * if the messsage has been lost
-				 * because of DMA error or a bus reset
-				 * (ex: SDIO Func2)
-				 */
-	__be32 discarded_bytes;	/* Number of discarded bytes because of
-				 trace overflow  */
-	__be32 discarded_printf;	/* Number of discarded printf
-				 because of trace overflow */
-} __packed;
-
-
-uint
-brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
-{
-	uint len;
-
-	len = strlen(name) + 1;
-
-	if ((len + datalen) > buflen)
-		return 0;
-
-	strncpy(buf, name, buflen);
-
-	/* append data onto the end of the name string */
-	if (data && datalen) {
-		memcpy(&buf[len], data, datalen);
-		len += datalen;
-	}
-
-	return len;
-}
-
-uint
-brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
-		       char *buf, uint buflen, s32 bssidx)
-{
-	const s8 *prefix = "bsscfg:";
-	s8 *p;
-	u32 prefixlen;
-	u32 namelen;
-	u32 iolen;
-	__le32 bssidx_le;
-
-	if (bssidx == 0)
-		return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
-
-	prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
-	namelen = (u32) strlen(name) + 1; /* lengh of iovar  name + null */
-	iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
-
-	if (buflen < 0 || iolen > (u32)buflen) {
-		brcmf_dbg(ERROR, "buffer is too short\n");
-		return 0;
-	}
-
-	p = buf;
-
-	/* copy prefix, no null */
-	memcpy(p, prefix, prefixlen);
-	p += prefixlen;
-
-	/* copy iovar name including null */
-	memcpy(p, name, namelen);
-	p += namelen;
-
-	/* bss config index as first data */
-	bssidx_le = cpu_to_le32(bssidx);
-	memcpy(p, &bssidx_le, sizeof(bssidx_le));
-	p += sizeof(bssidx_le);
-
-	/* parameter buffer follows */
-	if (datalen)
-		memcpy(p, data, datalen);
-
-	return iolen;
-
-}
 
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
 		      struct sk_buff *pkt, int prec)
@@ -184,399 +94,6 @@
 	return p != NULL;
 }
 
-#ifdef DEBUG
-static void
-brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
-{
-	uint i, status, reason;
-	bool group = false, flush_txq = false, link = false;
-	char *auth_str, *event_name;
-	unsigned char *buf;
-	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
-	static struct {
-		uint event;
-		char *event_name;
-	} event_names[] = {
-		{
-		BRCMF_E_SET_SSID, "SET_SSID"}, {
-		BRCMF_E_JOIN, "JOIN"}, {
-		BRCMF_E_START, "START"}, {
-		BRCMF_E_AUTH, "AUTH"}, {
-		BRCMF_E_AUTH_IND, "AUTH_IND"}, {
-		BRCMF_E_DEAUTH, "DEAUTH"}, {
-		BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
-		BRCMF_E_ASSOC, "ASSOC"}, {
-		BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
-		BRCMF_E_REASSOC, "REASSOC"}, {
-		BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
-		BRCMF_E_DISASSOC, "DISASSOC"}, {
-		BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
-		BRCMF_E_QUIET_START, "START_QUIET"}, {
-		BRCMF_E_QUIET_END, "END_QUIET"}, {
-		BRCMF_E_BEACON_RX, "BEACON_RX"}, {
-		BRCMF_E_LINK, "LINK"}, {
-		BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
-		BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
-		BRCMF_E_ROAM, "ROAM"}, {
-		BRCMF_E_TXFAIL, "TXFAIL"}, {
-		BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
-		BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
-		BRCMF_E_PRUNE, "PRUNE"}, {
-		BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
-		BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
-		BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
-		BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
-		BRCMF_E_DELTS_IND, "DELTS_IND"}, {
-		BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
-		BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
-		BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
-		BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
-		BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
-		BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
-		BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
-		BRCMF_E_JOIN_START, "JOIN_START"}, {
-		BRCMF_E_ROAM_START, "ROAM_START"}, {
-		BRCMF_E_ASSOC_START, "ASSOC_START"}, {
-		BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
-		BRCMF_E_RADIO, "RADIO"}, {
-		BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
-		BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
-		BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
-		BRCMF_E_PSK_SUP, "PSK_SUP"}, {
-		BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
-		BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
-		BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
-		BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
-		BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
-		BRCMF_E_TRACE, "TRACE"}, {
-		BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
-		BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
-		BRCMF_E_IF, "IF"}, {
-		BRCMF_E_RSSI, "RSSI"}, {
-		BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
-		BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
-	};
-	uint event_type, flags, auth_type, datalen;
-	static u32 seqnum_prev;
-	struct msgtrace_hdr hdr;
-	u32 nblost;
-	char *s, *p;
-
-	event_type = be32_to_cpu(event->event_type);
-	flags = be16_to_cpu(event->flags);
-	status = be32_to_cpu(event->status);
-	reason = be32_to_cpu(event->reason);
-	auth_type = be32_to_cpu(event->auth_type);
-	datalen = be32_to_cpu(event->datalen);
-	/* debug dump of event messages */
-	sprintf(eabuf, "%pM", event->addr);
-
-	event_name = "UNKNOWN";
-	for (i = 0; i < ARRAY_SIZE(event_names); i++) {
-		if (event_names[i].event == event_type)
-			event_name = event_names[i].event_name;
-	}
-
-	brcmf_dbg(EVENT, "EVENT: %s, event ID = %d\n", event_name, event_type);
-	brcmf_dbg(EVENT, "flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
-		  flags, status, reason, auth_type, eabuf);
-
-	if (flags & BRCMF_EVENT_MSG_LINK)
-		link = true;
-	if (flags & BRCMF_EVENT_MSG_GROUP)
-		group = true;
-	if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
-		flush_txq = true;
-
-	switch (event_type) {
-	case BRCMF_E_START:
-	case BRCMF_E_DEAUTH:
-	case BRCMF_E_DISASSOC:
-		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
-		break;
-
-	case BRCMF_E_ASSOC_IND:
-	case BRCMF_E_REASSOC_IND:
-		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
-		break;
-
-	case BRCMF_E_ASSOC:
-	case BRCMF_E_REASSOC:
-		if (status == BRCMF_E_STATUS_SUCCESS)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, SUCCESS\n",
-				  event_name, eabuf);
-		else if (status == BRCMF_E_STATUS_TIMEOUT)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, TIMEOUT\n",
-				  event_name, eabuf);
-		else if (status == BRCMF_E_STATUS_FAIL)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
-				  event_name, eabuf, (int)reason);
-		else
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, unexpected status %d\n",
-				  event_name, eabuf, (int)status);
-		break;
-
-	case BRCMF_E_DEAUTH_IND:
-	case BRCMF_E_DISASSOC_IND:
-		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, reason %d\n",
-			  event_name, eabuf, (int)reason);
-		break;
-
-	case BRCMF_E_AUTH:
-	case BRCMF_E_AUTH_IND:
-		if (auth_type == WLAN_AUTH_OPEN)
-			auth_str = "Open System";
-		else if (auth_type == WLAN_AUTH_SHARED_KEY)
-			auth_str = "Shared Key";
-		else {
-			sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
-			auth_str = err_msg;
-		}
-		if (event_type == BRCMF_E_AUTH_IND)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s\n",
-				  event_name, eabuf, auth_str);
-		else if (status == BRCMF_E_STATUS_SUCCESS)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, SUCCESS\n",
-				  event_name, eabuf, auth_str);
-		else if (status == BRCMF_E_STATUS_TIMEOUT)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
-				  event_name, eabuf, auth_str);
-		else if (status == BRCMF_E_STATUS_FAIL) {
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
-				  event_name, eabuf, auth_str, (int)reason);
-		}
-
-		break;
-
-	case BRCMF_E_JOIN:
-	case BRCMF_E_ROAM:
-	case BRCMF_E_SET_SSID:
-		if (status == BRCMF_E_STATUS_SUCCESS)
-			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n",
-				  event_name, eabuf);
-		else if (status == BRCMF_E_STATUS_FAIL)
-			brcmf_dbg(EVENT, "MACEVENT: %s, failed\n", event_name);
-		else if (status == BRCMF_E_STATUS_NO_NETWORKS)
-			brcmf_dbg(EVENT, "MACEVENT: %s, no networks found\n",
-				  event_name);
-		else
-			brcmf_dbg(EVENT, "MACEVENT: %s, unexpected status %d\n",
-				  event_name, (int)status);
-		break;
-
-	case BRCMF_E_BEACON_RX:
-		if (status == BRCMF_E_STATUS_SUCCESS)
-			brcmf_dbg(EVENT, "MACEVENT: %s, SUCCESS\n", event_name);
-		else if (status == BRCMF_E_STATUS_FAIL)
-			brcmf_dbg(EVENT, "MACEVENT: %s, FAIL\n", event_name);
-		else
-			brcmf_dbg(EVENT, "MACEVENT: %s, status %d\n",
-				  event_name, status);
-		break;
-
-	case BRCMF_E_LINK:
-		brcmf_dbg(EVENT, "MACEVENT: %s %s\n",
-			  event_name, link ? "UP" : "DOWN");
-		break;
-
-	case BRCMF_E_MIC_ERROR:
-		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
-			  event_name, eabuf, group, flush_txq);
-		break;
-
-	case BRCMF_E_ICV_ERROR:
-	case BRCMF_E_UNICAST_DECODE_ERROR:
-	case BRCMF_E_MULTICAST_DECODE_ERROR:
-		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
-		break;
-
-	case BRCMF_E_TXFAIL:
-		brcmf_dbg(EVENT, "MACEVENT: %s, RA %s\n", event_name, eabuf);
-		break;
-
-	case BRCMF_E_SCAN_COMPLETE:
-	case BRCMF_E_PMKID_CACHE:
-		brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
-		break;
-
-	case BRCMF_E_ESCAN_RESULT:
-		brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
-		datalen = 0;
-		break;
-
-	case BRCMF_E_PFN_NET_FOUND:
-	case BRCMF_E_PFN_NET_LOST:
-	case BRCMF_E_PFN_SCAN_COMPLETE:
-		brcmf_dbg(EVENT, "PNOEVENT: %s\n", event_name);
-		break;
-
-	case BRCMF_E_PSK_SUP:
-	case BRCMF_E_PRUNE:
-		brcmf_dbg(EVENT, "MACEVENT: %s, status %d, reason %d\n",
-			  event_name, (int)status, (int)reason);
-		break;
-
-	case BRCMF_E_TRACE:
-		buf = (unsigned char *) event_data;
-		memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
-
-		if (hdr.version != MSGTRACE_VERSION) {
-			brcmf_dbg(ERROR,
-				  "MACEVENT: %s [unsupported version --> brcmf"
-				  " version:%d dongle version:%d]\n",
-				  event_name, MSGTRACE_VERSION, hdr.version);
-			/* Reset datalen to avoid display below */
-			datalen = 0;
-			break;
-		}
-
-		/* There are 2 bytes available at the end of data */
-		*(buf + sizeof(struct msgtrace_hdr)
-			 + be16_to_cpu(hdr.len)) = '\0';
-
-		if (be32_to_cpu(hdr.discarded_bytes)
-		    || be32_to_cpu(hdr.discarded_printf))
-			brcmf_dbg(ERROR,
-				  "WLC_E_TRACE: [Discarded traces in dongle -->"
-				  " discarded_bytes %d discarded_printf %d]\n",
-				  be32_to_cpu(hdr.discarded_bytes),
-				  be32_to_cpu(hdr.discarded_printf));
-
-		nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
-		if (nblost > 0)
-			brcmf_dbg(ERROR, "WLC_E_TRACE: [Event lost --> seqnum "
-				  " %d nblost %d\n", be32_to_cpu(hdr.seqnum),
-				  nblost);
-		seqnum_prev = be32_to_cpu(hdr.seqnum);
-
-		/* Display the trace buffer. Advance from \n to \n to
-		 * avoid display big
-		 * printf (issue with Linux printk )
-		 */
-		p = (char *)&buf[sizeof(struct msgtrace_hdr)];
-		while ((s = strstr(p, "\n")) != NULL) {
-			*s = '\0';
-			pr_debug("%s\n", p);
-			p = s + 1;
-		}
-		pr_debug("%s\n", p);
-
-		/* Reset datalen to avoid display below */
-		datalen = 0;
-		break;
-
-	case BRCMF_E_RSSI:
-		brcmf_dbg(EVENT, "MACEVENT: %s %d\n",
-			  event_name, be32_to_cpu(*((__be32 *)event_data)));
-		break;
-
-	default:
-		brcmf_dbg(EVENT,
-			  "MACEVENT: %s %d, MAC %s, status %d, reason %d, "
-			  "auth %d\n", event_name, event_type, eabuf,
-			  (int)status, (int)reason, (int)auth_type);
-		break;
-	}
-
-	/* show any appended data */
-	brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
-}
-#endif				/* DEBUG */
-
-int
-brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
-		   struct brcmf_event_msg *event, void **data_ptr)
-{
-	/* check whether packet is a BRCM event pkt */
-	struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
-	struct brcmf_if_event *ifevent;
-	char *event_data;
-	u32 type, status;
-	u16 flags;
-	int evlen;
-
-	if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
-		brcmf_dbg(ERROR, "mismatched OUI, bailing\n");
-		return -EBADE;
-	}
-
-	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
-	if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
-	    BCMILCP_BCM_SUBTYPE_EVENT) {
-		brcmf_dbg(ERROR, "mismatched subtype, bailing\n");
-		return -EBADE;
-	}
-
-	*data_ptr = &pvt_data[1];
-	event_data = *data_ptr;
-
-	/* memcpy since BRCM event pkt may be unaligned. */
-	memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
-
-	type = get_unaligned_be32(&event->event_type);
-	flags = get_unaligned_be16(&event->flags);
-	status = get_unaligned_be32(&event->status);
-	evlen = get_unaligned_be32(&event->datalen) +
-		sizeof(struct brcmf_event);
-
-	switch (type) {
-	case BRCMF_E_IF:
-		ifevent = (struct brcmf_if_event *) event_data;
-		brcmf_dbg(TRACE, "if event\n");
-
-		if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
-			if (ifevent->action == BRCMF_E_IF_ADD)
-				brcmf_add_if(drvr->dev, ifevent->ifidx,
-					     event->ifname,
-					     pvt_data->eth.h_dest);
-			else
-				brcmf_del_if(drvr, ifevent->ifidx);
-		} else {
-			brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
-				  ifevent->ifidx, event->ifname);
-		}
-
-		/* send up the if event: btamp user needs it */
-		*ifidx = brcmf_ifname2idx(drvr, event->ifname);
-		break;
-
-		/* These are what external supplicant/authenticator wants */
-	case BRCMF_E_LINK:
-	case BRCMF_E_ASSOC_IND:
-	case BRCMF_E_REASSOC_IND:
-	case BRCMF_E_DISASSOC_IND:
-	case BRCMF_E_MIC_ERROR:
-	default:
-		/* Fall through: this should get _everything_  */
-
-		*ifidx = brcmf_ifname2idx(drvr, event->ifname);
-		brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
-			  type, flags, status);
-
-		/* put it back to BRCMF_E_NDIS_LINK */
-		if (type == BRCMF_E_NDIS_LINK) {
-			u32 temp1;
-			__be32 temp2;
-
-			temp1 = get_unaligned_be32(&event->event_type);
-			brcmf_dbg(TRACE, "Converted to WLC_E_LINK type %d\n",
-				  temp1);
-
-			temp2 = cpu_to_be32(BRCMF_E_NDIS_LINK);
-			memcpy((void *)(&pvt_data->msg.event_type), &temp2,
-			       sizeof(pvt_data->msg.event_type));
-		}
-		break;
-	}
-
-#ifdef DEBUG
-	if (BRCMF_EVENT_ON())
-		brcmf_c_show_host_event(event, event_data);
-#endif /* DEBUG */
-
-	return 0;
-}
-
 /* Convert user's input in hex pattern to byte-size mask */
 static int brcmf_c_pattern_atoh(char *src, char *dst)
 {
@@ -603,90 +120,57 @@
 	return i;
 }
 
-void
-brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
-			     int master_mode)
+static void
+brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
+				 int master_mode)
 {
 	unsigned long res;
-	char *argv[8];
-	int i = 0;
-	const char *str;
-	int buf_len;
-	int str_len;
+	char *argv;
 	char *arg_save = NULL, *arg_org = NULL;
-	int rc;
-	char buf[128];
+	s32 err;
 	struct brcmf_pkt_filter_enable_le enable_parm;
-	struct brcmf_pkt_filter_enable_le *pkt_filterp;
-	__le32 mmode_le;
 
-	arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
+	arg_save = kstrdup(arg, GFP_ATOMIC);
 	if (!arg_save)
 		goto fail;
 
 	arg_org = arg_save;
-	memcpy(arg_save, arg, strlen(arg) + 1);
 
-	argv[i] = strsep(&arg_save, " ");
+	argv = strsep(&arg_save, " ");
 
-	i = 0;
-	if (NULL == argv[i]) {
+	if (argv == NULL) {
 		brcmf_dbg(ERROR, "No args provided\n");
 		goto fail;
 	}
 
-	str = "pkt_filter_enable";
-	str_len = strlen(str);
-	strncpy(buf, str, str_len);
-	buf[str_len] = '\0';
-	buf_len = str_len + 1;
-
-	pkt_filterp = (struct brcmf_pkt_filter_enable_le *) (buf + str_len + 1);
-
 	/* Parse packet filter id. */
 	enable_parm.id = 0;
-	if (!kstrtoul(argv[i], 0, &res))
+	if (!kstrtoul(argv, 0, &res))
 		enable_parm.id = cpu_to_le32((u32)res);
 
-	/* Parse enable/disable value. */
+	/* Enable/disable the specified filter. */
 	enable_parm.enable = cpu_to_le32(enable);
 
-	buf_len += sizeof(enable_parm);
-	memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
+	err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
+				       sizeof(enable_parm));
+	if (err)
+		brcmf_dbg(ERROR, "Set pkt_filter_enable error (%d)\n", err);
 
-	/* Enable/disable the specified filter. */
-	rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
-	rc = rc >= 0 ? 0 : rc;
-	if (rc)
-		brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
-			  arg, rc);
-	else
-		brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
-
-	/* Contorl the master mode */
-	mmode_le = cpu_to_le32(master_mode);
-	brcmf_c_mkiovar("pkt_filter_mode", (char *)&mmode_le, 4, buf,
-		    sizeof(buf));
-	rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf,
-				       sizeof(buf));
-	rc = rc >= 0 ? 0 : rc;
-	if (rc)
-		brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
-			  arg, rc);
+	/* Control the master mode */
+	err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
+	if (err)
+		brcmf_dbg(ERROR, "Set pkt_filter_mode error (%d)\n", err);
 
 fail:
 	kfree(arg_org);
 }
 
-void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
+static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
 {
-	const char *str;
-	struct brcmf_pkt_filter_le pkt_filter;
-	struct brcmf_pkt_filter_le *pkt_filterp;
+	struct brcmf_pkt_filter_le *pkt_filter;
 	unsigned long res;
 	int buf_len;
-	int str_len;
-	int rc;
+	s32 err;
 	u32 mask_size;
 	u32 pattern_size;
 	char *argv[8], *buf = NULL;
@@ -704,104 +188,64 @@
 		goto fail;
 
 	argv[i] = strsep(&arg_save, " ");
-	while (argv[i++])
+	while (argv[i]) {
+		i++;
+		if (i >= 8) {
+			brcmf_dbg(ERROR, "Too many parameters\n");
+			goto fail;
+		}
 		argv[i] = strsep(&arg_save, " ");
+	}
 
-	i = 0;
-	if (NULL == argv[i]) {
-		brcmf_dbg(ERROR, "No args provided\n");
+	if (i != 6) {
+		brcmf_dbg(ERROR, "Not enough args provided %d\n", i);
 		goto fail;
 	}
 
-	str = "pkt_filter_add";
-	strcpy(buf, str);
-	str_len = strlen(str);
-	buf_len = str_len + 1;
-
-	pkt_filterp = (struct brcmf_pkt_filter_le *) (buf + str_len + 1);
+	pkt_filter = (struct brcmf_pkt_filter_le *)buf;
 
 	/* Parse packet filter id. */
-	pkt_filter.id = 0;
-	if (!kstrtoul(argv[i], 0, &res))
-		pkt_filter.id = cpu_to_le32((u32)res);
-
-	if (NULL == argv[++i]) {
-		brcmf_dbg(ERROR, "Polarity not provided\n");
-		goto fail;
-	}
+	pkt_filter->id = 0;
+	if (!kstrtoul(argv[0], 0, &res))
+		pkt_filter->id = cpu_to_le32((u32)res);
 
 	/* Parse filter polarity. */
-	pkt_filter.negate_match = 0;
-	if (!kstrtoul(argv[i], 0, &res))
-		pkt_filter.negate_match = cpu_to_le32((u32)res);
-
-	if (NULL == argv[++i]) {
-		brcmf_dbg(ERROR, "Filter type not provided\n");
-		goto fail;
-	}
+	pkt_filter->negate_match = 0;
+	if (!kstrtoul(argv[1], 0, &res))
+		pkt_filter->negate_match = cpu_to_le32((u32)res);
 
 	/* Parse filter type. */
-	pkt_filter.type = 0;
-	if (!kstrtoul(argv[i], 0, &res))
-		pkt_filter.type = cpu_to_le32((u32)res);
-
-	if (NULL == argv[++i]) {
-		brcmf_dbg(ERROR, "Offset not provided\n");
-		goto fail;
-	}
+	pkt_filter->type = 0;
+	if (!kstrtoul(argv[2], 0, &res))
+		pkt_filter->type = cpu_to_le32((u32)res);
 
 	/* Parse pattern filter offset. */
-	pkt_filter.u.pattern.offset = 0;
-	if (!kstrtoul(argv[i], 0, &res))
-		pkt_filter.u.pattern.offset = cpu_to_le32((u32)res);
-
-	if (NULL == argv[++i]) {
-		brcmf_dbg(ERROR, "Bitmask not provided\n");
-		goto fail;
-	}
+	pkt_filter->u.pattern.offset = 0;
+	if (!kstrtoul(argv[3], 0, &res))
+		pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
 
 	/* Parse pattern filter mask. */
-	mask_size =
-	    brcmf_c_pattern_atoh
-		   (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
-
-	if (NULL == argv[++i]) {
-		brcmf_dbg(ERROR, "Pattern not provided\n");
-		goto fail;
-	}
+	mask_size = brcmf_c_pattern_atoh(argv[4],
+			(char *)pkt_filter->u.pattern.mask_and_pattern);
 
 	/* Parse pattern filter pattern. */
-	pattern_size =
-	    brcmf_c_pattern_atoh(argv[i],
-				   (char *)&pkt_filterp->u.pattern.
-				   mask_and_pattern[mask_size]);
+	pattern_size = brcmf_c_pattern_atoh(argv[5],
+		(char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
 
 	if (mask_size != pattern_size) {
 		brcmf_dbg(ERROR, "Mask and pattern not the same size\n");
 		goto fail;
 	}
 
-	pkt_filter.u.pattern.size_bytes = cpu_to_le32(mask_size);
-	buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
-	buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+	pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
+	buf_len = offsetof(struct brcmf_pkt_filter_le,
+			   u.pattern.mask_and_pattern);
+	buf_len += mask_size + pattern_size;
 
-	/* Keep-alive attributes are set in local
-	 * variable (keep_alive_pkt), and
-	 ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
-	 ** guarantee that the buffer is properly aligned.
-	 */
-	memcpy((char *)pkt_filterp,
-	       &pkt_filter,
-	       BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
-
-	rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
-	rc = rc >= 0 ? 0 : rc;
-
-	if (rc)
-		brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
-			  arg, rc);
-	else
-		brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
+	err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
+				       buf_len);
+	if (err)
+		brcmf_dbg(ERROR, "Set pkt_filter_add error (%d)\n", err);
 
 fail:
 	kfree(arg_org);
@@ -809,130 +253,125 @@
 	kfree(buf);
 }
 
-static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
-	char iovbuf[32];
-	int retcode;
-	__le32 arp_mode_le;
-
-	arp_mode_le = cpu_to_le32(arp_mode);
-	brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
-			sizeof(iovbuf));
-	retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
-				   iovbuf, sizeof(iovbuf));
-	retcode = retcode >= 0 ? 0 : retcode;
-	if (retcode)
-		brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, retcode = %d\n",
-			  arp_mode, retcode);
-	else
-		brcmf_dbg(TRACE, "successfully set ARP offload mode to 0x%x\n",
-			  arp_mode);
-}
-
-static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
-{
-	char iovbuf[32];
-	int retcode;
-	__le32 arp_enable_le;
-
-	arp_enable_le = cpu_to_le32(arp_enable);
-
-	brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
-			iovbuf, sizeof(iovbuf));
-	retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
-				   iovbuf, sizeof(iovbuf));
-	retcode = retcode >= 0 ? 0 : retcode;
-	if (retcode)
-		brcmf_dbg(TRACE, "failed to enable ARP offload to %d, retcode = %d\n",
-			  arp_enable, retcode);
-	else
-		brcmf_dbg(TRACE, "successfully enabled ARP offload to %d\n",
-			  arp_enable);
-}
-
-int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
-{
-	char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];	/*  Room for
-				 "event_msgs" + '\0' + bitvec  */
-	char buf[128], *ptr;
-	__le32 roaming_le = cpu_to_le32(1);
-	__le32 bcn_timeout_le = cpu_to_le32(3);
-	__le32 scan_assoc_time_le = cpu_to_le32(40);
-	__le32 scan_unassoc_time_le = cpu_to_le32(40);
-	int i;
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+	u8 buf[BRCMF_DCMD_SMLEN];
+	char *ptr;
+	s32 err;
 	struct brcmf_bus_dcmd *cmdlst;
 	struct list_head *cur, *q;
 
-	mutex_lock(&drvr->proto_block);
-
-	/* Set Country code */
-	if (drvr->country_code[0] != 0) {
-		if (brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_COUNTRY,
-					      drvr->country_code,
-					      sizeof(drvr->country_code)) < 0)
-			brcmf_dbg(ERROR, "country code setting failed\n");
+	/* retrieve mac address */
+	err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+				       sizeof(ifp->mac_addr));
+	if (err < 0) {
+		brcmf_dbg(ERROR, "Retreiving cur_etheraddr failed, %d\n",
+			  err);
+		goto done;
 	}
+	memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
 
 	/* query for 'ver' to get version info from firmware */
 	memset(buf, 0, sizeof(buf));
-	ptr = buf;
-	brcmf_c_mkiovar("ver", NULL, 0, buf, sizeof(buf));
-	brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
+	strcpy(buf, "ver");
+	err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+	if (err < 0) {
+		brcmf_dbg(ERROR, "Retreiving version information failed, %d\n",
+			  err);
+		goto done;
+	}
+	ptr = (char *)buf;
 	strsep(&ptr, "\n");
 	/* Print fw version info */
 	brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
 
-	/* Setup timeout if Beacons are lost and roam is off to report
-		 link down */
-	brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
-		    sizeof(iovbuf));
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
-				  sizeof(iovbuf));
-
-	/* Enable/Disable build-in roaming to allowed ext supplicant to take
-		 of romaing */
-	brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
-		      iovbuf, sizeof(iovbuf));
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
-				  sizeof(iovbuf));
-
-	/* Setup event_msgs */
-	brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
-		      iovbuf, sizeof(iovbuf));
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
-				  sizeof(iovbuf));
-
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
-		 (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
-		 (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
-
-	/* Set and enable ARP offload feature */
-	brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
-	brcmf_c_arp_offload_enable(drvr, true);
-
-	/* Set up pkt filter */
-	for (i = 0; i < drvr->pktfilter_count; i++) {
-		brcmf_c_pktfilter_offload_set(drvr, drvr->pktfilter[i]);
-		brcmf_c_pktfilter_offload_enable(drvr, drvr->pktfilter[i],
-						 0, true);
+	/*
+	 * Setup timeout if Beacons are lost and roam is off to report
+	 * link down
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+				      BRCMF_DEFAULT_BCN_TIMEOUT);
+	if (err) {
+		brcmf_dbg(ERROR, "bcn_timeout error (%d)\n", err);
+		goto done;
 	}
 
+	/* Enable/Disable built-in roaming to allow ext supplicant to take
+	 * care of roaming
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+	if (err) {
+		brcmf_dbg(ERROR, "roam_off error (%d)\n", err);
+		goto done;
+	}
+
+	/* Setup event_msgs, enable E_IF */
+	err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+				       BRCMF_EVENTING_MASK_LEN);
+	if (err) {
+		brcmf_dbg(ERROR, "Get event_msgs error (%d)\n", err);
+		goto done;
+	}
+	setbit(eventmask, BRCMF_E_IF);
+	err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+				       BRCMF_EVENTING_MASK_LEN);
+	if (err) {
+		brcmf_dbg(ERROR, "Set event_msgs error (%d)\n", err);
+		goto done;
+	}
+
+	/* Setup default scan channel time */
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+				    BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+	if (err) {
+		brcmf_dbg(ERROR, "BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+			  err);
+		goto done;
+	}
+
+	/* Setup default scan unassoc time */
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+				    BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+	if (err) {
+		brcmf_dbg(ERROR, "BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+			  err);
+		goto done;
+	}
+
+	/* Try to set and enable ARP offload feature, this may fail */
+	err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
+	if (err) {
+		brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+			  BRCMF_ARPOL_MODE, err);
+		err = 0;
+	} else {
+		err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
+		if (err) {
+			brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
+				  err);
+			err = 0;
+		} else
+			brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
+				  BRCMF_ARPOL_MODE);
+	}
+
+	/* Setup packet filter */
+	brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
+	brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
+					 0, true);
+
 	/* set bus specific command if there is any */
-	list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
+	list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
 		cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
 		if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
-			brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
-					cmdlst->param_len, iovbuf,
-					sizeof(iovbuf));
-			brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
-						 iovbuf, sizeof(iovbuf));
+			brcmf_fil_iovar_data_set(ifp, cmdlst->name,
+						 cmdlst->param,
+						 cmdlst->param_len);
 		}
 		list_del(cur);
 		kfree(cmdlst);
 	}
-
-	mutex_unlock(&drvr->proto_block);
-
-	return 0;
+done:
+	return err;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 7f89540..7e58e8c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -14,16 +14,12 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #include <linux/debugfs.h>
-#include <linux/if_ether.h>
-#include <linux/if.h>
-#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
 #include <linux/module.h>
 
-#include <defs.h>
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "dhd.h"
-#include "dhd_bus.h"
 #include "dhd_dbg.h"
 
 static struct dentry *root_folder;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index fb508c2..a0e18a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -27,10 +27,11 @@
 #define BRCMF_HDRS_VAL	0x0040
 #define BRCMF_BYTES_VAL	0x0080
 #define BRCMF_INTR_VAL	0x0100
-#define BRCMF_GLOM_VAL	0x0400
-#define BRCMF_EVENT_VAL	0x0800
-#define BRCMF_BTA_VAL	0x1000
-#define BRCMF_ISCAN_VAL 0x2000
+#define BRCMF_GLOM_VAL	0x0200
+#define BRCMF_EVENT_VAL	0x0400
+#define BRCMF_BTA_VAL	0x0800
+#define BRCMF_FIL_VAL	0x1000
+#define BRCMF_USB_VAL	0x2000
 
 #if defined(DEBUG)
 
@@ -56,6 +57,7 @@
 #define BRCMF_BYTES_ON()	(brcmf_msg_level & BRCMF_BYTES_VAL)
 #define BRCMF_GLOM_ON()		(brcmf_msg_level & BRCMF_GLOM_VAL)
 #define BRCMF_EVENT_ON()	(brcmf_msg_level & BRCMF_EVENT_VAL)
+#define BRCMF_FIL_ON()		(brcmf_msg_level & BRCMF_FIL_VAL)
 
 #else	/* (defined DEBUG) || (defined DEBUG) */
 
@@ -67,6 +69,7 @@
 #define BRCMF_BYTES_ON()	0
 #define BRCMF_GLOM_ON()		0
 #define BRCMF_EVENT_ON()	0
+#define BRCMF_FIL_ON()		0
 
 #endif				/* defined(DEBUG) */
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d7c76ce..b6c86b0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -16,27 +16,11 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/random.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/hardirq.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
 #include <linux/module.h>
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
-#include <defs.h>
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 
@@ -45,45 +29,19 @@
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "wl_cfg80211.h"
+#include "fwil.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
+MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
-
-/* Interface control information */
-struct brcmf_if {
-	struct brcmf_pub *drvr;	/* back pointer to brcmf_pub */
-	/* OS/stack specifics */
-	struct net_device *ndev;
-	struct net_device_stats stats;
-	int idx;		/* iface idx in dongle */
-	u8 mac_addr[ETH_ALEN];	/* assigned MAC address */
-};
+#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */
 
 /* Error bits */
 int brcmf_msg_level = BRCMF_ERROR_VAL;
 module_param(brcmf_msg_level, int, 0);
 
-int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
-{
-	int i = BRCMF_MAX_IFS;
-	struct brcmf_if *ifp;
-
-	if (name == NULL || *name == '\0')
-		return 0;
-
-	while (--i > 0) {
-		ifp = drvr->iflist[i];
-		if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
-			break;
-	}
-
-	brcmf_dbg(TRACE, "return idx %d for \"%s\"\n", i, name);
-
-	return i;		/* default - the primary interface */
-}
 
 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
 {
@@ -105,38 +63,33 @@
 
 static void _brcmf_set_multicast_list(struct work_struct *work)
 {
+	struct brcmf_if *ifp;
 	struct net_device *ndev;
 	struct netdev_hw_addr *ha;
-	u32 dcmd_value, cnt;
+	u32 cmd_value, cnt;
 	__le32 cnt_le;
-	__le32 dcmd_le_value;
-
-	struct brcmf_dcmd dcmd;
 	char *buf, *bufp;
-	uint buflen;
-	int ret;
+	u32 buflen;
+	s32 err;
 
-	struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
-						    multicast_work);
+	brcmf_dbg(TRACE, "enter\n");
 
-	ndev = drvr->iflist[0]->ndev;
-	cnt = netdev_mc_count(ndev);
+	ifp = container_of(work, struct brcmf_if, multicast_work);
+	ndev = ifp->ndev;
 
 	/* Determine initial value of allmulti flag */
-	dcmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
+	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
 
 	/* Send down the multicast list first. */
-
-	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
-	bufp = buf = kmalloc(buflen, GFP_ATOMIC);
-	if (!bufp)
+	cnt = netdev_mc_count(ndev);
+	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
+	buf = kmalloc(buflen, GFP_ATOMIC);
+	if (!buf)
 		return;
-
-	strcpy(bufp, "mcast_list");
-	bufp += strlen("mcast_list") + 1;
+	bufp = buf;
 
 	cnt_le = cpu_to_le32(cnt);
-	memcpy(bufp, &cnt_le, sizeof(cnt));
+	memcpy(bufp, &cnt_le, sizeof(cnt_le));
 	bufp += sizeof(cnt_le);
 
 	netdev_for_each_mc_addr(ha, ndev) {
@@ -147,129 +100,66 @@
 		cnt--;
 	}
 
-	memset(&dcmd, 0, sizeof(dcmd));
-	dcmd.cmd = BRCMF_C_SET_VAR;
-	dcmd.buf = buf;
-	dcmd.len = buflen;
-	dcmd.set = true;
-
-	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
-	if (ret < 0) {
-		brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
-			  brcmf_ifname(drvr, 0), cnt);
-		dcmd_value = cnt ? true : dcmd_value;
+	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
+	if (err < 0) {
+		brcmf_dbg(ERROR, "Setting mcast_list failed, %d\n", err);
+		cmd_value = cnt ? true : cmd_value;
 	}
 
 	kfree(buf);
 
-	/* Now send the allmulti setting.  This is based on the setting in the
+	/*
+	 * Now send the allmulti setting.  This is based on the setting in the
 	 * net_device flags, but might be modified above to be turned on if we
 	 * were trying to set some addresses and dongle rejected it...
 	 */
+	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
+	if (err < 0)
+		brcmf_dbg(ERROR, "Setting allmulti failed, %d\n", err);
 
-	buflen = sizeof("allmulti") + sizeof(dcmd_value);
-	buf = kmalloc(buflen, GFP_ATOMIC);
-	if (!buf)
-		return;
-
-	dcmd_le_value = cpu_to_le32(dcmd_value);
-
-	if (!brcmf_c_mkiovar
-	    ("allmulti", (void *)&dcmd_le_value,
-	    sizeof(dcmd_le_value), buf, buflen)) {
-		brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
-			  brcmf_ifname(drvr, 0),
-			  (int)sizeof(dcmd_value), buflen);
-		kfree(buf);
-		return;
-	}
-
-	memset(&dcmd, 0, sizeof(dcmd));
-	dcmd.cmd = BRCMF_C_SET_VAR;
-	dcmd.buf = buf;
-	dcmd.len = buflen;
-	dcmd.set = true;
-
-	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
-	if (ret < 0) {
-		brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
-			  brcmf_ifname(drvr, 0),
-			  le32_to_cpu(dcmd_le_value));
-	}
-
-	kfree(buf);
-
-	/* Finally, pick up the PROMISC flag as well, like the NIC
-		 driver does */
-
-	dcmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
-	dcmd_le_value = cpu_to_le32(dcmd_value);
-
-	memset(&dcmd, 0, sizeof(dcmd));
-	dcmd.cmd = BRCMF_C_SET_PROMISC;
-	dcmd.buf = &dcmd_le_value;
-	dcmd.len = sizeof(dcmd_le_value);
-	dcmd.set = true;
-
-	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
-	if (ret < 0) {
-		brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
-			  brcmf_ifname(drvr, 0),
-			  le32_to_cpu(dcmd_le_value));
-	}
+	/* Finally, pick up the PROMISC flag */
+	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
+	if (err < 0)
+		brcmf_dbg(ERROR, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
+			  err);
 }
 
 static void
 _brcmf_set_mac_address(struct work_struct *work)
 {
-	char buf[32];
-	struct brcmf_dcmd dcmd;
-	int ret;
-
-	struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
-						    setmacaddr_work);
+	struct brcmf_if *ifp;
+	s32 err;
 
 	brcmf_dbg(TRACE, "enter\n");
-	if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
-			   ETH_ALEN, buf, 32)) {
-		brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
-			  brcmf_ifname(drvr, 0));
-		return;
+
+	ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
+				       ETH_ALEN);
+	if (err < 0) {
+		brcmf_dbg(ERROR, "Setting cur_etheraddr failed, %d\n", err);
+	} else {
+		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
+			  ifp->mac_addr);
+		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
 	}
-	memset(&dcmd, 0, sizeof(dcmd));
-	dcmd.cmd = BRCMF_C_SET_VAR;
-	dcmd.buf = buf;
-	dcmd.len = 32;
-	dcmd.set = true;
-
-	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
-	if (ret < 0)
-		brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
-			  brcmf_ifname(drvr, 0));
-	else
-		memcpy(drvr->iflist[0]->ndev->dev_addr,
-		       drvr->macvalue, ETH_ALEN);
-
-	return;
 }
 
 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
 	struct sockaddr *sa = (struct sockaddr *)addr;
 
-	memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
-	schedule_work(&drvr->setmacaddr_work);
+	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
+	schedule_work(&ifp->setmacaddr_work);
 	return 0;
 }
 
 static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
 
-	schedule_work(&drvr->multicast_work);
+	schedule_work(&ifp->multicast_work);
 }
 
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -282,7 +172,7 @@
 
 	/* Reject if down */
 	if (!drvr->bus_if->drvr_up ||
-	    (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
+	    (drvr->bus_if->state != BRCMF_BUS_DATA)) {
 		brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
 			  drvr->bus_if->drvr_up,
 			  drvr->bus_if->state);
@@ -360,32 +250,13 @@
 		}
 }
 
-static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
-			    void *pktdata, struct brcmf_event_msg *event,
-			    void **data)
-{
-	int bcmerror = 0;
-
-	bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
-	if (bcmerror != 0)
-		return bcmerror;
-
-	if (drvr->iflist[*ifidx]->ndev)
-		brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
-				     event, *data);
-
-	return bcmerror;
-}
-
-void brcmf_rx_frame(struct device *dev, int ifidx,
+void brcmf_rx_frame(struct device *dev, u8 ifidx,
 		    struct sk_buff_head *skb_list)
 {
 	unsigned char *eth;
 	uint len;
-	void *data;
 	struct sk_buff *skb, *pnext;
 	struct brcmf_if *ifp;
-	struct brcmf_event_msg event;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
 
@@ -432,10 +303,7 @@
 		skb_pull(skb, ETH_HLEN);
 
 		/* Process special event packets and then discard them */
-		if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
-			brcmf_host_event(drvr, &ifidx,
-					  skb_mac_header(skb),
-					  &event, &data);
+		brcmf_fweh_process_skb(drvr, skb, &ifidx);
 
 		if (drvr->iflist[ifidx]) {
 			ifp = drvr->iflist[ifidx];
@@ -471,9 +339,11 @@
 	eh = (struct ethhdr *)(txp->data);
 	type = ntohs(eh->h_proto);
 
-	if (type == ETH_P_PAE)
+	if (type == ETH_P_PAE) {
 		atomic_dec(&drvr->pend_8021x_cnt);
-
+		if (waitqueue_active(&drvr->pend_8021x_wait))
+			wake_up(&drvr->pend_8021x_wait);
+	}
 }
 
 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -497,83 +367,26 @@
 	return &ifp->stats;
 }
 
-/* Retrieve current toe component enables, which are kept
-	 as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
+/*
+ * Set current toe component enables in toe_ol iovar,
+ * and set toe global enable iovar
+ */
+static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
 {
-	struct brcmf_dcmd dcmd;
-	__le32 toe_le;
-	char buf[32];
-	int ret;
+	s32 err;
 
-	memset(&dcmd, 0, sizeof(dcmd));
-
-	dcmd.cmd = BRCMF_C_GET_VAR;
-	dcmd.buf = buf;
-	dcmd.len = (uint) sizeof(buf);
-	dcmd.set = false;
-
-	strcpy(buf, "toe_ol");
-	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
-	if (ret < 0) {
-		/* Check for older dongle image that doesn't support toe_ol */
-		if (ret == -EIO) {
-			brcmf_dbg(ERROR, "%s: toe not supported by device\n",
-				  brcmf_ifname(drvr, ifidx));
-			return -EOPNOTSUPP;
-		}
-
-		brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
-			  brcmf_ifname(drvr, ifidx), ret);
-		return ret;
+	err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
+	if (err < 0) {
+		brcmf_dbg(ERROR, "Setting toe_ol failed, %d\n", err);
+		return err;
 	}
 
-	memcpy(&toe_le, buf, sizeof(u32));
-	*toe_ol = le32_to_cpu(toe_le);
-	return 0;
-}
+	err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
+	if (err < 0)
+		brcmf_dbg(ERROR, "Setting toe failed, %d\n", err);
 
-/* Set current toe component enables in toe_ol iovar,
-	 and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
-{
-	struct brcmf_dcmd dcmd;
-	char buf[32];
-	int ret;
-	__le32 toe_le = cpu_to_le32(toe_ol);
+	return err;
 
-	memset(&dcmd, 0, sizeof(dcmd));
-
-	dcmd.cmd = BRCMF_C_SET_VAR;
-	dcmd.buf = buf;
-	dcmd.len = (uint) sizeof(buf);
-	dcmd.set = true;
-
-	/* Set toe_ol as requested */
-	strcpy(buf, "toe_ol");
-	memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
-
-	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
-	if (ret < 0) {
-		brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
-			  brcmf_ifname(drvr, ifidx), ret);
-		return ret;
-	}
-
-	/* Enable toe globally only if any components are enabled. */
-	toe_le = cpu_to_le32(toe_ol != 0);
-
-	strcpy(buf, "toe");
-	memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
-
-	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
-	if (ret < 0) {
-		brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
-			  brcmf_ifname(drvr, ifidx), ret);
-		return ret;
-	}
-
-	return 0;
 }
 
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
@@ -591,8 +404,9 @@
 	.get_drvinfo = brcmf_ethtool_get_drvinfo,
 };
 
-static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
 {
+	struct brcmf_pub *drvr = ifp->drvr;
 	struct ethtool_drvinfo info;
 	char drvname[sizeof(info.driver)];
 	u32 cmd;
@@ -629,12 +443,9 @@
 			brcmf_dbg(ERROR, "dongle is not up\n");
 			return -ENODEV;
 		}
-
 		/* finally, report dongle driver type */
-		else if (drvr->iswl)
-			sprintf(info.driver, "wl");
 		else
-			sprintf(info.driver, "xx");
+			sprintf(info.driver, "wl");
 
 		sprintf(info.version, "%lu", drvr->drv_version);
 		if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -646,7 +457,7 @@
 		/* Get toe offload components from dongle */
 	case ETHTOOL_GRXCSUM:
 	case ETHTOOL_GTXCSUM:
-		ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
+		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
@@ -667,7 +478,7 @@
 			return -EFAULT;
 
 		/* Read the current settings, update and write back */
-		ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
+		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
@@ -679,18 +490,16 @@
 		else
 			toe_cmpnt &= ~csum_dir;
 
-		ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
+		ret = brcmf_toe_set(ifp, toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
 		/* If setting TX checksum mode, tell Linux the new mode */
 		if (cmd == ETHTOOL_STXCSUM) {
 			if (edata.data)
-				drvr->iflist[0]->ndev->features |=
-				    NETIF_F_IP_CSUM;
+				ifp->ndev->features |= NETIF_F_IP_CSUM;
 			else
-				drvr->iflist[0]->ndev->features &=
-				    ~NETIF_F_IP_CSUM;
+				ifp->ndev->features &= ~NETIF_F_IP_CSUM;
 		}
 
 		break;
@@ -714,80 +523,23 @@
 		return -1;
 
 	if (cmd == SIOCETHTOOL)
-		return brcmf_ethtool(drvr, ifr->ifr_data);
+		return brcmf_ethtool(ifp, ifr->ifr_data);
 
 	return -EOPNOTSUPP;
 }
 
-/* called only from within this driver. Sends a command to the dongle. */
-s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
-{
-	struct brcmf_dcmd dcmd;
-	s32 err = 0;
-	int buflen = 0;
-	bool is_set_key_cmd;
-	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
-
-	memset(&dcmd, 0, sizeof(dcmd));
-	dcmd.cmd = cmd;
-	dcmd.buf = arg;
-	dcmd.len = len;
-
-	if (dcmd.buf != NULL)
-		buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
-
-	/* send to dongle (must be up, and wl) */
-	if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
-		brcmf_dbg(ERROR, "DONGLE_DOWN\n");
-		err = -EIO;
-		goto done;
-	}
-
-	if (!drvr->iswl) {
-		err = -EIO;
-		goto done;
-	}
-
-	/*
-	 * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
-	 * set key CMD to prevent M4 encryption.
-	 */
-	is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
-			  ((dcmd.cmd == BRCMF_C_SET_VAR) &&
-			   !(strncmp("wsec_key", dcmd.buf, 9))) ||
-			  ((dcmd.cmd == BRCMF_C_SET_VAR) &&
-			   !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
-	if (is_set_key_cmd)
-		brcmf_netdev_wait_pend8021x(ndev);
-
-	err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
-
-done:
-	if (err > 0)
-		err = 0;
-
-	return err;
-}
-
-int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
-{
-	brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
-		  dcmd->cmd, dcmd->buf, dcmd->len);
-
-	return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
-}
-
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = ifp->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
-	brcmf_cfg80211_down(drvr->config);
+
 	if (drvr->bus_if->drvr_up == 0)
 		return 0;
 
+	brcmf_cfg80211_down(ndev);
+
 	/* Set state and stop OS transmissions */
 	drvr->bus_if->drvr_up = false;
 	netif_stop_queue(ndev);
@@ -802,38 +554,35 @@
 	struct brcmf_bus *bus_if = drvr->bus_if;
 	u32 toe_ol;
 	s32 ret = 0;
-	uint up = 0;
 
 	brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
 
-	if (ifp->idx == 0) {	/* do it only for primary eth0 */
-		/* If bus is not ready, can't continue */
-		if (bus_if->state != BRCMF_BUS_DATA) {
-			brcmf_dbg(ERROR, "failed bus is not ready\n");
-			return -EAGAIN;
-		}
-
-		atomic_set(&drvr->pend_8021x_cnt, 0);
-
-		memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
-
-		/* Get current TOE mode from dongle */
-		if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
-		    && (toe_ol & TOE_TX_CSUM_OL) != 0)
-			drvr->iflist[ifp->idx]->ndev->features |=
-				NETIF_F_IP_CSUM;
-		else
-			drvr->iflist[ifp->idx]->ndev->features &=
-				~NETIF_F_IP_CSUM;
+	/* If bus is not ready, can't continue */
+	if (bus_if->state != BRCMF_BUS_DATA) {
+		brcmf_dbg(ERROR, "failed bus is not ready\n");
+		return -EAGAIN;
 	}
 
+	atomic_set(&drvr->pend_8021x_cnt, 0);
+
+	memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+
+	/* Get current TOE mode from dongle */
+	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
+	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
+		drvr->iflist[ifp->idx]->ndev->features |=
+			NETIF_F_IP_CSUM;
+	else
+		drvr->iflist[ifp->idx]->ndev->features &=
+			~NETIF_F_IP_CSUM;
+
 	/* make sure RF is ready for work */
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
+	brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
 
 	/* Allow transmit calls */
 	netif_start_queue(ndev);
 	drvr->bus_if->drvr_up = true;
-	if (brcmf_cfg80211_up(drvr->config)) {
+	if (brcmf_cfg80211_up(ndev)) {
 		brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
 		return -1;
 	}
@@ -851,48 +600,38 @@
 	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
 };
 
-static int brcmf_net_attach(struct brcmf_if *ifp)
+static const struct net_device_ops brcmf_netdev_ops_virt = {
+	.ndo_open = brcmf_cfg80211_up,
+	.ndo_stop = brcmf_cfg80211_down,
+	.ndo_get_stats = brcmf_netdev_get_stats,
+	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+	.ndo_start_xmit = brcmf_netdev_start_xmit,
+	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
+	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
+int brcmf_net_attach(struct brcmf_if *ifp)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct net_device *ndev;
-	u8 temp_addr[ETH_ALEN];
 
-	brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+	brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+	ndev = ifp->ndev;
 
-	ndev = drvr->iflist[ifp->idx]->ndev;
-	ndev->netdev_ops = &brcmf_netdev_ops_pri;
-
-	/*
-	 * determine mac address to use
-	 */
-	if (is_valid_ether_addr(ifp->mac_addr))
-		memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
+	/* set appropriate operations */
+	if (!ifp->idx)
+		ndev->netdev_ops = &brcmf_netdev_ops_pri;
 	else
-		memcpy(temp_addr, drvr->mac, ETH_ALEN);
+		ndev->netdev_ops = &brcmf_netdev_ops_virt;
 
-	if (ifp->idx == 1) {
-		brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
-		/*  ACCESSPOINT INTERFACE CASE */
-		temp_addr[0] |= 0X02;	/* set bit 2 ,
-			 - Locally Administered address  */
-
-	}
 	ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
 	ndev->ethtool_ops = &brcmf_ethtool_ops;
 
 	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
 			      drvr->hdrlen;
 
-	memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
-
-	/* attach to cfg80211 for primary interface */
-	if (!ifp->idx) {
-		drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
-		if (drvr->config == NULL) {
-			brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
-			goto fail;
-		}
-	}
+	/* set the mac address */
+	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
 
 	if (register_netdev(ndev) != 0) {
 		brcmf_dbg(ERROR, "couldn't register the net device\n");
@@ -908,13 +647,12 @@
 	return -EBADE;
 }
 
-int
-brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
+			      char *name, u8 *addr_mask)
 {
 	struct brcmf_if *ifp;
 	struct net_device *ndev;
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_pub *drvr = bus_if->drvr;
+	int i;
 
 	brcmf_dbg(TRACE, "idx %d\n", ifidx);
 
@@ -924,19 +662,24 @@
 	 * in case we missed the BRCMF_E_IF_DEL event.
 	 */
 	if (ifp) {
-		brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+		brcmf_dbg(ERROR, "ERROR: netdev:%s already exists\n",
 			  ifp->ndev->name);
-		netif_stop_queue(ifp->ndev);
-		unregister_netdev(ifp->ndev);
-		free_netdev(ifp->ndev);
-		drvr->iflist[ifidx] = NULL;
+		if (ifidx) {
+			netif_stop_queue(ifp->ndev);
+			unregister_netdev(ifp->ndev);
+			free_netdev(ifp->ndev);
+			drvr->iflist[ifidx] = NULL;
+		} else {
+			brcmf_dbg(ERROR, "ignore IF event\n");
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	/* Allocate netdev, including space for private structure */
 	ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
 	if (!ndev) {
 		brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	ifp = netdev_priv(ndev);
@@ -944,20 +687,19 @@
 	ifp->drvr = drvr;
 	drvr->iflist[ifidx] = ifp;
 	ifp->idx = ifidx;
-	if (mac_addr != NULL)
-		memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
+	ifp->bssidx = bssidx;
 
-	if (brcmf_net_attach(ifp)) {
-		brcmf_dbg(ERROR, "brcmf_net_attach failed");
-		free_netdev(ifp->ndev);
-		drvr->iflist[ifidx] = NULL;
-		return -EOPNOTSUPP;
-	}
+	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
 
-	brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
-		  current->pid, ifp->ndev->name);
+	if (addr_mask != NULL)
+		for (i = 0; i < ETH_ALEN; i++)
+			ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
 
-	return 0;
+	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
+		  current->pid, ifp->ndev->name, ifp->mac_addr);
+
+	return ifp;
 }
 
 void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
@@ -982,6 +724,9 @@
 			netif_stop_queue(ifp->ndev);
 		}
 
+		cancel_work_sync(&ifp->setmacaddr_work);
+		cancel_work_sync(&ifp->multicast_work);
+
 		unregister_netdev(ifp->ndev);
 		drvr->iflist[ifidx] = NULL;
 		if (ifidx == 0)
@@ -1020,11 +765,13 @@
 		goto fail;
 	}
 
-	INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
-	INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
+	/* attach firmware event handler */
+	brcmf_fweh_attach(drvr);
 
 	INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
 
+	init_waitqueue_head(&drvr->pend_8021x_wait);
+
 	return ret;
 
 fail:
@@ -1036,10 +783,9 @@
 int brcmf_bus_start(struct device *dev)
 {
 	int ret = -1;
-	/* Room for "event_msgs" + '\0' + bitvec */
-	char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_if *ifp;
 
 	brcmf_dbg(TRACE, "\n");
 
@@ -1050,49 +796,40 @@
 		return ret;
 	}
 
-	brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
-		      iovbuf, sizeof(iovbuf));
-	brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
-				    sizeof(iovbuf));
-	memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
-	setbit(drvr->eventmask, BRCMF_E_SET_SSID);
-	setbit(drvr->eventmask, BRCMF_E_PRUNE);
-	setbit(drvr->eventmask, BRCMF_E_AUTH);
-	setbit(drvr->eventmask, BRCMF_E_REASSOC);
-	setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
-	setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
-	setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
-	setbit(drvr->eventmask, BRCMF_E_DISASSOC);
-	setbit(drvr->eventmask, BRCMF_E_JOIN);
-	setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
-	setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
-	setbit(drvr->eventmask, BRCMF_E_LINK);
-	setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
-	setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
-	setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
-	setbit(drvr->eventmask, BRCMF_E_TXFAIL);
-	setbit(drvr->eventmask, BRCMF_E_JOIN_START);
-	setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
-
-/* enable dongle roaming event */
-
-	drvr->pktfilter_count = 1;
-	/* Setup filter to allow only unicast */
-	drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
-
-	/* Bus is ready, do any protocol initialization */
-	ret = brcmf_proto_init(drvr);
-	if (ret < 0)
-		return ret;
-
 	/* add primary networking interface */
-	ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
-	if (ret < 0)
-		return ret;
+	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
+	if (IS_ERR(ifp))
+		return PTR_ERR(ifp);
 
 	/* signal bus ready */
 	bus_if->state = BRCMF_BUS_DATA;
+
+	/* Bus is ready, do any initialization */
+	ret = brcmf_c_preinit_dcmds(ifp);
+	if (ret < 0)
+		goto fail;
+
+	drvr->config = brcmf_cfg80211_attach(drvr);
+	if (drvr->config == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = brcmf_fweh_activate_events(ifp);
+	if (ret < 0)
+		goto fail;
+
+	ret = brcmf_net_attach(ifp);
+fail:
+	if (ret < 0) {
+		brcmf_dbg(ERROR, "failed: %d\n", ret);
+		if (drvr->config)
+			brcmf_cfg80211_detach(drvr->config);
+		free_netdev(drvr->iflist[0]->ndev);
+		drvr->iflist[0] = NULL;
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -1117,6 +854,11 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
+	if (drvr == NULL)
+		return;
+
+	/* stop firmware event handling */
+	brcmf_fweh_detach(drvr);
 
 	/* make sure primary interface removed last */
 	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
@@ -1126,8 +868,6 @@
 	brcmf_bus_detach(drvr);
 
 	if (drvr->prot) {
-		cancel_work_sync(&drvr->setmacaddr_work);
-		cancel_work_sync(&drvr->multicast_work);
 		brcmf_proto_detach(drvr);
 	}
 
@@ -1141,64 +881,21 @@
 	return atomic_read(&drvr->pend_8021x_cnt);
 }
 
-#define MAX_WAIT_FOR_8021X_TX	10
-
 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = ifp->drvr;
-	int timeout = 10 * HZ / 1000;
-	int ntimes = MAX_WAIT_FOR_8021X_TX;
-	int pend = brcmf_get_pend_8021x_cnt(drvr);
+	int err;
 
-	while (ntimes && pend) {
-		if (pend) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(timeout);
-			set_current_state(TASK_RUNNING);
-			ntimes--;
-		}
-		pend = brcmf_get_pend_8021x_cnt(drvr);
-	}
-	return pend;
+	err = wait_event_timeout(drvr->pend_8021x_wait,
+				 !brcmf_get_pend_8021x_cnt(drvr),
+				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
+
+	WARN_ON(!err);
+
+	return !err;
 }
 
-#ifdef DEBUG
-int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
-{
-	int ret = 0;
-	struct file *fp;
-	mm_segment_t old_fs;
-	loff_t pos = 0;
-
-	/* change to KERNEL_DS address limit */
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-
-	/* open file to write */
-	fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
-	if (!fp) {
-		brcmf_dbg(ERROR, "open file error\n");
-		ret = -1;
-		goto exit;
-	}
-
-	/* Write buf to file */
-	fp->f_op->write(fp, (char __user *)buf, size, &pos);
-
-exit:
-	/* free buf before return */
-	kfree(buf);
-	/* close file before return */
-	if (fp)
-		filp_close(fp, NULL);
-	/* restore previous address limit */
-	set_fs(old_fs);
-
-	return ret;
-}
-#endif				/* DEBUG */
-
 static void brcmf_driver_init(struct work_struct *work)
 {
 	brcmf_debugfs_init();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 6bc4425..48fa703 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -27,11 +27,6 @@
 /* Unlink, frees allocated protocol memory (including brcmf_proto) */
 extern void brcmf_proto_detach(struct brcmf_pub *drvr);
 
-/* Initialize protocol: sync w/dongle state.
- * Sets dongle media info (iswl, drv_version, mac address).
- */
-extern int brcmf_proto_init(struct brcmf_pub *drvr);
-
 /* Stop protocol: sync w/dongle state. */
 extern void brcmf_proto_stop(struct brcmf_pub *drvr);
 
@@ -41,13 +36,7 @@
 extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
 				struct sk_buff *txp);
 
-/* Use protocol to issue command to dongle */
-extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
-				struct brcmf_dcmd *dcmd, int len);
-
-extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
-
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
-				     uint cmd, void *buf, uint len);
+/* Sets dongle media info (drv_version, mac address). */
+extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
 
 #endif				/* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3564686..4572545 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -533,9 +533,11 @@
 	u8 *rxbuf;		/* Buffer for receiving control packets */
 	uint rxblen;		/* Allocated length of rxbuf */
 	u8 *rxctl;		/* Aligned pointer into rxbuf */
+	u8 *rxctl_orig;		/* pointer for freeing rxctl */
 	u8 *databuf;		/* Buffer for receiving big glom packet */
 	u8 *dataptr;		/* Aligned pointer into databuf */
 	uint rxlen;		/* Length of valid data in buffer */
+	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
 
 	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
 
@@ -582,8 +584,6 @@
 	struct list_head dpc_tsklst;
 	spinlock_t dpc_tl_lock;
 
-	struct semaphore sdsem;
-
 	const struct firmware *firmware;
 	u32 fw_ptr;
 
@@ -614,6 +614,12 @@
 
 #define ALIGNMENT  4
 
+enum brcmf_sdio_frmtype {
+	BRCMF_SDIO_FT_NORMAL,
+	BRCMF_SDIO_FT_SUPER,
+	BRCMF_SDIO_FT_SUB,
+};
+
 static void pkt_align(struct sk_buff *p, int len, int align)
 {
 	uint datalign;
@@ -1031,8 +1037,9 @@
 	}
 }
 
-static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
-				struct brcmf_sdio_read *rd)
+static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
+			       struct brcmf_sdio_read *rd,
+			       enum brcmf_sdio_frmtype type)
 {
 	u16 len, checksum;
 	u8 rx_seq, fc, tx_seq_max;
@@ -1047,17 +1054,26 @@
 	/* All zero means no more to read */
 	if (!(len | checksum)) {
 		bus->rxpending = false;
-		return false;
+		return -ENODATA;
 	}
 	if ((u16)(~(len ^ checksum))) {
 		brcmf_dbg(ERROR, "HW header checksum error\n");
 		bus->sdcnt.rx_badhdr++;
 		brcmf_sdbrcm_rxfail(bus, false, false);
-		return false;
+		return -EIO;
 	}
 	if (len < SDPCM_HDRLEN) {
 		brcmf_dbg(ERROR, "HW header length error\n");
-		return false;
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUPER &&
+	    (roundup(len, bus->blocksize) != rd->len)) {
+		brcmf_dbg(ERROR, "HW superframe header length error\n");
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
+		brcmf_dbg(ERROR, "HW subframe header length error\n");
+		return -EPROTO;
 	}
 	rd->len = len;
 
@@ -1071,15 +1087,33 @@
 	 * Byte 5: Maximum Sequence number allow for Tx
 	 * Byte 6~7: Reserved
 	 */
+	if (type == BRCMF_SDIO_FT_SUPER &&
+	    SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+		brcmf_dbg(ERROR, "Glom descriptor found in superframe head\n");
+		rd->len = 0;
+		return -EINVAL;
+	}
 	rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
 	rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
-	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
+	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
+	    type != BRCMF_SDIO_FT_SUPER) {
 		brcmf_dbg(ERROR, "HW header length too long\n");
 		bus->sdiodev->bus_if->dstats.rx_errors++;
 		bus->sdcnt.rx_toolong++;
 		brcmf_sdbrcm_rxfail(bus, false, false);
 		rd->len = 0;
-		return false;
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
+		brcmf_dbg(ERROR, "Wrong channel for superframe\n");
+		rd->len = 0;
+		return -EINVAL;
+	}
+	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
+	    rd->channel != SDPCM_EVENT_CHANNEL) {
+		brcmf_dbg(ERROR, "Wrong channel for subframe\n");
+		rd->len = 0;
+		return -EINVAL;
 	}
 	rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
 	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
@@ -1087,7 +1121,7 @@
 		bus->sdcnt.rx_badhdr++;
 		brcmf_sdbrcm_rxfail(bus, false, false);
 		rd->len = 0;
-		return false;
+		return -ENXIO;
 	}
 	if (rd->seq_num != rx_seq) {
 		brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
@@ -1095,6 +1129,9 @@
 		bus->sdcnt.rx_badseq++;
 		rd->seq_num = rx_seq;
 	}
+	/* no need to check the rest for subframe */
+	if (type == BRCMF_SDIO_FT_SUB)
+		return 0;
 	rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
 	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
 		/* only warm for NON glom packet */
@@ -1118,7 +1155,7 @@
 	}
 	bus->tx_max = tx_seq_max;
 
-	return true;
+	return 0;
 }
 
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1126,16 +1163,16 @@
 	u16 dlen, totlen;
 	u8 *dptr, num = 0;
 
-	u16 sublen, check;
+	u16 sublen;
 	struct sk_buff *pfirst, *pnext;
 
 	int errcode;
-	u8 chan, seq, doff, sfdoff;
-	u8 txmax;
+	u8 doff, sfdoff;
 
 	int ifidx = 0;
 	bool usechain = bus->use_rxchain;
-	u16 next_len;
+
+	struct brcmf_sdio_read rd_new;
 
 	/* If packets, issue read(s) and send up packet chain */
 	/* Return sequence numbers consumed? */
@@ -1235,6 +1272,7 @@
 		 * read directly into the chained packet, or allocate a large
 		 * packet and and copy into the chain.
 		 */
+		sdio_claim_host(bus->sdiodev->func[1]);
 		if (usechain) {
 			errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
 					bus->sdiodev->sbwad,
@@ -1256,6 +1294,7 @@
 				  dlen);
 			errcode = -1;
 		}
+		sdio_release_host(bus->sdiodev->func[1]);
 		bus->sdcnt.f2rxdata++;
 
 		/* On failure, kill the superframe, allow a couple retries */
@@ -1264,6 +1303,7 @@
 				  dlen, errcode);
 			bus->sdiodev->bus_if->dstats.rx_errors++;
 
+			sdio_claim_host(bus->sdiodev->func[1]);
 			if (bus->glomerr++ < 3) {
 				brcmf_sdbrcm_rxfail(bus, true, true);
 			} else {
@@ -1272,6 +1312,7 @@
 				bus->sdcnt.rxglomfail++;
 				brcmf_sdbrcm_free_glom(bus);
 			}
+			sdio_release_host(bus->sdiodev->func[1]);
 			return 0;
 		}
 
@@ -1279,68 +1320,17 @@
 				   pfirst->data, min_t(int, pfirst->len, 48),
 				   "SUPERFRAME:\n");
 
-		/* Validate the superframe header */
-		dptr = (u8 *) (pfirst->data);
-		sublen = get_unaligned_le16(dptr);
-		check = get_unaligned_le16(dptr + sizeof(u16));
-
-		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
-		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
-		next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
-		if ((next_len << 4) > MAX_RX_DATASZ) {
-			brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
-				  next_len, seq);
-			next_len = 0;
-		}
-		bus->cur_read.len = next_len << 4;
-		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-
-		errcode = 0;
-		if ((u16)~(sublen ^ check)) {
-			brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
-				  sublen, check);
-			errcode = -1;
-		} else if (roundup(sublen, bus->blocksize) != dlen) {
-			brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
-				  sublen, roundup(sublen, bus->blocksize),
-				  dlen);
-			errcode = -1;
-		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
-			   SDPCM_GLOM_CHANNEL) {
-			brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
-				  SDPCM_PACKET_CHANNEL(
-					  &dptr[SDPCM_FRAMETAG_LEN]));
-			errcode = -1;
-		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
-			brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
-			errcode = -1;
-		} else if ((doff < SDPCM_HDRLEN) ||
-			   (doff > (pfirst->len - SDPCM_HDRLEN))) {
-			brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
-				  doff, sublen, pfirst->len, SDPCM_HDRLEN);
-			errcode = -1;
-		}
-
-		/* Check sequence number of superframe SW header */
-		if (rxseq != seq) {
-			brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
-				  seq, rxseq);
-			bus->sdcnt.rx_badseq++;
-			rxseq = seq;
-		}
-
-		/* Check window for sanity */
-		if ((u8) (txmax - bus->tx_seq) > 0x40) {
-			brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
-				  txmax, bus->tx_seq);
-			txmax = bus->tx_seq + 2;
-		}
-		bus->tx_max = txmax;
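+		/* validate the superframe header with the common parser */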
+		rd_new.seq_num = rxseq;
+		rd_new.len = dlen;
+		sdio_claim_host(bus->sdiodev->func[1]);
+		errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
+					      BRCMF_SDIO_FT_SUPER);
+		sdio_release_host(bus->sdiodev->func[1]);
+		bus->cur_read.len = rd_new.len_nxtfrm << 4;
 
 		/* Remove superframe header, remember offset */
-		skb_pull(pfirst, doff);
-		sfdoff = doff;
+		skb_pull(pfirst, rd_new.dat_offset);
+		sfdoff = rd_new.dat_offset;
 		num = 0;
 
 		/* Validate all the subframe headers */
@@ -1349,40 +1339,22 @@
 			if (errcode)
 				break;
 
-			dptr = (u8 *) (pnext->data);
-			dlen = (u16) (pnext->len);
-			sublen = get_unaligned_le16(dptr);
-			check = get_unaligned_le16(dptr + sizeof(u16));
-			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
-			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+			rd_new.len = pnext->len;
+			rd_new.seq_num = rxseq++;
+			sdio_claim_host(bus->sdiodev->func[1]);
+			errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
+						      BRCMF_SDIO_FT_SUB);
+			sdio_release_host(bus->sdiodev->func[1]);
 			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
-					   dptr, 32, "subframe:\n");
+					   pnext->data, 32, "subframe:\n");
 
-			if ((u16)~(sublen ^ check)) {
-				brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
-					  num, sublen, check);
-				errcode = -1;
-			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
-				brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
-					  num, sublen, dlen);
-				errcode = -1;
-			} else if ((chan != SDPCM_DATA_CHANNEL) &&
-				   (chan != SDPCM_EVENT_CHANNEL)) {
-				brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
-					  num, chan);
-				errcode = -1;
-			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
-				brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
-					  num, doff, sublen, SDPCM_HDRLEN);
-				errcode = -1;
-			}
-			/* increase the subframe count */
 			num++;
 		}
 
 		if (errcode) {
 			/* Terminate frame on error, request
 				 a couple retries */
+			sdio_claim_host(bus->sdiodev->func[1]);
 			if (bus->glomerr++ < 3) {
 				/* Restore superframe header space */
 				skb_push(pfirst, sfdoff);
@@ -1393,6 +1365,7 @@
 				bus->sdcnt.rxglomfail++;
 				brcmf_sdbrcm_free_glom(bus);
 			}
+			sdio_release_host(bus->sdiodev->func[1]);
 			bus->cur_read.len = 0;
 			return 0;
 		}
@@ -1402,27 +1375,11 @@
 		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
 			dptr = (u8 *) (pfirst->data);
 			sublen = get_unaligned_le16(dptr);
-			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
-			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
 			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
 
-			brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
-				  num, pfirst, pfirst->data,
-				  pfirst->len, sublen, chan, seq);
-
-			/* precondition: chan == SDPCM_DATA_CHANNEL ||
-					 chan == SDPCM_EVENT_CHANNEL */
-
-			if (rxseq != seq) {
-				brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
-					  seq, rxseq);
-				bus->sdcnt.rx_badseq++;
-				rxseq = seq;
-			}
-			rxseq++;
-
 			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
-					   dptr, dlen, "Rx Subframe Data:\n");
+					   dptr, pfirst->len,
+					   "Rx Subframe Data:\n");
 
 			__skb_trim(pfirst, sublen);
 			skb_pull(pfirst, doff);
@@ -1449,11 +1406,8 @@
 					   pfirst->prev);
 		}
 		/* send any remaining packets up */
-		if (bus->glom.qlen) {
-			up(&bus->sdsem);
+		if (bus->glom.qlen)
 			brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
-			down(&bus->sdsem);
-		}
 
 		bus->sdcnt.rxglomframes++;
 		bus->sdcnt.rxglompkts += bus->glom.qlen;
@@ -1494,21 +1448,24 @@
 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 {
 	uint rdlen, pad;
-
+	u8 *buf = NULL, *rbuf;
 	int sdret;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	/* Set rxctl for frame (w/optional alignment) */
-	bus->rxctl = bus->rxbuf;
-	bus->rxctl += BRCMF_FIRSTREAD;
-	pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
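+	/* Stage the control frame in a freshly allocated buffer so that
+	 * brcmf_sdbrcm_bus_rxctl() can consume and free it under rxctl_lock,
+	 * independently of the shared bus rx buffer.
+	 */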
+	if (bus->rxblen)
+		buf = vzalloc(bus->rxblen);
+	if (!buf) {
+		brcmf_dbg(ERROR, "no memory for control frame\n");
+		goto done;
+	}
+	rbuf = bus->rxbuf;
+	pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
 	if (pad)
-		bus->rxctl += (BRCMF_SDALIGN - pad);
-	bus->rxctl -= BRCMF_FIRSTREAD;
+		rbuf += (BRCMF_SDALIGN - pad);
 
 	/* Copy the already-read portion over */
-	memcpy(bus->rxctl, hdr, BRCMF_FIRSTREAD);
+	memcpy(buf, hdr, BRCMF_FIRSTREAD);
 	if (len <= BRCMF_FIRSTREAD)
 		goto gotpkt;
 
@@ -1545,11 +1502,11 @@
 		goto done;
 	}
 
-	/* Read remainder of frame body into the rxctl buffer */
+	/* Read the remainder of the frame body */
 	sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
 				bus->sdiodev->sbwad,
 				SDIO_FUNC_2,
-				F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
+				F2SYNC, rbuf, rdlen);
 	bus->sdcnt.f2rxdata++;
 
 	/* Control frame failures need retransmission */
@@ -1559,16 +1516,26 @@
 		bus->sdcnt.rxc_errors++;
 		brcmf_sdbrcm_rxfail(bus, true, true);
 		goto done;
-	}
+	} else
+		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
 
 gotpkt:
 
 	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
-			   bus->rxctl, len, "RxCtrl:\n");
+			   buf, len, "RxCtrl:\n");
 
 	/* Point to valid data and indicate its length */
-	bus->rxctl += doff;
+	spin_lock_bh(&bus->rxctl_lock);
+	if (bus->rxctl) {
+		brcmf_dbg(ERROR, "last control frame is being processed.\n");
+		spin_unlock_bh(&bus->rxctl_lock);
+		vfree(buf);
+		goto done;
+	}
+	bus->rxctl = buf + doff;
+	bus->rxctl_orig = buf;
 	bus->rxlen = len - doff;
+	spin_unlock_bh(&bus->rxctl_lock);
 
 done:
 	/* Awake any waiters */
@@ -1623,6 +1590,7 @@
 
 		rd->len_left = rd->len;
 		/* read header first for unknown frame length */
+		sdio_claim_host(bus->sdiodev->func[1]);
 		if (!rd->len) {
 			sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
 						      bus->sdiodev->sbwad,
@@ -1635,6 +1603,7 @@
 					  sdret);
 				bus->sdcnt.rx_hdrfail++;
 				brcmf_sdbrcm_rxfail(bus, true, true);
+				sdio_release_host(bus->sdiodev->func[1]);
 				continue;
 			}
 
@@ -1642,7 +1611,9 @@
 					   bus->rxhdr, SDPCM_HDRLEN,
 					   "RxHdr:\n");
 
-			if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+			if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
+						BRCMF_SDIO_FT_NORMAL)) {
+				sdio_release_host(bus->sdiodev->func[1]);
 				if (!bus->rxpending)
 					break;
 				else
@@ -1658,6 +1629,7 @@
 				rd->len_nxtfrm = 0;
 				/* treat all packets as events if we don't know */
 				rd->channel = SDPCM_EVENT_CHANNEL;
+				sdio_release_host(bus->sdiodev->func[1]);
 				continue;
 			}
 			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1675,6 +1647,7 @@
 			bus->sdiodev->bus_if->dstats.rx_dropped++;
 			brcmf_sdbrcm_rxfail(bus, false,
 					    RETRYCHAN(rd->channel));
+			sdio_release_host(bus->sdiodev->func[1]);
 			continue;
 		}
 		skb_pull(pkt, head_read);
@@ -1683,14 +1656,17 @@
 		sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
 					      SDIO_FUNC_2, F2SYNC, pkt);
 		bus->sdcnt.f2rxdata++;
+		sdio_release_host(bus->sdiodev->func[1]);
 
 		if (sdret < 0) {
 			brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
 				  rd->len, rd->channel, sdret);
 			brcmu_pkt_buf_free_skb(pkt);
 			bus->sdiodev->bus_if->dstats.rx_errors++;
+			sdio_claim_host(bus->sdiodev->func[1]);
 			brcmf_sdbrcm_rxfail(bus, true,
 					    RETRYCHAN(rd->channel));
+			sdio_release_host(bus->sdiodev->func[1]);
 			continue;
 		}
 
@@ -1701,7 +1677,9 @@
 		} else {
 			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
 			rd_new.seq_num = rd->seq_num;
-			if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+			sdio_claim_host(bus->sdiodev->func[1]);
+			if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
+						BRCMF_SDIO_FT_NORMAL)) {
 				rd->len = 0;
 				brcmu_pkt_buf_free_skb(pkt);
 			}
@@ -1712,9 +1690,11 @@
 					  roundup(rd_new.len, 16) >> 4);
 				rd->len = 0;
 				brcmf_sdbrcm_rxfail(bus, true, true);
+				sdio_release_host(bus->sdiodev->func[1]);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
 			}
+			sdio_release_host(bus->sdiodev->func[1]);
 			rd->len_nxtfrm = rd_new.len_nxtfrm;
 			rd->channel = rd_new.channel;
 			rd->dat_offset = rd_new.dat_offset;
@@ -1730,7 +1710,9 @@
 					  rd_new.seq_num);
 				/* Force retry w/normal header read */
 				rd->len = 0;
+				sdio_claim_host(bus->sdiodev->func[1]);
 				brcmf_sdbrcm_rxfail(bus, false, true);
+				sdio_release_host(bus->sdiodev->func[1]);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
 			}
@@ -1753,7 +1735,9 @@
 			} else {
 				brcmf_dbg(ERROR, "%s: glom superframe w/o "
 					  "descriptor!\n", __func__);
+				sdio_claim_host(bus->sdiodev->func[1]);
 				brcmf_sdbrcm_rxfail(bus, false, false);
+				sdio_release_host(bus->sdiodev->func[1]);
 			}
 			/* prepare the descriptor for the next read */
 			rd->len = rd->len_nxtfrm << 4;
@@ -1784,10 +1768,7 @@
 			continue;
 		}
 
-		/* Unlock during rx call */
-		up(&bus->sdsem);
 		brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
-		down(&bus->sdsem);
 	}
 
 	rxcount = maxframes - rxleft;
@@ -1805,15 +1786,6 @@
 }
 
 static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
-{
-	up(&bus->sdsem);
-	wait_event_interruptible_timeout(bus->ctrl_wait, !*lockvar, HZ * 2);
-	down(&bus->sdsem);
-	return;
-}
-
-static void
 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 {
 	if (waitqueue_active(&bus->ctrl_wait))
@@ -1914,6 +1886,7 @@
 	if (len & (ALIGNMENT - 1))
 			len = roundup(len, ALIGNMENT);
 
+	sdio_claim_host(bus->sdiodev->func[1]);
 	ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
 				    SDIO_FUNC_2, F2SYNC, pkt);
 	bus->sdcnt.f2txdata++;
@@ -1941,15 +1914,14 @@
 		}
 
 	}
+	sdio_release_host(bus->sdiodev->func[1]);
 	if (ret == 0)
 		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
 
 done:
 	/* restore pkt buffer pointer before calling tx complete routine */
 	skb_pull(pkt, SDPCM_HDRLEN + pad);
-	up(&bus->sdsem);
 	brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
-	down(&bus->sdsem);
 
 	if (free_pkt)
 		brcmu_pkt_buf_free_skb(pkt);
@@ -1990,9 +1962,11 @@
 		/* In poll mode, need to check for other events */
 		if (!bus->intr && cnt) {
 			/* Check device status, signal pending interrupt */
+			sdio_claim_host(bus->sdiodev->func[1]);
 			ret = r_sdreg32(bus, &intstatus,
 					offsetof(struct sdpcmd_regs,
 						 intstatus));
+			sdio_release_host(bus->sdiodev->func[1]);
 			bus->sdcnt.f2txdata++;
 			if (ret != 0)
 				break;
@@ -2029,7 +2003,7 @@
 		bus->watchdog_tsk = NULL;
 	}
 
-	down(&bus->sdsem);
+	sdio_claim_host(bus->sdiodev->func[1]);
 
 	/* Enable clock for device interrupts */
 	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -2063,6 +2037,7 @@
 
 	/* Turn off the backplane clock (only) */
 	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+	sdio_release_host(bus->sdiodev->func[1]);
 
 	/* Clear the data packet queues */
 	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2073,14 +2048,14 @@
 	brcmf_sdbrcm_free_glom(bus);
 
 	/* Clear rx control and wake any waiters */
+	spin_lock_bh(&bus->rxctl_lock);
 	bus->rxlen = 0;
+	spin_unlock_bh(&bus->rxctl_lock);
 	brcmf_sdbrcm_dcmd_resp_wake(bus);
 
 	/* Reset some F2 state stuff */
 	bus->rxskip = false;
 	bus->tx_seq = bus->rx_seq = 0;
-
-	up(&bus->sdsem);
 }
 
 #ifdef CONFIG_BRCMFMAC_SDIO_OOB
@@ -2164,7 +2139,7 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	down(&bus->sdsem);
+	sdio_claim_host(bus->sdiodev->func[1]);
 
 	/* If waiting for HTAVAIL, check status */
 	if (bus->clkstate == CLK_PENDING) {
@@ -2218,9 +2193,7 @@
 	/* Pending interrupt indicates new device status */
 	if (atomic_read(&bus->ipend) > 0) {
 		atomic_set(&bus->ipend, 0);
-		sdio_claim_host(bus->sdiodev->func[1]);
 		err = brcmf_sdio_intr_rstatus(bus);
-		sdio_release_host(bus->sdiodev->func[1]);
 	}
 
 	/* Start with leftover status bits */
@@ -2249,6 +2222,8 @@
 		intstatus |= brcmf_sdbrcm_hostmail(bus);
 	}
 
+	sdio_release_host(bus->sdiodev->func[1]);
+
 	/* Generally don't ask for these, can get CRC errors... */
 	if (intstatus & I_WR_OOSYNC) {
 		brcmf_dbg(ERROR, "Dongle reports WR_OOSYNC\n");
@@ -2295,6 +2270,7 @@
 		(bus->clkstate == CLK_AVAIL)) {
 		int i;
 
+		sdio_claim_host(bus->sdiodev->func[1]);
 		err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
 			SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
 			(u32) bus->ctrl_frame_len);
@@ -2328,6 +2304,7 @@
 		} else {
 			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
 		}
+		sdio_release_host(bus->sdiodev->func[1]);
 		bus->ctrl_frame_stat = false;
 		brcmf_sdbrcm_wait_event_wakeup(bus);
 	}
@@ -2357,10 +2334,10 @@
 	if ((bus->clkstate != CLK_PENDING)
 	    && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
 		bus->activity = false;
+		sdio_claim_host(bus->sdiodev->func[1]);
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+		sdio_release_host(bus->sdiodev->func[1]);
 	}
-
-	up(&bus->sdsem);
 }
 
 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2651,11 +2628,10 @@
 
 	/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
 
-	/* Need to lock here to protect txseq and SDIO tx calls */
-	down(&bus->sdsem);
-
 	/* Make sure backplane clock is on */
+	sdio_claim_host(bus->sdiodev->func[1]);
 	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+	sdio_release_host(bus->sdiodev->func[1]);
 
 	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
 	*(__le16 *) frame = cpu_to_le16((u16) msglen);
@@ -2678,7 +2654,9 @@
 		bus->ctrl_frame_buf = frame;
 		bus->ctrl_frame_len = len;
 
-		brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
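+		/* wait up to 2s for the DPC to consume the control frame */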
+		wait_event_interruptible_timeout(bus->ctrl_wait,
+						 !bus->ctrl_frame_stat,
+						 msecs_to_jiffies(2000));
 
 		if (!bus->ctrl_frame_stat) {
 			brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
@@ -2697,7 +2675,9 @@
 				   frame, min_t(u16, len, 16), "TxHdr:\n");
 
 		do {
+			sdio_claim_host(bus->sdiodev->func[1]);
 			ret = brcmf_tx_frame(bus, frame, len);
+			sdio_release_host(bus->sdiodev->func[1]);
 		} while (ret < 0 && retries++ < TXRETRIES);
 	}
 
@@ -2707,13 +2687,13 @@
 		spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
 
 		bus->activity = false;
+		sdio_claim_host(bus->sdiodev->func[1]);
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+		sdio_release_host(bus->sdiodev->func[1]);
 	} else {
 		spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
 	}
 
-	up(&bus->sdsem);
-
 	if (ret)
 		bus->sdcnt.tx_ctlerrs++;
 	else
@@ -2743,8 +2723,10 @@
 	 * Read last word in socram to determine
 	 * address of sdpcm_shared structure
 	 */
+	sdio_claim_host(bus->sdiodev->func[1]);
 	rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
 				   (u8 *)&addr_le, 4);
+	sdio_release_host(bus->sdiodev->func[1]);
 	if (rv < 0)
 		return rv;
 
@@ -2763,8 +2745,10 @@
 	}
 
 	/* Read hndrte_shared structure */
+	sdio_claim_host(bus->sdiodev->func[1]);
 	rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
 				   sizeof(struct sdpcm_shared_le));
+	sdio_release_host(bus->sdiodev->func[1]);
 	if (rv < 0)
 		return rv;
 
@@ -2867,12 +2851,14 @@
 	if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
 		return 0;
 
+	sdio_claim_host(bus->sdiodev->func[1]);
 	error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
 				      sizeof(struct brcmf_trap_info));
 	if (error < 0)
 		return error;
 
 	nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
+	sdio_release_host(bus->sdiodev->func[1]);
 	if (nbytes < 0)
 		return nbytes;
 
@@ -2918,6 +2904,7 @@
 		return 0;
 	}
 
+	sdio_claim_host(bus->sdiodev->func[1]);
 	if (sh->assert_file_addr != 0) {
 		error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
 					      (u8 *)file, 80);
@@ -2930,6 +2917,7 @@
 		if (error < 0)
 			return error;
 	}
+	sdio_release_host(bus->sdiodev->func[1]);
 
 	res = scnprintf(buf, sizeof(buf),
 			"dongle assert: %s:%d: assert(%s)\n",
@@ -2942,9 +2930,7 @@
 	int error;
 	struct sdpcm_shared sh;
 
-	down(&bus->sdsem);
 	error = brcmf_sdio_readshared(bus, &sh);
-	up(&bus->sdsem);
 
 	if (error < 0)
 		return error;
@@ -2971,7 +2957,6 @@
 	if (pos != 0)
 		return 0;
 
-	down(&bus->sdsem);
 	error = brcmf_sdio_readshared(bus, &sh);
 	if (error < 0)
 		goto done;
@@ -2988,7 +2973,6 @@
 	error += nbytes;
 	*ppos += error;
 done:
-	up(&bus->sdsem);
 	return error;
 }
 
@@ -3039,6 +3023,7 @@
 	int timeleft;
 	uint rxlen = 0;
 	bool pending;
+	u8 *buf;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
@@ -3048,11 +3033,15 @@
 	/* Wait until control frame is available */
 	timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
 
-	down(&bus->sdsem);
+	spin_lock_bh(&bus->rxctl_lock);
 	rxlen = bus->rxlen;
 	memcpy(msg, bus->rxctl, min(msglen, rxlen));
+	bus->rxctl = NULL;
+	buf = bus->rxctl_orig;
+	bus->rxctl_orig = NULL;
 	bus->rxlen = 0;
-	up(&bus->sdsem);
+	spin_unlock_bh(&bus->rxctl_lock);
+	vfree(buf);
 
 	if (rxlen) {
 		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
@@ -3388,13 +3377,16 @@
 {
 	bool ret;
 
-	/* Download the firmware */
+	sdio_claim_host(bus->sdiodev->func[1]);
+
 	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
 	ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
 
 	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
 
+	sdio_release_host(bus->sdiodev->func[1]);
+
 	return ret;
 }
 
@@ -3423,7 +3415,7 @@
 	bus->sdcnt.tickcnt = 0;
 	brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
 
-	down(&bus->sdsem);
+	sdio_claim_host(bus->sdiodev->func[1]);
 
 	/* Make sure backplane clock is on, needed to generate F2 interrupt */
 	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -3492,7 +3484,7 @@
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
 
 exit:
-	up(&bus->sdsem);
+	sdio_release_host(bus->sdiodev->func[1]);
 
 	return ret;
 }
@@ -3539,8 +3531,6 @@
 
 	brcmf_dbg(TIMER, "Enter\n");
 
-	down(&bus->sdsem);
-
 	/* Poll period: check device if appropriate. */
 	if (bus->poll && (++bus->polltick >= bus->pollrate)) {
 		u32 intstatus = 0;
@@ -3557,9 +3547,11 @@
 				u8 devpend;
 				spin_unlock_irqrestore(&bus->dpc_tl_lock,
 						       flags);
+				sdio_claim_host(bus->sdiodev->func[1]);
 				devpend = brcmf_sdio_regrb(bus->sdiodev,
 							   SDIO_CCCR_INTx,
 							   NULL);
+				sdio_release_host(bus->sdiodev->func[1]);
 				intstatus =
 				    devpend & (INTR_STATUS_FUNC1 |
 					       INTR_STATUS_FUNC2);
@@ -3584,16 +3576,18 @@
 	}
 #ifdef DEBUG
 	/* Poll for console output periodically */
-	if (bus_if->state == BRCMF_BUS_DATA &&
+	if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
 	    bus->console_interval != 0) {
 		bus->console.count += BRCMF_WD_POLL_MS;
 		if (bus->console.count >= bus->console_interval) {
 			bus->console.count -= bus->console_interval;
+			sdio_claim_host(bus->sdiodev->func[1]);
 			/* Make sure backplane clock is on */
 			brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 			if (brcmf_sdbrcm_readconsole(bus) < 0)
 				/* stop on error */
 				bus->console_interval = 0;
+			sdio_release_host(bus->sdiodev->func[1]);
 		}
 	}
 #endif				/* DEBUG */
@@ -3606,13 +3600,13 @@
 				bus->activity = false;
 				brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
 			} else {
+				sdio_claim_host(bus->sdiodev->func[1]);
 				brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+				sdio_release_host(bus->sdiodev->func[1]);
 			}
 		}
 	}
 
-	up(&bus->sdsem);
-
 	return (atomic_read(&bus->ipend) > 0);
 }
 
@@ -3707,6 +3701,8 @@
 
 	bus->alp_only = true;
 
+	sdio_claim_host(bus->sdiodev->func[1]);
+
 	pr_debug("F1 signature read @0x18000000=0x%4x\n",
 		 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
 
@@ -3754,6 +3750,8 @@
 	reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
 	brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
 
+	sdio_release_host(bus->sdiodev->func[1]);
+
 	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
 
 	/* Locate an appropriately-aligned portion of hdrbuf */
@@ -3769,6 +3767,7 @@
 	return true;
 
 fail:
+	sdio_release_host(bus->sdiodev->func[1]);
 	return false;
 }
 
@@ -3776,6 +3775,8 @@
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
+	sdio_claim_host(bus->sdiodev->func[1]);
+
 	/* Disable F2 to clear any intermediate frame state on the dongle */
 	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
 			 SDIO_FUNC_ENABLE_1, NULL);
@@ -3786,6 +3787,8 @@
 	/* Done with backplane-dependent accesses, can drop clock... */
 	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
+	sdio_release_host(bus->sdiodev->func[1]);
+
 	/* ...and initialize clock/power states */
 	bus->clkstate = CLK_SDONLY;
 	bus->idletime = BRCMF_IDLE_INTERVAL;
@@ -3841,8 +3844,10 @@
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (bus->ci) {
+		sdio_claim_host(bus->sdiodev->func[1]);
 		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+		sdio_release_host(bus->sdiodev->func[1]);
 		brcmf_sdio_chip_detach(&bus->ci);
 		if (bus->vars && bus->varsz)
 			kfree(bus->vars);
@@ -3862,7 +3867,8 @@
 		brcmf_sdio_intr_unregister(bus->sdiodev);
 
 		cancel_work_sync(&bus->datawork);
-		destroy_workqueue(bus->brcmf_wq);
+		if (bus->brcmf_wq)
+			destroy_workqueue(bus->brcmf_wq);
 
 		if (bus->sdiodev->bus_if->drvr) {
 			brcmf_detach(bus->sdiodev->dev);
@@ -3904,31 +3910,29 @@
 	bus->txminmax = BRCMF_TXMINMAX;
 	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
 
+	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
+	if (bus->brcmf_wq == NULL) {
+		brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
+		goto fail;
+	}
+
 	/* attempt to attach to the dongle */
 	if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
 		brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_attach failed\n");
 		goto fail;
 	}
 
+	spin_lock_init(&bus->rxctl_lock);
 	spin_lock_init(&bus->txqlock);
 	init_waitqueue_head(&bus->ctrl_wait);
 	init_waitqueue_head(&bus->dcmd_resp_wait);
 
-	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
-	if (bus->brcmf_wq == NULL) {
-		brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
-		goto fail;
-	}
-	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
-
 	/* Set up the watchdog timer */
 	init_timer(&bus->timer);
 	bus->timer.data = (unsigned long)bus;
 	bus->timer.function = brcmf_sdbrcm_watchdog;
 
-	/* Initialize thread based operation and lock */
-	sema_init(&bus->sdsem, 1);
-
 	/* Initialize watchdog thread */
 	init_completion(&bus->watchdog_wait);
 	bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
@@ -3991,10 +3995,8 @@
 	/* if firmware path present try to download and bring up bus */
 	ret = brcmf_bus_start(bus->sdiodev->dev);
 	if (ret != 0) {
-		if (ret == -ENOLINK) {
-			brcmf_dbg(ERROR, "dongle is not responding\n");
-			goto fail;
-		}
+		brcmf_dbg(ERROR, "dongle is not responding\n");
+		goto fail;
 	}
 
 	return bus;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
new file mode 100644
index 0000000..fa8fc44
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/netdevice.h>
+
+#include "brcmu_wifi.h"
+#include "brcmu_utils.h"
+
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "fweh.h"
+#include "fwil.h"
+
+/**
+ * struct brcm_ethhdr - broadcom specific ether header.
+ *
+ * @subtype: subtype for this packet.
+ * @length: TODO: length of appended data.
+ * @version: version indication.
+ * @oui: OUI of this packet.
+ * @usr_subtype: subtype for this OUI.
+ */
+struct brcm_ethhdr {
+	__be16 subtype;
+	__be16 length;
+	u8 version;
+	u8 oui[3];
+	__be16 usr_subtype;
+} __packed;
+
+struct brcmf_event_msg_be {
+	__be16 version;
+	__be16 flags;
+	__be32 event_type;
+	__be32 status;
+	__be32 reason;
+	__be32 auth_type;
+	__be32 datalen;
+	u8 addr[ETH_ALEN];
+	char ifname[IFNAMSIZ];
+	u8 ifidx;
+	u8 bsscfgidx;
+} __packed;
+
+/**
+ * struct brcmf_event - contents of broadcom event packet.
+ *
+ * @eth: standard ether header.
+ * @hdr: broadcom specific ether header.
+ * @msg: common part of the actual event message.
+ */
+struct brcmf_event {
+	struct ethhdr eth;
+	struct brcm_ethhdr hdr;
+	struct brcmf_event_msg_be msg;
+} __packed;
+
+/**
+ * struct brcmf_fweh_queue_item - event item on event queue.
+ *
+ * @q: list element for queuing.
+ * @code: event code.
+ * @ifidx: interface index related to this event.
+ * @ifaddr: ethernet address for interface.
+ * @emsg: common parameters of the firmware event message.
+ * @data: event specific data part of the firmware event.
+ */
+struct brcmf_fweh_queue_item {
+	struct list_head q;
+	enum brcmf_fweh_event_code code;
+	u8 ifidx;
+	u8 ifaddr[ETH_ALEN];
+	struct brcmf_event_msg_be emsg;
+	u8 data[0];
+};
+
+/**
+ * struct brcmf_fweh_event_name - code, name mapping entry.
+ */
+struct brcmf_fweh_event_name {
+	enum brcmf_fweh_event_code code;
+	const char *name;
+};
+
+#ifdef DEBUG
+/* array for mapping code to event name */
+static struct brcmf_fweh_event_name fweh_event_names[] = {
+	{ BRCMF_E_SET_SSID, "SET_SSID" },
+	{ BRCMF_E_JOIN, "JOIN" },
+	{ BRCMF_E_START, "START" },
+	{ BRCMF_E_AUTH, "AUTH" },
+	{ BRCMF_E_AUTH_IND, "AUTH_IND" },
+	{ BRCMF_E_DEAUTH, "DEAUTH" },
+	{ BRCMF_E_DEAUTH_IND, "DEAUTH_IND" },
+	{ BRCMF_E_ASSOC, "ASSOC" },
+	{ BRCMF_E_ASSOC_IND, "ASSOC_IND" },
+	{ BRCMF_E_REASSOC, "REASSOC" },
+	{ BRCMF_E_REASSOC_IND, "REASSOC_IND" },
+	{ BRCMF_E_DISASSOC, "DISASSOC" },
+	{ BRCMF_E_DISASSOC_IND, "DISASSOC_IND" },
+	{ BRCMF_E_QUIET_START, "START_QUIET" },
+	{ BRCMF_E_QUIET_END, "END_QUIET" },
+	{ BRCMF_E_BEACON_RX, "BEACON_RX" },
+	{ BRCMF_E_LINK, "LINK" },
+	{ BRCMF_E_MIC_ERROR, "MIC_ERROR" },
+	{ BRCMF_E_NDIS_LINK, "NDIS_LINK" },
+	{ BRCMF_E_ROAM, "ROAM" },
+	{ BRCMF_E_TXFAIL, "TXFAIL" },
+	{ BRCMF_E_PMKID_CACHE, "PMKID_CACHE" },
+	{ BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
+	{ BRCMF_E_PRUNE, "PRUNE" },
+	{ BRCMF_E_AUTOAUTH, "AUTOAUTH" },
+	{ BRCMF_E_EAPOL_MSG, "EAPOL_MSG" },
+	{ BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
+	{ BRCMF_E_ADDTS_IND, "ADDTS_IND" },
+	{ BRCMF_E_DELTS_IND, "DELTS_IND" },
+	{ BRCMF_E_BCNSENT_IND, "BCNSENT_IND" },
+	{ BRCMF_E_BCNRX_MSG, "BCNRX_MSG" },
+	{ BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG" },
+	{ BRCMF_E_ROAM_PREP, "ROAM_PREP" },
+	{ BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND" },
+	{ BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST" },
+	{ BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE" },
+	{ BRCMF_E_JOIN_START, "JOIN_START" },
+	{ BRCMF_E_ROAM_START, "ROAM_START" },
+	{ BRCMF_E_ASSOC_START, "ASSOC_START" },
+	{ BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC" },
+	{ BRCMF_E_RADIO, "RADIO" },
+	{ BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+	{ BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG" },
+	{ BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
+	{ BRCMF_E_PSK_SUP, "PSK_SUP" },
+	{ BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED" },
+	{ BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
+	{ BRCMF_E_ICV_ERROR, "ICV_ERROR" },
+	{ BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
+	{ BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
+	{ BRCMF_E_TRACE, "TRACE" },
+	{ BRCMF_E_IF, "IF" },
+	{ BRCMF_E_RSSI, "RSSI" },
+	{ BRCMF_E_PFN_SCAN_COMPLETE, "PFN_SCAN_COMPLETE" },
+	{ BRCMF_E_EXTLOG_MSG, "EXTLOG_MSG" },
+	{ BRCMF_E_ACTION_FRAME, "ACTION_FRAME" },
+	{ BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+	{ BRCMF_E_PRE_ASSOC_IND, "PRE_ASSOC_IND" },
+	{ BRCMF_E_PRE_REASSOC_IND, "PRE_REASSOC_IND" },
+	{ BRCMF_E_CHANNEL_ADOPTED, "CHANNEL_ADOPTED" },
+	{ BRCMF_E_AP_STARTED, "AP_STARTED" },
+	{ BRCMF_E_DFS_AP_STOP, "DFS_AP_STOP" },
+	{ BRCMF_E_DFS_AP_RESUME, "DFS_AP_RESUME" },
+	{ BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT" },
+	{ BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "ACTION_FRM_OFF_CHAN_CMPLT" },
+	{ BRCMF_E_DCS_REQUEST, "DCS_REQUEST" },
+	{ BRCMF_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP"}
+};
+
+/**
+ * brcmf_fweh_event_name() - returns name for given event code.
+ *
+ * @code: code to lookup.
+ */
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) {
+		if (fweh_event_names[i].code == code)
+			return fweh_event_names[i].name;
+	}
+	return "unknown";
+}
+#else
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+	return "nodebug";
+}
+#endif
+
+/**
+ * brcmf_fweh_queue_event() - create and queue event.
+ *
+ * @fweh: firmware event handling info.
+ * @event: event queue entry.
+ */
+static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
+				   struct brcmf_fweh_queue_item *event)
+{
+	ulong flags;
+
+	spin_lock_irqsave(&fweh->evt_q_lock, flags);
+	list_add_tail(&event->q, &fweh->event_q);
+	spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+	schedule_work(&fweh->event_work);
+}
+
+static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
+					 enum brcmf_fweh_event_code code,
+					 struct brcmf_event_msg *emsg,
+					 void *data)
+{
+	struct brcmf_fweh_info *fweh;
+	int err = -EINVAL;
+
+	if (ifp) {
+		fweh = &ifp->drvr->fweh;
+
+		/* handle the event if valid interface and handler */
+		if (ifp->ndev && fweh->evt_handler[code])
+			err = fweh->evt_handler[code](ifp, emsg, data);
+		else
+			brcmf_dbg(ERROR, "unhandled event %d ignored\n", code);
+	} else {
+		brcmf_dbg(ERROR, "no interface object\n");
+	}
+	return err;
+}
+
+/**
+ * brcmf_fweh_handle_if_event() - handle IF event.
+ *
+ * @drvr: driver information object.
+ * @emsg: firmware event message.
+ * @data: event specific data, interpreted as struct brcmf_if_event.
+ */
+static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
+				       struct brcmf_event_msg *emsg,
+				       void *data)
+{
+	struct brcmf_if_event *ifevent = data;
+	struct brcmf_if *ifp;
+	int err = 0;
+
+	brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u\n",
+		  ifevent->action, ifevent->ifidx,
+		  ifevent->bssidx, ifevent->flags);
+
+	if (ifevent->ifidx >= BRCMF_MAX_IFS) {
+		brcmf_dbg(ERROR, "invalid interface index: %u\n",
+			  ifevent->ifidx);
+		return;
+	}
+
+	ifp = drvr->iflist[ifevent->ifidx];
+
+	if (ifevent->action == BRCMF_E_IF_ADD) {
+		brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
+			  emsg->addr);
+		ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+				   emsg->ifname, emsg->addr);
+		if (IS_ERR(ifp))
+			return;
+
+		if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+			err = brcmf_net_attach(ifp);
+	}
+
+	err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
+
+	if (ifevent->action == BRCMF_E_IF_DEL)
+		brcmf_del_if(drvr, ifevent->ifidx);
+}
+
+/**
+ * brcmf_fweh_dequeue_event() - get event from the queue.
+ *
+ * @fweh: firmware event handling info.
+ */
+static struct brcmf_fweh_queue_item *
+brcmf_fweh_dequeue_event(struct brcmf_fweh_info *fweh)
+{
+	struct brcmf_fweh_queue_item *event = NULL;
+	ulong flags;
+
+	spin_lock_irqsave(&fweh->evt_q_lock, flags);
+	if (!list_empty(&fweh->event_q)) {
+		event = list_first_entry(&fweh->event_q,
+					 struct brcmf_fweh_queue_item, q);
+		list_del(&event->q);
+	}
+	spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+
+	return event;
+}
+
+/**
+ * brcmf_fweh_event_worker() - firmware event worker.
+ *
+ * @work: worker object.
+ */
+static void brcmf_fweh_event_worker(struct work_struct *work)
+{
+	struct brcmf_pub *drvr;
+	struct brcmf_if *ifp;
+	struct brcmf_fweh_info *fweh;
+	struct brcmf_fweh_queue_item *event;
+	int err = 0;
+	struct brcmf_event_msg_be *emsg_be;
+	struct brcmf_event_msg emsg;
+
+	fweh = container_of(work, struct brcmf_fweh_info, event_work);
+	drvr = container_of(fweh, struct brcmf_pub, fweh);
+
+	while ((event = brcmf_fweh_dequeue_event(fweh))) {
+		ifp = drvr->iflist[event->ifidx];
+
+		brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM:\n",
+			  brcmf_fweh_event_name(event->code), event->code,
+			  event->emsg.ifidx, event->emsg.bsscfgidx,
+			  event->emsg.addr);
+
+		/* convert event message */
+		emsg_be = &event->emsg;
+		emsg.version = be16_to_cpu(emsg_be->version);
+		emsg.flags = be16_to_cpu(emsg_be->flags);
+		emsg.event_code = event->code;
+		emsg.status = be32_to_cpu(emsg_be->status);
+		emsg.reason = be32_to_cpu(emsg_be->reason);
+		emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
+		emsg.datalen = be32_to_cpu(emsg_be->datalen);
+		memcpy(emsg.addr, emsg_be->addr, ETH_ALEN);
+		memcpy(emsg.ifname, emsg_be->ifname, sizeof(emsg.ifname));
+		emsg.ifidx = emsg_be->ifidx;
+		emsg.bsscfgidx = emsg_be->bsscfgidx;
+
+		brcmf_dbg(EVENT, "  version %u flags %u status %u reason %u\n",
+			  emsg.version, emsg.flags, emsg.status, emsg.reason);
+		brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
+				   min_t(u32, emsg.datalen, 64),
+				   "appended:");
+
+		/* special handling of interface event */
+		if (event->code == BRCMF_E_IF) {
+			brcmf_fweh_handle_if_event(drvr, &emsg, event->data);
+			goto event_free;
+		}
+
+		err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
+						    event->data);
+		if (err) {
+			brcmf_dbg(ERROR, "event handler failed (%d)\n",
+				  event->code);
+			err = 0;
+		}
+event_free:
+		kfree(event);
+	}
+}
+
+/**
+ * brcmf_fweh_attach() - initialize firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
+	spin_lock_init(&fweh->evt_q_lock);
+	INIT_LIST_HEAD(&fweh->event_q);
+}
+
+/**
+ * brcmf_fweh_detach() - cleanup firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_detach(struct brcmf_pub *drvr)
+{
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	struct brcmf_if *ifp = drvr->iflist[0];
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+	if (ifp) {
+		/* clear all events */
+		memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
+		(void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
+					       eventmask,
+					       BRCMF_EVENTING_MASK_LEN);
+	}
+	/* cancel the worker */
+	cancel_work_sync(&fweh->event_work);
+	WARN_ON(!list_empty(&fweh->event_q));
+	memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+}
+
+/**
+ * brcmf_fweh_register() - register handler for given event code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ * @handler: handler for the given event code.
+ */
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+			brcmf_fweh_handler_t handler)
+{
+	if (drvr->fweh.evt_handler[code]) {
+		brcmf_dbg(ERROR, "event code %d already registered\n", code);
+		return -ENOSPC;
+	}
+	drvr->fweh.evt_handler[code] = handler;
+	brcmf_dbg(TRACE, "event handler registered for %s\n",
+		  brcmf_fweh_event_name(code));
+	return 0;
+}
+
+/**
+ * brcmf_fweh_unregister() - remove handler for given code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ */
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+			   enum brcmf_fweh_event_code code)
+{
+	brcmf_dbg(TRACE, "event handler cleared for %s\n",
+		  brcmf_fweh_event_name(code));
+	drvr->fweh.evt_handler[code] = NULL;
+}
+
+/**
+ * brcmf_fweh_activate_events() - enable firmware events that have a registered handler.
+ *
+ * @ifp: primary interface object.
+ */
+int brcmf_fweh_activate_events(struct brcmf_if *ifp)
+{
+	int i, err;
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+	for (i = 0; i < BRCMF_E_LAST; i++) {
+		if (ifp->drvr->fweh.evt_handler[i]) {
+			brcmf_dbg(EVENT, "enable event %s\n",
+				  brcmf_fweh_event_name(i));
+			setbit(eventmask, i);
+		}
+	}
+
+	/* want to handle IF event as well */
+	brcmf_dbg(EVENT, "enable event IF\n");
+	setbit(eventmask, BRCMF_E_IF);
+
+	err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
+				       eventmask, BRCMF_EVENTING_MASK_LEN);
+	if (err)
+		brcmf_dbg(ERROR, "Set event_msgs error (%d)\n", err);
+
+	return err;
+}
+
+/**
+ * brcmf_fweh_process_event() - process skb as firmware event.
+ *
+ * @drvr: driver information object.
+ * @event_packet: event packet to process.
+ * @ifidx: index of the firmware interface (may change).
+ *
+ * If the packet buffer contains a firmware event message it will
+ * dispatch the event to a registered handler (using worker).
+ */
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+			      struct brcmf_event *event_packet, u8 *ifidx)
+{
+	enum brcmf_fweh_event_code code;
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	struct brcmf_fweh_queue_item *event;
+	gfp_t alloc_flag = GFP_KERNEL;
+	void *data;
+	u32 datalen;
+
+	/* get event info */
+	code = get_unaligned_be32(&event_packet->msg.event_type);
+	datalen = get_unaligned_be32(&event_packet->msg.datalen);
+	*ifidx = event_packet->msg.ifidx;
+	data = &event_packet[1];
+
+	if (code >= BRCMF_E_LAST)
+		return;
+
+	if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+		return;
+
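+	/* the bus rx path may run in atomic context; avoid sleeping allocations there */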
+	if (in_interrupt())
+		alloc_flag = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event) + datalen, alloc_flag);
+	if (!event)
+		return;
+
+	event->code = code;
+	event->ifidx = *ifidx;
+
+	/* use memcpy to get aligned event message */
+	memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
+	memcpy(event->data, data, datalen);
+	memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
+
+	brcmf_fweh_queue_event(fweh, event);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
new file mode 100644
index 0000000..b39246a
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWEH_H_
+#define FWEH_H_
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+
+/* forward declarations */
+struct brcmf_pub;
+struct brcmf_if;
+struct brcmf_cfg80211_info;
+struct brcmf_event;
+
+/* firmware event codes sent by the dongle */
+enum brcmf_fweh_event_code {
+	BRCMF_E_SET_SSID		= 0,
+	BRCMF_E_JOIN			= 1,
+	BRCMF_E_START			= 2,
+	BRCMF_E_AUTH			= 3,
+	BRCMF_E_AUTH_IND		= 4,
+	BRCMF_E_DEAUTH			= 5,
+	BRCMF_E_DEAUTH_IND		= 6,
+	BRCMF_E_ASSOC			= 7,
+	BRCMF_E_ASSOC_IND		= 8,
+	BRCMF_E_REASSOC			= 9,
+	BRCMF_E_REASSOC_IND		= 10,
+	BRCMF_E_DISASSOC		= 11,
+	BRCMF_E_DISASSOC_IND		= 12,
+	BRCMF_E_QUIET_START		= 13,
+	BRCMF_E_QUIET_END		= 14,
+	BRCMF_E_BEACON_RX		= 15,
+	BRCMF_E_LINK			= 16,
+	BRCMF_E_MIC_ERROR		= 17,
+	BRCMF_E_NDIS_LINK		= 18,
+	BRCMF_E_ROAM			= 19,
+	BRCMF_E_TXFAIL			= 20,
+	BRCMF_E_PMKID_CACHE		= 21,
+	BRCMF_E_RETROGRADE_TSF		= 22,
+	BRCMF_E_PRUNE			= 23,
+	BRCMF_E_AUTOAUTH		= 24,
+	BRCMF_E_EAPOL_MSG		= 25,
+	BRCMF_E_SCAN_COMPLETE		= 26,
+	BRCMF_E_ADDTS_IND		= 27,
+	BRCMF_E_DELTS_IND		= 28,
+	BRCMF_E_BCNSENT_IND		= 29,
+	BRCMF_E_BCNRX_MSG		= 30,
+	BRCMF_E_BCNLOST_MSG		= 31,
+	BRCMF_E_ROAM_PREP		= 32,
+	BRCMF_E_PFN_NET_FOUND		= 33,
+	BRCMF_E_PFN_NET_LOST		= 34,
+	BRCMF_E_RESET_COMPLETE		= 35,
+	BRCMF_E_JOIN_START		= 36,
+	BRCMF_E_ROAM_START		= 37,
+	BRCMF_E_ASSOC_START		= 38,
+	BRCMF_E_IBSS_ASSOC		= 39,
+	BRCMF_E_RADIO			= 40,
+	BRCMF_E_PSM_WATCHDOG		= 41,
+	BRCMF_E_PROBREQ_MSG		= 44,
+	BRCMF_E_SCAN_CONFIRM_IND	= 45,
+	BRCMF_E_PSK_SUP			= 46,
+	BRCMF_E_COUNTRY_CODE_CHANGED	= 47,
+	BRCMF_E_EXCEEDED_MEDIUM_TIME	= 48,
+	BRCMF_E_ICV_ERROR		= 49,
+	BRCMF_E_UNICAST_DECODE_ERROR	= 50,
+	BRCMF_E_MULTICAST_DECODE_ERROR	= 51,
+	BRCMF_E_TRACE			= 52,
+	BRCMF_E_IF			= 54,
+	BRCMF_E_RSSI			= 56,
+	BRCMF_E_PFN_SCAN_COMPLETE	= 57,
+	BRCMF_E_EXTLOG_MSG		= 58,
+	BRCMF_E_ACTION_FRAME		= 59,
+	BRCMF_E_ACTION_FRAME_COMPLETE	= 60,
+	BRCMF_E_PRE_ASSOC_IND		= 61,
+	BRCMF_E_PRE_REASSOC_IND		= 62,
+	BRCMF_E_CHANNEL_ADOPTED		= 63,
+	BRCMF_E_AP_STARTED		= 64,
+	BRCMF_E_DFS_AP_STOP		= 65,
+	BRCMF_E_DFS_AP_RESUME		= 66,
+	BRCMF_E_ESCAN_RESULT		= 69,
+	BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE	= 70,
+	BRCMF_E_DCS_REQUEST		= 73,
+	BRCMF_E_FIFO_CREDIT_MAP		= 74,
+	BRCMF_E_LAST
+};
+
+/* flags field values in struct brcmf_event_msg */
+#define BRCMF_EVENT_MSG_LINK		0x01
+#define BRCMF_EVENT_MSG_FLUSHTXQ	0x02
+#define BRCMF_EVENT_MSG_GROUP		0x04
+
+/**
+ * definitions for event packet validation.
+ */
+#define BRCMF_EVENT_OUI_OFFSET		19
+#define BRCM_OUI			"\x00\x10\x18"
+#define DOT11_OUI_LEN			3
+#define BCMILCP_BCM_SUBTYPE_EVENT	1
+
+
+/**
+ * struct brcmf_event_msg - firmware event message.
+ *
+ * @version: version information.
+ * @flags: event flags.
+ * @event_code: firmware event code.
+ * @status: status information.
+ * @reason: reason code.
+ * @auth_type: authentication type.
+ * @datalen: length of event data buffer.
+ * @addr: ether address.
+ * @ifname: interface name.
+ * @ifidx: interface index.
+ * @bsscfgidx: bsscfg index.
+ */
+struct brcmf_event_msg {
+	u16 version;
+	u16 flags;
+	u32 event_code;
+	u32 status;
+	u32 reason;
+	s32 auth_type;
+	u32 datalen;
+	u8 addr[ETH_ALEN];
+	char ifname[IFNAMSIZ];
+	u8 ifidx;
+	u8 bsscfgidx;
+};
+
+typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
+				    const struct brcmf_event_msg *evtmsg,
+				    void *data);
+
+/**
+ * struct brcmf_fweh_info - firmware event handling information.
+ *
+ * @event_work: event worker.
+ * @evt_q_lock: lock for event queue protection.
+ * @event_q: event queue.
+ * @evt_handler: registered event handlers.
+ */
+struct brcmf_fweh_info {
+	struct work_struct event_work;
+	struct spinlock evt_q_lock;
+	struct list_head event_q;
+	int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
+					 const struct brcmf_event_msg *evtmsg,
+					 void *data);
+};
+
+void brcmf_fweh_attach(struct brcmf_pub *drvr);
+void brcmf_fweh_detach(struct brcmf_pub *drvr);
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+			int (*handler)(struct brcmf_if *ifp,
+				       const struct brcmf_event_msg *evtmsg,
+				       void *data));
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+			   enum brcmf_fweh_event_code code);
+int brcmf_fweh_activate_events(struct brcmf_if *ifp);
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+			      struct brcmf_event *event_packet, u8 *ifidx);
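+
+/* Illustrative flow (editor's sketch, not mandated by this interface): a
+ * module registers a handler with brcmf_fweh_register(drvr, BRCMF_E_LINK,
+ * handler), programs the firmware event mask via
+ * brcmf_fweh_activate_events(ifp), and the bus rx path feeds received event
+ * frames to brcmf_fweh_process_skb().
+ */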
+
+static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
+					  struct sk_buff *skb, u8 *ifidx)
+{
+	struct brcmf_event *event_packet;
+	u8 *data;
+	u16 usr_stype;
+
+	/* only process events when protocol matches */
+	if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
+		return;
+
+	/* check for BRCM oui match */
+	event_packet = (struct brcmf_event *)skb_mac_header(skb);
+	data = (u8 *)event_packet;
+	data += BRCMF_EVENT_OUI_OFFSET;
+	if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN))
+		return;
+
+	/* final match on usr_subtype */
+	data += DOT11_OUI_LEN;
+	usr_stype = get_unaligned_be16(data);
+	if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
+		return;
+
+	brcmf_fweh_process_event(drvr, event_packet, ifidx);
+}
+
+#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
new file mode 100644
index 0000000..51a1450
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* FWIL is the Firmware Interface Layer. In this module the support functions
+ * are located to set and get variables to and from the firmware.
+ */
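+
+/* Illustrative use (editor's sketch, "mpc" is only an example iovar name):
+ *
+ *	u32 val = 1;
+ *	int err = brcmf_fil_iovar_int_set(ifp, "mpc", val);
+ *	if (!err)
+ *		err = brcmf_fil_iovar_int_get(ifp, "mpc", &val);
+ *
+ * All helpers serialize on drvr->proto_block and stage the request in
+ * drvr->proto_buf before handing it to the CDC protocol layer.
+ */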
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "fwil.h"
+
+
+#define MAX_HEX_DUMP_LEN	64
+
+
+static s32
+brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+
+	if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+		brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
+		return -EIO;
+	}
+
+	if (data != NULL)
+		len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
+	if (set)
+		err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+	else
+		err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+						 len);
+
+	if (err >= 0)
+		err = 0;
+	else
+		brcmf_dbg(ERROR, "Failed err=%d\n", err);
+
+	return err;
+}
+
+s32
+brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+	s32 err;
+
+	mutex_lock(&ifp->drvr->proto_block);
+
+	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+s32
+brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+	s32 err;
+
+	mutex_lock(&ifp->drvr->proto_block);
+	err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
+
+	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+
+s32
+brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+	s32 err;
+	__le32 data_le = cpu_to_le32(data);
+
+	mutex_lock(&ifp->drvr->proto_block);
+	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+s32
+brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+	s32 err;
+	__le32 data_le = cpu_to_le32(*data);
+
+	mutex_lock(&ifp->drvr->proto_block);
+	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+	mutex_unlock(&ifp->drvr->proto_block);
+	*data = le32_to_cpu(data_le);
+
+	return err;
+}
+
+static u32
+brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+{
+	u32 len;
+
+	len = strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	memcpy(buf, name, len);
+
+	/* append data onto the end of the name string */
+	if (data && datalen)
+		memcpy(&buf[len], data, datalen);
+
+	return len + datalen;
+}
+
+
+s32
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+			 u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+				    sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+					 buflen, true);
+	} else {
+		err = -EPERM;
+		brcmf_dbg(ERROR, "Creating iovar failed\n");
+	}
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+			 u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+				    sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+					 buflen, false);
+		if (err == 0)
+			memcpy(data, drvr->proto_buf, len);
+	} else {
+		err = -EPERM;
+		brcmf_dbg(ERROR, "Creating iovar failed\n");
+	}
+
+	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+	__le32 data_le = cpu_to_le32(data);
+
+	return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+
+s32
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+	__le32 data_le = cpu_to_le32(*data);
+	s32 err;
+
+	err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+	if (err == 0)
+		*data = le32_to_cpu(data_le);
+	return err;
+}
+
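+/* Per-BSS iovars are wrapped as "bsscfg:<name>\0<bssidx (le32)><data>" so the
+ * firmware can direct them to the right BSS configuration; bssidx 0 falls
+ * back to a plain iovar.
+ */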
+static u32
+brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
+		    u32 buflen)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+	__le32 bssidx_le;
+
+	if (bssidx == 0)
+		return brcmf_create_iovar(name, data, datalen, buf, buflen);
+
+	prefixlen = strlen(prefix);
+	namelen = strlen(name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+	if (buflen < iolen) {
+		brcmf_dbg(ERROR, "buffer is too short\n");
+		return 0;
+	}
+
+	p = buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, name, namelen);
+	p += namelen;
+
+	/* bss config index as first data */
+	bssidx_le = cpu_to_le32(bssidx);
+	memcpy(p, &bssidx_le, sizeof(bssidx_le));
+	p += sizeof(bssidx_le);
+
+	/* parameter buffer follows */
+	if (datalen)
+		memcpy(p, data, datalen);
+
+	return iolen;
+}
+
+s32
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+			  void *data, u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+				     drvr->proto_buf, sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+					 buflen, true);
+	} else {
+		err = -EPERM;
+		brcmf_dbg(ERROR, "Creating bsscfg failed\n");
+	}
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+			  void *data, u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+				     drvr->proto_buf, sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+					 buflen, false);
+		if (err == 0)
+			memcpy(data, drvr->proto_buf, len);
+	} else {
+		err = -EPERM;
+		brcmf_dbg(ERROR, "Creating bsscfg failed\n");
+	}
+	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+
+}
+
+s32
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+	__le32 data_le = cpu_to_le32(data);
+
+	return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+					 sizeof(data_le));
+}
+
+s32
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+	__le32 data_le = cpu_to_le32(*data);
+	s32 err;
+
+	err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+					sizeof(data_le));
+	if (err == 0)
+		*data = le32_to_cpu(data_le);
+	return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
new file mode 100644
index 0000000..16eb820
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _fwil_h_
+#define _fwil_h_
+
+s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+			     u32 len);
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+			     u32 len);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+			      u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+			      u32 len);
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 7a6dfdc..39a5baa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -14,24 +14,12 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
 #include <linux/vmalloc.h>
-#include <net/cfg80211.h>
 
-#include <defs.h>
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include <dhd_bus.h>
@@ -42,14 +30,11 @@
 
 #define IOCTL_RESP_TIMEOUT  2000
 
-#define BRCMF_USB_SYNC_TIMEOUT		300	/* ms */
-#define BRCMF_USB_DLIMAGE_SPINWAIT	100	/* in unit of ms */
-#define BRCMF_USB_DLIMAGE_LIMIT		500	/* spinwait limit (ms) */
+#define BRCMF_USB_RESET_GETVER_SPINWAIT	100	/* in unit of ms */
+#define BRCMF_USB_RESET_GETVER_LOOP_CNT	10
 
 #define BRCMF_POSTBOOT_ID		0xA123  /* ID to detect if dongle
 						   has boot up */
-#define BRCMF_USB_RESETCFG_SPINWAIT	1	/* wait after resetcfg (ms) */
-
 #define BRCMF_USB_NRXQ	50
 #define BRCMF_USB_NTXQ	50
 
@@ -70,16 +55,6 @@
 #define BRCMF_USB_43236_FW_NAME	"brcm/brcmfmac43236b.bin"
 #define BRCMF_USB_43242_FW_NAME	"brcm/brcmfmac43242a.bin"
 
-enum usbdev_suspend_state {
-	USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
-						  suspend */
-	USBOS_SUSPEND_STATE_SUSPEND_PENDING,	/* Device is idle, can be
-						 * suspended. Wating PM to
-						 * suspend the device
-						 */
-	USBOS_SUSPEND_STATE_SUSPENDED	/* Device suspended */
-};
-
 struct brcmf_usb_image {
 	struct list_head list;
 	s8 *fwname;
@@ -100,10 +75,8 @@
 	struct list_head rx_postq;
 	struct list_head tx_freeq;
 	struct list_head tx_postq;
-	enum usbdev_suspend_state suspend_state;
 	uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
 
-	bool activity;
 	int rx_low_watermark;
 	int tx_low_watermark;
 	int tx_high_watermark;
@@ -116,10 +89,6 @@
 	u8 *image;	/* buffer for combine fw and nvram */
 	int image_len;
 
-	wait_queue_head_t wait;
-	bool waitdone;
-	int sync_urb_status;
-
 	struct usb_device *usbdev;
 	struct device *dev;
 
@@ -131,7 +100,6 @@
 	int ctl_urb_status;
 	int ctl_completed;
 	wait_queue_head_t ioctl_resp_wait;
-	wait_queue_head_t ctrl_wait;
 	ulong ctl_op;
 
 	struct urb *bulk_urb; /* used for FW download */
@@ -176,6 +144,7 @@
 static void
 brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
 {
+	brcmf_dbg(USB, "Enter, status=%d\n", status);
 
 	if (unlikely(devinfo == NULL))
 		return;
@@ -203,6 +172,7 @@
 	struct brcmf_usbdev_info *devinfo =
 		(struct brcmf_usbdev_info *)urb->context;
 
+	brcmf_dbg(USB, "Enter\n");
 	devinfo->ctl_urb_actual_length = urb->actual_length;
 	brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
 		urb->status);
@@ -214,33 +184,22 @@
 	struct brcmf_usbdev_info *devinfo =
 		(struct brcmf_usbdev_info *)urb->context;
 
+	brcmf_dbg(USB, "Enter\n");
 	brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
 		urb->status);
 }
 
-static int brcmf_usb_pnp(struct brcmf_usbdev_info *devinfo, uint state)
-{
-	return 0;
-}
-
 static int
 brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
 {
 	int ret;
 	u16 size;
 
+	brcmf_dbg(USB, "Enter\n");
 	if (devinfo == NULL || buf == NULL ||
 	    len == 0 || devinfo->ctl_urb == NULL)
 		return -EINVAL;
 
-	/* If the USB/HSIC bus in sleep state, wake it up */
-	if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED)
-		if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
-			brcmf_dbg(ERROR, "Could not Resume the bus!\n");
-			return -EIO;
-		}
-
-	devinfo->activity = true;
 	size = len;
 	devinfo->ctl_write.wLength = cpu_to_le16p(&size);
 	devinfo->ctl_urb->transfer_buffer_length = size;
@@ -268,6 +227,7 @@
 	int ret;
 	u16 size;
 
+	brcmf_dbg(USB, "Enter\n");
 	if ((devinfo == NULL) || (buf == NULL) || (len == 0)
 		|| (devinfo->ctl_urb == NULL))
 		return -EINVAL;
@@ -301,10 +261,9 @@
 	int timeout = 0;
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
-	if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
-		/* TODO: handle suspend/resume */
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
 		return -EIO;
-	}
 
 	if (test_and_set_bit(0, &devinfo->ctl_op))
 		return -EIO;
@@ -331,10 +290,10 @@
 	int timeout = 0;
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
-	if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
-		/* TODO: handle suspend/resume */
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
 		return -EIO;
-	}
+
 	if (test_and_set_bit(0, &devinfo->ctl_op))
 		return -EIO;
 
@@ -459,6 +418,8 @@
 	struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
 	struct brcmf_usbdev_info *devinfo = req->devinfo;
 
+	brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
+		  req->skb);
 	brcmf_usb_del_fromq(devinfo, req);
 	if (urb->status == 0)
 		devinfo->bus_pub.bus->dstats.tx_packets++;
@@ -484,6 +445,7 @@
 	struct sk_buff *skb;
 	int ifidx = 0;
 
+	brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
 	brcmf_usb_del_fromq(devinfo, req);
 	skb = req->skb;
 	req->skb = NULL;
@@ -497,7 +459,7 @@
 		return;
 	}
 
-	if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) {
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
 		skb_put(skb, urb->actual_length);
 		if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
 			brcmf_dbg(ERROR, "rx protocol error\n");
@@ -550,8 +512,8 @@
 {
 	struct brcmf_usbreq *req;
 
-	if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
-		brcmf_dbg(ERROR, "bus is not up\n");
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+		brcmf_dbg(ERROR, "bus is not up=%d\n", devinfo->bus_pub.state);
 		return;
 	}
 	while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
@@ -564,29 +526,24 @@
 	struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
 	int old_state;
 
+	brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
+		  devinfo->bus_pub.state, state);
 
 	if (devinfo->bus_pub.state == state)
 		return;
 
 	old_state = devinfo->bus_pub.state;
-	brcmf_dbg(TRACE, "dbus state change from %d to to %d\n",
-		  old_state, state);
-
-	/* Don't update state if it's PnP firmware re-download */
-	if (state != BCMFMAC_USB_STATE_PNP_FWDL) /* TODO */
-		devinfo->bus_pub.state = state;
-
-	if ((old_state  == BCMFMAC_USB_STATE_SLEEP)
-		&& (state == BCMFMAC_USB_STATE_UP)) {
-		brcmf_usb_rx_fill_all(devinfo);
-	}
+	devinfo->bus_pub.state = state;
 
 	/* update state of upper layer */
-	if (state == BCMFMAC_USB_STATE_DOWN) {
-		brcmf_dbg(INFO, "DBUS is down\n");
+	if (state == BRCMFMAC_USB_STATE_DOWN) {
+		brcmf_dbg(USB, "DBUS is down\n");
 		bcmf_bus->state = BRCMF_BUS_DOWN;
+	} else if (state == BRCMFMAC_USB_STATE_UP) {
+		brcmf_dbg(USB, "DBUS is up\n");
+		bcmf_bus->state = BRCMF_BUS_DATA;
 	} else {
-		brcmf_dbg(INFO, "DBUS current state=%d\n", state);
+		brcmf_dbg(USB, "DBUS current state=%d\n", state);
 	}
 }
 
@@ -595,30 +552,32 @@
 {
 	struct brcmf_usbdev_info *devinfo =
 			(struct brcmf_usbdev_info *)urb->context;
-	bool killed;
+	int err;
+
+	brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
 
 	if (devinfo == NULL)
 		return;
 
 	if (unlikely(urb->status)) {
-		if (devinfo->suspend_state ==
-			USBOS_SUSPEND_STATE_SUSPEND_PENDING)
-			killed = true;
-
-		if ((urb->status == -ENOENT && (!killed))
-			|| urb->status == -ESHUTDOWN ||
-			urb->status == -ENODEV) {
-			brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+		if (urb->status == -ENOENT ||
+		    urb->status == -ESHUTDOWN ||
+		    urb->status == -ENODEV) {
+			brcmf_usb_state_change(devinfo,
+					       BRCMFMAC_USB_STATE_DOWN);
 		}
 	}
 
-	if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN) {
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
 		brcmf_dbg(ERROR, "intr cb when DBUS down, ignoring\n");
 		return;
 	}
 
-	if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
-		usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+		err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+		if (err)
+			brcmf_dbg(ERROR, "usb_submit_urb, err=%d\n", err);
+	}
 }
 
 static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
@@ -627,10 +586,9 @@
 	struct brcmf_usbreq  *req;
 	int ret;
 
-	if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
-		/* TODO: handle suspend/resume */
+	brcmf_dbg(USB, "Enter, skb=%p\n", skb);
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
 		return -EIO;
-	}
 
 	req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
 					&devinfo->tx_freecount);
@@ -670,25 +628,16 @@
 {
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 	u16 ifnum;
+	int ret;
 
-	if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
 		return 0;
 
-	/* If the USB/HSIC bus in sleep state, wake it up */
-	if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED) {
-		if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
-			brcmf_dbg(ERROR, "Could not Resume the bus!\n");
-			return -EIO;
-		}
-	}
-	devinfo->activity = true;
-
 	/* Success, indicate devinfo is fully up */
-	brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_UP);
+	brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
 
 	if (devinfo->intr_urb) {
-		int ret;
-
 		usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
 			devinfo->intr_pipe,
 			&devinfo->intr,
@@ -733,14 +682,14 @@
 {
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
+	brcmf_dbg(USB, "Enter\n");
 	if (devinfo == NULL)
 		return;
 
-	brcmf_dbg(TRACE, "enter\n");
-	if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN)
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
 		return;
 
-	brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+	brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
 	if (devinfo->intr_urb)
 		usb_kill_urb(devinfo->intr_urb);
 
@@ -754,34 +703,14 @@
 	brcmf_usb_free_q(&devinfo->rx_postq, true);
 }
 
-static int
-brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
-{
-	int ret;
-	int err = 0;
-	int ms = time;
-
-	ret = wait_event_interruptible_timeout(devinfo->wait,
-		devinfo->waitdone == true, (ms * HZ / 1000));
-
-	if ((devinfo->waitdone == false) || (devinfo->sync_urb_status)) {
-		brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
-			  ret, devinfo->sync_urb_status);
-		err = -EINVAL;
-	}
-	devinfo->waitdone = false;
-	return err;
-}
-
 static void
 brcmf_usb_sync_complete(struct urb *urb)
 {
 	struct brcmf_usbdev_info *devinfo =
 			(struct brcmf_usbdev_info *)urb->context;
 
-	devinfo->waitdone = true;
-	wake_up_interruptible(&devinfo->wait);
-	devinfo->sync_urb_status = urb->status;
+	devinfo->ctl_completed = true;
+	brcmf_usb_ioctl_resp_wake(devinfo);
 }
 
 static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
@@ -813,6 +742,7 @@
 		(void *) tmpbuf, size,
 		(usb_complete_t)brcmf_usb_sync_complete, devinfo);
 
+	devinfo->ctl_completed = false;
 	ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
@@ -820,11 +750,11 @@
 		return false;
 	}
 
-	ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
+	ret = brcmf_usb_ioctl_resp_wait(devinfo);
 	memcpy(buffer, tmpbuf, buflen);
 	kfree(tmpbuf);
 
-	return (ret == 0);
+	return ret;
 }
 
 static bool
@@ -833,27 +763,25 @@
 	struct bootrom_id_le id;
 	u32 chipid, chiprev;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 
 	if (devinfo == NULL)
 		return false;
 
 	/* Check if firmware downloaded already by querying runtime ID */
 	id.chip = cpu_to_le32(0xDEAD);
-	brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
-		sizeof(struct bootrom_id_le));
+	brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
 
 	chipid = le32_to_cpu(id.chip);
 	chiprev = le32_to_cpu(id.chiprev);
 
 	if ((chipid & 0x4300) == 0x4300)
-		brcmf_dbg(INFO, "chip %x rev 0x%x\n", chipid, chiprev);
+		brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
 	else
-		brcmf_dbg(INFO, "chip %d rev 0x%x\n", chipid, chiprev);
+		brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
 	if (chipid == BRCMF_POSTBOOT_ID) {
-		brcmf_dbg(INFO, "firmware already downloaded\n");
-		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
-			sizeof(struct bootrom_id_le));
+		brcmf_dbg(USB, "firmware already downloaded\n");
+		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
 		return false;
 	} else {
 		devinfo->bus_pub.devid = chipid;
@@ -866,38 +794,29 @@
 brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
 {
 	struct bootrom_id_le id;
-	u16 wait = 0, wait_time;
+	u32 loop_cnt;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 
-	if (devinfo == NULL)
-		return -EINVAL;
-
-	/* Give dongle chance to boot */
-	wait_time = BRCMF_USB_DLIMAGE_SPINWAIT;
-	while (wait < BRCMF_USB_DLIMAGE_LIMIT) {
-		mdelay(wait_time);
-		wait += wait_time;
+	loop_cnt = 0;
+	do {
+		mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
+		loop_cnt++;
 		id.chip = cpu_to_le32(0xDEAD);       /* Get the ID */
-		brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
-			sizeof(struct bootrom_id_le));
+		brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
 		if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
 			break;
-	}
+	} while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
 
 	if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
-		brcmf_dbg(INFO, "download done %d ms postboot chip 0x%x/rev 0x%x\n",
-			  wait, le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
+		brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
+			  le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
 
-		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
-			sizeof(struct bootrom_id_le));
-
-		/* XXX this wait may not be necessary */
-		mdelay(BRCMF_USB_RESETCFG_SPINWAIT);
+		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
 		return 0;
 	} else {
 		brcmf_dbg(ERROR, "Cannot talk to Dongle. Firmware is not UP, %d ms\n",
-			  wait);
+			  BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
 		return -EINVAL;
 	}
 }
@@ -918,13 +837,14 @@
 
 	devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
 
+	devinfo->ctl_completed = false;
 	ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
 	if (ret) {
 		brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
 		return ret;
 	}
-	ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
-	return ret;
+	ret = brcmf_usb_ioctl_resp_wait(devinfo);
+	return (ret == 0);
 }
 
 static int
@@ -935,7 +855,8 @@
 	struct rdl_state_le state;
 	u32 rdlstate, rdlbytes;
 	int err = 0;
-	brcmf_dbg(TRACE, "fw %p, len %d\n", fw, fwlen);
+
+	brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
 
 	bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
 	if (bulkchunk == NULL) {
@@ -1010,7 +931,7 @@
 
 fail:
 	kfree(bulkchunk);
-	brcmf_dbg(TRACE, "err=%d\n", err);
+	brcmf_dbg(USB, "Exit, err=%d\n", err);
 	return err;
 }
 
@@ -1018,7 +939,7 @@
 {
 	int err;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 
 	if (devinfo == NULL)
 		return -EINVAL;
@@ -1028,10 +949,10 @@
 
 	err = brcmf_usb_dl_writeimage(devinfo, fw, len);
 	if (err == 0)
-		devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_DONE;
+		devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
 	else
-		devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_PENDING;
-	brcmf_dbg(TRACE, "exit: err=%d\n", err);
+		devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
+	brcmf_dbg(USB, "Exit, err=%d\n", err);
 
 	return err;
 }
@@ -1040,7 +961,7 @@
 {
 	struct rdl_state_le state;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 	if (!devinfo)
 		return -EINVAL;
 
@@ -1063,7 +984,7 @@
 		brcmf_dbg(ERROR, "Dongle not runnable\n");
 		return -EINVAL;
 	}
-	brcmf_dbg(TRACE, "exit\n");
+	brcmf_dbg(USB, "Exit\n");
 	return 0;
 }
 
@@ -1090,7 +1011,7 @@
 	int devid, chiprev;
 	int err;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 	if (devinfo == NULL)
 		return -ENODEV;
 
@@ -1118,7 +1039,7 @@
 
 static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
 {
-	brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
+	brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
 
 	/* free the URBS */
 	brcmf_usb_free_q(&devinfo->rx_freeq, false);
@@ -1153,6 +1074,7 @@
 	struct trx_header_le *trx;
 	int actual_len = -1;
 
+	brcmf_dbg(USB, "Enter\n");
 	/* Extract trx header */
 	trx = (struct trx_header_le *) headers;
 	if (trx->magic != cpu_to_le32(TRX_MAGIC))
@@ -1174,6 +1096,7 @@
 	struct brcmf_usb_image *fw_image;
 	int err;
 
+	brcmf_dbg(USB, "Enter\n");
 	switch (devinfo->bus_pub.devid) {
 	case 43143:
 		fwname = BRCMF_USB_43143_FW_NAME;
@@ -1190,7 +1113,7 @@
 		return -EINVAL;
 		break;
 	}
-
+	brcmf_dbg(USB, "Loading FW %s\n", fwname);
 	list_for_each_entry(fw_image, &fw_image_list, list) {
 		if (fw_image->fwname == fwname) {
 			devinfo->image = fw_image->image;
@@ -1235,10 +1158,13 @@
 struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
 				      int nrxq, int ntxq)
 {
+	brcmf_dbg(USB, "Enter\n");
+
 	devinfo->bus_pub.nrxq = nrxq;
 	devinfo->rx_low_watermark = nrxq / 2;
 	devinfo->bus_pub.devinfo = devinfo;
 	devinfo->bus_pub.ntxq = ntxq;
+	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
 
 	/* flow control when too many tx urbs posted */
 	devinfo->tx_low_watermark = ntxq / 4;
@@ -1284,11 +1210,10 @@
 		goto error;
 	}
 
-	init_waitqueue_head(&devinfo->wait);
 	if (!brcmf_usb_dlneeded(devinfo))
 		return &devinfo->bus_pub;
 
-	brcmf_dbg(TRACE, "start fw downloading\n");
+	brcmf_dbg(USB, "Start fw downloading\n");
 	if (brcmf_usb_get_fw(devinfo))
 		goto error;
 
@@ -1303,14 +1228,14 @@
 	return NULL;
 }
 
-static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
-			      const char *desc,	u32 bustype, u32 hdrlen)
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
 {
 	struct brcmf_bus *bus = NULL;
 	struct brcmf_usbdev *bus_pub = NULL;
 	int ret;
 	struct device *dev = devinfo->dev;
 
+	brcmf_dbg(USB, "Enter\n");
 	bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
 	if (!bus_pub)
 		return -ENODEV;
@@ -1327,14 +1252,13 @@
 	bus->brcmf_bus_stop = brcmf_usb_down;
 	bus->brcmf_bus_txctl = brcmf_usb_tx_ctlpkt;
 	bus->brcmf_bus_rxctl = brcmf_usb_rx_ctlpkt;
-	bus->type = bustype;
 	bus->bus_priv.usb = bus_pub;
 	dev_set_drvdata(dev, bus);
 
 	/* Attach to the common driver interface */
-	ret = brcmf_attach(hdrlen, dev);
+	ret = brcmf_attach(0, dev);
 	if (ret) {
-		brcmf_dbg(ERROR, "dhd_attach failed\n");
+		brcmf_dbg(ERROR, "brcmf_attach failed\n");
 		goto fail;
 	}
 
@@ -1358,7 +1282,7 @@
 {
 	if (!devinfo)
 		return;
-	brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
+	brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
 
 	brcmf_detach(devinfo->dev);
 	kfree(devinfo->bus_pub.bus);
@@ -1376,7 +1300,7 @@
 	u8 endpoint_num;
 	struct brcmf_usbdev_info *devinfo;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 
 	devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
 	if (devinfo == NULL)
@@ -1477,11 +1401,11 @@
 	devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
 
 	if (usb->speed == USB_SPEED_HIGH)
-		brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
+		brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
 	else
-		brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
+		brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
 
-	ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
+	ret = brcmf_usb_probe_cb(devinfo);
 	if (ret)
 		goto fail;
 
@@ -1501,40 +1425,55 @@
 {
 	struct brcmf_usbdev_info *devinfo;
 
-	brcmf_dbg(TRACE, "enter\n");
+	brcmf_dbg(USB, "Enter\n");
 	devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
 	brcmf_usb_disconnect_cb(devinfo);
 	kfree(devinfo);
+	brcmf_dbg(USB, "Exit\n");
 }
 
 /*
- *	only need to signal the bus being down and update the suspend state.
+ * only need to signal the bus being down and update the state.
  */
 static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
 {
 	struct usb_device *usb = interface_to_usbdev(intf);
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
 
-	brcmf_dbg(TRACE, "enter\n");
-	devinfo->bus_pub.state = BCMFMAC_USB_STATE_DOWN;
-	devinfo->suspend_state = USBOS_SUSPEND_STATE_SUSPENDED;
+	brcmf_dbg(USB, "Enter\n");
+	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
+	brcmf_detach(&usb->dev);
 	return 0;
 }
 
 /*
- *	mark suspend state active and crank up the bus.
+ * (re-) start the bus.
  */
 static int brcmf_usb_resume(struct usb_interface *intf)
 {
 	struct usb_device *usb = interface_to_usbdev(intf);
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
 
-	brcmf_dbg(TRACE, "enter\n");
-	devinfo->suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
-	brcmf_bus_start(&usb->dev);
+	brcmf_dbg(USB, "Enter\n");
+	if (!brcmf_attach(0, devinfo->dev))
+		return brcmf_bus_start(&usb->dev);
+
 	return 0;
 }
 
+static int brcmf_usb_reset_resume(struct usb_interface *intf)
+{
+	struct usb_device *usb = interface_to_usbdev(intf);
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+	brcmf_dbg(USB, "Enter\n");
+
+	if (!brcmf_usb_fw_download(devinfo))
+		return brcmf_usb_resume(intf);
+
+	return -EIO;
+}
+
 #define BRCMF_USB_VENDOR_ID_BROADCOM	0x0a5c
 #define BRCMF_USB_DEVICE_ID_43143	0xbd1e
 #define BRCMF_USB_DEVICE_ID_43236	0xbd17
@@ -1554,7 +1493,6 @@
 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
 MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
 
-/* TODO: suspend and resume entries */
 static struct usb_driver brcmf_usbdrvr = {
 	.name = KBUILD_MODNAME,
 	.probe = brcmf_usb_probe,
@@ -1562,6 +1500,7 @@
 	.id_table = brcmf_usb_devid_table,
 	.suspend = brcmf_usb_suspend,
 	.resume = brcmf_usb_resume,
+	.reset_resume = brcmf_usb_reset_resume,
 	.supports_autosuspend = 1,
 	.disable_hub_initiated_lpm = 1,
 };
@@ -1579,12 +1518,14 @@
 
 void brcmf_usb_exit(void)
 {
+	brcmf_dbg(USB, "Enter\n");
 	usb_deregister(&brcmf_usbdrvr);
 	brcmf_release_fw(&fw_image_list);
 }
 
 void brcmf_usb_init(void)
 {
+	brcmf_dbg(USB, "Enter\n");
 	INIT_LIST_HEAD(&fw_image_list);
 	usb_register(&brcmf_usbdrvr);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
index acfa5e8..f483a8c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -17,19 +17,11 @@
 #define BRCMFMAC_USB_H
 
 enum brcmf_usb_state {
-	BCMFMAC_USB_STATE_DL_PENDING,
-	BCMFMAC_USB_STATE_DL_DONE,
-	BCMFMAC_USB_STATE_UP,
-	BCMFMAC_USB_STATE_DOWN,
-	BCMFMAC_USB_STATE_PNP_FWDL,
-	BCMFMAC_USB_STATE_DISCONNECT,
-	BCMFMAC_USB_STATE_SLEEP
-};
-
-enum brcmf_usb_pnp_state {
-	BCMFMAC_USB_PNP_DISCONNECT,
-	BCMFMAC_USB_PNP_SLEEP,
-	BCMFMAC_USB_PNP_RESUME,
+	BRCMFMAC_USB_STATE_DOWN,
+	BRCMFMAC_USB_STATE_DL_FAIL,
+	BRCMFMAC_USB_STATE_DL_DONE,
+	BRCMFMAC_USB_STATE_UP,
+	BRCMFMAC_USB_STATE_SLEEP
 };
 
 struct brcmf_stats {
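
[Editor's aside - illustrative only, not part of the patch. The bus state enum is renamed from BCMFMAC_USB_STATE_* to BRCMFMAC_USB_STATE_*, DL_PENDING becomes DL_FAIL, and the separate PnP state enum is dropped; the debug messages in usb.c print the state numerically. A hypothetical helper such as the one below could map the new states to names for logging; it exists only to illustrate the resulting state set.]

	/* Hypothetical helper, for illustration only. */
	static const char *brcmf_usb_state_name(enum brcmf_usb_state state)
	{
		switch (state) {
		case BRCMFMAC_USB_STATE_DOWN:
			return "down";
		case BRCMFMAC_USB_STATE_DL_FAIL:
			return "dl-fail";
		case BRCMFMAC_USB_STATE_DL_DONE:
			return "dl-done";
		case BRCMFMAC_USB_STATE_UP:
			return "up";
		case BRCMFMAC_USB_STATE_SLEEP:
			return "sleep";
		}
		return "unknown";
	}
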
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 481345c..2044fdb5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -19,14 +19,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
-#include <linux/if_arp.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/netdevice.h>
-#include <linux/bitops.h>
 #include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/uaccess.h>
 #include <net/cfg80211.h>
 #include <net/netlink.h>
 
@@ -35,6 +28,7 @@
 #include <brcmu_wifi.h>
 #include "dhd.h"
 #include "wl_cfg80211.h"
+#include "fwil.h"
 
 #define BRCMF_SCAN_IE_LEN_MAX		2048
 #define BRCMF_PNO_VERSION		2
@@ -48,6 +42,8 @@
 #define BRCMF_PNO_SCAN_COMPLETE		1
 #define BRCMF_PNO_SCAN_INCOMPLETE	0
 
+#define BRCMF_IFACE_MAX_CNT		2
+
 #define TLV_LEN_OFF			1	/* length offset */
 #define TLV_HDR_LEN			2	/* header length */
 #define TLV_BODY_OFF			2	/* body offset */
@@ -91,16 +87,13 @@
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
 	(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
-static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-
 static u32 brcmf_dbg_level = WL_DBG_ERR;
 
-static bool check_sys_up(struct wiphy *wiphy)
+static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
 {
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	if (!test_bit(WL_STATUS_READY, &cfg->status)) {
-		WL_INFO("device is not ready : status (%d)\n",
-			(int)cfg->status);
+	if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
+		WL_INFO("device is not ready : status (%lu)\n",
+			vif->sme_state);
 		return false;
 	}
 	return true;
@@ -391,55 +384,29 @@
 	return qdbm;
 }
 
-/* function for reading/writing a single u32 from/to the dongle */
-static int
-brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
+static u16 channel_to_chanspec(struct ieee80211_channel *ch)
 {
-	int err;
-	__le32 par_le = cpu_to_le32(*par);
+	u16 chanspec;
 
-	err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32));
-	*par = le32_to_cpu(par_le);
+	chanspec = ieee80211_frequency_to_channel(ch->center_freq);
+	chanspec &= WL_CHANSPEC_CHAN_MASK;
 
-	return err;
-}
+	if (ch->band == IEEE80211_BAND_2GHZ)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
 
-static s32
-brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
-			      void *param, s32 paramlen,
-			      void *buf, s32 buflen, s32 bssidx)
-{
-	s32 err = -ENOMEM;
-	u32 len;
-
-	len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
-				     buf, buflen, bssidx);
-	BUG_ON(!len);
-	if (len > 0)
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
-	if (err)
-		WL_ERR("error (%d)\n", err);
-
-	return err;
-}
-
-static s32
-brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
-			      void *param, s32 paramlen,
-			      void *buf, s32 buflen, s32 bssidx)
-{
-	s32 err = -ENOMEM;
-	u32 len;
-
-	len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
-				     buf, buflen, bssidx);
-	BUG_ON(!len);
-	if (len > 0)
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
-	if (err)
-		WL_ERR("error (%d)\n", err);
-
-	return err;
+	if (ch->flags & IEEE80211_CHAN_NO_HT40) {
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+	} else {
+		chanspec |= WL_CHANSPEC_BW_40;
+		if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
+			chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+		else
+			chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+	}
+	return chanspec;
 }
 
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -457,18 +424,17 @@
 }
 
 static int
-send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
-		   struct net_device *ndev, struct brcmf_wsec_key *key)
+send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
 {
 	int err;
 	struct brcmf_wsec_key_le key_le;
 
 	convert_key_from_CPU(key, &key_le);
 
-	err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
-					     sizeof(key_le),
-					     cfg->extra_buf,
-					     WL_EXTRA_BUF_MAX, bssidx);
+	brcmf_netdev_wait_pend8021x(ndev);
+
+	err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le,
+					sizeof(key_le));
 
 	if (err)
 		WL_ERR("wsec_key error (%d)\n", err);
@@ -480,6 +446,7 @@
 			 enum nl80211_iftype type, u32 *flags,
 			 struct vif_params *params)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	s32 infra = 0;
 	s32 ap = 0;
@@ -511,17 +478,11 @@
 	}
 
 	if (ap) {
-		set_bit(WL_STATUS_AP_CREATING, &cfg->status);
-		if (!cfg->ap_info)
-			cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
-					       GFP_KERNEL);
-		if (!cfg->ap_info) {
-			err = -ENOMEM;
-			goto done;
-		}
+		set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 		WL_INFO("IF Type = AP\n");
 	} else {
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+		err = brcmf_fil_cmd_int_set(netdev_priv(ndev),
+					    BRCMF_C_SET_INFRA, infra);
 		if (err) {
 			WL_ERR("WLC_SET_INFRA error (%d)\n", err);
 			err = -EAGAIN;
@@ -539,99 +500,13 @@
 	return err;
 }
 
-static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val)
-{
-	s8 buf[BRCMF_DCMD_SMLEN];
-	u32 len;
-	s32 err = 0;
-	__le32 val_le;
-
-	val_le = cpu_to_le32(val);
-	len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf,
-			    sizeof(buf));
-	BUG_ON(!len);
-
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
-	if (err)
-		WL_ERR("error (%d)\n", err);
-
-	return err;
-}
-
-static s32
-brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
-{
-	union {
-		s8 buf[BRCMF_DCMD_SMLEN];
-		__le32 val;
-	} var;
-	u32 len;
-	u32 data_null;
-	s32 err = 0;
-
-	len =
-	    brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
-			sizeof(var.buf));
-	BUG_ON(!len);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len);
-	if (err)
-		WL_ERR("error (%d)\n", err);
-
-	*retval = le32_to_cpu(var.val);
-
-	return err;
-}
-
-static s32
-brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
-			    s32 bssidx)
-{
-	s8 buf[BRCMF_DCMD_SMLEN];
-	__le32 val_le;
-
-	val_le = cpu_to_le32(val);
-
-	return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
-					     sizeof(val_le), buf, sizeof(buf),
-					     bssidx);
-}
-
-static s32
-brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
-			    s32 bssidx)
-{
-	s8 buf[BRCMF_DCMD_SMLEN];
-	s32 err;
-	__le32 val_le;
-
-	memset(buf, 0, sizeof(buf));
-	err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
-					    sizeof(buf), bssidx);
-	if (err == 0) {
-		memcpy(&val_le, buf, sizeof(val_le));
-		*val = le32_to_cpu(val_le);
-	}
-	return err;
-}
-
-
-/*
- * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
- * should return the ndev matching bssidx.
- */
-static s32
-brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
-{
-	return 0;
-}
-
 static void brcmf_set_mpc(struct net_device *ndev, int mpc)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 
-	if (test_bit(WL_STATUS_READY, &cfg->status)) {
-		err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
+	if (check_vif_up(ifp->vif)) {
+		err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
 		if (err) {
 			WL_ERR("fail to set mpc\n");
 			return;
@@ -640,209 +515,6 @@
 	}
 }
 
-static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
-			     struct brcmf_ssid *ssid)
-{
-	memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
-	params_le->bss_type = DOT11_BSSTYPE_ANY;
-	params_le->scan_type = 0;
-	params_le->channel_num = 0;
-	params_le->nprobes = cpu_to_le32(-1);
-	params_le->active_time = cpu_to_le32(-1);
-	params_le->passive_time = cpu_to_le32(-1);
-	params_le->home_time = cpu_to_le32(-1);
-	if (ssid && ssid->SSID_len) {
-		params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
-		memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
-	}
-}
-
-static s32
-brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param,
-		    s32 paramlen, void *bufptr, s32 buflen)
-{
-	s32 iolen;
-
-	iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
-	BUG_ON(!iolen);
-
-	return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen);
-}
-
-static s32
-brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param,
-		    s32 paramlen, void *bufptr, s32 buflen)
-{
-	s32 iolen;
-
-	iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
-	BUG_ON(!iolen);
-
-	return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen);
-}
-
-static s32
-brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
-		struct brcmf_ssid *ssid, u16 action)
-{
-	s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
-			  offsetof(struct brcmf_iscan_params_le, params_le);
-	struct brcmf_iscan_params_le *params;
-	s32 err = 0;
-
-	if (ssid && ssid->SSID_len)
-		params_size += sizeof(struct brcmf_ssid);
-	params = kzalloc(params_size, GFP_KERNEL);
-	if (!params)
-		return -ENOMEM;
-	BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
-
-	brcmf_iscan_prep(&params->params_le, ssid);
-
-	params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
-	params->action = cpu_to_le16(action);
-	params->scan_duration = cpu_to_le16(0);
-
-	err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size,
-				     iscan->dcmd_buf, BRCMF_DCMD_SMLEN);
-	if (err) {
-		if (err == -EBUSY)
-			WL_INFO("system busy : iscan canceled\n");
-		else
-			WL_ERR("error (%d)\n", err);
-	}
-
-	kfree(params);
-	return err;
-}
-
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
-	struct net_device *ndev = cfg_to_ndev(cfg);
-	struct brcmf_ssid ssid;
-	__le32 passive_scan;
-	s32 err = 0;
-
-	/* Broadcast scan by default */
-	memset(&ssid, 0, sizeof(ssid));
-
-	iscan->state = WL_ISCAN_STATE_SCANING;
-
-	passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
-	err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
-			&passive_scan, sizeof(passive_scan));
-	if (err) {
-		WL_ERR("error (%d)\n", err);
-		return err;
-	}
-	brcmf_set_mpc(ndev, 0);
-	cfg->iscan_kickstart = true;
-	err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
-	if (err) {
-		brcmf_set_mpc(ndev, 1);
-		cfg->iscan_kickstart = false;
-		return err;
-	}
-	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
-	iscan->timer_on = 1;
-	return err;
-}
-
-static s32
-brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
-		     struct cfg80211_scan_request *request,
-		     struct cfg80211_ssid *this_ssid)
-{
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct cfg80211_ssid *ssids;
-	struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
-	__le32 passive_scan;
-	bool iscan_req;
-	bool spec_scan;
-	s32 err = 0;
-	u32 SSID_len;
-
-	if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("Scanning already : status (%lu)\n", cfg->status);
-		return -EAGAIN;
-	}
-	if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
-		WL_ERR("Scanning being aborted : status (%lu)\n",
-		       cfg->status);
-		return -EAGAIN;
-	}
-	if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
-		WL_ERR("Connecting : status (%lu)\n",
-		       cfg->status);
-		return -EAGAIN;
-	}
-
-	iscan_req = false;
-	spec_scan = false;
-	if (request) {
-		/* scan bss */
-		ssids = request->ssids;
-		if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
-			iscan_req = true;
-	} else {
-		/* scan in ibss */
-		/* we don't do iscan in ibss */
-		ssids = this_ssid;
-	}
-
-	cfg->scan_request = request;
-	set_bit(WL_STATUS_SCANNING, &cfg->status);
-	if (iscan_req) {
-		err = brcmf_do_iscan(cfg);
-		if (!err)
-			return err;
-		else
-			goto scan_out;
-	} else {
-		WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
-		       ssids->ssid, ssids->ssid_len);
-		memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
-		SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
-		sr->ssid_le.SSID_len = cpu_to_le32(0);
-		if (SSID_len) {
-			memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
-			sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
-			spec_scan = true;
-		} else {
-			WL_SCAN("Broadcast scan\n");
-		}
-
-		passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
-				&passive_scan, sizeof(passive_scan));
-		if (err) {
-			WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
-			goto scan_out;
-		}
-		brcmf_set_mpc(ndev, 0);
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
-				      sizeof(sr->ssid_le));
-		if (err) {
-			if (err == -EBUSY)
-				WL_INFO("system busy : scan for \"%s\" "
-					"canceled\n", sr->ssid_le.SSID);
-			else
-				WL_ERR("WLC_SCAN error (%d)\n", err);
-
-			brcmf_set_mpc(ndev, 1);
-			goto scan_out;
-		}
-	}
-
-	return 0;
-
-scan_out:
-	clear_bit(WL_STATUS_SCANNING, &cfg->status);
-	cfg->scan_request = NULL;
-	return err;
-}
-
 static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
 			     struct cfg80211_scan_request *request)
 {
@@ -851,12 +523,10 @@
 	s32 i;
 	s32 offset;
 	u16 chanspec;
-	u16 channel;
-	struct ieee80211_channel *req_channel;
 	char *ptr;
 	struct brcmf_ssid_le ssid_le;
 
-	memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+	memset(params_le->bssid, 0xFF, ETH_ALEN);
 	params_le->bss_type = DOT11_BSSTYPE_ANY;
 	params_le->scan_type = 0;
 	params_le->channel_num = 0;
@@ -876,30 +546,9 @@
 	WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
 	if (n_channels > 0) {
 		for (i = 0; i < n_channels; i++) {
-			chanspec = 0;
-			req_channel = request->channels[i];
-			channel = ieee80211_frequency_to_channel(
-					req_channel->center_freq);
-			if (req_channel->band == IEEE80211_BAND_2GHZ)
-				chanspec |= WL_CHANSPEC_BAND_2G;
-			else
-				chanspec |= WL_CHANSPEC_BAND_5G;
-
-			if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
-				chanspec |= WL_CHANSPEC_BW_20;
-				chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-			} else {
-				chanspec |= WL_CHANSPEC_BW_40;
-				if (req_channel->flags &
-						IEEE80211_CHAN_NO_HT40PLUS)
-					chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
-				else
-					chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
-			}
-
-			chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
+			chanspec = channel_to_chanspec(request->channels[i]);
 			WL_SCAN("Chan : %d, Channel spec: %x\n",
-				channel, chanspec);
+				request->channels[i]->hw_value, chanspec);
 			params_le->channel_list[i] = cpu_to_le16(chanspec);
 		}
 	} else {
@@ -966,7 +615,7 @@
 		/* Do a scan abort to stop the driver's scan engine */
 		WL_SCAN("ABORT scan in firmware\n");
 		memset(&params_le, 0, sizeof(params_le));
-		memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+		memset(params_le.bssid, 0xFF, ETH_ALEN);
 		params_le.bss_type = DOT11_BSSTYPE_ANY;
 		params_le.scan_type = 0;
 		params_le.channel_num = cpu_to_le32(1);
@@ -977,8 +626,8 @@
 		/* Scan is aborted by setting channel_list[0] to -1 */
 		params_le.channel_list[0] = cpu_to_le16(-1);
 		/* E-Scan (or anyother type) can be aborted by SCAN */
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
-			sizeof(params_le));
+		err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+					     &params_le, sizeof(params_le));
 		if (err)
 			WL_ERR("Scan abort  failed\n");
 	}
@@ -998,7 +647,7 @@
 		cfg80211_scan_done(scan_request, aborted);
 		brcmf_set_mpc(ndev, 1);
 	}
-	if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+	if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
 		WL_ERR("Scan complete while device not scanning\n");
 		return -EPERM;
 	}
@@ -1036,8 +685,8 @@
 	params->action = cpu_to_le16(action);
 	params->sync_id = cpu_to_le16(0x1234);
 
-	err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
-			cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+	err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan",
+				       params, params_size);
 	if (err) {
 		if (err == -EBUSY)
 			WL_INFO("system busy : escan canceled\n");
@@ -1055,16 +704,16 @@
 	       struct net_device *ndev, struct cfg80211_scan_request *request)
 {
 	s32 err;
-	__le32 passive_scan;
+	u32 passive_scan;
 	struct brcmf_scan_results *results;
 
 	WL_SCAN("Enter\n");
 	cfg->escan_info.ndev = ndev;
 	cfg->escan_info.wiphy = wiphy;
 	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
-	passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
-			&passive_scan, sizeof(passive_scan));
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
+				    passive_scan);
 	if (err) {
 		WL_ERR("error (%d)\n", err);
 		return err;
@@ -1086,10 +735,11 @@
 		     struct cfg80211_scan_request *request,
 		     struct cfg80211_ssid *this_ssid)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 	struct cfg80211_ssid *ssids;
-	struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
-	__le32 passive_scan;
+	struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
+	u32 passive_scan;
 	bool escan_req;
 	bool spec_scan;
 	s32 err;
@@ -1097,18 +747,17 @@
 
 	WL_SCAN("START ESCAN\n");
 
-	if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		WL_ERR("Scanning already: status (%lu)\n", cfg->scan_status);
 		return -EAGAIN;
 	}
-	if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
-		WL_ERR("Scanning being aborted : status (%lu)\n",
-		       cfg->status);
+	if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
+		WL_ERR("Scanning being aborted: status (%lu)\n",
+		       cfg->scan_status);
 		return -EAGAIN;
 	}
-	if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
-		WL_ERR("Connecting : status (%lu)\n",
-		       cfg->status);
+	if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+		WL_ERR("Connecting: status (%lu)\n", ifp->vif->sme_state);
 		return -EAGAIN;
 	}
 
@@ -1128,12 +777,10 @@
 	}
 
 	cfg->scan_request = request;
-	set_bit(WL_STATUS_SCANNING, &cfg->status);
+	set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
 	if (escan_req) {
 		err = brcmf_do_escan(cfg, wiphy, ndev, request);
-		if (!err)
-			return err;
-		else
+		if (err)
 			goto scan_out;
 	} else {
 		WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
@@ -1149,16 +796,16 @@
 		} else
 			WL_SCAN("Broadcast scan\n");
 
-		passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
-				&passive_scan, sizeof(passive_scan));
+		passive_scan = cfg->active_scan ? 0 : 1;
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+					    passive_scan);
 		if (err) {
 			WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
 			goto scan_out;
 		}
 		brcmf_set_mpc(ndev, 0);
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
-				      sizeof(sr->ssid_le));
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+					     &sr->ssid_le, sizeof(sr->ssid_le));
 		if (err) {
 			if (err == -EBUSY)
 				WL_INFO("BUSY: scan for \"%s\" canceled\n",
@@ -1174,7 +821,7 @@
 	return 0;
 
 scan_out:
-	clear_bit(WL_STATUS_SCANNING, &cfg->status);
+	clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
 	if (timer_pending(&cfg->escan_timeout))
 		del_timer_sync(&cfg->escan_timeout);
 	cfg->scan_request = NULL;
@@ -1182,22 +829,18 @@
 }
 
 static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy,
-		 struct cfg80211_scan_request *request)
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 {
 	struct net_device *ndev = request->wdev->netdev;
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
 
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(container_of(request->wdev,
+				       struct brcmf_cfg80211_vif, wdev)))
 		return -EIO;
 
-	if (cfg->iscan_on)
-		err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
-	else if (cfg->escan_on)
-		err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
+	err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
 
 	if (err)
 		WL_ERR("scan error (%d)\n", err);
@@ -1210,7 +853,8 @@
 {
 	s32 err = 0;
 
-	err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold);
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
+				      rts_threshold);
 	if (err)
 		WL_ERR("Error (%d)\n", err);
 
@@ -1221,7 +865,8 @@
 {
 	s32 err = 0;
 
-	err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold);
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
+				      frag_threshold);
 	if (err)
 		WL_ERR("Error (%d)\n", err);
 
@@ -1231,9 +876,9 @@
 static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
 {
 	s32 err = 0;
-	u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
+	u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL);
 
-	err = brcmf_exec_dcmd_u32(ndev, cmd, &retry);
+	err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
 	if (err) {
 		WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
 		return err;
@@ -1245,10 +890,11 @@
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
@@ -1327,7 +973,8 @@
 	if (cfg->link_up) {
 		ndev = cfg_to_ndev(cfg);
 		WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
+		err = brcmf_fil_cmd_data_set(netdev_priv(ndev),
+					     BRCMF_C_DISASSOC, NULL, 0);
 		if (err)
 			WL_ERR("WLC_DISASSOC failed (%d)\n", err);
 		cfg->link_up = false;
@@ -1340,7 +987,8 @@
 		      struct cfg80211_ibss_params *params)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_join_params join_params;
 	size_t join_params_size = 0;
 	s32 err = 0;
@@ -1348,7 +996,7 @@
 	s32 bcnprd;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	if (params->ssid)
@@ -1358,15 +1006,15 @@
 		return -EOPNOTSUPP;
 	}
 
-	set_bit(WL_STATUS_CONNECTING, &cfg->status);
+	set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 
 	if (params->bssid)
 		WL_CONN("BSSID: %pM\n", params->bssid);
 	else
 		WL_CONN("No BSSID specified\n");
 
-	if (params->channel)
-		WL_CONN("channel: %d\n", params->channel->center_freq);
+	if (params->chandef.chan)
+		WL_CONN("channel: %d\n", params->chandef.chan->center_freq);
 	else
 		WL_CONN("no channel specified\n");
 
@@ -1399,7 +1047,7 @@
 	if (params->privacy)
 		wsec |= WEP_ENABLED;
 
-	err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+	err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
 	if (err) {
 		WL_ERR("wsec failed (%d)\n", err);
 		goto done;
@@ -1411,7 +1059,7 @@
 	else
 		bcnprd = 100;
 
-	err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd);
 	if (err) {
 		WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
 		goto done;
@@ -1434,17 +1082,17 @@
 				   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
 		memcpy(profile->bssid, params->bssid, ETH_ALEN);
 	} else {
-		memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+		memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
 		memset(profile->bssid, 0, ETH_ALEN);
 	}
 
 	/* Channel */
-	if (params->channel) {
+	if (params->chandef.chan) {
 		u32 target_channel;
 
 		cfg->channel =
 			ieee80211_frequency_to_channel(
-				params->channel->center_freq);
+				params->chandef.chan->center_freq);
 		if (params->channel_fixed) {
 			/* adding chanspec */
 			brcmf_ch_to_chanspec(cfg->channel,
@@ -1453,8 +1101,8 @@
 
 		/* set channel for starter */
 		target_channel = cfg->channel;
-		err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
-					  &target_channel);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
+					    target_channel);
 		if (err) {
 			WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
 			goto done;
@@ -1465,8 +1113,8 @@
 	cfg->ibss_starter = false;
 
 
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
-			   &join_params, join_params_size);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+				     &join_params, join_params_size);
 	if (err) {
 		WL_ERR("WLC_SET_SSID failed (%d)\n", err);
 		goto done;
@@ -1474,7 +1122,7 @@
 
 done:
 	if (err)
-		clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+		clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 	WL_TRACE("Exit\n");
 	return err;
 }
@@ -1483,10 +1131,11 @@
 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	brcmf_link_down(cfg);
@@ -1499,8 +1148,7 @@
 static s32 brcmf_set_wpa_version(struct net_device *ndev,
 				 struct cfg80211_connect_params *sme)
 {
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
 	struct brcmf_cfg80211_security *sec;
 	s32 val = 0;
 	s32 err = 0;
@@ -1512,7 +1160,7 @@
 	else
 		val = WPA_AUTH_DISABLED;
 	WL_CONN("setting wpa_auth to 0x%0x\n", val);
-	err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
 	if (err) {
 		WL_ERR("set wpa_auth failed (%d)\n", err);
 		return err;
@@ -1525,8 +1173,7 @@
 static s32 brcmf_set_auth_type(struct net_device *ndev,
 			       struct cfg80211_connect_params *sme)
 {
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
 	struct brcmf_cfg80211_security *sec;
 	s32 val = 0;
 	s32 err = 0;
@@ -1552,7 +1199,7 @@
 		break;
 	}
 
-	err = brcmf_dev_intvar_set(ndev, "auth", val);
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
 	if (err) {
 		WL_ERR("set auth failed (%d)\n", err);
 		return err;
@@ -1566,8 +1213,7 @@
 brcmf_set_set_cipher(struct net_device *ndev,
 		     struct cfg80211_connect_params *sme)
 {
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
 	struct brcmf_cfg80211_security *sec;
 	s32 pval = 0;
 	s32 gval = 0;
@@ -1617,7 +1263,7 @@
 	}
 
 	WL_CONN("pval (%d) gval (%d)\n", pval, gval);
-	err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval);
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
 	if (err) {
 		WL_ERR("error (%d)\n", err);
 		return err;
@@ -1633,14 +1279,14 @@
 static s32
 brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 {
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
 	struct brcmf_cfg80211_security *sec;
 	s32 val = 0;
 	s32 err = 0;
 
 	if (sme->crypto.n_akm_suites) {
-		err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val);
+		err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
+					      "wpa_auth", &val);
 		if (err) {
 			WL_ERR("could not get wpa_auth (%d)\n", err);
 			return err;
@@ -1674,7 +1320,8 @@
 		}
 
 		WL_CONN("setting wpa_auth to %d\n", val);
-		err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+		err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
+					      "wpa_auth", val);
 		if (err) {
 			WL_ERR("could not set wpa_auth (%d)\n", err);
 			return err;
@@ -1690,13 +1337,11 @@
 brcmf_set_sharedkey(struct net_device *ndev,
 		    struct cfg80211_connect_params *sme)
 {
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
 	struct brcmf_cfg80211_security *sec;
 	struct brcmf_wsec_key key;
 	s32 val;
 	s32 err = 0;
-	s32 bssidx;
 
 	WL_CONN("key len (%d)\n", sme->key_len);
 
@@ -1739,15 +1384,14 @@
 	WL_CONN("key length (%d) key index (%d) algo (%d)\n",
 		key.len, key.index, key.algo);
 	WL_CONN("key \"%s\"\n", key.data);
-	bssidx = brcmf_find_bssidx(cfg, ndev);
-	err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+	err = send_key_to_dongle(ndev, &key);
 	if (err)
 		return err;
 
 	if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
 		WL_CONN("set auth_type to shared key\n");
 		val = WL_AUTH_SHARED_KEY;	/* shared key */
-		err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+		err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
 		if (err)
 			WL_ERR("set auth failed (%d)\n", err);
 	}
@@ -1759,7 +1403,8 @@
 		    struct cfg80211_connect_params *sme)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct ieee80211_channel *chan = sme->channel;
 	struct brcmf_join_params join_params;
 	size_t join_params_size;
@@ -1768,7 +1413,7 @@
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	if (!sme->ssid) {
@@ -1776,7 +1421,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	set_bit(WL_STATUS_CONNECTING, &cfg->status);
+	set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 
 	if (chan) {
 		cfg->channel =
@@ -1827,7 +1472,7 @@
 	memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
 	join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
 
-	memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+	memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
 
 	if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
 		WL_CONN("ssid \"%s\", len (%d)\n",
@@ -1835,14 +1480,14 @@
 
 	brcmf_ch_to_chanspec(cfg->channel,
 			     &join_params, &join_params_size);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
-			   &join_params, join_params_size);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+				     &join_params, join_params_size);
 	if (err)
 		WL_ERR("WLC_SET_SSID failed (%d)\n", err);
 
 done:
 	if (err)
-		clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+		clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 	WL_TRACE("Exit\n");
 	return err;
 }
@@ -1852,20 +1497,21 @@
 		       u16 reason_code)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_scb_val_le scbval;
 	s32 err = 0;
 
 	WL_TRACE("Enter. Reason code = %d\n", reason_code);
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	clear_bit(WL_STATUS_CONNECTED, &cfg->status);
+	clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
 
 	memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
 	scbval.val = cpu_to_le32(reason_code);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
-			      sizeof(struct brcmf_scb_val_le));
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
+				     &scbval, sizeof(scbval));
 	if (err)
 		WL_ERR("error (%d)\n", err);
 
@@ -1876,19 +1522,20 @@
 }
 
 static s32
-brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
+brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
 			    enum nl80211_tx_power_setting type, s32 mbm)
 {
 
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	u16 txpwrmw;
 	s32 err = 0;
 	s32 disable = 0;
 	s32 dbm = MBM_TO_DBM(mbm);
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	switch (type) {
@@ -1905,7 +1552,7 @@
 	}
 	/* Make sure radio is off or on as far as software is concerned */
 	disable = WL_RADIO_SW_DISABLE << 16;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
 	if (err)
 		WL_ERR("WLC_SET_RADIO error (%d)\n", err);
 
@@ -1913,8 +1560,8 @@
 		txpwrmw = 0xffff;
 	else
 		txpwrmw = (u16) dbm;
-	err = brcmf_dev_intvar_set(ndev, "qtxpower",
-			(s32) (brcmf_mw_to_qdbm(txpwrmw)));
+	err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
+				      (s32)brcmf_mw_to_qdbm(txpwrmw));
 	if (err)
 		WL_ERR("qtxpower error (%d)\n", err);
 	cfg->conf->tx_power = dbm;
@@ -1924,19 +1571,21 @@
 	return err;
 }
 
-static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy,
+				       struct wireless_dev *wdev,
+				       s32 *dbm)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 	s32 txpwrdbm;
 	u8 result;
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+	err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm);
 	if (err) {
 		WL_ERR("error (%d)\n", err);
 		goto done;
@@ -1954,19 +1603,17 @@
 brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
 			       u8 key_idx, bool unicast, bool multicast)
 {
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	u32 index;
 	u32 wsec;
 	s32 err = 0;
-	s32 bssidx;
 
 	WL_TRACE("Enter\n");
 	WL_CONN("key index (%d)\n", key_idx);
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	bssidx = brcmf_find_bssidx(cfg, ndev);
-	err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
 	if (err) {
 		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
 		goto done;
@@ -1975,8 +1622,8 @@
 	if (wsec & WEP_ENABLED) {
 		/* Just select a new current key */
 		index = key_idx;
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY,
-					  &index);
+		err = brcmf_fil_cmd_int_set(ifp,
+					    BRCMF_C_SET_KEY_PRIMARY, index);
 		if (err)
 			WL_ERR("error (%d)\n", err);
 	}
@@ -1989,11 +1636,8 @@
 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
 	      u8 key_idx, const u8 *mac_addr, struct key_params *params)
 {
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_wsec_key key;
-	struct brcmf_wsec_key_le key_le;
 	s32 err = 0;
-	s32 bssidx;
 
 	memset(&key, 0, sizeof(key));
 	key.index = (u32) key_idx;
@@ -2002,11 +1646,10 @@
 	if (!is_multicast_ether_addr(mac_addr))
 		memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
 	key.len = (u32) params->key_len;
-	bssidx = brcmf_find_bssidx(cfg, ndev);
 	/* check for key index change */
 	if (key.len == 0) {
 		/* key delete */
-		err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+		err = send_key_to_dongle(ndev, &key);
 		if (err)
 			WL_ERR("key delete error (%d)\n", err);
 	} else {
@@ -2061,13 +1704,7 @@
 			WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
 			return -EINVAL;
 		}
-		convert_key_from_CPU(&key, &key_le);
-
-		brcmf_netdev_wait_pend8021x(ndev);
-		err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
-						     sizeof(key_le),
-						     cfg->extra_buf,
-						     WL_EXTRA_BUF_MAX, bssidx);
+		err = send_key_to_dongle(ndev, &key);
 		if (err)
 			WL_ERR("wsec_key error (%d)\n", err);
 	}
@@ -2080,16 +1717,16 @@
 		    struct key_params *params)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_wsec_key key;
 	s32 val;
 	s32 wsec;
 	s32 err = 0;
 	u8 keybuf[8];
-	s32 bssidx;
 
 	WL_TRACE("Enter\n");
 	WL_CONN("key index (%d)\n", key_idx);
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	if (mac_addr) {
@@ -2147,18 +1784,17 @@
 		goto done;
 	}
 
-	bssidx = brcmf_find_bssidx(cfg, ndev);
-	err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+	err = send_key_to_dongle(ndev, &key);
 	if (err)
 		goto done;
 
-	err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
 	if (err) {
 		WL_ERR("get wsec error (%d)\n", err);
 		goto done;
 	}
 	wsec |= val;
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
 	if (err) {
 		WL_ERR("set wsec error (%d)\n", err);
 		goto done;
@@ -2173,15 +1809,20 @@
 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
 		    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_wsec_key key;
 	s32 err = 0;
-	s32 bssidx;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
+	if (key_idx >= DOT11_MAX_DEFAULT_KEYS) {
+		/* we ignore this key index in this case */
+		WL_ERR("invalid key index (%d)\n", key_idx);
+		return -EINVAL;
+	}
+
 	memset(&key, 0, sizeof(key));
 
 	key.index = (u32) key_idx;
@@ -2191,17 +1832,7 @@
 	WL_CONN("key index (%d)\n", key_idx);
 
 	/* Set the new key/index */
-	bssidx = brcmf_find_bssidx(cfg, ndev);
-	err = send_key_to_dongle(cfg, bssidx, ndev, &key);
-	if (err) {
-		if (err == -EINVAL) {
-			if (key.index >= DOT11_MAX_DEFAULT_KEYS)
-				/* we ignore this key index in this case */
-				WL_ERR("invalid key index (%d)\n", key_idx);
-		}
-		/* Ignore this error, may happen during DISASSOC */
-		err = -EAGAIN;
-	}
+	err = send_key_to_dongle(ndev, &key);
 
 	WL_TRACE("Exit\n");
 	return err;
@@ -2213,22 +1844,20 @@
 		    void (*callback) (void *cookie, struct key_params * params))
 {
 	struct key_params params;
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_cfg80211_security *sec;
 	s32 wsec;
 	s32 err = 0;
-	s32 bssidx;
 
 	WL_TRACE("Enter\n");
 	WL_CONN("key index (%d)\n", key_idx);
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	memset(&params, 0, sizeof(params));
 
-	bssidx = brcmf_find_bssidx(cfg, ndev);
-	err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
 	if (err) {
 		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
 		/* Ignore this error, may happen during DISASSOC */
@@ -2280,33 +1909,33 @@
 			   u8 *mac, struct station_info *sinfo)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_scb_val_le scb_val;
 	int rssi;
 	s32 rate;
 	s32 err = 0;
 	u8 *bssid = profile->bssid;
-	struct brcmf_sta_info_le *sta_info_le;
+	struct brcmf_sta_info_le sta_info_le;
 
 	WL_TRACE("Enter, MAC %pM\n", mac);
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	if (cfg->conf->mode == WL_MODE_AP) {
-		err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
-					     cfg->dcmd_buf,
-					     WL_DCMD_LEN_MAX);
+		memcpy(&sta_info_le, mac, ETH_ALEN);
+		err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+					       &sta_info_le,
+					       sizeof(sta_info_le));
 		if (err < 0) {
 			WL_ERR("GET STA INFO failed, %d\n", err);
 			goto done;
 		}
-		sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
-
 		sinfo->filled = STATION_INFO_INACTIVE_TIME;
-		sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
-		if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+		sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
+		if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
 			sinfo->filled |= STATION_INFO_CONNECTED_TIME;
-			sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+			sinfo->connected_time = le32_to_cpu(sta_info_le.in);
 		}
 		WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
 			 sinfo->inactive_time, sinfo->connected_time);
@@ -2318,7 +1947,7 @@
 			goto done;
 		}
 		/* Report the current tx rate */
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
+		err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
 		if (err) {
 			WL_ERR("Could not get rate (%d)\n", err);
 			goto done;
@@ -2328,10 +1957,11 @@
 			WL_CONN("Rate %d Mbps\n", rate / 2);
 		}
 
-		if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+		if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+			     &ifp->vif->sme_state)) {
 			memset(&scb_val, 0, sizeof(scb_val));
-			err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
-					      sizeof(scb_val));
+			err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+						     &scb_val, sizeof(scb_val));
 			if (err) {
 				WL_ERR("Could not get rssi (%d)\n", err);
 				goto done;
@@ -2356,6 +1986,7 @@
 	s32 pm;
 	s32 err = 0;
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 
 	WL_TRACE("Enter\n");
 
@@ -2367,7 +1998,7 @@
 	 * FW later while initializing the dongle
 	 */
 	cfg->pwr_save = enabled;
-	if (!test_bit(WL_STATUS_READY, &cfg->status)) {
+	if (!check_vif_up(ifp->vif)) {
 
 		WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
 		goto done;
@@ -2376,7 +2007,7 @@
 	pm = enabled ? PM_FAST : PM_OFF;
 	WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
 
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
 	if (err) {
 		if (err == -ENODEV)
 			WL_ERR("net_device is not ready yet\n");
@@ -2393,6 +2024,7 @@
 			     const u8 *addr,
 			     const struct cfg80211_bitrate_mask *mask)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcm_rateset_le rateset_le;
 	s32 rate;
 	s32 val;
@@ -2402,13 +2034,13 @@
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	/* addr param is always NULL. ignore it */
 	/* Get current rateset */
-	err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le,
-			      sizeof(rateset_le));
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET,
+				     &rateset_le, sizeof(rateset_le));
 	if (err) {
 		WL_ERR("could not get current rateset (%d)\n", err);
 		goto done;
@@ -2435,8 +2067,8 @@
 	 *      Set rate override,
 	 *      Since this is a/b/g-blind, both a/bg_rate are enforced.
 	 */
-	err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate);
-	err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate);
+	err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
+	err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
 	if (err_bg && err_a) {
 		WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
 		err = err_bg | err_a;
@@ -2522,13 +2154,14 @@
 	int i;
 
 	bss_list = cfg->bss_list;
-	if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
+	if (bss_list->count != 0 &&
+	    bss_list->version != BRCMF_BSS_INFO_VERSION) {
 		WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
 		       bss_list->version);
 		return -EOPNOTSUPP;
 	}
 	WL_SCAN("scanned AP count (%d)\n", bss_list->count);
-	for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
+	for (i = 0; i < bss_list->count; i++) {
 		bi = next_bss_le(bss_list, bi);
 		err = brcmf_inform_single_bss(cfg, bi);
 		if (err)
@@ -2565,7 +2198,8 @@
 
 	*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
 
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+	err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
+				     buf, WL_BSS_INFO_MAX);
 	if (err) {
 		WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
 		goto CleanUp;
@@ -2674,12 +2308,12 @@
 	return false;
 }
 
-struct brcmf_vs_tlv *
+static struct brcmf_vs_tlv *
 brcmf_find_wpaie(u8 *parse, u32 len)
 {
 	struct brcmf_tlv *ie;
 
-	while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+	while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
 		if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
 				     WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
 			return (struct brcmf_vs_tlv *)ie;
@@ -2689,7 +2323,9 @@
 
 static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
 {
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_bss_info_le *bi;
 	struct brcmf_ssid *ssid;
 	struct brcmf_tlv *tim;
@@ -2706,8 +2342,8 @@
 	ssid = &profile->ssid;
 
 	*(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
-	err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
-			cfg->extra_buf, WL_EXTRA_BUF_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+				     cfg->extra_buf, WL_EXTRA_BUF_MAX);
 	if (err) {
 		WL_ERR("Could not get bss info %d\n", err);
 		goto update_bss_info_out;
@@ -2732,8 +2368,7 @@
 		* so we specifically query dtim information from the dongle.
 		*/
 		u32 var;
-		err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
-					   "dtim_assoc", &var);
+		err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
 		if (err) {
 			WL_ERR("wl dtim_assoc failed (%d)\n", err);
 			goto update_bss_info_out;
@@ -2741,9 +2376,6 @@
 		dtim_period = (u8)var;
 	}
 
-	profile->beacon_interval = beacon_interval;
-	profile->dtim_period = dtim_period;
-
 update_bss_info_out:
 	WL_TRACE("Exit");
 	return err;
@@ -2751,233 +2383,15 @@
 
 static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
 {
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
 	struct escan_info *escan = &cfg->escan_info;
-	struct brcmf_ssid ssid;
 
-	set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
-	if (cfg->iscan_on) {
-		iscan->state = WL_ISCAN_STATE_IDLE;
-
-		if (iscan->timer_on) {
-			del_timer_sync(&iscan->timer);
-			iscan->timer_on = 0;
-		}
-
-		cancel_work_sync(&iscan->work);
-
-		/* Abort iscan running in FW */
-		memset(&ssid, 0, sizeof(ssid));
-		brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
-
-		if (cfg->scan_request) {
-			/* Indidate scan abort to cfg80211 layer */
-			WL_INFO("Terminating scan in progress\n");
-			cfg80211_scan_done(cfg->scan_request, true);
-			cfg->scan_request = NULL;
-		}
-	}
-	if (cfg->escan_on && cfg->scan_request) {
+	set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
+	if (cfg->scan_request) {
 		escan->escan_state = WL_ESCAN_STATE_IDLE;
 		brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
 	}
-	clear_bit(WL_STATUS_SCANNING, &cfg->status);
-	clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
-}
-
-static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
-					bool aborted)
-{
-	struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
-	struct net_device *ndev = cfg_to_ndev(cfg);
-
-	if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("Scan complete while device not scanning\n");
-		return;
-	}
-	if (cfg->scan_request) {
-		WL_SCAN("ISCAN Completed scan: %s\n",
-				aborted ? "Aborted" : "Done");
-		cfg80211_scan_done(cfg->scan_request, aborted);
-		brcmf_set_mpc(ndev, 1);
-		cfg->scan_request = NULL;
-	}
-	cfg->iscan_kickstart = false;
-}
-
-static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
-{
-	if (iscan->state != WL_ISCAN_STATE_IDLE) {
-		WL_SCAN("wake up iscan\n");
-		schedule_work(&iscan->work);
-		return 0;
-	}
-
-	return -EIO;
-}
-
-static s32
-brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
-		     struct brcmf_scan_results **bss_list)
-{
-	struct brcmf_iscan_results list;
-	struct brcmf_scan_results *results;
-	struct brcmf_scan_results_le *results_le;
-	struct brcmf_iscan_results *list_buf;
-	s32 err = 0;
-
-	memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
-	list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
-	results = &list_buf->results;
-	results_le = &list_buf->results_le;
-	results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
-	results->version = 0;
-	results->count = 0;
-
-	memset(&list, 0, sizeof(list));
-	list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
-	err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
-				     BRCMF_ISCAN_RESULTS_FIXED_SIZE,
-				     iscan->scan_buf, WL_ISCAN_BUF_MAX);
-	if (err) {
-		WL_ERR("error (%d)\n", err);
-		return err;
-	}
-	results->buflen = le32_to_cpu(results_le->buflen);
-	results->version = le32_to_cpu(results_le->version);
-	results->count = le32_to_cpu(results_le->count);
-	WL_SCAN("results->count = %d\n", results_le->count);
-	WL_SCAN("results->buflen = %d\n", results_le->buflen);
-	*status = le32_to_cpu(list_buf->status_le);
-	WL_SCAN("status = %d\n", *status);
-	*bss_list = results;
-
-	return err;
-}
-
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
-	s32 err = 0;
-
-	iscan->state = WL_ISCAN_STATE_IDLE;
-	brcmf_inform_bss(cfg);
-	brcmf_notify_iscan_complete(iscan, false);
-
-	return err;
-}
-
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
-	s32 err = 0;
-
-	/* Reschedule the timer */
-	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
-	iscan->timer_on = 1;
-
-	return err;
-}
-
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
-	s32 err = 0;
-
-	brcmf_inform_bss(cfg);
-	brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
-	/* Reschedule the timer */
-	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
-	iscan->timer_on = 1;
-
-	return err;
-}
-
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
-	s32 err = 0;
-
-	iscan->state = WL_ISCAN_STATE_IDLE;
-	brcmf_notify_iscan_complete(iscan, true);
-
-	return err;
-}
-
-static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan =
-			container_of(work, struct brcmf_cfg80211_iscan_ctrl,
-				     work);
-	struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
-	struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
-	u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
-
-	if (iscan->timer_on) {
-		del_timer_sync(&iscan->timer);
-		iscan->timer_on = 0;
-	}
-
-	if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
-		status = BRCMF_SCAN_RESULTS_ABORTED;
-		WL_ERR("Abort iscan\n");
-	}
-
-	el->handler[status](cfg);
-}
-
-static void brcmf_iscan_timer(unsigned long data)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan =
-			(struct brcmf_cfg80211_iscan_ctrl *)data;
-
-	if (iscan) {
-		iscan->timer_on = 0;
-		WL_SCAN("timer expired\n");
-		brcmf_wakeup_iscan(iscan);
-	}
-}
-
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
-
-	if (cfg->iscan_on) {
-		iscan->state = WL_ISCAN_STATE_IDLE;
-		INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
-	}
-
-	return 0;
-}
-
-static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
-{
-	memset(el, 0, sizeof(*el));
-	el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
-	el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
-	el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
-	el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
-	el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
-}
-
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
-	int err = 0;
-
-	if (cfg->iscan_on) {
-		iscan->ndev = cfg_to_ndev(cfg);
-		brcmf_init_iscan_eloop(&iscan->el);
-		iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
-		init_timer(&iscan->timer);
-		iscan->timer.data = (unsigned long) iscan;
-		iscan->timer.function = brcmf_iscan_timer;
-		err = brcmf_invoke_iscan(cfg);
-		if (!err)
-			iscan->data = cfg;
-	}
-
-	return err;
+	clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
 }
 
 static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
@@ -2997,8 +2411,7 @@
 
 	if (cfg->scan_request) {
 		WL_ERR("timer expired\n");
-		if (cfg->escan_on)
-			schedule_work(&cfg->escan_timeout_work);
+		schedule_work(&cfg->escan_timeout_work);
 	}
 }
 
@@ -3035,10 +2448,11 @@
 }
 
 static s32
-brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
-			     struct net_device *ndev,
+brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 			     const struct brcmf_event_msg *e, void *data)
 {
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct net_device *ndev = ifp->ndev;
 	s32 status;
 	s32 err = 0;
 	struct brcmf_escan_result_le *escan_result_le;
@@ -3049,13 +2463,11 @@
 	u32 i;
 	bool aborted;
 
-	status = be32_to_cpu(e->status);
+	status = e->status;
 
-	if (!ndev || !cfg->escan_on ||
-			!test_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
-			ndev, cfg->escan_on,
-			!test_bit(WL_STATUS_SCANNING, &cfg->status));
+	if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		WL_ERR("scan not ready ndev %p drv_status %x\n", ndev,
+		       !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
 		return -EPERM;
 	}
 
@@ -3132,18 +2544,15 @@
 
 static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
 {
-
-	if (cfg->escan_on) {
-		cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
-			brcmf_cfg80211_escan_handler;
-		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
-		/* Init scan_timeout timer */
-		init_timer(&cfg->escan_timeout);
-		cfg->escan_timeout.data = (unsigned long) cfg;
-		cfg->escan_timeout.function = brcmf_escan_timeout;
-		INIT_WORK(&cfg->escan_timeout_work,
-			brcmf_cfg80211_escan_timeout_worker);
-	}
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ESCAN_RESULT,
+			    brcmf_cfg80211_escan_handler);
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	/* Init scan_timeout timer */
+	init_timer(&cfg->escan_timeout);
+	cfg->escan_timeout.data = (unsigned long) cfg;
+	cfg->escan_timeout.function = brcmf_escan_timeout;
+	INIT_WORK(&cfg->escan_timeout_work,
+		  brcmf_cfg80211_escan_timeout_worker);
 }
 
 static __always_inline void brcmf_delay(u32 ms)
@@ -3158,19 +2567,8 @@
 
 static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
 {
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-
-	/*
-	 * Check for WL_STATUS_READY before any function call which
-	 * could result is bus access. Don't block the resume for
-	 * any driver error conditions
-	 */
 	WL_TRACE("Enter\n");
 
-	if (test_bit(WL_STATUS_READY, &cfg->status))
-		brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
-
-	WL_TRACE("Exit\n");
 	return 0;
 }
 
@@ -3179,85 +2577,53 @@
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_cfg80211_vif *vif;
 
 	WL_TRACE("Enter\n");
 
 	/*
-	 * Check for WL_STATUS_READY before any function call which
-	 * could result is bus access. Don't block the suspend for
-	 * any driver error conditions
+	 * if the primary net_device is not READY there is nothing
+	 * we can do but pray resume goes smoothly.
 	 */
+	vif = ((struct brcmf_if *)netdev_priv(ndev))->vif;
+	if (!check_vif_up(vif))
+		goto exit;
 
-	/*
-	 * While going to suspend if associated with AP disassociate
-	 * from AP to save power while system is in suspended state
-	 */
-	if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
-	     test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
-	     test_bit(WL_STATUS_READY, &cfg->status)) {
-		WL_INFO("Disassociating from AP"
-			" while entering suspend state\n");
-		brcmf_link_down(cfg);
-
+	list_for_each_entry(vif, &cfg->vif_list, list) {
+		if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
+			continue;
 		/*
-		 * Make sure WPA_Supplicant receives all the event
-		 * generated due to DISASSOC call to the fw to keep
-		 * the state fw and WPA_Supplicant state consistent
+		 * While going to suspend if associated with AP disassociate
+		 * from AP to save power while system is in suspended state
 		 */
-		brcmf_delay(500);
+		if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state) ||
+		    test_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state)) {
+			WL_INFO("Disassociating from AP before suspend\n");
+			brcmf_link_down(cfg);
+
+			/* Make sure WPA_Supplicant receives all the events
+			 * generated due to the DISASSOC call to the fw to keep
+			 * the fw state and WPA_Supplicant state consistent
+			 */
+			brcmf_delay(500);
+		}
 	}
 
-	if (test_bit(WL_STATUS_READY, &cfg->status))
+	/* end any scanning */
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
 		brcmf_abort_scanning(cfg);
-	else
-		clear_bit(WL_STATUS_SCANNING, &cfg->status);
 
 	/* Turn off watchdog timer */
-	if (test_bit(WL_STATUS_READY, &cfg->status))
-		brcmf_set_mpc(ndev, 1);
+	brcmf_set_mpc(ndev, 1);
 
+exit:
 	WL_TRACE("Exit\n");
-
+	/* clear any scanning activity */
+	cfg->scan_status = 0;
 	return 0;
 }
 
 static __used s32
-brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
-{
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	u32 buflen;
-
-	buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
-			       WL_DCMD_LEN_MAX);
-	BUG_ON(!buflen);
-
-	return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
-			       buflen);
-}
-
-static s32
-brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
-		  s32 buf_len)
-{
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-	u32 len;
-	s32 err = 0;
-
-	len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
-			    WL_DCMD_LEN_MAX);
-	BUG_ON(!len);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
-			      WL_DCMD_LEN_MAX);
-	if (err) {
-		WL_ERR("error (%d)\n", err);
-		return err;
-	}
-	memcpy(buf, cfg->dcmd_buf, buf_len);
-
-	return err;
-}
-
-static __used s32
 brcmf_update_pmklist(struct net_device *ndev,
 		     struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
 {
@@ -3275,8 +2641,8 @@
 	}
 
 	if (!err)
-		brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
-					sizeof(*pmk_list));
+		brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info",
+					 (char *)pmk_list, sizeof(*pmk_list));
 
 	return err;
 }
@@ -3286,13 +2652,14 @@
 			 struct cfg80211_pmksa *pmksa)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
 	s32 err = 0;
 	int i;
 	int pmkid_len;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	pmkid_len = le32_to_cpu(pmkids->npmkid);
@@ -3325,12 +2692,13 @@
 		      struct cfg80211_pmksa *pmksa)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct pmkid_list pmkid;
 	s32 err = 0;
 	int i, pmkid_len;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
@@ -3375,10 +2743,11 @@
 brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
@@ -3398,10 +2767,11 @@
  * cfg80211_scan_request one out of the received PNO event.
  */
 static s32
-brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
-				struct net_device *ndev,
+brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
 				const struct brcmf_event_msg *e, void *data)
 {
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct net_device *ndev = ifp->ndev;
 	struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
 	struct cfg80211_scan_request *request = NULL;
 	struct cfg80211_ssid *ssid = NULL;
@@ -3416,7 +2786,7 @@
 
 	WL_SCAN("Enter\n");
 
-	if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
+	if (e->event_code == BRCMF_E_PFN_NET_LOST) {
 		WL_SCAN("PFN NET LOST event. Do Nothing\n");
 		return 0;
 	}
@@ -3478,15 +2848,15 @@
 		if (request->n_ssids)
 			request->ssids = &ssid[0];
 
-		if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+		if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
 			/* Abort any on-going scan */
 			brcmf_abort_scanning(cfg);
 		}
 
-		set_bit(WL_STATUS_SCANNING, &cfg->status);
+		set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
 		err = brcmf_do_escan(cfg, wiphy, ndev, request);
 		if (err) {
-			clear_bit(WL_STATUS_SCANNING, &cfg->status);
+			clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
 			goto out_err;
 		}
 		cfg->sched_escan = true;
@@ -3509,18 +2879,16 @@
 	return err;
 }
 
-#ifndef CONFIG_BRCMISCAN
 static int brcmf_dev_pno_clean(struct net_device *ndev)
 {
-	char iovbuf[128];
 	int ret;
 
 	/* Disable pfn */
-	ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+	ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
 	if (ret == 0) {
 		/* clear pfn */
-		ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
-					     iovbuf, sizeof(iovbuf));
+		ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
+					       NULL, 0);
 	}
 	if (ret < 0)
 		WL_ERR("failed code %d\n", ret);
@@ -3531,7 +2899,6 @@
 static int brcmf_dev_pno_config(struct net_device *ndev)
 {
 	struct brcmf_pno_param_le pfn_param;
-	char iovbuf[128];
 
 	memset(&pfn_param, 0, sizeof(pfn_param));
 	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
@@ -3544,9 +2911,8 @@
 	/* set up pno scan fr */
 	pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
 
-	return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
-				      &pfn_param, sizeof(pfn_param),
-				      iovbuf, sizeof(iovbuf));
+	return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set",
+					&pfn_param, sizeof(pfn_param));
 }
 
 static int
@@ -3554,7 +2920,7 @@
 				struct net_device *ndev,
 				struct cfg80211_sched_scan_request *request)
 {
-	char iovbuf[128];
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
 	struct brcmf_pno_net_param_le pfn;
 	int i;
@@ -3562,8 +2928,8 @@
 
 	WL_SCAN("Enter n_match_sets:%d   n_ssids:%d\n",
 		request->n_match_sets, request->n_ssids);
-	if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		WL_ERR("Scanning already: status (%lu)\n", cfg->scan_status);
 		return -EAGAIN;
 	}
 
@@ -3620,15 +2986,14 @@
 			pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
 			pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
 			memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
-			ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
-						     &pfn, sizeof(pfn),
-						     iovbuf, sizeof(iovbuf));
+			ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
+						       sizeof(pfn));
 			WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
 				ret == 0 ? "set" : "failed",
 				ssid->ssid);
 		}
 		/* Enable the PNO */
-		if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
+		if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
 			WL_ERR("PNO enable failed!! ret=%d\n", ret);
 			return -EINVAL;
 		}
@@ -3650,18 +3015,25 @@
 		brcmf_notify_escan_complete(cfg, ndev, true, true);
 	return 0;
 }
-#endif /* CONFIG_BRCMISCAN */
 
 #ifdef CONFIG_NL80211_TESTMODE
 static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct net_device *ndev = cfg->wdev->netdev;
+	struct net_device *ndev = cfg_to_ndev(cfg);
 	struct brcmf_dcmd *dcmd = data;
 	struct sk_buff *reply;
 	int ret;
 
-	ret = brcmf_netlink_dcmd(ndev, dcmd);
+	WL_TRACE("cmd %x set %d buf %p len %d\n", dcmd->cmd, dcmd->set,
+		 dcmd->buf, dcmd->len);
+
+	if (dcmd->set)
+		ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), dcmd->cmd,
+					     dcmd->buf, dcmd->len);
+	else
+		ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), dcmd->cmd,
+					     dcmd->buf, dcmd->len);
 	if (ret == 0) {
 		reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
 		nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
@@ -3673,23 +3045,23 @@
 
 static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err;
 
 	/* set auth */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
 	if (err < 0) {
 		WL_ERR("auth error %d\n", err);
 		return err;
 	}
 	/* set wsec */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
 	if (err < 0) {
 		WL_ERR("wsec error %d\n", err);
 		return err;
 	}
 	/* set upper-layer auth */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
-					  WPA_AUTH_NONE, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE);
 	if (err < 0) {
 		WL_ERR("wpa_auth error %d\n", err);
 		return err;
@@ -3708,8 +3080,9 @@
 
 static s32
 brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
-		     bool is_rsn_ie, s32 bssidx)
+		     bool is_rsn_ie)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	u32 auth = 0; /* d11 open authentication */
 	u16 count;
 	s32 err = 0;
@@ -3850,8 +3223,8 @@
 				wme_bss_disable = 0;
 		}
 		/* set wme_bss_disable to sync RSN Capabilities */
-		err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
-						  wme_bss_disable, bssidx);
+		err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
+					       wme_bss_disable);
 		if (err < 0) {
 			WL_ERR("wme_bss_disable error %d\n", err);
 			goto exit;
@@ -3861,19 +3234,19 @@
 	wsec = (pval | gval | SES_OW_ENABLED);
 
 	/* set auth */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
 	if (err < 0) {
 		WL_ERR("auth error %d\n", err);
 		goto exit;
 	}
 	/* set wsec */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
 	if (err < 0) {
 		WL_ERR("wsec error %d\n", err);
 		goto exit;
 	}
 	/* set upper-layer auth */
-	err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+	err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
 	if (err < 0) {
 		WL_ERR("wpa_auth error %d\n", err);
 		goto exit;
@@ -3884,7 +3257,7 @@
 }
 
 static s32
-brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
+brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
 		     struct parsed_vndr_ies *vndr_ies)
 {
 	s32 err = 0;
@@ -3963,17 +3336,18 @@
 	return ie_len + VNDR_IE_HDR_SIZE;
 }
 
-s32
-brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
-			struct net_device *ndev, s32 bssidx, s32 pktflag,
-			u8 *vndr_ie_buf, u32 vndr_ie_len)
+static
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+			  const u8 *vndr_ie_buf, u32 vndr_ie_len)
 {
+	struct brcmf_if *ifp;
+	struct vif_saved_ie *saved_ie;
 	s32 err = 0;
 	u8  *iovar_ie_buf;
 	u8  *curr_ie_buf;
 	u8  *mgmt_ie_buf = NULL;
 	int mgmt_ie_buf_len;
-	u32 *mgmt_ie_len = 0;
+	u32 *mgmt_ie_len;
 	u32 del_add_ie_buf_len = 0;
 	u32 total_ie_buf_len = 0;
 	u32 parsed_ie_buf_len = 0;
@@ -3984,31 +3358,33 @@
 	u8 *ptr;
 	int remained_buf_len;
 
-	WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+	if (!vif)
+		return -ENODEV;
+	ifp = vif->ifp;
+	saved_ie = &vif->saved_ie;
+
+	WL_TRACE("bssidx %d, pktflag : 0x%02X\n", ifp->bssidx, pktflag);
 	iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
 	if (!iovar_ie_buf)
 		return -ENOMEM;
 	curr_ie_buf = iovar_ie_buf;
-	if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
-	    test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+	if (ifp->vif->mode == WL_MODE_AP) {
 		switch (pktflag) {
 		case VNDR_IE_PRBRSP_FLAG:
-			mgmt_ie_buf = cfg->ap_info->probe_res_ie;
-			mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
-			mgmt_ie_buf_len =
-				sizeof(cfg->ap_info->probe_res_ie);
+			mgmt_ie_buf = saved_ie->probe_res_ie;
+			mgmt_ie_len = &saved_ie->probe_res_ie_len;
+			mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
 			break;
 		case VNDR_IE_BEACON_FLAG:
-			mgmt_ie_buf = cfg->ap_info->beacon_ie;
-			mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
-			mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+			mgmt_ie_buf = saved_ie->beacon_ie;
+			mgmt_ie_len = &saved_ie->beacon_ie_len;
+			mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
 			break;
 		default:
 			err = -EPERM;
 			WL_ERR("not suitable type\n");
 			goto exit;
 		}
-		bssidx = 0;
 	} else {
 		err = -EPERM;
 		WL_ERR("not suitable type\n");
@@ -4104,11 +3480,8 @@
 		}
 	}
 	if (total_ie_buf_len) {
-		err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
-						     iovar_ie_buf,
-						     total_ie_buf_len,
-						     cfg->extra_buf,
-						     WL_EXTRA_BUF_MAX, bssidx);
+		err  = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
+						 total_ie_buf_len);
 		if (err)
 			WL_ERR("vndr ie set error : %d\n", err);
 	}
@@ -4123,24 +3496,24 @@
 			struct cfg80211_ap_settings *settings)
 {
 	s32 ie_offset;
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_tlv *ssid_ie;
 	struct brcmf_ssid_le ssid_le;
-	s32 ioctl_value;
 	s32 err = -EPERM;
 	struct brcmf_tlv *rsn_ie;
 	struct brcmf_vs_tlv *wpa_ie;
 	struct brcmf_join_params join_params;
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	s32 bssidx = 0;
 
 	WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
-		 settings->channel_type, settings->beacon_interval,
+		 cfg80211_get_chandef_type(&settings->chandef),
+		 settings->beacon_interval,
 		 settings->dtim_period);
-	WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
+	WL_TRACE("ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
 		 settings->ssid, settings->ssid_len, settings->auth_type,
 		 settings->inactivity_timeout);
 
-	if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
+	if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
 		WL_ERR("Not in AP creation mode\n");
 		return -EPERM;
 	}
@@ -4164,20 +3537,17 @@
 	}
 
 	brcmf_set_mpc(ndev, 0);
-	ioctl_value = 1;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
 	if (err < 0) {
 		WL_ERR("BRCMF_C_DOWN error %d\n", err);
 		goto exit;
 	}
-	ioctl_value = 1;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
 	if (err < 0) {
 		WL_ERR("SET INFRA error %d\n", err);
 		goto exit;
 	}
-	ioctl_value = 1;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
 	if (err < 0) {
 		WL_ERR("setting AP mode failed %d\n", err);
 		goto exit;
@@ -4191,80 +3561,61 @@
 	wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
 				  settings->beacon.tail_len);
 
-	kfree(cfg->ap_info->rsn_ie);
-	cfg->ap_info->rsn_ie = NULL;
-	kfree(cfg->ap_info->wpa_ie);
-	cfg->ap_info->wpa_ie = NULL;
-
 	if ((wpa_ie != NULL || rsn_ie != NULL)) {
 		WL_TRACE("WPA(2) IE is found\n");
 		if (wpa_ie != NULL) {
 			/* WPA IE */
-			err = brcmf_configure_wpaie(ndev, wpa_ie, false,
-						    bssidx);
+			err = brcmf_configure_wpaie(ndev, wpa_ie, false);
 			if (err < 0)
 				goto exit;
-			cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
-							    wpa_ie->len +
-							    TLV_HDR_LEN,
-							    GFP_KERNEL);
 		} else {
 			/* RSN IE */
 			err = brcmf_configure_wpaie(ndev,
-				(struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
+				(struct brcmf_vs_tlv *)rsn_ie, true);
 			if (err < 0)
 				goto exit;
-			cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
-							    rsn_ie->len +
-							    TLV_HDR_LEN,
-							    GFP_KERNEL);
 		}
-		cfg->ap_info->security_mode = true;
 	} else {
 		WL_TRACE("No WPA(2) IEs found\n");
 		brcmf_configure_opensecurity(ndev, bssidx);
-		cfg->ap_info->security_mode = false;
 	}
 	/* Set Beacon IEs to FW */
-	err = brcmf_set_management_ie(cfg, ndev, bssidx,
-				      VNDR_IE_BEACON_FLAG,
-				      (u8 *)settings->beacon.tail,
-				      settings->beacon.tail_len);
+	err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
+				    VNDR_IE_BEACON_FLAG,
+				    settings->beacon.tail,
+				    settings->beacon.tail_len);
 	if (err)
 		WL_ERR("Set Beacon IE Failed\n");
 	else
 		WL_TRACE("Applied Vndr IEs for Beacon\n");
 
 	/* Set Probe Response IEs to FW */
-	err = brcmf_set_management_ie(cfg, ndev, bssidx,
-				      VNDR_IE_PRBRSP_FLAG,
-				      (u8 *)settings->beacon.proberesp_ies,
-				      settings->beacon.proberesp_ies_len);
+	err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
+				    VNDR_IE_PRBRSP_FLAG,
+				    settings->beacon.proberesp_ies,
+				    settings->beacon.proberesp_ies_len);
 	if (err)
 		WL_ERR("Set Probe Resp IE Failed\n");
 	else
 		WL_TRACE("Applied Vndr IEs for Probe Resp\n");
 
 	if (settings->beacon_interval) {
-		ioctl_value = settings->beacon_interval;
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
-					  &ioctl_value);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+					    settings->beacon_interval);
 		if (err < 0) {
 			WL_ERR("Beacon Interval Set Error, %d\n", err);
 			goto exit;
 		}
 	}
 	if (settings->dtim_period) {
-		ioctl_value = settings->dtim_period;
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
-					  &ioctl_value);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+					    settings->dtim_period);
 		if (err < 0) {
 			WL_ERR("DTIM Interval Set Error, %d\n", err);
 			goto exit;
 		}
 	}
-	ioctl_value = 1;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
 	if (err < 0) {
 		WL_ERR("BRCMF_C_UP error (%d)\n", err);
 		goto exit;
@@ -4274,14 +3625,14 @@
 	/* join parameters starts with ssid */
 	memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
 	/* create softap */
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
-			      sizeof(join_params));
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+				     &join_params, sizeof(join_params));
 	if (err < 0) {
 		WL_ERR("SET SSID error (%d)\n", err);
 		goto exit;
 	}
-	clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
-	set_bit(WL_STATUS_AP_CREATED, &cfg->status);
+	clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+	set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
 
 exit:
 	if (err)
@@ -4291,8 +3642,8 @@
 
 static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	s32 ioctl_value;
 	s32 err = -EPERM;
 
 	WL_TRACE("Enter\n");
@@ -4301,21 +3652,20 @@
 		/* Due to most likely deauths outstanding we sleep */
 		/* first to make sure they get processed by fw. */
 		msleep(400);
-		ioctl_value = 0;
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+		err = brcmf_fil_cmd_int_set(netdev_priv(ndev),
+					    BRCMF_C_SET_AP, 0);
 		if (err < 0) {
 			WL_ERR("setting AP mode failed %d\n", err);
 			goto exit;
 		}
-		ioctl_value = 0;
-		err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+		err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_UP, 0);
 		if (err < 0) {
 			WL_ERR("BRCMF_C_UP error %d\n", err);
 			goto exit;
 		}
 		brcmf_set_mpc(ndev, 1);
-		clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
-		clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+		clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+		clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
 	}
 exit:
 	return err;
@@ -4326,6 +3676,7 @@
 			   u8 *mac)
 {
 	struct brcmf_scb_val_le scbval;
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err;
 
 	if (!mac)
@@ -4333,13 +3684,13 @@
 
 	WL_TRACE("Enter %pM\n", mac);
 
-	if (!check_sys_up(wiphy))
+	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
 	memcpy(&scbval.ea, mac, ETH_ALEN);
 	scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
-			      &scbval, sizeof(scbval));
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+				     &scbval, sizeof(scbval));
 	if (err)
 		WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
 
@@ -4373,11 +3724,8 @@
 	.start_ap = brcmf_cfg80211_start_ap,
 	.stop_ap = brcmf_cfg80211_stop_ap,
 	.del_station = brcmf_cfg80211_del_station,
-#ifndef CONFIG_BRCMISCAN
-	/* scheduled scan need e-scan, which is mutual exclusive with i-scan */
 	.sched_scan_start = brcmf_cfg80211_sched_scan_start,
 	.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
-#endif
 #ifdef CONFIG_NL80211_TESTMODE
 	.testmode_cmd = brcmf_cfg80211_testmode
 #endif
@@ -4401,88 +3749,111 @@
 
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 {
-#ifndef CONFIG_BRCMISCAN
 	/* scheduled scan settings */
 	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
 	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
 	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
 	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-#endif
 }
 
-static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
+static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
 {
-	struct wireless_dev *wdev;
+	struct wiphy *wiphy;
 	s32 err = 0;
 
-	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
-	if (!wdev)
-		return ERR_PTR(-ENOMEM);
-
-	wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
-				sizeof(struct brcmf_cfg80211_info));
-	if (!wdev->wiphy) {
+	wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+	if (!wiphy) {
 		WL_ERR("Could not allocate wiphy device\n");
-		err = -ENOMEM;
-		goto wiphy_new_out;
+		return ERR_PTR(-ENOMEM);
 	}
-	set_wiphy_dev(wdev->wiphy, ndev);
-	wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
-	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
-	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-				       BIT(NL80211_IFTYPE_ADHOC) |
-				       BIT(NL80211_IFTYPE_AP);
-	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
-	wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;	/* Set
+	set_wiphy_dev(wiphy, phydev);
+	wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+	wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_ADHOC) |
+				 BIT(NL80211_IFTYPE_AP);
+	wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;	/* Set
 						* it as 11a by default.
 						* This will be updated with
 						* 11n phy tables in
 						* "ifconfig up"
 						* if phy has 11n capability
 						*/
-	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-	wdev->wiphy->cipher_suites = __wl_cipher_suites;
-	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
-	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;	/* enable power
+	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wiphy->cipher_suites = __wl_cipher_suites;
+	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;	/* enable power
 								 * save mode
 								 * by default
 								 */
-	brcmf_wiphy_pno_params(wdev->wiphy);
-	err = wiphy_register(wdev->wiphy);
+	brcmf_wiphy_pno_params(wiphy);
+	err = wiphy_register(wiphy);
 	if (err < 0) {
 		WL_ERR("Could not register wiphy device (%d)\n", err);
-		goto wiphy_register_out;
+		wiphy_free(wiphy);
+		return ERR_PTR(err);
 	}
-	return wdev;
-
-wiphy_register_out:
-	wiphy_free(wdev->wiphy);
-
-wiphy_new_out:
-	kfree(wdev);
-
-	return ERR_PTR(err);
+	return wiphy;
 }
 
-static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
+static
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+					   struct net_device *netdev,
+					   s32 mode, bool pm_block)
 {
-	struct wireless_dev *wdev = cfg->wdev;
+	struct brcmf_cfg80211_vif *vif;
 
-	if (!wdev) {
-		WL_ERR("wdev is invalid\n");
-		return;
+	if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
+		return ERR_PTR(-ENOSPC);
+
+	vif = kzalloc(sizeof(*vif), GFP_KERNEL);
+	if (!vif)
+		return ERR_PTR(-ENOMEM);
+
+	vif->wdev.wiphy = cfg->wiphy;
+	vif->wdev.netdev = netdev;
+	vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
+
+	if (netdev) {
+		vif->ifp = netdev_priv(netdev);
+		netdev->ieee80211_ptr = &vif->wdev;
+		SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
 	}
-	wiphy_unregister(wdev->wiphy);
-	wiphy_free(wdev->wiphy);
-	kfree(wdev);
-	cfg->wdev = NULL;
+
+	vif->mode = mode;
+	vif->pm_block = pm_block;
+	vif->roam_off = -1;
+
+	brcmf_init_prof(&vif->profile);
+
+	list_add_tail(&vif->list, &cfg->vif_list);
+	cfg->vif_cnt++;
+	return vif;
+}
+
+static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+{
+	struct brcmf_cfg80211_info *cfg;
+	struct wiphy *wiphy;
+
+	wiphy = vif->wdev.wiphy;
+	cfg = wiphy_priv(wiphy);
+	list_del(&vif->list);
+	cfg->vif_cnt--;
+
+	kfree(vif);
+	if (!cfg->vif_cnt) {
+		wiphy_unregister(wiphy);
+		wiphy_free(wiphy);
+	}
 }
 
 static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
 			    const struct brcmf_event_msg *e)
 {
-	u32 event = be32_to_cpu(e->event_type);
-	u32 status = be32_to_cpu(e->status);
+	u32 event = e->event_code;
+	u32 status = e->status;
 
 	if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
 		WL_CONN("Processing set ssid\n");
@@ -4496,8 +3867,8 @@
 static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
 			      const struct brcmf_event_msg *e)
 {
-	u32 event = be32_to_cpu(e->event_type);
-	u16 flags = be16_to_cpu(e->flags);
+	u32 event = e->event_code;
+	u16 flags = e->flags;
 
 	if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
 		WL_CONN("Processing link down\n");
@@ -4509,13 +3880,12 @@
 static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
 			       const struct brcmf_event_msg *e)
 {
-	u32 event = be32_to_cpu(e->event_type);
-	u32 status = be32_to_cpu(e->status);
+	u32 event = e->event_code;
+	u32 status = e->status;
 
 	if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
 		WL_CONN("Processing Link %s & no network found\n",
-				be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ?
-				"up" : "down");
+			e->flags & BRCMF_EVENT_MSG_LINK ? "up" : "down");
 		return true;
 	}
 
@@ -4541,7 +3911,7 @@
 
 static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
 {
-	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 	struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
 	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
 	u32 req_len;
@@ -4550,8 +3920,8 @@
 
 	brcmf_clear_assoc_ies(cfg);
 
-	err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
-				WL_ASSOC_INFO_MAX);
+	err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
+				       cfg->extra_buf, WL_ASSOC_INFO_MAX);
 	if (err) {
 		WL_ERR("could not get assoc info (%d)\n", err);
 		return err;
@@ -4561,9 +3931,9 @@
 	req_len = le32_to_cpu(assoc_info->req_len);
 	resp_len = le32_to_cpu(assoc_info->resp_len);
 	if (req_len) {
-		err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
-					   cfg->extra_buf,
-					   WL_ASSOC_INFO_MAX);
+		err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+					       cfg->extra_buf,
+					       WL_ASSOC_INFO_MAX);
 		if (err) {
 			WL_ERR("could not get assoc req (%d)\n", err);
 			return err;
@@ -4577,9 +3947,9 @@
 		conn_info->req_ie = NULL;
 	}
 	if (resp_len) {
-		err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
-					   cfg->extra_buf,
-					   WL_ASSOC_INFO_MAX);
+		err = brcmf_fil_iovar_data_get(ifp, "assoc_resp_ies",
+					       cfg->extra_buf,
+					       WL_ASSOC_INFO_MAX);
 		if (err) {
 			WL_ERR("could not get assoc resp (%d)\n", err);
 			return err;
@@ -4603,7 +3973,8 @@
 		       struct net_device *ndev,
 		       const struct brcmf_event_msg *e)
 {
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
 	struct wiphy *wiphy = cfg_to_wiphy(cfg);
 	struct ieee80211_channel *notify_channel = NULL;
@@ -4628,7 +3999,8 @@
 
 	/* data sent to dongle has to be little endian */
 	*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+				     buf, WL_BSS_INFO_MAX);
 
 	if (err)
 		goto done;
@@ -4652,7 +4024,7 @@
 			conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
 	WL_CONN("Report roaming result\n");
 
-	set_bit(WL_STATUS_CONNECTED, &cfg->status);
+	set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
 	WL_TRACE("Exit\n");
 	return err;
 }
@@ -4662,13 +4034,15 @@
 		       struct net_device *ndev, const struct brcmf_event_msg *e,
 		       bool completed)
 {
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
 	s32 err = 0;
 
 	WL_TRACE("Enter\n");
 
-	if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+	if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+			       &ifp->vif->sme_state)) {
 		if (completed) {
 			brcmf_get_assoc_ies(cfg);
 			memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4684,7 +4058,8 @@
 						    WLAN_STATUS_AUTH_TIMEOUT,
 					GFP_KERNEL);
 		if (completed)
-			set_bit(WL_STATUS_CONNECTED, &cfg->status);
+			set_bit(BRCMF_VIF_STATUS_CONNECTED,
+				&ifp->vif->sme_state);
 		WL_CONN("Report connect result - connection %s\n",
 				completed ? "succeeded" : "failed");
 	}
@@ -4698,9 +4073,9 @@
 			       const struct brcmf_event_msg *e, void *data)
 {
 	s32 err = 0;
-	u32 event = be32_to_cpu(e->event_type);
-	u32 reason = be32_to_cpu(e->reason);
-	u32 len = be32_to_cpu(e->datalen);
+	u32 event = e->event_code;
+	u32 reason = e->reason;
+	u32 len = e->datalen;
 	static int generation;
 
 	struct station_info sinfo;
@@ -4732,11 +4107,12 @@
 }
 
 static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
-			    struct net_device *ndev,
+brcmf_notify_connect_status(struct brcmf_if *ifp,
 			    const struct brcmf_event_msg *e, void *data)
 {
-	struct brcmf_cfg80211_profile *profile = cfg->profile;
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct net_device *ndev = ifp->ndev;
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	s32 err = 0;
 
 	if (cfg->conf->mode == WL_MODE_AP) {
@@ -4747,30 +4123,34 @@
 			memcpy(profile->bssid, e->addr, ETH_ALEN);
 			wl_inform_ibss(cfg, ndev, e->addr);
 			cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
-			clear_bit(WL_STATUS_CONNECTING, &cfg->status);
-			set_bit(WL_STATUS_CONNECTED, &cfg->status);
+			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+				  &ifp->vif->sme_state);
+			set_bit(BRCMF_VIF_STATUS_CONNECTED,
+				&ifp->vif->sme_state);
 		} else
 			brcmf_bss_connect_done(cfg, ndev, e, true);
 	} else if (brcmf_is_linkdown(cfg, e)) {
 		WL_CONN("Linkdown\n");
 		if (brcmf_is_ibssmode(cfg)) {
-			clear_bit(WL_STATUS_CONNECTING, &cfg->status);
-			if (test_and_clear_bit(WL_STATUS_CONNECTED,
-				&cfg->status))
+			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+				  &ifp->vif->sme_state);
+			if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+					       &ifp->vif->sme_state))
 				brcmf_link_down(cfg);
 		} else {
 			brcmf_bss_connect_done(cfg, ndev, e, false);
-			if (test_and_clear_bit(WL_STATUS_CONNECTED,
-				&cfg->status)) {
+			if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+					       &ifp->vif->sme_state)) {
 				cfg80211_disconnected(ndev, 0, NULL, 0,
-					GFP_KERNEL);
+						      GFP_KERNEL);
 				brcmf_link_down(cfg);
 			}
 		}
-		brcmf_init_prof(cfg->profile);
+		brcmf_init_prof(ndev_to_prof(ndev));
 	} else if (brcmf_is_nonetwork(cfg, e)) {
 		if (brcmf_is_ibssmode(cfg))
-			clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+				  &ifp->vif->sme_state);
 		else
 			brcmf_bss_connect_done(cfg, ndev, e, false);
 	}
@@ -4779,30 +4159,29 @@
 }
 
 static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
-			    struct net_device *ndev,
+brcmf_notify_roaming_status(struct brcmf_if *ifp,
 			    const struct brcmf_event_msg *e, void *data)
 {
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 	s32 err = 0;
-	u32 event = be32_to_cpu(e->event_type);
-	u32 status = be32_to_cpu(e->status);
+	u32 event = e->event_code;
+	u32 status = e->status;
 
 	if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
-		if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
-			brcmf_bss_roaming_done(cfg, ndev, e);
+		if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
+			brcmf_bss_roaming_done(cfg, ifp->ndev, e);
 		else
-			brcmf_bss_connect_done(cfg, ndev, e, true);
+			brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
 	}
 
 	return err;
 }
 
 static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
-			struct net_device *ndev,
+brcmf_notify_mic_status(struct brcmf_if *ifp,
 			const struct brcmf_event_msg *e, void *data)
 {
-	u16 flags = be16_to_cpu(e->flags);
+	u16 flags = e->flags;
 	enum nl80211_key_type key_type;
 
 	if (flags & BRCMF_EVENT_MSG_GROUP)
@@ -4810,82 +4189,12 @@
 	else
 		key_type = NL80211_KEYTYPE_PAIRWISE;
 
-	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+	cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1,
 				     NULL, GFP_KERNEL);
 
 	return 0;
 }
 
-static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
-			 struct net_device *ndev,
-			 const struct brcmf_event_msg *e, void *data)
-{
-	struct brcmf_channel_info_le channel_inform_le;
-	struct brcmf_scan_results_le *bss_list_le;
-	u32 len = WL_SCAN_BUF_MAX;
-	s32 err = 0;
-	bool scan_abort = false;
-	u32 scan_channel;
-
-	WL_TRACE("Enter\n");
-
-	if (cfg->iscan_on && cfg->iscan_kickstart) {
-		WL_TRACE("Exit\n");
-		return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
-	}
-
-	if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
-		WL_ERR("Scan complete while device not scanning\n");
-		scan_abort = true;
-		err = -EINVAL;
-		goto scan_done_out;
-	}
-
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le,
-			      sizeof(channel_inform_le));
-	if (err) {
-		WL_ERR("scan busy (%d)\n", err);
-		scan_abort = true;
-		goto scan_done_out;
-	}
-	scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
-	if (scan_channel)
-		WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
-	cfg->bss_list = cfg->scan_results;
-	bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
-
-	memset(cfg->scan_results, 0, len);
-	bss_list_le->buflen = cpu_to_le32(len);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
-			      cfg->scan_results, len);
-	if (err) {
-		WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
-		err = -EINVAL;
-		scan_abort = true;
-		goto scan_done_out;
-	}
-	cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
-	cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
-	cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
-
-	err = brcmf_inform_bss(cfg);
-	if (err)
-		scan_abort = true;
-
-scan_done_out:
-	if (cfg->scan_request) {
-		WL_SCAN("calling cfg80211_scan_done\n");
-		cfg80211_scan_done(cfg->scan_request, scan_abort);
-		brcmf_set_mpc(ndev, 1);
-		cfg->scan_request = NULL;
-	}
-
-	WL_TRACE("Exit\n");
-
-	return err;
-}
-
 static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
 {
 	conf->mode = (u32)-1;
@@ -4896,82 +4205,53 @@
 	conf->tx_power = -1;
 }
 
-static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
+static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
 {
-	memset(el, 0, sizeof(*el));
-	el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
-	el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
-	el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
-	el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
-	el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
+	brcmf_fweh_register(cfg->pub, BRCMF_E_LINK,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DISASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_REASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ROAM,
+			    brcmf_notify_roaming_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_MIC_ERROR,
+			    brcmf_notify_mic_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_SET_SSID,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
+			    brcmf_notify_sched_scan_results);
 }
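The hunk above retires the per-driver handler array (brcmf_init_eloop_handler) in favour of registering each callback with the shared firmware-event code via brcmf_fweh_register(cfg->pub, event, handler). A table-driven equivalent is sketched below purely to show the call shape; the table, the helper name and the enum type name are illustrative, not part of this patch:

static const struct {
	enum brcmf_fweh_event_code code;	/* assumed enum name */
	s32 (*handler)(struct brcmf_if *ifp,
		       const struct brcmf_event_msg *e, void *data);
} cfg_event_tbl[] = {
	{ BRCMF_E_LINK, brcmf_notify_connect_status },
	{ BRCMF_E_ROAM, brcmf_notify_roaming_status },
	{ BRCMF_E_MIC_ERROR, brcmf_notify_mic_status },
	/* remaining events as registered above */
};

static void brcmf_register_event_handlers_tbl(struct brcmf_cfg80211_info *cfg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_event_tbl); i++)
		brcmf_fweh_register(cfg->pub, cfg_event_tbl[i].code,
				    cfg_event_tbl[i].handler);
}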
 
 static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
 {
-	kfree(cfg->scan_results);
-	cfg->scan_results = NULL;
-	kfree(cfg->bss_info);
-	cfg->bss_info = NULL;
 	kfree(cfg->conf);
 	cfg->conf = NULL;
-	kfree(cfg->profile);
-	cfg->profile = NULL;
-	kfree(cfg->scan_req_int);
-	cfg->scan_req_int = NULL;
 	kfree(cfg->escan_ioctl_buf);
 	cfg->escan_ioctl_buf = NULL;
-	kfree(cfg->dcmd_buf);
-	cfg->dcmd_buf = NULL;
 	kfree(cfg->extra_buf);
 	cfg->extra_buf = NULL;
-	kfree(cfg->iscan);
-	cfg->iscan = NULL;
 	kfree(cfg->pmk_list);
 	cfg->pmk_list = NULL;
-	if (cfg->ap_info) {
-		kfree(cfg->ap_info->wpa_ie);
-		kfree(cfg->ap_info->rsn_ie);
-		kfree(cfg->ap_info);
-		cfg->ap_info = NULL;
-	}
 }
 
 static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
 {
-	cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
-	if (!cfg->scan_results)
-		goto init_priv_mem_out;
 	cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
 	if (!cfg->conf)
 		goto init_priv_mem_out;
-	cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
-	if (!cfg->profile)
-		goto init_priv_mem_out;
-	cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
-	if (!cfg->bss_info)
-		goto init_priv_mem_out;
-	cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
-					 GFP_KERNEL);
-	if (!cfg->scan_req_int)
-		goto init_priv_mem_out;
 	cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
 	if (!cfg->escan_ioctl_buf)
 		goto init_priv_mem_out;
-	cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
-	if (!cfg->dcmd_buf)
-		goto init_priv_mem_out;
 	cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
 	if (!cfg->extra_buf)
 		goto init_priv_mem_out;
-	cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
-	if (!cfg->iscan)
-		goto init_priv_mem_out;
 	cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
 	if (!cfg->pmk_list)
 		goto init_priv_mem_out;
@@ -4984,152 +4264,24 @@
 	return -ENOMEM;
 }
 
-/*
-* retrieve first queued event from head
-*/
-
-static struct brcmf_cfg80211_event_q *brcmf_deq_event(
-	struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_event_q *e = NULL;
-
-	spin_lock_irq(&cfg->evt_q_lock);
-	if (!list_empty(&cfg->evt_q_list)) {
-		e = list_first_entry(&cfg->evt_q_list,
-				     struct brcmf_cfg80211_event_q, evt_q_list);
-		list_del(&e->evt_q_list);
-	}
-	spin_unlock_irq(&cfg->evt_q_lock);
-
-	return e;
-}
-
-/*
-*	push event to tail of the queue
-*
-*	remark: this function may not sleep as it is called in atomic context.
-*/
-
-static s32
-brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
-		const struct brcmf_event_msg *msg, void *data)
-{
-	struct brcmf_cfg80211_event_q *e;
-	s32 err = 0;
-	ulong flags;
-	u32 data_len;
-	u32 total_len;
-
-	total_len = sizeof(struct brcmf_cfg80211_event_q);
-	if (data)
-		data_len = be32_to_cpu(msg->datalen);
-	else
-		data_len = 0;
-	total_len += data_len;
-	e = kzalloc(total_len, GFP_ATOMIC);
-	if (!e)
-		return -ENOMEM;
-
-	e->etype = event;
-	memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
-	if (data)
-		memcpy(&e->edata, data, data_len);
-
-	spin_lock_irqsave(&cfg->evt_q_lock, flags);
-	list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
-	spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
-
-	return err;
-}
-
-static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
-{
-	kfree(e);
-}
-
-static void brcmf_cfg80211_event_handler(struct work_struct *work)
-{
-	struct brcmf_cfg80211_info *cfg =
-			container_of(work, struct brcmf_cfg80211_info,
-				     event_work);
-	struct brcmf_cfg80211_event_q *e;
-
-	e = brcmf_deq_event(cfg);
-	if (unlikely(!e)) {
-		WL_ERR("event queue empty...\n");
-		return;
-	}
-
-	do {
-		WL_INFO("event type (%d)\n", e->etype);
-		if (cfg->el.handler[e->etype])
-			cfg->el.handler[e->etype](cfg,
-						       cfg_to_ndev(cfg),
-						       &e->emsg, e->edata);
-		else
-			WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
-		brcmf_put_event(e);
-	} while ((e = brcmf_deq_event(cfg)));
-
-}
-
-static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
-{
-	spin_lock_init(&cfg->evt_q_lock);
-	INIT_LIST_HEAD(&cfg->evt_q_list);
-}
-
-static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
-{
-	struct brcmf_cfg80211_event_q *e;
-
-	spin_lock_irq(&cfg->evt_q_lock);
-	while (!list_empty(&cfg->evt_q_list)) {
-		e = list_first_entry(&cfg->evt_q_list,
-				     struct brcmf_cfg80211_event_q, evt_q_list);
-		list_del(&e->evt_q_list);
-		kfree(e);
-	}
-	spin_unlock_irq(&cfg->evt_q_lock);
-}
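The functions removed above formed the driver's private event queue: events arriving in atomic context were copied into GFP_ATOMIC allocations, appended under evt_q_lock, and drained later by event_work in process context. That plumbing now lives in the common fweh code. For reference, the enqueue half of the idiom reduced to its essentials (names illustrative, not driver code):

struct evt_node {
	struct list_head node;
	u32 type;			/* copied event payload would follow */
};

/* May be called from atomic context: no sleeping allocations, and the
 * caller's IRQ state is unknown, hence GFP_ATOMIC plus irqsave locking.
 */
static int evt_enqueue(spinlock_t *lock, struct list_head *q, u32 type)
{
	struct evt_node *e;
	unsigned long flags;

	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return -ENOMEM;
	e->type = type;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&e->node, q);
	spin_unlock_irqrestore(lock, flags);
	return 0;
}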
-
 static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
 {
 	s32 err = 0;
 
 	cfg->scan_request = NULL;
 	cfg->pwr_save = true;
-#ifdef CONFIG_BRCMISCAN
-	cfg->iscan_on = true;	/* iscan on & off switch.
-				 we enable iscan per default */
-	cfg->escan_on = false;	/* escan on & off switch.
-				 we disable escan per default */
-#else
-	cfg->iscan_on = false;	/* iscan on & off switch.
-				 we disable iscan per default */
-	cfg->escan_on = true;	/* escan on & off switch.
-				 we enable escan per default */
-#endif
 	cfg->roam_on = true;	/* roam on & off switch.
 				 we enable roam per default */
-
-	cfg->iscan_kickstart = false;
 	cfg->active_scan = true;	/* we do active scan for
 				 specific scan per default */
 	cfg->dongle_up = false;	/* dongle is not up yet */
-	brcmf_init_eq(cfg);
 	err = brcmf_init_priv_mem(cfg);
 	if (err)
 		return err;
-	INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
-	brcmf_init_eloop_handler(&cfg->el);
+	brcmf_register_event_handlers(cfg);
 	mutex_init(&cfg->usr_sync);
-	err = brcmf_init_iscan(cfg);
-	if (err)
-		return err;
 	brcmf_init_escan(cfg);
 	brcmf_init_conf(cfg->conf);
-	brcmf_init_prof(cfg->profile);
 	brcmf_link_down(cfg);
 
 	return err;
@@ -5137,20 +4289,20 @@
 
 static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 {
-	cancel_work_sync(&cfg->event_work);
 	cfg->dongle_up = false;	/* dongle down */
-	brcmf_flush_eq(cfg);
 	brcmf_link_down(cfg);
 	brcmf_abort_scanning(cfg);
 	brcmf_deinit_priv_mem(cfg);
 }
 
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
-						  struct device *busdev,
-						  struct brcmf_pub *drvr)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr)
 {
-	struct wireless_dev *wdev;
+	struct net_device *ndev = drvr->iflist[0]->ndev;
+	struct device *busdev = drvr->dev;
 	struct brcmf_cfg80211_info *cfg;
+	struct wiphy *wiphy;
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
 	s32 err = 0;
 
 	if (!ndev) {
@@ -5158,122 +4310,61 @@
 		return NULL;
 	}
 
-	wdev = brcmf_alloc_wdev(busdev);
-	if (IS_ERR(wdev)) {
+	ifp = netdev_priv(ndev);
+	wiphy = brcmf_setup_wiphy(busdev);
+	if (IS_ERR(wiphy))
+		return NULL;
+
+	cfg = wiphy_priv(wiphy);
+	cfg->wiphy = wiphy;
+	cfg->pub = drvr;
+	INIT_LIST_HEAD(&cfg->vif_list);
+
+	vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+	if (IS_ERR(vif)) {
+		wiphy_free(wiphy);
 		return NULL;
 	}
 
-	wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
-	cfg = wdev_to_cfg(wdev);
-	cfg->wdev = wdev;
-	cfg->pub = drvr;
-	ndev->ieee80211_ptr = wdev;
-	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
-	wdev->netdev = ndev;
 	err = wl_init_priv(cfg);
 	if (err) {
 		WL_ERR("Failed to init iwm_priv (%d)\n", err);
 		goto cfg80211_attach_out;
 	}
 
+	ifp->vif = vif;
 	return cfg;
 
 cfg80211_attach_out:
-	brcmf_free_wdev(cfg);
+	brcmf_free_vif(vif);
 	return NULL;
 }
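brcmf_cfg80211_attach() now takes only the driver context: ndev and busdev are derived from drvr, the wiphy is allocated with the private info embedded behind it, and the primary vif is created and linked to the brcmf_if. The cfg pointer obtained from wiphy_priv() is the same memory later reached through wiphy_to_cfg(). brcmf_setup_wiphy() is not shown in this hunk; assuming it follows the usual cfg80211 pattern, it boils down to roughly this (the ops symbol name is a placeholder):

	wiphy = wiphy_new(&example_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
	if (!wiphy)
		return ERR_PTR(-ENOMEM);
	set_wiphy_dev(wiphy, busdev);	/* tie the wiphy to the bus device */
	cfg = wiphy_priv(wiphy);	/* private area allocated behind the wiphy */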
 
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
 {
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_cfg80211_vif *tmp;
+
 	wl_deinit_priv(cfg);
-	brcmf_free_wdev(cfg);
-}
-
-void
-brcmf_cfg80211_event(struct net_device *ndev,
-		  const struct brcmf_event_msg *e, void *data)
-{
-	u32 event_type = be32_to_cpu(e->event_type);
-	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-
-	if (!brcmf_enq_event(cfg, event_type, e, data))
-		schedule_work(&cfg->event_work);
-}
-
-static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
-{
-	/* Room for "event_msgs" + '\0' + bitvec */
-	s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
-	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
-	s32 err = 0;
-
-	WL_TRACE("Enter\n");
-
-	/* Setup event_msgs */
-	brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
-			iovbuf, sizeof(iovbuf));
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
-	if (err) {
-		WL_ERR("Get event_msgs error (%d)\n", err);
-		goto dongle_eventmsg_out;
+	list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
+		brcmf_free_vif(vif);
 	}
-	memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
-	setbit(eventmask, BRCMF_E_SET_SSID);
-	setbit(eventmask, BRCMF_E_ROAM);
-	setbit(eventmask, BRCMF_E_PRUNE);
-	setbit(eventmask, BRCMF_E_AUTH);
-	setbit(eventmask, BRCMF_E_REASSOC);
-	setbit(eventmask, BRCMF_E_REASSOC_IND);
-	setbit(eventmask, BRCMF_E_DEAUTH_IND);
-	setbit(eventmask, BRCMF_E_DISASSOC_IND);
-	setbit(eventmask, BRCMF_E_DISASSOC);
-	setbit(eventmask, BRCMF_E_JOIN);
-	setbit(eventmask, BRCMF_E_ASSOC_IND);
-	setbit(eventmask, BRCMF_E_PSK_SUP);
-	setbit(eventmask, BRCMF_E_LINK);
-	setbit(eventmask, BRCMF_E_NDIS_LINK);
-	setbit(eventmask, BRCMF_E_MIC_ERROR);
-	setbit(eventmask, BRCMF_E_PMKID_CACHE);
-	setbit(eventmask, BRCMF_E_TXFAIL);
-	setbit(eventmask, BRCMF_E_JOIN_START);
-	setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
-	setbit(eventmask, BRCMF_E_ESCAN_RESULT);
-	setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
-
-	brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
-			iovbuf, sizeof(iovbuf));
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
-	if (err) {
-		WL_ERR("Set event_msgs error (%d)\n", err);
-		goto dongle_eventmsg_out;
-	}
-
-dongle_eventmsg_out:
-	WL_TRACE("Exit\n");
-	return err;
 }
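Teardown now walks vif_list with the _safe iterator because brcmf_free_vif() is expected to unlink and free each entry; the plain list_for_each_entry() would advance through just-freed memory. The equivalent minimal idiom, with the assumed unlink made explicit:

	struct brcmf_cfg80211_vif *vif, *tmp;

	list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
		list_del(&vif->list);	/* assumed to happen inside brcmf_free_vif() */
		kfree(vif);
	}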
 
 static s32
 brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
 {
-	s8 iovbuf[32];
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
 	__le32 roamtrigger[2];
 	__le32 roam_delta[2];
-	__le32 bcn_to_le;
-	__le32 roamvar_le;
 
 	/*
 	 * Setup timeout if Beacons are lost and roam is
 	 * off to report link down
 	 */
 	if (roamvar) {
-		bcn_to_le = cpu_to_le32(bcn_timeout);
-		brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le,
-			sizeof(bcn_to_le), iovbuf, sizeof(iovbuf));
-		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR,
-				   iovbuf, sizeof(iovbuf));
+		err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
 		if (err) {
 			WL_ERR("bcn_timeout error (%d)\n", err);
 			goto dongle_rom_out;
@@ -5285,10 +4376,7 @@
 	 * to take care of roaming
 	 */
 	WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
-	roamvar_le = cpu_to_le32(roamvar);
-	brcmf_c_mkiovar("roam_off", (char *)&roamvar_le,
-				sizeof(roamvar_le), iovbuf, sizeof(iovbuf));
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+	err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
 	if (err) {
 		WL_ERR("roam_off error (%d)\n", err);
 		goto dongle_rom_out;
@@ -5296,8 +4384,8 @@
 
 	roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
 	roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER,
-			(void *)roamtrigger, sizeof(roamtrigger));
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
+				     (void *)roamtrigger, sizeof(roamtrigger));
 	if (err) {
 		WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
 		goto dongle_rom_out;
@@ -5305,8 +4393,8 @@
 
 	roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
 	roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA,
-				(void *)roam_delta, sizeof(roam_delta));
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
+				     (void *)roam_delta, sizeof(roam_delta));
 	if (err) {
 		WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
 		goto dongle_rom_out;
@@ -5320,13 +4408,11 @@
 brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
 		      s32 scan_unassoc_time, s32 scan_passive_time)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	s32 err = 0;
-	__le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time);
-	__le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time);
-	__le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time);
 
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
-			   &scan_assoc_tm_le, sizeof(scan_assoc_tm_le));
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+				    scan_assoc_time);
 	if (err) {
 		if (err == -EOPNOTSUPP)
 			WL_INFO("Scan assoc time is not supported\n");
@@ -5334,8 +4420,8 @@
 			WL_ERR("Scan assoc time error (%d)\n", err);
 		goto dongle_scantime_out;
 	}
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
-			   &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le));
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+				    scan_unassoc_time);
 	if (err) {
 		if (err == -EOPNOTSUPP)
 			WL_INFO("Scan unassoc time is not supported\n");
@@ -5344,8 +4430,8 @@
 		goto dongle_scantime_out;
 	}
 
-	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
-			   &scan_passive_tm_le, sizeof(scan_passive_tm_le));
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+				    scan_passive_time);
 	if (err) {
 		if (err == -EOPNOTSUPP)
 			WL_INFO("Scan passive time is not supported\n");
@@ -5360,13 +4446,14 @@
 
 static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 {
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 	struct wiphy *wiphy;
 	s32 phy_list;
 	s8 phy;
 	s32 err = 0;
 
-	err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
-			      &phy_list, sizeof(phy_list));
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
+				     &phy_list, sizeof(phy_list));
 	if (err) {
 		WL_ERR("error (%d)\n", err);
 		return err;
@@ -5403,12 +4490,9 @@
 	brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
 			WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
 
-	err = brcmf_dongle_eventmsg(ndev);
-	if (err)
-		goto default_conf_out;
-
 	power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
-	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
+	err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
+				    power_mode);
 	if (err)
 		goto default_conf_out;
 	WL_INFO("power save set to %s\n",
@@ -5436,66 +4520,26 @@
 
 }
 
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
+static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
 {
-	char buf[10+IFNAMSIZ];
-	struct dentry *fd;
-	s32 err = 0;
+	set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
+	if (ifp->idx)
+		return 0;
 
-	sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
-	cfg->debugfsdir = debugfs_create_dir(buf,
-					cfg_to_wiphy(cfg)->debugfsdir);
-
-	fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
-		(u16 *)&cfg->profile->beacon_interval);
-	if (!fd) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
-	fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
-		(u8 *)&cfg->profile->dtim_period);
-	if (!fd) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
-err_out:
-	return err;
+	return brcmf_config_dongle(ifp->drvr->config);
 }
 
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
+static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
 {
-	debugfs_remove_recursive(cfg->debugfsdir);
-	cfg->debugfsdir = NULL;
-}
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
-{
-	s32 err = 0;
-
-	set_bit(WL_STATUS_READY, &cfg->status);
-
-	brcmf_debugfs_add_netdev_params(cfg);
-
-	err = brcmf_config_dongle(cfg);
-	if (err)
-		return err;
-
-	brcmf_invoke_iscan(cfg);
-
-	return err;
-}
-
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
-{
 	/*
 	 * While going down, if associated with AP disassociate
 	 * from AP to save power
 	 */
-	if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
-	     test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
-	     test_bit(WL_STATUS_READY, &cfg->status)) {
+	if ((test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state) ||
+	     test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) &&
+	     check_vif_up(ifp->vif)) {
 		WL_INFO("Disassociating from AP");
 		brcmf_link_down(cfg);
 
@@ -5507,30 +4551,32 @@
 	}
 
 	brcmf_abort_scanning(cfg);
-	clear_bit(WL_STATUS_READY, &cfg->status);
-
-	brcmf_debugfs_remove_netdev(cfg);
+	clear_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
 
 	return 0;
 }
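Connection and readiness state is now kept per vif in the sme_state bitmap instead of the driver-global cfg->status, so the same bit tests no longer alias across interfaces. check_vif_up() is introduced elsewhere in this patch; assuming it is the obvious bit test, it reduces to:

static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
{
	/* assumed implementation: READY is tracked per vif now */
	return test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
}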
 
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
+s32 brcmf_cfg80211_up(struct net_device *ndev)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 	s32 err = 0;
 
 	mutex_lock(&cfg->usr_sync);
-	err = __brcmf_cfg80211_up(cfg);
+	err = __brcmf_cfg80211_up(ifp);
 	mutex_unlock(&cfg->usr_sync);
 
 	return err;
 }
 
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
+s32 brcmf_cfg80211_down(struct net_device *ndev)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 	s32 err = 0;
 
 	mutex_lock(&cfg->usr_sync);
-	err = __brcmf_cfg80211_down(cfg);
+	err = __brcmf_cfg80211_down(ifp);
 	mutex_unlock(&cfg->usr_sync);
 
 	return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 71ced17..e2ef851 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -84,31 +84,12 @@
 #define	WL_CONN(fmt, args...)
 #endif /* (defined DEBUG) */
 
-#define WL_NUM_SCAN_MAX		1
-#define WL_NUM_PMKIDS_MAX	MAXPMKID	/* will be used
-						 * for 2.6.33 kernel
-						 * or later
-						 */
-#define WL_SCAN_BUF_MAX			(1024 * 8)
+#define WL_NUM_SCAN_MAX			10
+#define WL_NUM_PMKIDS_MAX		MAXPMKID
 #define WL_TLV_INFO_MAX			1024
 #define WL_BSS_INFO_MAX			2048
-#define WL_ASSOC_INFO_MAX	512	/*
-				 * needs to grab assoc info from dongle to
-				 * report it to cfg80211 through "connect"
-				 * event
-				 */
-#define WL_DCMD_LEN_MAX	1024
-#define WL_EXTRA_BUF_MAX	2048
-#define WL_ISCAN_BUF_MAX	2048	/*
-				 * the buf length can be BRCMF_DCMD_MAXLEN
-				 * to reduce iteration
-				 */
-#define WL_ISCAN_TIMER_INTERVAL_MS	3000
-#define WL_SCAN_ERSULTS_LAST	(BRCMF_SCAN_RESULTS_NO_MEM+1)
-#define WL_AP_MAX	256	/* virtually unlimitted as long
-				 * as kernel memory allows
-				 */
-
+#define WL_ASSOC_INFO_MAX		512	/* assoc related fil max buf */
+#define WL_EXTRA_BUF_MAX		2048
 #define WL_ROAM_TRIGGER_LEVEL		-75
 #define WL_ROAM_DELTA			20
 #define WL_BEACON_TIMEOUT		3
@@ -127,15 +108,15 @@
 #define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
 #define IE_MAX_LEN			512
 
-/* dongle status */
-enum wl_status {
-	WL_STATUS_READY,
-	WL_STATUS_SCANNING,
-	WL_STATUS_SCAN_ABORTING,
-	WL_STATUS_CONNECTING,
-	WL_STATUS_CONNECTED,
-	WL_STATUS_AP_CREATING,
-	WL_STATUS_AP_CREATED
+/**
+ * enum brcmf_scan_status - dongle scan status
+ *
+ * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
+ * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
+ */
+enum brcmf_scan_status {
+	BRCMF_SCAN_STATUS_BUSY,
+	BRCMF_SCAN_STATUS_ABORT,
 };
 
 /* wi-fi mode */
@@ -145,25 +126,6 @@
 	WL_MODE_AP
 };
 
-/* dongle profile list */
-enum wl_prof_list {
-	WL_PROF_MODE,
-	WL_PROF_SSID,
-	WL_PROF_SEC,
-	WL_PROF_IBSS,
-	WL_PROF_BAND,
-	WL_PROF_BSSID,
-	WL_PROF_ACT,
-	WL_PROF_BEACONINT,
-	WL_PROF_DTIMPERIOD
-};
-
-/* dongle iscan state */
-enum wl_iscan_state {
-	WL_ISCAN_STATE_IDLE,
-	WL_ISCAN_STATE_SCANING
-};
-
 /* dongle configuration */
 struct brcmf_cfg80211_conf {
 	u32 mode;		/* adhoc , infrastructure or ap */
@@ -175,17 +137,6 @@
 	struct ieee80211_channel channel;
 };
 
-/* forward declaration */
-struct brcmf_cfg80211_info;
-
-/* cfg80211 main event loop */
-struct brcmf_cfg80211_event_loop {
-	s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
-				     struct net_device *ndev,
-				     const struct brcmf_event_msg *e,
-				     void *data);
-};
-
 /* basic structure of scan request */
 struct brcmf_cfg80211_scan_req {
 	struct brcmf_ssid_le ssid_le;
@@ -197,14 +148,6 @@
 	u8 buf[WL_TLV_INFO_MAX];
 };
 
-/* event queue for cfg80211 main event */
-struct brcmf_cfg80211_event_q {
-	struct list_head evt_q_list;
-	u32 etype;
-	struct brcmf_event_msg emsg;
-	s8 edata[1];
-};
-
 /* security information with currently associated ap */
 struct brcmf_cfg80211_security {
 	u32 wpa_versions;
@@ -214,45 +157,73 @@
 	u32 wpa_auth;
 };
 
-/* ibss information for currently joined ibss network */
-struct brcmf_cfg80211_ibss {
-	u8 beacon_interval;	/* in millisecond */
-	u8 atim;		/* in millisecond */
-	s8 join_only;
-	u8 band;
-	u8 channel;
-};
-
-/* dongle profile */
+/**
+ * struct brcmf_cfg80211_profile - profile information.
+ *
+ * @ssid: ssid of associated/associating ap.
+ * @bssid: bssid of joined/joining ibss.
+ * @sec: security information.
+ */
 struct brcmf_cfg80211_profile {
-	u32 mode;
 	struct brcmf_ssid ssid;
 	u8 bssid[ETH_ALEN];
-	u16 beacon_interval;
-	u8 dtim_period;
 	struct brcmf_cfg80211_security sec;
-	struct brcmf_cfg80211_ibss ibss;
-	s32 band;
 };
 
-/* dongle iscan event loop */
-struct brcmf_cfg80211_iscan_eloop {
-	s32 (*handler[WL_SCAN_ERSULTS_LAST])
-		(struct brcmf_cfg80211_info *cfg);
+/**
+ * enum brcmf_vif_status - bit indices for vif status.
+ *
+ * @BRCMF_VIF_STATUS_READY: ready for operation.
+ * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
+ * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
+ * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
+ */
+enum brcmf_vif_status {
+	BRCMF_VIF_STATUS_READY,
+	BRCMF_VIF_STATUS_CONNECTING,
+	BRCMF_VIF_STATUS_CONNECTED,
+	BRCMF_VIF_STATUS_AP_CREATING,
+	BRCMF_VIF_STATUS_AP_CREATED
 };
 
-/* dongle iscan controller */
-struct brcmf_cfg80211_iscan_ctrl {
-	struct net_device *ndev;
-	struct timer_list timer;
-	u32 timer_ms;
-	u32 timer_on;
-	s32 state;
-	struct work_struct work;
-	struct brcmf_cfg80211_iscan_eloop el;
-	void *data;
-	s8 dcmd_buf[BRCMF_DCMD_SMLEN];
-	s8 scan_buf[WL_ISCAN_BUF_MAX];
+/**
+ * struct vif_saved_ie - holds saved IEs for a virtual interface.
+ *
+ * @probe_res_ie: IE info for probe response.
+ * @beacon_ie: IE info for beacon frame.
+ * @probe_res_ie_len: IE info length for probe response.
+ * @beacon_ie_len: IE info length for beacon frame.
+ */
+struct vif_saved_ie {
+	u8  probe_res_ie[IE_MAX_LEN];
+	u8  beacon_ie[IE_MAX_LEN];
+	u32 probe_res_ie_len;
+	u32 beacon_ie_len;
+};
+
+/**
+ * struct brcmf_cfg80211_vif - virtual interface specific information.
+ *
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+ * @mode: operating mode.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+ * @list: linked list.
+ */
+struct brcmf_cfg80211_vif {
+	struct brcmf_if *ifp;
+	struct wireless_dev wdev;
+	struct brcmf_cfg80211_profile profile;
+	s32 mode;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_block;
+	struct vif_saved_ie saved_ie;
+	struct list_head list;
 };
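Because struct wireless_dev is embedded in the vif rather than allocated separately, the owning vif can be recovered from any wdev pointer without extra bookkeeping. A sketch of that accessor (the helper name is illustrative and may not match the one the patch actually adds):

static inline struct brcmf_cfg80211_vif *wdev_to_vif(struct wireless_dev *wdev)
{
	return container_of(wdev, struct brcmf_cfg80211_vif, wdev);
}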
 
 /* association inform */
@@ -288,17 +259,6 @@
 	struct net_device *ndev;
 };
 
-/* Structure to hold WPS, WPA IEs for a AP */
-struct ap_info {
-	u8 probe_res_ie[IE_MAX_LEN];
-	u8 beacon_ie[IE_MAX_LEN];
-	u32 probe_res_ie_len;
-	u32 beacon_ie_len;
-	u8 *wpa_ie;
-	u8 *rsn_ie;
-	bool security_mode;
-};
-
 /**
  * struct brcmf_pno_param_le - PNO scan configuration parameters
  *
@@ -383,28 +343,19 @@
 /**
  * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
  *
- * @wdev: representing wl cfg80211 device.
+ * @wiphy: wiphy object for cfg80211 interface.
  * @conf: dongle configuration.
  * @scan_request: cfg80211 scan request object.
- * @el: main event loop.
- * @evt_q_list: used for event queue.
- * @evt_q_lock: for event queue synchronization.
  * @usr_sync: mainly for dongle up/down synchronization.
  * @bss_list: bss_list holding scanned ap information.
- * @scan_results: results of the last scan.
  * @scan_req_int: internal scan request object.
  * @bss_info: bss information for cfg80211 layer.
  * @ie: information element object for internal purpose.
- * @profile: holding dongle profile.
- * @iscan: iscan controller information.
  * @conn_info: association info.
  * @pmk_list: wpa2 pmk list.
- * @event_work: event handler work struct.
- * @status: current dongle status.
+ * @scan_status: scan activity on the dongle.
  * @pub: common driver information.
  * @channel: current channel.
- * @iscan_on: iscan on/off switch.
- * @iscan_kickstart: indicate iscan already started.
  * @active_scan: current scan mode.
  * @sched_escan: e-scan for scheduled scan support running.
  * @ibss_starter: indicates this sta is ibss starter.
@@ -416,37 +367,27 @@
  * @dcmd_buf: dcmd buffer.
  * @extra_buf: mainly to grab assoc information.
  * @debugfsdir: debugfs folder for this device.
- * @escan_on: escan on/off switch.
  * @escan_info: escan information.
  * @escan_timeout: Timer for catch scan timeout.
  * @escan_timeout_work: scan timeout worker.
  * @escan_ioctl_buf: dongle command buffer for escan commands.
- * @ap_info: host ap information.
- * @ci: used to link this structure to netdev private data.
+ * @vif_list: linked list of vif instances.
+ * @vif_cnt: number of vif instances.
  */
 struct brcmf_cfg80211_info {
-	struct wireless_dev *wdev;
+	struct wiphy *wiphy;
 	struct brcmf_cfg80211_conf *conf;
 	struct cfg80211_scan_request *scan_request;
-	struct brcmf_cfg80211_event_loop el;
-	struct list_head evt_q_list;
-	spinlock_t	 evt_q_lock;
 	struct mutex usr_sync;
 	struct brcmf_scan_results *bss_list;
-	struct brcmf_scan_results *scan_results;
-	struct brcmf_cfg80211_scan_req *scan_req_int;
+	struct brcmf_cfg80211_scan_req scan_req_int;
 	struct wl_cfg80211_bss_info *bss_info;
 	struct brcmf_cfg80211_ie ie;
-	struct brcmf_cfg80211_profile *profile;
-	struct brcmf_cfg80211_iscan_ctrl *iscan;
 	struct brcmf_cfg80211_connect_info conn_info;
 	struct brcmf_cfg80211_pmk_list *pmk_list;
-	struct work_struct event_work;
-	unsigned long status;
+	unsigned long scan_status;
 	struct brcmf_pub *pub;
 	u32 channel;
-	bool iscan_on;
-	bool iscan_kickstart;
 	bool active_scan;
 	bool sched_escan;
 	bool ibss_starter;
@@ -458,17 +399,17 @@
 	u8 *dcmd_buf;
 	u8 *extra_buf;
 	struct dentry *debugfsdir;
-	bool escan_on;
 	struct escan_info escan_info;
 	struct timer_list escan_timeout;
 	struct work_struct escan_timeout_work;
 	u8 *escan_ioctl_buf;
-	struct ap_info *ap_info;
+	struct list_head vif_list;
+	u8 vif_cnt;
 };
 
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
 {
-	return w->wdev->wiphy;
+	return cfg->wiphy;
 }
 
 static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
@@ -481,9 +422,12 @@
 	return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
 }
 
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
+static inline
+struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
 {
-	return cfg->wdev->netdev;
+	struct brcmf_cfg80211_vif *vif;
+	vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list);
+	return vif->wdev.netdev;
 }
 
 static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
@@ -491,8 +435,17 @@
 	return wdev_to_cfg(ndev->ieee80211_ptr);
 }
 
-#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
-#define cfg_to_iscan(w) (w->iscan)
+static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
+{
+	struct brcmf_if *ifp = netdev_priv(nd);
+	return &ifp->vif->profile;
+}
+
+static inline struct brcmf_cfg80211_vif *ndev_to_vif(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	return ifp->vif;
+}
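The iscan accessor macros are replaced by typed inline helpers that follow the new ownership chain net_device -> netdev_priv() -> brcmf_if -> vif. Both helpers resolve through the same brcmf_if, so for any ndev:

	struct brcmf_cfg80211_vif *vif = ndev_to_vif(ndev);
	struct brcmf_cfg80211_profile *prof = ndev_to_prof(ndev);

	/* prof is simply the profile embedded in that vif */
	WARN_ON(prof != &vif->profile);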
 
 static inline struct
 brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
@@ -500,15 +453,9 @@
 	return &cfg->conn_info;
 }
 
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
-						  struct device *busdev,
-						  struct brcmf_pub *drvr);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr);
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
-
-/* event handler from dongle */
-void brcmf_cfg80211_event(struct net_device *ndev,
-			  const struct brcmf_event_msg *e, void *data);
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_up(struct net_device *ndev);
+s32 brcmf_cfg80211_down(struct net_device *ndev);
 
 #endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index e227c4c..d3d4151 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -40,7 +40,8 @@
 	phy/phytbl_n.o \
 	phy/phy_qmath.o \
 	dma.o \
-	brcms_trace_events.o
+	brcms_trace_events.o \
+	debug.o
 
 MODULEPFX := brcmsmac
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index b89f127..de96290 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -692,7 +692,7 @@
 	sii = container_of(sih, struct si_info, pub);
 
 	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
-		bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
+		bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
 }
 
 /* Unconfigure and/or apply various WARs when going down */
@@ -703,7 +703,7 @@
 	sii = container_of(sih, struct si_info, pub);
 
 	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
-		bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
+		bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
 }
 
 /* Enable BT-COEX & Ex-PA for 4313 */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index be5bcfb..1de94f3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -21,6 +21,8 @@
 #include "antsel.h"
 #include "main.h"
 #include "ampdu.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
 
 /* max number of mpdus in an ampdu */
 #define AMPDU_MAX_MPDU			32
@@ -40,8 +42,6 @@
 #define AMPDU_DEF_RETRY_LIMIT		5
 /* default tx retry limit at reg rate */
 #define AMPDU_DEF_RR_RETRY_LIMIT	2
-/* default weight of ampdu in txfifo */
-#define AMPDU_DEF_TXPKT_WEIGHT		2
 /* default ffpld reserved bytes */
 #define AMPDU_DEF_FFPLD_RSVD		2048
 /* # of inis to be freed on detach */
@@ -114,7 +114,6 @@
  * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
  * max_pdu: max pdus allowed in ampdu
  * dur: max duration of an ampdu (in msec)
- * txpkt_weight: weight of ampdu in txfifo; reduces rate lag
  * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
  * ffpld_rsvd: number of bytes to reserve for preload
  * max_txlen: max size of ampdu per mcs, bw and sgi
@@ -136,7 +135,6 @@
 	u8 mpdu_density;
 	s8 max_pdu;
 	u8 dur;
-	u8 txpkt_weight;
 	u8 rx_factor;
 	u32 ffpld_rsvd;
 	u32 max_txlen[MCS_TABLE_SIZE][2][2];
@@ -183,18 +181,19 @@
 static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
 {
 	struct brcms_c_info *wlc = ampdu->wlc;
+	struct bcma_device *core = wlc->hw->d11core;
 
 	wlc->pub->_ampdu = false;
 
 	if (on) {
 		if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
-			wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not "
-				"nmode enabled\n", wlc->pub->unit);
+			brcms_err(core, "wl%d: driver not nmode enabled\n",
+				  wlc->pub->unit);
 			return -ENOTSUPP;
 		}
 		if (!brcms_c_ampdu_cap(ampdu)) {
-			wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
-				"ampdu capable\n", wlc->pub->unit);
+			brcms_err(core, "wl%d: device not ampdu capable\n",
+				  wlc->pub->unit);
 			return -ENOTSUPP;
 		}
 		wlc->pub->_ampdu = on;
@@ -247,7 +246,6 @@
 	ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
 	ampdu->max_pdu = AUTO;
 	ampdu->dur = AMPDU_MAX_DUR;
-	ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
 
 	ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
 	/*
@@ -374,7 +372,8 @@
 				      offsetof(struct macstat, txfunfl[fid]));
 	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
 	if (new_txunfl == 0) {
-		BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n");
+		brcms_dbg_ht(wlc->hw->d11core,
+			     "TX status FRAG set but no tx underflows\n");
 		return -1;
 	}
 	fifo->prev_txfunfl = cur_txunfl;
@@ -396,8 +395,8 @@
 	if (fifo->accum_txfunfl < 10)
 		return 0;
 
-	BCMMSG(wlc->wiphy, "ampdu_count %d  tx_underflows %d\n",
-		current_ampdu_cnt, fifo->accum_txfunfl);
+	brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d  tx_underflows %d\n",
+		     current_ampdu_cnt, fifo->accum_txfunfl);
 
 	/*
 	   compute the current ratio of tx unfl per ampdu.
@@ -450,9 +449,10 @@
 		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
 		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
 
-		BCMMSG(wlc->wiphy, "DMA estimated transfer rate %d; "
-			"pre-load size %d\n",
-			fifo->dmaxferrate, fifo->ampdu_pld_size);
+		brcms_dbg_ht(wlc->hw->d11core,
+			     "DMA estimated transfer rate %d; "
+			     "pre-load size %d\n",
+			     fifo->dmaxferrate, fifo->ampdu_pld_size);
 	} else {
 
 		/* decrease ampdu size */
@@ -486,7 +486,7 @@
 	scb_ampdu = &scb->scb_ampdu;
 
 	if (!ampdu->ini_enable[tid]) {
-		wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
+		brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
 			  __func__, tid);
 		return;
 	}
@@ -498,378 +498,324 @@
 	scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
 }
 
-int
-brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
-	      struct sk_buff **pdu, int prec)
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+				 struct brcms_c_info *wlc)
 {
-	struct brcms_c_info *wlc;
-	struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
-	u8 tid, ndelim;
-	int err = 0;
+	session->wlc = wlc;
+	skb_queue_head_init(&session->skb_list);
+	session->max_ampdu_len = 0;    /* determined from first MPDU */
+	session->max_ampdu_frames = 0; /* determined from first MPDU */
+	session->ampdu_len = 0;
+	session->dma_len = 0;
+}
+
+/*
+ * Preps the given packet for AMPDU based on the session data. If the
+ * frame cannot be accomodated in the current session, -ENOSPC is
+ * returned.
+ */
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+			    struct sk_buff *p)
+{
+	struct brcms_c_info *wlc = session->wlc;
+	struct ampdu_info *ampdu = wlc->ampdu;
+	struct scb *scb = &wlc->pri_scb;
+	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
+	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
+	struct d11txh *txh = (struct d11txh *)p->data;
+	unsigned ampdu_frames;
+	u8 ndelim, tid;
+	u8 *plcp;
+	uint len;
+	u16 mcl;
+	bool fbr_iscck;
+	bool rr;
+
+	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
+	plcp = (u8 *)(txh + 1);
+	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
+	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
+			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+	len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;
+
+	ampdu_frames = skb_queue_len(&session->skb_list);
+	if (ampdu_frames != 0) {
+		struct sk_buff *first;
+
+		if (ampdu_frames + 1 > session->max_ampdu_frames ||
+		    session->ampdu_len + len > session->max_ampdu_len)
+			return -ENOSPC;
+
+		/*
+		 * We aren't really out of space if the new frame is of
+		 * a different priority, but we want the same behaviour
+		 * so return -ENOSPC anyway.
+		 *
+		 * XXX: The old AMPDU code did this, but is it really
+		 * necessary?
+		 */
+		first = skb_peek(&session->skb_list);
+		if (p->priority != first->priority)
+			return -ENOSPC;
+	}
+
+	/*
+	 * Now that we're sure this frame can be accommodated, update the
+	 * session information.
+	 */
+	session->ampdu_len += len;
+	session->dma_len += p->len;
+
+	tid = (u8)p->priority;
+
+	/* Handle retry limits */
+	if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
+		txrate[0].count++;
+		rr = true;
+	} else {
+		txrate[1].count++;
+		rr = false;
+	}
+
+	if (ampdu_frames == 0) {
+		u8 plcp0, plcp3, is40, sgi, mcs;
+		uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
+		struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];
+
+		if (rr) {
+			plcp0 = plcp[0];
+			plcp3 = plcp[3];
+		} else {
+			plcp0 = txh->FragPLCPFallback[0];
+			plcp3 = txh->FragPLCPFallback[3];
+
+		}
+
+		/* Limit AMPDU size based on MCS */
+		is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
+		sgi = plcp3_issgi(plcp3) ? 1 : 0;
+		mcs = plcp0 & ~MIMO_PLCP_40MHZ;
+		session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
+					     ampdu->max_txlen[mcs][is40][sgi]);
+
+		session->max_ampdu_frames = scb_ampdu->max_pdu;
+		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
+			session->max_ampdu_frames =
+				min_t(u16, f->mcs2ampdu_table[mcs],
+				      session->max_ampdu_frames);
+		}
+	}
+
+	/*
+	 * Treat all frames as "middle" frames of AMPDU here. First and
+	 * last frames must be fixed up after all MPDUs have been prepped.
+	 */
+	mcl = le16_to_cpu(txh->MacTxControlLow);
+	mcl &= ~TXC_AMPDU_MASK;
+	mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
+	mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
+	txh->MacTxControlLow = cpu_to_le16(mcl);
+	txh->PreloadSize = 0;	/* always default to 0 */
+
+	skb_queue_tail(&session->skb_list, p);
+
+	return 0;
+}
+
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
+{
+	struct brcms_c_info *wlc = session->wlc;
+	struct ampdu_info *ampdu = wlc->ampdu;
+	struct sk_buff *first, *last;
+	struct d11txh *txh;
+	struct ieee80211_tx_info *tx_info;
+	struct ieee80211_tx_rate *txrate;
+	u8 ndelim;
+	u8 *plcp;
+	uint len;
+	uint fifo;
+	struct brcms_fifo_info *f;
+	u16 mcl;
+	bool fbr;
+	bool fbr_iscck;
+	struct ieee80211_rts *rts;
+	bool use_rts = false, use_cts = false;
+	u16 dma_len = session->dma_len;
+	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
+	u32 rspec = 0, rspec_fallback = 0;
+	u32 rts_rspec = 0, rts_rspec_fallback = 0;
+	u8 plcp0, plcp3, is40, sgi, mcs;
+	u16 mch;
 	u8 preamble_type = BRCMS_GF_PREAMBLE;
 	u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
 	u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
 	u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
 
-	bool rr = true, fbr = false;
-	uint i, count = 0, fifo, seg_cnt = 0;
-	u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
-	u32 ampdu_len, max_ampdu_bytes = 0;
-	struct d11txh *txh = NULL;
-	u8 *plcp;
-	struct ieee80211_hdr *h;
-	struct scb *scb;
-	struct scb_ampdu *scb_ampdu;
-	struct scb_ampdu_tid_ini *ini;
-	u8 mcs = 0;
-	bool use_rts = false, use_cts = false;
-	u32 rspec = 0, rspec_fallback = 0;
-	u32 rts_rspec = 0, rts_rspec_fallback = 0;
-	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
-	struct ieee80211_rts *rts;
-	u8 rr_retry_limit;
-	struct brcms_fifo_info *f;
-	bool fbr_iscck;
-	struct ieee80211_tx_info *tx_info;
-	u16 qlen;
-	struct wiphy *wiphy;
+	if (skb_queue_empty(&session->skb_list))
+		return;
 
-	wlc = ampdu->wlc;
-	wiphy = wlc->wiphy;
-	p = *pdu;
+	first = skb_peek(&session->skb_list);
+	last = skb_peek_tail(&session->skb_list);
 
-	tid = (u8) (p->priority);
+	/* Need to fix up last MPDU first to adjust AMPDU length */
+	txh = (struct d11txh *)last->data;
+	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
+	f = &ampdu->fifo_tb[fifo];
 
-	f = ampdu->fifo_tb + prio2fifo[tid];
+	mcl = le16_to_cpu(txh->MacTxControlLow);
+	mcl &= ~TXC_AMPDU_MASK;
+	mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
+	txh->MacTxControlLow = cpu_to_le16(mcl);
 
-	scb = &wlc->pri_scb;
-	scb_ampdu = &scb->scb_ampdu;
-	ini = &scb_ampdu->ini[tid];
+	/* remove the null delimiter after last mpdu */
+	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
+	txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
+	session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
 
-	/* Let pressure continue to build ... */
-	qlen = pktq_plen(&qi->q, prec);
-	if (ini->tx_in_transit > 0 &&
-	    qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
-		/* Collect multiple MPDU's to be sent in the next AMPDU */
-		return -EBUSY;
+	/* remove the pad len from last mpdu */
+	fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
+	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
+			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+	session->ampdu_len -= roundup(len, 4) - len;
 
-	/* at this point we intend to transmit an AMPDU */
-	rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
-	ampdu_len = 0;
-	dma_len = 0;
-	while (p) {
-		struct ieee80211_tx_rate *txrate;
+	/* Now fix up the first MPDU */
+	tx_info = IEEE80211_SKB_CB(first);
+	txrate = tx_info->status.rates;
+	txh = (struct d11txh *)first->data;
+	plcp = (u8 *)(txh + 1);
+	rts = (struct ieee80211_rts *)&txh->rts_frame;
 
-		tx_info = IEEE80211_SKB_CB(p);
-		txrate = tx_info->status.rates;
-
-		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
-			err = brcms_c_prep_pdu(wlc, p, &fifo);
-		} else {
-			wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__);
-			*pdu = NULL;
-			err = 0;
-			break;
-		}
-
-		if (err) {
-			if (err == -EBUSY) {
-				wiphy_err(wiphy, "wl%d: sendampdu: "
-					  "prep_xdu retry; seq 0x%x\n",
-					  wlc->pub->unit, seq);
-				*pdu = p;
-				break;
-			}
-
-			/* error in the packet; reject it */
-			wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu "
-				  "rejected; seq 0x%x\n", wlc->pub->unit, seq);
-			*pdu = NULL;
-			break;
-		}
-
-		/* pkt is good to be aggregated */
-		txh = (struct d11txh *) p->data;
-		plcp = (u8 *) (txh + 1);
-		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
-		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
-		index = TX_SEQ_TO_INDEX(seq);
-
-		/* check mcl fields and test whether it can be agg'd */
-		mcl = le16_to_cpu(txh->MacTxControlLow);
+	mcl = le16_to_cpu(txh->MacTxControlLow);
+	/* If only one MPDU leave it marked as last */
+	if (first != last) {
 		mcl &= ~TXC_AMPDU_MASK;
-		fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3);
-		txh->PreloadSize = 0;	/* always default to 0 */
-
-		/*  Handle retry limits */
-		if (txrate[0].count <= rr_retry_limit) {
-			txrate[0].count++;
-			rr = true;
-			fbr = false;
-		} else {
-			fbr = true;
-			rr = false;
-			txrate[1].count++;
-		}
-
-		/* extract the length info */
-		len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
-		    : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
-
-		/* retrieve null delimiter count */
-		ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
-		seg_cnt += 1;
-
-		BCMMSG(wlc->wiphy, "wl%d: mpdu %d plcp_len %d\n",
-			wlc->pub->unit, count, len);
-
-		/*
-		 * aggregateable mpdu. For ucode/hw agg,
-		 * test whether need to break or change the epoch
-		 */
-		if (count == 0) {
-			mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
-			/* refill the bits since might be a retx mpdu */
-			mcl |= TXC_STARTMSDU;
-			rts = (struct ieee80211_rts *)&txh->rts_frame;
-
-			if (ieee80211_is_rts(rts->frame_control)) {
-				mcl |= TXC_SENDRTS;
-				use_rts = true;
-			}
-			if (ieee80211_is_cts(rts->frame_control)) {
-				mcl |= TXC_SENDCTS;
-				use_cts = true;
-			}
-		} else {
-			mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
-			mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
-		}
-
-		len = roundup(len, 4);
-		ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
-
-		dma_len += (u16) p->len;
-
-		BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
-			" seg_cnt %d null delim %d\n",
-			wlc->pub->unit, ampdu_len, seg_cnt, ndelim);
-
-		txh->MacTxControlLow = cpu_to_le16(mcl);
-
-		/* this packet is added */
-		pkt[count++] = p;
-
-		/* patch the first MPDU */
-		if (count == 1) {
-			u8 plcp0, plcp3, is40, sgi;
-
-			if (rr) {
-				plcp0 = plcp[0];
-				plcp3 = plcp[3];
-			} else {
-				plcp0 = txh->FragPLCPFallback[0];
-				plcp3 = txh->FragPLCPFallback[3];
-
-			}
-			is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
-			sgi = plcp3_issgi(plcp3) ? 1 : 0;
-			mcs = plcp0 & ~MIMO_PLCP_40MHZ;
-			max_ampdu_bytes =
-			    min(scb_ampdu->max_rx_ampdu_bytes,
-				ampdu->max_txlen[mcs][is40][sgi]);
-
-			if (is40)
-				mimo_ctlchbw =
-				   CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
-								 wlc->band->pi))
-				   ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
-
-			/* rebuild the rspec and rspec_fallback */
-			rspec = RSPEC_MIMORATE;
-			rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
-			if (plcp[0] & MIMO_PLCP_40MHZ)
-				rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
-
-			if (fbr_iscck)	/* CCK */
-				rspec_fallback = cck_rspec(cck_phy2mac_rate
-						    (txh->FragPLCPFallback[0]));
-			else {	/* MIMO */
-				rspec_fallback = RSPEC_MIMORATE;
-				rspec_fallback |=
-				    txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
-				if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
-					rspec_fallback |=
-					    (PHY_TXC1_BW_40MHZ <<
-					     RSPEC_BW_SHIFT);
-			}
-
-			if (use_rts || use_cts) {
-				rts_rspec =
-				    brcms_c_rspec_to_rts_rspec(wlc,
-					rspec, false, mimo_ctlchbw);
-				rts_rspec_fallback =
-				    brcms_c_rspec_to_rts_rspec(wlc,
-					rspec_fallback, false, mimo_ctlchbw);
-			}
-		}
-
-		/* if (first mpdu for host agg) */
-		/* test whether to add more */
-		if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) &&
-		    (count == f->mcs2ampdu_table[mcs])) {
-			BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping"
-				" ampdu at %d for mcs %d\n",
-				wlc->pub->unit, count, mcs);
-			break;
-		}
-
-		if (count == scb_ampdu->max_pdu)
-			break;
-
-		/*
-		 * check to see if the next pkt is
-		 * a candidate for aggregation
-		 */
-		p = pktq_ppeek(&qi->q, prec);
-		if (p) {
-			tx_info = IEEE80211_SKB_CB(p);
-			if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
-			    ((u8) (p->priority) == tid)) {
-				plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
-				plen = max(scb_ampdu->min_len, plen);
-
-				if ((plen + ampdu_len) > max_ampdu_bytes) {
-					p = NULL;
-					continue;
-				}
-
-				/*
-				 * check if there are enough
-				 * descriptors available
-				 */
-				if (*wlc->core->txavail[fifo] <= seg_cnt + 1) {
-					wiphy_err(wiphy, "%s: No fifo space  "
-						  "!!\n", __func__);
-					p = NULL;
-					continue;
-				}
-				/* next packet fit for aggregation so dequeue */
-				p = brcmu_pktq_pdeq(&qi->q, prec);
-			} else {
-				p = NULL;
-			}
-		}
-	}			/* end while(p) */
-
-	ini->tx_in_transit += count;
-
-	if (count) {
-		/* patch up the last txh */
-		txh = (struct d11txh *) pkt[count - 1]->data;
-		mcl = le16_to_cpu(txh->MacTxControlLow);
-		mcl &= ~TXC_AMPDU_MASK;
-		mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
-		txh->MacTxControlLow = cpu_to_le16(mcl);
-
-		/* remove the null delimiter after last mpdu */
-		ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
-		txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
-		ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
-
-		/* remove the pad len from last mpdu */
-		fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
-		len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
-		    : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
-		ampdu_len -= roundup(len, 4) - len;
-
-		/* patch up the first txh & plcp */
-		txh = (struct d11txh *) pkt[0]->data;
-		plcp = (u8 *) (txh + 1);
-
-		BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
-		/* mark plcp to indicate ampdu */
-		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
-
-		/* reset the mixed mode header durations */
-		if (txh->MModeLen) {
-			u16 mmodelen =
-			    brcms_c_calc_lsig_len(wlc, rspec, ampdu_len);
-			txh->MModeLen = cpu_to_le16(mmodelen);
-			preamble_type = BRCMS_MM_PREAMBLE;
-		}
-		if (txh->MModeFbrLen) {
-			u16 mmfbrlen =
-			    brcms_c_calc_lsig_len(wlc, rspec_fallback,
-						  ampdu_len);
-			txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
-			fbr_preamble_type = BRCMS_MM_PREAMBLE;
-		}
-
-		/* set the preload length */
-		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
-			dma_len = min(dma_len, f->ampdu_pld_size);
-			txh->PreloadSize = cpu_to_le16(dma_len);
-		} else
-			txh->PreloadSize = 0;
-
-		mch = le16_to_cpu(txh->MacTxControlHigh);
-
-		/* update RTS dur fields */
-		if (use_rts || use_cts) {
-			u16 durid;
-			rts = (struct ieee80211_rts *)&txh->rts_frame;
-			if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
-			    TXC_PREAMBLE_RTS_MAIN_SHORT)
-				rts_preamble_type = BRCMS_SHORT_PREAMBLE;
-
-			if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
-			    TXC_PREAMBLE_RTS_FB_SHORT)
-				rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
-
-			durid =
-			    brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
-						   rspec, rts_preamble_type,
-						   preamble_type, ampdu_len,
-						   true);
-			rts->duration = cpu_to_le16(durid);
-			durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
-						       rts_rspec_fallback,
-						       rspec_fallback,
-						       rts_fbr_preamble_type,
-						       fbr_preamble_type,
-						       ampdu_len, true);
-			txh->RTSDurFallback = cpu_to_le16(durid);
-			/* set TxFesTimeNormal */
-			txh->TxFesTimeNormal = rts->duration;
-			/* set fallback rate version of TxFesTimeNormal */
-			txh->TxFesTimeFallback = txh->RTSDurFallback;
-		}
-
-		/* set flag and plcp for fallback rate */
-		if (fbr) {
-			mch |= TXC_AMPDU_FBR;
-			txh->MacTxControlHigh = cpu_to_le16(mch);
-			BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
-			BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
-		}
-
-		BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n",
-			wlc->pub->unit, count, ampdu_len);
-
-		/* inform rate_sel if it this is a rate probe pkt */
-		frameid = le16_to_cpu(txh->TxFrameID);
-		if (frameid & TXFID_RATE_PROBE_MASK)
-			wiphy_err(wiphy, "%s: XXX what to do with "
-				  "TXFID_RATE_PROBE_MASK!?\n", __func__);
-
-		for (i = 0; i < count; i++)
-			brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
-				   ampdu->txpkt_weight);
-
+		mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
 	}
-	/* endif (count) */
-	return err;
+	mcl |= TXC_STARTMSDU;
+	if (ieee80211_is_rts(rts->frame_control)) {
+		mcl |= TXC_SENDRTS;
+		use_rts = true;
+	}
+	if (ieee80211_is_cts(rts->frame_control)) {
+		mcl |= TXC_SENDCTS;
+		use_cts = true;
+	}
+	txh->MacTxControlLow = cpu_to_le16(mcl);
+
+	fbr = txrate[1].count > 0;
+	if (!fbr) {
+		plcp0 = plcp[0];
+		plcp3 = plcp[3];
+	} else {
+		plcp0 = txh->FragPLCPFallback[0];
+		plcp3 = txh->FragPLCPFallback[3];
+	}
+	is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
+	sgi = plcp3_issgi(plcp3) ? 1 : 0;
+	mcs = plcp0 & ~MIMO_PLCP_40MHZ;
+
+	if (is40) {
+		if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
+			mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
+		else
+			mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
+	}
+
+	/* rebuild the rspec and rspec_fallback */
+	rspec = RSPEC_MIMORATE;
+	rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
+	if (plcp[0] & MIMO_PLCP_40MHZ)
+		rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
+
+	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
+	if (fbr_iscck) {
+		rspec_fallback =
+			cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
+	} else {
+		rspec_fallback = RSPEC_MIMORATE;
+		rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
+		if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
+			rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
+	}
+
+	if (use_rts || use_cts) {
+		rts_rspec =
+			brcms_c_rspec_to_rts_rspec(wlc, rspec,
+						   false, mimo_ctlchbw);
+		rts_rspec_fallback =
+			brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
+						   false, mimo_ctlchbw);
+	}
+
+	BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
+	/* mark plcp to indicate ampdu */
+	BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
+
+	/* reset the mixed mode header durations */
+	if (txh->MModeLen) {
+		u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
+						     session->ampdu_len);
+		txh->MModeLen = cpu_to_le16(mmodelen);
+		preamble_type = BRCMS_MM_PREAMBLE;
+	}
+	if (txh->MModeFbrLen) {
+		u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
+						     session->ampdu_len);
+		txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
+		fbr_preamble_type = BRCMS_MM_PREAMBLE;
+	}
+
+	/* set the preload length */
+	if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
+		dma_len = min(dma_len, f->ampdu_pld_size);
+		txh->PreloadSize = cpu_to_le16(dma_len);
+	} else {
+		txh->PreloadSize = 0;
+	}
+
+	mch = le16_to_cpu(txh->MacTxControlHigh);
+
+	/* update RTS dur fields */
+	if (use_rts || use_cts) {
+		u16 durid;
+		if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
+		    TXC_PREAMBLE_RTS_MAIN_SHORT)
+			rts_preamble_type = BRCMS_SHORT_PREAMBLE;
+
+		if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
+		     TXC_PREAMBLE_RTS_FB_SHORT)
+			rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
+
+		durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
+						   rspec, rts_preamble_type,
+						   preamble_type,
+						   session->ampdu_len, true);
+		rts->duration = cpu_to_le16(durid);
+		durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
+						   rts_rspec_fallback,
+						   rspec_fallback,
+						   rts_fbr_preamble_type,
+						   fbr_preamble_type,
+						   session->ampdu_len, true);
+		txh->RTSDurFallback = cpu_to_le16(durid);
+		/* set TxFesTimeNormal */
+		txh->TxFesTimeNormal = rts->duration;
+		/* set fallback rate version of TxFesTimeNormal */
+		txh->TxFesTimeFallback = txh->RTSDurFallback;
+	}
+
+	/* set flag and plcp for fallback rate */
+	if (fbr) {
+		mch |= TXC_AMPDU_FBR;
+		txh->MacTxControlHigh = cpu_to_le16(mch);
+		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
+		BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
+	}
+
+	brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
+		     wlc->pub->unit, skb_queue_len(&session->skb_list),
+		     session->ampdu_len);
 }
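brcms_c_ampdu_reset_session(), brcms_c_ampdu_add_frame() and brcms_c_ampdu_finalize() together replace the old monolithic brcms_c_sendampdu(): the transmit path (in main.c, outside this hunk) now owns the aggregation loop. Based on the comments above, the API is meant to be driven roughly like this; the frame source below is a placeholder, not a real driver function:

	struct brcms_ampdu_session session;
	struct sk_buff *p;

	brcms_c_ampdu_reset_session(&session, wlc);
	while ((p = example_next_ampdu_skb()) != NULL) {	/* placeholder source */
		if (brcms_c_ampdu_add_frame(&session, p)) {
			/* session is full (or priority changed): close this A-MPDU */
			brcms_c_ampdu_finalize(&session);
			/* hand session.skb_list to the DMA ring, then start over */
			brcms_c_ampdu_reset_session(&session, wlc);
			brcms_c_ampdu_add_frame(&session, p);
		}
	}
	brcms_c_ampdu_finalize(&session);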
 
 static void
@@ -909,7 +855,6 @@
 	u8 antselid = 0;
 	u8 retry_limit, rr_retry_limit;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
-	struct wiphy *wiphy = wlc->wiphy;
 
 #ifdef DEBUG
 	u8 hole[AMPDU_MAX_MPDU];
@@ -955,13 +900,14 @@
 		if (supr_status) {
 			update_rate = false;
 			if (supr_status == TX_STATUS_SUPR_BADCH) {
-				wiphy_err(wiphy,
+				brcms_err(wlc->hw->d11core,
 					  "%s: Pkt tx suppressed, illegal channel possibly %d\n",
 					  __func__, CHSPEC_CHANNEL(
 					  wlc->default_bss->chanspec));
 			} else {
 				if (supr_status != TX_STATUS_SUPR_FRAG)
-					wiphy_err(wiphy, "%s: supr_status 0x%x\n",
+					brcms_err(wlc->hw->d11core,
+						  "%s: supr_status 0x%x\n",
 						  __func__, supr_status);
 			}
 			/* no need to retry for badch; will fail again */
@@ -977,20 +923,14 @@
 				 * if there were underflows, but pre-loading
 				 * is not active, notify rate adaptation.
 				 */
-				if (brcms_c_ffpld_check_txfunfl(wlc,
-					prio2fifo[tid]) > 0)
+				if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
 					tx_error = true;
 			}
 		} else if (txs->phyerr) {
 			update_rate = false;
-			wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n",
+			brcms_err(wlc->hw->d11core,
+				  "%s: ampdu tx phy error (0x%x)\n",
 				  __func__, txs->phyerr);
-
-			if (brcm_msg_level & LOG_ERROR_VAL) {
-				brcmu_prpkt("txpkt (AMPDU)", p);
-				brcms_c_print_txdesc((struct d11txh *) p->data);
-			}
-			brcms_c_print_txstatus(txs);
 		}
 	}
 
@@ -1003,6 +943,8 @@
 		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
 		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
 
+		trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
+
 		if (tot_mpdu == 0) {
 			mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
 			mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
@@ -1012,10 +954,10 @@
 		ack_recd = false;
 		if (ba_recd) {
 			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
-			BCMMSG(wiphy,
-			       "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
-			       tid, seq, start_seq, bindex,
-			       isset(bitmap, bindex), index);
+			brcms_dbg_ht(wlc->hw->d11core,
+				     "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
+				     tid, seq, start_seq, bindex,
+				     isset(bitmap, bindex), index);
 			/* if acked then clear bit and free packet */
 			if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
 			    && isset(bitmap, bindex)) {
@@ -1046,14 +988,16 @@
 		/* either retransmit or send bar if ack not recd */
 		if (!ack_recd) {
 			if (retry && (ini->txretry[index] < (int)retry_limit)) {
+				int ret;
 				ini->txretry[index]++;
 				ini->tx_in_transit--;
+				ret = brcms_c_txfifo(wlc, queue, p);
 				/*
-				 * Use high prededence for retransmit to
-				 * give some punch
+				 * We shouldn't be out of space in the DMA
+				 * ring here since we're reinserting a frame
+				 * that was just pulled out.
 				 */
-				brcms_c_txq_enq(wlc, scb, p,
-						BRCMS_PRIO_TO_HI_PREC(tid));
+				WARN_ONCE(ret, "queue %d out of txds\n", queue);
 			} else {
 				/* Retry timeout */
 				ini->tx_in_transit--;
@@ -1064,9 +1008,9 @@
 				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
 				skb_pull(p, D11_PHY_HDR_LEN);
 				skb_pull(p, D11_TXH_LEN);
-				BCMMSG(wiphy,
-				       "BA Timeout, seq %d, in_transit %d\n",
-				       seq, ini->tx_in_transit);
+				brcms_dbg_ht(wlc->hw->d11core,
+					     "BA Timeout, seq %d, in_transit %d\n",
+					     seq, ini->tx_in_transit);
 				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
 							    p);
 			}
@@ -1080,12 +1024,9 @@
 
 		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
 	}
-	brcms_c_send_q(wlc);
 
 	/* update rate state */
 	antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
-
-	brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
 }
 
 void
@@ -1133,6 +1074,8 @@
 		while (p) {
 			tx_info = IEEE80211_SKB_CB(p);
 			txh = (struct d11txh *) p->data;
+			trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
+					   sizeof(*txh));
 			mcl = le16_to_cpu(txh->MacTxControlLow);
 			brcmu_pkt_buf_free_skb(p);
 			/* break out if last packet of ampdu */
@@ -1142,7 +1085,6 @@
 			p = dma_getnexttxp(wlc->hw->di[queue],
 					   DMA_RANGE_TRANSMITTED);
 		}
-		brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
 	}
 }
 
@@ -1182,23 +1124,6 @@
 }
 
 /*
- * callback function that helps flushing ampdu packets from a priority queue
- */
-static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
-{
-	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
-	struct cb_del_ampdu_pars *ampdu_pars =
-				 (struct cb_del_ampdu_pars *)arg_a;
-	bool rc;
-
-	rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
-	rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
-		    tx_info->rate_driver_data[0] == ampdu_pars->sta);
-	rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
-	return rc;
-}
-
-/*
  * callback function that helps invalidating ampdu packets in a DMA queue
  */
 static void dma_cb_fn_ampdu(void *txi, void *arg_a)
@@ -1218,15 +1143,5 @@
 void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
 		     struct ieee80211_sta *sta, u16 tid)
 {
-	struct brcms_txq_info *qi = wlc->pkt_queue;
-	struct pktq *pq = &qi->q;
-	int prec;
-	struct cb_del_ampdu_pars ampdu_pars;
-
-	ampdu_pars.sta = sta;
-	ampdu_pars.tid = tid;
-	for (prec = 0; prec < pq->num_prec; prec++)
-		brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
-			    (void *)&ampdu_pars);
 	brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 421f4ba..73d01e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -17,11 +17,34 @@
 #ifndef _BRCM_AMPDU_H_
 #define _BRCM_AMPDU_H_
 
+/*
+ * Data structure representing an in-progress session for accumulating
+ * frames for AMPDU.
+ *
+ * wlc: pointer to common driver data
+ * skb_list: queue of skb's for AMPDU
+ * max_ampdu_len: maximum length for this AMPDU
+ * max_ampdu_frames: maximum number of frames for this AMPDU
+ * ampdu_len: total number of bytes accumulated for this AMPDU
+ * dma_len: DMA length of this AMPDU
+ */
+struct brcms_ampdu_session {
+	struct brcms_c_info *wlc;
+	struct sk_buff_head skb_list;
+	unsigned max_ampdu_len;
+	u16 max_ampdu_frames;
+	u16 ampdu_len;
+	u16 dma_len;
+};
+
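+/*
+ * Typical flow (sketch): brcms_c_ampdu_reset_session() to start a session,
+ * brcms_c_ampdu_add_frame() per MPDU until it reports -ENOSPC, then
+ * brcms_c_ampdu_finalize() and reset again (see prep_ampdu_frame() in dma.c).
+ */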
+extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+					struct brcms_c_info *wlc);
+extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+				   struct sk_buff *p);
+extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+
 extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
 extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern int brcms_c_sendampdu(struct ampdu_info *ampdu,
-			     struct brcms_txq_info *qi,
-			     struct sk_buff **aggp, int prec);
 extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
 				 struct sk_buff *p, struct tx_status *txs);
 extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index 55e12c3..54c6169 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -21,6 +21,7 @@
 #include "main.h"
 #include "phy_shim.h"
 #include "antsel.h"
+#include "debug.h"
 
 #define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
 #define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
@@ -137,7 +138,8 @@
 				asi->antsel_avail = false;
 			} else {
 				asi->antsel_avail = false;
-				wiphy_err(wlc->wiphy, "antsel_attach: 2o3 "
+				brcms_err(wlc->hw->d11core,
+					  "antsel_attach: 2o3 "
 					  "board cfg invalid\n");
 			}
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
index 27dd73e..871781e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
@@ -14,22 +14,29 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM brcmsmac
-
 #if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
 
 #define __TRACE_BRCMSMAC_H
 
+#include <linux/types.h>
+#include <linux/device.h>
 #include <linux/tracepoint.h>
 #include "mac80211_if.h"
 
-#ifndef CONFIG_BRCMDBG
+#ifndef CONFIG_BRCM_TRACING
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, ...) \
 static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
 #endif
 
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac
+
 /*
  * We define a tracepoint, its arguments, its printk format and its
  * 'fast binary record' layout.
@@ -78,9 +85,165 @@
 	)
 );
 
+TRACE_EVENT(brcms_macintstatus,
+	TP_PROTO(const struct device *dev, int in_isr, u32 macintstatus,
+		 u32 mask),
+	TP_ARGS(dev, in_isr, macintstatus, mask),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__field(int, in_isr)
+		__field(u32, macintstatus)
+		__field(u32, mask)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev));
+		__entry->in_isr = in_isr;
+		__entry->macintstatus = macintstatus;
+		__entry->mask = mask;
+	),
+	TP_printk("[%s] in_isr=%d macintstatus=%#x mask=%#x", __get_str(dev),
+		  __entry->in_isr, __entry->macintstatus, __entry->mask)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_tx
+
+TRACE_EVENT(brcms_txdesc,
+	TP_PROTO(const struct device *dev,
+		 void *txh, size_t txh_len),
+	TP_ARGS(dev, txh, txh_len),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__dynamic_array(u8, txh, txh_len)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev));
+		memcpy(__get_dynamic_array(txh), txh, txh_len);
+	),
+	TP_printk("[%s] txdesc", __get_str(dev))
+);
+
+TRACE_EVENT(brcms_txstatus,
+	TP_PROTO(const struct device *dev, u16 framelen, u16 frameid,
+		 u16 status, u16 lasttxtime, u16 sequence, u16 phyerr,
+		 u16 ackphyrxsh),
+	TP_ARGS(dev, framelen, frameid, status, lasttxtime, sequence, phyerr,
+		ackphyrxsh),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__field(u16, framelen)
+		__field(u16, frameid)
+		__field(u16, status)
+		__field(u16, lasttxtime)
+		__field(u16, sequence)
+		__field(u16, phyerr)
+		__field(u16, ackphyrxsh)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev));
+		__entry->framelen = framelen;
+		__entry->frameid = frameid;
+		__entry->status = status;
+		__entry->lasttxtime = lasttxtime;
+		__entry->sequence = sequence;
+		__entry->phyerr = phyerr;
+		__entry->ackphyrxsh = ackphyrxsh;
+	),
+	TP_printk("[%s] FrameId %#04x TxStatus %#04x LastTxTime %#04x "
+		  "Seq %#04x PHYTxStatus %#04x RxAck %#04x",
+		  __get_str(dev), __entry->frameid, __entry->status,
+		  __entry->lasttxtime, __entry->sequence, __entry->phyerr,
+		  __entry->ackphyrxsh)
+);
+
+TRACE_EVENT(brcms_ampdu_session,
+	TP_PROTO(const struct device *dev, unsigned max_ampdu_len,
+		 u16 max_ampdu_frames, u16 ampdu_len, u16 ampdu_frames,
+		 u16 dma_len),
+	TP_ARGS(dev, max_ampdu_len, max_ampdu_frames, ampdu_len, ampdu_frames,
+		dma_len),
+	TP_STRUCT__entry(
+		__string(dev, dev_name(dev))
+		__field(unsigned, max_ampdu_len)
+		__field(u16, max_ampdu_frames)
+		__field(u16, ampdu_len)
+		__field(u16, ampdu_frames)
+		__field(u16, dma_len)
+	),
+	TP_fast_assign(
+		__assign_str(dev, dev_name(dev));
+		__entry->max_ampdu_len = max_ampdu_len;
+		__entry->max_ampdu_frames = max_ampdu_frames;
+		__entry->ampdu_len = ampdu_len;
+		__entry->ampdu_frames = ampdu_frames;
+		__entry->dma_len = dma_len;
+	),
+	TP_printk("[%s] ampdu session max_len=%u max_frames=%u len=%u frames=%u dma_len=%u",
+		  __get_str(dev), __entry->max_ampdu_len,
+		  __entry->max_ampdu_frames, __entry->ampdu_len,
+		  __entry->ampdu_frames, __entry->dma_len)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_msg
+
+#define MAX_MSG_LEN	100
+
+DECLARE_EVENT_CLASS(brcms_msg_event,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf),
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_info,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_warn,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_err,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_crit,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+TRACE_EVENT(brcms_dbg,
+	TP_PROTO(u32 level, const char *func, struct va_format *vaf),
+	TP_ARGS(level, func, vaf),
+	TP_STRUCT__entry(
+		__field(u32, level)
+		__string(func, func)
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		__entry->level = level;
+		__assign_str(func, func);
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
 #endif /* __TRACE_BRCMSMAC_H */
 
-#ifdef CONFIG_BRCMDBG
+#ifdef CONFIG_BRCM_TRACING
 
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH .
@@ -89,4 +252,4 @@
 
 #include <trace/define_trace.h>
 
-#endif /* CONFIG_BRCMDBG */
+#endif /* CONFIG_BRCM_TRACING */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 64a48f0..a90b722 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -26,6 +26,7 @@
 #include "stf.h"
 #include "channel.h"
 #include "mac80211_if.h"
+#include "debug.h"
 
 /* QDB() macro takes a dB value and converts to a quarter dB value */
 #define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
@@ -336,8 +337,6 @@
 	const char *ccode = sprom->alpha2;
 	int ccode_len = sizeof(sprom->alpha2);
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
 	wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
 	if (wlc_cm == NULL)
 		return NULL;
@@ -615,8 +614,8 @@
 
 	/* check the chanspec */
 	if (brcms_c_chspec_malformed(chspec)) {
-		wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
-			wlc->pub->unit, chspec);
+		brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n",
+			  wlc->pub->unit, chspec);
 		return false;
 	}
 
@@ -738,7 +737,8 @@
 		mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
 	} else {
 		mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
-		wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
+		brcms_err(wlc->hw->d11core,
+			  "wl%d: %s: no valid channel for \"%s\"\n",
 			  wlc->pub->unit, __func__, request->alpha2);
 	}
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
new file mode 100644
index 0000000..6ba4136
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -0,0 +1,44 @@
+#include <linux/net.h>
+#include "types.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
+
+#define __brcms_fn(fn)						\
+void __brcms_ ##fn(struct device *dev, const char *fmt, ...)	\
+{								\
+	struct va_format vaf = {				\
+		.fmt = fmt,					\
+	};							\
+	va_list args;						\
+								\
+	va_start(args, fmt);					\
+	vaf.va = &args;						\
+	dev_ ##fn(dev, "%pV", &vaf);				\
+	trace_brcms_ ##fn(&vaf);				\
+	va_end(args);						\
+}
+
+__brcms_fn(info)
+__brcms_fn(warn)
+__brcms_fn(err)
+__brcms_fn(crit)
+
+#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+		 const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+#ifdef CONFIG_BRCMDBG
+	if ((brcm_msg_level & level) && net_ratelimit())
+		dev_err(dev, "%s %pV", func, &vaf);
+#endif
+	trace_brcms_dbg(level, func, &vaf);
+	va_end(args);
+}
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
new file mode 100644
index 0000000..f77066b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
@@ -0,0 +1,52 @@
+#ifndef _BRCMS_DEBUG_H_
+#define _BRCMS_DEBUG_H_
+
+#include <linux/device.h>
+#include <linux/bcma/bcma.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include "main.h"
+#include "mac80211_if.h"
+
+__printf(2, 3)
+void __brcms_info(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_warn(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_err(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_crit(struct device *dev, const char *fmt, ...);
+
+#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
+__printf(4, 5)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+		 const char *fmt, ...);
+#else
+static inline __printf(4, 5)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+		 const char *fmt, ...)
+{
+}
+#endif
+
+/*
+ * Debug macros cannot be used when wlc is uninitialized. Generally
+ * this means any code that could run before brcms_c_attach() has
+ * returned successfully probably shouldn't use the following macros.
+ */
+
+#define brcms_dbg(core, l, f, a...)	__brcms_dbg(&(core)->dev, l, __func__, f, ##a)
+#define brcms_info(core, f, a...)	__brcms_info(&(core)->dev, f, ##a)
+#define brcms_warn(core, f, a...)	__brcms_warn(&(core)->dev, f, ##a)
+#define brcms_err(core, f, a...)	__brcms_err(&(core)->dev, f, ##a)
+#define brcms_crit(core, f, a...)	__brcms_crit(&(core)->dev, f, ##a)
+
+#define brcms_dbg_info(core, f, a...)		brcms_dbg(core, BRCM_DL_INFO, f, ##a)
+#define brcms_dbg_mac80211(core, f, a...)	brcms_dbg(core, BRCM_DL_MAC80211, f, ##a)
+#define brcms_dbg_rx(core, f, a...)		brcms_dbg(core, BRCM_DL_RX, f, ##a)
+#define brcms_dbg_tx(core, f, a...)		brcms_dbg(core, BRCM_DL_TX, f, ##a)
+#define brcms_dbg_int(core, f, a...)		brcms_dbg(core, BRCM_DL_INT, f, ##a)
+#define brcms_dbg_dma(core, f, a...)		brcms_dbg(core, BRCM_DL_DMA, f, ##a)
+#define brcms_dbg_ht(core, f, a...)		brcms_dbg(core, BRCM_DL_HT, f, ##a)
+
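+/*
+ * Example usage (hypothetical values):
+ *	brcms_dbg_tx(wlc->hw->d11core, "fifo %d: sent %d frames\n", fifo, cnt);
+ */
+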
+#endif /* _BRCMS_DEBUG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 5e53305..511e457 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -14,17 +14,22 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
 
 #include <brcmu_utils.h>
 #include <aiutils.h>
 #include "types.h"
+#include "main.h"
 #include "dma.h"
 #include "soc.h"
+#include "scb.h"
+#include "ampdu.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
 
 /*
  * dma register field offset calculation
@@ -176,28 +181,6 @@
 
 #define BCMEXTRAHDROOM 172
 
-/* debug/trace */
-#ifdef DEBUG
-#define	DMA_ERROR(fmt, ...)					\
-do {								\
-	if (*di->msg_level & 1)					\
-		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
-} while (0)
-#define	DMA_TRACE(fmt, ...)					\
-do {								\
-	if (*di->msg_level & 2)					\
-		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
-} while (0)
-#else
-#define	DMA_ERROR(fmt, ...)			\
-	no_printk(fmt, ##__VA_ARGS__)
-#define	DMA_TRACE(fmt, ...)			\
-	no_printk(fmt, ##__VA_ARGS__)
-#endif				/* DEBUG */
-
-#define	DMA_NONE(fmt, ...)			\
-	no_printk(fmt, ##__VA_ARGS__)
-
 #define	MAXNAMEL	8	/* 8 char names */
 
 /* macros to convert between byte offsets and indexes */
@@ -224,12 +207,14 @@
 /* dma engine software state */
 struct dma_info {
 	struct dma_pub dma; /* exported structure */
-	uint *msg_level;	/* message level pointer */
 	char name[MAXNAMEL];	/* callers name for diag msgs */
 
 	struct bcma_device *core;
 	struct device *dmadev;
 
+	/* session information for AMPDU */
+	struct brcms_ampdu_session ampdu_session;
+
 	bool dma64;	/* this dma engine is operating in 64-bit mode */
 	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */
 
@@ -298,12 +283,6 @@
 	bool aligndesc_4k;
 };
 
-/*
- * default dma message level (if input msg_level
- * pointer is null in dma_attach())
- */
-static uint dma_msg_level;
-
 /* Check for odd number of 1's */
 static u32 parity32(__le32 data)
 {
@@ -353,7 +332,7 @@
 
 static uint nextrxd(struct dma_info *di, uint i)
 {
-	return txd(di, i + 1);
+	return rxd(di, i + 1);
 }
 
 static uint ntxdactive(struct dma_info *di, uint h, uint t)
@@ -371,7 +350,7 @@
 	uint dmactrlflags;
 
 	if (di == NULL) {
-		DMA_ERROR("NULL dma handle\n");
+		brcms_dbg_dma(di->core, "NULL dma handle\n");
 		return 0;
 	}
 
@@ -423,13 +402,15 @@
 	/* not all tx or rx channel are available */
 	if (di->d64txregbase != 0) {
 		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
-			DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
-				  di->name);
+			brcms_dbg_dma(di->core,
+				      "%s: DMA64 tx doesn't have AE set\n",
+				      di->name);
 		return true;
 	} else if (di->d64rxregbase != 0) {
 		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
-			DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
-				  di->name);
+			brcms_dbg_dma(di->core,
+				      "%s: DMA64 rx doesn't have AE set\n",
+				      di->name);
 		return true;
 	}
 
@@ -530,8 +511,9 @@
 		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
 			&alloced, &di->txdpaorig);
 		if (va == NULL) {
-			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
-				  di->name);
+			brcms_dbg_dma(di->core,
+				      "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+				      di->name);
 			return false;
 		}
 		align = (1 << align_bits);
@@ -544,8 +526,9 @@
 		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
 			&alloced, &di->rxdpaorig);
 		if (va == NULL) {
-			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
-				  di->name);
+			brcms_dbg_dma(di->core,
+				      "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+				      di->name);
 			return false;
 		}
 		align = (1 << align_bits);
@@ -564,12 +547,13 @@
 	return dma64_alloc(di, direction);
 }
 
-struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-			   struct bcma_device *core,
+struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
 			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
 			   uint rxbufsize, int rxextheadroom,
-			   uint nrxpost, uint rxoffset, uint *msg_level)
+			   uint nrxpost, uint rxoffset)
 {
+	struct si_pub *sih = wlc->hw->sih;
+	struct bcma_device *core = wlc->hw->d11core;
 	struct dma_info *di;
 	u8 rev = core->id.rev;
 	uint size;
@@ -580,9 +564,6 @@
 	if (di == NULL)
 		return NULL;
 
-	di->msg_level = msg_level ? msg_level : &dma_msg_level;
-
-
 	di->dma64 =
 		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
 
@@ -598,11 +579,11 @@
 	 */
 	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 
-	DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
-		  "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
-		  "txregbase %u rxregbase %u\n", name, "DMA64",
-		  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-		  rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
+	brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
+		      "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+		      "txregbase %u rxregbase %u\n", name, "DMA64",
+		      di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+		      rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
 
 	/* make a private copy of our callers name */
 	strncpy(di->name, name, MAXNAMEL);
@@ -664,8 +645,8 @@
 		di->dmadesc_align = 4;	/* 16 byte alignment */
 	}
 
-	DMA_NONE("DMA descriptor align_needed %d, align %d\n",
-		 di->aligndesc_4k, di->dmadesc_align);
+	brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
+		      di->aligndesc_4k, di->dmadesc_align);
 
 	/* allocate tx packet pointer vector */
 	if (ntxd) {
@@ -703,21 +684,27 @@
 
 	if ((di->ddoffsetlow != 0) && !di->addrext) {
 		if (di->txdpa > SI_PCI_DMA_SZ) {
-			DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
-				  di->name, (u32)di->txdpa);
+			brcms_dbg_dma(di->core,
+				      "%s: txdpa 0x%x: addrext not supported\n",
+				      di->name, (u32)di->txdpa);
 			goto fail;
 		}
 		if (di->rxdpa > SI_PCI_DMA_SZ) {
-			DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
-				  di->name, (u32)di->rxdpa);
+			brcms_dbg_dma(di->core,
+				      "%s: rxdpa 0x%x: addrext not supported\n",
+				      di->name, (u32)di->rxdpa);
 			goto fail;
 		}
 	}
 
-	DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
-		  di->ddoffsetlow, di->ddoffsethigh,
-		  di->dataoffsetlow, di->dataoffsethigh,
-		  di->addrext);
+	/* Initialize AMPDU session */
+	brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);
+
+	brcms_dbg_dma(di->core,
+		      "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+		      di->ddoffsetlow, di->ddoffsethigh,
+		      di->dataoffsetlow, di->dataoffsethigh,
+		      di->addrext);
 
 	return (struct dma_pub *) di;
 
@@ -763,7 +750,7 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	/* free dma descriptor rings */
 	if (di->txd64)
@@ -839,7 +826,7 @@
 	uint dmactrlflags = di->dma.dmactrlflags;
 	u32 control;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	control = D64_RC_RE | (bcma_read32(di->core,
 					   DMA64RXREGOFFS(di, control)) &
@@ -859,7 +846,7 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	if (di->nrxd == 0)
 		return;
@@ -954,7 +941,7 @@
 		return 0;
 
 	len = le16_to_cpu(*(__le16 *) (p->data));
-	DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+	brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
 	dma_spin_for_len(len, p);
 
 	/* set actual length */
@@ -981,14 +968,15 @@
 					      DMA64RXREGOFFS(di, status0)) &
 				  D64_RS0_CD_MASK) - di->rcvptrbase) &
 				D64_RS0_CD_MASK, struct dma64desc);
-			DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
-				   di->rxin, di->rxout, cur);
+			brcms_dbg_dma(di->core,
+				      "rxin %d rxout %d, hw_curr %d\n",
+				      di->rxin, di->rxout, cur);
 		}
 #endif				/* DEBUG */
 
 		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
-			DMA_ERROR("%s: bad frame length (%d)\n",
-				  di->name, len);
+			brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
+				      di->name, len);
 			skb_queue_walk_safe(&dma_frames, p, next) {
 				skb_unlink(p, &dma_frames);
 				brcmu_pkt_buf_free_skb(p);
@@ -1005,7 +993,7 @@
 
 static bool dma64_rxidle(struct dma_info *di)
 {
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	if (di->nrxd == 0)
 		return true;
@@ -1016,6 +1004,17 @@
 		 D64_RS0_CD_MASK));
 }
 
+static bool dma64_txidle(struct dma_info *di)
+{
+	if (di->ntxd == 0)
+		return true;
+
+	return ((bcma_read32(di->core,
+			     DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
+		(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
+		 D64_XS0_CD_MASK));
+}
+
 /*
  * post receive buffers
 *  return false if refill failed completely and ring is empty; this will stall
@@ -1047,7 +1046,7 @@
 
 	n = di->nrxpost - nrxdactive(di, rxin, rxout);
 
-	DMA_TRACE("%s: post %d\n", di->name, n);
+	brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);
 
 	if (di->rxbufsize > BCMEXTRAHDROOM)
 		extra_offset = di->rxextrahdrroom;
@@ -1060,9 +1059,11 @@
 		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
 
 		if (p == NULL) {
-			DMA_ERROR("%s: out of rxbufs\n", di->name);
+			brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
+				      di->name);
 			if (i == 0 && dma64_rxidle(di)) {
-				DMA_ERROR("%s: ring is empty !\n", di->name);
+				brcms_dbg_dma(di->core, "%s: ring is empty !\n",
+					      di->name);
 				ring_empty = true;
 			}
 			di->dma.rxnobuf++;
@@ -1107,7 +1108,7 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	struct sk_buff *p;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	while ((p = _dma_getnextrxp(di, true)))
 		brcmu_pkt_buf_free_skb(p);
@@ -1138,7 +1139,7 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	u32 control = D64_XC_XE;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
@@ -1170,7 +1171,7 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
@@ -1182,7 +1183,7 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE("%s:\n", di->name);
+	brcms_dbg_dma(di->core, "%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
@@ -1205,11 +1206,11 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	struct sk_buff *p;
 
-	DMA_TRACE("%s: %s\n",
-		  di->name,
-		  range == DMA_RANGE_ALL ? "all" :
-		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
-		  "transferred");
+	brcms_dbg_dma(di->core, "%s: %s\n",
+		      di->name,
+		      range == DMA_RANGE_ALL ? "all" :
+		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+		      "transferred");
 
 	if (di->txin == di->txout)
 		return;
@@ -1264,39 +1265,25 @@
 	return status == D64_RS0_RS_DISABLED;
 }
 
-/*
- * !! tx entry routine
- * WARNING: call must check the return value for error.
- *   the error(toss frames) could be fatal and cause many subsequent hard
- *   to debug problems
- */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
+static void dma_txenq(struct dma_info *di, struct sk_buff *p)
 {
-	struct dma_info *di = (struct dma_info *)pub;
 	unsigned char *data;
 	uint len;
 	u16 txout;
 	u32 flags = 0;
 	dma_addr_t pa;
 
-	DMA_TRACE("%s:\n", di->name);
-
 	txout = di->txout;
 
+	if (WARN_ON(nexttxd(di, txout) == di->txin))
+		return;
+
 	/*
 	 * obtain and initialize transmit descriptor entry.
 	 */
 	data = p->data;
 	len = p->len;
 
-	/* no use to transmit a zero length packet */
-	if (len == 0)
-		return 0;
-
-	/* return nonzero if out of tx descriptors */
-	if (nexttxd(di, txout) == di->txin)
-		goto outoftxd;
-
 	/* get physical address of buffer start */
 	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
 
@@ -1318,23 +1305,147 @@
 
 	/* bump the tx descriptor index */
 	di->txout = txout;
+}
 
-	/* kick the chip */
-	if (commit)
-		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
-		      di->xmtptrbase + I2B(txout, struct dma64desc));
+static void ampdu_finalize(struct dma_info *di)
+{
+	struct brcms_ampdu_session *session = &di->ampdu_session;
+	struct sk_buff *p;
+
+	trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
+				  session->max_ampdu_len,
+				  session->max_ampdu_frames,
+				  session->ampdu_len,
+				  skb_queue_len(&session->skb_list),
+				  session->dma_len);
+
+	if (WARN_ON(skb_queue_empty(&session->skb_list)))
+		return;
+
+	brcms_c_ampdu_finalize(session);
+
+	while (!skb_queue_empty(&session->skb_list)) {
+		p = skb_dequeue(&session->skb_list);
+		dma_txenq(di, p);
+	}
+
+	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
+	brcms_c_ampdu_reset_session(session, session->wlc);
+}
+
+static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
+{
+	struct brcms_ampdu_session *session = &di->ampdu_session;
+	int ret;
+
+	ret = brcms_c_ampdu_add_frame(session, p);
+	if (ret == -ENOSPC) {
+		/*
+		 * AMPDU cannot accommodate this frame. Close out the in-
+		 * progress AMPDU session and start a new one.
+		 */
+		ampdu_finalize(di);
+		ret = brcms_c_ampdu_add_frame(session, p);
+	}
+
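+	/* After ampdu_finalize() the session is empty, so the add_frame()
+	 * retry above is not expected to fail.
+	 */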
+	WARN_ON(ret);
+}
+
+/* Update count of available tx descriptors based on current DMA state */
+static void dma_update_txavail(struct dma_info *di)
+{
+	/*
+	 * Available space is the number of descriptors, less the number of
+	 * active descriptors and the number of queued AMPDU frames.
+	 */
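+	/*
+	 * The trailing "- 1" presumably keeps one descriptor unused so a
+	 * full ring can be told apart from an empty one.
+	 */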
+	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
+			  skb_queue_len(&di->ampdu_session.skb_list) - 1;
+}
+
+/*
+ * !! tx entry routine
+ * WARNING: the caller must check the return value for errors.
+ *   A tossed frame here could be fatal and cause many subsequent
+ *   hard-to-debug problems.
+ */
+int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
+	       struct sk_buff *p)
+{
+	struct dma_info *di = (struct dma_info *)pub;
+	struct brcms_ampdu_session *session = &di->ampdu_session;
+	struct ieee80211_tx_info *tx_info;
+	bool is_ampdu;
+
+	/* no use to transmit a zero length packet */
+	if (p->len == 0)
+		return 0;
+
+	/* return nonzero if out of tx descriptors */
+	if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
+		goto outoftxd;
+
+	tx_info = IEEE80211_SKB_CB(p);
+	is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
+	if (is_ampdu)
+		prep_ampdu_frame(di, p);
+	else
+		dma_txenq(di, p);
 
 	/* tx flow control */
-	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+	dma_update_txavail(di);
+
+	/* kick the chip */
+	if (is_ampdu) {
+		/*
+		 * Start sending data if we've got a full AMPDU, there's
+		 * no more space in the DMA ring, or the ring isn't
+		 * currently transmitting.
+		 */
+		if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
+		    di->dma.txavail == 0 || dma64_txidle(di))
+			ampdu_finalize(di);
+	} else {
+		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+			     di->xmtptrbase + I2B(di->txout, struct dma64desc));
+	}
 
 	return 0;
 
  outoftxd:
-	DMA_ERROR("%s: out of txds !!!\n", di->name);
+	brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
 	brcmu_pkt_buf_free_skb(p);
 	di->dma.txavail = 0;
 	di->dma.txnobuf++;
-	return -1;
+	return -ENOSPC;
+}
+
+void dma_txflush(struct dma_pub *pub)
+{
+	struct dma_info *di = (struct dma_info *)pub;
+	struct brcms_ampdu_session *session = &di->ampdu_session;
+
+	if (!skb_queue_empty(&session->skb_list))
+		ampdu_finalize(di);
+}
+
+int dma_txpending(struct dma_pub *pub)
+{
+	struct dma_info *di = (struct dma_info *)pub;
+	return ntxdactive(di, di->txin, di->txout);
+}
+
+/*
+ * If we have an active AMPDU session and are not transmitting,
+ * this function will force tx to start.
+ */
+void dma_kick_tx(struct dma_pub *pub)
+{
+	struct dma_info *di = (struct dma_info *)pub;
+	struct brcms_ampdu_session *session = &di->ampdu_session;
+
+	if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
+		ampdu_finalize(di);
 }
 
 /*
@@ -1354,11 +1465,11 @@
 	u16 active_desc;
 	struct sk_buff *txp;
 
-	DMA_TRACE("%s: %s\n",
-		  di->name,
-		  range == DMA_RANGE_ALL ? "all" :
-		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
-		  "transferred");
+	brcms_dbg_dma(di->core, "%s: %s\n",
+		      di->name,
+		      range == DMA_RANGE_ALL ? "all" :
+		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+		      "transferred");
 
 	if (di->ntxd == 0)
 		return NULL;
@@ -1412,13 +1523,13 @@
 	di->txin = i;
 
 	/* tx flow control */
-	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+	dma_update_txavail(di);
 
 	return txp;
 
  bogus:
-	DMA_NONE("bogus curr: start %d end %d txout %d\n",
-		 start, end, di->txout);
+	brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
+		      start, end, di->txout);
 	return NULL;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index cc269ee..ff5b80b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -74,12 +74,11 @@
 	uint txnobuf;		/* tx out of dma descriptors */
 };
 
-extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-				  struct bcma_device *d11core,
+extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
 				  uint txregbase, uint rxregbase,
 				  uint ntxd, uint nrxd,
 				  uint rxbufsize, int rxextheadroom,
-				  uint nrxpost, uint rxoffset, uint *msg_level);
+				  uint nrxpost, uint rxoffset);
 
 void dma_rxinit(struct dma_pub *pub);
 int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
@@ -87,7 +86,11 @@
 bool dma_rxreset(struct dma_pub *pub);
 bool dma_txreset(struct dma_pub *pub);
 void dma_txinit(struct dma_pub *pub);
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit);
+int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
+	       struct sk_buff *p0);
+void dma_txflush(struct dma_pub *pub);
+int dma_txpending(struct dma_pub *pub);
+void dma_kick_tx(struct dma_pub *pub);
 void dma_txsuspend(struct dma_pub *pub);
 bool dma_txsuspended(struct dma_pub *pub);
 void dma_txresume(struct dma_pub *pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a744ea5..1710ccb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -33,6 +33,7 @@
 #include "ucode_loader.h"
 #include "mac80211_if.h"
 #include "main.h"
+#include "debug.h"
 
 #define N_TX_QUEUES	4 /* #tx queues on mac80211<->driver interface */
 
@@ -98,10 +99,14 @@
 };
 MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
 
-#ifdef DEBUG
-static int msglevel = 0xdeadbeef;
-module_param(msglevel, int, 0);
-#endif				/* DEBUG */
+#if defined(CONFIG_BRCMDBG)
+/*
+ * Module parameter for setting the debug message level. Available
+ * flags are specified by the BRCM_DL_* macros in
+ * drivers/net/wireless/brcm80211/include/defs.h.
+ */
+module_param_named(debug, brcm_msg_level, uint, S_IRUGO | S_IWUSR);
+#endif
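+
+/*
+ * Usage sketch (module name assumed to be brcmsmac): load with
+ * "debug=<bitmask of BRCM_DL_* flags>", or write the bitmask to
+ * /sys/module/brcmsmac/parameters/debug at runtime.
+ */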
 
 static struct ieee80211_channel brcms_2ghz_chantable[] = {
 	CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
@@ -276,7 +281,7 @@
 
 	spin_lock_bh(&wl->lock);
 	if (!wl->pub->up) {
-		wiphy_err(wl->wiphy, "ops->tx called while down\n");
+		brcms_err(wl->wlc->hw->d11core, "ops->tx called while down\n");
 		kfree_skb(skb);
 		goto done;
 	}
@@ -313,8 +318,8 @@
 	spin_unlock_bh(&wl->lock);
 
 	if (err != 0)
-		wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
-			  err);
+		brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
+			  __func__, err);
 	return err;
 }
 
@@ -332,7 +337,7 @@
 	status = brcms_c_chipmatch(wl->wlc->hw->d11core);
 	spin_unlock_bh(&wl->lock);
 	if (!status) {
-		wiphy_err(wl->wiphy,
+		brcms_err(wl->wlc->hw->d11core,
 			  "wl: brcms_ops_stop: chipmatch failed\n");
 		return;
 	}
@@ -350,8 +355,9 @@
 
 	/* Just STA for now */
 	if (vif->type != NL80211_IFTYPE_STATION) {
-		wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
-			  " STA for now\n", __func__, vif->type);
+		brcms_err(wl->wlc->hw->d11core,
+			  "%s: Attempt to add type %d, only STA for now\n",
+			  __func__, vif->type);
 		return -EOPNOTSUPP;
 	}
 
@@ -370,9 +376,9 @@
 {
 	struct ieee80211_conf *conf = &hw->conf;
 	struct brcms_info *wl = hw->priv;
+	struct bcma_device *core = wl->wlc->hw->d11core;
 	int err = 0;
 	int new_int;
-	struct wiphy *wiphy = hw->wiphy;
 
 	spin_lock_bh(&wl->lock);
 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
@@ -380,25 +386,26 @@
 						   conf->listen_interval);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR)
-		wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
-			  __func__, conf->flags & IEEE80211_CONF_MONITOR ?
-			  "true" : "false");
+		brcms_dbg_info(core, "%s: change monitor mode: %s\n",
+			       __func__, conf->flags & IEEE80211_CONF_MONITOR ?
+			       "true" : "false");
 	if (changed & IEEE80211_CONF_CHANGE_PS)
-		wiphy_err(wiphy, "%s: change power-save mode: %s (implement)\n",
+		brcms_err(core, "%s: change power-save mode: %s (implement)\n",
 			  __func__, conf->flags & IEEE80211_CONF_PS ?
 			  "true" : "false");
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 		err = brcms_c_set_tx_power(wl->wlc, conf->power_level);
 		if (err < 0) {
-			wiphy_err(wiphy, "%s: Error setting power_level\n",
+			brcms_err(core, "%s: Error setting power_level\n",
 				  __func__);
 			goto config_out;
 		}
 		new_int = brcms_c_get_tx_power(wl->wlc);
 		if (new_int != conf->power_level)
-			wiphy_err(wiphy, "%s: Power level req != actual, %d %d"
-				  "\n", __func__, conf->power_level,
+			brcms_err(core,
+				  "%s: Power level req != actual, %d %d\n",
+				  __func__, conf->power_level,
 				  new_int);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -425,13 +432,13 @@
 			struct ieee80211_bss_conf *info, u32 changed)
 {
 	struct brcms_info *wl = hw->priv;
-	struct wiphy *wiphy = hw->wiphy;
+	struct bcma_device *core = wl->wlc->hw->d11core;
 
 	if (changed & BSS_CHANGED_ASSOC) {
 		/* association status changed (associated/disassociated)
 		 * also implies a change in the AID.
 		 */
-		wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME,
+		brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
 			  __func__, info->assoc ? "" : "dis");
 		spin_lock_bh(&wl->lock);
 		brcms_c_associate_upd(wl->wlc, info->assoc);
@@ -491,7 +498,7 @@
 		error = brcms_c_set_rateset(wl->wlc, &rs);
 		spin_unlock_bh(&wl->lock);
 		if (error)
-			wiphy_err(wiphy, "changing basic rates failed: %d\n",
+			brcms_err(core, "changing basic rates failed: %d\n",
 				  error);
 	}
 	if (changed & BSS_CHANGED_BEACON_INT) {
@@ -508,30 +515,30 @@
 	}
 	if (changed & BSS_CHANGED_BEACON)
 		/* Beacon data changed, retrieve new beacon (beaconing modes) */
-		wiphy_err(wiphy, "%s: beacon changed\n", __func__);
+		brcms_err(core, "%s: beacon changed\n", __func__);
 
 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
 		/* Beaconing should be enabled/disabled (beaconing modes) */
-		wiphy_err(wiphy, "%s: Beacon enabled: %s\n", __func__,
+		brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
 			  info->enable_beacon ? "true" : "false");
 	}
 
 	if (changed & BSS_CHANGED_CQM) {
 		/* Connection quality monitor config changed */
-		wiphy_err(wiphy, "%s: cqm change: threshold %d, hys %d "
+		brcms_err(core, "%s: cqm change: threshold %d, hys %d "
 			  " (implement)\n", __func__, info->cqm_rssi_thold,
 			  info->cqm_rssi_hyst);
 	}
 
 	if (changed & BSS_CHANGED_IBSS) {
 		/* IBSS join status changed */
-		wiphy_err(wiphy, "%s: IBSS joined: %s (implement)\n", __func__,
-			  info->ibss_joined ? "true" : "false");
+		brcms_err(core, "%s: IBSS joined: %s (implement)\n",
+			  __func__, info->ibss_joined ? "true" : "false");
 	}
 
 	if (changed & BSS_CHANGED_ARP_FILTER) {
 		/* Hardware ARP filter address list or state changed */
-		wiphy_err(wiphy, "%s: arp filtering: enabled %s, count %d"
+		brcms_err(core, "%s: arp filtering: enabled %s, count %d"
 			  " (implement)\n", __func__, info->arp_filter_enabled ?
 			  "true" : "false", info->arp_addr_cnt);
 	}
@@ -541,8 +548,8 @@
 		 * QoS for this association was enabled/disabled.
 		 * Note that it is only ever disabled for station mode.
 		 */
-		wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__,
-			  info->qos ? "true" : "false");
+		brcms_err(core, "%s: qos enabled: %s (implement)\n",
+			  __func__, info->qos ? "true" : "false");
 	}
 	return;
 }
@@ -553,25 +560,25 @@
 			unsigned int *total_flags, u64 multicast)
 {
 	struct brcms_info *wl = hw->priv;
-	struct wiphy *wiphy = hw->wiphy;
+	struct bcma_device *core = wl->wlc->hw->d11core;
 
 	changed_flags &= MAC_FILTERS;
 	*total_flags &= MAC_FILTERS;
 
 	if (changed_flags & FIF_PROMISC_IN_BSS)
-		wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
+		brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
 	if (changed_flags & FIF_ALLMULTI)
-		wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
+		brcms_dbg_info(core, "FIF_ALLMULTI\n");
 	if (changed_flags & FIF_FCSFAIL)
-		wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
+		brcms_dbg_info(core, "FIF_FCSFAIL\n");
 	if (changed_flags & FIF_CONTROL)
-		wiphy_dbg(wiphy, "FIF_CONTROL\n");
+		brcms_dbg_info(core, "FIF_CONTROL\n");
 	if (changed_flags & FIF_OTHER_BSS)
-		wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+		brcms_dbg_info(core, "FIF_OTHER_BSS\n");
 	if (changed_flags & FIF_PSPOLL)
-		wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+		brcms_dbg_info(core, "FIF_PSPOLL\n");
 	if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
-		wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+		brcms_dbg_info(core, "FIF_BCN_PRBRESP_PROMISC\n");
 
 	spin_lock_bh(&wl->lock);
 	brcms_c_mac_promisc(wl->wlc, *total_flags);
@@ -653,8 +660,8 @@
 		status = brcms_c_aggregatable(wl->wlc, tid);
 		spin_unlock_bh(&wl->lock);
 		if (!status) {
-			wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n",
-				  tid);
+			brcms_err(wl->wlc->hw->d11core,
+				  "START: tid %d is not agg\'able\n", tid);
 			return -EINVAL;
 		}
 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -681,8 +688,8 @@
 		/* Power save wakeup */
 		break;
 	default:
-		wiphy_err(wl->wiphy, "%s: Invalid command, ignoring\n",
-			  __func__);
+		brcms_err(wl->wlc->hw->d11core,
+			  "%s: Invalid command, ignoring\n", __func__);
 	}
 
 	return 0;
@@ -1144,14 +1151,13 @@
 	wl->pub->hw_up = false;
 	spin_unlock_bh(&wl->lock);
 
-	pr_debug("brcms_suspend ok\n");
+	brcms_dbg_info(wl->wlc->hw->d11core, "brcms_suspend ok\n");
 
 	return 0;
 }
 
 static int brcms_resume(struct bcma_device *pdev)
 {
-	pr_debug("brcms_resume ok\n");
 	return 0;
 }
 
@@ -1184,10 +1190,6 @@
 
 static int __init brcms_module_init(void)
 {
-#ifdef DEBUG
-	if (msglevel != 0xdeadbeef)
-		brcm_msg_level = msglevel;
-#endif
 	if (!schedule_work(&brcms_driver_work))
 		return -EBUSY;
 
@@ -1216,7 +1218,7 @@
 void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
 			 bool state, int prio)
 {
-	wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__);
+	brcms_err(wl->wlc->hw->d11core, "Shouldn't be here %s\n", __func__);
 }
 
 /*
@@ -1224,7 +1226,8 @@
  */
 void brcms_init(struct brcms_info *wl)
 {
-	BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+	brcms_dbg_info(wl->wlc->hw->d11core, "Initializing wl%d\n",
+		       wl->pub->unit);
 	brcms_reset(wl);
 	brcms_c_init(wl->wlc, wl->mute_tx);
 }
@@ -1234,7 +1237,7 @@
  */
 uint brcms_reset(struct brcms_info *wl)
 {
-	BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+	brcms_dbg_info(wl->wlc->hw->d11core, "Resetting wl%d\n", wl->pub->unit);
 	brcms_c_reset(wl->wlc);
 
 	/* dpc will not be rescheduled */
@@ -1248,7 +1251,7 @@
 
 void brcms_fatal_error(struct brcms_info *wl)
 {
-	wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+	brcms_err(wl->wlc->hw->d11core, "wl%d: fatal error, reinitializing\n",
 		  wl->wlc->pub->unit);
 	brcms_reset(wl);
 	ieee80211_restart_hw(wl->pub->ieee_hw);
@@ -1396,8 +1399,9 @@
 
 #ifdef DEBUG
 	if (t->set)
-		wiphy_err(hw->wiphy, "%s: Already set. Name: %s, per %d\n",
-			  __func__, t->name, periodic);
+		brcms_dbg_info(t->wl->wlc->hw->d11core,
+			       "%s: Already set. Name: %s, per %d\n",
+			       __func__, t->name, periodic);
 #endif
 	t->ms = ms;
 	t->periodic = (bool) periodic;
@@ -1486,8 +1490,8 @@
 			}
 		}
 	}
-	wiphy_err(wl->wiphy, "ERROR: ucode buf tag:%d can not be found!\n",
-		  idx);
+	brcms_err(wl->wlc->hw->d11core,
+		  "ERROR: ucode buf tag:%d can not be found!\n", idx);
 	*pbuf = NULL;
 fail:
 	return -ENODATA;
@@ -1510,7 +1514,7 @@
 				pdata = wl->fw.fw_bin[i]->data +
 					le32_to_cpu(hdr->offset);
 				if (le32_to_cpu(hdr->len) != 4) {
-					wiphy_err(wl->wiphy,
+					brcms_err(wl->wlc->hw->d11core,
 						  "ERROR: fw hdr len\n");
 					return -ENOMSG;
 				}
@@ -1519,7 +1523,8 @@
 			}
 		}
 	}
-	wiphy_err(wl->wiphy, "ERROR: ucode tag:%d can not be found!\n", idx);
+	brcms_err(wl->wlc->hw->d11core,
+		  "ERROR: ucode tag:%d can not be found!\n", idx);
 	return -ENOMSG;
 }
 
@@ -1560,8 +1565,8 @@
 				sizeof(struct firmware_hdr));
 			rc = -EBADF;
 		} else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
-			wiphy_err(wl->wiphy, "%s: out of bounds fw file size "
-				  "%zu\n", __func__, fw->size);
+			wiphy_err(wl->wiphy, "%s: out of bounds fw file size %zu\n",
+				  __func__, fw->size);
 			rc = -EBADF;
 		} else {
 			/* check if ucode section overruns firmware image */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 75086b3..2a44593 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -34,12 +34,9 @@
 #include "ucode_loader.h"
 #include "main.h"
 #include "soc.h"
-
-/*
- * Indication for txflowcontrol that all priority bits in
- * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
- */
-#define ALLPRIO				-1
+#include "dma.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
 
 /* watchdog timer, in unit of ms */
 #define TIMER_INTERVAL_WATCHDOG		1000
@@ -126,21 +123,6 @@
 
 #define BRCMS_TEMPSENSE_PERIOD		10	/* 10 second timeout */
 
-/* precedences numbers for wlc queues. These are twice as may levels as
- * 802.1D priorities.
- * Odd numbers are used for HI priority traffic at same precedence levels
- * These constants are used ONLY by wlc_prio2prec_map.  Do not use them
- * elsewhere.
- */
-#define _BRCMS_PREC_NONE		0	/* None = - */
-#define _BRCMS_PREC_BK			2	/* BK - Background */
-#define _BRCMS_PREC_BE			4	/* BE - Best-effort */
-#define _BRCMS_PREC_EE			6	/* EE - Excellent-effort */
-#define _BRCMS_PREC_CL			8	/* CL - Controlled Load */
-#define _BRCMS_PREC_VI			10	/* Vi - Video */
-#define _BRCMS_PREC_VO			12	/* Vo - Voice */
-#define _BRCMS_PREC_NC			14	/* NC - Network Control */
-
 /* synthpu_dly times in us */
 #define SYNTHPU_DLY_APHY_US		3700
 #define SYNTHPU_DLY_BPHY_US		1050
@@ -237,17 +219,17 @@
 
 #define MAX_DMA_SEGS			4
 
-/* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD				256
+/* # of entries in Tx FIFO */
+#define NTXD				64
 /* Max # of entries in Rx FIFO based on 4kb page size */
 #define NRXD				256
 
+/* Amount of headroom to leave in Tx FIFO */
+#define TX_HEADROOM			4
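+/*
+ * Used when waking mac80211 queues: a queue is woken once dma->txavail
+ * exceeds TX_HEADROOM (see brcms_c_dotxstatus()).
+ */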
+
 /* try to keep this # rbufs posted to the chip */
 #define NRXBUFPOST			32
 
-/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT			50
-
 /* max # frames to process in brcms_c_recv() */
 #define RXBND				8
 /* max # tx status to process in wlc_txstatus() */
@@ -283,24 +265,8 @@
 	u16 TXOP;
 } __packed;
 
-const u8 prio2fifo[NUMPRIO] = {
-	TX_AC_BE_FIFO,		/* 0    BE      AC_BE   Best Effort */
-	TX_AC_BK_FIFO,		/* 1    BK      AC_BK   Background */
-	TX_AC_BK_FIFO,		/* 2    --      AC_BK   Background */
-	TX_AC_BE_FIFO,		/* 3    EE      AC_BE   Best Effort */
-	TX_AC_VI_FIFO,		/* 4    CL      AC_VI   Video */
-	TX_AC_VI_FIFO,		/* 5    VI      AC_VI   Video */
-	TX_AC_VO_FIFO,		/* 6    VO      AC_VO   Voice */
-	TX_AC_VO_FIFO		/* 7    NC      AC_VO   Voice */
-};
-
 /* debug/trace */
-uint brcm_msg_level =
-#if defined(DEBUG)
-	LOG_ERROR_VAL;
-#else
-	0;
-#endif				/* DEBUG */
+uint brcm_msg_level;
 
 /* TX FIFO number to WME/802.1E Access Category */
 static const u8 wme_fifo2ac[] = {
@@ -320,18 +286,6 @@
 	TX_AC_BK_FIFO
 };
 
-/* 802.1D Priority to precedence queue mapping */
-const u8 wlc_prio2prec_map[] = {
-	_BRCMS_PREC_BE,		/* 0 BE - Best-effort */
-	_BRCMS_PREC_BK,		/* 1 BK - Background */
-	_BRCMS_PREC_NONE,		/* 2 None = - */
-	_BRCMS_PREC_EE,		/* 3 EE - Excellent-effort */
-	_BRCMS_PREC_CL,		/* 4 CL - Controlled Load */
-	_BRCMS_PREC_VI,		/* 5 Vi - Video */
-	_BRCMS_PREC_VO,		/* 6 Vo - Voice */
-	_BRCMS_PREC_NC,		/* 7 NC - Network Control */
-};
-
 static const u16 xmtfifo_sz[][NFIFO] = {
 	/* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
 	{20, 192, 192, 21, 17, 5},
@@ -371,6 +325,36 @@
 static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
 #endif
 
+/* Mapping of ieee80211 AC numbers to tx fifos */
+static const u8 ac_to_fifo_mapping[IEEE80211_NUM_ACS] = {
+	[IEEE80211_AC_VO]	= TX_AC_VO_FIFO,
+	[IEEE80211_AC_VI]	= TX_AC_VI_FIFO,
+	[IEEE80211_AC_BE]	= TX_AC_BE_FIFO,
+	[IEEE80211_AC_BK]	= TX_AC_BK_FIFO,
+};
+
+/* Mapping of tx fifos to ieee80211 AC numbers */
+static const u8 fifo_to_ac_mapping[IEEE80211_NUM_ACS] = {
+	[TX_AC_BK_FIFO]	= IEEE80211_AC_BK,
+	[TX_AC_BE_FIFO]	= IEEE80211_AC_BE,
+	[TX_AC_VI_FIFO]	= IEEE80211_AC_VI,
+	[TX_AC_VO_FIFO]	= IEEE80211_AC_VO,
+};
+
+static u8 brcms_ac_to_fifo(u8 ac)
+{
+	if (ac >= ARRAY_SIZE(ac_to_fifo_mapping))
+		return TX_AC_BE_FIFO;
+	return ac_to_fifo_mapping[ac];
+}
+
+static u8 brcms_fifo_to_ac(u8 fifo)
+{
+	if (fifo >= ARRAY_SIZE(fifo_to_ac_mapping))
+		return IEEE80211_AC_BE;
+	return fifo_to_ac_mapping[fifo];
+}
+
 /* Find basic rate for a given rate */
 static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
 {
@@ -415,10 +399,15 @@
 }
 
 /* sum the individual fifo tx pending packet counts */
-static s16 brcms_txpktpendtot(struct brcms_c_info *wlc)
+static int brcms_txpktpendtot(struct brcms_c_info *wlc)
 {
-	return wlc->core->txpktpend[0] + wlc->core->txpktpend[1] +
-	       wlc->core->txpktpend[2] + wlc->core->txpktpend[3];
+	int i;
+	int pending = 0;
+
+	for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
+		if (wlc->hw->di[i])
+			pending += dma_txpending(wlc->hw->di[i]);
+	return pending;
 }
 
 static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc)
@@ -626,14 +615,11 @@
 	uint rate = rspec2rate(ratespec);
 
 	if (rate == 0) {
-		wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n",
+		brcms_err(wlc->hw->d11core, "wl%d: WAR: using rate of 1 mbps\n",
 			  wlc->pub->unit);
 		rate = BRCM_RATE_1M;
 	}
 
-	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
-		 wlc->pub->unit, ratespec, preamble_type, mac_len);
-
 	if (is_mcs_rate(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
 		int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -696,7 +682,7 @@
 	u16 size;
 	u32 value;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
 
 	for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
 		size = le16_to_cpu(inits[i].size);
@@ -725,7 +711,6 @@
 
 static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
 {
-	struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 	struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
 
 	/* init microcode host flags */
@@ -736,8 +721,9 @@
 		if (BRCMS_ISNPHY(wlc_hw->band))
 			brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16);
 		else
-			wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
-				  " %d\n", __func__, wlc_hw->unit,
+			brcms_err(wlc_hw->d11core,
+				  "%s: wl%d: unsupported phy in corerev %d\n",
+				  __func__, wlc_hw->unit,
 				  wlc_hw->corerev);
 	} else {
 		if (D11REV_IS(wlc_hw->corerev, 24)) {
@@ -745,12 +731,14 @@
 				brcms_c_write_inits(wlc_hw,
 						    ucode->d11lcn0bsinitvals24);
 			else
-				wiphy_err(wiphy, "%s: wl%d: unsupported phy in"
-					  " core rev %d\n", __func__,
-					  wlc_hw->unit, wlc_hw->corerev);
+				brcms_err(wlc_hw->d11core,
+					  "%s: wl%d: unsupported phy in core rev %d\n",
+					  __func__, wlc_hw->unit,
+					  wlc_hw->corerev);
 		} else {
-			wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
-				__func__, wlc_hw->unit, wlc_hw->corerev);
+			brcms_err(wlc_hw->d11core,
+				  "%s: wl%d: unsupported corerev %d\n",
+				  __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	}
 }
@@ -765,7 +753,7 @@
 
 static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d: clk %d\n", wlc_hw->unit, clk);
 
 	wlc_hw->phyclk = clk;
 
@@ -790,8 +778,8 @@
 /* low-level band switch utility routine */
 static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
-		bandunit);
+	brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
+			   bandunit);
 
 	wlc_hw->band = wlc_hw->bandstate[bandunit];
 
@@ -819,7 +807,7 @@
 	u32 macintmask;
 	u32 macctrl;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_mac80211(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
 	macctrl = bcma_read32(wlc_hw->d11core,
 			      D11REGOFFS(maccontrol));
 	WARN_ON((macctrl & MCTL_EN_MAC) != 0);
@@ -841,9 +829,10 @@
 static bool
 brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 {
-	struct sk_buff *p;
-	uint queue;
-	struct d11txh *txh;
+	struct sk_buff *p = NULL;
+	uint queue = NFIFO;
+	struct dma_pub *dma = NULL;
+	struct d11txh *txh = NULL;
 	struct scb *scb = NULL;
 	bool free_pdu;
 	int tx_rts, tx_frame_count, tx_rts_count;
@@ -854,6 +843,11 @@
 	struct ieee80211_tx_info *tx_info;
 	struct ieee80211_tx_rate *txrate;
 	int i;
+	bool fatal = true;
+
+	trace_brcms_txstatus(&wlc->hw->d11core->dev, txs->framelen,
+			     txs->frameid, txs->status, txs->lasttxtime,
+			     txs->sequence, txs->phyerr, txs->ackphyrxsh);
 
 	/* discard intermediate indications for ucode with one legitimate case:
 	 *   e.g. if "useRTS" is set. ucode did a successful rts/cts exchange,
@@ -862,34 +856,36 @@
 	 */
 	if (!(txs->status & TX_STATUS_AMPDU)
 	    && (txs->status & TX_STATUS_INTERMEDIATE)) {
-		BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
-		return false;
+		brcms_dbg_tx(wlc->hw->d11core, "INTERMEDIATE but not AMPDU\n");
+		fatal = false;
+		goto out;
 	}
 
 	queue = txs->frameid & TXFID_QUEUE_MASK;
 	if (queue >= NFIFO) {
-		p = NULL;
-		goto fatal;
+		brcms_err(wlc->hw->d11core, "queue %u >= NFIFO\n", queue);
+		goto out;
 	}
 
+	dma = wlc->hw->di[queue];
+
 	p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
-	if (p == NULL)
-		goto fatal;
+	if (p == NULL) {
+		brcms_err(wlc->hw->d11core, "dma_getnexttxp returned null!\n");
+		goto out;
+	}
 
 	txh = (struct d11txh *) (p->data);
 	mcl = le16_to_cpu(txh->MacTxControlLow);
 
-	if (txs->phyerr) {
-		if (brcm_msg_level & LOG_ERROR_VAL) {
-			wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n",
-				  txs->phyerr, txh->MainRates);
-			brcms_c_print_txdesc(txh);
-		}
-		brcms_c_print_txstatus(txs);
-	}
+	if (txs->phyerr)
+		brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
+			  txs->phyerr, txh->MainRates);
 
-	if (txs->frameid != le16_to_cpu(txh->TxFrameID))
-		goto fatal;
+	if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
+		brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
+		goto out;
+	}
 	tx_info = IEEE80211_SKB_CB(p);
 	h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
 
@@ -898,14 +894,24 @@
 
 	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 		brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
-		return false;
+		fatal = false;
+		goto out;
 	}
 
+	/*
+	 * brcms_c_ampdu_dotxstatus() will trace tx descriptors for AMPDU
+	 * frames; this traces them for the rest.
+	 */
+	trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
+
 	supr_status = txs->status & TX_STATUS_SUPR_MASK;
-	if (supr_status == TX_STATUS_SUPR_BADCH)
-		BCMMSG(wlc->wiphy,
-		       "%s: Pkt tx suppressed, possibly channel %d\n",
-		       __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
+	if (supr_status == TX_STATUS_SUPR_BADCH) {
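+		/* XtraFrameTypes carries the channel this frame was built for */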
+		unsigned xfts = le16_to_cpu(txh->XtraFrameTypes);
+		brcms_dbg_tx(wlc->hw->d11core,
+			     "Pkt tx suppressed, dest chan %u, current %d\n",
+			     (xfts >> XFTS_CHANNEL_SHIFT) & 0xff,
+			     CHSPEC_CHANNEL(wlc->default_bss->chanspec));
+	}
 
 	tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
 	tx_frame_count =
@@ -916,7 +922,7 @@
 	lastframe = !ieee80211_has_morefrags(h->frame_control);
 
 	if (!lastframe) {
-		wiphy_err(wlc->wiphy, "Not last frame!\n");
+		brcms_err(wlc->hw->d11core, "Not last frame!\n");
 	} else {
 		/*
 		 * Set information to be consumed by Minstrel ht.
@@ -982,26 +988,37 @@
 	totlen = p->len;
 	free_pdu = true;
 
-	brcms_c_txfifo_complete(wlc, queue, 1);
-
 	if (lastframe) {
 		/* remove PLCP & Broadcom tx descriptor header */
 		skb_pull(p, D11_PHY_HDR_LEN);
 		skb_pull(p, D11_TXH_LEN);
 		ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
 	} else {
-		wiphy_err(wlc->wiphy, "%s: Not last frame => not calling "
-			  "tx_status\n", __func__);
+		brcms_err(wlc->hw->d11core,
+			  "%s: Not last frame => not calling tx_status\n",
+			  __func__);
 	}
 
-	return false;
+	fatal = false;
 
- fatal:
-	if (p)
-		brcmu_pkt_buf_free_skb(p);
+ out:
+	if (fatal) {
+		if (txh)
+			trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
+					   sizeof(*txh));
+		if (p)
+			brcmu_pkt_buf_free_skb(p);
+	}
 
-	return true;
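+	/*
+	 * Regardless of the status, give the fifo a chance to drain: wake
+	 * the matching mac80211 queue if it was stopped for lack of
+	 * descriptors and kick the DMA engine to push out any tx work it
+	 * is still holding back.
+	 */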
+	if (dma && queue < NFIFO) {
+		u16 ac_queue = brcms_fifo_to_ac(queue);
+		if (dma->txavail > TX_HEADROOM && queue < TX_BCMC_FIFO &&
+		    ieee80211_queue_stopped(wlc->pub->ieee_hw, ac_queue))
+			ieee80211_wake_queue(wlc->pub->ieee_hw, ac_queue);
+		dma_kick_tx(dma);
+	}
 
+	return fatal;
 }
 
 /* process tx completion events in BMAC
@@ -1011,7 +1028,6 @@
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
 	bool morepending = false;
-	struct brcms_c_info *wlc = wlc_hw->wlc;
 	struct bcma_device *core;
 	struct tx_status txstatus, *txs;
 	u32 s1, s2;
@@ -1022,8 +1038,6 @@
 	 */
 	uint max_tx_num = bound ? TXSBND : -1;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	txs = &txstatus;
 	core = wlc_hw->d11core;
 	*fatal = false;
@@ -1032,8 +1046,8 @@
 	       && (s1 & TXS_V)) {
 
 		if (s1 == 0xffffffff) {
-			wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
-				wlc_hw->unit, __func__);
+			brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
+				  __func__);
 			return morepending;
 		}
 		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
@@ -1058,9 +1072,6 @@
 	if (n >= max_tx_num)
 		morepending = true;
 
-	if (!pktq_empty(&wlc->pkt_queue->q))
-		brcms_c_send_q(wlc);
-
 	return morepending;
 }
 
@@ -1112,7 +1123,6 @@
 	u16 pio_mhf2 = 0;
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	uint unit = wlc_hw->unit;
-	struct wiphy *wiphy = wlc->wiphy;
 
 	/* name and offsets for dma_attach */
 	snprintf(name, sizeof(name), "wl%d", unit);
@@ -1125,12 +1135,12 @@
 		 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
 		 * RX: RX_FIFO (RX data packets)
 		 */
-		wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+		wlc_hw->di[0] = dma_attach(name, wlc,
 					   (wme ? dmareg(DMA_TX, 0) : 0),
 					   dmareg(DMA_RX, 0),
 					   (wme ? NTXD : 0), NRXD,
 					   RXBUFSZ, -1, NRXBUFPOST,
-					   BRCMS_HWRXOFF, &brcm_msg_level);
+					   BRCMS_HWRXOFF);
 		dma_attach_err |= (NULL == wlc_hw->di[0]);
 
 		/*
@@ -1139,10 +1149,9 @@
 		 *   (legacy) TX_DATA_FIFO (TX data packets)
 		 * RX: UNUSED
 		 */
-		wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+		wlc_hw->di[1] = dma_attach(name, wlc,
 					   dmareg(DMA_TX, 1), 0,
-					   NTXD, 0, 0, -1, 0, 0,
-					   &brcm_msg_level);
+					   NTXD, 0, 0, -1, 0, 0);
 		dma_attach_err |= (NULL == wlc_hw->di[1]);
 
 		/*
@@ -1150,26 +1159,26 @@
 		 * TX: TX_AC_VI_FIFO (TX AC Video data packets)
 		 * RX: UNUSED
 		 */
-		wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+		wlc_hw->di[2] = dma_attach(name, wlc,
 					   dmareg(DMA_TX, 2), 0,
-					   NTXD, 0, 0, -1, 0, 0,
-					   &brcm_msg_level);
+					   NTXD, 0, 0, -1, 0, 0);
 		dma_attach_err |= (NULL == wlc_hw->di[2]);
 		/*
 		 * FIFO 3
 		 * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
 		 *   (legacy) TX_CTL_FIFO (TX control & mgmt packets)
 		 */
-		wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+		wlc_hw->di[3] = dma_attach(name, wlc,
 					   dmareg(DMA_TX, 3),
 					   0, NTXD, 0, 0, -1,
-					   0, 0, &brcm_msg_level);
+					   0, 0);
 		dma_attach_err |= (NULL == wlc_hw->di[3]);
 /* Cleaner to leave this as if with AP defined */
 
 		if (dma_attach_err) {
-			wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed"
-				  "\n", unit);
+			brcms_err(wlc_hw->d11core,
+				  "wl%d: wlc_attach: dma_attach failed\n",
+				  unit);
 			return false;
 		}
 
@@ -1503,8 +1512,7 @@
 	u16 mac_m;
 	u16 mac_h;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
-		 wlc_hw->unit);
+	brcms_dbg_rx(core, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit);
 
 	mac_l = addr[0] | (addr[1] << 8);
 	mac_m = addr[2] | (addr[3] << 8);
@@ -1527,7 +1535,7 @@
 	__le32 word_le;
 	__be32 word_be;
 	bool be_bit;
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
 
 	bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
 
@@ -1700,8 +1708,8 @@
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
 
-	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
-		wlc_hw->band->bandunit);
+	brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
+			   wlc_hw->band->bandunit);
 
 	brcms_c_ucode_bsinit(wlc_hw);
 
@@ -1736,8 +1744,6 @@
 /* Perform a soft reset of the PHY PLL */
 void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
 		  ~0, 0);
 	udelay(1);
@@ -1782,7 +1788,7 @@
 	u32 phy_bw_clkbits;
 	bool phy_in_reset = false;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
 
 	if (pih == NULL)
 		return;
@@ -1916,7 +1922,7 @@
 /* power both the pll and external oscillator on/off */
 static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d: want %d\n", wlc_hw->unit, want);
 
 	/*
 	 * dont power down if plldown is false or
@@ -2005,7 +2011,7 @@
 	if (flags == BRCMS_USE_COREFLAGS)
 		flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d: core reset\n", wlc_hw->unit);
 
 	/* request FAST clock if not on  */
 	fastclk = wlc_hw->forcefastclk;
@@ -2016,13 +2022,13 @@
 	if (bcma_core_is_enabled(wlc_hw->d11core)) {
 		for (i = 0; i < NFIFO; i++)
 			if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
-				wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
+				brcms_err(wlc_hw->d11core, "wl%d: %s: "
 					  "dma_txreset[%d]: cannot stop dma\n",
 					   wlc_hw->unit, __func__, i);
 
 		if ((wlc_hw->di[RX_FIFO])
 		    && (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
-			wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset"
+			brcms_err(wlc_hw->d11core, "wl%d: %s: dma_rxreset"
 				  "[%d]: cannot stop dma\n",
 				  wlc_hw->unit, __func__, RX_FIFO);
 	}
@@ -2235,7 +2241,7 @@
 	uint i;
 	uint count;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
 
 	count = (nbytes / sizeof(u32));
 
@@ -2263,8 +2269,8 @@
 					  ucode->bcm43xx_16_mimosz);
 			wlc_hw->ucode_loaded = true;
 		} else
-			wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
-				  "corerev %d\n",
+			brcms_err(wlc_hw->d11core,
+				  "%s: wl%d: unsupported phy in corerev %d\n",
 				  __func__, wlc_hw->unit, wlc_hw->corerev);
 	} else if (D11REV_IS(wlc_hw->corerev, 24)) {
 		if (BRCMS_ISLCNPHY(wlc_hw->band)) {
@@ -2272,8 +2278,8 @@
 					  ucode->bcm43xx_24_lcnsz);
 			wlc_hw->ucode_loaded = true;
 		} else {
-			wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
-				  "corerev %d\n",
+			brcms_err(wlc_hw->d11core,
+				  "%s: wl%d: unsupported phy in corerev %d\n",
 				  __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	}
@@ -2310,7 +2316,6 @@
 	uint unit;
 	uint intstatus, idx;
 	struct bcma_device *core = wlc_hw->d11core;
-	struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
 	unit = wlc_hw->unit;
 
@@ -2323,39 +2328,39 @@
 		if (!intstatus)
 			continue;
 
-		BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n",
-			unit, idx, intstatus);
+		brcms_dbg_int(core, "wl%d: intstatus%d 0x%x\n",
+			      unit, idx, intstatus);
 
 		if (intstatus & I_RO) {
-			wiphy_err(wiphy, "wl%d: fifo %d: receive fifo "
+			brcms_err(core, "wl%d: fifo %d: receive fifo "
 				  "overflow\n", unit, idx);
 			fatal = true;
 		}
 
 		if (intstatus & I_PC) {
-			wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n",
-				 unit, idx);
+			brcms_err(core, "wl%d: fifo %d: descriptor error\n",
+				  unit, idx);
 			fatal = true;
 		}
 
 		if (intstatus & I_PD) {
-			wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit,
+			brcms_err(core, "wl%d: fifo %d: data error\n", unit,
 				  idx);
 			fatal = true;
 		}
 
 		if (intstatus & I_DE) {
-			wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol "
+			brcms_err(core, "wl%d: fifo %d: descriptor protocol "
 				  "error\n", unit, idx);
 			fatal = true;
 		}
 
 		if (intstatus & I_RU)
-			wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor "
+			brcms_err(core, "wl%d: fifo %d: receive descriptor "
 				  "underflow\n", idx, unit);
 
 		if (intstatus & I_XU) {
-			wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo "
+			brcms_err(core, "wl%d: fifo %d: transmit fifo "
 				  "underflow\n", idx, unit);
 			fatal = true;
 		}
@@ -2516,13 +2521,13 @@
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	struct bcma_device *core = wlc_hw->d11core;
-	u32 macintstatus;
+	u32 macintstatus, mask;
 
 	/* macintstatus includes a DMA interrupt summary bit */
 	macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
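+	/* resolve the applicable interrupt mask first so it can be traced */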
+	mask = in_isr ? wlc->macintmask : wlc->defmacintmask;
 
-	BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
-		 macintstatus);
+	trace_brcms_macintstatus(&core->dev, in_isr, macintstatus, mask);
 
 	/* detect cardbus removed, in power down(suspend) and in reset */
 	if (brcms_deviceremoved(wlc))
@@ -2535,7 +2540,7 @@
 		return 0;
 
 	/* defer unsolicited interrupts */
-	macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask);
+	macintstatus &= mask;
 
 	/* if not for us */
 	if (macintstatus == 0)
@@ -2605,8 +2610,8 @@
 	macintstatus = wlc_intstatus(wlc, true);
 
 	if (macintstatus == 0xffffffff)
-		wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code"
-			  " path\n");
+		brcms_err(wlc_hw->d11core,
+			  "DEVICEREMOVED detected in the ISR code path\n");
 
 	/* it is not for us */
 	if (macintstatus == 0)
@@ -2626,10 +2631,9 @@
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	struct bcma_device *core = wlc_hw->d11core;
 	u32 mc, mi;
-	struct wiphy *wiphy = wlc->wiphy;
 
-	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
-		wlc_hw->band->bandunit);
+	brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
+			   wlc_hw->band->bandunit);
 
 	/*
 	 * Track overlapping suspend requests
@@ -2644,7 +2648,7 @@
 	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 
 	if (mc == 0xffffffff) {
-		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+		brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
 		brcms_down(wlc->wl);
 		return;
@@ -2655,7 +2659,7 @@
 
 	mi = bcma_read32(core, D11REGOFFS(macintstatus));
 	if (mi == 0xffffffff) {
-		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+		brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
 		brcms_down(wlc->wl);
 		return;
@@ -2668,10 +2672,10 @@
 		 BRCMS_MAX_MAC_SUSPEND);
 
 	if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
-		wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
+		brcms_err(core, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
 			  " and MI_MACSSPNDD is still not on.\n",
 			  wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
-		wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
+		brcms_err(core, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
 			  "psm_brc 0x%04x\n", wlc_hw->unit,
 			  bcma_read32(core, D11REGOFFS(psmdebug)),
 			  bcma_read32(core, D11REGOFFS(phydebug)),
@@ -2680,7 +2684,7 @@
 
 	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 	if (mc == 0xffffffff) {
-		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+		brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
 		brcms_down(wlc->wl);
 		return;
@@ -2696,8 +2700,8 @@
 	struct bcma_device *core = wlc_hw->d11core;
 	u32 mc, mi;
 
-	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
-		wlc->band->bandunit);
+	brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
+			   wlc->band->bandunit);
 
 	/*
 	 * Track overlapping suspend requests
@@ -2740,8 +2744,6 @@
 	u32 w, val;
 	struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
-	BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
-
 	/* Validate dchip register access */
 
 	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
@@ -2802,7 +2804,7 @@
 	struct bcma_device *core = wlc_hw->d11core;
 	u32 tmp;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
 
 	tmp = 0;
 
@@ -2818,8 +2820,8 @@
 
 			tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
 			if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
-				wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
-					  " PLL failed\n", __func__);
+				brcms_err(core, "%s: turn on PHY PLL failed\n",
+					  __func__);
 		} else {
 			bcma_set32(core, D11REGOFFS(clk_ctl_st),
 				   tmp | CCS_ERSRC_REQ_D11PLL |
@@ -2835,8 +2837,8 @@
 			     (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
 			    !=
 			    (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
-				wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on "
-					  "PHY PLL failed\n", __func__);
+				brcms_err(core, "%s: turn on PHY PLL failed\n",
+					  __func__);
 		}
 	} else {
 		/*
@@ -2854,7 +2856,7 @@
 {
 	bool dev_gone;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d: disable core\n", wlc_hw->unit);
 
 	dev_gone = brcms_deviceremoved(wlc_hw->wlc);
 
@@ -2884,12 +2886,14 @@
 	uint i;
 
 	/* free any posted tx packets */
-	for (i = 0; i < NFIFO; i++)
+	for (i = 0; i < NFIFO; i++) {
 		if (wlc_hw->di[i]) {
 			dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
-			wlc->core->txpktpend[i] = 0;
-			BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i);
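+			/* the fifo is empty again, let mac80211 queue to it */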
+			if (i < TX_BCMC_FIFO)
+				ieee80211_wake_queue(wlc->pub->ieee_hw,
+						     brcms_fifo_to_ac(i));
 		}
+	}
 
 	/* free any posted rx packets */
 	dma_rxreclaim(wlc_hw->di[RX_FIFO]);
@@ -3109,7 +3113,7 @@
 	/* check for rx fifo 0 overflow */
 	delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
 	if (delta)
-		wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n",
+		brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
 			  wlc->pub->unit, delta);
 
 	/* check for tx fifo underflows */
@@ -3118,8 +3122,9 @@
 		    (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
 			      txfunfl[i]);
 		if (delta)
-			wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
-				  "\n", wlc->pub->unit, delta, i);
+			brcms_err(wlc->hw->d11core,
+				  "wl%d: %u tx fifo %d underflows!\n",
+				  wlc->pub->unit, delta, i);
 	}
 #endif				/* DEBUG */
 
@@ -3132,8 +3137,6 @@
 
 static void brcms_b_reset(struct brcms_hardware *wlc_hw)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	/* reset the core */
 	if (!brcms_deviceremoved(wlc_hw->wlc))
 		brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -3144,7 +3147,7 @@
 
 void brcms_c_reset(struct brcms_c_info *wlc)
 {
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+	brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
 
 	/* slurp up hw mac counters before core reset */
 	brcms_c_statsupd(wlc);
@@ -3189,10 +3192,9 @@
 	bool fifosz_fixup = false;
 	int err = 0;
 	u16 buf[NFIFO];
-	struct wiphy *wiphy = wlc->wiphy;
 	struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(core, "wl%d: core init\n", wlc_hw->unit);
 
 	/* reset PSM */
 	brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
@@ -3212,7 +3214,7 @@
 	SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
 		   MI_MACSSPNDD) == 0), 1000 * 1000);
 	if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
-		wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
+		brcms_err(core, "wl%d: wlc_coreinit: ucode did not self-"
 			  "suspend!\n", wlc_hw->unit);
 
 	brcms_c_gpio_init(wlc);
@@ -3223,18 +3225,18 @@
 		if (BRCMS_ISNPHY(wlc_hw->band))
 			brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
 		else
-			wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+			brcms_err(core, "%s: wl%d: unsupported phy in corerev"
 				  " %d\n", __func__, wlc_hw->unit,
 				  wlc_hw->corerev);
 	} else if (D11REV_IS(wlc_hw->corerev, 24)) {
 		if (BRCMS_ISLCNPHY(wlc_hw->band))
 			brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
 		else
-			wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+			brcms_err(core, "%s: wl%d: unsupported phy in corerev"
 				  " %d\n", __func__, wlc_hw->unit,
 				  wlc_hw->corerev);
 	} else {
-		wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
+		brcms_err(core, "%s: wl%d: unsupported corerev %d\n",
 			  __func__, wlc_hw->unit, wlc_hw->corerev);
 	}
 
@@ -3276,7 +3278,7 @@
 		err = -1;
 	}
 	if (err != 0)
-		wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d"
+		brcms_err(core, "wlc_coreinit: txfifo mismatch: ucode size %d"
 			  " driver size %d index %d\n", buf[i],
 			  wlc_hw->xmtfifo_sz[i], i);
 
@@ -3359,8 +3361,6 @@
 	bool fastclk;
 	struct brcms_c_info *wlc = wlc_hw->wlc;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	/* request FAST clock if not on */
 	fastclk = wlc_hw->forcefastclk;
 	if (!fastclk)
@@ -3453,7 +3453,7 @@
 		rate = (rateset->rates[i] & BRCMS_RATE_MASK);
 
 		if (rate > BRCM_MAXRATE) {
-			wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
+			brcms_err(wlc->hw->d11core, "brcms_c_rate_lookup_init: "
 				  "invalid rate 0x%X in rate set\n",
 				  rateset->rates[i]);
 			continue;
@@ -3529,7 +3529,6 @@
 	uint parkband;
 	uint i, band_order[2];
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
 	/*
 	 * We might have been bandlocked during down and the chip
 	 * power-cycled (hibernate). Figure out the right band to park on
@@ -3710,8 +3709,8 @@
 /* band-specific init */
 static void brcms_c_bsinit(struct brcms_c_info *wlc)
 {
-	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
-		 wlc->pub->unit, wlc->band->bandunit);
+	brcms_dbg_info(wlc->hw->d11core, "wl%d: bandunit %d\n",
+		       wlc->pub->unit, wlc->band->bandunit);
 
 	/* write ucode ACK/CTS rate table */
 	brcms_c_set_ratetable(wlc);
@@ -3734,7 +3733,8 @@
 	    isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
 	    M_TX_IDLE_BUSY_RATIO_X_16_CCK;
 	if (duty_cycle > 100 || duty_cycle < 0) {
-		wiphy_err(wlc->wiphy, "wl%d:  duty cycle value off limit\n",
+		brcms_err(wlc->hw->d11core,
+			  "wl%d:  duty cycle value off limit\n",
 			  wlc->pub->unit);
 		return -EINVAL;
 	}
@@ -3752,40 +3752,6 @@
 	return 0;
 }
 
-/*
- * Initialize the base precedence map for dequeueing
- * from txq based on WME settings
- */
-static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
-{
-	wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
-	memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
-
-	wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
-	wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
-	wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
-	wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
-}
-
-static void
-brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
-			     struct brcms_txq_info *qi, bool on, int prio)
-{
-	/* transmit flowcontrol is not yet implemented */
-}
-
-static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
-{
-	struct brcms_txq_info *qi;
-
-	for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
-		if (qi->stopped) {
-			brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
-			qi->stopped = 0;
-		}
-	}
-}
-
 /* push sw hps and wake state through hardware */
 static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
 {
@@ -3795,7 +3761,8 @@
 
 	hps = brcms_c_ps_allowed(wlc);
 
-	BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
+	brcms_dbg_mac80211(wlc->hw->d11core, "wl%d: hps %d\n", wlc->pub->unit,
+			   hps);
 
 	v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
 	v2 = MCTL_WAKE;
@@ -3881,7 +3848,8 @@
 {
 	uint bandunit;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec);
+	brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: 0x%x\n", wlc_hw->unit,
+			   chanspec);
 
 	wlc_hw->chanspec = chanspec;
 
@@ -3942,7 +3910,7 @@
 	u16 old_chanspec = wlc->chanspec;
 
 	if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n",
+		brcms_err(wlc->hw->d11core, "wl%d: %s: Bad channel %d\n",
 			  wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
 		return;
 	}
@@ -3953,8 +3921,8 @@
 		if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
 			switchband = true;
 			if (wlc->bandlocked) {
-				wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d "
-					  "band is locked!\n",
+				brcms_err(wlc->hw->d11core,
+					  "wl%d: %s: chspec %d band is locked!\n",
 					  wlc->pub->unit, __func__,
 					  CHSPEC_CHANNEL(chanspec));
 				return;
@@ -4018,6 +3986,10 @@
  */
 void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
 {
+	/*
+	 * Cannot use brcms_dbg_* here because this function is called
+	 * before wlc is sufficiently initialized.
+	 */
 	BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
 
 	switch (idx) {
@@ -4090,8 +4062,8 @@
 
 	/* Only apply params if the core is out of reset and has clocks */
 	if (!wlc->clk) {
-		wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit,
-			  __func__);
+		brcms_err(wlc->hw->d11core, "wl%d: %s : no-clock\n",
+			  wlc->pub->unit, __func__);
 		return;
 	}
 
@@ -4109,7 +4081,7 @@
 
 	if (acp_shm.aifs < EDCF_AIFSN_MIN
 	    || acp_shm.aifs > EDCF_AIFSN_MAX) {
-		wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
+		brcms_err(wlc->hw->d11core, "wl%d: edcf_setparams: bad "
 			  "aifs %d\n", wlc->pub->unit, acp_shm.aifs);
 	} else {
 		acp_shm.cwmin = params->cw_min;
@@ -4224,8 +4196,8 @@
 	struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
 
 	if (brcms_deviceremoved(wlc)) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
-			__func__);
+		brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
+			  wlc->pub->unit, __func__);
 		brcms_down(wlc->wl);
 		return;
 	}
@@ -4238,8 +4210,6 @@
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	if (!wlc_hw->up)
 		return;
 
@@ -4258,14 +4228,14 @@
 /* common watchdog code */
 static void brcms_c_watchdog(struct brcms_c_info *wlc)
 {
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+	brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
 
 	if (!wlc->pub->up)
 		return;
 
 	if (brcms_deviceremoved(wlc)) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
-			  __func__);
+		brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
+			  wlc->pub->unit, __func__);
 		brcms_down(wlc->wl);
 		return;
 	}
@@ -4437,13 +4407,13 @@
 	struct ssb_sprom *sprom = &core->bus->sprom;
 
 	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
-		BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
-		       pcidev->vendor,
-		       pcidev->device);
+		brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
+			       pcidev->vendor,
+			       pcidev->device);
 	else
-		BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
-		       core->bus->boardinfo.vendor,
-		       core->bus->boardinfo.type);
+		brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
+			       core->bus->boardinfo.vendor,
+			       core->bus->boardinfo.type);
 
 	wme = true;
 
@@ -4715,8 +4685,9 @@
 		goto fail;
 	}
 
-	BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x\n",
-	       wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih));
+	brcms_dbg_info(wlc_hw->d11core, "deviceid 0x%x nbands %d board 0x%x\n",
+		       wlc_hw->deviceid, wlc_hw->_nbands,
+		       ai_get_boardtype(wlc_hw->sih));
 
 	return err;
 
@@ -4836,56 +4807,6 @@
 		bi->flags |= BRCMS_BSS_HT;
 }
 
-static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
-{
-	struct brcms_txq_info *qi, *p;
-
-	qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
-	if (qi != NULL) {
-		/*
-		 * Have enough room for control packets along with HI watermark
-		 * Also, add room to txq for total psq packets if all the SCBs
-		 * leave PS mode. The watermark for flowcontrol to OS packets
-		 * will remain the same
-		 */
-		brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
-			  2 * BRCMS_DATAHIWAT + PKTQ_LEN_DEFAULT);
-
-		/* add this queue to the the global list */
-		p = wlc->tx_queues;
-		if (p == NULL) {
-			wlc->tx_queues = qi;
-		} else {
-			while (p->next != NULL)
-				p = p->next;
-			p->next = qi;
-		}
-	}
-	return qi;
-}
-
-static void brcms_c_txq_free(struct brcms_c_info *wlc,
-			     struct brcms_txq_info *qi)
-{
-	struct brcms_txq_info *p;
-
-	if (qi == NULL)
-		return;
-
-	/* remove the queue from the linked list */
-	p = wlc->tx_queues;
-	if (p == qi)
-		wlc->tx_queues = p->next;
-	else {
-		while (p != NULL && p->next != qi)
-			p = p->next;
-		if (p != NULL)
-			p->next = p->next->next;
-	}
-
-	kfree(qi);
-}
-
 static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
 {
 	uint i;
@@ -4991,8 +4912,6 @@
 	if (wlc == NULL)
 		return 0;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
 	callbacks += brcms_b_detach(wlc);
 
 	/* delete software timers */
@@ -5005,10 +4924,6 @@
 
 	brcms_c_detach_module(wlc);
 
-
-	while (wlc->tx_queues != NULL)
-		brcms_c_txq_free(wlc, wlc->tx_queues);
-
 	brcms_c_detach_mfree(wlc);
 	return callbacks;
 }
@@ -5026,7 +4941,7 @@
 	if (wlc_hw->wlc->pub->hw_up)
 		return;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
 
 	/*
 	 * Enable pll and xtal, initialize the power control registers,
@@ -5063,7 +4978,7 @@
 
 static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+	brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
 
 	/*
 	 * Enable pll and xtal, initialize the power control registers,
@@ -5077,7 +4992,7 @@
 	 * Configure pci/pcmcia here instead of in brcms_c_attach()
 	 * to allow mfg hotswap:  down, hotswap (chip power cycle), up.
 	 */
-	bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core,
+	bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
 			      true);
 
 	/*
@@ -5102,8 +5017,6 @@
 
 static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
 {
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	wlc_hw->up = true;
 	wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
 
@@ -5135,7 +5048,7 @@
 {
 	struct ieee80211_channel *ch;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+	brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
 
 	/* HW is turned off so don't try to access it */
 	if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
@@ -5176,8 +5089,8 @@
 					 WL_RADIO_HW_DISABLE);
 
 				if (bsscfg->enable && bsscfg->BSS)
-					wiphy_err(wlc->wiphy, "wl%d: up"
-						  ": rfdisable -> "
+					brcms_err(wlc->hw->d11core,
+						  "wl%d: up: rfdisable -> "
 						  "bsscfg_disable()\n",
 						   wlc->pub->unit);
 			}
@@ -5237,8 +5150,6 @@
 	bool dev_gone;
 	uint callbacks = 0;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	if (!wlc_hw->up)
 		return callbacks;
 
@@ -5265,8 +5176,6 @@
 	uint callbacks = 0;
 	bool dev_gone;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
 	if (!wlc_hw->up)
 		return callbacks;
 
@@ -5314,14 +5223,14 @@
 	uint callbacks = 0;
 	int i;
 	bool dev_gone = false;
-	struct brcms_txq_info *qi;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+	brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
 
 	/* check if we are already in the going down path */
 	if (wlc->going_down) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return"
-			  "\n", wlc->pub->unit, __func__);
+		brcms_err(wlc->hw->d11core,
+			  "wl%d: %s: Driver going down so return\n",
+			  wlc->pub->unit, __func__);
 		return 0;
 	}
 	if (!wlc->pub->up)
@@ -5353,13 +5262,6 @@
 
 	wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
 
-	/* clear txq flow control */
-	brcms_c_txflowcontrol_reset(wlc);
-
-	/* flush tx queues */
-	for (qi = wlc->tx_queues; qi != NULL; qi = qi->next)
-		brcmu_pktq_flush(&qi->q, true, NULL, NULL);
-
 	callbacks += brcms_b_down_finish(wlc->hw);
 
 	/* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
@@ -5441,7 +5343,7 @@
 
 	default:
 		/* Error */
-		wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n",
+		brcms_err(wlc->hw->d11core, "wl%d: %s: invalid gmode %d\n",
 			  wlc->pub->unit, __func__, gmode);
 		return -ENOTSUPP;
 	}
@@ -5745,45 +5647,6 @@
 	return -ENODATA;
 }
 
-void brcms_c_print_txstatus(struct tx_status *txs)
-{
-	pr_debug("\ntxpkt (MPDU) Complete\n");
-
-	pr_debug("FrameID: %04x   TxStatus: %04x\n", txs->frameid, txs->status);
-
-	pr_debug("[15:12]  %d  frame attempts\n",
-		  (txs->status & TX_STATUS_FRM_RTX_MASK) >>
-		 TX_STATUS_FRM_RTX_SHIFT);
-	pr_debug(" [11:8]  %d  rts attempts\n",
-		 (txs->status & TX_STATUS_RTS_RTX_MASK) >>
-		 TX_STATUS_RTS_RTX_SHIFT);
-	pr_debug("    [7]  %d  PM mode indicated\n",
-		 txs->status & TX_STATUS_PMINDCTD ? 1 : 0);
-	pr_debug("    [6]  %d  intermediate status\n",
-		 txs->status & TX_STATUS_INTERMEDIATE ? 1 : 0);
-	pr_debug("    [5]  %d  AMPDU\n",
-		 txs->status & TX_STATUS_AMPDU ? 1 : 0);
-	pr_debug("  [4:2]  %d  Frame Suppressed Reason (%s)\n",
-		 (txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT,
-		 (const char *[]) {
-			"None",
-			"PMQ Entry",
-			"Flush request",
-			"Previous frag failure",
-			"Channel mismatch",
-			"Lifetime Expiry",
-			"Underflow"
-		 } [(txs->status & TX_STATUS_SUPR_MASK) >>
-		    TX_STATUS_SUPR_SHIFT]);
-	pr_debug("    [1]  %d  acked\n",
-		 txs->status & TX_STATUS_ACK_RCV ? 1 : 0);
-
-	pr_debug("LastTxTime: %04x Seq: %04x PHYTxStatus: %04x RxAckRSSI: %04x RxAckSQ: %04x\n",
-		 txs->lasttxtime, txs->sequence, txs->phyerr,
-		 (txs->ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT,
-		 (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
-}
-
 static bool brcms_c_chipmatch_pci(struct bcma_device *core)
 {
 	struct pci_dev *pcidev = core->bus->host_pci;
@@ -5832,184 +5695,6 @@
 	}
 }
 
-#if defined(DEBUG)
-void brcms_c_print_txdesc(struct d11txh *txh)
-{
-	u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
-	u16 mtch = le16_to_cpu(txh->MacTxControlHigh);
-	u16 mfc = le16_to_cpu(txh->MacFrameControl);
-	u16 tfest = le16_to_cpu(txh->TxFesTimeNormal);
-	u16 ptcw = le16_to_cpu(txh->PhyTxControlWord);
-	u16 ptcw_1 = le16_to_cpu(txh->PhyTxControlWord_1);
-	u16 ptcw_1_Fbr = le16_to_cpu(txh->PhyTxControlWord_1_Fbr);
-	u16 ptcw_1_Rts = le16_to_cpu(txh->PhyTxControlWord_1_Rts);
-	u16 ptcw_1_FbrRts = le16_to_cpu(txh->PhyTxControlWord_1_FbrRts);
-	u16 mainrates = le16_to_cpu(txh->MainRates);
-	u16 xtraft = le16_to_cpu(txh->XtraFrameTypes);
-	u8 *iv = txh->IV;
-	u8 *ra = txh->TxFrameRA;
-	u16 tfestfb = le16_to_cpu(txh->TxFesTimeFallback);
-	u8 *rtspfb = txh->RTSPLCPFallback;
-	u16 rtsdfb = le16_to_cpu(txh->RTSDurFallback);
-	u8 *fragpfb = txh->FragPLCPFallback;
-	u16 fragdfb = le16_to_cpu(txh->FragDurFallback);
-	u16 mmodelen = le16_to_cpu(txh->MModeLen);
-	u16 mmodefbrlen = le16_to_cpu(txh->MModeFbrLen);
-	u16 tfid = le16_to_cpu(txh->TxFrameID);
-	u16 txs = le16_to_cpu(txh->TxStatus);
-	u16 mnmpdu = le16_to_cpu(txh->MaxNMpdus);
-	u16 mabyte = le16_to_cpu(txh->MaxABytes_MRT);
-	u16 mabyte_f = le16_to_cpu(txh->MaxABytes_FBR);
-	u16 mmbyte = le16_to_cpu(txh->MinMBytes);
-
-	u8 *rtsph = txh->RTSPhyHeader;
-	struct ieee80211_rts rts = txh->rts_frame;
-
-	/* add plcp header along with txh descriptor */
-	brcmu_dbg_hex_dump(txh, sizeof(struct d11txh) + 48,
-			   "Raw TxDesc + plcp header:\n");
-
-	pr_debug("TxCtlLow: %04x ", mtcl);
-	pr_debug("TxCtlHigh: %04x ", mtch);
-	pr_debug("FC: %04x ", mfc);
-	pr_debug("FES Time: %04x\n", tfest);
-	pr_debug("PhyCtl: %04x%s ", ptcw,
-	       (ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
-	pr_debug("PhyCtl_1: %04x ", ptcw_1);
-	pr_debug("PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
-	pr_debug("PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
-	pr_debug("PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
-	pr_debug("MainRates: %04x ", mainrates);
-	pr_debug("XtraFrameTypes: %04x ", xtraft);
-	pr_debug("\n");
-
-	print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
-	print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
-			     ra, sizeof(txh->TxFrameRA));
-
-	pr_debug("Fb FES Time: %04x ", tfestfb);
-	print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
-			     rtspfb, sizeof(txh->RTSPLCPFallback));
-	pr_debug("RTS DUR: %04x ", rtsdfb);
-	print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
-			     fragpfb, sizeof(txh->FragPLCPFallback));
-	pr_debug("DUR: %04x", fragdfb);
-	pr_debug("\n");
-
-	pr_debug("MModeLen: %04x ", mmodelen);
-	pr_debug("MModeFbrLen: %04x\n", mmodefbrlen);
-
-	pr_debug("FrameID:     %04x\n", tfid);
-	pr_debug("TxStatus:    %04x\n", txs);
-
-	pr_debug("MaxNumMpdu:  %04x\n", mnmpdu);
-	pr_debug("MaxAggbyte:  %04x\n", mabyte);
-	pr_debug("MaxAggbyte_fb:  %04x\n", mabyte_f);
-	pr_debug("MinByte:     %04x\n", mmbyte);
-
-	print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
-			     rtsph, sizeof(txh->RTSPhyHeader));
-	print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
-			     (u8 *)&rts, sizeof(txh->rts_frame));
-	pr_debug("\n");
-}
-#endif				/* defined(DEBUG) */
-
-#if defined(DEBUG)
-static int
-brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
-		     int len)
-{
-	int i;
-	char *p = buf;
-	char hexstr[16];
-	int slen = 0, nlen = 0;
-	u32 bit;
-	const char *name;
-
-	if (len < 2 || !buf)
-		return 0;
-
-	buf[0] = '\0';
-
-	for (i = 0; flags != 0; i++) {
-		bit = bd[i].bit;
-		name = bd[i].name;
-		if (bit == 0 && flags != 0) {
-			/* print any unnamed bits */
-			snprintf(hexstr, 16, "0x%X", flags);
-			name = hexstr;
-			flags = 0;	/* exit loop */
-		} else if ((flags & bit) == 0)
-			continue;
-		flags &= ~bit;
-		nlen = strlen(name);
-		slen += nlen;
-		/* count btwn flag space */
-		if (flags != 0)
-			slen += 1;
-		/* need NULL char as well */
-		if (len <= slen)
-			break;
-		/* copy NULL char but don't count it */
-		strncpy(p, name, nlen + 1);
-		p += nlen;
-		/* copy btwn flag space and NULL char */
-		if (flags != 0)
-			p += snprintf(p, 2, " ");
-		len -= slen;
-	}
-
-	/* indicate the str was too short */
-	if (flags != 0) {
-		if (len < 2)
-			p -= 2 - len;	/* overwrite last char */
-		p += snprintf(p, 2, ">");
-	}
-
-	return (int)(p - buf);
-}
-#endif				/* defined(DEBUG) */
-
-#if defined(DEBUG)
-void brcms_c_print_rxh(struct d11rxhdr *rxh)
-{
-	u16 len = rxh->RxFrameSize;
-	u16 phystatus_0 = rxh->PhyRxStatus_0;
-	u16 phystatus_1 = rxh->PhyRxStatus_1;
-	u16 phystatus_2 = rxh->PhyRxStatus_2;
-	u16 phystatus_3 = rxh->PhyRxStatus_3;
-	u16 macstatus1 = rxh->RxStatus1;
-	u16 macstatus2 = rxh->RxStatus2;
-	char flagstr[64];
-	char lenbuf[20];
-	static const struct brcms_c_bit_desc macstat_flags[] = {
-		{RXS_FCSERR, "FCSErr"},
-		{RXS_RESPFRAMETX, "Reply"},
-		{RXS_PBPRES, "PADDING"},
-		{RXS_DECATMPT, "DeCr"},
-		{RXS_DECERR, "DeCrErr"},
-		{RXS_BCNSENT, "Bcn"},
-		{0, NULL}
-	};
-
-	brcmu_dbg_hex_dump(rxh, sizeof(struct d11rxhdr), "Raw RxDesc:\n");
-
-	brcms_c_format_flags(macstat_flags, macstatus1, flagstr, 64);
-
-	snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
-
-	pr_debug("RxFrameSize:     %6s (%d)%s\n", lenbuf, len,
-	       (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
-	pr_debug("RxPHYStatus:     %04x %04x %04x %04x\n",
-	       phystatus_0, phystatus_1, phystatus_2, phystatus_3);
-	pr_debug("RxMACStatus:     %x %s\n", macstatus1, flagstr);
-	pr_debug("RXMACaggtype:    %x\n",
-	       (macstatus2 & RXS_AGGTYPE_MASK));
-	pr_debug("RxTSFTime:       %04x\n", rxh->RxTSFTime);
-}
-#endif				/* defined(DEBUG) */
-
 u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
 {
 	u16 table_ptr;
@@ -6033,86 +5718,6 @@
 	return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
 }
 
-static bool
-brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
-		      struct sk_buff *pkt, int prec, bool head)
-{
-	struct sk_buff *p;
-	int eprec = -1;		/* precedence to evict from */
-
-	/* Determine precedence from which to evict packet, if any */
-	if (pktq_pfull(q, prec))
-		eprec = prec;
-	else if (pktq_full(q)) {
-		p = brcmu_pktq_peek_tail(q, &eprec);
-		if (eprec > prec) {
-			wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
-				  "\n", __func__, eprec, prec);
-			return false;
-		}
-	}
-
-	/* Evict if needed */
-	if (eprec >= 0) {
-		bool discard_oldest;
-
-		discard_oldest = ac_bitmap_tst(0, eprec);
-
-		/* Refuse newer packet unless configured to discard oldest */
-		if (eprec == prec && !discard_oldest) {
-			wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d"
-				  "\n", __func__, prec);
-			return false;
-		}
-
-		/* Evict packet according to discard policy */
-		p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
-			brcmu_pktq_pdeq_tail(q, eprec);
-		brcmu_pkt_buf_free_skb(p);
-	}
-
-	/* Enqueue */
-	if (head)
-		p = brcmu_pktq_penq_head(q, prec, pkt);
-	else
-		p = brcmu_pktq_penq(q, prec, pkt);
-
-	return true;
-}
-
-/*
- * Attempts to queue a packet onto a multiple-precedence queue,
- * if necessary evicting a lower precedence packet from the queue.
- *
- * 'prec' is the precedence number that has already been mapped
- * from the packet priority.
- *
- * Returns true if packet consumed (queued), false if not.
- */
-static bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
-		      struct sk_buff *pkt, int prec)
-{
-	return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
-}
-
-void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
-		     struct sk_buff *sdu, uint prec)
-{
-	struct brcms_txq_info *qi = wlc->pkt_queue;	/* Check me */
-	struct pktq *q = &qi->q;
-	int prio;
-
-	prio = sdu->priority;
-
-	if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
-		/*
-		 * we might hit this condtion in case
-		 * packet flooding from mac80211 stack
-		 */
-		brcmu_pkt_buf_free_skb(sdu);
-	}
-}
-
 /*
  * bcmc_fid_generate:
  * Generate frame ID for a BCMC packet.  The frag field is not used
@@ -6140,8 +5745,6 @@
 {
 	uint dur = 0;
 
-	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
-		wlc->pub->unit, rspec, preamble_type);
 	/*
 	 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
 	 * is less than or equal to the rate of the immediately previous
@@ -6159,8 +5762,6 @@
 brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
 		      u8 preamble_type)
 {
-	BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
-		wlc->pub->unit, rspec, preamble_type);
 	return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
 }
 
@@ -6168,8 +5769,6 @@
 brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
 		     u8 preamble_type)
 {
-	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
-		 "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
 	/*
 	 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
 	 * is less than or equal to the rate of the immediately previous
@@ -6223,9 +5822,6 @@
 	uint nsyms, mac_len, Ndps, kNdps;
 	uint rate = rspec2rate(ratespec);
 
-	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
-		 wlc->pub->unit, ratespec, preamble_type, dur);
-
 	if (is_mcs_rate(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
 		int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -6292,7 +5888,7 @@
 			return true;
  error:
 	if (verbose)
-		wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
+		brcms_err(wlc->hw->d11core, "wl%d: valid_rate: rate spec 0x%x "
 			  "not in hw_rateset\n", wlc->pub->unit, rspec);
 
 	return false;
@@ -6302,6 +5898,7 @@
 mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
 		       u32 int_val)
 {
+	struct bcma_device *core = wlc->hw->d11core;
 	u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
 	u8 rate = int_val & NRATE_RATE_MASK;
 	u32 rspec;
@@ -6318,7 +5915,7 @@
 	if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
 		/* mcs only allowed when nmode */
 		if (stf > PHY_TXC1_MODE_SDM) {
-			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
+			brcms_err(core, "wl%d: %s: Invalid stf\n",
 				  wlc->pub->unit, __func__);
 			bcmerror = -EINVAL;
 			goto done;
@@ -6329,8 +5926,8 @@
 			if (!CHSPEC_IS40(wlc->home_chanspec) ||
 			    ((stf != PHY_TXC1_MODE_SISO)
 			     && (stf != PHY_TXC1_MODE_CDD))) {
-				wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
-					  "32\n", wlc->pub->unit, __func__);
+				brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
+					  wlc->pub->unit, __func__);
 				bcmerror = -EINVAL;
 				goto done;
 			}
@@ -6338,9 +5935,9 @@
 		} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
 			/* mcs > 7 must use stf SDM */
 			if (stf != PHY_TXC1_MODE_SDM) {
-				BCMMSG(wlc->wiphy, "wl%d: enabling "
-				       "SDM mode for mcs %d\n",
-				       wlc->pub->unit, rate);
+				brcms_dbg_mac80211(core, "wl%d: enabling "
+						   "SDM mode for mcs %d\n",
+						   wlc->pub->unit, rate);
 				stf = PHY_TXC1_MODE_SDM;
 			}
 		} else {
@@ -6351,15 +5948,15 @@
 			if ((stf > PHY_TXC1_MODE_STBC) ||
 			    (!BRCMS_STBC_CAP_PHY(wlc)
 			     && (stf == PHY_TXC1_MODE_STBC))) {
-				wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
-					  "\n", wlc->pub->unit, __func__);
+				brcms_err(core, "wl%d: %s: Invalid STBC\n",
+					  wlc->pub->unit, __func__);
 				bcmerror = -EINVAL;
 				goto done;
 			}
 		}
 	} else if (is_ofdm_rate(rate)) {
 		if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
-			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
+			brcms_err(core, "wl%d: %s: Invalid OFDM\n",
 				  wlc->pub->unit, __func__);
 			bcmerror = -EINVAL;
 			goto done;
@@ -6367,20 +5964,20 @@
 	} else if (is_cck_rate(rate)) {
 		if ((cur_band->bandtype != BRCM_BAND_2G)
 		    || (stf != PHY_TXC1_MODE_SISO)) {
-			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
+			brcms_err(core, "wl%d: %s: Invalid CCK\n",
 				  wlc->pub->unit, __func__);
 			bcmerror = -EINVAL;
 			goto done;
 		}
 	} else {
-		wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
+		brcms_err(core, "wl%d: %s: Unknown rate type\n",
 			  wlc->pub->unit, __func__);
 		bcmerror = -EINVAL;
 		goto done;
 	}
 	/* make sure multiple antennae are available for non-siso rates */
 	if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
+		brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
 			  "request\n", wlc->pub->unit, __func__);
 		bcmerror = -EINVAL;
 		goto done;
@@ -6449,7 +6046,7 @@
 		break;
 
 	default:
-		wiphy_err(wlc->wiphy,
+		brcms_err(wlc->hw->d11core,
 			  "brcms_c_cck_plcp_set: unsupported rate %d\n",
 			  rate_500);
 		rate_500 = BRCM_RATE_1M;
@@ -6582,7 +6179,7 @@
 		bw = rspec_get_bw(rspec);
 		/* 10Mhz is not supported yet */
 		if (bw < PHY_TXC1_BW_20MHZ) {
-			wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
+			brcms_err(wlc->hw->d11core, "phytxctl1_calc: bw %d is "
 				  "not supported yet, set to 20L\n", bw);
 			bw = PHY_TXC1_BW_20MHZ;
 		}
@@ -6609,7 +6206,7 @@
 		/* get the phyctl byte from rate phycfg table */
 		phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
 		if (phycfg == -1) {
-			wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
+			brcms_err(wlc->hw->d11core, "phytxctl1_calc: wrong "
 				  "legacy OFDM/CCK rate\n");
 			phycfg = 0;
 		}
@@ -6689,8 +6286,9 @@
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 		/* non-AP STA should never use BCMC queue */
 		if (queue == TX_BCMC_FIFO) {
-			wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
-				  "TX_BCMC!\n", wlc->pub->unit, __func__);
+			brcms_err(wlc->hw->d11core,
+				  "wl%d: %s: ASSERT queue == TX_BCMC!\n",
+				  wlc->pub->unit, __func__);
 			frameid = bcmc_fid_generate(wlc, NULL, txh);
 		} else {
 			/* Increment the counter for first fragment */
@@ -6860,7 +6458,8 @@
 
 			if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
 			    && (!is_mcs_rate(rspec[k]))) {
-				wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
+				brcms_err(wlc->hw->d11core,
+					  "wl%d: %s: IEEE80211_TX_"
 					  "RC_MCS != is_mcs_rate(rspec)\n",
 					  wlc->pub->unit, __func__);
 			}
@@ -7254,14 +6853,16 @@
 					wlc->fragthresh[queue] =
 					    (u16) newfragthresh;
 			} else {
-				wiphy_err(wlc->wiphy, "wl%d: %s txop invalid "
+				brcms_err(wlc->hw->d11core,
+					  "wl%d: %s txop invalid "
 					  "for rate %d\n",
 					  wlc->pub->unit, fifo_names[queue],
 					  rspec2rate(rspec[0]));
 			}
 
 			if (dur > wlc->edcf_txop[ac])
-				wiphy_err(wlc->wiphy, "wl%d: %s: %s txop "
+				brcms_err(wlc->hw->d11core,
+					  "wl%d: %s: %s txop "
 					  "exceeded phylen %d/%d dur %d/%d\n",
 					  wlc->pub->unit, __func__,
 					  fifo_names[queue],
@@ -7273,79 +6874,33 @@
 	return 0;
 }
 
-void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
-			      struct ieee80211_hw *hw)
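+/*
+ * Hand a fully built frame to its tx DMA fifo. The driver-internal
+ * precedence queue is gone; flow control now works by stopping and
+ * waking the mac80211 queues, so the only thing to verify here is
+ * that the fifo still has DMA descriptors available.
+ */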
+static int brcms_c_tx(struct brcms_c_info *wlc, struct sk_buff *skb)
 {
-	u8 prio;
-	uint fifo;
-	struct scb *scb = &wlc->pri_scb;
-	struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data);
-
-	/*
-	 * 802.11 standard requires management traffic
-	 * to go at highest priority
-	 */
-	prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
-		MAXPRIO;
-	fifo = prio2fifo[prio];
-	if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
-		return;
-	brcms_c_txq_enq(wlc, scb, sdu, BRCMS_PRIO_TO_PREC(prio));
-	brcms_c_send_q(wlc);
-}
-
-void brcms_c_send_q(struct brcms_c_info *wlc)
-{
-	struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
-	int prec;
-	u16 prec_map;
-	int err = 0, i, count;
-	uint fifo;
-	struct brcms_txq_info *qi = wlc->pkt_queue;
-	struct pktq *q = &qi->q;
-	struct ieee80211_tx_info *tx_info;
-
-	prec_map = wlc->tx_prec_map;
-
-	/* Send all the enq'd pkts that we can.
-	 * Dequeue packets with precedence with empty HW fifo only
-	 */
-	while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
-		tx_info = IEEE80211_SKB_CB(pkt[0]);
-		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
-			err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
-		} else {
-			count = 1;
-			err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
-			if (!err) {
-				for (i = 0; i < count; i++)
-					brcms_c_txfifo(wlc, fifo, pkt[i], true,
-						       1);
-			}
-		}
-
-		if (err == -EBUSY) {
-			brcmu_pktq_penq_head(q, prec, pkt[0]);
-			/*
-			 * If send failed due to any other reason than a
-			 * change in HW FIFO condition, quit. Otherwise,
-			 * read the new prec_map!
-			 */
-			if (prec_map == wlc->tx_prec_map)
-				break;
-			prec_map = wlc->tx_prec_map;
-		}
-	}
-}
-
-void
-brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
-	       bool commit, s8 txpktpend)
-{
-	u16 frameid = INVALIDFID;
+	struct dma_pub *dma;
+	int fifo, ret = -ENOSPC;
 	struct d11txh *txh;
+	u16 frameid = INVALIDFID;
 
-	txh = (struct d11txh *) (p->data);
+	fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
+	dma = wlc->hw->di[fifo];
+	txh = (struct d11txh *)(skb->data);
+
+	if (dma->txavail == 0) {
+		/*
+		 * We sometimes get a frame from mac80211 after stopping
+		 * the queues. This only ever seems to be a single frame
+	 * and it seems likely to be a race. TX_HEADROOM should
+		 * ensure that we have enough space to handle these stray
+		 * packets, so warn if there isn't. If we're out of space
+		 * in the tx ring and the tx queue isn't stopped then
+		 * we've really got a bug; warn loudly if that happens.
+		 */
+		brcms_warn(wlc->hw->d11core,
+			   "Received frame for tx with no space in DMA ring\n");
+		WARN_ON(!ieee80211_queue_stopped(wlc->pub->ieee_hw,
+						 skb_get_queue_mapping(skb)));
+		return -ENOSPC;
+	}
 
 	/* When a BC/MC frame is being committed to the BCMC fifo
 	 * via DMA (NOT PIO), update ucode or BSS info as appropriate.
@@ -7353,16 +6908,6 @@
 	if (fifo == TX_BCMC_FIFO)
 		frameid = le16_to_cpu(txh->TxFrameID);
 
-	/*
-	 * Bump up pending count for if not using rpc. If rpc is
-	 * used, this will be handled in brcms_b_txfifo()
-	 */
-	if (commit) {
-		wlc->core->txpktpend[fifo] += txpktpend;
-		BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
-			 txpktpend, wlc->core->txpktpend[fifo]);
-	}
-
 	/* Commit BCMC sequence number in the SHM frame ID location */
 	if (frameid != INVALIDFID) {
 		/*
@@ -7372,8 +6917,52 @@
 		brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
 	}
 
-	if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0)
+	ret = brcms_c_txfifo(wlc, fifo, skb);
+	/*
+	 * The only reason for brcms_c_txfifo to fail is because
+	 * there weren't any DMA descriptors, but we've already
+	 * checked for that. So if it does fail yell loudly.
+	 */
+	WARN_ON_ONCE(ret);
+
+	return ret;
+}
+
+void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+			      struct ieee80211_hw *hw)
+{
+	uint fifo;
+	struct scb *scb = &wlc->pri_scb;
+
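+	/* mac80211 already picked the AC queue; map it onto the tx fifo */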
+	fifo = brcms_ac_to_fifo(skb_get_queue_mapping(sdu));
+	if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
+		return;
+	if (brcms_c_tx(wlc, sdu))
+		dev_kfree_skb_any(sdu);
+}
+
+int
+brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p)
+{
+	struct dma_pub *dma = wlc->hw->di[fifo];
+	int ret;
+	u16 queue;
+
+	ret = dma_txfast(wlc, dma, p);
+	if (ret	< 0)
 		wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
+
+	/*
+	 * Stop queue if DMA ring is full. Reserve some free descriptors,
+	 * as we sometimes receive a frame from mac80211 after the queues
+	 * are stopped.
+	 */
+	queue = skb_get_queue_mapping(p);
+	if (dma->txavail <= TX_HEADROOM && fifo < TX_BCMC_FIFO &&
+	    !ieee80211_queue_stopped(wlc->pub->ieee_hw, queue))
+		ieee80211_stop_queue(wlc->pub->ieee_hw, queue);
+
+	return ret;
 }
 
 u32
@@ -7423,19 +7012,6 @@
 	return rts_rspec;
 }
 
-void
-brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
-{
-	wlc->core->txpktpend[fifo] -= txpktpend;
-	BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
-	       wlc->core->txpktpend[fifo]);
-
-	/* There is more room; mark precedences related to this FIFO sendable */
-	wlc->tx_prec_map |= wlc->fifo2prec_map[fifo];
-
-	/* figure out which bsscfg is being worked on... */
-}
-
 /* Update beacon listen interval in shared memory */
 static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
 {
@@ -7508,7 +7084,7 @@
 
 	/* fill in TSF and flag its presence */
 	rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 
 	channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
@@ -7571,7 +7147,8 @@
 			rx_status->rate_idx = 11;
 			break;
 		default:
-			wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__);
+			brcms_err(wlc->hw->d11core,
+				  "%s: Unknown rate\n", __func__);
 		}
 
 		/*
@@ -7590,7 +7167,7 @@
 		} else if (is_ofdm_rate(rspec)) {
 			rx_status->flag |= RX_FLAG_SHORTPRE;
 		} else {
-			wiphy_err(wlc->wiphy, "%s: Unknown modulation\n",
+			brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n",
 				  __func__);
 		}
 	}
@@ -7600,12 +7177,12 @@
 
 	if (rxh->RxStatus1 & RXS_DECERR) {
 		rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
-		wiphy_err(wlc->wiphy, "%s:  RX_FLAG_FAILED_PLCP_CRC\n",
+		brcms_err(wlc->hw->d11core, "%s:  RX_FLAG_FAILED_PLCP_CRC\n",
 			  __func__);
 	}
 	if (rxh->RxStatus1 & RXS_FCSERR) {
 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-		wiphy_err(wlc->wiphy, "%s:  RX_FLAG_FAILED_FCS_CRC\n",
+		brcms_err(wlc->hw->d11core, "%s:  RX_FLAG_FAILED_FCS_CRC\n",
 			  __func__);
 	}
 }
@@ -7649,9 +7226,6 @@
 {
 	uint nsyms, len = 0, kNdps;
 
-	BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
-		 wlc->pub->unit, rspec2rate(ratespec), mac_len);
-
 	if (is_mcs_rate(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
 		int tot_streams = (mcs_2_txstreams(mcs) + 1) +
@@ -7883,35 +7457,6 @@
 		brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
 }
 
-/* prepares pdu for transmission. returns BCM error codes */
-int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
-{
-	uint fifo;
-	struct d11txh *txh;
-	struct ieee80211_hdr *h;
-	struct scb *scb;
-
-	txh = (struct d11txh *) (pdu->data);
-	h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
-
-	/* get the pkt queue info. This was put at brcms_c_sendctl or
-	 * brcms_c_send for PDU */
-	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
-
-	scb = NULL;
-
-	*fifop = fifo;
-
-	/* return if insufficient dma resources */
-	if (*wlc->core->txavail[fifo] < MAX_DMA_SEGS) {
-		/* Mark precedences related to this FIFO, unsendable */
-		/* A fifo is full. Clear precedences related to that FIFO */
-		wlc->tx_prec_map &= ~(wlc->fifo2prec_map[fifo]);
-		return -EBUSY;
-	}
-	return 0;
-}
-
 int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
 			   uint *blocks)
 {
@@ -7977,13 +7522,15 @@
 void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
 {
 	int timeout = 20;
+	int i;
 
-	/* flush packet queue when requested */
-	if (drop)
-		brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
+	/* Kick DMA to send any pending AMPDU */
+	for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
+		if (wlc->hw->di[i])
+			dma_txflush(wlc->hw->di[i]);
 
 	/* wait for queue and DMA fifos to run dry */
-	while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
+	while (brcms_txpktpendtot(wlc) > 0) {
 		brcms_msleep(wlc->wl, 1);
 
 		if (--timeout == 0)
@@ -8032,8 +7579,6 @@
 	uint len;
 	bool is_amsdu;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
 	/* frame starts with rxhdr */
 	rxh = (struct d11rxhdr *) (p->data);
 
@@ -8043,8 +7588,9 @@
 	/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
 	if (rxh->RxStatus1 & RXS_PBPRES) {
 		if (p->len < 2) {
-			wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of "
-				  "len %d\n", wlc->pub->unit, p->len);
+			brcms_err(wlc->hw->d11core,
+				  "wl%d: recv: rcvd runt of len %d\n",
+				  wlc->pub->unit, p->len);
 			goto toss;
 		}
 		skb_pull(p, 2);
@@ -8089,7 +7635,6 @@
 	uint n = 0;
 	uint bound_limit = bound ? RXBND : -1;
 
-	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 	skb_queue_head_init(&recv_frames);
 
 	/* gather received frames */
@@ -8140,10 +7685,9 @@
 	u32 macintstatus;
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	struct bcma_device *core = wlc_hw->d11core;
-	struct wiphy *wiphy = wlc->wiphy;
 
 	if (brcms_deviceremoved(wlc)) {
-		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+		brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
 		brcms_down(wlc->wl);
 		return false;
@@ -8153,8 +7697,8 @@
 	macintstatus = wlc->macintstatus;
 	wlc->macintstatus = 0;
 
-	BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n",
-	       wlc_hw->unit, macintstatus);
+	brcms_dbg_int(core, "wl%d: macintstatus 0x%x\n",
+		      wlc_hw->unit, macintstatus);
 
 	WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
 
@@ -8164,7 +7708,7 @@
 		if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
 			wlc->macintstatus |= MI_TFS;
 		if (fatal) {
-			wiphy_err(wiphy, "MI_TFS: fatal\n");
+			brcms_err(core, "MI_TFS: fatal\n");
 			goto fatal;
 		}
 	}
@@ -8174,7 +7718,7 @@
 
 	/* ATIM window end */
 	if (macintstatus & MI_ATIMWINEND) {
-		BCMMSG(wlc->wiphy, "end of ATIM window\n");
+		brcms_dbg_info(core, "end of ATIM window\n");
 		bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
 		wlc->qvalid = 0;
 	}
@@ -8192,7 +7736,7 @@
 		wlc_phy_noise_sample_intr(wlc_hw->band->pi);
 
 	if (macintstatus & MI_GP0) {
-		wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
+		brcms_err(core, "wl%d: PSM microcode watchdog fired at %d "
 			  "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
 
 		printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
@@ -8206,15 +7750,11 @@
 		bcma_write32(core, D11REGOFFS(gptimer), 0);
 
 	if (macintstatus & MI_RFDISABLE) {
-		BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
-		       " RF Disable Input\n", wlc_hw->unit);
+		brcms_dbg_info(core, "wl%d: BMAC Detected a change on the"
+			       " RF Disable Input\n", wlc_hw->unit);
 		brcms_rfkill_set_hw_state(wlc->wl);
 	}
 
-	/* send any enq'd tx packets. Just makes sure to jump start tx */
-	if (!pktq_empty(&wlc->pkt_queue->q))
-		brcms_c_send_q(wlc);
-
 	/* it isn't done and needs to be resched if macintstatus is non-zero */
 	return wlc->macintstatus != 0;
 
@@ -8229,7 +7769,7 @@
 	struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
 	u16 chanspec;
 
-	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+	brcms_dbg_info(core, "wl%d\n", wlc->pub->unit);
 
 	chanspec = ch20mhz_chspec(ch->hw_value);
 
@@ -8286,9 +7826,6 @@
 	bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
 	brcms_c_edcf_setparams(wlc, false);
 
-	/* Init precedence maps for empty FIFOs */
-	brcms_c_tx_prec_map_init(wlc);
-
 	/* read the ucode version if we have not yet done so */
 	if (wlc->ucode_rev == 0) {
 		wlc->ucode_rev =
@@ -8303,9 +7840,6 @@
 	if (mute_tx)
 		brcms_b_mute(wlc->hw, true);
 
-	/* clear tx flow control */
-	brcms_c_txflowcontrol_reset(wlc);
-
 	/* enable the RF Disable Delay timer */
 	bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
 
@@ -8464,15 +7998,6 @@
 	 * Complete the wlc default state initializations..
 	 */
 
-	/* allocate our initial queue */
-	wlc->pkt_queue = brcms_c_txq_alloc(wlc);
-	if (wlc->pkt_queue == NULL) {
-		wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
-			  unit, __func__);
-		err = 100;
-		goto fail;
-	}
-
 	wlc->bsscfg->wlc = wlc;
 
 	wlc->mimoft = FT_HT;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index 8debc74..fb44774 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -101,9 +101,6 @@
 
 #define DATA_BLOCK_TX_SUPR	(1 << 4)
 
-/* 802.1D Priority to TX FIFO number for wme */
-extern const u8 prio2fifo[];
-
 /* Ucode MCTL_WAKE override bits */
 #define BRCMS_WAKE_OVERRIDE_CLKCTL	0x01
 #define BRCMS_WAKE_OVERRIDE_PHYREG	0x02
@@ -242,7 +239,6 @@
 
 	/* fifo */
 	uint *txavail[NFIFO];	/* # tx descriptors available */
-	s16 txpktpend[NFIFO];	/* tx admission control */
 
 	struct macstat *macstat_snapshot;	/* mac hw prev read values */
 };
@@ -382,19 +378,6 @@
 				 */
 };
 
-/* TX Queue information
- *
- * Each flow of traffic out of the device has a TX Queue with independent
- * flow control. Several interfaces may be associated with a single TX Queue
- * if they belong to the same flow of traffic from the device. For multi-channel
- * operation there are independent TX Queues for each channel.
- */
-struct brcms_txq_info {
-	struct brcms_txq_info *next;
-	struct pktq q;
-	uint stopped;		/* tx flow control bits */
-};
-
 /*
  * Principal common driver data structure.
  *
@@ -435,11 +418,8 @@
  * WDlast: last time wlc_watchdog() was called.
  * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
  * wme_retries: per-AC retry limits.
- * tx_prec_map: Precedence map based on HW FIFO space.
- * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
  * bsscfg: set of BSS configurations, idx 0 is default and always valid.
  * cfg: the primary bsscfg (can be AP or STA).
- * tx_queues: common TX Queue list.
  * modulecb:
  * mimoft: SIGN or 11N.
  * cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode.
@@ -469,7 +449,6 @@
  * tempsense_lasttime;
  * tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM.
  * tx_duty_cycle_cck: maximum allowed duty cycle for CCK.
- * pkt_queue: txq for transmit packets.
  * wiphy:
  * pri_scb: primary Station Control Block
  */
@@ -533,14 +512,9 @@
 	u16 edcf_txop[IEEE80211_NUM_ACS];
 
 	u16 wme_retries[IEEE80211_NUM_ACS];
-	u16 tx_prec_map;
-	u16 fifo2prec_map[NFIFO];
 
 	struct brcms_bss_cfg *bsscfg;
 
-	/* tx queue */
-	struct brcms_txq_info *tx_queues;
-
 	struct modulecb *modulecb;
 
 	u8 mimoft;
@@ -585,7 +559,6 @@
 	u16 tx_duty_cycle_ofdm;
 	u16 tx_duty_cycle_cck;
 
-	struct brcms_txq_info *pkt_queue;
 	struct wiphy *wiphy;
 	struct scb pri_scb;
 };
@@ -637,30 +610,13 @@
 	struct brcms_bss_info *current_bss;
 };
 
-extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
-			   struct sk_buff *p,
-			   bool commit, s8 txpktpend);
-extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
-				    s8 txpktpend);
-extern void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
-			    struct sk_buff *sdu, uint prec);
-extern void brcms_c_print_txstatus(struct tx_status *txs);
+extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
+			   struct sk_buff *p);
 extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
 		   uint *blocks);
 
-#if defined(DEBUG)
-extern void brcms_c_print_txdesc(struct d11txh *txh);
-#else
-static inline void brcms_c_print_txdesc(struct d11txh *txh)
-{
-}
-#endif
-
 extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
 extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern void brcms_c_send_q(struct brcms_c_info *wlc);
-extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
-			    uint *fifo);
 extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
 				uint mac_len);
 extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 5855f4f..0148dec 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -200,43 +200,6 @@
 /* WL11N Support */
 #define AMPDU_AGG_HOST	1
 
-/* pri is priority encoded in the packet. This maps the Packet priority to
- * enqueue precedence as defined in wlc_prec_map
- */
-extern const u8 wlc_prio2prec_map[];
-#define BRCMS_PRIO_TO_PREC(pri)	wlc_prio2prec_map[(pri) & 7]
-
-#define	BRCMS_PREC_COUNT	16	/* Max precedence level implemented */
-
-/* Mask to describe all precedence levels */
-#define BRCMS_PREC_BMP_ALL		MAXBITVAL(BRCMS_PREC_COUNT)
-
-/*
- * This maps priority to one precedence higher - Used by PS-Poll response
- * packets to simulate enqueue-at-head operation, but still maintain the
- * order on the queue
- */
-#define BRCMS_PRIO_TO_HI_PREC(pri)	min(BRCMS_PRIO_TO_PREC(pri) + 1,\
-					    BRCMS_PREC_COUNT - 1)
-
-/* Define a bitmap of precedences comprised by each AC */
-#define BRCMS_PREC_BMP_AC_BE	(NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) |	\
-			NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) |	\
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
-#define BRCMS_PREC_BMP_AC_BK	(NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) |	\
-			NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) |	\
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
-#define BRCMS_PREC_BMP_AC_VI	(NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) |	\
-			NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) |	\
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
-#define BRCMS_PREC_BMP_AC_VO	(NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) |	\
-			NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) |	\
-			NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
-
 /* network protection config */
 #define	BRCMS_PROT_G_SPEC		1	/* SPEC g protection */
 #define	BRCMS_PROT_G_OVR		2	/* SPEC g prot override */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
index ed1d1aa..dd91627 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -23,6 +23,7 @@
 #include "channel.h"
 #include "main.h"
 #include "stf.h"
+#include "debug.h"
 
 #define MIN_SPATIAL_EXPANSION	0
 #define MAX_SPATIAL_EXPANSION	1
@@ -160,8 +161,8 @@
 static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
 				  u8 core_mask)
 {
-	BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
-		 wlc->pub->unit, Nsts, core_mask);
+	brcms_dbg_ht(wlc->hw->d11core, "wl%d: Nsts %d core_mask %x\n",
+		     wlc->pub->unit, Nsts, core_mask);
 
 	if (hweight8(core_mask) > wlc->stf->txstreams)
 		core_mask = 0;
@@ -194,7 +195,8 @@
 	int i;
 	u8 core_mask = 0;
 
-	BCMMSG(wlc->wiphy, "wl%d: val %x\n", wlc->pub->unit, val);
+	brcms_dbg_ht(wlc->hw->d11core, "wl%d: val %x\n", wlc->pub->unit,
+		     val);
 
 	wlc->stf->spatial_policy = (s8) val;
 	for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index e11ae83..ae1f3ad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -246,7 +246,7 @@
 
 #define BCMMSG(dev, fmt, args...)		\
 do {						\
-	if (brcm_msg_level & LOG_TRACE_VAL)	\
+	if (brcm_msg_level & BRCM_DL_INFO)	\
 		wiphy_err(dev, "%s: " fmt, __func__, ##args);	\
 } while (0)
 
@@ -281,7 +281,6 @@
 struct brcms_info;
 struct brcms_c_info;
 struct brcms_hardware;
-struct brcms_txq_info;
 struct brcms_band;
 struct dma_pub;
 struct si_pub;
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index f0d8c04..fb7cbcf 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -78,9 +78,14 @@
 #define PM_OFF	0
 #define PM_MAX	1
 
-/* Message levels */
-#define LOG_ERROR_VAL		0x00000001
-#define LOG_TRACE_VAL		0x00000002
+/* Debug levels */
+#define BRCM_DL_INFO		0x00000001
+#define BRCM_DL_MAC80211	0x00000002
+#define BRCM_DL_RX		0x00000004
+#define BRCM_DL_TX		0x00000008
+#define BRCM_DL_INT		0x00000010
+#define BRCM_DL_DMA		0x00000020
+#define BRCM_DL_HT		0x00000040
 
 #define PM_OFF	0
 #define PM_MAX	1
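
For illustration, a minimal standalone sketch (not part of this patch; the brcm_dbg macro name and the use of printf are assumptions) of how a debug print can be gated on the new BRCM_DL_* level bits, in the same way the BCMMSG macro in types.h above now tests brcm_msg_level & BRCM_DL_INFO:

#include <stdio.h>

#define BRCM_DL_INFO	0x00000001
#define BRCM_DL_TX	0x00000008

/* In the driver this mask would normally come from a module parameter. */
static unsigned int brcm_msg_level = BRCM_DL_INFO;

#define brcm_dbg(level, fmt, ...)					\
do {									\
	if (brcm_msg_level & (level))					\
		printf("%s: " fmt, __func__, ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	brcm_dbg(BRCM_DL_INFO, "info message %d\n", 1);	/* printed */
	brcm_dbg(BRCM_DL_TX, "tx message %d\n", 2);	/* suppressed: TX bit clear */
	return 0;
}
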
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index df7050a..d39e3e2 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -415,7 +415,7 @@
 			ssid = pos + 2;
 			ssid_len = pos[1];
 			break;
-		case WLAN_EID_GENERIC:
+		case WLAN_EID_VENDOR_SPECIFIC:
 			if (pos[1] >= 4 &&
 			    pos[2] == 0x00 && pos[3] == 0x50 &&
 			    pos[4] == 0xf2 && pos[5] == 1) {
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 29b8fa1..46938bc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1788,10 +1788,7 @@
 	}
 
 	/* Initialize the geo */
-	if (libipw_set_geo(priv->ieee, &ipw_geos[0])) {
-		printk(KERN_WARNING DRV_NAME "Could not set geo\n");
-		return 0;
-	}
+	libipw_set_geo(priv->ieee, &ipw_geos[0]);
 	priv->ieee->freq_band = LIBIPW_24GHZ_BAND;
 
 	lock = LOCK_NONE;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 768bf61..482f505 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11269,10 +11269,31 @@
 	 }
 };
 
+static void ipw_set_geo(struct ipw_priv *priv)
+{
+	int j;
+
+	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
+		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
+			    ipw_geos[j].name, 3))
+			break;
+	}
+
+	if (j == ARRAY_SIZE(ipw_geos)) {
+		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
+		j = 0;
+	}
+
+	libipw_set_geo(priv->ieee, &ipw_geos[j]);
+}
+
 #define MAX_HW_RESTARTS 5
 static int ipw_up(struct ipw_priv *priv)
 {
-	int rc, i, j;
+	int rc, i;
 
 	/* Age scan list entries found before suspend */
 	if (priv->suspend_time) {
@@ -11310,22 +11331,7 @@
 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
 		memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
 
-		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
-			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
-				    ipw_geos[j].name, 3))
-				break;
-		}
-		if (j == ARRAY_SIZE(ipw_geos)) {
-			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
-				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
-				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
-				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
-			j = 0;
-		}
-		if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
-			IPW_WARNING("Could not set geography.");
-			return 0;
-		}
+		ipw_set_geo(priv);
 
 		if (priv->status & STATUS_RF_KILL_SW) {
 			IPW_WARNING("Radio disabled by module parameter.\n");
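
The new ipw_set_geo() helper above factors the country-code lookup out of ipw_up(); the underlying pattern is a table scan with a fall-back to entry 0 when the EEPROM SKU is unknown. A standalone sketch of that pattern (hypothetical table contents and names, not the driver's actual geo data):

#include <stdio.h>
#include <string.h>

struct geo_entry { char name[4]; int num_channels; };

static const struct geo_entry geos[] = {
	{ "---", 11 },		/* entry 0: restricted default */
	{ "ABC", 13 },
	{ "XYZ", 14 },
};

/* Return the matching entry, or entry 0 when the 3-char code is unknown. */
static const struct geo_entry *find_geo(const char *code)
{
	size_t j;

	for (j = 0; j < sizeof(geos) / sizeof(geos[0]); j++)
		if (!memcmp(code, geos[j].name, 3))
			return &geos[j];

	fprintf(stderr, "SKU [%.3s] not recognized, using default\n", code);
	return &geos[0];
}

int main(void)
{
	printf("%d channels\n", find_geo("XYZ")->num_channels);
	printf("%d channels\n", find_geo("QQQ")->num_channels);
	return 0;
}
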
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 0b22fb4..6eede52 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -978,7 +978,7 @@
 /* libipw_geo.c */
 extern const struct libipw_geo *libipw_get_geo(struct libipw_device
 						     *ieee);
-extern int libipw_set_geo(struct libipw_device *ieee,
+extern void libipw_set_geo(struct libipw_device *ieee,
 			     const struct libipw_geo *geo);
 
 extern int libipw_is_valid_channel(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c
index c9fe3c9..218f2a3 100644
--- a/drivers/net/wireless/ipw2x00/libipw_geo.c
+++ b/drivers/net/wireless/ipw2x00/libipw_geo.c
@@ -132,7 +132,7 @@
 	return 0;
 }
 
-int libipw_set_geo(struct libipw_device *ieee,
+void libipw_set_geo(struct libipw_device *ieee,
 		      const struct libipw_geo *geo)
 {
 	memcpy(ieee->geo.name, geo->name, 3);
@@ -143,7 +143,6 @@
 	       sizeof(struct libipw_channel));
 	memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
 	       sizeof(struct libipw_channel));
-	return 0;
 }
 
 const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee)
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 02e0579..95a1ca1 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1108,7 +1108,7 @@
 		MFIE_STRING(ERP_INFO);
 		MFIE_STRING(RSN);
 		MFIE_STRING(EXT_SUPP_RATES);
-		MFIE_STRING(GENERIC);
+		MFIE_STRING(VENDOR_SPECIFIC);
 		MFIE_STRING(QOS_PARAMETER);
 	default:
 		return "UNKNOWN";
@@ -1248,8 +1248,8 @@
 			LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n");
 			break;
 
-		case WLAN_EID_GENERIC:
-			LIBIPW_DEBUG_MGMT("WLAN_EID_GENERIC: %d bytes\n",
+		case WLAN_EID_VENDOR_SPECIFIC:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_VENDOR_SPECIFIC: %d bytes\n",
 					     info_element->len);
 			if (!libipw_parse_qos_info_param_IE(info_element,
 							       network))
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index 87e5398..e0b9d7f 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -516,7 +516,7 @@
 il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
 {
 	struct ieee80211_hdr *header;
-	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rx_status rx_status = {};
 	struct il_rx_pkt *pkt = rxb_addr(rxb);
 	struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
 	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
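
The rx_status declarations here, in 4965-mac.c and in dvm/rx.c all switch from an uninitialized stack variable to "= {}", so any field the RX handler never assigns reaches mac80211 as zero instead of stack garbage. A tiny standalone illustration (hypothetical struct, not the real ieee80211_rx_status):

#include <stdio.h>

struct rx_status { int signal; unsigned int flag; int antenna; };

int main(void)
{
	/* Without "= {}", fields never assigned below would hold stack
	 * garbage; that is what the patch avoids for ieee80211_rx_status. */
	struct rx_status rx_status = {};	/* every field starts at 0 */

	rx_status.signal = -42;			/* only one field is set */

	printf("signal=%d flag=%#x antenna=%d\n",
	       rx_status.signal, rx_status.flag, rx_status.antenna);
	return 0;
}
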
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index eac4dc8..07ffa57 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -613,7 +613,7 @@
 il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
 {
 	struct ieee80211_hdr *header;
-	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rx_status rx_status = {};
 	struct il_rx_pkt *pkt = rxb_addr(rxb);
 	struct il_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
@@ -686,7 +686,7 @@
 
 	/* TSF isn't reliable. In order to allow smooth user experience,
 	 * this W/A doesn't propagate it to the mac80211 */
-	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+	/*rx_status.flag |= RX_FLAG_MACTIME_START; */
 
 	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
 
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index b4bb813..e254cba 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -2919,9 +2919,8 @@
 #define IL_DBG(level, fmt, args...)					\
 do {									\
 	if (il_get_debug_level(il) & level)				\
-		dev_printk(KERN_ERR, &il->hw->wiphy->dev,		\
-			 "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
-			__func__ , ## args);				\
+		dev_err(&il->hw->wiphy->dev, "%c %s " fmt,		\
+			in_interrupt() ? 'I' : 'U', __func__ , ##args); \
 } while (0)
 
 #define il_print_hex_dump(il, level, p, len)				\
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 727fbb5..5cf4323 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -133,12 +133,3 @@
 	  support when it is loaded.
 
 	  Say Y only if you want to experiment with P2P.
-
-config IWLWIFI_EXPERIMENTAL_MFP
-	bool "support MFP (802.11w) even if uCode doesn't advertise"
-	depends on IWLWIFI
-	help
-	  This option enables experimental MFP (802.11W) support
-	  even if the microcode doesn't advertise it.
-
-	  Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 75e12f2..33b3ad2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -176,8 +176,8 @@
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
 void iwlagn_temperature(struct iwl_priv *priv);
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
+int iwlagn_txfifo_flush(struct iwl_priv *priv);
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
 int iwl_send_statistics_request(struct iwl_priv *priv,
 				u8 flags, bool clear);
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 01128c9..71ab76b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -986,8 +986,7 @@
 
 #define IWL_AGG_TX_QUEUE_MSK		cpu_to_le32(0xffc00)
 
-#define IWL_DROP_SINGLE		0
-#define IWL_DROP_ALL		(BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
+#define IWL_DROP_ALL			BIT(1)
 
 /*
  * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1004,14 +1003,14 @@
  * the flush operation ends when both the scheduler DMA done and TXFIFO empty
  * are set.
  *
- * @fifo_control: bit mask for which queues to flush
+ * @queue_control: bit mask for which queues to flush
  * @flush_control: flush controls
  *	0: Dump single MSDU
  *	1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
  *	2: Dump all FIFO
  */
 struct iwl_txfifo_flush_cmd {
-	__le32 fifo_control;
+	__le32 queue_control;
 	__le16 flush_control;
 	__le16 reserved;
 } __packed;
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 1a98fa3..769a08b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2101,7 +2101,7 @@
 	if (iwl_is_rfkill(priv))
 		return -EFAULT;
 
-	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+	iwlagn_dev_txfifo_flush(priv);
 
 	return count;
 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 8141f91..29c571a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -789,7 +789,6 @@
 	/* remain-on-channel offload support */
 	struct ieee80211_channel *hw_roc_channel;
 	struct delayed_work hw_roc_disable_work;
-	enum nl80211_channel_type hw_roc_chantype;
 	int hw_roc_duration;
 	bool hw_roc_setup, hw_roc_start_notified;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index bef88c1..7e59be4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -136,7 +136,7 @@
  *  1. acquire mutex before calling
  *  2. make sure rf is on and not in exit state
  */
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+int iwlagn_txfifo_flush(struct iwl_priv *priv)
 {
 	struct iwl_txfifo_flush_cmd flush_cmd;
 	struct iwl_host_cmd cmd = {
@@ -146,35 +146,34 @@
 		.data = { &flush_cmd, },
 	};
 
-	might_sleep();
-
 	memset(&flush_cmd, 0, sizeof(flush_cmd));
-	if (flush_control & BIT(IWL_RXON_CTX_BSS))
-		flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
-				 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
-				 IWL_SCD_MGMT_MSK;
-	if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
-	    (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
-		flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
-				IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
-				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
-				IWL_PAN_SCD_MULTICAST_MSK;
+
+	flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+				  IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+				  IWL_SCD_MGMT_MSK;
+	if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+		flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
+					   IWL_PAN_SCD_VI_MSK |
+					   IWL_PAN_SCD_BE_MSK |
+					   IWL_PAN_SCD_BK_MSK |
+					   IWL_PAN_SCD_MGMT_MSK |
+					   IWL_PAN_SCD_MULTICAST_MSK;
 
 	if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
-		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
+		flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
 
-	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
-		       flush_cmd.fifo_control);
-	flush_cmd.flush_control = cpu_to_le16(flush_control);
+	IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
+		       flush_cmd.queue_control);
+	flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
 
 	return iwl_dvm_send_cmd(priv, &cmd);
 }
 
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
 {
 	mutex_lock(&priv->mutex);
 	ieee80211_stop_queues(priv->hw);
-	if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+	if (iwlagn_txfifo_flush(priv)) {
 		IWL_ERR(priv, "flush request fail\n");
 		goto done;
 	}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2d9eee9..fb959b0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -168,10 +168,13 @@
 		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
-#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
-	/* enable 11w if the uCode advertise */
-	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
+	/*
+	 * Enable 11w if advertised by firmware and software crypto
+	 * is not enabled (as the firmware will interpret some mgmt
+	 * packets, so enabling it with software crypto isn't safe)
+	 */
+	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+	    !iwlwifi_mod_params.sw_crypto)
 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
 	hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -1019,7 +1022,7 @@
 	 */
 	if (drop) {
 		IWL_DEBUG_MAC80211(priv, "send flush command\n");
-		if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+		if (iwlagn_txfifo_flush(priv)) {
 			IWL_ERR(priv, "flush request fail\n");
 			goto done;
 		}
@@ -1032,8 +1035,8 @@
 }
 
 static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
 				     struct ieee80211_channel *channel,
-				     enum nl80211_channel_type channel_type,
 				     int duration)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1065,7 +1068,6 @@
 	}
 
 	priv->hw_roc_channel = channel;
-	priv->hw_roc_chantype = channel_type;
 	/* convert from ms to TU */
 	priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
 	priv->hw_roc_start_notified = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 408132c..e3a07c9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -511,7 +511,7 @@
 		return;
 
 	IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
-	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+	iwlagn_dev_txfifo_flush(priv);
 }
 
 /*
@@ -1191,10 +1191,6 @@
 
 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 {
-	u16 radio_cfg;
-
-	priv->eeprom_data->sku = priv->eeprom_data->sku;
-
 	if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
 	    !priv->cfg->ht_params) {
 		IWL_ERR(priv, "Invalid 11n configuration\n");
@@ -1206,9 +1202,7 @@
 		return -EINVAL;
 	}
 
-	IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
-
-	radio_cfg = priv->eeprom_data->radio_cfg;
+	IWL_DEBUG_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
 
 	priv->hw_params.tx_chains_num =
 		num_of_ant(priv->eeprom_data->valid_tx_ant);
@@ -1218,9 +1212,9 @@
 		priv->hw_params.rx_chains_num =
 			num_of_ant(priv->eeprom_data->valid_rx_ant);
 
-	IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
-		 priv->eeprom_data->valid_tx_ant,
-		 priv->eeprom_data->valid_rx_ant);
+	IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+		       priv->eeprom_data->valid_tx_ant,
+		       priv->eeprom_data->valid_rx_ant);
 
 	return 0;
 }
@@ -1235,7 +1229,7 @@
 	struct iwl_op_mode *op_mode;
 	u16 num_mac;
 	u32 ucode_flags;
-	struct iwl_trans_config trans_cfg;
+	struct iwl_trans_config trans_cfg = {};
 	static const u8 no_reclaim_cmds[] = {
 		REPLY_RX_PHY_CMD,
 		REPLY_RX_MPDU_CMD,
@@ -1334,6 +1328,9 @@
 	/* Configure transport layer */
 	iwl_trans_configure(priv->trans, &trans_cfg);
 
+	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
 	/* At this point both hw and priv are allocated. */
 
 	SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
@@ -1508,10 +1505,6 @@
 
 	iwl_tt_exit(priv);
 
-	/*This will stop the queues, move the device to low power state */
-	priv->ucode_loaded = false;
-	iwl_trans_stop_device(priv->trans);
-
 	kfree(priv->eeprom_blob);
 	iwl_free_eeprom_data(priv->eeprom_data);
 
@@ -1927,8 +1920,6 @@
 	 * commands by clearing the ready bit */
 	clear_bit(STATUS_READY, &priv->status);
 
-	wake_up(&priv->trans->wait_command_queue);
-
 	if (!ondemand) {
 		/*
 		 * If firmware keep reloading, then it indicate something
@@ -2152,8 +2143,6 @@
 {
 
 	int ret;
-	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-	pr_info(DRV_COPYRIGHT "\n");
 
 	ret = iwlagn_rate_control_register();
 	if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 5a9c325..cac4f37 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -631,8 +631,6 @@
 	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 			test_bit(STATUS_RF_KILL_HW, &priv->status));
-	else
-		wake_up(&priv->trans->wait_command_queue);
 	return 0;
 }
 
@@ -901,7 +899,7 @@
 			    struct iwl_device_cmd *cmd)
 {
 	struct ieee80211_hdr *header;
-	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rx_status rx_status = {};
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
@@ -951,7 +949,7 @@
 
 	/* TSF isn't reliable. In order to allow smooth user experience,
 	 * this W/A doesn't propagate it to the mac80211 */
-	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+	/*rx_status.flag |= RX_FLAG_MACTIME_START;*/
 
 	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index f5ca73a..4ae031f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1075,14 +1075,11 @@
 
 static void iwlagn_set_tx_status(struct iwl_priv *priv,
 				 struct ieee80211_tx_info *info,
-				 struct iwlagn_tx_resp *tx_resp,
-				 bool is_agg)
+				 struct iwlagn_tx_resp *tx_resp)
 {
-	u16  status = le16_to_cpu(tx_resp->status.status);
+	u16 status = le16_to_cpu(tx_resp->status.status);
 
 	info->status.rates[0].count = tx_resp->failure_frame + 1;
-	if (is_agg)
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 	info->flags |= iwl_tx_status_to_mac80211(status);
 	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
 				    info);
@@ -1231,7 +1228,7 @@
 			if (is_agg && !iwl_is_tx_success(status))
 				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
-				     tx_resp, is_agg);
+				     tx_resp);
 			if (!is_agg)
 				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 2cb1efb..95e6d33 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -254,7 +254,7 @@
 	int ret;
 	int i;
 
-	iwl_trans_fw_alive(priv->trans);
+	iwl_trans_fw_alive(priv->trans, 0);
 
 	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
 	    priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 87f465a..196266a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -150,7 +150,7 @@
 struct iwl_base_params {
 	int eeprom_size;
 	int num_of_queues;	/* def: HW dependent */
-	/* for iwl_apm_init() */
+	/* for iwl_pcie_apm_init() */
 	u32 pll_cfg_val;
 
 	const u16 max_ll_items;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 59a5f78..b3fde5f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -25,6 +25,39 @@
  *****************************************************************************/
 
 #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+#if !defined(__IWLWIFI_DEVICE_TRACE)
+static inline bool iwl_trace_data(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+
+	if (ieee80211_is_data(hdr->frame_control))
+		return skb->protocol != cpu_to_be16(ETH_P_PAE);
+	return false;
+}
+
+static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+				      void *rxbuf, size_t len)
+{
+	struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
+	struct ieee80211_hdr *hdr;
+
+	if (cmd->cmd != trans->rx_mpdu_cmd)
+		return len;
+
+	hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
+			trans->rx_mpdu_cmd_hdr_size);
+	if (!ieee80211_is_data(hdr->frame_control))
+		return len;
+	/* maybe try to identify EAPOL frames? */
+	return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+		ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
 #define __IWLWIFI_DEVICE_TRACE
 
 #include <linux/tracepoint.h>
@@ -235,6 +268,48 @@
 );
 
 #undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+	TP_PROTO(const struct device *dev,
+		 struct sk_buff *skb,
+		 void *data, size_t data_len),
+	TP_ARGS(dev, skb, data, data_len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		if (iwl_trace_data(skb))
+			memcpy(__get_dynamic_array(data), data, data_len);
+	),
+	TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+	TP_PROTO(const struct device *dev,
+		 const struct iwl_trans *trans,
+		 void *rxbuf, size_t len),
+	TP_ARGS(dev, trans, rxbuf, len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__dynamic_array(u8, data,
+				len - iwl_rx_trace_len(trans, rxbuf, len))
+	),
+	TP_fast_assign(
+		size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+		DEV_ASSIGN;
+		if (offs < len)
+			memcpy(__get_dynamic_array(data),
+			       ((u8 *)rxbuf) + offs, len - offs);
+	),
+	TP_printk("[%s] RX frame data", __get_str(dev))
+);
+
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi
 
 TRACE_EVENT(iwlwifi_dev_hcmd,
@@ -270,25 +345,28 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_rx,
-	TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
-	TP_ARGS(dev, rxbuf, len),
+	TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+		 void *rxbuf, size_t len),
+	TP_ARGS(dev, trans, rxbuf, len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-		__dynamic_array(u8, rxbuf, len)
+		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
-		memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+		memcpy(__get_dynamic_array(rxbuf), rxbuf,
+		       iwl_rx_trace_len(trans, rxbuf, len));
 	),
 	TP_printk("[%s] RX cmd %#.2x",
 		  __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
-	TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
+	TP_PROTO(const struct device *dev, struct sk_buff *skb,
+		 void *tfd, size_t tfdlen,
 		 void *buf0, size_t buf0_len,
 		 void *buf1, size_t buf1_len),
-	TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 
@@ -301,14 +379,15 @@
 		 * for the possible padding).
 		 */
 		__dynamic_array(u8, buf0, buf0_len)
-		__dynamic_array(u8, buf1, buf1_len)
+		__dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
 		__entry->framelen = buf0_len + buf1_len;
 		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
 		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
-		memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+		if (!iwl_trace_data(skb))
+			memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
 	),
 	TP_printk("[%s] TX %.2x (%zu bytes)",
 		  __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index f10170f..4a9dc96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -889,8 +889,8 @@
 {
 	if (data->eeprom_version >= trans->cfg->eeprom_ver ||
 	    data->calib_version >= trans->cfg->eeprom_calib_ver) {
-		IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
-			 data->eeprom_version, data->calib_version);
+		IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+			       data->eeprom_version, data->calib_version);
 		return 0;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 8060466..ec48563 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -267,7 +267,7 @@
 
 #define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
 #define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
-#define RX_RB_TIMEOUT	(0x10)
+#define RX_RB_TIMEOUT	(0x11)
 
 #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
 #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3dfebfb..54c41b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -327,11 +327,11 @@
 EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
 
 int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords)
+			       const void *buf, int dwords)
 {
 	unsigned long flags;
 	int offs, result = 0;
-	u32 *vals = buf;
+	const u32 *vals = buf;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
 	if (likely(iwl_grab_nic_access(trans))) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 50d3819..e1aa69f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -87,7 +87,7 @@
 	} while (0)
 
 int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords);
+			       const void *buf, int dwords);
 
 u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
 int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 9253ef1..c3a4bb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -213,6 +213,9 @@
 #define SCD_CONTEXT_QUEUE_OFFSET(x)\
 	(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
 
+#define SCD_TX_STTS_QUEUE_OFFSET(x)\
+	(SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
 #define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
 	((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index ff11542..e378ea6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -221,14 +221,21 @@
 /**
  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
  *
- * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
  *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
  *	the transport layer not to copy the command, but to map the existing
- *	buffer. This can save memcpy and is worth with very big comamnds.
+ *	buffer (that is passed in) instead. This saves the memcpy and allows
+ *	commands that are bigger than the fixed buffer to be submitted.
+ *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ *	chunk internally and free it again after the command completes. This
+ *	can (currently) be used only once per command.
+ *	Note that a TFD entry after a DUP one cannot be a normal copied one.
  */
 enum iwl_hcmd_dataflag {
 	IWL_HCMD_DFL_NOCOPY	= BIT(0),
+	IWL_HCMD_DFL_DUP	= BIT(1),
 };
 
 /**
@@ -348,14 +355,17 @@
  * @start_fw: allocates and inits all the resources for the transport
  *	layer. Also kick a fw image.
  *	May sleep
- * @fw_alive: called when the fw sends alive notification
+ * @fw_alive: called when the fw sends alive notification. If the fw provides
+ *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
  *	May sleep
  * @stop_device:stops the whole device (embedded CPU put to reset)
  *	May sleep
  * @wowlan_suspend: put the device into the correct mode for WoWLAN during
  *	suspend. This is optional, if not implemented WoWLAN will not be
  *	supported. This callback may sleep.
- * @send_cmd:send a host command
+ * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
+ *	If RFkill is asserted in the middle of a SYNC host command, it must
+ *	return -ERFKILL straight away.
  *	May sleep only if CMD_SYNC is set
  * @tx: send an skb
  *	Must be atomic
@@ -385,7 +395,7 @@
 	int (*start_hw)(struct iwl_trans *iwl_trans);
 	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
-	void (*fw_alive)(struct iwl_trans *trans);
+	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);
 
 	void (*wowlan_suspend)(struct iwl_trans *trans);
@@ -438,12 +448,15 @@
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
- * @wait_command_queue: the wait_queue for SYNC host commands
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
  *	device_cmd for Tx - for internal use only
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ *	starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ *	start of the 802.11 header in the @rx_mpdu_cmd
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
@@ -457,9 +470,9 @@
 	u32 hw_id;
 	char hw_id_str[52];
 
-	bool pm_support;
+	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
-	wait_queue_head_t wait_command_queue;
+	bool pm_support;
 
 	/* The following fields are internal only */
 	struct kmem_cache *dev_cmd_pool;
@@ -502,13 +515,13 @@
 	trans->state = IWL_TRANS_NO_FW;
 }
 
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
 	might_sleep();
 
 	trans->state = IWL_TRANS_FW_ALIVE;
 
-	trans->ops->fw_alive(trans);
+	trans->ops->fw_alive(trans, scd_addr);
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -516,6 +529,8 @@
 {
 	might_sleep();
 
+	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
 	return trans->ops->start_fw(trans, fw);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2a46753..956fe6c3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -69,7 +69,6 @@
 
 #include "iwl-trans.h"
 #include "iwl-drv.h"
-#include "iwl-trans.h"
 
 #include "cfg.h"
 #include "internal.h"
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 401178f..d91d2e8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@
 };
 
 /**
- * struct iwl_rx_queue - Rx queue
+ * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @pool:
@@ -91,7 +91,7 @@
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
-struct iwl_rx_queue {
+struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
 	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@
  * 32 since we don't need so many commands pending. Since the HW
  * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
  * the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
  * This means that we end up with the following:
  *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
  *  SW entries:           | 0      | ... | 31          |
@@ -182,15 +182,17 @@
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
 
-struct iwl_pcie_tx_queue_entry {
+struct iwl_pcie_txq_entry {
 	struct iwl_device_cmd *cmd;
 	struct iwl_device_cmd *copy_cmd;
 	struct sk_buff *skb;
+	/* buffer to free after command completes */
+	const void *free_buf;
 	struct iwl_cmd_meta meta;
 };
 
 /**
- * struct iwl_tx_queue - Tx Queue for DMA
+ * struct iwl_txq - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
  * @tfds: transmit frame descriptors (DMA memory)
  * @entries: transmit entries (driver state)
@@ -203,10 +205,10 @@
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
  */
-struct iwl_tx_queue {
+struct iwl_txq {
 	struct iwl_queue q;
 	struct iwl_tfd *tfds;
-	struct iwl_pcie_tx_queue_entry *entries;
+	struct iwl_pcie_txq_entry *entries;
 	spinlock_t lock;
 	struct timer_list stuck_timer;
 	struct iwl_trans_pcie *trans_pcie;
@@ -236,7 +238,7 @@
  * @wd_timeout: queue watchdog timeout (jiffies)
  */
 struct iwl_trans_pcie {
-	struct iwl_rx_queue rxq;
+	struct iwl_rxq rxq;
 	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
@@ -258,7 +260,7 @@
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
 
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
@@ -268,6 +270,8 @@
 
 	bool ucode_write_complete;
 	wait_queue_head_t ucode_write_waitq;
+	wait_queue_head_t wait_command_queue;
+
 	unsigned long status;
 	u8 cmd_queue;
 	u8 cmd_fifo;
@@ -283,13 +287,23 @@
 	unsigned long wd_timeout;
 };
 
-/*****************************************************
-* DRIVER STATUS FUNCTIONS
-******************************************************/
-#define STATUS_HCMD_ACTIVE	0
-#define STATUS_DEVICE_ENABLED	1
-#define STATUS_TPOWER_PMI	2
-#define STATUS_INT_ENABLED	3
+/**
+ * enum iwl_pcie_status: status of the PCIe transport
+ * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_pcie_status {
+	STATUS_HCMD_ACTIVE,
+	STATUS_DEVICE_ENABLED,
+	STATUS_TPOWER_PMI,
+	STATUS_INT_ENABLED,
+	STATUS_RFKILL,
+	STATUS_FW_ERROR,
+};
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
 	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
@@ -301,6 +315,10 @@
 			    trans_specific);
 }
 
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ *	Other functions: iwl_pcie_XXX
+ */
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 				       const struct pci_device_id *ent,
 				       const struct iwl_cfg *cfg);
@@ -309,50 +327,43 @@
 /*****************************************************
 * RX
 ******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwl_rx_replenish(struct iwl_trans *trans);
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q);
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+void iwl_pcie_tasklet(struct iwl_trans *trans);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
 
 /*****************************************************
-* ICT
+* ICT - interrupt handling
 ******************************************************/
-void iwl_reset_ict(struct iwl_trans *trans);
-void iwl_disable_ict(struct iwl_trans *trans);
-int iwl_alloc_isr_ict(struct iwl_trans *trans);
-void iwl_free_isr_ict(struct iwl_trans *trans);
-irqreturn_t iwl_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
 
 /*****************************************************
 * TX / HCMD
 ******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans,
-			      struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_tx_cmd_complete(struct iwl_trans *trans,
-			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt);
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 			       int sta_id, int tid, int frame_limit, u16 ssn);
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir);
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
-			 struct sk_buff_head *skbs);
-int iwl_queue_space(const struct iwl_queue *q);
-
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+		      struct iwl_device_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf);
-void iwl_dump_csr(struct iwl_trans *trans);
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
 * Helpers
@@ -388,7 +399,7 @@
 }
 
 static inline void iwl_wake_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -399,7 +410,7 @@
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -411,7 +422,7 @@
 				    txq->q.id);
 }
 
-static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
 {
 	return q->write_ptr >= q->read_ptr ?
 		(i >= q->read_ptr && i < q->write_ptr) :
@@ -423,8 +434,8 @@
 	return index & (q->n_window - 1);
 }
 
-static inline const char *
-trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+					 u8 cmd)
 {
 	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
 		return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index bb69f8f..bb32510 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
  * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
  *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
  *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
  *   iwl->rxq is replenished and the READ INDEX is updated (updating the
  *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
  *
  * Driver sequence:
  *
- * iwl_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc()            Allocates rx_free
+ * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
+ *                            iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
  *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_rx_replenish
+ *                            are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl_rx_queue_restock to refill any empty
+ *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
  * ...
  *
  */
 
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
  */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
 {
 	int s = q->read - q->write;
 	if (s <= 0)
@@ -122,11 +122,28 @@
 	return s;
 }
 
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q)
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
+ */
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
 {
 	unsigned long flags;
 	u32 reg;
@@ -176,16 +193,8 @@
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
-{
-	return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
  *
  * If there are slots in the RX queue that need to be restocked,
  * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,11 +204,10 @@
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct list_head *element;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
 
@@ -215,18 +223,18 @@
 		return;
 
 	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
 		BUG_ON(rxb && rxb->page);
 
 		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
+		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
+		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -243,24 +251,23 @@
 		spin_lock_irqsave(&rxq->lock, flags);
 		rxq->need_update = 1;
 		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(trans, rxq);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
 
 /*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
  * a page must be allocated and the RBD must point to the page. This function
  * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct list_head *element;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
 	unsigned long flags;
@@ -308,10 +315,9 @@
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
+		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		BUG_ON(rxb->page);
@@ -343,47 +349,227 @@
 	}
 }
 
+static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	int i;
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << trans_pcie->rx_page_order,
+				       DMA_FROM_DEVICE);
+			__free_pages(rxq->pool[i].page,
+				     trans_pcie->rx_page_order);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+}
+
 /*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
  *
  * When moving to rx_free, a page is allocated for the slot.
  *
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * This is called as a scheduled work item (except during initialization)
  */
-void iwl_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
 
-	iwl_rx_allocate(trans, GFP_KERNEL);
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
 {
-	iwl_rx_allocate(trans, GFP_ATOMIC);
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
 }
 
-void iwl_bg_rx_replenish(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
 	struct iwl_trans_pcie *trans_pcie =
 	    container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_rx_replenish(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans);
 }
 
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct device *dev = trans->dev;
+
+	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+	spin_lock_init(&rxq->lock);
+
+	if (WARN_ON(rxq->bd || rxq->rb_stts))
+		return -EINVAL;
+
+	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+				      &rxq->bd_dma, GFP_KERNEL);
+	if (!rxq->bd)
+		goto err_bd;
+
+	/* Allocate the driver's pointer to receive buffer status */
+	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+					   &rxq->rb_stts_dma, GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err_rb_stts;
+
+	return 0;
+
+err_rb_stts:
+	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd = NULL;
+err_bd:
+	return -ENOMEM;
+}
+
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+	if (trans_pcie->rx_buf_size_8k)
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+	/* Reset driver's Rx queue write index */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+			   (u32)(rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+			   rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+	 *      the credit mechanism in 5000 HW RX FIFO
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+			   rb_size|
+			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+	int i, err;
+	unsigned long flags;
+
+	if (!rxq->bd) {
+		err = iwl_pcie_rx_alloc(trans);
+		if (err)
+			return err;
+	}
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	INIT_WORK(&trans_pcie->rx_replenish,
+		  iwl_pcie_rx_replenish_work);
+
+	iwl_pcie_rxq_free_rbs(trans);
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		rxq->queue[i] = NULL;
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	iwl_pcie_rx_replenish(trans);
+
+	iwl_pcie_rx_hw_init(trans, rxq);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	rxq->need_update = 1;
+	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	return 0;
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	unsigned long flags;
+
+	/* if rxq->bd is NULL, it means that nothing has been allocated,
+	 * exit now */
+	if (!rxq->bd) {
+		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+		return;
+	}
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	iwl_pcie_rxq_free_rbs(trans);
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd = NULL;
+
+	if (rxq->rb_stts)
+		dma_free_coherent(trans->dev,
+				  sizeof(struct iwl_rb_status),
+				  rxq->rb_stts, rxq->rb_stts_dma);
+	else
+		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+	rxq->rb_stts = NULL;
+}
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	unsigned long flags;
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -413,13 +599,13 @@
 			break;
 
 		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-			rxcb._offset,
-			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
 			pkt->hdr.cmd);
 
 		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 		len += sizeof(u32); /* account for status word */
-		trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -445,7 +631,7 @@
 		cmd_index = get_cmd_index(&txq->q, index);
 
 		if (reclaim) {
-			struct iwl_pcie_tx_queue_entry *ent;
+			struct iwl_pcie_txq_entry *ent;
 			ent = &txq->entries[cmd_index];
 			cmd = ent->copy_cmd;
 			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -459,6 +645,9 @@
 			/* The original command isn't needed any more */
 			kfree(txq->entries[cmd_index].copy_cmd);
 			txq->entries[cmd_index].copy_cmd = NULL;
+			/* nor is the duplicated part of the command */
+			kfree(txq->entries[cmd_index].free_buf);
+			txq->entries[cmd_index].free_buf = NULL;
 		}
 
 		/*
@@ -472,7 +661,7 @@
 			 * iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
 			if (!rxcb._page_stolen)
-				iwl_tx_cmd_complete(trans, &rxcb, err);
+				iwl_pcie_hcmd_complete(trans, &rxcb, err);
 			else
 				IWL_WARN(trans, "Claim null rxb?\n");
 		}
@@ -514,17 +703,13 @@
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	u32 r, i;
 	u8 fill_rx = 0;
 	u32 count = 8;
@@ -555,7 +740,7 @@
 
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
-		iwl_rx_handle_rxbuf(trans, rxb);
+		iwl_pcie_rx_handle_rb(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
@@ -564,7 +749,7 @@
 			count++;
 			if (count >= 8) {
 				rxq->read = i;
-				iwl_rx_replenish_now(trans);
+				iwl_pcie_rx_replenish_now(trans);
 				count = 0;
 			}
 		}
@@ -573,39 +758,41 @@
 	/* Backtrack one entry */
 	rxq->read = i;
 	if (fill_rx)
-		iwl_rx_replenish_now(trans);
+		iwl_pcie_rx_replenish_now(trans);
 	else
-		iwl_rx_queue_restock(trans);
+		iwl_pcie_rxq_restock(trans);
 }
 
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  */
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		iwl_op_mode_wimax_active(trans->op_mode);
-		wake_up(&trans->wait_command_queue);
+		wake_up(&trans_pcie->wait_command_queue);
 		return;
 	}
 
-	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL);
+	iwl_pcie_dump_csr(trans);
+	iwl_pcie_dump_fh(trans, NULL);
+
+	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
+	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+	wake_up(&trans_pcie->wait_command_queue);
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }
 
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -657,7 +844,7 @@
 		iwl_disable_interrupts(trans);
 
 		isr_stats->hw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
@@ -694,6 +881,16 @@
 		isr_stats->rfkill++;
 
 		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+		if (hw_rfkill) {
+			set_bit(STATUS_RFKILL, &trans_pcie->status);
+			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
+					       &trans_pcie->status))
+				IWL_DEBUG_RF_KILL(trans,
+						  "Rfkill while SYNC HCMD in flight\n");
+			wake_up(&trans_pcie->wait_command_queue);
+		} else {
+			clear_bit(STATUS_RFKILL, &trans_pcie->status);
+		}
 
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
@@ -710,17 +907,16 @@
 		IWL_ERR(trans, "Microcode SW error detected. "
 			" Restarting 0x%X.\n", inta);
 		isr_stats->sw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 		handled |= CSR_INT_BIT_SW_ERR;
 	}
 
 	/* uCode wakes up after power-down sleep */
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
 		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-			iwl_txq_update_write_ptr(trans,
-						 &trans_pcie->txq[i]);
+			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
 
 		isr_stats->wakeup++;
 
@@ -758,7 +954,7 @@
 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
 			    CSR_INT_PERIODIC_DIS);
 
-		iwl_rx_handle(trans);
+		iwl_pcie_rx_handle(trans);
 
 		/*
 		 * Enable periodic interrupt in 8 msec only if we received
@@ -816,7 +1012,7 @@
 #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
 
 /* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -829,13 +1025,12 @@
 	}
 }
 
-
 /*
  * allocate dram shared table, it is an aligned memory
  * block of ICT_SIZE.
  * also reset all data related to ICT table interrupt.
  */
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -848,7 +1043,7 @@
 
 	/* just an API sanity check ... it is guaranteed to be aligned */
 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
 		return -EINVAL;
 	}
 
@@ -869,7 +1064,7 @@
 /* Device is going up inform it about using ICT interrupt table,
  * also we need to tell the driver to start using ICT interrupt.
  */
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
@@ -899,7 +1094,7 @@
 }
 
 /* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
@@ -910,7 +1105,7 @@
 }
 
 /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
+static irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -957,7 +1152,7 @@
 #endif
 
 	trans_pcie->inta |= inta;
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
 	if (likely(inta))
 		tasklet_schedule(&trans_pcie->irq_tasklet);
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
@@ -982,7 +1177,7 @@
  * the interrupt we need to service, driver will set the entries back to 0 and
  * set index.
  */
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
 	struct iwl_trans_pcie *trans_pcie;
@@ -1002,14 +1197,13 @@
 	 * use legacy interrupt.
 	 */
 	if (unlikely(!trans_pcie->use_ict)) {
-		irqreturn_t ret = iwl_isr(irq, data);
+		irqreturn_t ret = iwl_pcie_isr(irq, data);
 		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 		return ret;
 	}
 
 	trace_iwlwifi_dev_irq(trans->dev);
 
-
 	/* Disable (but don't clear!) interrupts here to avoid
 	 * back-to-back ISRs and sporadic interrupts from our NIC.
 	 * If we have something to service, the tasklet will re-enable ints.
@@ -1018,7 +1212,6 @@
 	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
-
 	/* Ignore interrupt if there's nothing in NIC to service.
 	 * This may be due to IRQ shared with another device,
 	 * or due to sporadic interrupts thrown from our NIC. */
@@ -1067,7 +1260,7 @@
 	inta &= trans_pcie->inta_mask;
 	trans_pcie->inta |= inta;
 
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
 	if (likely(inta))
 		tasklet_schedule(&trans_pcie->irq_tasklet);
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index fe0fffd..f6c21e7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,584 +74,8 @@
 #include "iwl-prph.h"
 #include "iwl-agn-hw.h"
 #include "internal.h"
-/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "dvm/commands.h"
 
-#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
-	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
-	(~(1<<(trans_pcie)->cmd_queue)))
-
-static int iwl_trans_rx_alloc(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct device *dev = trans->dev;
-
-	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
-
-	spin_lock_init(&rxq->lock);
-
-	if (WARN_ON(rxq->bd || rxq->rb_stts))
-		return -EINVAL;
-
-	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-				      &rxq->bd_dma, GFP_KERNEL);
-	if (!rxq->bd)
-		goto err_bd;
-
-	/*Allocate the driver's pointer to receive buffer status */
-	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
-					   &rxq->rb_stts_dma, GFP_KERNEL);
-	if (!rxq->rb_stts)
-		goto err_rb_stts;
-
-	return 0;
-
-err_rb_stts:
-	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-			  rxq->bd, rxq->bd_dma);
-	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
-	rxq->bd = NULL;
-err_bd:
-	return -ENOMEM;
-}
-
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	int i;
-
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-		/* In the reset function, these buffers may have been allocated
-		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].page != NULL) {
-			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
-				       PAGE_SIZE << trans_pcie->rx_page_order,
-				       DMA_FROM_DEVICE);
-			__free_pages(rxq->pool[i].page,
-				     trans_pcie->rx_page_order);
-			rxq->pool[i].page = NULL;
-		}
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-	}
-}
-
-static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
-				 struct iwl_rx_queue *rxq)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 rb_size;
-	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
-	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
-	if (trans_pcie->rx_buf_size_8k)
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
-	else
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
-	/* Stop Rx DMA */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
-	/* Reset driver's Rx queue write index */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
-	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->bd_dma >> 8));
-
-	/* Tell device where in DRAM to update its Rx status */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   rxq->rb_stts_dma >> 4);
-
-	/* Enable Rx DMA
-	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
-	 *      the credit mechanism in 5000 HW RX FIFO
-	 * Direct rx interrupts to hosts
-	 * Rx buffer size 4 or 8k
-	 * RB timeout 0x10
-	 * 256 RBDs
-	 */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
-			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   rb_size|
-			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
-	/* Set interrupt coalescing timer to default (2048 usecs) */
-	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
-
-static int iwl_rx_init(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
-	int i, err;
-	unsigned long flags;
-
-	if (!rxq->bd) {
-		err = iwl_trans_rx_alloc(trans);
-		if (err)
-			return err;
-	}
-
-	spin_lock_irqsave(&rxq->lock, flags);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
-
-	iwl_trans_rxq_free_rx_bufs(trans);
-
-	for (i = 0; i < RX_QUEUE_SIZE; i++)
-		rxq->queue[i] = NULL;
-
-	/* Set us so that we have processed and used all buffers, but have
-	 * not restocked the Rx queue with fresh buffers */
-	rxq->read = rxq->write = 0;
-	rxq->write_actual = 0;
-	rxq->free_count = 0;
-	spin_unlock_irqrestore(&rxq->lock, flags);
-
-	iwl_rx_replenish(trans);
-
-	iwl_trans_rx_hw_init(trans, rxq);
-
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	rxq->need_update = 1;
-	iwl_rx_queue_update_write_ptr(trans, rxq);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
-	return 0;
-}
-
-static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	unsigned long flags;
-
-	/*if rxq->bd is NULL, it means that nothing has been allocated,
-	 * exit now */
-	if (!rxq->bd) {
-		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
-		return;
-	}
-
-	spin_lock_irqsave(&rxq->lock, flags);
-	iwl_trans_rxq_free_rx_bufs(trans);
-	spin_unlock_irqrestore(&rxq->lock, flags);
-
-	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
-			  rxq->bd, rxq->bd_dma);
-	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
-	rxq->bd = NULL;
-
-	if (rxq->rb_stts)
-		dma_free_coherent(trans->dev,
-				  sizeof(struct iwl_rb_status),
-				  rxq->rb_stts, rxq->rb_stts_dma);
-	else
-		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
-	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
-	rxq->rb_stts = NULL;
-}
-
-static int iwl_trans_rx_stop(struct iwl_trans *trans)
-{
-
-	/* stop Rx DMA */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
-				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-}
-
-static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
-				struct iwl_dma_ptr *ptr, size_t size)
-{
-	if (WARN_ON(ptr->addr))
-		return -EINVAL;
-
-	ptr->addr = dma_alloc_coherent(trans->dev, size,
-				       &ptr->dma, GFP_KERNEL);
-	if (!ptr->addr)
-		return -ENOMEM;
-	ptr->size = size;
-	return 0;
-}
-
-static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
-				struct iwl_dma_ptr *ptr)
-{
-	if (unlikely(!ptr->addr))
-		return;
-
-	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
-	memset(ptr, 0, sizeof(*ptr));
-}
-
-static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
-{
-	struct iwl_tx_queue *txq = (void *)data;
-	struct iwl_queue *q = &txq->q;
-	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
-	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
-	u32 scd_sram_addr = trans_pcie->scd_base_addr +
-		SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
-	u8 buf[16];
-	int i;
-
-	spin_lock(&txq->lock);
-	/* check if triggered erroneously */
-	if (txq->q.read_ptr == txq->q.write_ptr) {
-		spin_unlock(&txq->lock);
-		return;
-	}
-	spin_unlock(&txq->lock);
-
-	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
-		jiffies_to_msecs(trans_pcie->wd_timeout));
-	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
-		txq->q.read_ptr, txq->q.write_ptr);
-
-	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
-	iwl_print_hex_error(trans, buf, sizeof(buf));
-
-	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
-		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
-			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
-
-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
-		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
-		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-		u32 tbl_dw =
-			iwl_read_targ_mem(trans,
-					  trans_pcie->scd_base_addr +
-					  SCD_TRANS_TBL_OFFSET_QUEUE(i));
-
-		if (i & 0x1)
-			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
-		else
-			tbl_dw = tbl_dw & 0x0000FFFF;
-
-		IWL_ERR(trans,
-			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
-			i, active ? "" : "in", fifo, tbl_dw,
-			iwl_read_prph(trans,
-				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
-			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
-	}
-
-	for (i = q->read_ptr; i != q->write_ptr;
-	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
-		struct iwl_tx_cmd *tx_cmd =
-			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
-		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-			get_unaligned_le32(&tx_cmd->scratch));
-	}
-
-	iwl_op_mode_nic_error(trans->op_mode);
-}
-
-static int iwl_trans_txq_alloc(struct iwl_trans *trans,
-			       struct iwl_tx_queue *txq, int slots_num,
-			       u32 txq_id)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
-	int i;
-
-	if (WARN_ON(txq->entries || txq->tfds))
-		return -EINVAL;
-
-	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
-		    (unsigned long)txq);
-	txq->trans_pcie = trans_pcie;
-
-	txq->q.n_window = slots_num;
-
-	txq->entries = kcalloc(slots_num,
-			       sizeof(struct iwl_pcie_tx_queue_entry),
-			       GFP_KERNEL);
-
-	if (!txq->entries)
-		goto error;
-
-	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < slots_num; i++) {
-			txq->entries[i].cmd =
-				kmalloc(sizeof(struct iwl_device_cmd),
-					GFP_KERNEL);
-			if (!txq->entries[i].cmd)
-				goto error;
-		}
-
-	/* Circular buffer of transmit frame descriptors (TFDs),
-	 * shared with device */
-	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
-				       &txq->q.dma_addr, GFP_KERNEL);
-	if (!txq->tfds) {
-		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
-		goto error;
-	}
-	txq->q.id = txq_id;
-
-	return 0;
-error:
-	if (txq->entries && txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < slots_num; i++)
-			kfree(txq->entries[i].cmd);
-	kfree(txq->entries);
-	txq->entries = NULL;
-
-	return -ENOMEM;
-
-}
-
-static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-			      int slots_num, u32 txq_id)
-{
-	int ret;
-
-	txq->need_update = 0;
-
-	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
-	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
-	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
-	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
-			txq_id);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&txq->lock);
-
-	/*
-	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
-	 * given Tx queue, and enable the DMA channel used for that queue.
-	 * Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
-			     txq->q.dma_addr >> 8);
-
-	return 0;
-}
-
-/**
- * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	enum dma_data_direction dma_dir;
-
-	if (!q->n_bd)
-		return;
-
-	/* In the command queue, all the TBs are mapped as BIDI
-	 * so unmap them as such.
-	 */
-	if (txq_id == trans_pcie->cmd_queue)
-		dma_dir = DMA_BIDIRECTIONAL;
-	else
-		dma_dir = DMA_TO_DEVICE;
-
-	spin_lock_bh(&txq->lock);
-	while (q->write_ptr != q->read_ptr) {
-		iwl_txq_free_tfd(trans, txq, dma_dir);
-		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
-	}
-	spin_unlock_bh(&txq->lock);
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	struct device *dev = trans->dev;
-	int i;
-
-	if (WARN_ON(!txq))
-		return;
-
-	iwl_tx_queue_unmap(trans, txq_id);
-
-	/* De-alloc array of command/tx buffers */
-	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < txq->q.n_window; i++) {
-			kfree(txq->entries[i].cmd);
-			kfree(txq->entries[i].copy_cmd);
-		}
-
-	/* De-alloc circular buffer of TFDs */
-	if (txq->q.n_bd) {
-		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
-				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
-	}
-
-	kfree(txq->entries);
-	txq->entries = NULL;
-
-	del_timer_sync(&txq->stuck_timer);
-
-	/* 0-fill queue descriptor structure */
-	memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_trans_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
-{
-	int txq_id;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	/* Tx queues */
-	if (trans_pcie->txq) {
-		for (txq_id = 0;
-		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
-			iwl_tx_queue_free(trans, txq_id);
-	}
-
-	kfree(trans_pcie->txq);
-	trans_pcie->txq = NULL;
-
-	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
-
-	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
-}
-
-/**
- * iwl_trans_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-static int iwl_trans_tx_alloc(struct iwl_trans *trans)
-{
-	int ret;
-	int txq_id, slots_num;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
-			sizeof(struct iwlagn_scd_bc_tbl);
-
-	/*It is not allowed to alloc twice, so warn when this happens.
-	 * We cannot rely on the previous allocation, so free and fail */
-	if (WARN_ON(trans_pcie->txq)) {
-		ret = -EINVAL;
-		goto error;
-	}
-
-	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
-				   scd_bc_tbls_size);
-	if (ret) {
-		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
-		goto error;
-	}
-
-	/* Alloc keep-warm buffer */
-	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
-	if (ret) {
-		IWL_ERR(trans, "Keep Warm allocation failed\n");
-		goto error;
-	}
-
-	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
-				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
-	if (!trans_pcie->txq) {
-		IWL_ERR(trans, "Not enough memory for txq\n");
-		ret = ENOMEM;
-		goto error;
-	}
-
-	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
-	     txq_id++) {
-		slots_num = (txq_id == trans_pcie->cmd_queue) ?
-					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
-					  slots_num, txq_id);
-		if (ret) {
-			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
-			goto error;
-		}
-	}
-
-	return 0;
-
-error:
-	iwl_trans_pcie_tx_free(trans);
-
-	return ret;
-}
-static int iwl_tx_init(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int ret;
-	int txq_id, slots_num;
-	unsigned long flags;
-	bool alloc = false;
-
-	if (!trans_pcie->txq) {
-		ret = iwl_trans_tx_alloc(trans);
-		if (ret)
-			goto error;
-		alloc = true;
-	}
-
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
-	/* Turn off all Tx DMA fifos */
-	iwl_write_prph(trans, SCD_TXFACT, 0);
-
-	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
-			   trans_pcie->kw.dma >> 4);
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
-	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
-	     txq_id++) {
-		slots_num = (txq_id == trans_pcie->cmd_queue) ?
-					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
-					 slots_num, txq_id);
-		if (ret) {
-			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
-			goto error;
-		}
-	}
-
-	return 0;
-error:
-	/*Upon error, free only if we allocated something */
-	if (alloc)
-		iwl_trans_pcie_tx_free(trans);
-	return ret;
-}
-
-static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
 {
 /*
  * (for documentation purposes)
@@ -673,18 +97,11 @@
 #define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
 #define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
 
-static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u16 pci_lnk_ctl;
+	u16 lctl;
 
-	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
-				  &pci_lnk_ctl);
-	return pci_lnk_ctl;
-}
-
-static void iwl_apm_config(struct iwl_trans *trans)
-{
 	/*
 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
@@ -693,29 +110,27 @@
 	 * If not (unlikely), enable L0S, so there is at least some
 	 *    power savings, even without L1.
 	 */
-	u16 lctl = iwl_pciexp_link_ctrl(trans);
+	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
 
 	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
 				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
 		/* L1-ASPM enabled; disable(!) L0S */
 		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-		dev_printk(KERN_INFO, trans->dev,
-			   "L1 Enabled; Disabling L0S\n");
+		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
 	} else {
 		/* L1-ASPM disabled; enable(!) L0S */
 		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-		dev_printk(KERN_INFO, trans->dev,
-			   "L1 Disabled; Enabling L0S\n");
+		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
 	}
 	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
 }
 
 /*
  * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
  * NOTE:  This does not load uCode nor start the embedded processor
  */
-static int iwl_apm_init(struct iwl_trans *trans)
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret = 0;
@@ -747,7 +162,7 @@
 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
 
-	iwl_apm_config(trans);
+	iwl_pcie_apm_config(trans);
 
 	/* Configure analog phase-lock-loop before activating to D0A */
 	if (trans->cfg->base_params->pll_cfg_val)
@@ -793,7 +208,7 @@
 	return ret;
 }
 
-static int iwl_apm_stop_master(struct iwl_trans *trans)
+static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
 	int ret = 0;
 
@@ -811,7 +226,7 @@
 	return ret;
 }
 
-static void iwl_apm_stop(struct iwl_trans *trans)
+static void iwl_pcie_apm_stop(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
@@ -819,7 +234,7 @@
 	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 	/* Stop device's DMA activity */
-	iwl_apm_stop_master(trans);
+	iwl_pcie_apm_stop_master(trans);
 
 	/* Reset the entire device */
 	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -834,29 +249,29 @@
 		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 }
 
-static int iwl_nic_init(struct iwl_trans *trans)
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
 
 	/* nic_init */
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	iwl_apm_init(trans);
+	iwl_pcie_apm_init(trans);
 
 	/* Set interrupt coalescing calibration timer to default (512 usecs) */
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
 
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
-	iwl_set_pwr_vmain(trans);
+	iwl_pcie_set_pwr_vmain(trans);
 
 	iwl_op_mode_nic_config(trans->op_mode);
 
 	/* Allocate the RX queue, or reset if it is already allocated */
-	iwl_rx_init(trans);
+	iwl_pcie_rx_init(trans);
 
 	/* Allocate or reset and init all Tx and Command queues */
-	if (iwl_tx_init(trans))
+	if (iwl_pcie_tx_init(trans))
 		return -ENOMEM;
 
 	if (trans->cfg->base_params->shadow_reg_enable) {
@@ -871,7 +286,7 @@
 #define HW_READY_TIMEOUT (50)
 
 /* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_trans *trans)
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
 {
 	int ret;
 
@@ -889,14 +304,14 @@
 }
 
 /* Note: returns standard 0/-ERROR code */
-static int iwl_prepare_card_hw(struct iwl_trans *trans)
+static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;
 	int t = 0;
 
 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
-	ret = iwl_set_hw_ready(trans);
+	ret = iwl_pcie_set_hw_ready(trans);
 	/* If the card is ready, exit 0 */
 	if (ret >= 0)
 		return 0;
@@ -906,7 +321,7 @@
 		    CSR_HW_IF_CONFIG_REG_PREPARE);
 
 	do {
-		ret = iwl_set_hw_ready(trans);
+		ret = iwl_pcie_set_hw_ready(trans);
 		if (ret >= 0)
 			return 0;
 
@@ -920,7 +335,7 @@
 /*
  * ucode
  */
-static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
 				   dma_addr_t phy_addr, u32 byte_cnt)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -967,7 +382,7 @@
 	return 0;
 }
 
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 			    const struct fw_desc *section)
 {
 	u8 *v_addr;
@@ -988,8 +403,9 @@
 		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
 
 		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
-		ret = iwl_load_firmware_chunk(trans, section->offset + offset,
-					      p_addr, copy_size);
+		ret = iwl_pcie_load_firmware_chunk(trans,
+						   section->offset + offset,
+						   p_addr, copy_size);
 		if (ret) {
 			IWL_ERR(trans,
 				"Could not load the [%d] uCode section\n",
@@ -1002,7 +418,7 @@
 	return ret;
 }
 
-static int iwl_load_given_ucode(struct iwl_trans *trans,
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 				const struct fw_img *image)
 {
 	int i, ret = 0;
@@ -1011,7 +427,7 @@
 		if (!image->sec[i].data)
 			break;
 
-		ret = iwl_load_section(trans, i, &image->sec[i]);
+		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
 		if (ret)
 			return ret;
 	}
@@ -1025,15 +441,18 @@
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 				   const struct fw_img *fw)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 	bool hw_rfkill;
 
 	/* This may fail if AMT took ownership of the device */
-	if (iwl_prepare_card_hw(trans)) {
+	if (iwl_pcie_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
 		return -EIO;
 	}
 
+	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
+
 	iwl_enable_rfkill_int(trans);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
@@ -1044,7 +463,7 @@
 
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
-	ret = iwl_nic_init(trans);
+	ret = iwl_pcie_nic_init(trans);
 	if (ret) {
 		IWL_ERR(trans, "Unable to init nic\n");
 		return ret;
@@ -1064,125 +483,13 @@
 	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
 
 	/* Load the given image to the HW */
-	return iwl_load_given_ucode(trans, fw);
+	return iwl_pcie_load_given_ucode(trans, fw);
 }
 
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- */
-static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
-	struct iwl_trans_pcie __maybe_unused *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	iwl_write_prph(trans, SCD_TXFACT, mask);
-}
-
-static void iwl_tx_start(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 a;
-	int chan;
-	u32 reg_val;
-
-	/* make sure all queue are not stopped/used */
-	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
-	trans_pcie->scd_base_addr =
-		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
-	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
-	/* reset conext data memory */
-	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
-		a += 4)
-		iwl_write_targ_mem(trans, a, 0);
-	/* reset tx status memory */
-	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
-		a += 4)
-		iwl_write_targ_mem(trans, a, 0);
-	for (; a < trans_pcie->scd_base_addr +
-	       SCD_TRANS_TBL_OFFSET_QUEUE(
-				trans->cfg->base_params->num_of_queues);
-	       a += 4)
-		iwl_write_targ_mem(trans, a, 0);
-
-	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
-		       trans_pcie->scd_bc_tbls.dma >> 10);
-
-	/* The chain extension of the SCD doesn't work well. This feature is
-	 * enabled by default by the HW, so we need to disable it manually.
-	 */
-	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
-
-	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
-				trans_pcie->cmd_fifo);
-
-	/* Activate all Tx DMA/FIFO channels */
-	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
-	/* Enable DMA channel */
-	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
-		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
-	/* Update FH chicken bits */
-	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
-	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
-			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
-	/* Enable L1-Active */
-	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
-			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
-{
-	iwl_reset_ict(trans);
-	iwl_tx_start(trans);
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-static int iwl_trans_tx_stop(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int ch, txq_id, ret;
-	unsigned long flags;
-
-	/* Turn off all Tx DMA fifos */
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
-	iwl_trans_txq_set_sched(trans, 0);
-
-	/* Stop each Tx DMA channel, and wait for it to be idle */
-	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
-		iwl_write_direct32(trans,
-				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
-			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
-		if (ret < 0)
-			IWL_ERR(trans,
-				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
-				ch,
-				iwl_read_direct32(trans,
-						  FH_TSSR_TX_STATUS_REG));
-	}
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
-	if (!trans_pcie->txq) {
-		IWL_WARN(trans,
-			 "Stopping tx queues that aren't allocated...\n");
-		return 0;
-	}
-
-	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
-	     txq_id++)
-		iwl_tx_queue_unmap(trans, txq_id);
-
-	return 0;
+	iwl_pcie_reset_ict(trans);
+	iwl_pcie_tx_start(trans, scd_addr);
 }
 
 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1196,7 +503,7 @@
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	/* device going down, Stop using ICT table */
-	iwl_disable_ict(trans);
+	iwl_pcie_disable_ict(trans);
 
 	/*
 	 * If a HW restart happens during firmware loading,
@@ -1206,8 +513,8 @@
 	 * already dead.
 	 */
 	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
-		iwl_trans_tx_stop(trans);
-		iwl_trans_rx_stop(trans);
+		iwl_pcie_tx_stop(trans);
+		iwl_pcie_rx_stop(trans);
 
 		/* Power-down device's busmaster DMA clocks */
 		iwl_write_prph(trans, APMG_CLK_DIS_REG,
@@ -1220,7 +527,7 @@
 		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 
 	/* Stop the device, and put it in low power state */
-	iwl_apm_stop(trans);
+	iwl_pcie_apm_stop(trans);
 
 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
 	 * Clean again the interrupt here
@@ -1245,6 +552,7 @@
 	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
 	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+	clear_bit(STATUS_RFKILL, &trans_pcie->status);
 }
 
 static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1258,169 +566,6 @@
 		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 }
 
-static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			     struct iwl_device_cmd *dev_cmd, int txq_id)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
-	struct iwl_cmd_meta *out_meta;
-	struct iwl_tx_queue *txq;
-	struct iwl_queue *q;
-	dma_addr_t phys_addr = 0;
-	dma_addr_t txcmd_phys;
-	dma_addr_t scratch_phys;
-	u16 len, firstlen, secondlen;
-	u8 wait_write_ptr = 0;
-	__le16 fc = hdr->frame_control;
-	u8 hdr_len = ieee80211_hdrlen(fc);
-	u16 __maybe_unused wifi_seq;
-
-	txq = &trans_pcie->txq[txq_id];
-	q = &txq->q;
-
-	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
-		WARN_ON_ONCE(1);
-		return -EINVAL;
-	}
-
-	spin_lock(&txq->lock);
-
-	/* In AGG mode, the index in the ring must correspond to the WiFi
-	 * sequence number. This is a HW requirements to help the SCD to parse
-	 * the BA.
-	 * Check here that the packets are in the right place on the ring.
-	 */
-#ifdef CONFIG_IWLWIFI_DEBUG
-	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
-		  ((wifi_seq & 0xff) != q->write_ptr),
-		  "Q: %d WiFi Seq %d tfdNum %d",
-		  txq_id, wifi_seq, q->write_ptr);
-#endif
-
-	/* Set up driver data for this TFD */
-	txq->entries[q->write_ptr].skb = skb;
-	txq->entries[q->write_ptr].cmd = dev_cmd;
-
-	dev_cmd->hdr.cmd = REPLY_TX;
-	dev_cmd->hdr.sequence =
-		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-			    INDEX_TO_SEQ(q->write_ptr)));
-
-	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_meta = &txq->entries[q->write_ptr].meta;
-
-	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
-	 */
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-	firstlen = (len + 3) & ~3;
-
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (firstlen != len)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = dma_map_single(trans->dev,
-				    &dev_cmd->hdr, firstlen,
-				    DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-		goto out_err;
-	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, firstlen);
-
-	if (!ieee80211_has_morefrags(fc)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	secondlen = skb->len - hdr_len;
-	if (secondlen > 0) {
-		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-					   secondlen, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-			dma_unmap_single(trans->dev,
-					 dma_unmap_addr(out_meta, mapping),
-					 dma_unmap_len(out_meta, len),
-					 DMA_BIDIRECTIONAL);
-			goto out_err;
-		}
-	}
-
-	/* Attach buffers to TFD */
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
-	if (secondlen > 0)
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     secondlen, 0);
-
-	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-				offsetof(struct iwl_tx_cmd, scratch);
-
-	/* take back ownership of DMA buffer to enable update */
-	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-				DMA_BIDIRECTIONAL);
-	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
-	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
-		     le16_to_cpu(dev_cmd->hdr.sequence));
-	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
-	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
-
-	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-				   DMA_BIDIRECTIONAL);
-
-	trace_iwlwifi_dev_tx(trans->dev,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
-			     &dev_cmd->hdr, firstlen,
-			     skb->data + hdr_len, secondlen);
-
-	/* start timer if queue currently empty */
-	if (txq->need_update && q->read_ptr == q->write_ptr &&
-	    trans_pcie->wd_timeout)
-		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-
-	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
-
-	/*
-	 * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually,
-	 * regardless of the value of ret. "ret" only indicates
-	 * whether or not we should update the write pointer.
-	 */
-	if (iwl_queue_space(q) < q->high_mark) {
-		if (wait_write_ptr) {
-			txq->need_update = 1;
-			iwl_txq_update_write_ptr(trans, txq);
-		} else {
-			iwl_stop_queue(trans, txq);
-		}
-	}
-	spin_unlock(&txq->lock);
-	return 0;
- out_err:
-	spin_unlock(&txq->lock);
-	return -1;
-}
-
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1431,29 +576,28 @@
 
 	if (!trans_pcie->irq_requested) {
 		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-			iwl_irq_tasklet, (unsigned long)trans);
+			iwl_pcie_tasklet, (unsigned long)trans);
 
-		iwl_alloc_isr_ict(trans);
+		iwl_pcie_alloc_ict(trans);
 
-		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
-				  DRV_NAME, trans);
+		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
+				  IRQF_SHARED, DRV_NAME, trans);
 		if (err) {
 			IWL_ERR(trans, "Error allocating IRQ %d\n",
 				trans_pcie->irq);
 			goto error;
 		}
 
-		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
 		trans_pcie->irq_requested = true;
 	}
 
-	err = iwl_prepare_card_hw(trans);
+	err = iwl_pcie_prepare_card_hw(trans);
 	if (err) {
 		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
 		goto err_free_irq;
 	}
 
-	iwl_apm_init(trans);
+	iwl_pcie_apm_init(trans);
 
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);
@@ -1467,7 +611,7 @@
 	trans_pcie->irq_requested = false;
 	free_irq(trans_pcie->irq, trans);
 error:
-	iwl_free_isr_ict(trans);
+	iwl_pcie_free_ict(trans);
 	tasklet_kill(&trans_pcie->irq_tasklet);
 	return err;
 }
@@ -1483,7 +627,7 @@
 	iwl_disable_interrupts(trans);
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
-	iwl_apm_stop(trans);
+	iwl_pcie_apm_stop(trans);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
 	iwl_disable_interrupts(trans);
@@ -1507,28 +651,6 @@
 	}
 }
 
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-				   struct sk_buff_head *skbs)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	/* n_bd is usually 256 => n_bd - 1 = 0xff */
-	int tfd_num = ssn & (txq->q.n_bd - 1);
-	int freed = 0;
-
-	spin_lock(&txq->lock);
-
-	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-				   txq_id, txq->q.read_ptr, tfd_num, ssn);
-		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-			iwl_wake_queue(trans, txq);
-	}
-
-	spin_unlock(&txq->lock);
-}
-
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 {
 	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1575,12 +697,12 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	iwl_trans_pcie_tx_free(trans);
-	iwl_trans_pcie_rx_free(trans);
+	iwl_pcie_tx_free(trans);
+	iwl_pcie_rx_free(trans);
 
 	if (trans_pcie->irq_requested == true) {
 		free_irq(trans_pcie->irq, trans);
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
 	}
 
 	pci_disable_msi(trans_pcie->pci_dev);
@@ -1626,10 +748,10 @@
 
 #define IWL_FLUSH_WAIT_MS	2000
 
-static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	struct iwl_queue *q;
 	int cnt;
 	unsigned long now = jiffies;
@@ -1673,7 +795,7 @@
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
 {
 	int i;
 	static const u32 fh_tbl[] = {
@@ -1752,7 +874,7 @@
 #undef IWL_CMD
 }
 
-void iwl_dump_csr(struct iwl_trans *trans)
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
 {
 	int i;
 	static const u32 csr_tbl[] = {
@@ -1809,7 +931,6 @@
 					const char __user *user_buf,    \
 					size_t count, loff_t *ppos);
 
-
 #define DEBUGFS_READ_FILE_OPS(name)					\
 	DEBUGFS_READ_FUNC(name);					\
 static const struct file_operations iwl_dbgfs_##name##_ops = {		\
@@ -1842,7 +963,7 @@
 {
 	struct iwl_trans *trans = file->private_data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	struct iwl_queue *q;
 	char *buf;
 	int pos = 0;
@@ -1879,7 +1000,7 @@
 {
 	struct iwl_trans *trans = file->private_data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	char buf[256];
 	int pos = 0;
 	const size_t bufsz = sizeof(buf);
@@ -1998,7 +1119,7 @@
 	if (sscanf(buf, "%d", &csr) != 1)
 		return -EFAULT;
 
-	iwl_dump_csr(trans);
+	iwl_pcie_dump_csr(trans);
 
 	return count;
 }
@@ -2012,7 +1133,7 @@
 	int pos = 0;
 	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_dump_fh(trans, &buf);
+	ret = pos = iwl_pcie_dump_fh(trans, &buf);
 	if (buf) {
 		ret = simple_read_from_buffer(user_buf,
 					      count, ppos, buf, pos);
@@ -2081,7 +1202,7 @@
 
 	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
 
-	.send_cmd = iwl_trans_pcie_send_cmd,
+	.send_cmd = iwl_trans_pcie_send_hcmd,
 
 	.tx = iwl_trans_pcie_tx,
 	.reclaim = iwl_trans_pcie_reclaim,
@@ -2091,7 +1212,7 @@
 
 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
-	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
 
 #ifdef CONFIG_PM_SLEEP
 	.suspend = iwl_trans_pcie_suspend,
@@ -2116,7 +1237,7 @@
 	trans = kzalloc(sizeof(struct iwl_trans) +
 			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
 
-	if (WARN_ON(!trans))
+	if (!trans)
 		return NULL;
 
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2149,43 +1270,38 @@
 							  DMA_BIT_MASK(32));
 		/* both attempts failed: */
 		if (err) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "No suitable DMA available.\n");
+			dev_err(&pdev->dev, "No suitable DMA available\n");
 			goto out_pci_disable_device;
 		}
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "pci_request_regions failed\n");
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
 		goto out_pci_disable_device;
 	}
 
 	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
-		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
+		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
 		err = -ENODEV;
 		goto out_pci_release_regions;
 	}
 
-	dev_printk(KERN_INFO, &pdev->dev,
-		   "pci_resource_len = 0x%08llx\n",
-		   (unsigned long long) pci_resource_len(pdev, 0));
-	dev_printk(KERN_INFO, &pdev->dev,
-		   "pci_resource_base = %p\n", trans_pcie->hw_base);
-
-	dev_printk(KERN_INFO, &pdev->dev,
-		   "HW Revision ID = 0x%X\n", pdev->revision);
-
 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
 	err = pci_enable_msi(pdev);
-	if (err)
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "pci_enable_msi failed(0X%x)\n", err);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+		/* enable rfkill interrupt: hw bug w/a */
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+		}
+	}
 
 	trans->dev = &pdev->dev;
 	trans_pcie->irq = pdev->irq;
@@ -2195,16 +1311,8 @@
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
 		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
 
-	/* TODO: Move this away, not needed if not MSI */
-	/* enable rfkill interrupt: hw bug w/a */
-	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
-	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
-		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
-		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
-	}
-
 	/* Initialize the wait queue for commands */
-	init_waitqueue_head(&trans->wait_command_queue);
+	init_waitqueue_head(&trans_pcie->wait_command_queue);
 	spin_lock_init(&trans->reg_lock);
 
 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 79a4ddc..6c5b867 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,170 @@
 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4
 
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
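+/* iwl_queue_space - return the number of free TFD slots, keeping a 2-entry reserve */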
+static int iwl_queue_space(const struct iwl_queue *q)
+{
+	int s = q->read_ptr - q->write_ptr;
+
+	if (q->read_ptr > q->write_ptr)
+		s -= q->n_bd;
+
+	if (s <= 0)
+		s += q->n_window;
+	/* keep some reserve to not confuse empty and full situations */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt)
+static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+{
+	q->n_bd = count;
+	q->n_window = slots_num;
+	q->id = id;
+
+	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
+	 * and iwl_queue_dec_wrap are broken. */
+	if (WARN_ON(!is_power_of_2(count)))
+		return -EINVAL;
+
+	/* slots_num must be power-of-two size, otherwise
+	 * get_cmd_index is broken. */
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
+
+	q->low_mark = q->n_window / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_window / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = 0;
+	q->read_ptr = 0;
+
+	return 0;
+}
+
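+/*
+ * Allocate a coherent DMA buffer and record its size; used for the scheduler
+ * byte-count tables and the keep-warm buffer.
+ */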
+static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+				  struct iwl_dma_ptr *ptr, size_t size)
+{
+	if (WARN_ON(ptr->addr))
+		return -EINVAL;
+
+	ptr->addr = dma_alloc_coherent(trans->dev, size,
+				       &ptr->dma, GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
+				  struct iwl_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
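+/*
+ * Tx queue watchdog: runs when a queue has made no progress for wd_timeout.
+ * Dumps the scheduler state for debugging and reports a NIC error to the
+ * op_mode.
+ */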
+static void iwl_pcie_txq_stuck_timer(unsigned long data)
+{
+	struct iwl_txq *txq = (void *)data;
+	struct iwl_queue *q = &txq->q;
+	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+	u32 scd_sram_addr = trans_pcie->scd_base_addr +
+				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+	u8 buf[16];
+	int i;
+
+	spin_lock(&txq->lock);
+	/* check if triggered erroneously */
+	if (txq->q.read_ptr == txq->q.write_ptr) {
+		spin_unlock(&txq->lock);
+		return;
+	}
+	spin_unlock(&txq->lock);
+
+	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+		jiffies_to_msecs(trans_pcie->wd_timeout));
+	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+		txq->q.read_ptr, txq->q.write_ptr);
+
+	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+	iwl_print_hex_error(trans, buf, sizeof(buf));
+
+	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+		u32 tbl_dw =
+			iwl_read_targ_mem(trans,
+					  trans_pcie->scd_base_addr +
+					  SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+		if (i & 0x1)
+			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+		else
+			tbl_dw = tbl_dw & 0x0000FFFF;
+
+		IWL_ERR(trans,
+			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+			i, active ? "" : "in", fifo, tbl_dw,
+			iwl_read_prph(trans,
+				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+	}
+
+	for (i = q->read_ptr; i != q->write_ptr;
+	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
+		struct iwl_tx_cmd *tx_cmd =
+			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
+			get_unaligned_le32(&tx_cmd->scratch));
+	}
+
+	iwl_op_mode_nic_error(trans->op_mode);
+}
+
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+					     struct iwl_txq *txq, u16 byte_cnt)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +246,36 @@
 			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+					    struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+	int txq_id = txq->q.id;
+	int read_ptr = txq->q.read_ptr;
+	u8 sta_id = 0;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	if (txq_id != trans_pcie->cmd_queue)
+		sta_id = tx_cmd->sta_id;
+
+	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
-void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
 {
 	u32 reg = 0;
 	int txq_id = txq->q.id;
@@ -137,7 +321,7 @@
 	txq->need_update = 0;
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 
@@ -149,15 +333,15 @@
 	return addr;
 }
 
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 
 	return le16_to_cpu(tb->hi_n_len) >> 4;
 }
 
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				       dma_addr_t addr, u16 len)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 	u16 hi_n_len = len << 4;
@@ -171,19 +355,20 @@
 	tfd->num_tbs = idx + 1;
 }
 
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 {
 	return tfd->num_tbs & 0x1f;
 }
 
-static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
-			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
+			       enum dma_data_direction dma_dir)
 {
 	int i;
 	int num_tbs;
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
 
 	if (num_tbs >= IWL_NUM_OF_TBS) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -200,14 +385,14 @@
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
-				iwl_tfd_tb_get_len(tfd, i), dma_dir);
+		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
+				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
 
 	tfd->num_tbs = 0;
 }
 
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  * @trans - transport private data
  * @txq - tx queue
  * @dma_dir - the direction of the DMA mapping
@@ -215,8 +400,8 @@
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+				  enum dma_data_direction dma_dir)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -227,8 +412,8 @@
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-		      dma_dir);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+			   dma_dir);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -247,10 +432,8 @@
 	}
 }
 
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len,
-				 u8 reset)
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+				  dma_addr_t addr, u16 len, u8 reset)
 {
 	struct iwl_queue *q;
 	struct iwl_tfd *tfd, *tfd_tmp;
@@ -263,7 +446,7 @@
 	if (reset)
 		memset(tfd, 0, sizeof(*tfd));
 
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
 
 	/* Each TFD can point to a maximum 20 Tx buffers */
 	if (num_tbs >= IWL_NUM_OF_TBS) {
@@ -279,108 +462,534 @@
 		IWL_ERR(trans, "Unaligned address = %llx\n",
 			(unsigned long long)addr);
 
-	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
 	return 0;
 }
 
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill.  Driver and device exchange status of each
- * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
- *
- * For Tx queue, there are low mark and high mark limits. If, after queuing
- * the packet for Tx, free space become < low mark, Tx queue stopped. When
- * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
- * Tx queue resumed.
- *
- ***************************************************/
-
-int iwl_queue_space(const struct iwl_queue *q)
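+/*
+ * iwl_pcie_txq_alloc - allocate the TFD ring and per-slot entry array for one
+ * Tx queue; slots of the command queue also get a device command buffer each.
+ */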
+static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+			       struct iwl_txq *txq, int slots_num,
+			       u32 txq_id)
 {
-	int s = q->read_ptr - q->write_ptr;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	int i;
 
-	if (q->read_ptr > q->write_ptr)
-		s -= q->n_bd;
+	if (WARN_ON(txq->entries || txq->tfds))
+		return -EINVAL;
 
-	if (s <= 0)
-		s += q->n_window;
-	/* keep some reserve to not confuse empty and full situations */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
+	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
+		    (unsigned long)txq);
+	txq->trans_pcie = trans_pcie;
+
+	txq->q.n_window = slots_num;
+
+	txq->entries = kcalloc(slots_num,
+			       sizeof(struct iwl_pcie_txq_entry),
+			       GFP_KERNEL);
+
+	if (!txq->entries)
+		goto error;
+
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < slots_num; i++) {
+			txq->entries[i].cmd =
+				kmalloc(sizeof(struct iwl_device_cmd),
+					GFP_KERNEL);
+			if (!txq->entries[i].cmd)
+				goto error;
+		}
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+				       &txq->q.dma_addr, GFP_KERNEL);
+	if (!txq->tfds) {
+		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+		goto error;
+	}
+	txq->q.id = txq_id;
+
+	return 0;
+error:
+	if (txq->entries && txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->entries[i].cmd);
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	return -ENOMEM;
+
 }
 
-/**
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
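+/*
+ * iwl_pcie_txq_init - reset the queue indexes and tell the HW where this
+ * queue's TFD circular buffer lives.
+ */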
+static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+			      int slots_num, u32 txq_id)
+{
+	int ret;
+
+	txq->need_update = 0;
+
+	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+			txq_id);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&txq->lock);
+
+	/*
+	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
+	 * given Tx queue, and enable the DMA channel used for that queue.
+	 * Circular buffer (TFD queue in DRAM) physical base address */
+	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+			   txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
+/*
+ * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
  */
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
-	q->n_bd = count;
-	q->n_window = slots_num;
-	q->id = id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	enum dma_data_direction dma_dir;
 
-	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
-	 * and iwl_queue_dec_wrap are broken. */
-	if (WARN_ON(!is_power_of_2(count)))
-		return -EINVAL;
+	if (!q->n_bd)
+		return;
 
-	/* slots_num must be power-of-two size, otherwise
-	 * get_cmd_index is broken. */
-	if (WARN_ON(!is_power_of_2(slots_num)))
-		return -EINVAL;
+	/* In the command queue, all the TBs are mapped as BIDI
+	 * so unmap them as such.
+	 */
+	if (txq_id == trans_pcie->cmd_queue)
+		dma_dir = DMA_BIDIRECTIONAL;
+	else
+		dma_dir = DMA_TO_DEVICE;
 
-	q->low_mark = q->n_window / 4;
-	if (q->low_mark < 4)
-		q->low_mark = 4;
+	spin_lock_bh(&txq->lock);
+	while (q->write_ptr != q->read_ptr) {
+		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+	spin_unlock_bh(&txq->lock);
+}
 
-	q->high_mark = q->n_window / 8;
-	if (q->high_mark < 2)
-		q->high_mark = 2;
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct device *dev = trans->dev;
+	int i;
 
-	q->write_ptr = q->read_ptr = 0;
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_pcie_txq_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < txq->q.n_window; i++) {
+			kfree(txq->entries[i].cmd);
+			kfree(txq->entries[i].copy_cmd);
+			kfree(txq->entries[i].free_buf);
+		}
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd) {
+		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+	}
+
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	del_timer_sync(&txq->stuck_timer);
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
+ */
+static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
+{
+	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_write_prph(trans, SCD_TXFACT, mask);
+}
+
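+/*
+ * iwl_pcie_tx_start - bring up the Tx scheduler after firmware load: clear the
+ * SCD context/status SRAM, point it at the byte-count tables, enable the
+ * command queue and activate all Tx DMA/FIFO channels.
+ */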
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 a;
+	int chan;
+	u32 reg_val;
+
+	/* make sure all queues are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	trans_pcie->scd_base_addr =
+		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+	WARN_ON(scd_base_addr != 0 &&
+		scd_base_addr != trans_pcie->scd_base_addr);
+
+	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+	/* reset context data memory */
+	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+		a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+	/* reset tx status memory */
+	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+		a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+	for (; a < trans_pcie->scd_base_addr +
+	       SCD_TRANS_TBL_OFFSET_QUEUE(
+				trans->cfg->base_params->num_of_queues);
+	       a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+
+	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+		       trans_pcie->scd_bc_tbls.dma >> 10);
+
+	/* The chain extension of the SCD doesn't work well. This feature is
+	 * enabled by default by the HW, so we need to disable it manually.
+	 */
+	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+				trans_pcie->cmd_fifo);
+
+	/* Activate all Tx DMA/FIFO channels */
+	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
+
+	/* Enable DMA channel */
+	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+	/* Update FH chicken bits */
+	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+	/* Enable L1-Active */
+	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
+ */
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ch, txq_id, ret;
+	unsigned long flags;
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	iwl_pcie_txq_set_sched(trans, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+		iwl_write_direct32(trans,
+				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+		if (ret < 0)
+			IWL_ERR(trans,
+				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+				ch,
+				iwl_read_direct32(trans,
+						  FH_TSSR_TX_STATUS_REG));
+	}
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	if (!trans_pcie->txq) {
+		IWL_WARN(trans,
+			 "Stopping tx queues that aren't allocated...\n");
+		return 0;
+	}
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++)
+		iwl_pcie_txq_unmap(trans, txq_id);
 
 	return 0;
 }
 
-static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
-					  struct iwl_tx_queue *txq)
+/*
+ * iwl_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-	int txq_id = txq->q.id;
-	int read_ptr = txq->q.read_ptr;
-	u8 sta_id = 0;
-	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+	int txq_id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+	/* Tx queues */
+	if (trans_pcie->txq) {
+		for (txq_id = 0;
+		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+			iwl_pcie_txq_free(trans, txq_id);
+	}
 
-	if (txq_id != trans_pcie->cmd_queue)
-		sta_id = tx_cmd->sta_id;
+	kfree(trans_pcie->txq);
+	trans_pcie->txq = NULL;
 
-	bc_ent = cpu_to_le16(1 | (sta_id << 12));
-	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
 
-	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].
-			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
 }
 
-static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+	int ret;
+	int txq_id, slots_num;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+			sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail */
+	if (WARN_ON(trans_pcie->txq)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+				   scd_bc_tbls_size);
+	if (ret) {
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+		goto error;
+	}
+
+	/* Alloc keep-warm buffer */
+	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+	if (ret) {
+		IWL_ERR(trans, "Keep Warm allocation failed\n");
+		goto error;
+	}
+
+	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+				  sizeof(struct iwl_txq), GFP_KERNEL);
+	if (!trans_pcie->txq) {
+		IWL_ERR(trans, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
+					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
+					  slots_num, txq_id);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	iwl_pcie_tx_free(trans);
+
+	return ret;
+}
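+
+/*
+ * iwl_pcie_tx_init - allocate the Tx context if needed, then (re)initialize
+ * all Tx queues and point the HW at the keep-warm buffer.
+ */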
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+	int txq_id, slots_num;
+	unsigned long flags;
+	bool alloc = false;
+
+	if (!trans_pcie->txq) {
+		ret = iwl_pcie_tx_alloc(trans);
+		if (ret)
+			goto error;
+		alloc = true;
+	}
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	iwl_write_prph(trans, SCD_TXFACT, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+			   trans_pcie->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
+					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
+					 slots_num, txq_id);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	/* Upon error, free only if we allocated something */
+	if (alloc)
+		iwl_pcie_tx_free(trans);
+	return ret;
+}
+
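+/* Re-arm or cancel the stuck-queue watchdog depending on whether the queue drained */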
+static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
+					   struct iwl_txq *txq)
+{
+	if (!trans_pcie->wd_timeout)
+		return;
+
+	/*
+	 * if empty delete timer, otherwise move timer forward
+	 * since we're making progress on this queue
+	 */
+	if (txq->q.read_ptr == txq->q.write_ptr)
+		del_timer(&txq->stuck_timer);
+	else
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	/* n_bd is usually 256 => n_bd - 1 = 0xff */
+	int tfd_num = ssn & (txq->q.n_bd - 1);
+	struct iwl_queue *q = &txq->q;
+	int last_to_free;
+
+	/* This function is not meant to release cmd queue*/
+	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+		return;
+
+	spin_lock(&txq->lock);
+
+	if (txq->q.read_ptr == tfd_num)
+		goto out;
+
+	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+			   txq_id, txq->q.read_ptr, tfd_num, ssn);
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. This one must be used */
+	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+
+	if (!iwl_queue_used(q, last_to_free)) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, last_to_free, q->n_bd,
+			q->write_ptr, q->read_ptr);
+		goto out;
+	}
+
+	if (WARN_ON(!skb_queue_empty(skbs)))
+		goto out;
+
+	for (;
+	     q->read_ptr != tfd_num;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+			continue;
+
+		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+
+		txq->entries[txq->q.read_ptr].skb = NULL;
+
+		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+
+		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+	}
+
+	iwl_pcie_txq_progress(trans_pcie, txq);
+
+	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+		iwl_wake_queue(trans, txq);
+out:
+	spin_unlock(&txq->lock);
+}
+
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int nfreed = 0;
+
+	lockdep_assert_held(&txq->lock);
+
+	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, idx, q->n_bd,
+			q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (nfreed++ > 0) {
+			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+				idx, q->write_ptr, q->read_ptr);
+			iwl_op_mode_nic_error(trans->op_mode);
+		}
+	}
+
+	iwl_pcie_txq_progress(trans_pcie, txq);
+}
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
 				 u16 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -405,7 +1014,8 @@
 	return 0;
 }
 
-static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
+					     u16 txq_id)
 {
 	/* Simply stop the queue, but don't change any configuration;
 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -424,7 +1034,7 @@
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
 	/* Stop this Tx queue before configuring it */
-	iwl_txq_set_inactive(trans, txq_id);
+	iwl_pcie_txq_set_inactive(trans, txq_id);
 
 	/* Set this queue as a chain-building queue unless it is CMD queue */
 	if (txq_id != trans_pcie->cmd_queue)
@@ -435,7 +1045,7 @@
 		u16 ra_tid = BUILD_RAxTID(sta_id, tid);
 
 		/* Map receiver-address / traffic-ID to this queue */
-		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
+		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
 
 		/* enable aggregations for the queue */
 		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
@@ -480,20 +1090,29 @@
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 stts_addr = trans_pcie->scd_base_addr +
+			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+	static const u32 zero_val[4] = {};
 
 	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
 		WARN_ONCE(1, "queue %d not used", txq_id);
 		return;
 	}
 
-	iwl_txq_set_inactive(trans, txq_id);
+	iwl_pcie_txq_set_inactive(trans, txq_id);
+
+	_iwl_write_targ_mem_dwords(trans, stts_addr,
+				   zero_val, ARRAY_SIZE(zero_val));
+
+	iwl_pcie_txq_unmap(trans, txq_id);
+
 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 
-/**
- * iwl_enqueue_hcmd - enqueue a uCode command
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
  *
@@ -501,15 +1120,17 @@
 * failed. On success, it returns the index (> 0) of command in the
  * command queue.
  */
-static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+				 struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
+	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
-	u32 idx;
+	int idx;
 	u16 copy_size, cmd_size;
 	bool had_nocopy = false;
 	int i;
@@ -526,10 +1147,33 @@
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 			had_nocopy = true;
+			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+			/*
+			 * This is also a chunk that isn't copied
+			 * to the static buffer so set had_nocopy.
+			 */
+			had_nocopy = true;
+
+			/* only allowed once */
+			if (WARN_ON(dup_buf)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+
+			dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+					  GFP_ATOMIC);
+			if (!dup_buf)
+				return -ENOMEM;
 		} else {
 			/* NOCOPY must not be followed by normal! */
-			if (WARN_ON(had_nocopy))
-				return -EINVAL;
+			if (WARN_ON(had_nocopy)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
 			copy_size += cmd->len[i];
 		}
 		cmd_size += cmd->len[i];
@@ -541,8 +1185,12 @@
 	 * allocated into separate TFDs, then we will need to
 	 * increase the size of the buffers.
 	 */
-	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
-		return -EINVAL;
+	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+		 "Command %s (%#x) is too large (%d bytes)\n",
+		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
+		idx = -EINVAL;
+		goto free_dup_buf;
+	}
 
 	spin_lock_bh(&txq->lock);
 
@@ -551,7 +1199,8 @@
 
 		IWL_ERR(trans, "No space in command queue\n");
 		iwl_op_mode_cmd_queue_full(trans->op_mode);
-		return -ENOSPC;
+		idx = -ENOSPC;
+		goto free_dup_buf;
 	}
 
 	idx = get_cmd_index(q, q->write_ptr);
@@ -575,7 +1224,8 @@
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		if (!cmd->len[i])
 			continue;
-		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					 IWL_HCMD_DFL_DUP))
 			break;
 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
 		cmd_pos += cmd->len[i];
@@ -600,7 +1250,7 @@
 
 	IWL_DEBUG_HC(trans,
 		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
-		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
@@ -614,28 +1264,35 @@
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
 	dma_unmap_len_set(out_meta, len, copy_size);
 
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
 
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		const void *data = cmd->data[i];
+
 		if (!cmd->len[i])
 			continue;
-		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
 			continue;
-		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+			data = dup_buf;
+		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmd->len[i], DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_unmap_tfd(trans, out_meta,
-				      &txq->tfds[q->write_ptr],
-				      DMA_BIDIRECTIONAL);
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr],
+					   DMA_BIDIRECTIONAL);
 			idx = -ENOMEM;
 			goto out;
 		}
 
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     cmd->len[i], 0);
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
 	}
 
 	out_meta->flags = cmd->flags;
+	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+		kfree(txq->entries[idx].free_buf);
+	txq->entries[idx].free_buf = dup_buf;
 
 	txq->need_update = 1;
 
@@ -648,70 +1305,18 @@
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
  out:
 	spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+	if (idx < 0)
+		kfree(dup_buf);
 	return idx;
 }
 
-static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
-				      struct iwl_tx_queue *txq)
-{
-	if (!trans_pcie->wd_timeout)
-		return;
-
-	/*
-	 * if empty delete timer, otherwise move timer forward
-	 * since we're making progress on this queue
-	 */
-	if (txq->q.read_ptr == txq->q.write_ptr)
-		del_timer(&txq->stuck_timer);
-	else
-		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-}
-
-/**
- * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As result, some free space forms.  If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
-				   int idx)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	int nfreed = 0;
-
-	lockdep_assert_held(&txq->lock);
-
-	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
-		IWL_ERR(trans,
-			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, idx, q->n_bd,
-			q->write_ptr, q->read_ptr);
-		return;
-	}
-
-	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-		if (nfreed++ > 0) {
-			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-				idx, q->write_ptr, q->read_ptr);
-			iwl_op_mode_nic_error(trans->op_mode);
-		}
-
-	}
-
-	iwl_queue_progress(trans_pcie, txq);
-}
-
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
  * @handler_status: return value of the handler of the command
  *	(put in setup_rx_handlers)
@@ -720,8 +1325,8 @@
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
-			 int handler_status)
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -731,7 +1336,7 @@
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -751,7 +1356,7 @@
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 
-	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -763,20 +1368,18 @@
 		meta->source->handler_status = handler_status;
 	}
 
-	iwl_hcmd_queue_reclaim(trans, txq_id, index);
+	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
 		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			IWL_WARN(trans,
 				 "HCMD_ACTIVE already clear for command %s\n",
-				 trans_pcie_get_cmd_string(trans_pcie,
-							   cmd->hdr.cmd));
+				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
 		}
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-			       trans_pcie_get_cmd_string(trans_pcie,
-							 cmd->hdr.cmd));
-		wake_up(&trans->wait_command_queue);
+			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
+		wake_up(&trans_pcie->wait_command_queue);
 	}
 
 	meta->flags = 0;
@@ -786,7 +1389,8 @@
 
 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
 
-static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
+				    struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
@@ -795,59 +1399,59 @@
 	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
 		return -EINVAL;
 
-
-	ret = iwl_enqueue_hcmd(trans, cmd);
+	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
 	if (ret < 0) {
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 	return 0;
 }
 
-static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
+				   struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int cmd_idx;
 	int ret;
 
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
 	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
 				     &trans_pcie->status))) {
 		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
 		return -EIO;
 	}
 
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
-	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
+	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 
-	ret = wait_event_timeout(trans->wait_command_queue,
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
 				 !test_bit(STATUS_HCMD_ACTIVE,
 					   &trans_pcie->status),
 				 HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
 		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
-			struct iwl_tx_queue *txq =
+			struct iwl_txq *txq =
 				&trans_pcie->txq[trans_pcie->cmd_queue];
 			struct iwl_queue *q = &txq->q;
 
 			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
-				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+				get_cmd_string(trans_pcie, cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
 			IWL_ERR(trans,
@@ -857,16 +1461,28 @@
 			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 			IWL_DEBUG_INFO(trans,
 				       "Clearing HCMD_ACTIVE for command %s\n",
-				       trans_pcie_get_cmd_string(trans_pcie,
-								 cmd->id));
+				       get_cmd_string(trans_pcie, cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
 	}
 
+	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+			get_cmd_string(trans_pcie, cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+		ret = -ERFKILL;
+		goto cancel;
+	}
+
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
 		ret = -EIO;
 		goto cancel;
 	}
@@ -893,64 +1509,183 @@
 	return ret;
 }
 
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
-{
-	if (cmd->flags & CMD_ASYNC)
-		return iwl_send_cmd_async(trans, cmd);
-
-	return iwl_send_cmd_sync(trans, cmd);
-}
-
-/* Frees buffers until index _not_ inclusive */
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
-			 struct sk_buff_head *skbs)
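+/*
+ * iwl_trans_pcie_send_hcmd - send a host command; bail out early on FW error
+ * or RF-kill, otherwise dispatch to the sync or async path.
+ */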
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	int last_to_free;
-	int freed = 0;
 
-	/* This function is not meant to release cmd queue*/
-	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
-		return 0;
+	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
+		return -EIO;
 
-	lockdep_assert_held(&txq->lock);
+	if (test_bit(STATUS_RFKILL, &trans_pcie->status))
+		return -ERFKILL;
 
-	/*Since we free until index _not_ inclusive, the one before index is
-	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+	if (cmd->flags & CMD_ASYNC)
+		return iwl_pcie_send_hcmd_async(trans, cmd);
 
-	if ((index >= q->n_bd) ||
-	   (iwl_queue_used(q, last_to_free) == 0)) {
-		IWL_ERR(trans,
-			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, last_to_free, q->n_bd,
-			q->write_ptr, q->read_ptr);
-		return 0;
+	/* We still can fail on RFKILL that can be asserted while we wait */
+	return iwl_pcie_send_hcmd_sync(trans, cmd);
+}
+
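+/*
+ * iwl_trans_pcie_tx - map one 802.11 frame onto a TFD (Tx command + MAC header
+ * in the first chunk, payload in the second) and hand it to the HW.
+ */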
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+		      struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+	struct iwl_cmd_meta *out_meta;
+	struct iwl_txq *txq;
+	struct iwl_queue *q;
+	dma_addr_t phys_addr = 0;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, firstlen, secondlen;
+	u8 wait_write_ptr = 0;
+	__le16 fc = hdr->frame_control;
+	u8 hdr_len = ieee80211_hdrlen(fc);
+	u16 __maybe_unused wifi_seq;
+
+	txq = &trans_pcie->txq[txq_id];
+	q = &txq->q;
+
+	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
 	}
 
-	if (WARN_ON(!skb_queue_empty(skbs)))
-		return 0;
+	spin_lock(&txq->lock);
 
-	for (;
-	     q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	/* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement to help the SCD to parse
+	 * the BA.
+	 * Check here that the packets are in the right place on the ring.
+	 */
+#ifdef CONFIG_IWLWIFI_DEBUG
+	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
+		  ((wifi_seq & 0xff) != q->write_ptr),
+		  "Q: %d WiFi Seq %d tfdNum %d",
+		  txq_id, wifi_seq, q->write_ptr);
+#endif
 
-		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
-			continue;
+	/* Set up driver data for this TFD */
+	txq->entries[q->write_ptr].skb = skb;
+	txq->entries[q->write_ptr].cmd = dev_cmd;
 
-		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+	dev_cmd->hdr.cmd = REPLY_TX;
+	dev_cmd->hdr.sequence =
+		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+			    INDEX_TO_SEQ(q->write_ptr)));
 
-		txq->entries[txq->q.read_ptr].skb = NULL;
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_meta = &txq->entries[q->write_ptr].meta;
 
-		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+	firstlen = (len + 3) & ~3;
 
-		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
-		freed++;
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (firstlen != len)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys = dma_map_single(trans->dev,
+				    &dev_cmd->hdr, firstlen,
+				    DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+		goto out_err;
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+
+	if (!ieee80211_has_morefrags(fc)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
 	}
 
-	iwl_queue_progress(trans_pcie, txq);
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
+					   secondlen, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+			dma_unmap_single(trans->dev,
+					 dma_unmap_addr(out_meta, mapping),
+					 dma_unmap_len(out_meta, len),
+					 DMA_BIDIRECTIONAL);
+			goto out_err;
+		}
+	}
 
-	return freed;
+	/* Attach buffers to TFD */
+	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
+	if (secondlen > 0)
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
+
+	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+				offsetof(struct iwl_tx_cmd, scratch);
+
+	/* take back ownership of DMA buffer to enable update */
+	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
+				DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+		     le16_to_cpu(dev_cmd->hdr.sequence));
+	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+
+	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
+				   DMA_BIDIRECTIONAL);
+
+	trace_iwlwifi_dev_tx(trans->dev, skb,
+			     &txq->tfds[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &dev_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+	trace_iwlwifi_dev_tx_data(trans->dev, skb,
+				  skb->data + hdr_len, secondlen);
+
+	/* start timer if queue currently empty */
+	if (txq->need_update && q->read_ptr == q->write_ptr &&
+	    trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+	if (iwl_queue_space(q) < q->high_mark) {
+		if (wait_write_ptr) {
+			txq->need_update = 1;
+			iwl_pcie_txq_inc_wr_ptr(trans, txq);
+		} else {
+			iwl_stop_queue(trans, txq);
+		}
+	}
+	spin_unlock(&txq->lock);
+	return 0;
+out_err:
+	spin_unlock(&txq->lock);
+	return -1;
 }
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1c10b54..ec36868 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -436,19 +436,19 @@
  */
 
 static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
-				       struct ieee80211_channel *channel,
-				       enum nl80211_channel_type channel_type)
+				       struct cfg80211_chan_def *chandef)
 {
 	struct lbs_private *priv = wiphy_priv(wiphy);
 	int ret = -ENOTSUPP;
 
 	lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
-			   channel->center_freq, channel_type);
+			   chandef->chan->center_freq,
+			   cfg80211_get_chandef_type(chandef));
 
-	if (channel_type != NL80211_CHAN_NO_HT)
+	if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
 		goto out;
 
-	ret = lbs_set_channel(priv, channel->hw_value);
+	ret = lbs_set_channel(priv, chandef->chan->hw_value);
 
  out:
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -1734,7 +1734,7 @@
 	/* Fake DS channel IE */
 	*fake++ = WLAN_EID_DS_PARAMS;
 	*fake++ = 1;
-	*fake++ = params->channel->hw_value;
+	*fake++ = params->chandef.chan->hw_value;
 	/* Fake IBSS params IE */
 	*fake++ = WLAN_EID_IBSS_PARAMS;
 	*fake++ = 2;
@@ -1755,7 +1755,7 @@
 	lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
 
 	bss = cfg80211_inform_bss(priv->wdev->wiphy,
-				  params->channel,
+				  params->chandef.chan,
 				  bssid,
 				  0,
 				  capability,
@@ -1833,7 +1833,7 @@
 	cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
 	cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
 	cmd.bss.ds.header.len = 1;
-	cmd.bss.ds.channel = params->channel->hw_value;
+	cmd.bss.ds.channel = params->chandef.chan->hw_value;
 	cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
 	cmd.bss.ibss.header.len = 2;
 	cmd.bss.ibss.atimwindow = 0;
@@ -1942,7 +1942,7 @@
 	cmd.ibss.atimwindow = 0;
 	cmd.ds.header.id = WLAN_EID_DS_PARAMS;
 	cmd.ds.header.len = 1;
-	cmd.ds.channel = params->channel->hw_value;
+	cmd.ds.channel = params->chandef.chan->hw_value;
 	/* Only v8 and below support setting probe delay */
 	if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
 		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
@@ -1987,18 +1987,18 @@
 
 	lbs_deb_enter(LBS_DEB_CFG80211);
 
-	if (!params->channel) {
+	if (!params->chandef.chan) {
 		ret = -ENOTSUPP;
 		goto out;
 	}
 
-	ret = lbs_set_channel(priv, params->channel->hw_value);
+	ret = lbs_set_channel(priv, params->chandef.chan->hw_value);
 	if (ret)
 		goto out;
 
 	/* Search if someone is beaconing. This assumes that the
 	 * bss list is populated already */
-	bss = cfg80211_get_bss(wiphy, params->channel, params->bssid,
+	bss = cfg80211_get_bss(wiphy, params->chandef.chan, params->bssid,
 		params->ssid, params->ssid_len,
 		WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
 
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 4cb2343..739309e 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -588,17 +588,38 @@
 	size = fw->size;
 
 	while (size) {
-		ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
-		if (ret)
-			goto release;
+		timeout = jiffies + HZ;
+		while (1) {
+			ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+			if (ret)
+				goto release;
 
-		req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
-		if (ret)
-			goto release;
+			req_size = sdio_readb(card->func, IF_SDIO_RD_BASE,
+					&ret);
+			if (ret)
+				goto release;
 
-		req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8;
-		if (ret)
-			goto release;
+			req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1,
+					&ret) << 8;
+			if (ret)
+				goto release;
+
+			/*
+			 * For SD8688, wait until the length is not 0, 1 or 2
+			 * before downloading the first FW block, since the
+			 * BOOT code writes this register to indicate the
+			 * helper/FW download winner; the value can be 1 or 2
+			 * (Func1 or Func2).
+			 */
+			if ((size != fw->size) || (req_size > 2))
+				break;
+			if (time_after(jiffies, timeout)) {
+				ret = -ETIMEDOUT;
+				goto release;
+			}
+			mdelay(1);
+		}
+
 /*
 		lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size);
 */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 9780775..3e81264 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -101,7 +101,7 @@
 
 	switch (action) {
 	case CMD_ACT_MESH_CONFIG_START:
-		ie->id = WLAN_EID_GENERIC;
+		ie->id = WLAN_EID_VENDOR_SPECIFIC;
 		ie->val.oui[0] = 0x00;
 		ie->val.oui[1] = 0x50;
 		ie->val.oui[2] = 0x43;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 429ca32..2aa8a1a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -44,9 +44,9 @@
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
 
-static bool fake_hw_scan;
-module_param(fake_hw_scan, bool, 0444);
-MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+static int channels = 1;
+module_param(channels, int, 0444);
+MODULE_PARM_DESC(channels, "Number of concurrent channels");
 
 /**
  * enum hwsim_regtest - the type of regulatory tests we offer
@@ -166,7 +166,9 @@
 static inline void hwsim_check_magic(struct ieee80211_vif *vif)
 {
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
-	WARN_ON(vp->magic != HWSIM_VIF_MAGIC);
+	WARN(vp->magic != HWSIM_VIF_MAGIC,
+	     "Invalid VIF (%p) magic %#x, %pM, %d/%d\n",
+	     vif, vp->magic, vif->addr, vif->type, vif->p2p);
 }
 
 static inline void hwsim_set_magic(struct ieee80211_vif *vif)
@@ -185,7 +187,7 @@
 	u32 magic;
 };
 
-#define HWSIM_STA_MAGIC	0x6d537748
+#define HWSIM_STA_MAGIC	0x6d537749
 
 static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
 {
@@ -205,6 +207,30 @@
 	sp->magic = 0;
 }
 
+struct hwsim_chanctx_priv {
+	u32 magic;
+};
+
+#define HWSIM_CHANCTX_MAGIC 0x6d53774a
+
+static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+	struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+	WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC);
+}
+
+static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+	struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+	cp->magic = HWSIM_CHANCTX_MAGIC;
+}
+
+static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+	struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+	cp->magic = 0;
+}
+
 static struct class *hwsim_class;
 
 static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -299,6 +325,13 @@
 
 	struct mac_address addresses[2];
 
+	struct ieee80211_channel *tmp_chan;
+	struct delayed_work roc_done;
+	struct delayed_work hw_scan;
+	struct cfg80211_scan_request *hw_scan_request;
+	struct ieee80211_vif *hw_scan_vif;
+	int scan_chan_idx;
+
 	struct ieee80211_channel *channel;
 	unsigned long beacon_int; /* in jiffies unit */
 	unsigned int rx_filter;
@@ -396,7 +429,8 @@
 }
 
 static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
-				      struct sk_buff *tx_skb)
+				      struct sk_buff *tx_skb,
+				      struct ieee80211_channel *chan)
 {
 	struct mac80211_hwsim_data *data = hw->priv;
 	struct sk_buff *skb;
@@ -423,7 +457,7 @@
 	hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
 	hdr->rt_flags = 0;
 	hdr->rt_rate = txrate->bitrate / 5;
-	hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
+	hdr->rt_channel = cpu_to_le16(chan->center_freq);
 	flags = IEEE80211_CHAN_2GHZ;
 	if (txrate->flags & IEEE80211_RATE_ERP_G)
 		flags |= IEEE80211_CHAN_OFDM;
@@ -441,9 +475,9 @@
 }
 
 
-static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
+static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
+				       const u8 *addr)
 {
-	struct mac80211_hwsim_data *data = hw->priv;
 	struct sk_buff *skb;
 	struct hwsim_radiotap_hdr *hdr;
 	u16 flags;
@@ -464,7 +498,7 @@
 					  (1 << IEEE80211_RADIOTAP_CHANNEL));
 	hdr->rt_flags = 0;
 	hdr->rt_rate = 0;
-	hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
+	hdr->rt_channel = cpu_to_le16(chan->center_freq);
 	flags = IEEE80211_CHAN_2GHZ;
 	hdr->rt_chbitmask = cpu_to_le16(flags);
 
@@ -537,6 +571,7 @@
 	md.ret = false;
 	md.addr = addr;
 	ieee80211_iterate_active_interfaces_atomic(data->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
 						   mac80211_hwsim_addr_iter,
 						   &md);
 
@@ -556,12 +591,6 @@
 	int i;
 	struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
 
-	if (data->idle) {
-		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
-		dev_kfree_skb(my_skb);
-		return;
-	}
-
 	if (data->ps != PS_DISABLED)
 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
 	/* If the queue contains MAX_QUEUE skb's drop some */
@@ -629,8 +658,38 @@
 	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
 }
 
+static bool hwsim_chans_compat(struct ieee80211_channel *c1,
+			       struct ieee80211_channel *c2)
+{
+	if (!c1 || !c2)
+		return false;
+
+	return c1->center_freq == c2->center_freq;
+}
+
+struct tx_iter_data {
+	struct ieee80211_channel *channel;
+	bool receive;
+};
+
+static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
+				   struct ieee80211_vif *vif)
+{
+	struct tx_iter_data *data = _data;
+
+	if (!vif->chanctx_conf)
+		return;
+
+	if (!hwsim_chans_compat(data->channel,
+				rcu_dereference(vif->chanctx_conf)->def.chan))
+		return;
+
+	data->receive = true;
+}
+
 static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb,
+					  struct ieee80211_channel *chan)
 {
 	struct mac80211_hwsim_data *data = hw->priv, *data2;
 	bool ack = false;
@@ -639,15 +698,10 @@
 	struct ieee80211_rx_status rx_status;
 	struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
 
-	if (data->idle) {
-		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
-		return false;
-	}
-
 	memset(&rx_status, 0, sizeof(rx_status));
-	rx_status.flag |= RX_FLAG_MACTIME_MPDU;
-	rx_status.freq = data->channel->center_freq;
-	rx_status.band = data->channel->band;
+	rx_status.flag |= RX_FLAG_MACTIME_START;
+	rx_status.freq = chan->center_freq;
+	rx_status.band = chan->band;
 	rx_status.rate_idx = info->control.rates[0].idx;
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
 		rx_status.flag |= RX_FLAG_HT;
@@ -673,17 +727,35 @@
 	list_for_each_entry(data2, &hwsim_radios, list) {
 		struct sk_buff *nskb;
 		struct ieee80211_mgmt *mgmt;
+		struct tx_iter_data tx_iter_data = {
+			.receive = false,
+			.channel = chan,
+		};
 
 		if (data == data2)
 			continue;
 
-		if (data2->idle || !data2->started ||
-		    !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
-		    data->channel->center_freq != data2->channel->center_freq ||
-		    !(data->group & data2->group))
+		if (!data2->started || (data2->idle && !data2->tmp_chan) ||
+		    !hwsim_ps_rx_ok(data2, skb))
 			continue;
 
-		nskb = skb_copy(skb, GFP_ATOMIC);
+		if (!(data->group & data2->group))
+			continue;
+
+		if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
+		    !hwsim_chans_compat(chan, data2->channel)) {
+			ieee80211_iterate_active_interfaces_atomic(
+				data2->hw, IEEE80211_IFACE_ITER_NORMAL,
+				mac80211_hwsim_tx_iter, &tx_iter_data);
+			if (!tx_iter_data.receive)
+				continue;
+		}
+
+		/*
+		 * reserve some space for our vendor radiotap data and
+		 * the normal radiotap header, since we're copying anyway
+		 */
+		nskb = skb_copy_expand(skb, 64, 0, GFP_ATOMIC);
 		if (nskb == NULL)
 			continue;
 
@@ -701,6 +773,33 @@
 				(data->tsf_offset - data2->tsf_offset) +
 				24 * 8 * 10 / txrate->bitrate);
 
+#if 0
+		/*
+		 * Don't enable this code by default as the OUI 00:00:00
+		 * is registered to Xerox so we shouldn't use it here, it
+		 * is registered to Xerox, so we shouldn't use it here; it
+		 * Note that this code requires the headroom in the SKB
+		 * that was allocated earlier.
+		 */
+		rx_status.vendor_radiotap_oui[0] = 0x00;
+		rx_status.vendor_radiotap_oui[1] = 0x00;
+		rx_status.vendor_radiotap_oui[2] = 0x00;
+		rx_status.vendor_radiotap_subns = 127;
+		/*
+		 * Radiotap vendor namespaces can (and should) also be
+		 * split into fields by using the standard radiotap
+		 * presence bitmap mechanism. Use just BIT(0) here for
+		 * the presence bitmap.
+		 */
+		rx_status.vendor_radiotap_bitmap = BIT(0);
+		/* We have 8 bytes of (dummy) data */
+		rx_status.vendor_radiotap_len = 8;
+		/* For testing, also require it to be aligned */
+		rx_status.vendor_radiotap_align = 8;
+		/* push the data */
+		memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
+#endif
+
 		memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
 		ieee80211_rx_irqsafe(data2->hw, nskb);
 	}
@@ -713,18 +812,51 @@
 			      struct ieee80211_tx_control *control,
 			      struct sk_buff *skb)
 {
+	struct mac80211_hwsim_data *data = hw->priv;
+	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *channel;
 	bool ack;
-	struct ieee80211_tx_info *txi;
 	u32 _portid;
 
-	mac80211_hwsim_monitor_rx(hw, skb);
-
-	if (skb->len < 10) {
+	if (WARN_ON(skb->len < 10)) {
 		/* Should not happen; just a sanity check for addr1 use */
 		dev_kfree_skb(skb);
 		return;
 	}
 
+	if (channels == 1) {
+		channel = data->channel;
+	} else if (txi->hw_queue == 4) {
+		channel = data->tmp_chan;
+	} else {
+		chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
+		if (chanctx_conf)
+			channel = chanctx_conf->def.chan;
+		else
+			channel = NULL;
+	}
+
+	if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (data->idle && !data->tmp_chan) {
+		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (txi->control.vif)
+		hwsim_check_magic(txi->control.vif);
+	if (control->sta)
+		hwsim_check_sta_magic(control->sta);
+
+	txi->rate_driver_data[0] = channel;
+
+	mac80211_hwsim_monitor_rx(hw, skb, channel);
+
 	/* wmediumd mode check */
 	_portid = ACCESS_ONCE(wmediumd_portid);
 
@@ -732,15 +864,13 @@
 		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
 	/* NO wmediumd detected, perfect medium simulation */
-	ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
+	ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
 
 	if (ack && skb->len >= 16) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-		mac80211_hwsim_monitor_ack(hw, hdr->addr2);
+		mac80211_hwsim_monitor_ack(channel, hdr->addr2);
 	}
 
-	txi = IEEE80211_SKB_CB(skb);
-
 	ieee80211_tx_info_clear_status(txi);
 
 	/* frame was transmitted at most favorable rate at first attempt */
@@ -778,6 +908,13 @@
 		    __func__, ieee80211_vif_type_p2p(vif),
 		    vif->addr);
 	hwsim_set_magic(vif);
+
+	vif->cab_queue = 0;
+	vif->hw_queue[IEEE80211_AC_VO] = 0;
+	vif->hw_queue[IEEE80211_AC_VI] = 1;
+	vif->hw_queue[IEEE80211_AC_BE] = 2;
+	vif->hw_queue[IEEE80211_AC_BK] = 3;
+
 	return 0;
 }
 
@@ -807,14 +944,26 @@
 	hwsim_clear_magic(vif);
 }
 
+static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+				    struct sk_buff *skb,
+				    struct ieee80211_channel *chan)
+{
+	u32 _pid = ACCESS_ONCE(wmediumd_portid);
+
+	mac80211_hwsim_monitor_rx(hw, skb, chan);
+
+	if (_pid)
+		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+	mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
+	dev_kfree_skb(skb);
+}
 
 static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
 				     struct ieee80211_vif *vif)
 {
 	struct ieee80211_hw *hw = arg;
 	struct sk_buff *skb;
-	struct ieee80211_tx_info *info;
-	u32 _portid;
 
 	hwsim_check_magic(vif);
 
@@ -826,18 +975,9 @@
 	skb = ieee80211_beacon_get(hw, vif);
 	if (skb == NULL)
 		return;
-	info = IEEE80211_SKB_CB(skb);
 
-	mac80211_hwsim_monitor_rx(hw, skb);
-
-	/* wmediumd mode check */
-	_portid = ACCESS_ONCE(wmediumd_portid);
-
-	if (_portid)
-		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
-
-	mac80211_hwsim_tx_frame_no_nl(hw, skb);
-	dev_kfree_skb(skb);
+	mac80211_hwsim_tx_frame(hw, skb,
+				rcu_dereference(vif->chanctx_conf)->def.chan);
 }
 
 
@@ -850,7 +990,8 @@
 		return;
 
 	ieee80211_iterate_active_interfaces_atomic(
-		hw, mac80211_hwsim_beacon_tx, hw);
+		hw, IEEE80211_IFACE_ITER_NORMAL,
+		mac80211_hwsim_beacon_tx, hw);
 
 	data->beacon_timer.expires = jiffies + data->beacon_int;
 	add_timer(&data->beacon_timer);
@@ -877,7 +1018,7 @@
 	wiphy_debug(hw->wiphy,
 		    "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
 		    __func__,
-		    conf->channel->center_freq,
+		    conf->channel ? conf->channel->center_freq : 0,
 		    hwsim_chantypes[conf->channel_type],
 		    !!(conf->flags & IEEE80211_CONF_IDLE),
 		    !!(conf->flags & IEEE80211_CONF_PS),
@@ -886,6 +1027,9 @@
 	data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 
 	data->channel = conf->channel;
+
+	WARN_ON(data->channel && channels > 1);
+
 	data->power_level = conf->power_level;
 	if (!data->started || !data->beacon_int)
 		del_timer(&data->beacon_timer);
@@ -963,15 +1107,17 @@
 	}
 
 	if (changed & BSS_CHANGED_HT) {
-		wiphy_debug(hw->wiphy, "  HT: op_mode=0x%x, chantype=%s\n",
-			    info->ht_operation_mode,
-			    hwsim_chantypes[info->channel_type]);
+		wiphy_debug(hw->wiphy, "  HT: op_mode=0x%x\n",
+			    info->ht_operation_mode);
 	}
 
 	if (changed & BSS_CHANGED_BASIC_RATES) {
 		wiphy_debug(hw->wiphy, "  BASIC_RATES: 0x%llx\n",
 			    (unsigned long long) info->basic_rates);
 	}
+
+	if (changed & BSS_CHANGED_TXPOWER)
+		wiphy_debug(hw->wiphy, "  TX Power: %d dBm\n", info->txpower);
 }
 
 static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
@@ -1166,45 +1312,96 @@
 	/* Not implemented, queues only on kernel side */
 }
 
-struct hw_scan_done {
-	struct delayed_work w;
-	struct ieee80211_hw *hw;
-};
-
-static void hw_scan_done(struct work_struct *work)
+static void hw_scan_work(struct work_struct *work)
 {
-	struct hw_scan_done *hsd =
-		container_of(work, struct hw_scan_done, w.work);
+	struct mac80211_hwsim_data *hwsim =
+		container_of(work, struct mac80211_hwsim_data, hw_scan.work);
+	struct cfg80211_scan_request *req = hwsim->hw_scan_request;
+	int dwell, i;
 
-	ieee80211_scan_completed(hsd->hw, false);
-	kfree(hsd);
+	mutex_lock(&hwsim->mutex);
+	if (hwsim->scan_chan_idx >= req->n_channels) {
+		wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
+		ieee80211_scan_completed(hwsim->hw, false);
+		hwsim->hw_scan_request = NULL;
+		hwsim->hw_scan_vif = NULL;
+		hwsim->tmp_chan = NULL;
+		mutex_unlock(&hwsim->mutex);
+		return;
+	}
+
+	wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n",
+		    req->channels[hwsim->scan_chan_idx]->center_freq);
+
+	hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
+	if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+	    !req->n_ssids) {
+		dwell = 120;
+	} else {
+		dwell = 30;
+		/* send probes */
+		for (i = 0; i < req->n_ssids; i++) {
+			struct sk_buff *probe;
+
+			probe = ieee80211_probereq_get(hwsim->hw,
+						       hwsim->hw_scan_vif,
+						       req->ssids[i].ssid,
+						       req->ssids[i].ssid_len,
+						       req->ie, req->ie_len);
+			if (!probe)
+				continue;
+			local_bh_disable();
+			mac80211_hwsim_tx_frame(hwsim->hw, probe,
+						hwsim->tmp_chan);
+			local_bh_enable();
+		}
+	}
+	ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
+				     msecs_to_jiffies(dwell));
+	hwsim->scan_chan_idx++;
+	mutex_unlock(&hwsim->mutex);
 }
 
 static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
 				  struct ieee80211_vif *vif,
 				  struct cfg80211_scan_request *req)
 {
-	struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
-	int i;
+	struct mac80211_hwsim_data *hwsim = hw->priv;
 
-	if (!hsd)
-		return -ENOMEM;
+	mutex_lock(&hwsim->mutex);
+	if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
+		mutex_unlock(&hwsim->mutex);
+		return -EBUSY;
+	}
+	hwsim->hw_scan_request = req;
+	hwsim->hw_scan_vif = vif;
+	hwsim->scan_chan_idx = 0;
+	mutex_unlock(&hwsim->mutex);
 
-	hsd->hw = hw;
-	INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+	wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
 
-	printk(KERN_DEBUG "hwsim hw_scan request\n");
-	for (i = 0; i < req->n_channels; i++)
-		printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
-			req->channels[i]->center_freq);
-	print_hex_dump(KERN_DEBUG, "scan IEs: ", DUMP_PREFIX_OFFSET,
-			16, 1, req->ie, req->ie_len, 1);
-
-	ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+	ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
 
 	return 0;
 }
 
+static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif)
+{
+	struct mac80211_hwsim_data *hwsim = hw->priv;
+
+	wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
+
+	cancel_delayed_work_sync(&hwsim->hw_scan);
+
+	mutex_lock(&hwsim->mutex);
+	ieee80211_scan_completed(hwsim->hw, true);
+	hwsim->tmp_chan = NULL;
+	hwsim->hw_scan_request = NULL;
+	hwsim->hw_scan_vif = NULL;
+	mutex_unlock(&hwsim->mutex);
+}
+
 static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
 {
 	struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1235,6 +1432,111 @@
 	mutex_unlock(&hwsim->mutex);
 }
 
+static void hw_roc_done(struct work_struct *work)
+{
+	struct mac80211_hwsim_data *hwsim =
+		container_of(work, struct mac80211_hwsim_data, roc_done.work);
+
+	mutex_lock(&hwsim->mutex);
+	ieee80211_remain_on_channel_expired(hwsim->hw);
+	hwsim->tmp_chan = NULL;
+	mutex_unlock(&hwsim->mutex);
+
+	wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n");
+}
+
+static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_channel *chan,
+			      int duration)
+{
+	struct mac80211_hwsim_data *hwsim = hw->priv;
+
+	mutex_lock(&hwsim->mutex);
+	if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
+		mutex_unlock(&hwsim->mutex);
+		return -EBUSY;
+	}
+
+	hwsim->tmp_chan = chan;
+	mutex_unlock(&hwsim->mutex);
+
+	wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
+		    chan->center_freq, duration);
+
+	ieee80211_ready_on_channel(hw);
+
+	ieee80211_queue_delayed_work(hw, &hwsim->roc_done,
+				     msecs_to_jiffies(duration));
+	return 0;
+}
+
+static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
+{
+	struct mac80211_hwsim_data *hwsim = hw->priv;
+
+	cancel_delayed_work_sync(&hwsim->roc_done);
+
+	mutex_lock(&hwsim->mutex);
+	hwsim->tmp_chan = NULL;
+	mutex_unlock(&hwsim->mutex);
+
+	wiphy_debug(hw->wiphy, "hwsim ROC canceled\n");
+
+	return 0;
+}
+
+static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
+				      struct ieee80211_chanctx_conf *ctx)
+{
+	hwsim_set_chanctx_magic(ctx);
+	wiphy_debug(hw->wiphy,
+		    "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+		    ctx->def.chan->center_freq, ctx->def.width,
+		    ctx->def.center_freq1, ctx->def.center_freq2);
+	return 0;
+}
+
+static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
+					  struct ieee80211_chanctx_conf *ctx)
+{
+	wiphy_debug(hw->wiphy,
+		    "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+		    ctx->def.chan->center_freq, ctx->def.width,
+		    ctx->def.center_freq1, ctx->def.center_freq2);
+	hwsim_check_chanctx_magic(ctx);
+	hwsim_clear_chanctx_magic(ctx);
+}
+
+static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
+					  struct ieee80211_chanctx_conf *ctx,
+					  u32 changed)
+{
+	hwsim_check_chanctx_magic(ctx);
+	wiphy_debug(hw->wiphy,
+		    "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+		    ctx->def.chan->center_freq, ctx->def.width,
+		    ctx->def.center_freq1, ctx->def.center_freq2);
+}
+
+static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
+					     struct ieee80211_vif *vif,
+					     struct ieee80211_chanctx_conf *ctx)
+{
+	hwsim_check_magic(vif);
+	hwsim_check_chanctx_magic(ctx);
+
+	return 0;
+}
+
+static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
+						struct ieee80211_vif *vif,
+						struct ieee80211_chanctx_conf *ctx)
+{
+	hwsim_check_magic(vif);
+	hwsim_check_chanctx_magic(ctx);
+}
+
 static struct ieee80211_ops mac80211_hwsim_ops =
 {
 	.tx = mac80211_hwsim_tx,
@@ -1315,7 +1617,6 @@
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_pspoll *pspoll;
-	u32 _portid;
 
 	if (!vp->assoc)
 		return;
@@ -1335,25 +1636,18 @@
 	memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
 	memcpy(pspoll->ta, mac, ETH_ALEN);
 
-	/* wmediumd mode check */
-	_portid = ACCESS_ONCE(wmediumd_portid);
-
-	if (_portid)
-		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
-
-	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
-		printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
-	dev_kfree_skb(skb);
+	rcu_read_lock();
+	mac80211_hwsim_tx_frame(data->hw, skb,
+				rcu_dereference(vif->chanctx_conf)->def.chan);
+	rcu_read_unlock();
 }
 
-
 static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
 				struct ieee80211_vif *vif, int ps)
 {
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
-	u32 _portid;
 
 	if (!vp->assoc)
 		return;
@@ -1374,15 +1668,10 @@
 	memcpy(hdr->addr2, mac, ETH_ALEN);
 	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
-	/* wmediumd mode check */
-	_portid = ACCESS_ONCE(wmediumd_portid);
-
-	if (_portid)
-		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
-
-	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
-		printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
-	dev_kfree_skb(skb);
+	rcu_read_lock();
+	mac80211_hwsim_tx_frame(data->hw, skb,
+				rcu_dereference(vif->chanctx_conf)->def.chan);
+	rcu_read_unlock();
 }
 
 
@@ -1423,14 +1712,17 @@
 
 	if (val == PS_MANUAL_POLL) {
 		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
 						    hwsim_send_ps_poll, data);
 		data->ps_poll_pending = true;
 	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
 		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
 						    hwsim_send_nullfunc_ps,
 						    data);
 	} else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
 		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
 						    hwsim_send_nullfunc_no_ps,
 						    data);
 	}
@@ -1551,7 +1843,8 @@
 	   (hwsim_flags & HWSIM_TX_STAT_ACK)) {
 		if (skb->len >= 16) {
 			hdr = (struct ieee80211_hdr *) skb->data;
-			mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
+			mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+						   hdr->addr2);
 		}
 		txi->flags |= IEEE80211_TX_STAT_ACK;
 	}
@@ -1566,7 +1859,7 @@
 					  struct genl_info *info)
 {
 
-	struct mac80211_hwsim_data  *data2;
+	struct mac80211_hwsim_data *data2;
 	struct ieee80211_rx_status rx_status;
 	struct mac_address *dst;
 	int frame_data_len;
@@ -1574,9 +1867,9 @@
 	struct sk_buff *skb = NULL;
 
 	if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
-	   !info->attrs[HWSIM_ATTR_FRAME] ||
-	   !info->attrs[HWSIM_ATTR_RX_RATE] ||
-	   !info->attrs[HWSIM_ATTR_SIGNAL])
+	    !info->attrs[HWSIM_ATTR_FRAME] ||
+	    !info->attrs[HWSIM_ATTR_RX_RATE] ||
+	    !info->attrs[HWSIM_ATTR_SIGNAL])
 		goto out;
 
 	dst = (struct mac_address *)nla_data(
@@ -1604,7 +1897,7 @@
 
 	/* check if radio is configured properly */
 
-	if (data2->idle || !data2->started || !data2->channel)
+	if (data2->idle || !data2->started)
 		goto out;
 
 	/*A frame is received from user space*/
@@ -1688,6 +1981,11 @@
 static int hwsim_init_netlink(void)
 {
 	int rc;
+
+	/* userspace test API hasn't been adjusted for multi-channel */
+	if (channels > 1)
+		return 0;
+
 	printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
 
 	rc = genl_register_family_with_ops(&hwsim_genl_family,
@@ -1710,6 +2008,10 @@
 {
 	int ret;
 
+	/* userspace test API hasn't been adjusted for multi-channel */
+	if (channels > 1)
+		return;
+
 	printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
 	/* unregister the notifier */
 	netlink_unregister_notifier(&hwsim_netlink_notifier);
@@ -1732,7 +2034,7 @@
 	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
-static const struct ieee80211_iface_combination hwsim_if_comb = {
+static struct ieee80211_iface_combination hwsim_if_comb = {
 	.limits = hwsim_if_limits,
 	.n_limits = ARRAY_SIZE(hwsim_if_limits),
 	.max_interfaces = 2048,
@@ -1750,10 +2052,30 @@
 	if (radios < 1 || radios > 100)
 		return -EINVAL;
 
-	if (fake_hw_scan) {
+	if (channels < 1)
+		return -EINVAL;
+
+	if (channels > 1) {
+		hwsim_if_comb.num_different_channels = channels;
 		mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+		mac80211_hwsim_ops.cancel_hw_scan =
+			mac80211_hwsim_cancel_hw_scan;
 		mac80211_hwsim_ops.sw_scan_start = NULL;
 		mac80211_hwsim_ops.sw_scan_complete = NULL;
+		mac80211_hwsim_ops.remain_on_channel =
+			mac80211_hwsim_roc;
+		mac80211_hwsim_ops.cancel_remain_on_channel =
+			mac80211_hwsim_croc;
+		mac80211_hwsim_ops.add_chanctx =
+			mac80211_hwsim_add_chanctx;
+		mac80211_hwsim_ops.remove_chanctx =
+			mac80211_hwsim_remove_chanctx;
+		mac80211_hwsim_ops.change_chanctx =
+			mac80211_hwsim_change_chanctx;
+		mac80211_hwsim_ops.assign_vif_chanctx =
+			mac80211_hwsim_assign_vif_chanctx;
+		mac80211_hwsim_ops.unassign_vif_chanctx =
+			mac80211_hwsim_unassign_vif_chanctx;
 	}
 
 	spin_lock_init(&hwsim_radio_lock);
@@ -1803,13 +2125,18 @@
 		hw->wiphy->iface_combinations = &hwsim_if_comb;
 		hw->wiphy->n_iface_combinations = 1;
 
-		if (fake_hw_scan) {
+		if (channels > 1) {
 			hw->wiphy->max_scan_ssids = 255;
 			hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+			hw->wiphy->max_remain_on_channel_duration = 1000;
 		}
 
+		INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
+		INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
+
 		hw->channel_change_time = 1;
-		hw->queues = 4;
+		hw->queues = 5;
+		hw->offchannel_tx_hw_queue = 4;
 		hw->wiphy->interface_modes =
 			BIT(NL80211_IFTYPE_STATION) |
 			BIT(NL80211_IFTYPE_AP) |
@@ -1824,7 +2151,8 @@
 			    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
 			    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 			    IEEE80211_HW_AMPDU_AGGREGATION |
-			    IEEE80211_HW_WANT_MONITOR_VIF;
+			    IEEE80211_HW_WANT_MONITOR_VIF |
+			    IEEE80211_HW_QUEUE_CONTROL;
 
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
 				    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -1874,6 +2202,34 @@
 			sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
 			hw->wiphy->bands[band] = sband;
+
+			if (channels == 1)
+				continue;
+
+			sband->vht_cap.vht_supported = true;
+			sband->vht_cap.cap =
+				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+				IEEE80211_VHT_CAP_RXLDPC |
+				IEEE80211_VHT_CAP_SHORT_GI_80 |
+				IEEE80211_VHT_CAP_SHORT_GI_160 |
+				IEEE80211_VHT_CAP_TXSTBC |
+				IEEE80211_VHT_CAP_RXSTBC_1 |
+				IEEE80211_VHT_CAP_RXSTBC_2 |
+				IEEE80211_VHT_CAP_RXSTBC_3 |
+				IEEE80211_VHT_CAP_RXSTBC_4 |
+				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT;
+			sband->vht_cap.vht_mcs.rx_mcs_map =
+				cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
+			sband->vht_cap.vht_mcs.tx_mcs_map =
+				sband->vht_cap.vht_mcs.rx_mcs_map;
 		}
 		/* By default all radios are belonging to the first group */
 		data->group = 1;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 395f1bf..68d52cf 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -197,7 +197,7 @@
 				       ra_list_flags);
 		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
 
-		mwifiex_write_data_complete(adapter, skb_src, 0);
+		mwifiex_write_data_complete(adapter, skb_src, 0, 0);
 
 		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
 
@@ -256,7 +256,7 @@
 		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
 			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 					       ra_list_flags);
-			mwifiex_write_data_complete(adapter, skb_aggr, -1);
+			mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
 			return -1;
 		}
 		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
@@ -282,13 +282,13 @@
 		dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
 			__func__, ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
-		mwifiex_write_data_complete(adapter, skb_aggr, ret);
+		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
 		return 0;
 	case -EINPROGRESS:
 		adapter->data_sent = false;
 		break;
 	case 0:
-		mwifiex_write_data_complete(adapter, skb_aggr, ret);
+		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
 		break;
 	default:
 		break;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9402b93..4a97acd 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -58,8 +58,7 @@
 			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
 				mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
 			else
-				mwifiex_process_rx_packet(priv->adapter,
-							  rx_tmp_ptr);
+				mwifiex_process_rx_packet(priv, rx_tmp_ptr);
 		}
 	}
 
@@ -106,7 +105,7 @@
 		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
 			mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
 		else
-			mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+			mwifiex_process_rx_packet(priv, rx_tmp_ptr);
 	}
 
 	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -442,8 +441,7 @@
 			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
 				mwifiex_handle_uap_rx_forward(priv, payload);
 			else
-				mwifiex_process_rx_packet(priv->adapter,
-							  payload);
+				mwifiex_process_rx_packet(priv, payload);
 		}
 		return 0;
 	}
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8e384fa..b2e2772 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -1,7 +1,6 @@
 config MWIFIEX
 	tristate "Marvell WiFi-Ex Driver"
 	depends on CFG80211
-	select LIB80211
 	---help---
 	  This adds support for wireless adapters based on Marvell
 	  802.11n chipsets.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 780d3e1..3b1c277 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -180,10 +180,8 @@
 static int
 mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 struct ieee80211_channel *chan, bool offchan,
-			 enum nl80211_channel_type channel_type,
-			 bool channel_type_valid, unsigned int wait,
-			 const u8 *buf, size_t len, bool no_cck,
-			 bool dont_wait_for_ack, u64 *cookie)
+			 unsigned int wait, const u8 *buf, size_t len,
+			 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
 {
 	struct sk_buff *skb;
 	u16 pkt_len;
@@ -253,7 +251,6 @@
 mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
 				   struct wireless_dev *wdev,
 				   struct ieee80211_channel *chan,
-				   enum nl80211_channel_type channel_type,
 				   unsigned int duration, u64 *cookie)
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
@@ -271,15 +268,14 @@
 	}
 
 	ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
-					 &channel_type, duration);
+					 duration);
 
 	if (!ret) {
 		*cookie = random32() | 1;
 		priv->roc_cfg.cookie = *cookie;
 		priv->roc_cfg.chan = *chan;
-		priv->roc_cfg.chan_type = channel_type;
 
-		cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
+		cfg80211_ready_on_channel(wdev, *cookie, chan,
 					  duration, GFP_ATOMIC);
 
 		wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
@@ -302,13 +298,11 @@
 		return -ENOENT;
 
 	ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
-					 &priv->roc_cfg.chan,
-					 &priv->roc_cfg.chan_type, 0);
+					 &priv->roc_cfg.chan, 0);
 
 	if (!ret) {
 		cfg80211_remain_on_channel_expired(wdev, cookie,
 						   &priv->roc_cfg.chan,
-						   priv->roc_cfg.chan_type,
 						   GFP_ATOMIC);
 
 		memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
@@ -324,6 +318,7 @@
  */
 static int
 mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
 			      enum nl80211_tx_power_setting type,
 			      int mbm)
 {
@@ -471,13 +466,13 @@
 			flag = 1;
 			first_chan = (u32) ch->hw_value;
 			next_chan = first_chan;
-			max_pwr = ch->max_reg_power;
+			max_pwr = ch->max_power;
 			no_of_parsed_chan = 1;
 			continue;
 		}
 
 		if (ch->hw_value == next_chan + 1 &&
-		    ch->max_reg_power == max_pwr) {
+		    ch->max_power == max_pwr) {
 			next_chan++;
 			no_of_parsed_chan++;
 		} else {
@@ -488,7 +483,7 @@
 			no_of_triplet++;
 			first_chan = (u32) ch->hw_value;
 			next_chan = first_chan;
-			max_pwr = ch->max_reg_power;
+			max_pwr = ch->max_power;
 			no_of_parsed_chan = 1;
 		}
 	}
@@ -1296,21 +1291,23 @@
 		return -EINVAL;
 	}
 
-	bss_cfg->channel =
-	    (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
+	bss_cfg->channel = ieee80211_frequency_to_channel(
+				params->chandef.chan->center_freq);
 
 	/* Set appropriate bands */
-	if (params->channel->band == IEEE80211_BAND_2GHZ) {
+	if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
 		bss_cfg->band_cfg = BAND_CONFIG_BG;
 
-		if (params->channel_type == NL80211_CHAN_NO_HT)
+		if (cfg80211_get_chandef_type(&params->chandef) ==
+						NL80211_CHAN_NO_HT)
 			config_bands = BAND_B | BAND_G;
 		else
 			config_bands = BAND_B | BAND_G | BAND_GN;
 	} else {
 		bss_cfg->band_cfg = BAND_CONFIG_A;
 
-		if (params->channel_type == NL80211_CHAN_NO_HT)
+		if (cfg80211_get_chandef_type(&params->chandef) ==
+						NL80211_CHAN_NO_HT)
 			config_bands = BAND_A;
 		else
 			config_bands = BAND_AN | BAND_A;
@@ -1683,7 +1680,7 @@
 	int index = 0, i;
 	u8 config_bands = 0;
 
-	if (params->channel->band == IEEE80211_BAND_2GHZ) {
+	if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
 		if (!params->basic_rates) {
 			config_bands = BAND_B | BAND_G;
 		} else {
@@ -1708,10 +1705,12 @@
 			}
 		}
 
-		if (params->channel_type != NL80211_CHAN_NO_HT)
+		if (cfg80211_get_chandef_type(&params->chandef) !=
+						NL80211_CHAN_NO_HT)
 			config_bands |= BAND_GN;
 	} else {
-		if (params->channel_type == NL80211_CHAN_NO_HT)
+		if (cfg80211_get_chandef_type(&params->chandef) !=
+						NL80211_CHAN_NO_HT)
 			config_bands = BAND_A;
 		else
 			config_bands = BAND_AN | BAND_A;
@@ -1728,9 +1727,10 @@
 	}
 
 	adapter->sec_chan_offset =
-		mwifiex_chan_type_to_sec_chan_offset(params->channel_type);
-	priv->adhoc_channel =
-		ieee80211_frequency_to_channel(params->channel->center_freq);
+		mwifiex_chan_type_to_sec_chan_offset(
+			cfg80211_get_chandef_type(&params->chandef));
+	priv->adhoc_channel = ieee80211_frequency_to_channel(
+				params->chandef.chan->center_freq);
 
 	wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
 		  config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
@@ -1764,7 +1764,8 @@
 
 	ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
 				     params->bssid, priv->bss_mode,
-				     params->channel, NULL, params->privacy);
+				     params->chandef.chan, NULL,
+				     params->privacy);
 done:
 	if (!ret) {
 		cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
@@ -1819,12 +1820,18 @@
 
 	wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
-	if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+	if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+	    atomic_read(&priv->wmm.tx_pkts_queued) >=
 	    MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
 		dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
 		return -EBUSY;
 	}
 
+	if (priv->user_scan_cfg) {
+		dev_err(priv->adapter->dev, "cmd: Scan already in progress..\n");
+		return -EBUSY;
+	}
+
 	priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
 				      GFP_KERNEL);
 	if (!priv->user_scan_cfg) {
@@ -2074,8 +2081,8 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
-			      ether_setup, 1);
+	dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
+			       ether_setup, IEEE80211_NUM_ACS, 1);
 	if (!dev) {
 		wiphy_err(wiphy, "no memory available for netdevice\n");
 		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2116,7 +2123,6 @@
 	}
 
 	sema_init(&priv->async_sem, 1);
-	priv->scan_pending_on_block = false;
 
 	dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
 
@@ -2138,8 +2144,7 @@
 	mwifiex_dev_debugfs_remove(priv);
 #endif
 
-	if (!netif_queue_stopped(priv->netdev))
-		netif_stop_queue(priv->netdev);
+	mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
 
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
@@ -2253,8 +2258,9 @@
 	wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
 	wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
 
-	wiphy->features = NL80211_FEATURE_HT_IBSS |
-			  NL80211_FEATURE_INACTIVITY_TIMER;
+	wiphy->features |= NL80211_FEATURE_HT_IBSS |
+			   NL80211_FEATURE_INACTIVITY_TIMER |
+			   NL80211_FEATURE_LOW_PRIORITY_SCAN;
 
 	/* Reserve space for mwifiex specific private data for BSS */
 	wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ae9010e..5f438e6 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -914,21 +914,24 @@
 
 		dev_err(adapter->dev, "last_cmd_index = %d\n",
 			adapter->dbg.last_cmd_index);
-		print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
-				     adapter->dbg.last_cmd_id, DBG_CMD_NUM);
-		print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
-				     adapter->dbg.last_cmd_act, DBG_CMD_NUM);
+		dev_err(adapter->dev, "last_cmd_id: %*ph\n",
+			(int)sizeof(adapter->dbg.last_cmd_id),
+			adapter->dbg.last_cmd_id);
+		dev_err(adapter->dev, "last_cmd_act: %*ph\n",
+			(int)sizeof(adapter->dbg.last_cmd_act),
+			adapter->dbg.last_cmd_act);
 
 		dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
 			adapter->dbg.last_cmd_resp_index);
-		print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
-				     adapter->dbg.last_cmd_resp_id,
-				     DBG_CMD_NUM);
+		dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
+			(int)sizeof(adapter->dbg.last_cmd_resp_id),
+			adapter->dbg.last_cmd_resp_id);
 
 		dev_err(adapter->dev, "last_event_index = %d\n",
 			adapter->dbg.last_event_index);
-		print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
-				     adapter->dbg.last_event, DBG_CMD_NUM);
+		dev_err(adapter->dev, "last_event: %*ph\n",
+			(int)sizeof(adapter->dbg.last_event),
+			adapter->dbg.last_event);
 
 		dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
 			adapter->data_sent, adapter->cmd_sent);
@@ -946,6 +949,9 @@
 	}
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
 		mwifiex_init_fw_complete(adapter);
+
+	if (adapter->if_ops.card_reset)
+		adapter->if_ops.card_reset(adapter);
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a870b58..46e34aa 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -178,6 +178,7 @@
 		(struct mwifiex_private *) file->private_data;
 	struct net_device *netdev = priv->netdev;
 	struct netdev_hw_addr *ha;
+	struct netdev_queue *txq;
 	unsigned long page = get_zeroed_page(GFP_KERNEL);
 	char *p = (char *) page, fmt[64];
 	struct mwifiex_bss_info info;
@@ -229,8 +230,13 @@
 	p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
 	p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
 					 ? "on" : "off"));
-	p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev))
-					  ? "stopped" : "started"));
+	p += sprintf(p, "tx queue");
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		txq = netdev_get_tx_queue(netdev, i);
+		p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ?
+			     "stopped" : "started");
+	}
+	p += sprintf(p, "\n");
 
 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
 				      (unsigned long) p - page);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index b5d37a8..39f03ce 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -84,18 +84,19 @@
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
 		if (priv->user_scan_cfg) {
-			dev_dbg(priv->adapter->dev,
-				"info: %s: scan aborted\n", __func__);
-			cfg80211_scan_done(priv->scan_request, 1);
-			priv->scan_request = NULL;
+			if (priv->scan_request) {
+				dev_dbg(priv->adapter->dev,
+					"info: aborting scan\n");
+				cfg80211_scan_done(priv->scan_request, 1);
+				priv->scan_request = NULL;
+			} else {
+				dev_dbg(priv->adapter->dev,
+					"info: scan already aborted\n");
+			}
+
 			kfree(priv->user_scan_cfg);
 			priv->user_scan_cfg = NULL;
 		}
-
-		if (priv->scan_pending_on_block) {
-			priv->scan_pending_on_block = false;
-			up(&priv->async_sem);
-		}
 		goto done;
 	}
 
@@ -387,9 +388,17 @@
 					struct mwifiex_adapter *adapter)
 {
 	unsigned long dev_queue_flags;
+	unsigned int i;
 
 	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
-	netif_tx_wake_all_queues(netdev);
+
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
+
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
+
 	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
 }
 
@@ -400,9 +409,17 @@
 					struct mwifiex_adapter *adapter)
 {
 	unsigned long dev_queue_flags;
+	unsigned int i;
 
 	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
-	netif_tx_stop_all_queues(netdev);
+
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
+
+		if (!netif_tx_queue_stopped(txq))
+			netif_tx_stop_queue(txq);
+	}
+
 	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
 }
 
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 7b0858a..88664ae 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -721,8 +721,7 @@
 
 	if (!netif_carrier_ok(priv->netdev))
 		netif_carrier_on(priv->netdev);
-	if (netif_queue_stopped(priv->netdev))
-		netif_wake_queue(priv->netdev);
+	mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 
 	if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
 		priv->scan_block = true;
@@ -1238,8 +1237,7 @@
 
 	if (!netif_carrier_ok(priv->netdev))
 		netif_carrier_on(priv->netdev);
-	if (netif_queue_stopped(priv->netdev))
-		netif_wake_queue(priv->netdev);
+	mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 
 	mwifiex_save_curr_bcn(priv);
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index eb22dd2..9c802ed 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -282,6 +282,7 @@
 		mwifiex_shutdown_drv(adapter);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mwifiex_main_process);
 
 /*
  * This function frees the adapter structure.
@@ -412,49 +413,6 @@
 }
 
 /*
- * This function fills a driver buffer.
- *
- * The function associates a given SKB with the provided driver buffer
- * and also updates some of the SKB parameters, including IP header,
- * priority and timestamp.
- */
-static void
-mwifiex_fill_buffer(struct sk_buff *skb)
-{
-	struct ethhdr *eth;
-	struct iphdr *iph;
-	struct timeval tv;
-	u8 tid = 0;
-
-	eth = (struct ethhdr *) skb->data;
-	switch (eth->h_proto) {
-	case __constant_htons(ETH_P_IP):
-		iph = ip_hdr(skb);
-		tid = IPTOS_PREC(iph->tos);
-		pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
-			 eth->h_proto, tid, skb->priority);
-		break;
-	case __constant_htons(ETH_P_ARP):
-		pr_debug("data: ARP packet: %04x\n", eth->h_proto);
-	default:
-		break;
-	}
-/* Offset for TOS field in the IP header */
-#define IPTOS_OFFSET 5
-	tid = (tid >> IPTOS_OFFSET);
-	skb->priority = tid;
-	/* Record the current time the packet was queued; used to
-	   determine the amount of time the packet was queued in
-	   the driver before it was sent to the firmware.
-	   The delay is then sent along with the packet to the
-	   firmware for aggregate delay calculation for stats and
-	   MSDU lifetime expiry.
-	 */
-	do_gettimeofday(&tv);
-	skb->tstamp = timeval_to_ktime(tv);
-}
-
-/*
  * CFG802.11 network device handler for open.
  *
  * Starts the data queue.
@@ -472,6 +430,14 @@
 static int
 mwifiex_close(struct net_device *dev)
 {
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	if (priv->scan_request) {
+		dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+		cfg80211_scan_done(priv->scan_request, 1);
+		priv->scan_request = NULL;
+	}
+
 	return 0;
 }
 
@@ -480,17 +446,23 @@
  */
 int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
 {
-	mwifiex_wmm_add_buf_txqueue(priv, skb);
+	struct netdev_queue *txq;
+	int index = mwifiex_1d_to_wmm_queue[skb->priority];
+
+	if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) {
+		txq = netdev_get_tx_queue(priv->netdev, index);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+			dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+		}
+	}
+
 	atomic_inc(&priv->adapter->tx_pending);
+	mwifiex_wmm_add_buf_txqueue(priv, skb);
 
 	if (priv->adapter->scan_delay_cnt)
 		atomic_set(&priv->adapter->is_tx_received, true);
 
-	if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
-		mwifiex_set_trans_start(priv->netdev);
-		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
-	}
-
 	queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
 
 	return 0;
@@ -505,6 +477,7 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	struct sk_buff *new_skb;
 	struct mwifiex_txinfo *tx_info;
+	struct timeval tv;
 
 	dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
 		jiffies, priv->bss_type, priv->bss_num);
@@ -542,7 +515,16 @@
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 	tx_info->bss_num = priv->bss_num;
 	tx_info->bss_type = priv->bss_type;
-	mwifiex_fill_buffer(skb);
+
+	/* Record the current time the packet was queued; used to
+	 * determine the amount of time the packet was queued in
+	 * the driver before it was sent to the firmware.
+	 * The delay is then sent along with the packet to the
+	 * firmware for aggregate delay calculation for stats and
+	 * MSDU lifetime expiry.
+	 */
+	do_gettimeofday(&tv);
+	skb->tstamp = timeval_to_ktime(tv);
 
 	mwifiex_queue_tx_pkt(priv, skb);
 
@@ -622,6 +604,13 @@
 	return &priv->stats;
 }
 
+static u16
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	skb->priority = cfg80211_classify8021d(skb);
+	return mwifiex_1d_to_wmm_queue[skb->priority];
+}
+
 /* Network device handlers */
 static const struct net_device_ops mwifiex_netdev_ops = {
 	.ndo_open = mwifiex_open,
@@ -631,6 +620,7 @@
 	.ndo_tx_timeout = mwifiex_tx_timeout,
 	.ndo_get_stats = mwifiex_get_stats,
 	.ndo_set_rx_mode = mwifiex_set_multicast_list,
+	.ndo_select_queue = mwifiex_netdev_select_wmm_queue,
 };
 
 /*
@@ -830,9 +820,7 @@
 	for (i = 0; i < adapter->priv_num; i++) {
 		priv = adapter->priv[i];
 		if (priv && priv->netdev) {
-			if (!netif_queue_stopped(priv->netdev))
-				mwifiex_stop_net_dev_queue(priv->netdev,
-							   adapter);
+			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 			if (netif_carrier_ok(priv->netdev))
 				netif_carrier_off(priv->netdev);
 		}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index c2d0ab1..1b3cfc8 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -115,8 +115,6 @@
 #define MWIFIEX_TYPE_DATA				0
 #define MWIFIEX_TYPE_EVENT				3
 
-#define DBG_CMD_NUM						5
-
 #define MAX_BITMAP_RATES_SIZE			10
 
 #define MAX_CHANNEL_BAND_BG     14
@@ -373,7 +371,6 @@
 struct mwifiex_roc_cfg {
 	u64 cookie;
 	struct ieee80211_channel chan;
-	enum nl80211_channel_type chan_type;
 };
 
 struct mwifiex_adapter;
@@ -442,6 +439,7 @@
 	u8 wmm_enabled;
 	u8 wmm_qosinfo;
 	struct mwifiex_wmm_desc wmm;
+	atomic_t wmm_tx_pending[IEEE80211_NUM_ACS];
 	struct list_head sta_list;
 	/* spin lock for associated station list */
 	spinlock_t sta_list_spinlock;
@@ -484,7 +482,6 @@
 	u8 nick_name[16];
 	u16 current_key_index;
 	struct semaphore async_sem;
-	u8 scan_pending_on_block;
 	u8 report_scan_result;
 	struct cfg80211_scan_request *scan_request;
 	struct mwifiex_user_scan_cfg *user_scan_cfg;
@@ -603,6 +600,7 @@
 	int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
 	int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
 	int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
+	void (*card_reset) (struct mwifiex_adapter *);
 };
 
 struct mwifiex_adapter {
@@ -750,9 +748,9 @@
 
 int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
-int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
+int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
 
-int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
 				struct sk_buff *skb);
 
 int mwifiex_process_event(struct mwifiex_adapter *adapter);
@@ -791,7 +789,7 @@
 		       struct mwifiex_tx_param *tx_param);
 int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags);
 int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
-				struct sk_buff *skb, int status);
+				struct sk_buff *skb, int aggr, int status);
 void mwifiex_clean_txrx(struct mwifiex_private *priv);
 u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv);
 void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter);
@@ -809,7 +807,7 @@
 					u8 activated);
 int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
 			      struct host_cmd_ds_command *resp);
-int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 			      struct sk_buff *skb);
 int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
 			    u16 cmd_action, u32 cmd_oid,
@@ -819,9 +817,9 @@
 			    void *data_buf, void *cmd_buf);
 int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
 				struct host_cmd_ds_command *resp);
-int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
+int mwifiex_process_sta_rx_packet(struct mwifiex_private *,
 				  struct sk_buff *skb);
-int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 				  struct sk_buff *skb);
 int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
 				  struct sk_buff *skb);
@@ -1019,7 +1017,6 @@
 
 int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
 			       struct ieee80211_channel *chan,
-			       enum nl80211_channel_type *channel_type,
 			       unsigned int duration);
 
 int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9171aae..9189a32 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -153,7 +153,7 @@
 
 	if (((bss_desc->bcn_wpa_ie) &&
 	     ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
-	      WLAN_EID_WPA))) {
+	      WLAN_EID_VENDOR_SPECIFIC))) {
 		iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
 		oui = &mwifiex_wpa_oui[cipher][0];
 		ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -202,7 +202,7 @@
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
 		((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
-		 WLAN_EID_WPA)) &&
+		 WLAN_EID_VENDOR_SPECIFIC)) &&
 	    ((!bss_desc->bcn_rsn_ie) ||
 		((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
 		 WLAN_EID_RSN)) &&
@@ -237,7 +237,8 @@
 {
 	if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
-	    ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA))
+	    ((*(bss_desc->bcn_wpa_ie)).
+	     vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
 	   /*
 	    * Privacy bit may NOT be set in some APs like
 	    * LinkSys WRT54G && bss_desc->privacy
@@ -309,7 +310,8 @@
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled &&
 	    ((!bss_desc->bcn_wpa_ie) ||
-	     ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+	     ((*(bss_desc->bcn_wpa_ie)).
+	      vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
 	    ((!bss_desc->bcn_rsn_ie) ||
 	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
 	    !priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -329,7 +331,8 @@
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled &&
 	    ((!bss_desc->bcn_wpa_ie) ||
-	     ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+	     ((*(bss_desc->bcn_wpa_ie)).
+	      vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
 	    ((!bss_desc->bcn_rsn_ie) ||
 	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
 	    priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -938,6 +941,11 @@
 				 chan_idx)->chan_scan_mode_bitmap
 					&= ~MWIFIEX_PASSIVE_SCAN;
 
+			if (*filtered_scan)
+				(scan_chan_list +
+				 chan_idx)->chan_scan_mode_bitmap
+					|= MWIFIEX_DISABLE_CHAN_FILT;
+
 			if (user_scan_in->chan_list[chan_idx].scan_time) {
 				scan_dur = (u16) user_scan_in->
 					chan_list[chan_idx].scan_time;
@@ -1759,26 +1767,39 @@
 		}
 		if (priv->report_scan_result)
 			priv->report_scan_result = false;
-		if (priv->scan_pending_on_block) {
-			priv->scan_pending_on_block = false;
-			up(&priv->async_sem);
-		}
 
 		if (priv->user_scan_cfg) {
-			dev_dbg(priv->adapter->dev,
-				"info: %s: sending scan results\n", __func__);
-			cfg80211_scan_done(priv->scan_request, 0);
-			priv->scan_request = NULL;
+			if (priv->scan_request) {
+				dev_dbg(priv->adapter->dev,
+					"info: notifying scan done\n");
+				cfg80211_scan_done(priv->scan_request, 0);
+				priv->scan_request = NULL;
+			} else {
+				dev_dbg(priv->adapter->dev,
+					"info: scan already aborted\n");
+			}
+
 			kfree(priv->user_scan_cfg);
 			priv->user_scan_cfg = NULL;
 		}
 	} else {
-		if (!mwifiex_wmm_lists_empty(adapter)) {
+		if (priv->user_scan_cfg && !priv->scan_request) {
+			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+					       flags);
+			adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+			mod_timer(&priv->scan_delay_timer, jiffies);
+			dev_dbg(priv->adapter->dev,
+				"info: %s: triggering scan abort\n", __func__);
+		} else if (!mwifiex_wmm_lists_empty(adapter) &&
+			   (priv->scan_request && (priv->scan_request->flags &
+					    NL80211_SCAN_FLAG_LOW_PRIORITY))) {
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
 			adapter->scan_delay_cnt = 1;
 			mod_timer(&priv->scan_delay_timer, jiffies +
 				  msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+			dev_dbg(priv->adapter->dev,
+				"info: %s: deferring scan\n", __func__);
 		} else {
 			/* Get scan command from scan_pending_q and put to
 			   cmd_pending_q */
@@ -1891,7 +1912,6 @@
 			__func__);
 		return -1;
 	}
-	priv->scan_pending_on_block = true;
 
 	priv->adapter->scan_wait_q_woken = false;
 
@@ -1905,10 +1925,7 @@
 	if (!ret)
 		ret = mwifiex_wait_queue_complete(priv->adapter);
 
-	if (ret == -1) {
-		priv->scan_pending_on_block = false;
-		up(&priv->async_sem);
-	}
+	up(&priv->async_sem);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 82cf0fa..5a1c1d0 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -906,8 +906,8 @@
 /*
  * SDIO interrupt handler.
  *
- * This function reads the interrupt status from firmware and assigns
- * the main process in workqueue which will handle the interrupt.
+ * This function reads the interrupt status from firmware and handles
+ * the interrupt in current thread (ksdioirqd) right away.
  */
 static void
 mwifiex_sdio_interrupt(struct sdio_func *func)
@@ -930,7 +930,7 @@
 		adapter->ps_state = PS_STATE_AWAKE;
 
 	mwifiex_interrupt_status(adapter);
-	queue_work(adapter->workqueue, &adapter->main_work);
+	mwifiex_main_process(adapter);
 }
 
 /*
@@ -1749,6 +1749,37 @@
 		port, card->mp_data_port_mask);
 }
 
+static struct mmc_host *reset_host;
+static void sdio_card_reset_worker(struct work_struct *work)
+{
+	/* The actual reset operation must be run outside of the driver thread.
+	 * This is because mmc_remove_host() will cause the device to be
+	 * instantly destroyed, and the driver then needs to end its thread,
+	 * leading to a deadlock.
+	 *
+	 * We run it in a totally independent workqueue.
+	 */
+
+	pr_err("Resetting card...\n");
+	mmc_remove_host(reset_host);
+	/* 20ms delay is based on experiments with the sdhci controller */
+	mdelay(20);
+	mmc_add_host(reset_host);
+}
+static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
+
+/* This function resets the card */
+static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
+{
+	struct sdio_mmc_card *card = adapter->card;
+
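+	/* A card reset is already scheduled; don't queue another one */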
+	if (work_pending(&card_reset_work))
+		return;
+
+	reset_host = card->func->card->host;
+	schedule_work(&card_reset_work);
+}
+
 static struct mwifiex_if_ops sdio_ops = {
 	.init_if = mwifiex_init_sdio,
 	.cleanup_if = mwifiex_cleanup_sdio,
@@ -1767,6 +1798,7 @@
 	.cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
 	.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
 	.event_complete = mwifiex_sdio_event_complete,
+	.card_reset = mwifiex_sdio_card_reset,
 };
 
 /*
@@ -1804,6 +1836,7 @@
 	/* Set the flag as user is removing this module. */
 	user_rmmod = 1;
 
+	cancel_work_sync(&card_reset_work);
 	sdio_unregister_driver(&mwifiex_sdio);
 }
 
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 2103373..8cc5468 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -25,6 +25,7 @@
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 
 #include "main.h"
 
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 09e6a26..65c12eb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -85,10 +85,6 @@
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 		if (priv->report_scan_result)
 			priv->report_scan_result = false;
-		if (priv->scan_pending_on_block) {
-			priv->scan_pending_on_block = false;
-			up(&priv->async_sem);
-		}
 		break;
 
 	case HostCmd_CMD_MAC_CONTROL:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8132119..41aafc7 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -124,8 +124,7 @@
 	}
 	memset(priv->cfg_bssid, 0, ETH_ALEN);
 
-	if (!netif_queue_stopped(priv->netdev))
-		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
 }
@@ -197,8 +196,7 @@
 		dev_dbg(adapter->dev, "event: LINK_SENSED\n");
 		if (!netif_carrier_ok(priv->netdev))
 			netif_carrier_on(priv->netdev);
-		if (netif_queue_stopped(priv->netdev))
-			mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 
 	case EVENT_DEAUTHENTICATED:
@@ -306,8 +304,7 @@
 		dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
 		priv->adhoc_is_link_sensed = false;
 		mwifiex_clean_txrx(priv);
-		if (!netif_queue_stopped(priv->netdev))
-			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 		break;
@@ -424,7 +421,6 @@
 		cfg80211_remain_on_channel_expired(priv->wdev,
 						   priv->roc_cfg.cookie,
 						   &priv->roc_cfg.chan,
-						   priv->roc_cfg.chan_type,
 						   GFP_ATOMIC);
 
 		memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0c9f70b..237c8d2b 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -276,8 +276,7 @@
 		dev_dbg(adapter->dev, "info: SSID found in scan list ... "
 				      "associating...\n");
 
-		if (!netif_queue_stopped(priv->netdev))
-			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 
@@ -318,8 +317,7 @@
 
 		ret = mwifiex_check_network_compatibility(priv, bss_desc);
 
-		if (!netif_queue_stopped(priv->netdev))
-			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 
@@ -713,7 +711,7 @@
 		dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
 			priv->wpa_ie_len, priv->wpa_ie[0]);
 
-		if (priv->wpa_ie[0] == WLAN_EID_WPA) {
+		if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
 			priv->sec_info.wpa_enabled = true;
 		} else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
 			priv->sec_info.wpa2_enabled = true;
@@ -1046,7 +1044,6 @@
 int
 mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
 			   struct ieee80211_channel *chan,
-			   enum nl80211_channel_type *ct,
 			   unsigned int duration)
 {
 	struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1056,7 +1053,7 @@
 	roc_cfg.action = cpu_to_le16(action);
 	if (action == HostCmd_ACT_GEN_SET) {
 		roc_cfg.band_cfg = chan->band;
-		sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
+		sc = mwifiex_chan_type_to_sec_chan_offset(NL80211_CHAN_NO_HT);
 		roc_cfg.band_cfg |= (sc << 2);
 
 		roc_cfg.channel =
@@ -1253,7 +1250,7 @@
 	}
 	pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
 	/* Test to see if it is a WPA IE, if not, then it is a gen IE */
-	if (((pvendor_ie->element_id == WLAN_EID_WPA) &&
+	if (((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
 	     (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) ||
 	    (pvendor_ie->element_id == WLAN_EID_RSN)) {
 
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 07d32b7..b5c1095 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -38,14 +38,10 @@
  *
  * The completion callback is called after processing in complete.
  */
-int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 			      struct sk_buff *skb)
 {
 	int ret;
-	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
-	struct mwifiex_private *priv =
-			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
-					       rx_info->bss_type);
 	struct rx_packet_hdr *rx_pkt_hdr;
 	struct rxpd *local_rx_pd;
 	int hdr_chop;
@@ -98,9 +94,9 @@
 
 	priv->rxpd_htinfo = local_rx_pd->ht_info;
 
-	ret = mwifiex_recv_packet(adapter, skb);
+	ret = mwifiex_recv_packet(priv, skb);
 	if (ret == -1)
-		dev_err(adapter->dev, "recv packet failed\n");
+		dev_err(priv->adapter->dev, "recv packet failed\n");
 
 	return ret;
 }
@@ -117,21 +113,15 @@
  *
  * The completion callback is called after processing in complete.
  */
-int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
 				  struct sk_buff *skb)
 {
+	struct mwifiex_adapter *adapter = priv->adapter;
 	int ret = 0;
 	struct rxpd *local_rx_pd;
-	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
 	struct rx_packet_hdr *rx_pkt_hdr;
 	u8 ta[ETH_ALEN];
 	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
-	struct mwifiex_private *priv =
-			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
-					       rx_info->bss_type);
-
-	if (!priv)
-		return -1;
 
 	local_rx_pd = (struct rxpd *) (skb->data);
 	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -169,13 +159,13 @@
 
 		while (!skb_queue_empty(&list)) {
 			rx_skb = __skb_dequeue(&list);
-			ret = mwifiex_recv_packet(adapter, rx_skb);
+			ret = mwifiex_recv_packet(priv, rx_skb);
 			if (ret == -1)
 				dev_err(adapter->dev, "Rx of A-MSDU failed");
 		}
 		return 0;
 	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
-		ret = mwifiex_process_mgmt_packet(adapter, skb);
+		ret = mwifiex_process_mgmt_packet(priv, skb);
 		if (ret)
 			dev_err(adapter->dev, "Rx of mgmt packet failed");
 		dev_kfree_skb_any(skb);
@@ -188,7 +178,7 @@
 	 */
 	if (!IS_11N_ENABLED(priv) ||
 	    memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
-		mwifiex_process_rx_packet(adapter, skb);
+		mwifiex_process_rx_packet(priv, skb);
 		return ret;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 2af2639..8c80024 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -48,13 +48,19 @@
 	if (!priv)
 		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
+	if (!priv) {
+		dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+		dev_kfree_skb_any(skb);
+		return -1;
+	}
+
 	rx_info->bss_num = priv->bss_num;
 	rx_info->bss_type = priv->bss_type;
 
 	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
-		return mwifiex_process_uap_rx_packet(adapter, skb);
+		return mwifiex_process_uap_rx_packet(priv, skb);
 
-	return mwifiex_process_sta_rx_packet(adapter, skb);
+	return mwifiex_process_sta_rx_packet(priv, skb);
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
 
@@ -115,13 +121,13 @@
 		dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
 			ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
-		mwifiex_write_data_complete(adapter, skb, ret);
+		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	case -EINPROGRESS:
 		adapter->data_sent = false;
 		break;
 	case 0:
-		mwifiex_write_data_complete(adapter, skb, ret);
+		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	default:
 		break;
@@ -138,11 +144,12 @@
  * wakes up stalled traffic queue if required, and then frees the buffer.
  */
 int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
-				struct sk_buff *skb, int status)
+				struct sk_buff *skb, int aggr, int status)
 {
-	struct mwifiex_private *priv, *tpriv;
+	struct mwifiex_private *priv;
 	struct mwifiex_txinfo *tx_info;
-	int i;
+	struct netdev_queue *txq;
+	int index;
 
 	if (!skb)
 		return 0;
@@ -166,15 +173,20 @@
 
 	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
 		atomic_dec_return(&adapter->pending_bridged_pkts);
-	if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
+
+	if (aggr)
+		/* For skb_aggr, do not wake up tx queue */
 		goto done;
 
-	for (i = 0; i < adapter->priv_num; i++) {
-		tpriv = adapter->priv[i];
+	atomic_dec(&adapter->tx_pending);
 
-		if (tpriv->media_connected &&
-		    netif_queue_stopped(tpriv->netdev))
-			mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
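+	/* Wake only the per-AC netdev TX queue this packet was counted against */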
+	index = mwifiex_1d_to_wmm_queue[skb->priority];
+	if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
+		txq = netdev_get_tx_queue(priv->netdev, index);
+		if (netif_tx_queue_stopped(txq)) {
+			netif_tx_wake_queue(txq);
+			dev_dbg(adapter->dev, "wake queue: %d\n", index);
+		}
 	}
 done:
 	dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index d95a2d5..8dd7224 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -188,10 +188,19 @@
 	int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
 	const u8 *var_pos = params->beacon.head + var_offset;
 	int len = params->beacon.head_len - var_offset;
+	u8 rate_len = 0;
 
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
-	if (rate_ie)
+	if (rate_ie) {
 		memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+		rate_len = rate_ie->len;
+	}
+
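+	/* The extended supported rates IE lives in the beacon tail; append it
+	 * after the rates copied above.
+	 */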
+	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+					   params->beacon.tail,
+					   params->beacon.tail_len);
+	if (rate_ie)
+		memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
 
 	return;
 }
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index a33fa39..21c640d 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -235,11 +235,18 @@
 		break;
 	case EVENT_UAP_BSS_IDLE:
 		priv->media_connected = false;
+		if (netif_carrier_ok(priv->netdev))
+			netif_carrier_off(priv->netdev);
+		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+
 		mwifiex_clean_txrx(priv);
 		mwifiex_del_all_sta_list(priv);
 		break;
 	case EVENT_UAP_BSS_ACTIVE:
 		priv->media_connected = true;
+		if (!netif_carrier_ok(priv->netdev))
+			netif_carrier_on(priv->netdev);
+		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 	case EVENT_UAP_BSS_START:
 		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 0966ac2..a018e42 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -146,7 +146,7 @@
 	}
 
 	/* Forward unicat/Inter-BSS packets to kernel. */
-	return mwifiex_process_rx_packet(adapter, skb);
+	return mwifiex_process_rx_packet(priv, skb);
 }
 
 /*
@@ -159,24 +159,17 @@
  *
  * The completion callback is called after processing is complete.
  */
-int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 				  struct sk_buff *skb)
 {
+	struct mwifiex_adapter *adapter = priv->adapter;
 	int ret;
 	struct uap_rxpd *uap_rx_pd;
-	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
 	struct rx_packet_hdr *rx_pkt_hdr;
 	u16 rx_pkt_type;
 	u8 ta[ETH_ALEN], pkt_type;
 	struct mwifiex_sta_node *node;
 
-	struct mwifiex_private *priv =
-			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
-					       rx_info->bss_type);
-
-	if (!priv)
-		return -1;
-
 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
 	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -210,7 +203,7 @@
 
 		while (!skb_queue_empty(&list)) {
 			rx_skb = __skb_dequeue(&list);
-			ret = mwifiex_recv_packet(adapter, rx_skb);
+			ret = mwifiex_recv_packet(priv, rx_skb);
 			if (ret)
 				dev_err(adapter->dev,
 					"AP:Rx A-MSDU failed");
@@ -218,7 +211,7 @@
 
 		return 0;
 	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
-		ret = mwifiex_process_mgmt_packet(adapter, skb);
+		ret = mwifiex_process_mgmt_packet(priv, skb);
 		if (ret)
 			dev_err(adapter->dev, "Rx of mgmt packet failed");
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 22a59165..bbe1f35 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -238,7 +238,7 @@
 	} else {
 		dev_dbg(adapter->dev, "%s: DATA\n", __func__);
 		atomic_dec(&card->tx_data_urb_pending);
-		mwifiex_write_data_complete(adapter, context->skb,
+		mwifiex_write_data_complete(adapter, context->skb, 0,
 					    urb->status ? -1 : 0);
 	}
 
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index ae88f80..0982375 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -146,20 +146,16 @@
  * to the kernel.
  */
 int
-mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
 			    struct sk_buff *skb)
 {
 	struct rxpd *rx_pd;
-	struct mwifiex_private *priv;
 	u16 pkt_len;
 
 	if (!skb)
 		return -1;
 
 	rx_pd = (struct rxpd *)skb->data;
-	priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
-	if (!priv)
-		return -1;
 
 	skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
 	skb_pull(skb, sizeof(pkt_len));
@@ -190,20 +186,11 @@
  * the function creates a blank SKB, fills it with the data from the
  * received buffer and then sends this new SKB to the kernel.
  */
-int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 {
-	struct mwifiex_rxinfo *rx_info;
-	struct mwifiex_private *priv;
-
 	if (!skb)
 		return -1;
 
-	rx_info = MWIFIEX_SKB_RXCB(skb);
-	priv = mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
-				      rx_info->bss_type);
-	if (!priv)
-		return -1;
-
 	skb->dev = priv->netdev;
 	skb->protocol = eth_type_trans(skb, priv->netdev);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -225,7 +212,7 @@
 	 * fragments. Currently we fail the Filesndl-ht.scr script
 	 * for UDP, hence this fix
 	 */
-	if ((adapter->iface_type == MWIFIEX_USB) &&
+	if ((priv->adapter->iface_type == MWIFIEX_USB) &&
 	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
 		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
 
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 600d819..818f871 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -483,7 +483,7 @@
 	struct sk_buff *skb, *tmp;
 
 	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
-		mwifiex_write_data_complete(adapter, skb, -1);
+		mwifiex_write_data_complete(adapter, skb, 0, -1);
 }
 
 /*
@@ -650,7 +650,7 @@
 
 	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
 		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
-		mwifiex_write_data_complete(adapter, skb, -1);
+		mwifiex_write_data_complete(adapter, skb, 0, -1);
 		return;
 	}
 
@@ -680,7 +680,7 @@
 
 	if (!ra_list) {
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
-		mwifiex_write_data_complete(adapter, skb, -1);
+		mwifiex_write_data_complete(adapter, skb, 0, -1);
 		return;
 	}
 
@@ -1090,7 +1090,7 @@
 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
 			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 					       ra_list_flags);
-			mwifiex_write_data_complete(adapter, skb, -1);
+			mwifiex_write_data_complete(adapter, skb, 0, -1);
 			return;
 		}
 
@@ -1195,7 +1195,7 @@
 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
 			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 					       ra_list_flags);
-			mwifiex_write_data_complete(adapter, skb, -1);
+			mwifiex_write_data_complete(adapter, skb, 0, -1);
 			return;
 		}
 
@@ -1209,7 +1209,7 @@
 		adapter->data_sent = false;
 		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
-		mwifiex_write_data_complete(adapter, skb, ret);
+		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	case -EINPROGRESS:
 		adapter->data_sent = false;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index ec83995..b92f39d 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -31,6 +31,8 @@
 	MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
 };
 
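+/* Map IEEE 802.1d priority (skb->priority) to the WMM TX queue index */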
+static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+
 /*
  * This function retrieves the TID of the given RA list.
  */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 5099e53..0cdae66 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1851,6 +1851,7 @@
 	bool start_ba_session = false;
 	bool mgmtframe = false;
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+	bool eapol_frame = false;
 
 	wh = (struct ieee80211_hdr *)skb->data;
 	if (ieee80211_is_data_qos(wh->frame_control))
@@ -1858,6 +1859,9 @@
 	else
 		qos = 0;
 
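+	/* EAPOL frames are kept out of AMPDU setup and HW timestamping below */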
+	if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+		eapol_frame = true;
+
 	if (ieee80211_is_mgmt(wh->frame_control))
 		mgmtframe = true;
 
@@ -1916,9 +1920,8 @@
 
 	txpriority = index;
 
-	if (priv->ap_fw && sta && sta->ht_cap.ht_supported
-			&& skb->protocol != cpu_to_be16(ETH_P_PAE)
-			&& ieee80211_is_data_qos(wh->frame_control)) {
+	if (priv->ap_fw && sta && sta->ht_cap.ht_supported && !eapol_frame &&
+	    ieee80211_is_data_qos(wh->frame_control)) {
 		tid = qos & 0xf;
 		mwl8k_tx_count_packet(sta, tid);
 		spin_lock(&priv->stream_lock);
@@ -2005,6 +2008,8 @@
 				spin_unlock(&priv->stream_lock);
 			}
 			spin_unlock_bh(&priv->tx_lock);
+			pci_unmap_single(priv->pdev, dma, skb->len,
+					 PCI_DMA_TODEVICE);
 			dev_kfree_skb(skb);
 			return;
 		}
@@ -2025,9 +2030,11 @@
 	else
 		tx->peer_id = 0;
 
-	if (priv->ap_fw)
+	if (priv->ap_fw && ieee80211_is_data(wh->frame_control) && !eapol_frame)
 		tx->timestamp = cpu_to_le32(ioread32(priv->regs +
 						MWL8K_HW_TIMER_REGISTER));
+	else
+		tx->timestamp = 0;
 
 	wmb();
 	tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
@@ -3679,7 +3686,8 @@
 } __packed;
 
 static int
-mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
+mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
+	       struct ieee80211_vif *vif)
 {
 	struct mwl8k_cmd_bastream *cmd;
 	int rc;
@@ -3702,7 +3710,7 @@
 		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
 		cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);
 
-	rc = mwl8k_post_cmd(hw, &cmd->header);
+	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
 
 	kfree(cmd);
 
@@ -3711,7 +3719,7 @@
 
 static int
 mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
-		u8 buf_size)
+		u8 buf_size, struct ieee80211_vif *vif)
 {
 	struct mwl8k_cmd_bastream *cmd;
 	int rc;
@@ -3745,7 +3753,7 @@
 		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
 					BASTREAM_FLAG_DIRECTION_UPSTREAM);
 
-	rc = mwl8k_post_cmd(hw, &cmd->header);
+	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
 
 	wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
 		stream->sta->addr, stream->tid);
@@ -5085,6 +5093,7 @@
 	struct mwl8k_priv *priv = hw->priv;
 	struct mwl8k_ampdu_stream *stream;
 	u8 *addr = sta->addr;
+	struct mwl8k_sta *sta_info = MWL8K_STA(sta);
 
 	if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
 		return -ENOTSUPP;
@@ -5127,7 +5136,16 @@
 		/* Release the lock before we do the time consuming stuff */
 		spin_unlock(&priv->stream_lock);
 		for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
-			rc = mwl8k_check_ba(hw, stream);
+
+			/* Check if link is still valid */
+			if (!sta_info->is_ampdu_allowed) {
+				spin_lock(&priv->stream_lock);
+				mwl8k_remove_stream(hw, stream);
+				spin_unlock(&priv->stream_lock);
+				return -EBUSY;
+			}
+
+			rc = mwl8k_check_ba(hw, stream, vif);
 
 			/* If HW restart is in progress mwl8k_post_cmd will
 			 * return -EBUSY. Avoid retrying mwl8k_check_ba in
@@ -5167,7 +5185,7 @@
 		BUG_ON(stream == NULL);
 		BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
 		spin_unlock(&priv->stream_lock);
-		rc = mwl8k_create_ba(hw, stream, buf_size);
+		rc = mwl8k_create_ba(hw, stream, buf_size, vif);
 		spin_lock(&priv->stream_lock);
 		if (!rc)
 			stream->state = AMPDU_STREAM_ACTIVE;
@@ -5617,6 +5635,18 @@
 	return rc;
 }
 
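+/* Advertise support for up to 8 AP interfaces on a single channel */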
+static const struct ieee80211_iface_limit ap_if_limits[] = {
+	{ .max = 8,	.types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination ap_if_comb = {
+	.limits = ap_if_limits,
+	.n_limits = ARRAY_SIZE(ap_if_limits),
+	.max_interfaces = 8,
+	.num_different_channels = 1,
+};
+
+
 static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 {
 	struct ieee80211_hw *hw = priv->hw;
@@ -5696,8 +5726,13 @@
 		goto err_free_cookie;
 
 	hw->wiphy->interface_modes = 0;
-	if (priv->ap_macids_supported || priv->device_info->fw_image_ap)
+
+	if (priv->ap_macids_supported || priv->device_info->fw_image_ap) {
 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+		hw->wiphy->iface_combinations = &ap_if_comb;
+		hw->wiphy->n_iface_combinations = 1;
+	}
+
 	if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
 
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 7b751fb..d01edd2 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -161,24 +161,23 @@
 }
 
 static int orinoco_set_monitor_channel(struct wiphy *wiphy,
-				       struct ieee80211_channel *chan,
-				       enum nl80211_channel_type channel_type)
+				       struct cfg80211_chan_def *chandef)
 {
 	struct orinoco_private *priv = wiphy_priv(wiphy);
 	int err = 0;
 	unsigned long flags;
 	int channel;
 
-	if (!chan)
+	if (!chandef->chan)
 		return -EINVAL;
 
-	if (channel_type != NL80211_CHAN_NO_HT)
+	if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
 		return -EINVAL;
 
-	if (chan->band != IEEE80211_BAND_2GHZ)
+	if (chandef->chan->band != IEEE80211_BAND_2GHZ)
 		return -EINVAL;
 
-	channel = ieee80211_freq_to_dsss_chan(chan->center_freq);
+	channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq);
 
 	if ((channel < 1) || (channel > NUM_CHANNELS) ||
 	     !(priv->channel_mask & (1 << (channel - 1))))
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 4dadf98..5a8fec2 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -39,7 +39,7 @@
 {
 	u8 *p = data;
 	while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
-		if ((p[0] == WLAN_EID_GENERIC) &&
+		if ((p[0] == WLAN_EID_VENDOR_SPECIFIC) &&
 		    (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
 			return p;
 		p += p[1] + 2;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7f53cea2..01624dc 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -865,7 +865,7 @@
 static int ezusb_access_ltv(struct ezusb_priv *upriv,
 			    struct request_context *ctx,
 			    u16 length, const void *data, u16 frame_type,
-			    void *ans_buff, int ans_size, u16 *ans_length)
+			    void *ans_buff, unsigned ans_size, u16 *ans_length)
 {
 	int req_size;
 	int retval = 0;
@@ -933,7 +933,7 @@
 	}
 	if (ctx->in_rid) {
 		struct ezusb_packet *ans = ctx->buf;
-		int exp_len;
+		unsigned exp_len;
 
 		if (ans->hermes_len != 0)
 			exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
@@ -949,8 +949,7 @@
 		}
 
 		if (ans_buff)
-			memcpy(ans_buff, ans->data,
-			       min_t(int, exp_len, ans_size));
+			memcpy(ans_buff, ans->data, min(exp_len, ans_size));
 		if (ans_length)
 			*ans_length = le16_to_cpu(ans->hermes_len);
 	}
@@ -995,7 +994,7 @@
 	struct ezusb_priv *upriv = hw->priv;
 	struct request_context *ctx;
 
-	if ((bufsize < 0) || (bufsize % 2))
+	if (bufsize % 2)
 		return -EINVAL;
 
 	ctx = ezusb_alloc_ctx(upriv, rid, rid);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 5861e13..12f0a34 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -369,7 +369,11 @@
 	rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
 	priv->tsf_low32 = tsf32;
 
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	/* LMAC API Page 10/29 - s_lm_data_in - clock
+	 * "usec accurate timestamp of hardware clock
+	 * at end of frame (before OFDM SIFS EOF padding)"
+	 */
+	rx_status->flag |= RX_FLAG_MACTIME_END;
 
 	if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
 		header_len += hdr->align[0];
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index bd1f0cb..abe1d03 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -490,9 +490,12 @@
 static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
 
 static int rndis_set_tx_power(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
 			      enum nl80211_tx_power_setting type,
 			      int mbm);
-static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm);
+static int rndis_get_tx_power(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
+			      int *dbm);
 
 static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
 				struct cfg80211_connect_params *sme);
@@ -1903,6 +1906,7 @@
 }
 
 static int rndis_set_tx_power(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
 			      enum nl80211_tx_power_setting type,
 			      int mbm)
 {
@@ -1930,7 +1934,9 @@
 	return -ENOTSUPP;
 }
 
-static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
+static int rndis_get_tx_power(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
+			      int *dbm)
 {
 	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
 	struct usbnet *usbdev = priv->usbdev;
@@ -2287,7 +2293,7 @@
 {
 	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
 	struct usbnet *usbdev = priv->usbdev;
-	struct ieee80211_channel *channel = params->channel;
+	struct ieee80211_channel *channel = params->chandef.chan;
 	struct ndis_80211_ssid ssid;
 	enum nl80211_auth_type auth_type;
 	int ret, alg, length, chan = -1;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 59474ae..c0441a7 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2520,20 +2520,37 @@
 	return comp_value;
 }
 
+static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
+					int power_level, int max_power)
+{
+	int delta;
+
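+	/* Devices with a known power limit take the user power_level into
+	 * account in rt2800_compensate_txpower() instead.
+	 */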
+	if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+		return 0;
+
+	/*
+	 * XXX: We don't know the maximum transmit power of our hardware since
+	 * the EEPROM doesn't expose it. We only know that we are calibrated
+	 * to 100% tx power.
+	 *
+	 * Hence, we assume the regulatory limit that cfg80211 calculated for
+	 * the current channel is our maximum and if we are requested to lower
+	 * the value we just reduce our tx power accordingly.
+	 */
+	delta = power_level - max_power;
+	return min(delta, 0);
+}
+
 static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
 				   enum ieee80211_band band, int power_level,
 				   u8 txpower, int delta)
 {
-	u32 reg;
 	u16 eeprom;
 	u8 criterion;
 	u8 eirp_txpower;
 	u8 eirp_txpower_criterion;
 	u8 reg_limit;
 
-	if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
-		return txpower;
-
 	if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
 		/*
 		 * Check if eirp txpower exceed txpower_limit.
@@ -2542,11 +2559,13 @@
 		 * .11b data rate need add additional 4dbm
 		 * when calculating eirp txpower.
 		 */
-		rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
-		criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
+				   &eeprom);
+		criterion = rt2x00_get_field16(eeprom,
+					       EEPROM_TXPOWER_BYRATE_RATE0);
 
-		rt2x00_eeprom_read(rt2x00dev,
-				   EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
+				   &eeprom);
 
 		if (band == IEEE80211_BAND_2GHZ)
 			eirp_txpower_criterion = rt2x00_get_field16(eeprom,
@@ -2563,36 +2582,71 @@
 	} else
 		reg_limit = 0;
 
-	return txpower + delta - reg_limit;
+	txpower = max(0, txpower + delta - reg_limit);
+	return min_t(u8, txpower, 0xc);
 }
 
+/*
+ * We configure transmit power using the MAC TX_PWR_CFG_{0,...,N} registers and
+ * the BBP R1 register. TX_PWR_CFG_X allows configuring per-rate TX power
+ * values, 4 bits for each rate (tunable from 0 to 15 dBm). BBP R1 controls
+ * transmit power for all rates, but allows only 4 discrete values: -12, -6, 0
+ * and 6 dBm. Reference per-rate transmit power values are located in the
+ * EEPROM at the EEPROM_TXPOWER_BYRATE offset. We adjust them and the BBP R1
+ * setting according to current conditions (i.e. band, bandwidth, temperature,
+ * user settings).
+ */
 static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
-				  enum ieee80211_band band,
+				  struct ieee80211_channel *chan,
 				  int power_level)
 {
-	u8 txpower;
+	u8 txpower, r1;
 	u16 eeprom;
-	int i, is_rate_b;
-	u32 reg;
-	u8 r1;
-	u32 offset;
-	int delta;
+	u32 reg, offset;
+	int i, is_rate_b, delta, power_ctrl;
+	enum ieee80211_band band = chan->band;
 
 	/*
-	 * Calculate HT40 compensation delta
+	 * Calculate HT40 compensation. For 40MHz we need to add or subtract
+	 * the value read from the EEPROM (different for 2GHz and for 5GHz).
 	 */
 	delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
 
 	/*
-	 * calculate temperature compensation delta
+	 * Calculate temperature compensation. Based on the measured TSSI
+	 * (Transmitter Signal Strength Indication) we know whether the TX power
+	 * (due to temperature or possibly other factors) is smaller or bigger
+	 * than expected. We adjust it based on the TSSI reference and boundary
+	 * values provided in the EEPROM.
 	 */
 	delta += rt2800_get_gain_calibration_delta(rt2x00dev);
 
 	/*
-	 * set to normal bbp tx power control mode: +/- 0dBm
+	 * Decrease power according to user settings on devices with an unknown
+	 * maximum tx power. For other devices the user power_level is taken
+	 * into consideration in rt2800_compensate_txpower().
+	 */
+	delta += rt2800_get_txpower_reg_delta(rt2x00dev, power_level,
+					      chan->max_power);
+
+	/*
+	 * BBP R1 controls TX power for all rates; it allows setting the gains
+	 * -12, -6, 0 and +6 dBm by writing the values 2, 1, 0 and 3 respectively.
+	 *
+	 * TODO: we do not use the +6 dBm option so as not to increase power
+	 * beyond the regulatory limit, however this could be utilized for
+	 * devices with CAPABILITY_POWER_LIMIT.
 	 */
 	rt2800_bbp_read(rt2x00dev, 1, &r1);
-	rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
+	if (delta <= -12) {
+		power_ctrl = 2;
+		delta += 12;
+	} else if (delta <= -6) {
+		power_ctrl = 1;
+		delta += 6;
+	} else {
+		power_ctrl = 0;
+	}
+	rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
 	rt2800_bbp_write(rt2x00dev, 1, r1);
 	offset = TX_PWR_CFG_0;
 
@@ -2710,7 +2764,7 @@
 
 void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
 {
-	rt2800_config_txpower(rt2x00dev, rt2x00dev->curr_band,
+	rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.channel,
 			      rt2x00dev->tx_power);
 }
 EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
@@ -2845,11 +2899,11 @@
 	if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
 		rt2800_config_channel(rt2x00dev, libconf->conf,
 				      &libconf->rf, &libconf->channel);
-		rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
+		rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
 				      libconf->conf->power_level);
 	}
 	if (flags & IEEE80211_CONF_CHANGE_POWER)
-		rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
+		rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
 				      libconf->conf->power_level);
 	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
 		rt2800_config_retry_limit(rt2x00dev, libconf);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3b8fb5a..0230812 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1096,6 +1096,7 @@
 	{ USB_DEVICE(0x177f, 0x0153) },
 	{ USB_DEVICE(0x177f, 0x0302) },
 	{ USB_DEVICE(0x177f, 0x0313) },
+	{ USB_DEVICE(0x177f, 0x0323) },
 	/* U-Media */
 	{ USB_DEVICE(0x157e, 0x300e) },
 	{ USB_DEVICE(0x157e, 0x3013) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 69097d1..67d1679 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -157,6 +157,7 @@
 	 * requested configurations.
 	 */
 	ieee80211_iterate_active_interfaces(rt2x00dev->hw,
+					    IEEE80211_IFACE_ITER_RESUME_ALL,
 					    rt2x00lib_intf_scheduled_iter,
 					    rt2x00dev);
 }
@@ -225,9 +226,9 @@
 		return;
 
 	/* send buffered bc/mc frames out for every bssid */
-	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
-						   rt2x00lib_bc_buffer_iter,
-						   rt2x00dev);
+	ieee80211_iterate_active_interfaces_atomic(
+		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		rt2x00lib_bc_buffer_iter, rt2x00dev);
 	/*
 	 * Devices with pre tbtt interrupt don't need to update the beacon
 	 * here as they will fetch the next beacon directly prior to
@@ -237,9 +238,9 @@
 		return;
 
 	/* fetch next beacon */
-	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
-						   rt2x00lib_beaconupdate_iter,
-						   rt2x00dev);
+	ieee80211_iterate_active_interfaces_atomic(
+		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		rt2x00lib_beaconupdate_iter, rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
 
@@ -249,9 +250,9 @@
 		return;
 
 	/* fetch next beacon */
-	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
-						   rt2x00lib_beaconupdate_iter,
-						   rt2x00dev);
+	ieee80211_iterate_active_interfaces_atomic(
+		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		rt2x00lib_beaconupdate_iter, rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 98a9e48..ed7a1bb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -424,9 +424,9 @@
 	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return 0;
 
-	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
-						   rt2x00mac_set_tim_iter,
-						   rt2x00dev);
+	ieee80211_iterate_active_interfaces_atomic(
+		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		rt2x00mac_set_tim_iter, rt2x00dev);
 
 	/* queue work to upodate the beacon template */
 	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 021d83e..b4218a5 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -150,7 +150,7 @@
 			rx_status.freq = dev->conf.channel->center_freq;
 			rx_status.band = dev->conf.channel->band;
 			rx_status.mactime = le64_to_cpu(entry->tsft);
-			rx_status.flag |= RX_FLAG_MACTIME_MPDU;
+			rx_status.flag |= RX_FLAG_MACTIME_START;
 			if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
 				rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 7811b63..52e6beb 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -381,7 +381,7 @@
 	rx_status.rate_idx = rate;
 	rx_status.freq = dev->conf.channel->center_freq;
 	rx_status.band = dev->conf.channel->band;
-	rx_status.flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status.flag |= RX_FLAG_MACTIME_START;
 	if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
 		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 6b28e92..21b1bbb 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -32,6 +32,17 @@
 
 	If you choose to build it as a module, it will be called rtl8192de
 
+config RTL8723AE
+	tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
+	depends on MAC80211 && PCI && EXPERIMENTAL
+	select FW_LOADER
+	select RTLWIFI
+	---help---
+	This is the driver for Realtek RTL8723AE 802.11n PCIe
+	wireless network adapters.
+
+	If you choose to build it as a module, it will be called rtl8723ae
+
 config RTL8192CU
 	tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
 	depends on MAC80211 && USB
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 97935c5..3b1cbac 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -7,7 +7,8 @@
 		efuse.o		\
 		ps.o		\
 		rc.o		\
-		regd.o
+		regd.o		\
+		stats.o
 
 rtl8192c_common-objs +=		\
 
@@ -24,5 +25,6 @@
 obj-$(CONFIG_RTL8192CU)		+= rtl8192cu/
 obj-$(CONFIG_RTL8192SE)		+= rtl8192se/
 obj-$(CONFIG_RTL8192DE)		+= rtl8192de/
+obj-$(CONFIG_RTL8723AE)		+= rtl8723ae/
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 59381fe..4494d13 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -826,6 +826,30 @@
 }
 EXPORT_SYMBOL(rtlwifi_rate_mapping);
 
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	__le16 fc = rtl_get_fc(skb);
+
+	if (rtlpriv->dm.supp_phymode_switch &&
+	    mac->link_state < MAC80211_LINKED &&
+	    (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
+		if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+			rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+	}
+	if (ieee80211_is_auth(fc)) {
+		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+		rtl_ips_nic_on(hw);
+
+		mac->link_state = MAC80211_LINKING;
+		/* Dual mac */
+		rtlpriv->phy.need_iqk = true;
+	}
+
+	return true;
+}
+
 void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		      struct ieee80211_tx_info *info,
 		      struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index f35af0f..5a8c80e 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -142,4 +142,6 @@
 extern struct attribute_group rtl_attribute_group;
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
 			 bool isht, u8 desc_rate, bool first_ampdu);
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
+
 #endif
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5b4b4d4..0e510f7 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -52,11 +52,8 @@
 	u32 target_content = 0;
 	u8 entry_i;
 
-	RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
-		 "key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
-		 key_cont_128[0], key_cont_128[1],
-		 key_cont_128[2], key_cont_128[3],
-		 key_cont_128[4], key_cont_128[5]);
+	RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "key_cont_128: %6phC\n",
+		 key_cont_128);
 
 	for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
 		target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
@@ -340,7 +337,7 @@
 		if (((bitmap & BIT(0)) == BIT(0)) &&
 		    (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
 			/* Remove from HW Security CAM */
-			memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
+			eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
 			rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
 			RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
 				 "del CAM entry %d\n", i);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a7c0e52..be33aa1 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -962,7 +962,6 @@
 	int err = 0;
 	u8 mac_addr[ETH_ALEN];
 	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-	u8 zero_addr[ETH_ALEN] = { 0 };
 
 	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -1057,7 +1056,7 @@
 			memcpy(rtlpriv->sec.key_buf[key_idx],
 			       key->key, key->keylen);
 			rtlpriv->sec.key_len[key_idx] = key->keylen;
-			memcpy(mac_addr, zero_addr, ETH_ALEN);
+			eth_zero_addr(mac_addr);
 		} else if (group_key) {	/* group key */
 			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
 				 "set group key\n");
@@ -1108,7 +1107,7 @@
 		}
 		memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
 		rtlpriv->sec.key_len[key_idx] = 0;
-		memcpy(mac_addr, zero_addr, ETH_ALEN);
+		eth_zero_addr(mac_addr);
 		/*
 		 *mac80211 will delete entrys one by one,
 		 *so don't use rtl_cam_reset_all_entry
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 07493d2..fd3269f 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -106,6 +106,8 @@
 #define COMP_REGD			BIT(27)
 #define COMP_CHAN			BIT(28)
 #define COMP_USB			BIT(29)
+#define COMP_EASY_CONCURRENT	COMP_USB /* reuse of this bit is OK */
+#define COMP_BT_COEXIST			BIT(30)
 
 /*--------------------------------------------------------------
 		Define the rt_print components
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index abc306b..f38e30a 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1309,6 +1309,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_sta_info *sta_entry = NULL;
 	u8 tid = rtl_get_tid(skb);
+	__le16 fc = rtl_get_fc(skb);
 
 	if (!sta)
 		return false;
@@ -1316,6 +1317,12 @@
 
 	if (!rtlpriv->rtlhal.earlymode_enable)
 		return false;
+	if (ieee80211_is_nullfunc(fc))
+		return false;
+	if (ieee80211_is_qos_nullfunc(fc))
+		return false;
+	if (ieee80211_is_pspoll(fc))
+		return false;
 	if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
 		return false;
 	if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
@@ -1357,10 +1364,8 @@
 	u8 own;
 	u8 temp_one = 1;
 
-	if (ieee80211_is_auth(fc)) {
-		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
-		rtl_ips_nic_on(hw);
-	}
+	if (ieee80211_is_mgmt(fc))
+		rtl_tx_mgmt_proc(hw, skb);
 
 	if (rtlpriv->psc.sw_ps_enabled) {
 		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
@@ -1628,7 +1633,7 @@
 				 "8192 PCI-E is found - vid/did=%x/%x\n",
 				 venderid, deviceid);
 			rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
-			break;
+			return false;
 		case RTL_PCI_REVISION_ID_8192SE:
 			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
 				 "8192SE is found - vid/did=%x/%x\n",
@@ -1643,6 +1648,11 @@
 			break;
 
 		}
+	} else if (deviceid == RTL_PCI_8723AE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+			 "8723AE PCI-E is found - "
+			 "vid/did=%x/%x\n", venderid, deviceid);
 	} else if (deviceid == RTL_PCI_8192CET_DID ||
 		   deviceid == RTL_PCI_8192CE_DID ||
 		   deviceid == RTL_PCI_8191CE_DID ||
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 241448f..f71b12a 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -79,6 +79,7 @@
 #define RTL_PCI_8173_DID	0x8173	/*8191 SE Crab */
 #define RTL_PCI_8172_DID	0x8172	/*8191 SE RE */
 #define RTL_PCI_8171_DID	0x8171	/*8191 SE Unicron */
+#define RTL_PCI_8723AE_DID	0x8723	/*8723AE */
 #define RTL_PCI_0045_DID	0x0045	/*8190 PCI for Ceraga */
 #define RTL_PCI_0046_DID	0x0046	/*8190 Cardbus for Ceraga */
 #define RTL_PCI_0044_DID	0x0044	/*8192e PCIE for Ceraga */
@@ -152,6 +153,7 @@
 
 struct rtl_pci {
 	struct pci_dev *pdev;
+	bool irq_enabled;
 
 	bool driver_is_goingto_unload;
 	bool up_first_time;
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index d5cbf01..c1e065f 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -55,7 +55,8 @@
 	 *      1M we will not use FW rate but user rate.
 	 */
 	if (rtlmac->opmode == NL80211_IFTYPE_AP ||
-		rtlmac->opmode == NL80211_IFTYPE_ADHOC) {
+	    rtlmac->opmode == NL80211_IFTYPE_ADHOC ||
+	    rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) {
 		if (sta) {
 			sta_entry = (struct rtl_sta_info *) sta->drv_priv;
 			wireless_mode = sta_entry->wireless_mode;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1ca4e25..1cdf5a2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -43,8 +43,8 @@
 #define GET_UNDECORATED_AVERAGE_RSSI(_priv)	\
 	((RTLPRIV(_priv))->mac80211.opmode == \
 			     NL80211_IFTYPE_ADHOC) ?	\
-	((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
-	((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb)
+	((RTLPRIV(_priv))->dm.entry_min_undec_sm_pwdb) : \
+	((RTLPRIV(_priv))->dm.undec_sm_pwdb)
 
 static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
 	0x7f8001fe,
@@ -167,18 +167,18 @@
 	dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 	dm_digtable->cur_igvalue = 0x20;
 	dm_digtable->pre_igvalue = 0x0;
-	dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
-	dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
-	dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+	dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+	dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
+	dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
 	dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
 	dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
 	dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
 	dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
 	dm_digtable->rx_gain_range_max = DM_DIG_MAX;
 	dm_digtable->rx_gain_range_min = DM_DIG_MIN;
-	dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
-	dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-	dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+	dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+	dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+	dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
 	dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
 	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
 }
@@ -189,22 +189,21 @@
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 	long rssi_val_min = 0;
 
-	if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-	    (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
-		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+	if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
+	    (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
+		if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
 			rssi_val_min =
-			    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
-			     rtlpriv->dm.undecorated_smoothed_pwdb) ?
-			    rtlpriv->dm.undecorated_smoothed_pwdb :
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			    (rtlpriv->dm.entry_min_undec_sm_pwdb >
+			     rtlpriv->dm.undec_sm_pwdb) ?
+			    rtlpriv->dm.undec_sm_pwdb :
+			    rtlpriv->dm.entry_min_undec_sm_pwdb;
 		else
-			rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-	} else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
-		   dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
-		rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-	} else if (dm_digtable->curmultista_connectstate ==
-		   DIG_MULTISTA_CONNECT) {
-		rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+	} else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
+		   dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
+		rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+	} else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
+		rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
 	}
 
 	return (u8) rssi_val_min;
@@ -286,37 +285,33 @@
 static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 
-	if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) {
-		if ((dm_digtable->backoff_val - 2) <
-		    dm_digtable->backoff_val_range_min)
-			dm_digtable->backoff_val =
-			    dm_digtable->backoff_val_range_min;
+	if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
+		if ((digtable->back_val - 2) < digtable->back_range_min)
+			digtable->back_val = digtable->back_range_min;
 		else
-			dm_digtable->backoff_val -= 2;
-	} else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) {
-		if ((dm_digtable->backoff_val + 2) >
-		    dm_digtable->backoff_val_range_max)
-			dm_digtable->backoff_val =
-			    dm_digtable->backoff_val_range_max;
+			digtable->back_val -= 2;
+	} else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
+		if ((digtable->back_val + 2) > digtable->back_range_max)
+			digtable->back_val = digtable->back_range_max;
 		else
-			dm_digtable->backoff_val += 2;
+			digtable->back_val += 2;
 	}
 
-	if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) >
-	    dm_digtable->rx_gain_range_max)
-		dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max;
-	else if ((dm_digtable->rssi_val_min + 10 -
-		  dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min)
-		dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min;
+	if ((digtable->rssi_val_min + 10 - digtable->back_val) >
+	    digtable->rx_gain_range_max)
+		digtable->cur_igvalue = digtable->rx_gain_range_max;
+	else if ((digtable->rssi_val_min + 10 -
+		  digtable->back_val) < digtable->rx_gain_range_min)
+		digtable->cur_igvalue = digtable->rx_gain_range_min;
 	else
-		dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
-		    dm_digtable->backoff_val;
+		digtable->cur_igvalue = digtable->rssi_val_min + 10 -
+		    digtable->back_val;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-		 "rssi_val_min = %x backoff_val %x\n",
-		 dm_digtable->rssi_val_min, dm_digtable->backoff_val);
+		 "rssi_val_min = %x back_val %x\n",
+		 digtable->rssi_val_min, digtable->back_val);
 
 	rtl92c_dm_write_dig(hw);
 }
@@ -327,14 +322,14 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+	long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
 	bool multi_sta = false;
 
 	if (mac->opmode == NL80211_IFTYPE_ADHOC)
 		multi_sta = true;
 
 	if (!multi_sta ||
-	    dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
+	    dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
 		initialized = false;
 		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 		return;
@@ -345,7 +340,7 @@
 		rtl92c_dm_write_dig(hw);
 	}
 
-	if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+	if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
 		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
 		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
 
@@ -367,8 +362,8 @@
 	}
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-		 "curmultista_connectstate = %x dig_ext_port_stage %x\n",
-		 dm_digtable->curmultista_connectstate,
+		 "curmultista_cstate = %x dig_ext_port_stage %x\n",
+		 dm_digtable->curmultista_cstate,
 		 dm_digtable->dig_ext_port_stage);
 }
 
@@ -378,15 +373,14 @@
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-		 "presta_connectstate = %x, cursta_connectstate = %x\n",
-		 dm_digtable->presta_connectstate,
-		 dm_digtable->cursta_connectstate);
+		 "presta_cstate = %x, cursta_cstate = %x\n",
+		 dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
 
-	if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
-	    || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
-	    || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+	if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
+	    dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
+	    dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
 
-		if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
+		if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
 			dm_digtable->rssi_val_min =
 			    rtl92c_dm_initial_gain_min_pwdb(hw);
 			rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -394,7 +388,7 @@
 	} else {
 		dm_digtable->rssi_val_min = 0;
 		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-		dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
+		dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
 		dm_digtable->cur_igvalue = 0x20;
 		dm_digtable->pre_igvalue = 0;
 		rtl92c_dm_write_dig(hw);
@@ -407,7 +401,7 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
-	if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
 		dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
 
 		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +478,15 @@
 		return;
 
 	if (mac->link_state >= MAC80211_LINKED)
-		dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
+		dm_digtable->cursta_cstate = DIG_STA_CONNECT;
 	else
-		dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
+		dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
 
 	rtl92c_dm_initial_gain_sta(hw);
 	rtl92c_dm_initial_gain_multi_sta(hw);
 	rtl92c_dm_cck_packet_detection_thresh(hw);
 
-	dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
+	dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
 
 }
 
@@ -526,9 +520,9 @@
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
-		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
 		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
-		 dm_digtable->backoff_val);
+		 dm_digtable->back_val);
 
 	dm_digtable->cur_igvalue += 2;
 	if (dm_digtable->cur_igvalue > 0x3f)
@@ -555,20 +549,18 @@
 	return;
 
 	if (tmpentry_max_pwdb != 0) {
-		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
-		    tmpentry_max_pwdb;
+		rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
 	} else {
-		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+		rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
 	}
 
 	if (tmpentry_min_pwdb != 0xff) {
-		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
-		    tmpentry_min_pwdb;
+		rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
 	} else {
-		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+		rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
 	}
 
-	h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+	h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
 	h2c_parameter[0] = 0;
 
 	rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
@@ -1160,7 +1152,7 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rate_adaptive *p_ra = &(rtlpriv->ra);
-	u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+	u32 low_rssi_thresh, high_rssi_thresh;
 	struct ieee80211_sta *sta = NULL;
 
 	if (is_hal_stop(rtlhal)) {
@@ -1179,35 +1171,33 @@
 	    mac->opmode == NL80211_IFTYPE_STATION) {
 		switch (p_ra->pre_ratr_state) {
 		case DM_RATR_STA_HIGH:
-			high_rssithresh_for_ra = 50;
-			low_rssithresh_for_ra = 20;
+			high_rssi_thresh = 50;
+			low_rssi_thresh = 20;
 			break;
 		case DM_RATR_STA_MIDDLE:
-			high_rssithresh_for_ra = 55;
-			low_rssithresh_for_ra = 20;
+			high_rssi_thresh = 55;
+			low_rssi_thresh = 20;
 			break;
 		case DM_RATR_STA_LOW:
-			high_rssithresh_for_ra = 50;
-			low_rssithresh_for_ra = 25;
+			high_rssi_thresh = 50;
+			low_rssi_thresh = 25;
 			break;
 		default:
-			high_rssithresh_for_ra = 50;
-			low_rssithresh_for_ra = 20;
+			high_rssi_thresh = 50;
+			low_rssi_thresh = 20;
 			break;
 		}
 
-		if (rtlpriv->dm.undecorated_smoothed_pwdb >
-		    (long)high_rssithresh_for_ra)
+		if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh)
 			p_ra->ratr_state = DM_RATR_STA_HIGH;
-		else if (rtlpriv->dm.undecorated_smoothed_pwdb >
-			 (long)low_rssithresh_for_ra)
+		else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi_thresh)
 			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
 		else
 			p_ra->ratr_state = DM_RATR_STA_LOW;
 
 		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
 			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
-				 rtlpriv->dm.undecorated_smoothed_pwdb);
+				 rtlpriv->dm.undec_sm_pwdb);
 			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
 				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
 			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
@@ -1315,7 +1305,7 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
 	if (((mac->link_state == MAC80211_NOLINK)) &&
-	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		dm_pstable->rssi_val_min = 0;
 		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
 	}
@@ -1323,20 +1313,19 @@
 	if (mac->link_state == MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
 			dm_pstable->rssi_val_min =
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			    rtlpriv->dm.entry_min_undec_sm_pwdb;
 			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
 				 dm_pstable->rssi_val_min);
 		} else {
-			dm_pstable->rssi_val_min =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
 				 dm_pstable->rssi_val_min);
 		}
 	} else {
 		dm_pstable->rssi_val_min =
-		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		    rtlpriv->dm.entry_min_undec_sm_pwdb;
 
 		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
@@ -1368,7 +1357,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if (!rtlpriv->dm.dynamic_txpower_enable)
 		return;
@@ -1379,7 +1368,7 @@
 	}
 
 	if ((mac->link_state < MAC80211_LINKED) &&
-	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 			 "Not connected to any\n");
 
@@ -1391,41 +1380,35 @@
 
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		} else {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		}
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 undecorated_smoothed_pwdb);
+			 undec_sm_pwdb);
 	}
 
-	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
-	} else if ((undecorated_smoothed_pwdb <
-		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
-		   (undecorated_smoothed_pwdb >=
-		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
 
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
-	} else if (undecorated_smoothed_pwdb <
-		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_NORMAL\n");
@@ -1473,48 +1456,46 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 	u8 curr_bt_rssi_state = 0x00;
 
 	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
-		undecorated_smoothed_pwdb =
-				 GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+		undec_sm_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
 	} else {
-		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
-			undecorated_smoothed_pwdb = 100;
+		if (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)
+			undec_sm_pwdb = 100;
 		else
-			undecorated_smoothed_pwdb =
-				rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 	}
 
 	/* Check RSSI to determine HighPower/NormalPower state for
 	 * BT coexistence. */
-	if (undecorated_smoothed_pwdb >= 67)
+	if (undec_sm_pwdb >= 67)
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
-	else if (undecorated_smoothed_pwdb < 62)
+	else if (undec_sm_pwdb < 62)
 		curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
 
 	/* Check RSSI to determine AMPDU setting for BT coexistence. */
-	if (undecorated_smoothed_pwdb >= 40)
+	if (undec_sm_pwdb >= 40)
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
-	else if (undecorated_smoothed_pwdb <= 32)
+	else if (undec_sm_pwdb <= 32)
 		curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
 
 	/* Marked RSSI state. It will be used to determine BT coexistence
 	 * setting later. */
-	if (undecorated_smoothed_pwdb < 35)
+	if (undec_sm_pwdb < 35)
 		curr_bt_rssi_state |=  BT_RSSI_STATE_SPECIAL_LOW;
 	else
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
 
 	/* Set Tx Power according to BT status. */
-	if (undecorated_smoothed_pwdb >= 30)
+	if (undec_sm_pwdb >= 30)
 		curr_bt_rssi_state |=  BT_RSSI_STATE_TXPOWER_LOW;
-	else if (undecorated_smoothed_pwdb < 25)
+	else if (undec_sm_pwdb < 25)
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
 
 	/* Check BT state related to BT_Idle in B/G mode. */
-	if (undecorated_smoothed_pwdb < 15)
+	if (undec_sm_pwdb < 15)
 		curr_bt_rssi_state |=  BT_RSSI_STATE_BG_EDCA_LOW;
 	else
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index cdcad7d..1d5d360 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -34,9 +34,6 @@
 #include "dm_common.h"
 #include "phy_common.h"
 
-/* Define macro to shorten lines */
-#define MCS_TXPWR	mcs_txpwrlevel_origoffset
-
 u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -138,13 +135,13 @@
 		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
 						 BIT(8));
 	if (rfpi_enable)
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
 					 BLSSIREADBACKDATA);
 	else
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
 					 BLSSIREADBACKDATA);
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
-		 rfpath, pphyreg->rflssi_readback, retvalue);
+		 rfpath, pphyreg->rf_rb, retvalue);
 	return retvalue;
 }
 EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -290,11 +287,11 @@
 	else
 		return;
 
-	rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index] = data;
+	rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
 		 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
 		 rtlphy->pwrgroup_cnt, index,
-		 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index]);
+		 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
 
 	if (index == 13)
 		rtlphy->pwrgroup_cnt++;
@@ -374,14 +371,10 @@
 	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
 	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
 
-	rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
-	    RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
-	    RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
-	    RFPGA0_XCD_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
-	    RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
 
 	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
 	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
@@ -393,47 +386,33 @@
 	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
 	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
 
-	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
-	    ROFDM0_XARXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
-	    ROFDM0_XBRXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
-	    ROFDM0_XCRXIQIMBANLANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
-	    ROFDM0_XDRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
 
 	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
 	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
 	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
 	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
 
-	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
-	    ROFDM0_XATXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
-	    ROFDM0_XBTXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
-	    ROFDM0_XCTXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
-	    ROFDM0_XDTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
 
 	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
 	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
 	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
 	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
 
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
-	    RFPGA0_XA_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
-	    RFPGA0_XB_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
-	    RFPGA0_XC_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
-	    RFPGA0_XD_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
 
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
-	    TRANSCEIVEA_HSPI_READBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
-	    TRANSCEIVEB_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
 
 }
 EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
@@ -724,6 +703,26 @@
 }
 EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
 
+static void _rtl92c_phy_sw_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+		if (channel == 6 && rtlphy->current_chan_bw ==
+		    HT_CHANNEL_WIDTH_20)
+			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+				      0x00255);
+		else {
+			u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A,
+					    RF_RX_G1, RFREG_OFFSET_MASK);
+			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+				      backupRF0x1A);
+		}
+	}
+}
+
 static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
 					     u32 cmdtableidx, u32 cmdtablesz,
 					     enum swchnlcmd_id cmdid,
@@ -837,6 +836,7 @@
 					      currentcmd->para1,
 					      RFREG_OFFSET_MASK,
 					      rtlphy->rfreg_chnlval[rfpath]);
+			_rtl92c_phy_sw_rf_setting(hw, channel);
 			}
 			break;
 		default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 2925094..3cfa1bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -116,6 +116,9 @@
 	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
 
 #define CHIP_VER_B			BIT(4)
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
+#define CHIP_BONDING_92C_1T2R		0x1
+#define RF_TYPE_1T2R			BIT(1)
 #define CHIP_92C_BITMASK		BIT(0)
 #define CHIP_UNKNOWN			BIT(7)
 #define CHIP_92C_1T2R			0x03
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 27b3af8..74f9c08 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -41,7 +41,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if (!rtlpriv->dm.dynamic_txpower_enable)
 		return;
@@ -52,7 +52,7 @@
 	}
 
 	if ((mac->link_state < MAC80211_LINKED) &&
-	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 			 "Not connected to any\n");
 
@@ -64,41 +64,35 @@
 
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		} else {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		}
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 undecorated_smoothed_pwdb);
+			 undec_sm_pwdb);
 	}
 
-	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
-	} else if ((undecorated_smoothed_pwdb <
-		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
-		   (undecorated_smoothed_pwdb >=
-		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
 
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
-	} else if (undecorated_smoothed_pwdb <
-		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 86d73b3..d1f34f6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -896,7 +896,6 @@
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	static bool iqk_initialized; /* initialized to false */
 	bool rtstatus = true;
 	bool is92c;
 	int err;
@@ -921,9 +920,28 @@
 
 	rtlhal->last_hmeboxnum = 0;
 	rtl92c_phy_mac_config(hw);
+	/* The previous call modifies RCR, so update the rcr variable here;
+	 * otherwise receive_config is wrong and throughput becomes unstable:
+	 * RX RCR_ACRC32 makes throughput unstable and RX RCR_APP_ICV makes
+	 * mac80211 disassociate from Cisco 1252 APs. */
+	rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
 	rtl92c_phy_bb_config(hw);
 	rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
 	rtl92c_phy_rf_config(hw);
+	if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+	    !IS_92C_SERIAL(rtlhal->version)) {
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+	} else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
+	}
 	rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
 						 RF_CHNLBW, RFREG_OFFSET_MASK);
 	rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
@@ -945,11 +963,11 @@
 
 	if (ppsc->rfpwr_state == ERFON) {
 		rtl92c_phy_set_rfpath_switch(hw, 1);
-		if (iqk_initialized) {
+		if (rtlphy->iqk_initialized) {
 			rtl92c_phy_iq_calibrate(hw, true);
 		} else {
 			rtl92c_phy_iq_calibrate(hw, false);
-			iqk_initialized = true;
+			rtlphy->iqk_initialized = true;
 		}
 
 		rtl92c_dm_check_txpower_tracking(hw);
@@ -1004,6 +1022,13 @@
 				   ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
 				   CHIP_VENDOR_UMC));
 		}
+		if (IS_92C_SERIAL(version)) {
+			value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+			version = (enum version_8192c)(version |
+				   ((CHIP_BONDING_IDENTIFIER(value32)
+				   == CHIP_BONDING_92C_1T2R) ?
+				   RF_TYPE_1T2R : 0));
+		}
 	}
 
 	switch (version) {
@@ -1019,12 +1044,30 @@
 	case VERSION_A_CHIP_88C:
 		versionid = "A_CHIP_88C";
 		break;
+	case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+		versionid = "A_CUT_92C_1T2R";
+		break;
+	case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+		versionid = "A_CUT_92C";
+		break;
+	case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+		versionid = "A_CUT_88C";
+		break;
+	case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+		versionid = "B_CUT_92C_1T2R";
+		break;
+	case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+		versionid = "B_CUT_92C";
+		break;
+	case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+		versionid = "B_CUT_88C";
+		break;
 	default:
 		versionid = "Unknown. Bug?";
 		break;
 	}
 
-	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
 		 "Chip Version ID: %s\n", versionid);
 
 	switch (version & 0x3) {
@@ -1197,6 +1240,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 	u8 u1b_tmp;
 	u32 u4b_tmp;
 
@@ -1225,7 +1269,8 @@
 	rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
 	rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
 	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
-	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+	if (!IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+		rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
 	if (rtlpcipriv->bt_coexist.bt_coexistence) {
 		u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
 		u4b_tmp |= 0x03824800;
@@ -1254,6 +1299,9 @@
 		rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
 	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 	_rtl92ce_poweroff_adapter(hw);
+
+	/* After power off, IQ calibration (IQK) must be done again. */
+	rtlpriv->phy.iqk_initialized = false;
 }
 
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -1355,9 +1403,9 @@
 			tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
 		else
 			tempval = EEPROM_DEFAULT_HT40_2SDIFF;
-		rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
 		    (tempval & 0xf);
-		rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
 		    ((tempval & 0xf0) >> 4);
 	}
 
@@ -1381,7 +1429,7 @@
 				"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
 				rf_path, i,
 				rtlefuse->
-				eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
+				eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
 
 	for (rf_path = 0; rf_path < 2; rf_path++) {
 		for (i = 0; i < 14; i++) {
@@ -1396,14 +1444,14 @@
 			if ((rtlefuse->
 			     eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
 			     rtlefuse->
-			     eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+			     eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
 			    > 0) {
 				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
 				    rtlefuse->
 				    eeprom_chnlarea_txpwr_ht40_1s[rf_path]
 				    [index] -
 				    rtlefuse->
-				    eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+				    eprom_chnl_txpwr_ht40_2sdf[rf_path]
 				    [index];
 			} else {
 				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -1912,16 +1960,16 @@
 			ratr_bitmap &= 0x0f0ff0ff;
 		break;
 	}
+	sta_entry->ratr_index = ratr_index;
+
 	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
 		 "ratr_bitmap :%x\n", ratr_bitmap);
 	*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
 				     (ratr_index << 28);
 	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
 	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
-		 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
-		 ratr_index, ratr_bitmap,
-		 rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
-		 rate_mask[4]);
+		 "Rate_index:%x, ratr_val:%x, %5phC\n",
+		 ratr_index, ratr_bitmap, rate_mask);
 	rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
 
 	if (macid != 0)
@@ -2176,7 +2224,7 @@
 
 	if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
 		rtlpcipriv->bt_coexist.bt_ant_isolation =
-			rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation;
+			rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
 	else
 		rtlpcipriv->bt_coexist.bt_ant_isolation =
 			rtlpcipriv->bt_coexist.reg_bt_iso;
@@ -2207,23 +2255,22 @@
 					      bool auto_load_fail, u8 *hwinfo)
 {
 	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-	u8 value;
+	u8 val;
 
 	if (!auto_load_fail) {
 		rtlpcipriv->bt_coexist.eeprom_bt_coexist =
 					((hwinfo[RF_OPTION1] & 0xe0) >> 5);
-		value = hwinfo[RF_OPTION4];
-		rtlpcipriv->bt_coexist.eeprom_bt_type = ((value & 0xe) >> 1);
-		rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1);
-		rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation =
-							 ((value & 0x10) >> 4);
+		val = hwinfo[RF_OPTION4];
+		rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
+		rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
+		rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
 		rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
-							 ((value & 0x20) >> 5);
+							 ((val & 0x20) >> 5);
 	} else {
 		rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
 		rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
 		rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
-		rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation = 0;
+		rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
 		rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
 	}
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 88deae6..73262ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -82,6 +82,8 @@
 
 	if (is92c)
 		rtl_write_byte(rtlpriv, 0x14, 0x71);
+	else
+		rtl_write_byte(rtlpriv, 0x04CA, 0x0A);
 	return rtstatus;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index 54c7614..a9c406f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -97,15 +97,12 @@
 		}
 
 		if (rtlefuse->eeprom_regulatory == 0) {
-			tmpval =
-			    (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
-			    (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
-			     8);
+			tmpval = (rtlphy->mcs_offset[0][6]) +
+			    (rtlphy->mcs_offset[0][7] << 8);
 			tx_agc[RF90_PATH_A] += tmpval;
 
-			tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
-				 (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
-				 24);
+			tmpval = (rtlphy->mcs_offset[0][14]) +
+				 (rtlphy->mcs_offset[0][15] << 24);
 			tx_agc[RF90_PATH_B] += tmpval;
 		}
 	}
@@ -209,8 +206,7 @@
 		case 0:
 			chnlgroup = 0;
 
-			writeVal =
-			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+			writeVal = rtlphy->mcs_offset[chnlgroup][index +
 			    (rf ? 8 : 0)]
 			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
 
@@ -240,8 +236,7 @@
 						chnlgroup++;
 				}
 
-				writeVal =
-				    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+				writeVal = rtlphy->mcs_offset[chnlgroup]
 				    [index + (rf ? 8 : 0)] + ((index < 2) ?
 							      powerBase0[rf] :
 							      powerBase1[rf]);
@@ -276,8 +271,7 @@
 								    1]);
 			}
 			for (i = 0; i < 4; i++) {
-				pwr_diff_limit[i] =
-				    (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+				pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
 					  [chnlgroup][index +
 					  (rf ? 8 : 0)] & (0x7f << (i * 8))) >>
 					  (i * 8));
@@ -317,8 +311,7 @@
 			break;
 		default:
 			chnlgroup = 0;
-			writeVal =
-			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+			writeVal = rtlphy->mcs_offset[chnlgroup]
 			    [index + (rf ? 8 : 0)]
 			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index ea2e1bd..60451ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -162,12 +162,10 @@
 
 	/* request fw */
 	if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
-	    !IS_92C_SERIAL(rtlhal->version)) {
+	    !IS_92C_SERIAL(rtlhal->version))
 		rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
-	} else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+	else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
 		rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
-		pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
-	}
 
 	rtlpriv->max_fw_size = 0x4000;
 	pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 390d6d4..1734247 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -127,11 +127,11 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct phy_sts_cck_8192s_t *cck_buf;
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
 	s8 rx_pwr_all = 0, rx_pwr[4];
 	u8 evm, pwdb_all, rf_rx_num = 0;
 	u8 i, max_spatial_stream;
 	u32 rssi, total_rssi = 0;
-	bool in_powersavemode = false;
 	bool is_cck_rate;
 
 	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
@@ -140,14 +140,14 @@
 	pstats->is_cck = is_cck_rate;
 	pstats->packet_beacon = packet_beacon;
 	pstats->is_cck = is_cck_rate;
-	pstats->rx_mimo_signalquality[0] = -1;
-	pstats->rx_mimo_signalquality[1] = -1;
+	pstats->rx_mimo_sig_qual[0] = -1;
+	pstats->rx_mimo_sig_qual[1] = -1;
 
 	if (is_cck_rate) {
 		u8 report, cck_highpwr;
 		cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
 
-		if (!in_powersavemode)
+		if (ppsc->rfpwr_state == ERFON)
 			cck_highpwr = (u8) rtl_get_bbreg(hw,
 						 RFPGA0_XA_HSSIPARAMETER2,
 						 BIT(9));
@@ -211,8 +211,8 @@
 			}
 
 			pstats->signalquality = sq;
-			pstats->rx_mimo_signalquality[0] = sq;
-			pstats->rx_mimo_signalquality[1] = -1;
+			pstats->rx_mimo_sig_qual[0] = sq;
+			pstats->rx_mimo_sig_qual[1] = -1;
 		}
 	} else {
 		rtlpriv->dm.rfpath_rxenable[0] =
@@ -251,8 +251,7 @@
 				if (i == 0)
 					pstats->signalquality =
 					    (u8) (evm & 0xff);
-				pstats->rx_mimo_signalquality[i] =
-				    (u8) (evm & 0xff);
+				pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
 			}
 		}
 	}
@@ -362,36 +361,31 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if (mac->opmode == NL80211_IFTYPE_ADHOC) {
 		return;
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.undecorated_smoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 	}
 
 	if (pstats->packet_toself || pstats->packet_beacon) {
-		if (undecorated_smoothed_pwdb < 0)
-			undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+		if (undec_sm_pwdb < 0)
+			undec_sm_pwdb = pstats->rx_pwdb_all;
 
-		if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
-			undecorated_smoothed_pwdb =
-			    (((undecorated_smoothed_pwdb) *
+		if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
 
-			undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
-			    + 1;
+			undec_sm_pwdb += 1;
 		} else {
-			undecorated_smoothed_pwdb =
-			    (((undecorated_smoothed_pwdb) *
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
 		}
 
-		rtlpriv->dm.undecorated_smoothed_pwdb =
-		    undecorated_smoothed_pwdb;
+		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
 		_rtl92ce_update_rxsignalstatistics(hw, pstats);
 	}
 }
@@ -438,15 +432,14 @@
 			for (n_spatialstream = 0; n_spatialstream < 2;
 			     n_spatialstream++) {
 				if (pstats->
-				    rx_mimo_signalquality[n_spatialstream] !=
-				    -1) {
+				    rx_mimo_sig_qual[n_spatialstream] != -1) {
 					if (rtlpriv->stats.
 					    rx_evm_percentage[n_spatialstream]
 					    == 0) {
 						rtlpriv->stats.
 						   rx_evm_percentage
 						   [n_spatialstream] =
-						   pstats->rx_mimo_signalquality
+						   pstats->rx_mimo_sig_qual
 						   [n_spatialstream];
 					}
 
@@ -456,8 +449,7 @@
 					      stats.rx_evm_percentage
 					      [n_spatialstream] *
 					      (RX_SMOOTH_FACTOR - 1)) +
-					     (pstats->
-					      rx_mimo_signalquality
+					     (pstats->rx_mimo_sig_qual
 					      [n_spatialstream] * 1)) /
 					    (RX_SMOOTH_FACTOR);
 				}
@@ -567,7 +559,7 @@
 	if (GET_RX_DESC_RXHT(pdesc))
 		rx_status->flag |= RX_FLAG_HT;
 
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 
 	if (stats->decrypted)
 		rx_status->flag |= RX_FLAG_DECRYPTED;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 6fd39ea..16a0b9e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -39,7 +39,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if (!rtlpriv->dm.dynamic_txpower_enable)
 		return;
@@ -50,7 +50,7 @@
 	}
 
 	if ((mac->link_state < MAC80211_LINKED) &&
-	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 			 "Not connected to any\n");
 
@@ -62,41 +62,35 @@
 
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		} else {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		}
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 undecorated_smoothed_pwdb);
+			 undec_sm_pwdb);
 	}
 
-	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
-	} else if ((undecorated_smoothed_pwdb <
-		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
-		   (undecorated_smoothed_pwdb >=
-		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
 
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
-	} else if (undecorated_smoothed_pwdb <
-		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
 		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 4bbb711..b1ccff4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -152,9 +152,9 @@
 			tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
 		else
 			tempval = EEPROM_DEFAULT_HT40_2SDIFF;
-		rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
 		    (tempval & 0xf);
-		rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
 		    ((tempval & 0xf0) >> 4);
 	}
 	for (rf_path = 0; rf_path < 2; rf_path++)
@@ -177,7 +177,7 @@
 				"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
 				rf_path, i,
 				rtlefuse->
-				eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
+				eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
 	for (rf_path = 0; rf_path < 2; rf_path++) {
 		for (i = 0; i < 14; i++) {
 			index = _rtl92c_get_chnl_group((u8) i);
@@ -189,13 +189,13 @@
 			if ((rtlefuse->
 			     eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
 			     rtlefuse->
-			     eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+			     eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
 			    > 0) {
 				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
 				    rtlefuse->
 				    eeprom_chnlarea_txpwr_ht40_1s[rf_path]
 				    [index] - rtlefuse->
-				    eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+				    eprom_chnl_txpwr_ht40_2sdf[rf_path]
 				    [index];
 			} else {
 				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -2169,10 +2169,8 @@
 				      ratr_index << 28);
 	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
 	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
-		 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
-		 ratr_index, ratr_bitmap,
-		 rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
-		 rate_mask[4]);
+		 "Rate_index:%x, ratr_val:%x, %5phC\n",
+		 ratr_index, ratr_bitmap, rate_mask);
 	rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 7e91c76..32ff959 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -46,7 +46,7 @@
 
 #define LINK_Q	ui_link_quality
 #define RX_EVM	rx_evm_percentage
-#define RX_SIGQ	rx_mimo_signalquality
+#define RX_SIGQ	rx_mimo_sig_qual
 
 
 void rtl92c_read_chip_version(struct ieee80211_hw *hw)
@@ -982,32 +982,27 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb = 0;
+	long undec_sm_pwdb = 0;
 
 	if (mac->opmode == NL80211_IFTYPE_ADHOC) {
 		return;
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.undecorated_smoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 	}
 	if (pstats->packet_toself || pstats->packet_beacon) {
-		if (undecorated_smoothed_pwdb < 0)
-			undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
-		if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
-			undecorated_smoothed_pwdb =
-			    (((undecorated_smoothed_pwdb) *
+		if (undec_sm_pwdb < 0)
+			undec_sm_pwdb = pstats->rx_pwdb_all;
+		if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-			undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
-			    + 1;
+			undec_sm_pwdb += 1;
 		} else {
-			undecorated_smoothed_pwdb =
-			    (((undecorated_smoothed_pwdb) *
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
 		}
-		rtlpriv->dm.undecorated_smoothed_pwdb =
-		    undecorated_smoothed_pwdb;
+		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
 		_rtl92c_update_rxsignalstatistics(hw, pstats);
 	}
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 506b9a0..953f1a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -115,15 +115,11 @@
 				    (ppowerlevel[idx1] << 24);
 			}
 			if (rtlefuse->eeprom_regulatory == 0) {
-				tmpval = (rtlphy->mcs_txpwrlevel_origoffset
-					[0][6]) +
-					(rtlphy->mcs_txpwrlevel_origoffset
-					[0][7] <<  8);
+				tmpval = (rtlphy->mcs_offset[0][6]) +
+					(rtlphy->mcs_offset[0][7] <<  8);
 				tx_agc[RF90_PATH_A] += tmpval;
-				tmpval = (rtlphy->mcs_txpwrlevel_origoffset
-					[0][14]) +
-					(rtlphy->mcs_txpwrlevel_origoffset
-					[0][15] << 24);
+				tmpval = (rtlphy->mcs_offset[0][14]) +
+					(rtlphy->mcs_offset[0][15] << 24);
 				tx_agc[RF90_PATH_B] += tmpval;
 			}
 		}
@@ -215,7 +211,7 @@
 		switch (rtlefuse->eeprom_regulatory) {
 		case 0:
 			chnlgroup = 0;
-			writeVal = rtlphy->mcs_txpwrlevel_origoffset
+			writeVal = rtlphy->mcs_offset
 			    [chnlgroup][index + (rf ? 8 : 0)]
 			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
 			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
@@ -238,8 +234,7 @@
 				else
 					chnlgroup += 4;
 			}
-			writeVal = rtlphy->mcs_txpwrlevel_origoffset
-					[chnlgroup][index +
+			writeVal = rtlphy->mcs_offset[chnlgroup][index +
 					(rf ? 8 : 0)] +
 					((index < 2) ? powerBase0[rf] :
 					powerBase1[rf]);
@@ -271,8 +266,7 @@
 					[channel - 1]);
 			}
 			for (i = 0; i < 4; i++) {
-				pwr_diff_limit[i] =
-				    (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+				pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
 				    [chnlgroup][index + (rf ? 8 : 0)]
 				    & (0x7f << (i * 8))) >> (i * 8));
 				if (rtlphy->current_chan_bw ==
@@ -306,7 +300,7 @@
 			break;
 		default:
 			chnlgroup = 0;
-			writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+			writeVal = rtlphy->mcs_offset[chnlgroup]
 				   [index + (rf ? 8 : 0)] + ((index < 2) ?
 				   powerBase0[rf] : powerBase1[rf]);
 			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 6e66f04..b6222ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -334,7 +334,7 @@
 		rx_status->flag |= RX_FLAG_40MHZ;
 	if (GET_RX_DESC_RX_HT(pdesc))
 		rx_status->flag |= RX_FLAG_HT;
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 	if (stats->decrypted)
 		rx_status->flag |= RX_FLAG_DECRYPTED;
 	rx_status->rate_idx = rtlwifi_rate_mapping(hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index ed868c3..fd8df23 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -35,7 +35,7 @@
 #include "dm.h"
 #include "fw.h"
 
-#define UNDEC_SM_PWDB	entry_min_undecoratedsmoothed_pwdb
+#define UNDEC_SM_PWDB	entry_min_undec_sm_pwdb
 
 static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
 	0x7f8001fe,		/* 0, +6.0dB */
@@ -164,18 +164,18 @@
 	de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 	de_digtable->cur_igvalue = 0x20;
 	de_digtable->pre_igvalue = 0x0;
-	de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
-	de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
-	de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+	de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+	de_digtable->presta_cstate = DIG_STA_DISCONNECT;
+	de_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
 	de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
 	de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
 	de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
 	de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
 	de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
 	de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
-	de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
-	de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-	de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+	de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+	de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+	de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
 	de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
 	de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
 	de_digtable->large_fa_hit = 0;
@@ -273,35 +273,34 @@
 	/* Determine the minimum RSSI  */
 	if ((mac->link_state < MAC80211_LINKED) &&
 	    (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
-		de_digtable->min_undecorated_pwdb_for_dm = 0;
+		de_digtable->min_undec_pwdb_for_dm = 0;
 		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 			 "Not connected to any\n");
 	}
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_AP ||
 		    mac->opmode == NL80211_IFTYPE_ADHOC) {
-			de_digtable->min_undecorated_pwdb_for_dm =
+			de_digtable->min_undec_pwdb_for_dm =
 			    rtlpriv->dm.UNDEC_SM_PWDB;
 			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
 				 rtlpriv->dm.UNDEC_SM_PWDB);
 		} else {
-			de_digtable->min_undecorated_pwdb_for_dm =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			de_digtable->min_undec_pwdb_for_dm =
+			    rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%x\n",
-				 de_digtable->min_undecorated_pwdb_for_dm);
+				 de_digtable->min_undec_pwdb_for_dm);
 		}
 	} else {
-		de_digtable->min_undecorated_pwdb_for_dm =
-		    rtlpriv->dm.UNDEC_SM_PWDB;
+		de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
 		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 			 "AP Ext Port or disconnect PWDB = 0x%x\n",
-			 de_digtable->min_undecorated_pwdb_for_dm);
+			 de_digtable->min_undec_pwdb_for_dm);
 	}
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
-		 de_digtable->min_undecorated_pwdb_for_dm);
+		 de_digtable->min_undec_pwdb_for_dm);
 }
 
 static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -310,16 +309,16 @@
 	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 	unsigned long flag = 0;
 
-	if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+	if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
 		if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
-			if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
+			if (de_digtable->min_undec_pwdb_for_dm <= 25)
 				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_LOWRSSI;
 			else
 				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_HIGHRSSI;
 		} else {
-			if (de_digtable->min_undecorated_pwdb_for_dm <= 20)
+			if (de_digtable->min_undec_pwdb_for_dm <= 20)
 				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_LOWRSSI;
 			else
@@ -342,7 +341,7 @@
 		de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
 	}
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
-		 de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
+		 de_digtable->cursta_cstate == DIG_STA_CONNECT ?
 		 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
 		 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -358,9 +357,9 @@
 	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
-		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
 		 de_digtable->cur_igvalue, de_digtable->pre_igvalue,
-		 de_digtable->backoff_val);
+		 de_digtable->back_val);
 	if (de_digtable->dig_enable_flag == false) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
 		de_digtable->pre_igvalue = 0x17;
@@ -382,13 +381,13 @@
 	if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
 	    (rtlpriv->mac80211.vendor == PEER_CISCO)) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
-		if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50
-		    && de_digtable->min_undecorated_pwdb_for_dm < 50) {
+		if (de_digtable->last_min_undec_pwdb_for_dm >= 50
+		    && de_digtable->min_undec_pwdb_for_dm < 50) {
 			rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
 			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 				 "Early Mode Off\n");
-		} else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 &&
-			   de_digtable->min_undecorated_pwdb_for_dm > 55) {
+		} else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
+			   de_digtable->min_undec_pwdb_for_dm > 55) {
 			rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
 			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 				 "Early Mode On\n");
@@ -409,8 +408,8 @@
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
 	if (rtlpriv->rtlhal.earlymode_enable) {
 		rtl92d_early_mode_enabled(rtlpriv);
-		de_digtable->last_min_undecorated_pwdb_for_dm =
-				 de_digtable->min_undecorated_pwdb_for_dm;
+		de_digtable->last_min_undec_pwdb_for_dm =
+				 de_digtable->min_undec_pwdb_for_dm;
 	}
 	if (!rtlpriv->dm.dm_initialgain_enable)
 		return;
@@ -428,9 +427,9 @@
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
 	/* Decide the current status and if modify initial gain or not */
 	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
-		de_digtable->cursta_connectstate = DIG_STA_CONNECT;
+		de_digtable->cursta_cstate = DIG_STA_CONNECT;
 	else
-		de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
+		de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
 
 	/* adjust initial gain according to false alarm counter */
 	if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -522,7 +521,7 @@
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if ((!rtlpriv->dm.dynamic_txpower_enable)
 	    || rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -539,62 +538,62 @@
 	}
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			undecorated_smoothed_pwdb =
+			undec_sm_pwdb =
 			    rtlpriv->dm.UNDEC_SM_PWDB;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "IBSS Client PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		} else {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			undec_sm_pwdb =
+			    rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		}
 	} else {
-		undecorated_smoothed_pwdb =
+		undec_sm_pwdb =
 		    rtlpriv->dm.UNDEC_SM_PWDB;
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 undecorated_smoothed_pwdb);
+			 undec_sm_pwdb);
 	}
 	if (rtlhal->current_bandtype == BAND_ON_5G) {
-		if (undecorated_smoothed_pwdb >= 0x33) {
+		if (undec_sm_pwdb >= 0x33) {
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_LEVEL2;
 			RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
 				 "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
-		} else if ((undecorated_smoothed_pwdb < 0x33)
-			   && (undecorated_smoothed_pwdb >= 0x2b)) {
+		} else if ((undec_sm_pwdb < 0x33)
+			   && (undec_sm_pwdb >= 0x2b)) {
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_LEVEL1;
 			RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
 				 "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
-		} else if (undecorated_smoothed_pwdb < 0x2b) {
+		} else if (undec_sm_pwdb < 0x2b) {
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_NORMAL;
 			RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
 				 "5G:TxHighPwrLevel_Normal\n");
 		}
 	} else {
-		if (undecorated_smoothed_pwdb >=
+		if (undec_sm_pwdb >=
 		    TX_POWER_NEAR_FIELD_THRESH_LVL2) {
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_LEVEL2;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
 		} else
-		    if ((undecorated_smoothed_pwdb <
+		    if ((undec_sm_pwdb <
 			 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
-			&& (undecorated_smoothed_pwdb >=
+			&& (undec_sm_pwdb >=
 			    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
 
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_LEVEL1;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
-		} else if (undecorated_smoothed_pwdb <
+		} else if (undec_sm_pwdb <
 			   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
 			rtlpriv->dm.dynamic_txhighpower_lvl =
 						 TXHIGHPWRLEVEL_NORMAL;
@@ -620,7 +619,7 @@
 		return;
 	/* Indicate Rx signal strength to FW. */
 	if (rtlpriv->dm.useramask) {
-		u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
+		u32 temp = rtlpriv->dm.undec_sm_pwdb;
 
 		temp <<= 16;
 		temp |= 0x100;
@@ -629,7 +628,7 @@
 		rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
 	} else {
 		rtl_write_byte(rtlpriv, 0x4fe,
-			       (u8) rtlpriv->dm.undecorated_smoothed_pwdb);
+			       (u8) rtlpriv->dm.undec_sm_pwdb);
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index db00860..33041bd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -298,13 +298,13 @@
 		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
 			      BIT(8));
 	if (rfpi_enable)
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
 			BLSSIREADBACKDATA);
 	else
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
 			BLSSIREADBACKDATA);
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
-		 rfpath, pphyreg->rflssi_readback, retvalue);
+		 rfpath, pphyreg->rf_rb, retvalue);
 	return retvalue;
 }
 
@@ -478,14 +478,10 @@
 
 	/* RF switch Control */
 	/* TR/Ant switch control */
-	rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
-		RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
-	    RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
-	    RFPGA0_XCD_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
-	    RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
 
 	/* AGC control 1 */
 	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -500,14 +496,10 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
 
 	/* RX AFE control 1 */
-	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
-	    ROFDM0_XARXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
-	    ROFDM0_XBRXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
-	    ROFDM0_XCRXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
-	    ROFDM0_XDRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
 
 	/* RX AFE control 2 */
 	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -516,14 +508,10 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
 
 	/* Tx AFE control 1 */
-	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
-	    ROFDM0_XATxIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
-	    ROFDM0_XBTxIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
-	    ROFDM0_XCTxIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
-	    ROFDM0_XDTxIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATxIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTxIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTxIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTxIQIMBALANCE;
 
 	/* Tx AFE control 2 */
 	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE;
@@ -532,20 +520,14 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE;
 
 	/* Transceiver LSSI Readback SI mode */
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
-	    RFPGA0_XA_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
-	    RFPGA0_XB_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
-	    RFPGA0_XC_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
-	    RFPGA0_XD_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
 
 	/* Transceiver LSSI Readback PI mode */
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
-	    TRANSCEIVERA_HSPI_READBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
-	    TRANSCEIVERB_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
 }
 
 static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -702,12 +684,11 @@
 	else
 		return;
 
-	rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+	rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
 		 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n",
 		 rtlphy->pwrgroup_cnt, index,
-		 rtlphy->mcs_txpwrlevel_origoffset
-		 [rtlphy->pwrgroup_cnt][index]);
+		 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
 	if (index == 13)
 		rtlphy->pwrgroup_cnt++;
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 3066a7fb..20144e0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -106,11 +106,11 @@
 			    (ppowerlevel[idx1] << 24);
 		}
 		if (rtlefuse->eeprom_regulatory == 0) {
-			tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
-			    (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8);
+			tmpval = (rtlphy->mcs_offset[0][6]) +
+			    (rtlphy->mcs_offset[0][7] << 8);
 			tx_agc[RF90_PATH_A] += tmpval;
-			tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
-			    (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24);
+			tmpval = (rtlphy->mcs_offset[0][14]) +
+			    (rtlphy->mcs_offset[0][15] << 24);
 			tx_agc[RF90_PATH_B] += tmpval;
 		}
 	}
@@ -227,7 +227,7 @@
 		switch (rtlefuse->eeprom_regulatory) {
 		case 0:
 			chnlgroup = 0;
-			writeval = rtlphy->mcs_txpwrlevel_origoffset
+			writeval = rtlphy->mcs_offset
 					[chnlgroup][index +
 					(rf ? 8 : 0)] + ((index < 2) ?
 					powerbase0[rf] :
@@ -247,7 +247,7 @@
 					chnlgroup++;
 				else
 					chnlgroup += 4;
-				writeval = rtlphy->mcs_txpwrlevel_origoffset
+				writeval = rtlphy->mcs_offset
 						[chnlgroup][index +
 						(rf ? 8 : 0)] + ((index < 2) ?
 						powerbase0[rf] :
@@ -280,8 +280,7 @@
 					[channel - 1]);
 			}
 			for (i = 0; i < 4; i++) {
-				pwr_diff_limit[i] =
-					(u8)((rtlphy->mcs_txpwrlevel_origoffset
+				pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset
 					[chnlgroup][index + (rf ? 8 : 0)] &
 					(0x7f << (i * 8))) >> (i * 8));
 				if (rtlphy->current_chan_bw ==
@@ -316,8 +315,7 @@
 			break;
 		default:
 			chnlgroup = 0;
-			writeval = rtlphy->mcs_txpwrlevel_origoffset
-				   [chnlgroup][index +
+			writeval = rtlphy->mcs_offset[chnlgroup][index +
 				   (rf ? 8 : 0)] + ((index < 2) ?
 				   powerbase0[rf] : powerbase1[rf]);
 			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 4686f34..f9f3861 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -132,8 +132,8 @@
 	pstats->packet_toself = packet_toself;
 	pstats->packet_beacon = packet_beacon;
 	pstats->is_cck = is_cck_rate;
-	pstats->rx_mimo_signalquality[0] = -1;
-	pstats->rx_mimo_signalquality[1] = -1;
+	pstats->rx_mimo_sig_qual[0] = -1;
+	pstats->rx_mimo_sig_qual[1] = -1;
 
 	if (is_cck_rate) {
 		u8 report, cck_highpwr;
@@ -212,8 +212,8 @@
 					sq = ((64 - sq) * 100) / 44;
 			}
 			pstats->signalquality = sq;
-			pstats->rx_mimo_signalquality[0] = sq;
-			pstats->rx_mimo_signalquality[1] = -1;
+			pstats->rx_mimo_sig_qual[0] = sq;
+			pstats->rx_mimo_sig_qual[1] = -1;
 		}
 	} else {
 		rtlpriv->dm.rfpath_rxenable[0] = true;
@@ -246,7 +246,7 @@
 				if (i == 0)
 					pstats->signalquality =
 						 (u8)(evm & 0xff);
-				pstats->rx_mimo_signalquality[i] =
+				pstats->rx_mimo_sig_qual[i] =
 						 (u8)(evm & 0xff);
 			}
 		}
@@ -345,33 +345,28 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 
 	if (mac->opmode == NL80211_IFTYPE_ADHOC	||
 		mac->opmode == NL80211_IFTYPE_AP)
 		return;
 	else
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.undecorated_smoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 
 	if (pstats->packet_toself || pstats->packet_beacon) {
-		if (undecorated_smoothed_pwdb < 0)
-			undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
-		if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
-			undecorated_smoothed_pwdb =
-			      (((undecorated_smoothed_pwdb) *
+		if (undec_sm_pwdb < 0)
+			undec_sm_pwdb = pstats->rx_pwdb_all;
+		if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			      (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-			undecorated_smoothed_pwdb =
-			      undecorated_smoothed_pwdb + 1;
+			undec_sm_pwdb = undec_sm_pwdb + 1;
 		} else {
-			undecorated_smoothed_pwdb =
-			      (((undecorated_smoothed_pwdb) *
+			undec_sm_pwdb = (((undec_sm_pwdb) *
 			      (RX_SMOOTH_FACTOR - 1)) +
 			      (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
 		}
-		rtlpriv->dm.undecorated_smoothed_pwdb =
-				 undecorated_smoothed_pwdb;
+		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
 		_rtl92de_update_rxsignalstatistics(hw, pstats);
 	}
 }
@@ -383,15 +378,15 @@
 	int stream;
 
 	for (stream = 0; stream < 2; stream++) {
-		if (pstats->rx_mimo_signalquality[stream] != -1) {
+		if (pstats->rx_mimo_sig_qual[stream] != -1) {
 			if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
 				rtlpriv->stats.rx_evm_percentage[stream] =
-				    pstats->rx_mimo_signalquality[stream];
+				    pstats->rx_mimo_sig_qual[stream];
 			}
 			rtlpriv->stats.rx_evm_percentage[stream] =
 			    ((rtlpriv->stats.rx_evm_percentage[stream]
 			      * (RX_SMOOTH_FACTOR - 1)) +
-			     (pstats->rx_mimo_signalquality[stream] * 1)) /
+			     (pstats->rx_mimo_sig_qual[stream] * 1)) /
 			    (RX_SMOOTH_FACTOR);
 		}
 	}
@@ -514,7 +509,7 @@
 		rx_status->flag |= RX_FLAG_40MHZ;
 	if (GET_RX_DESC_RXHT(pdesc))
 		rx_status->flag |= RX_FLAG_HT;
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 	if (stats->decrypted)
 		rx_status->flag |= RX_FLAG_DECRYPTED;
 	rx_status->rate_idx = rtlwifi_rate_mapping(hw,
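
The undec_sm_pwdb bookkeeping renamed above is a plain exponential moving
average over the per-packet PWDB, with a +1 nudge when the new sample is
higher so the average can climb toward it. A minimal standalone restatement
follows; it is illustrative only, smooth_pwdb() is a hypothetical helper
name, and RX_SMOOTH_FACTOR is assumed to be 20 here (the driver takes the
real value from its own headers).

/*
 * Illustrative only -- not part of the patch. Mirrors the logic of the
 * PWDB smoothing shown above after the undec_sm_pwdb rename.
 */
#include <stdio.h>

#define RX_SMOOTH_FACTOR	20	/* assumed value for the sketch */

static long smooth_pwdb(long undec_sm_pwdb, unsigned int rx_pwdb_all)
{
	if (undec_sm_pwdb < 0)			/* first sample seeds the average */
		return rx_pwdb_all;

	if (rx_pwdb_all > (unsigned int)undec_sm_pwdb)
		/* rising: round up by one so the average can catch up */
		return (undec_sm_pwdb * (RX_SMOOTH_FACTOR - 1) +
			rx_pwdb_all) / RX_SMOOTH_FACTOR + 1;

	return (undec_sm_pwdb * (RX_SMOOTH_FACTOR - 1) +
		rx_pwdb_all) / RX_SMOOTH_FACTOR;
}

int main(void)
{
	long pwdb = -1;
	unsigned int samples[] = { 40, 42, 35, 60 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		pwdb = smooth_pwdb(pwdb, samples[i]);
		printf("sample %u -> undec_sm_pwdb %ld\n", samples[i], pwdb);
	}
	return 0;
}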
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 465f581..bf79a52 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -267,13 +267,12 @@
 			break;
 		}
 
-		if (rtlpriv->dm.undecorated_smoothed_pwdb >
-		    (long)high_rssi_thresh) {
+		if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_HIGH;
-		} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+		} else if (rtlpriv->dm.undec_sm_pwdb >
 			   (long)middle_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_LOW;
-		} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+		} else if (rtlpriv->dm.undec_sm_pwdb >
 			   (long)low_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_LOW;
 		} else {
@@ -283,8 +282,7 @@
 		if (ra->pre_ratr_state != ra->ratr_state) {
 			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
 				 "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
-				 rtlpriv->dm.undecorated_smoothed_pwdb,
-				 ra->ratr_state,
+				 rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
 				 ra->pre_ratr_state, ra->ratr_state);
 
 			rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
@@ -316,7 +314,7 @@
 	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc));
 
 	if (mac->link_state >= MAC80211_LINKED) {
-		if (rtlpriv->dm.undecorated_smoothed_pwdb > tmpentry_maxpwdb) {
+		if (rtlpriv->dm.undec_sm_pwdb > tmpentry_maxpwdb) {
 			rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A];
 			rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B];
 		}
@@ -424,18 +422,18 @@
 	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
 
 	if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
-		if ((digtable->backoff_val - 6) <
+		if ((digtable->back_val - 6) <
 			digtable->backoffval_range_min)
-			digtable->backoff_val = digtable->backoffval_range_min;
+			digtable->back_val = digtable->backoffval_range_min;
 		else
-			digtable->backoff_val -= 6;
+			digtable->back_val -= 6;
 	} else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
-		if ((digtable->backoff_val + 6) >
+		if ((digtable->back_val + 6) >
 			digtable->backoffval_range_max)
-			digtable->backoff_val =
+			digtable->back_val =
 				 digtable->backoffval_range_max;
 		else
-			digtable->backoff_val += 6;
+			digtable->back_val += 6;
 	}
 }
 
@@ -447,28 +445,28 @@
 	static u8 initialized, force_write;
 	u8 initial_gain = 0;
 
-	if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) ||
-		(digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
-		if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
+	if ((digtable->pre_sta_cstate == digtable->cur_sta_cstate) ||
+	    (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT)) {
+		if (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT) {
 			if (rtlpriv->psc.rfpwr_state != ERFON)
 				return;
 
 			if (digtable->backoff_enable_flag)
 				rtl92s_backoff_enable_flag(hw);
 			else
-				digtable->backoff_val = DM_DIG_BACKOFF;
+				digtable->back_val = DM_DIG_BACKOFF;
 
-			if ((digtable->rssi_val + 10 - digtable->backoff_val) >
+			if ((digtable->rssi_val + 10 - digtable->back_val) >
 				digtable->rx_gain_range_max)
 				digtable->cur_igvalue =
 						digtable->rx_gain_range_max;
-			else if ((digtable->rssi_val + 10 - digtable->backoff_val)
+			else if ((digtable->rssi_val + 10 - digtable->back_val)
 				 < digtable->rx_gain_range_min)
 				digtable->cur_igvalue =
 						digtable->rx_gain_range_min;
 			else
 				digtable->cur_igvalue = digtable->rssi_val + 10 -
-						digtable->backoff_val;
+						digtable->back_val;
 
 			if (falsealm_cnt->cnt_all > 10000)
 				digtable->cur_igvalue =
@@ -490,7 +488,7 @@
 		digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 		rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
 
-		digtable->backoff_val = DM_DIG_BACKOFF;
+		digtable->back_val = DM_DIG_BACKOFF;
 		digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
 		digtable->pre_igvalue = 0;
 		return;
@@ -528,14 +526,14 @@
 	/* Decide the current status and if modify initial gain or not */
 	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
 	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
-		digtable->cur_sta_connectstate = DIG_STA_CONNECT;
+		digtable->cur_sta_cstate = DIG_STA_CONNECT;
 	else
-		digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
+		digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
 
-	digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
+	digtable->rssi_val = rtlpriv->dm.undec_sm_pwdb;
 
 	/* Change dig mode to rssi */
-	if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) {
+	if (digtable->cur_sta_cstate != DIG_STA_DISCONNECT) {
 		if (digtable->dig_twoport_algorithm ==
 		    DIG_TWO_PORT_ALGO_FALSE_ALARM) {
 			digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
@@ -546,7 +544,7 @@
 	_rtl92s_dm_false_alarm_counter_statistics(hw);
 	_rtl92s_dm_initial_gain_sta_beforeconnect(hw);
 
-	digtable->pre_sta_connectstate = digtable->cur_sta_connectstate;
+	digtable->pre_sta_cstate = digtable->cur_sta_cstate;
 }
 
 static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
@@ -573,7 +571,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 	long txpwr_threshold_lv1, txpwr_threshold_lv2;
 
 	/* 2T2R TP issue */
@@ -587,7 +585,7 @@
 	}
 
 	if ((mac->link_state < MAC80211_LINKED) &&
-	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 			 "Not connected to any\n");
 
@@ -599,25 +597,22 @@
 
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		} else {
-			undecorated_smoothed_pwdb =
-			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
 			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 undecorated_smoothed_pwdb);
+				 undec_sm_pwdb);
 		}
 	} else {
-		undecorated_smoothed_pwdb =
-		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 undecorated_smoothed_pwdb);
+			 undec_sm_pwdb);
 	}
 
 	txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
@@ -625,12 +620,12 @@
 
 	if (rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1)
 		rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
-	else if (undecorated_smoothed_pwdb >= txpwr_threshold_lv2)
+	else if (undec_sm_pwdb >= txpwr_threshold_lv2)
 		rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2;
-	else if ((undecorated_smoothed_pwdb < (txpwr_threshold_lv2 - 3)) &&
-		(undecorated_smoothed_pwdb >= txpwr_threshold_lv1))
+	else if ((undec_sm_pwdb < (txpwr_threshold_lv2 - 3)) &&
+		(undec_sm_pwdb >= txpwr_threshold_lv1))
 		rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1;
-	else if (undecorated_smoothed_pwdb < (txpwr_threshold_lv1 - 3))
+	else if (undec_sm_pwdb < (txpwr_threshold_lv1 - 3))
 		rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
 
 	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl))
@@ -665,10 +660,10 @@
 	digtable->dig_state = DM_STA_DIG_MAX;
 	digtable->dig_highpwrstate = DM_STA_DIG_MAX;
 
-	digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
-	digtable->pre_sta_connectstate = DIG_STA_DISCONNECT;
-	digtable->cur_ap_connectstate = DIG_AP_DISCONNECT;
-	digtable->pre_ap_connectstate = DIG_AP_DISCONNECT;
+	digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
+	digtable->pre_sta_cstate = DIG_STA_DISCONNECT;
+	digtable->cur_ap_cstate = DIG_AP_DISCONNECT;
+	digtable->pre_ap_cstate = DIG_AP_DISCONNECT;
 
 	digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
 	digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
@@ -681,7 +676,7 @@
 
 	/* for dig debug rssi value */
 	digtable->rssi_val = 50;
-	digtable->backoff_val = DM_DIG_BACKOFF;
+	digtable->back_val = DM_DIG_BACKOFF;
 	digtable->rx_gain_range_max = DM_DIG_MAX;
 
 	digtable->rx_gain_range_min = DM_DIG_MIN;
@@ -709,7 +704,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
 	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
-	rtlpriv->dm.undecorated_smoothed_pwdb = -1;
+	rtlpriv->dm.undec_sm_pwdb = -1;
 
 	_rtl92s_dm_init_dynamic_txpower(hw);
 	rtl92s_dm_init_edca_turbo(hw);
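
The back_val rename in this file touches the DIG (dynamic initial gain)
backoff: a high false-alarm count lowers back_val by 6 and a low count raises
it by 6, clamped to the digtable range, and the initial gain is then
rssi_val + 10 - back_val, clamped to the RX gain range. A small sketch of
that arithmetic; it is illustrative only, and the range limits below are
placeholders for the digtable fields rather than driver values.

/* Illustrative only -- range constants are placeholders, not driver values. */
#include <stdio.h>

#define BACKOFF_MIN	12	/* placeholder for backoffval_range_min */
#define BACKOFF_MAX	30	/* placeholder for backoffval_range_max */
#define IG_MIN		0x1c	/* placeholder for rx_gain_range_min */
#define IG_MAX		0x4e	/* placeholder for rx_gain_range_max */

static int adjust_backoff(int back_val, unsigned int fa_cnt,
			  unsigned int fa_low, unsigned int fa_high)
{
	if (fa_cnt > fa_high)
		back_val = (back_val - 6 < BACKOFF_MIN) ?
			   BACKOFF_MIN : back_val - 6;
	else if (fa_cnt < fa_low)
		back_val = (back_val + 6 > BACKOFF_MAX) ?
			   BACKOFF_MAX : back_val + 6;
	return back_val;
}

static int initial_gain(int rssi, int back_val)
{
	int ig = rssi + 10 - back_val;

	if (ig > IG_MAX)
		ig = IG_MAX;
	else if (ig < IG_MIN)
		ig = IG_MIN;
	return ig;
}

int main(void)
{
	/* 1200 false alarms in the period: back off harder, raise the gain */
	int back_val = adjust_backoff(24, 1200, 400, 1000);

	printf("back_val=%d igi=0x%x\n", back_val,
	       (unsigned int)initial_gain(50, back_val));
	return 0;
}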
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4542e69..1d72779 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1697,7 +1697,7 @@
 			hwinfo[EEPROM_TXPOWERBASE + 6 + rf_path * 3 + i];
 
 			/* Read OFDM RF A & B Tx power for 2T */
-			rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]
+			rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][i]
 				 = hwinfo[EEPROM_TXPOWERBASE + 12 +
 				   rf_path * 3 + i];
 		}
@@ -1722,7 +1722,7 @@
 			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
 				"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
 				rf_path, i,
-				rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
+				rtlefuse->eprom_chnl_txpwr_ht40_2sdf
 				[rf_path][i]);
 
 	for (rf_path = 0; rf_path < 2; rf_path++) {
@@ -1748,7 +1748,7 @@
 				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
 							[rf_path][index];
 			rtlefuse->txpwrlevel_ht40_2s[rf_path][i]  =
-				rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
+				rtlefuse->eprom_chnl_txpwr_ht40_2sdf
 							[rf_path][index];
 		}
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index b917a2a..6740497 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -139,17 +139,17 @@
 						BIT(8));
 
 	if (rfpi_enable)
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
 					 BLSSI_READBACK_DATA);
 	else
-		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
 					 BLSSI_READBACK_DATA);
 
-	retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+	retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
 				 BLSSI_READBACK_DATA);
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
-		 rfpath, pphyreg->rflssi_readback, retvalue);
+		 rfpath, pphyreg->rf_rb, retvalue);
 
 	return retvalue;
 
@@ -696,7 +696,7 @@
 	else
 		return;
 
-	rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+	rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
 	if (index == 5)
 		rtlphy->pwrgroup_cnt++;
 }
@@ -765,14 +765,10 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para2 = RFPGA0_XD_HSSIPARAMETER2;
 
 	/* RF switch Control */
-	rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
-						 RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
-						 RFPGA0_XAB_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
-						 RFPGA0_XCD_SWITCHCONTROL;
-	rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
-						 RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
 
 	/* AGC control 1  */
 	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -787,14 +783,10 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
 
 	/* RX AFE control 1  */
-	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
-						 ROFDM0_XARXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
-						 ROFDM0_XBRXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
-						 ROFDM0_XCRXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
-						 ROFDM0_XDRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
 
 	/* RX AFE control 2 */
 	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -803,14 +795,10 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
 
 	/* Tx AFE control 1  */
-	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
-						 ROFDM0_XATXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
-						 ROFDM0_XBTXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
-						 ROFDM0_XCTXIQIMBALANCE;
-	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
-						 ROFDM0_XDTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
 
 	/* Tx AFE control 2  */
 	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
@@ -819,20 +807,14 @@
 	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
 
 	/* Transceiver LSSI Readback */
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
-			 RFPGA0_XA_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
-			 RFPGA0_XB_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
-			 RFPGA0_XC_LSSIREADBACK;
-	rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
-			 RFPGA0_XD_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
 
 	/* Transceiver LSSI Readback PI mode */
-	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
-			 TRANSCEIVERA_HSPI_READBACK;
-	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
-			 TRANSCEIVERB_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
 }
 
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 08c2f56..5061f1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -192,8 +192,7 @@
 		 * defined by Realtek for large power */
 		chnlgroup = 0;
 
-		writeval = rtlphy->mcs_txpwrlevel_origoffset
-				[chnlgroup][index] +
+		writeval = rtlphy->mcs_offset[chnlgroup][index] +
 				((index < 2) ? pwrbase0 : pwrbase1);
 
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
@@ -223,8 +222,7 @@
 					chnlgroup++;
 			}
 
-			writeval = rtlphy->mcs_txpwrlevel_origoffset
-					[chnlgroup][index]
+			writeval = rtlphy->mcs_offset[chnlgroup][index]
 					+ ((index < 2) ?
 					pwrbase0 : pwrbase1);
 
@@ -257,8 +255,7 @@
 		}
 
 		for (i = 0; i < 4; i++) {
-			pwrdiff_limit[i] =
-				(u8)((rtlphy->mcs_txpwrlevel_origoffset
+			pwrdiff_limit[i] = (u8)((rtlphy->mcs_offset
 				[chnlgroup][index] & (0x7f << (i * 8)))
 				>> (i * 8));
 
@@ -296,7 +293,7 @@
 		break;
 	default:
 		chnlgroup = 0;
-		writeval = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index] +
+		writeval = rtlphy->mcs_offset[chnlgroup][index] +
 				((index < 2) ? pwrbase0 : pwrbase1);
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "RTK better performance, writeval = 0x%x\n", writeval);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index e3cf4c0..0e9f6eb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -129,8 +129,8 @@
 	pstats->packet_matchbssid = packet_match_bssid;
 	pstats->packet_toself = packet_toself;
 	pstats->packet_beacon = packet_beacon;
-	pstats->rx_mimo_signalquality[0] = -1;
-	pstats->rx_mimo_signalquality[1] = -1;
+	pstats->rx_mimo_sig_qual[0] = -1;
+	pstats->rx_mimo_sig_qual[1] = -1;
 
 	if (is_cck) {
 		u8 report, cck_highpwr;
@@ -216,8 +216,8 @@
 			}
 
 			pstats->signalquality = sq;
-			pstats->rx_mimo_signalquality[0] = sq;
-			pstats->rx_mimo_signalquality[1] = -1;
+			pstats->rx_mimo_sig_qual[0] = sq;
+			pstats->rx_mimo_sig_qual[1] = -1;
 		}
 	} else {
 		rtlpriv->dm.rfpath_rxenable[0] =
@@ -256,8 +256,7 @@
 				if (i == 0)
 					pstats->signalquality = (u8)(evm &
 								 0xff);
-				pstats->rx_mimo_signalquality[i] =
-							 (u8) (evm & 0xff);
+				pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
 			}
 		}
 	}
@@ -366,7 +365,7 @@
 		return;
 	} else {
 		undec_sm_pwdb =
-		    rtlpriv->dm.undecorated_smoothed_pwdb;
+		    rtlpriv->dm.undec_sm_pwdb;
 	}
 
 	if (pstats->packet_toself || pstats->packet_beacon) {
@@ -386,7 +385,7 @@
 			      (RX_SMOOTH_FACTOR);
 		}
 
-		rtlpriv->dm.undecorated_smoothed_pwdb = undec_sm_pwdb;
+		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
 		_rtl92se_update_rxsignalstatistics(hw, pstats);
 	}
 }
@@ -398,16 +397,16 @@
 	u32 stream;
 
 	for (stream = 0; stream < 2; stream++) {
-		if (pstats->rx_mimo_signalquality[stream] != -1) {
+		if (pstats->rx_mimo_sig_qual[stream] != -1) {
 			if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
 				rtlpriv->stats.rx_evm_percentage[stream] =
-				    pstats->rx_mimo_signalquality[stream];
+				    pstats->rx_mimo_sig_qual[stream];
 			}
 
 			rtlpriv->stats.rx_evm_percentage[stream] =
 			    ((rtlpriv->stats.rx_evm_percentage[stream] *
 					(RX_SMOOTH_FACTOR - 1)) +
-			     (pstats->rx_mimo_signalquality[stream] *
+			     (pstats->rx_mimo_sig_qual[stream] *
 					1)) / (RX_SMOOTH_FACTOR);
 		}
 	}
@@ -554,7 +553,7 @@
 	if (stats->is_ht)
 		rx_status->flag |= RX_FLAG_HT;
 
-	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+	rx_status->flag |= RX_FLAG_MACTIME_START;
 
 	/* hw will set stats->decrypted true, if it finds the
 	 * frame is open data frame or mgmt frame,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
new file mode 100644
index 0000000..4ed731f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -0,0 +1,22 @@
+obj-m := rtl8723ae.o
+
+
+rtl8723ae-objs :=		\
+		dm.o		\
+		fw.o		\
+		hal_btc.o	\
+		hal_bt_coexist.o\
+		hw.o		\
+		led.o		\
+		phy.o		\
+		pwrseq.o	\
+		pwrseqcmd.o	\
+		rf.o		\
+		sw.o		\
+		table.o		\
+		trx.o		\
+
+
+obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
new file mode 100644
index 0000000..417afee
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
@@ -0,0 +1,41 @@
+/******************************************************************************
+ **
+ ** Copyright(c) 2009-2012  Realtek Corporation.
+ **
+ ** This program is free software; you can redistribute it and/or modify it
+ ** under the terms of version 2 of the GNU General Public License as
+ ** published by the Free Software Foundation.
+ **
+ ** This program is distributed in the hope that it will be useful, but WITHOUT
+ ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ ** FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ ** more details.
+ **
+ ** You should have received a copy of the GNU General Public License along with
+ ** this program; if not, write to the Free Software Foundation, Inc.,
+ ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ **
+ ** The full GNU General Public License is included in this distribution in the
+ ** file called LICENSE.
+ **
+ ** Contact Information:
+ ** wlanfae <wlanfae@realtek.com>
+ ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ ** Hsinchu 300, Taiwan.
+ ** Larry Finger <Larry.Finger@lwfinger.net>
+ **
+ *****************************************************************************
+ */
+
+#ifndef __RTL8723E_BTC_H__
+#define __RTL8723E_BTC_H__
+
+#include "../wifi.h"
+#include "hal_bt_coexist.h"
+
+struct bt_coexist_c2h_info {
+	u8 no_parse_c2h;
+	u8 has_c2h;
+};
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
new file mode 100644
index 0000000..8c11035
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -0,0 +1,163 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#ifndef __RTL8723E_DEF_H__
+#define __RTL8723E_DEF_H__
+
+#define HAL_PRIME_CHNL_OFFSET_LOWER			1
+
+#define RX_MPDU_QUEUE					0
+
+#define CHIP_8723			BIT(0)
+#define NORMAL_CHIP			BIT(3)
+#define RF_TYPE_1T2R			BIT(4)
+#define RF_TYPE_2T2R			BIT(5)
+#define CHIP_VENDOR_UMC			BIT(7)
+#define B_CUT_VERSION			BIT(12)
+#define C_CUT_VERSION			BIT(13)
+#define D_CUT_VERSION			((BIT(12)|BIT(13)))
+#define E_CUT_VERSION			BIT(14)
+#define	RF_RL_ID			(BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+enum version_8723e {
+	VERSION_TEST_UMC_CHIP_8723 = 0x0081,
+	VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
+	VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
+};
+
+/* MASK */
+#define IC_TYPE_MASK			(BIT(0)|BIT(1)|BIT(2))
+#define CHIP_TYPE_MASK			BIT(3)
+#define RF_TYPE_MASK			(BIT(4)|BIT(5)|BIT(6))
+#define MANUFACTUER_MASK		BIT(7)
+#define ROM_VERSION_MASK		(BIT(11)|BIT(10)|BIT(9)|BIT(8))
+#define CUT_VERSION_MASK		(BIT(15)|BIT(14)|BIT(13)|BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version)	((version) & IC_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version)	((version) & MANUFACTUER_MASK)
+#define GET_CVID_CUT_VERSION(version)	((version) & CUT_VERSION_MASK)
+
+#define IS_81XXC(version)		((GET_CVID_IC_TYPE(version) == 0) ?\
+					true : false)
+#define IS_8723_SERIES(version)						\
+		((GET_CVID_IC_TYPE(version) == CHIP_8723) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version)					\
+		((GET_CVID_MANUFACTUER(version)) ? true : false)
+
+#define IS_VENDOR_UMC_A_CUT(version)	((IS_CHIP_VENDOR_UMC(version)) ? \
+		((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
+#define IS_VENDOR_8723_A_CUT(version)	((IS_8723_SERIES(version)) ?	\
+		((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
+#define IS_81xxC_VENDOR_UMC_B_CUT(version)	((IS_CHIP_VENDOR_UMC(version)) \
+		? ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \
+		true : false) : false)
+
+enum rf_optype {
+	RF_OP_BY_SW_3WIRE = 0,
+	RF_OP_BY_FW,
+	RF_OP_MAX
+};
+
+enum rf_power_state {
+	RF_ON,
+	RF_OFF,
+	RF_SLEEP,
+	RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+	POWER_SAVE_MODE_ACTIVE,
+	POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+	POWERCFG_MAX_POWER_SAVINGS,
+	POWERCFG_GLOBAL_POWER_SAVINGS,
+	POWERCFG_LOCAL_POWER_SAVINGS,
+	POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+	INTF_SEL1_MINICARD = 0,
+	INTF_SEL0_PCIE = 1,
+	INTF_SEL2_RSV = 2,
+	INTF_SEL3_RSV = 3,
+};
+
+enum hal_fw_c2h_cmd_id {
+	HAL_FW_C2H_CMD_Read_MACREG = 0,
+	HAL_FW_C2H_CMD_Read_BBREG = 1,
+	HAL_FW_C2H_CMD_Read_RFREG = 2,
+	HAL_FW_C2H_CMD_Read_EEPROM = 3,
+	HAL_FW_C2H_CMD_Read_EFUSE = 4,
+	HAL_FW_C2H_CMD_Read_CAM = 5,
+	HAL_FW_C2H_CMD_Get_BasicRate = 6,
+	HAL_FW_C2H_CMD_Get_DataRate = 7,
+	HAL_FW_C2H_CMD_Survey = 8,
+	HAL_FW_C2H_CMD_SurveyDone = 9,
+	HAL_FW_C2H_CMD_JoinBss = 10,
+	HAL_FW_C2H_CMD_AddSTA = 11,
+	HAL_FW_C2H_CMD_DelSTA = 12,
+	HAL_FW_C2H_CMD_AtimDone = 13,
+	HAL_FW_C2H_CMD_TX_Report = 14,
+	HAL_FW_C2H_CMD_CCX_Report = 15,
+	HAL_FW_C2H_CMD_DTM_Report = 16,
+	HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
+	HAL_FW_C2H_CMD_C2HLBK = 18,
+	HAL_FW_C2H_CMD_C2HDBG = 19,
+	HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
+	HAL_FW_C2H_CMD_MAX
+};
+
+enum rtl_desc_qsel {
+	QSLT_BK = 0x2,
+	QSLT_BE = 0x0,
+	QSLT_VI = 0x5,
+	QSLT_VO = 0x7,
+	QSLT_BEACON = 0x10,
+	QSLT_HIGH = 0x11,
+	QSLT_MGNT = 0x12,
+	QSLT_CMD = 0x13,
+};
+
+struct phy_sts_cck_8723e_t {
+	u8 adc_pwdb_X[4];
+	u8 sq_rpt;
+	u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8723e {
+	u8 element_id;
+	u32 cmd_len;
+	u8 *p_cmdbuffer;
+};
+
+#endif
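
The version word decoded by the masks above packs IC type, chip type, vendor
and cut revision into one 16-bit value. The standalone sketch below
(illustrative only, not part of the patch) redefines just the masks used here
and decodes 0x1089, the VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT value from
the enum above.

/* Illustrative only; masks copied from def.h above, value 0x1089 is
 * VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT from the same header. */
#include <stdio.h>

#define BIT(x)			(1U << (x))
#define CHIP_8723		BIT(0)
#define CHIP_VENDOR_UMC		BIT(7)
#define B_CUT_VERSION		BIT(12)

#define IC_TYPE_MASK		(BIT(0) | BIT(1) | BIT(2))
#define CHIP_TYPE_MASK		BIT(3)
#define MANUFACTUER_MASK	BIT(7)
#define CUT_VERSION_MASK	(BIT(15) | BIT(14) | BIT(13) | BIT(12))

int main(void)
{
	unsigned int version = 0x1089;

	printf("8723 series: %s\n",
	       (version & IC_TYPE_MASK) == CHIP_8723 ? "yes" : "no");
	printf("normal chip: %s\n",
	       (version & CHIP_TYPE_MASK) ? "yes" : "no");
	printf("UMC vendor:  %s\n",
	       (version & MANUFACTUER_MASK) ? "yes" : "no");
	printf("B-cut:       %s\n",
	       (version & CUT_VERSION_MASK) == B_CUT_VERSION ? "yes" : "no");
	return 0;
}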
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
new file mode 100644
index 0000000..12e2a3c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -0,0 +1,920 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "hal_btc.h"
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
+	0x7f8001fe,
+	0x788001e2,
+	0x71c001c7,
+	0x6b8001ae,
+	0x65400195,
+	0x5fc0017f,
+	0x5a400169,
+	0x55400155,
+	0x50800142,
+	0x4c000130,
+	0x47c0011f,
+	0x43c0010f,
+	0x40000100,
+	0x3c8000f2,
+	0x390000e4,
+	0x35c000d7,
+	0x32c000cb,
+	0x300000c0,
+	0x2d4000b5,
+	0x2ac000ab,
+	0x288000a2,
+	0x26000098,
+	0x24000090,
+	0x22000088,
+	0x20000080,
+	0x1e400079,
+	0x1c800072,
+	0x1b00006c,
+	0x19800066,
+	0x18000060,
+	0x16c0005b,
+	0x15800056,
+	0x14400051,
+	0x1300004c,
+	0x12000048,
+	0x11000044,
+	0x10000040,
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+	{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
+	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+	{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
+	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+	{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
+	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+	{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
+	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+	{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
+	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+	{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
+	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+	{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
+	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+	{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
+	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+	{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
+	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+	{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+	{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
+	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
+	{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
+	{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
+	{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
+	{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
+	{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
+	{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
+	{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
+	{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
+	{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
+	{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+	{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
+	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+	{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
+	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+	{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
+	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+	{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
+	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+	{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
+	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+	{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
+	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+	{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
+	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+	{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
+	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+	{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
+	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+	{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+	{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
+	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
+	{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+	{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+	{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
+	{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+	{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+	{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+	{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+	{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+	{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+	{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+};
+
+static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	dm_digtable->dig_enable_flag = true;
+	dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+	dm_digtable->cur_igvalue = 0x20;
+	dm_digtable->pre_igvalue = 0x0;
+	dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+	dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
+	dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
+	dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+	dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+	dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+	dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+	dm_digtable->rx_gain_range_max = DM_DIG_MAX;
+	dm_digtable->rx_gain_range_min = DM_DIG_MIN;
+	dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+	dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+	dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
+	dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
+	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+}
+
+static u8 rtl_init_gain_min_pwdb(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+	long rssi_val_min = 0;
+
+	if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
+	    (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
+		if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
+			rssi_val_min =
+			    (rtlpriv->dm.entry_min_undec_sm_pwdb >
+			     rtlpriv->dm.undec_sm_pwdb) ?
+			    rtlpriv->dm.undec_sm_pwdb :
+			    rtlpriv->dm.entry_min_undec_sm_pwdb;
+		else
+			rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+	} else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
+		   dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
+		rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+	} else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
+		rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
+	}
+
+	return (u8) rssi_val_min;
+}
+
+static void rtl8723ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+	u32 ret_value;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+	falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+
+	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+	falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+	falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+
+	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+	    falsealm_cnt->cnt_rate_illegal +
+	    falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+
+	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
+	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+	falsealm_cnt->cnt_cck_fail = ret_value;
+
+	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+	falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+	falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
+				 falsealm_cnt->cnt_rate_illegal +
+				 falsealm_cnt->cnt_crc8_fail +
+				 falsealm_cnt->cnt_mcs_fail +
+				 falsealm_cnt->cnt_cck_fail);
+
+	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+		 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+		 falsealm_cnt->cnt_parity_fail,
+		 falsealm_cnt->cnt_rate_illegal,
+		 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+		 falsealm_cnt->cnt_ofdm_fail,
+		 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+}
+
+static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+	u8 value_igi = dm_digtable->cur_igvalue;
+
+	if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+		value_igi--;
+	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
+		value_igi += 0;
+	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
+		value_igi++;
+	else
+		value_igi += 2;
+
+	value_igi = clamp(value_igi, (u8)DM_DIG_FA_LOWER, (u8)DM_DIG_FA_UPPER);
+	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+		value_igi = 0x32;
+
+	dm_digtable->cur_igvalue = value_igi;
+	rtl8723ae_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dgtbl = &rtlpriv->dm_digtable;
+
+	if (rtlpriv->falsealm_cnt.cnt_all > dgtbl->fa_highthresh) {
+		if ((dgtbl->back_val - 2) < dgtbl->back_range_min)
+			dgtbl->back_val = dgtbl->back_range_min;
+		else
+			dgtbl->back_val -= 2;
+	} else if (rtlpriv->falsealm_cnt.cnt_all < dgtbl->fa_lowthresh) {
+		if ((dgtbl->back_val + 2) > dgtbl->back_range_max)
+			dgtbl->back_val = dgtbl->back_range_max;
+		else
+			dgtbl->back_val += 2;
+	}
+
+	if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) >
+	    dgtbl->rx_gain_range_max)
+		dgtbl->cur_igvalue = dgtbl->rx_gain_range_max;
+	else if ((dgtbl->rssi_val_min + 10 -
+		  dgtbl->back_val) < dgtbl->rx_gain_range_min)
+		dgtbl->cur_igvalue = dgtbl->rx_gain_range_min;
+	else
+		dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val;
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "rssi_val_min = %x back_val %x\n",
+		 dgtbl->rssi_val_min, dgtbl->back_val);
+
+	rtl8723ae_dm_write_dig(hw);
+}
+
+static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+	long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
+	bool multi_sta = false;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		multi_sta = true;
+
+	if ((!multi_sta) ||
+	    (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT)) {
+		rtlpriv->initialized = false;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+		return;
+	} else if (!rtlpriv->initialized) {
+		rtlpriv->initialized = true;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+		dm_digtable->cur_igvalue = 0x20;
+		rtl8723ae_dm_write_dig(hw);
+	}
+
+	if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
+		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
+		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+
+			if (dm_digtable->dig_ext_port_stage ==
+			    DIG_EXT_PORT_STAGE_2) {
+				dm_digtable->cur_igvalue = 0x20;
+				rtl8723ae_dm_write_dig(hw);
+			}
+
+			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+		} else if (rssi_strength > dm_digtable->rssi_highthresh) {
+			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+			rtl92c_dm_ctrl_initgain_by_fa(hw);
+		}
+	} else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+		dm_digtable->cur_igvalue = 0x20;
+		rtl8723ae_dm_write_dig(hw);
+	}
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "curmultista_cstate = %x dig_ext_port_stage %x\n",
+		 dm_digtable->curmultista_cstate,
+		 dm_digtable->dig_ext_port_stage);
+}
+
+static void rtl8723ae_dm_initial_gain_sta(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "presta_cstate = %x, cursta_cstate = %x\n",
+		 dm_digtable->presta_cstate,
+		 dm_digtable->cursta_cstate);
+
+	if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
+	    dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
+	    dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
+
+		if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
+			dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);
+			rtl92c_dm_ctrl_initgain_by_rssi(hw);
+		}
+	} else {
+		dm_digtable->rssi_val_min = 0;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+		dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+		dm_digtable->cur_igvalue = 0x20;
+		dm_digtable->pre_igvalue = 0;
+		rtl8723ae_dm_write_dig(hw);
+	}
+}
+static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
+		dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);
+
+		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+			if (dm_digtable->rssi_val_min <= 25)
+				dm_digtable->cur_cck_pd_state =
+				    CCK_PD_STAGE_LowRssi;
+			else
+				dm_digtable->cur_cck_pd_state =
+				    CCK_PD_STAGE_HighRssi;
+		} else {
+			if (dm_digtable->rssi_val_min <= 20)
+				dm_digtable->cur_cck_pd_state =
+				    CCK_PD_STAGE_LowRssi;
+			else
+				dm_digtable->cur_cck_pd_state =
+				    CCK_PD_STAGE_HighRssi;
+		}
+	} else {
+		dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+	}
+
+	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
+		if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
+				dm_digtable->cur_cck_fa_state =
+				    CCK_FA_STAGE_High;
+			else
+				dm_digtable->cur_cck_fa_state =
+							 CCK_FA_STAGE_Low;
+
+			if (dm_digtable->pre_cck_fa_state !=
+			    dm_digtable->cur_cck_fa_state) {
+				if (dm_digtable->cur_cck_fa_state ==
+				    CCK_FA_STAGE_Low)
+					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+						      0x83);
+				else
+					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+						      0xcd);
+
+				dm_digtable->pre_cck_fa_state =
+				    dm_digtable->cur_cck_fa_state;
+			}
+
+			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
+
+		} else {
+			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+
+		}
+		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
+	}
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);
+
+}
+
+static void rtl8723ae_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	if (mac->act_scanning == true)
+		return;
+
+	if (mac->link_state >= MAC80211_LINKED)
+		dm_digtable->cursta_cstate = DIG_STA_CONNECT;
+	else
+		dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+
+	rtl8723ae_dm_initial_gain_sta(hw);
+	rtl8723ae_dm_initial_gain_multi_sta(hw);
+	rtl8723ae_dm_cck_packet_detection_thresh(hw);
+
+	dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
+
+}
+
+static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	if (rtlpriv->dm.dm_initialgain_enable == false)
+		return;
+	if (dm_digtable->dig_enable_flag == false)
+		return;
+
+	rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
+}
+
+static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.dynamic_txpower_enable = false;
+
+	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	long undec_sm_pwdb;
+
+	if (!rtlpriv->dm.dynamic_txpower_enable)
+		return;
+
+	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+		return;
+	}
+
+	if ((mac->link_state < MAC80211_LINKED) &&
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+			 "Not connected\n");
+
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+		return;
+	}
+
+	if (mac->link_state >= MAC80211_LINKED) {
+		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
+			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+				 "AP Client PWDB = 0x%lx\n",
+				 undec_sm_pwdb);
+		} else {
+			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+				 "STA Default Port PWDB = 0x%lx\n",
+				 undec_sm_pwdb);
+		}
+	} else {
+		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
+
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "AP Ext Port PWDB = 0x%lx\n",
+			  undec_sm_pwdb);
+	}
+
+	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "TXHIGHPWRLEVEL_NORMAL\n");
+	}
+
+	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+			  rtlphy->current_channel);
+		rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+	}
+
+	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
+
+void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+		 "cur_igvalue = 0x%x, "
+		 "pre_igvalue = 0x%x, back_val = %d\n",
+		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+		 dm_digtable->back_val);
+
+	if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
+		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+			      dm_digtable->cur_igvalue);
+		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+			      dm_digtable->cur_igvalue);
+
+		dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
+	}
+}
+
+static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.current_turbo_edca = false;
+	rtlpriv->dm.is_any_nonbepkts = false;
+	rtlpriv->dm.is_cur_rdlstate = false;
+}
+
+static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	u64 cur_txok_cnt = 0;
+	u64 cur_rxok_cnt = 0;
+	u32 edca_be_ul = 0x5ea42b;
+	u32 edca_be_dl = 0x5ea42b;
+	bool bt_change_edca = false;
+
+	if ((mac->last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
+	    (mac->last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
+		rtlpriv->dm.current_turbo_edca = false;
+		mac->last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
+		mac->last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
+	}
+
+	if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
+		edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
+		bt_change_edca = true;
+	}
+
+	if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
+		edca_be_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
+		bt_change_edca = true;
+	}
+
+	if (mac->link_state != MAC80211_LINKED) {
+		rtlpriv->dm.current_turbo_edca = false;
+		return;
+	}
+
+	if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
+		if (!(edca_be_ul & 0xffff0000))
+			edca_be_ul |= 0x005e0000;
+
+		if (!(edca_be_dl & 0xffff0000))
+			edca_be_dl |= 0x005e0000;
+	}
+
+	if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
+	     (!rtlpriv->dm.disable_framebursting))) {
+
+		cur_txok_cnt = rtlpriv->stats.txbytesunicast -
+			       mac->last_txok_cnt;
+		cur_rxok_cnt = rtlpriv->stats.rxbytesunicast -
+			       mac->last_rxok_cnt;
+
+		if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+			if (!rtlpriv->dm.is_cur_rdlstate ||
+			    !rtlpriv->dm.current_turbo_edca) {
+				rtl_write_dword(rtlpriv,
+						REG_EDCA_BE_PARAM,
+						edca_be_dl);
+				rtlpriv->dm.is_cur_rdlstate = true;
+			}
+		} else {
+			if (rtlpriv->dm.is_cur_rdlstate ||
+			    !rtlpriv->dm.current_turbo_edca) {
+				rtl_write_dword(rtlpriv,
+						REG_EDCA_BE_PARAM,
+						edca_be_ul);
+				rtlpriv->dm.is_cur_rdlstate = false;
+			}
+		}
+		rtlpriv->dm.current_turbo_edca = true;
+	} else {
+		if (rtlpriv->dm.current_turbo_edca) {
+			u8 tmp = AC0_BE;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_AC_PARAM,
+						      (u8 *) (&tmp));
+			rtlpriv->dm.current_turbo_edca = false;
+		}
+	}
+
+	rtlpriv->dm.is_any_nonbepkts = false;
+	mac->last_txok_cnt = rtlpriv->stats.txbytesunicast;
+	mac->last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8723ae_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.txpower_tracking = true;
+	rtlpriv->dm.txpower_trackinginit = false;
+
+	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+		 "pMgntInfo->txpower_tracking = %d\n",
+		 rtlpriv->dm.txpower_tracking);
+}
+
+void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+	p_ra->ratr_state = DM_RATR_STA_INIT;
+	p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+	if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+		rtlpriv->dm.useramask = true;
+	else
+		rtlpriv->dm.useramask = false;
+}
+
+static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
+	rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
+	rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
+	rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
+	rtlpriv->dm_pstable.rssi_val_min = 0;
+}
+
+void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+
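+	/* Cache the original BB fields once so RF_NORMAL can restore them. */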
+	if (!rtlpriv->reg_init) {
+		rtlpriv->reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				    MASKDWORD) & 0x1CC000) >> 14;
+
+		rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+				    MASKDWORD) & BIT(3)) >> 3;
+
+		rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+				    MASKDWORD) & 0xFF000000) >> 24;
+
+		rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
+				   0xF000) >> 12;
+
+		rtlpriv->reg_init = true;
+	}
+
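+	/* Hysteresis: enter RF_SAVE above RSSI 30, leave below 25. */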
+	if (!force_in_normal) {
+		if (dm_pstable->rssi_val_min != 0) {
+			if (dm_pstable->pre_rfstate == RF_NORMAL) {
+				if (dm_pstable->rssi_val_min >= 30)
+					dm_pstable->cur_rfstate = RF_SAVE;
+				else
+					dm_pstable->cur_rfstate = RF_NORMAL;
+			} else {
+				if (dm_pstable->rssi_val_min <= 25)
+					dm_pstable->cur_rfstate = RF_NORMAL;
+				else
+					dm_pstable->cur_rfstate = RF_SAVE;
+			}
+		} else {
+			dm_pstable->cur_rfstate = RF_MAX;
+		}
+	} else {
+		dm_pstable->cur_rfstate = RF_NORMAL;
+	}
+
+	if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
+		if (dm_pstable->cur_rfstate == RF_SAVE) {
+
+			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				      BIT(5), 0x1);
+			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				      0x1C0000, 0x2);
+			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
+			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+				      0xFF000000, 0x63);
+			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				      0xC000, 0x2);
+			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
+			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
+		} else {
+			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				      0x1CC000, rtlpriv->reg_874);
+			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
+				      rtlpriv->reg_c70);
+			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
+				      rtlpriv->reg_85c);
+			rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
+			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+				      BIT(5), 0x0);
+		}
+
+		dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
+	}
+}
+
+static void rtl8723ae_dm_dynamic_bpowersaving(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+
+	if ((mac->link_state == MAC80211_NOLINK) &&
+	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
+		dm_pstable->rssi_val_min = 0;
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "Not connected to any\n");
+	}
+
+	if (mac->link_state == MAC80211_LINKED) {
+		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+			dm_pstable->rssi_val_min =
+			    rtlpriv->dm.entry_min_undec_sm_pwdb;
+			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+				 "AP Client PWDB = 0x%lx\n",
+				 dm_pstable->rssi_val_min);
+		} else {
+			dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+				 "STA Default Port PWDB = 0x%lx\n",
+				 dm_pstable->rssi_val_min);
+		}
+	} else {
+		dm_pstable->rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
+
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "AP Ext Port PWDB = 0x%lx\n",
+			 dm_pstable->rssi_val_min);
+	}
+
+	rtl8723ae_dm_rf_saving(hw, false);
+}
+
+void rtl8723ae_dm_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+	rtl8723ae_dm_diginit(hw);
+	rtl8723ae_dm_init_dynamic_txpower(hw);
+	rtl8723ae_dm_init_edca_turbo(hw);
+	rtl8723ae_dm_init_rate_adaptive_mask(hw);
+	rtl8723ae_dm_initialize_txpower_tracking(hw);
+	rtl8723ae_dm_init_dynamic_bpowersaving(hw);
+}
+
+void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	bool fw_current_inpsmode = false;
+	bool fw_ps_awake = true;
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				      (u8 *) (&fw_current_inpsmode));
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+				      (u8 *) (&fw_ps_awake));
+
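+	/* Only run while RF is on, FW is awake, and no RF change is pending. */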
+	if ((ppsc->rfpwr_state == ERFON) &&
+	    ((!fw_current_inpsmode) && fw_ps_awake) &&
+	    (!ppsc->rfchange_inprogress)) {
+		rtl8723ae_dm_pwdmonitor(hw);
+		rtl8723ae_dm_dig(hw);
+		rtl8723ae_dm_false_alarm_counter_statistics(hw);
+		rtl8723ae_dm_dynamic_bpowersaving(hw);
+		rtl8723ae_dm_dynamic_txpower(hw);
+		/* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
+		rtl8723ae_dm_bt_coexist(hw);
+		rtl8723ae_dm_check_edca_turbo(hw);
+	}
+	if (rtlpcipriv->bt_coexist.init_set)
+		rtl_write_byte(rtlpriv, 0x76e, 0xc);
+}
+
+static void rtl8723ae_dm_init_bt_coexist(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	rtlpcipriv->bt_coexist.bt_rfreg_origin_1e
+		= rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK1, 0xfffff);
+	rtlpcipriv->bt_coexist.bt_rfreg_origin_1f
+		= rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK2, 0xf0);
+
+	rtlpcipriv->bt_coexist.cstate = 0;
+	rtlpcipriv->bt_coexist.previous_state = 0;
+	rtlpcipriv->bt_coexist.cstate_h = 0;
+	rtlpcipriv->bt_coexist.previous_state_h = 0;
+	rtlpcipriv->bt_coexist.lps_counter = 0;
+
+	/*  Enable counter statistics */
+	rtl_write_byte(rtlpriv, 0x76e, 0x4);
+	rtl_write_byte(rtlpriv, 0x778, 0x3);
+	rtl_write_byte(rtlpriv, 0x40, 0x20);
+
+	rtlpcipriv->bt_coexist.init_set = true;
+}
+
+void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 tmp_byte = 0;
+	if (!rtlpcipriv->bt_coexist.bt_coexistence) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+			 "[DM]{BT], BT not exist!!\n");
+		return;
+	}
+
+	if (!rtlpcipriv->bt_coexist.init_set) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+			 "[DM][BT], rtl8723ae_dm_bt_coexist()\n");
+
+		rtl8723ae_dm_init_bt_coexist(hw);
+	}
+
+	tmp_byte = rtl_read_byte(rtlpriv, 0x40);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+		 "[DM][BT], 0x40 is 0x%x", tmp_byte);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[DM][BT], bt_dm_coexist start");
+	rtl8723ae_dm_bt_coexist_8723(hw);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
new file mode 100644
index 0000000..39d2461
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -0,0 +1,149 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#ifndef	__RTL8723E_DM_H__
+#define __RTL8723E_DM_H__
+
+#define HAL_DM_HIPWR_DISABLE			BIT(1)
+
+#define OFDM_TABLE_SIZE				37
+#define CCK_TABLE_SIZE				33
+
+#define DM_DIG_THRESH_HIGH			40
+#define DM_DIG_THRESH_LOW			35
+
+#define DM_FALSEALARM_THRESH_LOW		400
+#define DM_FALSEALARM_THRESH_HIGH		1000
+
+#define DM_DIG_MAX				0x3e
+#define DM_DIG_MIN				0x1e
+
+#define DM_DIG_FA_UPPER				0x32
+#define DM_DIG_FA_LOWER				0x20
+#define DM_DIG_FA_TH0				0x20
+#define DM_DIG_FA_TH1				0x100
+#define DM_DIG_FA_TH2				0x200
+
+#define DM_DIG_BACKOFF_MAX			12
+#define DM_DIG_BACKOFF_MIN			(-4)
+#define DM_DIG_BACKOFF_DEFAULT			10
+
+#define DM_RATR_STA_INIT			0
+
+#define TXHIGHPWRLEVEL_NORMAL			0
+#define TXHIGHPWRLEVEL_LEVEL1			1
+#define TXHIGHPWRLEVEL_LEVEL2			2
+#define TXHIGHPWRLEVEL_BT1			3
+#define TXHIGHPWRLEVEL_BT2			4
+
+#define DM_TYPE_BYDRIVER			1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2		74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1		67
+
+struct swat_t {
+	u8 failure_cnt;
+	u8 try_flag;
+	u8 stop_trying;
+	long pre_rssi;
+	long trying_threshold;
+	u8 cur_antenna;
+	u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+	DIG_TYPE_THRESH_HIGH = 0,
+	DIG_TYPE_THRESH_LOW = 1,
+	DIG_TYPE_BACKOFF = 2,
+	DIG_TYPE_RX_GAIN_MIN = 3,
+	DIG_TYPE_RX_GAIN_MAX = 4,
+	DIG_TYPE_ENABLE = 5,
+	DIG_TYPE_DISABLE = 6,
+	DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+	CCK_PD_STAGE_LowRssi = 0,
+	CCK_PD_STAGE_HighRssi = 1,
+	CCK_FA_STAGE_Low = 2,
+	CCK_FA_STAGE_High = 3,
+	CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+	CCA_1R = 0,
+	CCA_2R = 1,
+	CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+	RF_SAVE = 0,
+	RF_NORMAL = 1,
+	RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+	ANS_ANTENNA_B = 1,
+	ANS_ANTENNA_A = 2,
+	ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+	DIG_EXT_PORT_STAGE_0 = 0,
+	DIG_EXT_PORT_STAGE_1 = 1,
+	DIG_EXT_PORT_STAGE_2 = 2,
+	DIG_EXT_PORT_STAGE_3 = 3,
+	DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+	DIG_STA_DISCONNECT = 0,
+	DIG_STA_CONNECT = 1,
+	DIG_STA_BEFORE_CONNECT = 2,
+	DIG_MULTISTA_DISCONNECT = 3,
+	DIG_MULTISTA_CONNECT = 4,
+	DIG_CONNECT_MAX
+};
+
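+/* IBSS mode uses the weakest peer's smoothed PWDB; otherwise the default port's. */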
+#define GET_UNDECORATED_AVERAGE_RSSI(_priv)     \
+	((((struct rtl_priv *)(_priv))->mac80211.opmode ==	\
+	NL80211_IFTYPE_ADHOC) ?  \
+	(((struct rtl_priv *)(_priv))->dm.entry_min_undec_sm_pwdb) \
+	: (((struct rtl_priv *)(_priv))->dm.undec_sm_pwdb))
+
+void rtl8723ae_dm_init(struct ieee80211_hw *hw);
+void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
+void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
new file mode 100644
index 0000000..f55b176
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -0,0 +1,745 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+
+static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp;
+	if (enable) {
+		tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+	} else {
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+		rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+	}
+}
+
+static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
+				      const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 blockSize = sizeof(u32);
+	u8 *bufferPtr = (u8 *) buffer;
+	u32 *pu4BytePtr = (u32 *) buffer;
+	u32 i, offset, blockCount, remainSize;
+
+	blockCount = size / blockSize;
+	remainSize = size % blockSize;
+
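+	/* Copy in 32-bit words, then write any trailing bytes singly. */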
+	for (i = 0; i < blockCount; i++) {
+		offset = i * blockSize;
+		rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+				*(pu4BytePtr + i));
+	}
+
+	if (remainSize) {
+		offset = blockCount * blockSize;
+		bufferPtr += offset;
+		for (i = 0; i < remainSize; i++) {
+			rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
+						 offset + i), *(bufferPtr + i));
+		}
+	}
+}
+
+static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
+				     u32 page, const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value8;
+	u8 u8page = (u8) (page & 0x07);
+
+	value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+	rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+	_rtl8723ae_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
+				enum version_8723e version, u8 *buffer,
+				u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 *bufferPtr = (u8 *) buffer;
+	u32 page_nums, remain_size;
+	u32 page, offset;
+
+	RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+
+	page_nums = size / FW_8192C_PAGE_SIZE;
+	remain_size = size % FW_8192C_PAGE_SIZE;
+
+	if (page_nums > 6) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Page numbers should not be greater then 6\n");
+	}
+
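+	/* Write full pages first, then the tail as a final partial page. */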
+	for (page = 0; page < page_nums; page++) {
+		offset = page * FW_8192C_PAGE_SIZE;
+		_rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
+					 FW_8192C_PAGE_SIZE);
+	}
+
+	if (remain_size) {
+		offset = page_nums * FW_8192C_PAGE_SIZE;
+		page = page_nums;
+		_rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
+					 remain_size);
+	}
+
+	RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
+}
+
+static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int err = -EIO;
+	u32 counter = 0;
+	u32 value32;
+
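+	/* Poll until the firmware posts its checksum report or we time out. */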
+	do {
+		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+	} while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+		 (!(value32 & FWDL_ChkSum_rpt)));
+
+	if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
+			 value32);
+		goto exit;
+	}
+
+	RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+		 "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
+
+	value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+	value32 |= MCUFWDL_RDY;
+	value32 &= ~WINTINI_RDY;
+	rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+	counter = 0;
+
+	do {
+		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+		if (value32 & WINTINI_RDY) {
+			RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+				 "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+				 value32);
+			err = 0;
+			goto exit;
+		}
+
+		mdelay(FW_8192C_POLLING_DELAY);
+
+	} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+	RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+		 "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
+
+exit:
+	return err;
+}
+
+int rtl8723ae_download_fw(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl8723ae_firmware_header *pfwheader;
+	u8 *pfwdata;
+	u32 fwsize;
+	int err;
+	enum version_8723e version = rtlhal->version;
+
+	if (!rtlhal->pfirmware)
+		return 1;
+
+	pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
+	pfwdata = (u8 *) rtlhal->pfirmware;
+	fwsize = rtlhal->fwsize;
+
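+	/* Skip the vendor header, if any, so only the code body is sent. */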
+	if (IS_FW_HEADER_EXIST(pfwheader)) {
+		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
+			 pfwheader->version, pfwheader->signature,
+			 (int)sizeof(struct rtl8723ae_firmware_header));
+
+		pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
+		fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
+	}
+
+	if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
+		rtl8723ae_firmware_selfreset(hw);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+	}
+	_rtl8723ae_enable_fw_download(hw, true);
+	_rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
+	_rtl8723ae_enable_fw_download(hw, false);
+
+	err = _rtl8723ae_fw_free_to_go(hw);
+	if (err) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Firmware is not ready to run!\n");
+	} else {
+		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+			 "Firmware is ready to run!\n");
+	}
+	return 0;
+}
+
+static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 val_hmetfr, val_mcutst_1;
+	bool result = false;
+
+	val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+	val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
+
+	if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
+		result = true;
+	return result;
+}
+
+static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
+					u8 element_id, u32 cmd_len,
+					u8 *p_cmdbuffer)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 boxnum;
+	u16 box_reg = 0, box_extreg = 0;
+	u8 u1tmp;
+	bool isfw_rd = false;
+	bool bwrite_success = false;
+	u8 wait_h2c_limit = 100;
+	u8 wait_writeh2c_limit = 100;
+	u8 boxcontent[4], boxextcontent[2];
+	u32 h2c_waitcounter = 0;
+	unsigned long flag;
+	u8 idx;
+
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
+	while (true) {
+		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+		if (rtlhal->h2c_setinprogress) {
+			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+				 "H2C set in progress! Wait to set..element_id(%d).\n",
+				 element_id);
+
+			while (rtlhal->h2c_setinprogress) {
+				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+						       flag);
+				h2c_waitcounter++;
+				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+					 "Wait 100 us (%d times)...\n",
+					 h2c_waitcounter);
+				udelay(100);
+
+				if (h2c_waitcounter > 1000)
+					return;
+				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+						  flag);
+			}
+			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+		} else {
+			rtlhal->h2c_setinprogress = true;
+			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+			break;
+		}
+	}
+
+	while (!bwrite_success) {
+		wait_writeh2c_limit--;
+		if (wait_writeh2c_limit == 0) {
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "Write H2C fail because no trigger for FW INT!\n");
+			break;
+		}
+
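+		/* Use the 4 mailboxes in rotation; FW must empty each first. */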
+		boxnum = rtlhal->last_hmeboxnum;
+		switch (boxnum) {
+		case 0:
+			box_reg = REG_HMEBOX_0;
+			box_extreg = REG_HMEBOX_EXT_0;
+			break;
+		case 1:
+			box_reg = REG_HMEBOX_1;
+			box_extreg = REG_HMEBOX_EXT_1;
+			break;
+		case 2:
+			box_reg = REG_HMEBOX_2;
+			box_extreg = REG_HMEBOX_EXT_2;
+			break;
+		case 3:
+			box_reg = REG_HMEBOX_3;
+			box_extreg = REG_HMEBOX_EXT_3;
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not processed\n");
+			break;
+		}
+
+		isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum);
+		while (!isfw_rd) {
+
+			wait_h2c_limit--;
+			if (wait_h2c_limit == 0) {
+				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+					 "Wating too long for FW read clear HMEBox(%d)!\n",
+					 boxnum);
+				break;
+			}
+
+			udelay(10);
+
+			isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum);
+			u1tmp = rtl_read_byte(rtlpriv, 0x1BF);
+			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+				 "Wating for FW read clear HMEBox(%d)!!! "
+				 "0x1BF = %2x\n", boxnum, u1tmp);
+		}
+
+		if (!isfw_rd) {
+			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+				 "Write H2C register BOX[%d] fail!!!!! "
+				 "Fw do not read.\n", boxnum);
+			break;
+		}
+
+		memset(boxcontent, 0, sizeof(boxcontent));
+		memset(boxextcontent, 0, sizeof(boxextcontent));
+		boxcontent[0] = element_id;
+		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+			 "Write element_id box_reg(%4x) = %2x\n",
+			  box_reg, element_id);
+
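+		/* Lengths 4-5 set BIT(7) and use the extension register too. */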
+		switch (cmd_len) {
+		case 1:
+			boxcontent[0] &= ~(BIT(7));
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer, 1);
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		case 2:
+			boxcontent[0] &= ~(BIT(7));
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer, 2);
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		case 3:
+			boxcontent[0] &= ~(BIT(7));
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer, 3);
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		case 4:
+			boxcontent[0] |= (BIT(7));
+			memcpy((u8 *) (boxextcontent),
+			       p_cmdbuffer, 2);
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer + 2, 2);
+
+			for (idx = 0; idx < 2; idx++) {
+				rtl_write_byte(rtlpriv, box_extreg + idx,
+					       boxextcontent[idx]);
+			}
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		case 5:
+			boxcontent[0] |= (BIT(7));
+			memcpy((u8 *) (boxextcontent),
+			       p_cmdbuffer, 2);
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer + 2, 3);
+
+			for (idx = 0; idx < 2; idx++) {
+				rtl_write_byte(rtlpriv, box_extreg + idx,
+					       boxextcontent[idx]);
+			}
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not process\n");
+			break;
+		}
+
+		bwrite_success = true;
+
+		rtlhal->last_hmeboxnum = boxnum + 1;
+		if (rtlhal->last_hmeboxnum == 4)
+			rtlhal->last_hmeboxnum = 0;
+
+		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+			 "pHalData->last_hmeboxnum  = %d\n",
+			 rtlhal->last_hmeboxnum);
+	}
+
+	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+	rtlhal->h2c_setinprogress = false;
+	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+
+void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
+			    u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+	if (!rtlhal->fw_ready) {
+		RT_ASSERT(false,
+			  "Dropping H2C cmd because firmware download failed\n");
+		return;
+	}
+
+	_rtl8723ae_fill_h2c_command(hw, element_id, cmd_len, p_cmdbuffer);
+}
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+	u8 u1tmp;
+	u8 delay = 100;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+	u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+	while (u1tmp & BIT(2)) {
+		delay--;
+		if (delay == 0)
+			break;
+		udelay(50);
+		u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+	}
+	if (delay == 0) {
+		u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
+	}
+}
+
+void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 u1_h2c_set_pwrmode[3] = { 0 };
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+	SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+	SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
+	SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+					      ppsc->reg_max_lps_awakeintvl);
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+		      u1_h2c_set_pwrmode, 3);
+	rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
+
+static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
+				       struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring;
+	struct rtl_tx_desc *pdesc;
+	u8 own;
+	unsigned long flags;
+	struct sk_buff *pskb = NULL;
+
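+	/* Command packets go out on the beacon queue; drop any stale frame. */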
+	ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+	pskb = __skb_dequeue(&ring->queue);
+	if (pskb)
+		kfree_skb(pskb);
+
+	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+	pdesc = &ring->desc[0];
+	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+	__skb_queue_tail(&ring->queue, skb);
+
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+	rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+	return true;
+}
+
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+	/* page 0 beacon */
+	0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* page 1 beacon */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* page 2  ps-poll */
+	0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
+	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* page 3  null */
+	0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* page 4  probe_resp */
+	0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+	0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* page 5  probe_resp */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct sk_buff *skb = NULL;
+
+	u32 totalpacketlen;
+	bool rtstatus;
+	u8 u1RsvdPageLoc[3] = { 0 };
+	bool dlok = false;
+
+	u8 *beacon;
+	u8 *p_pspoll;
+	u8 *nullfunc;
+	u8 *p_probersp;
+	/*---------------------------------------------------------
+				(1) beacon
+	---------------------------------------------------------
+	*/
+	beacon = &reserved_page_packet[BEACON_PG * 128];
+	SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+	/*-------------------------------------------------------
+				(2) ps-poll
+	--------------------------------------------------------
+	*/
+	p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+	SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+	SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+	SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+
+	/*--------------------------------------------------------
+				(3) null data
+	---------------------------------------------------------
+	*/
+	nullfunc = &reserved_page_packet[NULL_PG * 128];
+	SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+	SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+
+	/*---------------------------------------------------------
+				(4) probe response
+	----------------------------------------------------------
+	*/
+	p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+	SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+	SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
+
+	totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+		      "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+		      &reserved_page_packet[0], totalpacketlen);
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+		      u1RsvdPageLoc, 3);
+
+	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
+	memcpy(skb_put(skb, totalpacketlen),
+	       &reserved_page_packet, totalpacketlen);
+
+	rtstatus = _rtl8723ae_cmd_send_packet(hw, skb);
+
+	if (rtstatus)
+		dlok = true;
+
+	if (dlok) {
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "Set RSVD page location to Fw.\n");
+		RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+				"H2C_RSVDPAGE:\n",
+				u1RsvdPageLoc, 3);
+		rtl8723ae_fill_h2c_cmd(hw, H2C_RSVDPAGE,
+				       sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+	} else {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "Set RSVD page location to Fw FAIL!\n");
+	}
+}
+
+void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+	u8 u1_joinbssrpt_parm[1] = { 0 };
+
+	SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+	rtl8723ae_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
new file mode 100644
index 0000000..89994e1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#ifndef __RTL8723E_FW_H__
+#define __RTL8723E_FW_H__
+
+#define FW_8192C_START_ADDRESS			0x1000
+#define FW_8192C_END_ADDRESS			0x3FFF
+#define FW_8192C_PAGE_SIZE			4096
+#define FW_8192C_POLLING_DELAY			5
+#define FW_8192C_POLLING_TIMEOUT_COUNT		1000
+
+#define BEACON_PG				0
+#define PSPOLL_PG				2
+#define NULL_PG					3
+#define PROBERSP_PG				4 /* ->5 */
+
+#define TOTAL_RESERVED_PKT_LEN			768
+
+#define IS_FW_HEADER_EXIST(_pfwhdr)		\
+	((_pfwhdr->signature & 0xFF00) == 0x2300)
+
+struct rtl8723ae_firmware_header {
+	u16 signature;
+	u8 category;
+	u8 function;
+	u16 version;
+	u8 subversion;
+	u8 rsvd1;
+	u8 month;
+	u8 date;
+	u8 hour;
+	u8 minute;
+	u16 ramcodeSize;
+	u16 rsvd2;
+	u32 svnindex;
+	u32 rsvd3;
+	u32 rsvd4;
+	u32 rsvd5;
+};
+
+enum rtl8192c_h2c_cmd {
+	H2C_AP_OFFLOAD = 0,
+	H2C_SETPWRMODE = 1,
+	H2C_JOINBSSRPT = 2,
+	H2C_RSVDPAGE = 3,
+	H2C_RSSI_REPORT = 5,
+	H2C_RA_MASK = 6,
+	MAX_H2CCMD
+};
+
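+/* Each helper packs one byte of the little-endian H2C command buffer. */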
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)			\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val)	\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+int rtl8723ae_download_fw(struct ieee80211_hw *hw);
+void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+			    u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
new file mode 100644
index 0000000..3d092e4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -0,0 +1,542 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "hal_bt_coexist.h"
+#include "../pci.h"
+#include "dm.h"
+#include "fw.h"
+#include "phy.h"
+#include "reg.h"
+#include "hal_btc.h"
+
+void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+						 bool reject)
+{
+}
+
+void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
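+	/* Mirror WiFi traffic, bandwidth and BT3.0 state into coex flags. */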
+	if (rtlpriv->link_info.busytraffic) {
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_IDLE;
+
+		if (rtlpriv->link_info.tx_busy_traffic)
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_UPLINK;
+		else
+			rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_UPLINK;
+
+		if (rtlpriv->link_info.rx_busy_traffic)
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_DOWNLINK;
+		else
+			rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_DOWNLINK;
+	} else {
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_IDLE;
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_UPLINK;
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+	}
+
+	if (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
+	    rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_LEGACY;
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT20;
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT40;
+	} else {
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_LEGACY;
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_HT40;
+			rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_HT20;
+		} else {
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_HT20;
+			rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_HT40;
+		}
+	}
+
+	if (rtlpriv->bt_operation_on)
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT30;
+	else
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT30;
+}
+
+u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+					  u8 level_num, u8 rssi_thresh,
+					  u8 rssi_thresh1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long smooth;
+	u8 bt_rssi_state = 0;
+
+	smooth = rtl8723ae_dm_bt_get_rx_ss(hw);
+
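+	/* Leaving the Low state needs BT_FW_COEX_THRESH_TOL of margin. */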
+	if (level_num == 2) {
+		rtlpcipriv->bt_coexist.cstate &=
+				~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+
+		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_LOW) ||
+		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_STAY_LOW)) {
+			if (smooth >= (rssi_thresh +
+			    BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to High\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state stay at Low\n");
+			}
+		} else {
+			if (smooth < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->bt_coexist.cstate |=
+					 BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to Low\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[DM][BT], RSSI_1 thresh error!!\n");
+			return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
+		}
+
+		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_LOW) ||
+		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_STAY_LOW)) {
+			if (smooth >=
+			    (rssi_thresh+BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate |=
+					 BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to Medium\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state stay at Low\n");
+			}
+		} else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+			   BT_RSSI_STATE_MEDIUM) ||
+			   (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+			   BT_RSSI_STATE_STAY_MEDIUM)) {
+			if (smooth >= (rssi_thresh1 +
+			    BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->bt_coexist.cstate |=
+					 BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to High\n");
+			} else if (smooth < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to Low\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state stay at Medium\n");
+			}
+		} else {
+			if (smooth < rssi_thresh1) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state switch to Medium\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI_1 state stay at High\n");
+			}
+		}
+	}
+
+	rtlpcipriv->bt_coexist.bt_pre_rssi_state1 = bt_rssi_state;
+
+	return bt_rssi_state;
+}
+
+u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+					 u8 level_num, u8 rssi_thresh,
+					 u8 rssi_thresh1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long smooth;
+	u8 bt_rssi_state = 0;
+
+	smooth = rtl8723ae_dm_bt_get_rx_ss(hw);
+
+	if (level_num == 2) {
+		rtlpcipriv->bt_coexist.cstate &=
+					 ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+
+		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_LOW) ||
+		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_STAY_LOW)){
+			if (smooth >=
+			    (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to High\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state stay at Low\n");
+			}
+		} else {
+			if (smooth < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->bt_coexist.cstate &=
+					~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to Low\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[DM][BT], RSSI thresh error!!\n");
+			return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
+		}
+		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_LOW) ||
+		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+		    BT_RSSI_STATE_STAY_LOW)) {
+			if (smooth >=
+			    (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate
+					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to Medium\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state stay at Low\n");
+			}
+		} else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+			   BT_RSSI_STATE_MEDIUM) ||
+			   (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
+			   BT_RSSI_STATE_STAY_MEDIUM)) {
+			if (smooth >=
+			    (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->bt_coexist.cstate
+					|= BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to High\n");
+			} else if (smooth < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->bt_coexist.cstate
+					|= BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to Low\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state stay at Medium\n");
+			}
+		} else {
+			if (smooth < rssi_thresh1) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate
+					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->bt_coexist.cstate
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state switch to Medium\n");
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+					 "[DM][BT], RSSI state stay at High\n");
+			}
+		}
+	}
+
+	rtlpcipriv->bt_coexist.bt_pre_rssi_state = bt_rssi_state;
+	return bt_rssi_state;
+}
+
+long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long smooth = 0;
+
+	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+		smooth = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+	else
+		smooth = rtlpriv->dm.entry_min_undec_sm_pwdb;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_get_rx_ss() = %ld\n", smooth);
+
+	return smooth;
+}
+
+void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw,
+			     bool balance_on, u8 ms0, u8 ms1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[3] = {0};
+
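+	/* h2c_parameter: [0] = ms0 slot, [1] = ms1 slot, [2] = on/off. */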
+	if (balance_on) {
+		h2c_parameter[2] = 1;
+		h2c_parameter[1] = ms1;
+		h2c_parameter[0] = ms0;
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	} else {
+		h2c_parameter[2] = 0;
+		h2c_parameter[1] = 0;
+		h2c_parameter[0] = 0;
+	}
+	rtlpcipriv->bt_coexist.balance_on = balance_on;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+		 balance_on ? "ON" : "OFF", ms0, ms1,
+		 h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
+}
+
+
+void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if (type == BT_AGCTABLE_OFF) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BT]AGCTable Off!\n");
+		rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x611f0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x60200001);
+
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x32000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x71000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xb0000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xfc000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_G1, 0xfffff, 0x30355);
+	} else if (type == BT_AGCTABLE_ON) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BT]AGCTable On!\n");
+		rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x4b1f0001);
+		rtl_write_dword(rtlpriv, 0xc78, 0x4a200001);
+
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xdc000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x90000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x51000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x12000);
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_G1, 0xfffff, 0x00355);
+
+		rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
+	}
+}
+
+void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if (type == BT_BB_BACKOFF_OFF) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BT]BBBackOffLevel Off!\n");
+		rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
+	} else if (type == BT_BB_BACKOFF_ON) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BT]BBBackOffLevel On!\n");
+		rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
+		rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
+	}
+}
+
+void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_fw_coex_all_off()\n");
+
+	if (rtlpcipriv->bt_coexist.fw_coexist_all_off)
+		return;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_fw_coex_all_off(), real Do\n");
+	rtl8723ae_dm_bt_fw_coex_all_off_8723a(hw);
+	rtlpcipriv->bt_coexist.fw_coexist_all_off = true;
+}
+
+void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_sw_coex_all_off()\n");
+
+	if (rtlpcipriv->bt_coexist.sw_coexist_all_off)
+		return;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_sw_coex_all_off(), real Do\n");
+	rtl8723ae_dm_bt_sw_coex_all_off_8723a(hw);
+	rtlpcipriv->bt_coexist.sw_coexist_all_off = true;
+}
+
+void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_hw_coex_all_off()\n");
+
+	if (rtlpcipriv->bt_coexist.hw_coexist_all_off)
+		return;
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "rtl8723ae_dm_bt_hw_coex_all_off(), real Do\n");
+
+	rtl8723ae_dm_bt_hw_coex_all_off_8723a(hw);
+
+	rtlpcipriv->bt_coexist.hw_coexist_all_off = true;
+}
+
+void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw)
+{
+	rtl8723ae_dm_bt_fw_coex_all_off(hw);
+	rtl8723ae_dm_bt_sw_coex_all_off(hw);
+	rtl8723ae_dm_bt_hw_coex_all_off(hw);
+}
+
+bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if ((rtlpcipriv->bt_coexist.previous_state ==
+	    rtlpcipriv->bt_coexist.cstate) &&
+	    (rtlpcipriv->bt_coexist.previous_state_h ==
+	    rtlpcipriv->bt_coexist.cstate_h))
+		return false;
+	else
+		return true;
+}
+
+bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->link_info.tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
new file mode 100644
index 0000000..76f4d12
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_HAL_BT_COEXIST_H__
+#define __RTL8723E_HAL_BT_COEXIST_H__
+
+#include "../wifi.h"
+
+/* These register definitions are for the 8723 */
+#define	REG_HIGH_PRIORITY_TXRX			0x770
+#define	REG_LOW_PRIORITY_TXRX			0x774
+
+#define BT_FW_COEX_THRESH_TOL			6
+#define BT_FW_COEX_THRESH_20			20
+#define BT_FW_COEX_THRESH_23			23
+#define BT_FW_COEX_THRESH_25			25
+#define BT_FW_COEX_THRESH_30			30
+#define BT_FW_COEX_THRESH_35			35
+#define BT_FW_COEX_THRESH_40			40
+#define BT_FW_COEX_THRESH_45			45
+#define BT_FW_COEX_THRESH_47			47
+#define BT_FW_COEX_THRESH_50			50
+#define BT_FW_COEX_THRESH_55			55
+
+#define BT_COEX_STATE_BT30			BIT(0)
+#define BT_COEX_STATE_WIFI_HT20			BIT(1)
+#define BT_COEX_STATE_WIFI_HT40			BIT(2)
+#define BT_COEX_STATE_WIFI_LEGACY		BIT(3)
+
+#define BT_COEX_STATE_WIFI_RSSI_LOW		BIT(4)
+#define BT_COEX_STATE_WIFI_RSSI_MEDIUM		BIT(5)
+#define BT_COEX_STATE_WIFI_RSSI_HIGH		BIT(6)
+#define BT_COEX_STATE_DEC_BT_POWER		BIT(7)
+
+#define BT_COEX_STATE_WIFI_IDLE			BIT(8)
+#define BT_COEX_STATE_WIFI_UPLINK		BIT(9)
+#define BT_COEX_STATE_WIFI_DOWNLINK		BIT(10)
+
+#define BT_COEX_STATE_BT_INQ_PAGE		BIT(11)
+#define BT_COEX_STATE_BT_IDLE			BIT(12)
+#define BT_COEX_STATE_BT_UPLINK			BIT(13)
+#define BT_COEX_STATE_BT_DOWNLINK		BIT(14)
+
+#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION	BIT(15)
+#define BT_COEX_STATE_BT_RSSI_LOW		BIT(19)
+
+#define BT_COEX_STATE_PROFILE_HID		BIT(20)
+#define BT_COEX_STATE_PROFILE_A2DP		BIT(21)
+#define BT_COEX_STATE_PROFILE_PAN		BIT(22)
+#define BT_COEX_STATE_PROFILE_SCO		BIT(23)
+
+#define BT_COEX_STATE_WIFI_RSSI_1_LOW		BIT(24)
+#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM	BIT(25)
+#define BT_COEX_STATE_WIFI_RSSI_1_HIGH		BIT(26)
+
+#define BT_COEX_STATE_BTINFO_COMMON		BIT(30)
+#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO	BIT(31)
+#define BT_COEX_STATE_BTINFO_B_FTP_A2DP		BIT(29)
+
+#define BT_COEX_STATE_BT_CNT_LEVEL_0		BIT(0)
+#define BT_COEX_STATE_BT_CNT_LEVEL_1		BIT(1)
+#define BT_COEX_STATE_BT_CNT_LEVEL_2		BIT(2)
+#define BT_COEX_STATE_BT_CNT_LEVEL_3		BIT(3)
+
+#define BT_RSSI_STATE_HIGH			0
+#define BT_RSSI_STATE_MEDIUM			1
+#define BT_RSSI_STATE_LOW			2
+#define BT_RSSI_STATE_STAY_HIGH			3
+#define BT_RSSI_STATE_STAY_MEDIUM		4
+#define BT_RSSI_STATE_STAY_LOW			5
+
+#define	BT_AGCTABLE_OFF				0
+#define	BT_AGCTABLE_ON				1
+#define	BT_BB_BACKOFF_OFF			0
+#define	BT_BB_BACKOFF_ON			1
+#define	BT_FW_NAV_OFF				0
+#define	BT_FW_NAV_ON				1
+
+#define	BT_COEX_MECH_NONE			0
+#define	BT_COEX_MECH_SCO			1
+#define	BT_COEX_MECH_HID			2
+#define	BT_COEX_MECH_A2DP			3
+#define	BT_COEX_MECH_PAN			4
+#define	BT_COEX_MECH_HID_A2DP			5
+#define	BT_COEX_MECH_HID_PAN			6
+#define	BT_COEX_MECH_PAN_A2DP			7
+#define	BT_COEX_MECH_HID_SCO_ESCO		8
+#define	BT_COEX_MECH_FTP_A2DP			9
+#define	BT_COEX_MECH_COMMON			10
+#define	BT_COEX_MECH_MAX			11
+
+#define	BT_DBG_PROFILE_NONE			0
+#define	BT_DBG_PROFILE_SCO			1
+#define	BT_DBG_PROFILE_HID			2
+#define	BT_DBG_PROFILE_A2DP			3
+#define	BT_DBG_PROFILE_PAN			4
+#define	BT_DBG_PROFILE_HID_A2DP			5
+#define	BT_DBG_PROFILE_HID_PAN			6
+#define	BT_DBG_PROFILE_PAN_A2DP			7
+#define	BT_DBG_PROFILE_MAX			9
+
+#define	BTINFO_B_FTP				BIT(7)
+#define	BTINFO_B_A2DP				BIT(6)
+#define	BTINFO_B_HID				BIT(5)
+#define	BTINFO_B_SCO_BUSY			BIT(4)
+#define	BTINFO_B_ACL_BUSY			BIT(3)
+#define	BTINFO_B_INQ_PAGE			BIT(2)
+#define	BTINFO_B_SCO_ESCO			BIT(1)
+#define	BTINFO_B_CONNECTION			BIT(0)
+
+
+void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
+
+void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
+long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw,
+			    bool balance_on, u8 ms0, u8 ms1);
+void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
+void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type);
+u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+					u8 level_num, u8 rssi_thresh,
+					u8 rssi_thresh1);
+u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+					 u8  level_num, u8 rssi_thresh,
+					 u8 rssi_thresh1);
+void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+						bool reject);
+
+bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
+bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
new file mode 100644
index 0000000..887d521
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -0,0 +1,1786 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+#include "hal_btc.h"
+#include "../pci.h"
+#include "phy.h"
+#include "fw.h"
+#include "reg.h"
+#include "def.h"
+
+void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	if (!rtlpcipriv->bt_coexist.bt_coexistence)
+		return;
+
+	if (ppsc->inactiveps) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BT][DM], Before entering IPS, turn off all Coexist DM\n");
+		rtlpcipriv->bt_coexist.cstate = 0;
+		rtlpcipriv->bt_coexist.previous_state = 0;
+		rtlpcipriv->bt_coexist.cstate_h = 0;
+		rtlpcipriv->bt_coexist.previous_state_h = 0;
+		rtl8723ae_btdm_coex_all_off(hw);
+	}
+}
+
+static enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT;
+
+	u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+	if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+		m_status = RT_MEDIA_CONNECT;
+
+	return m_status;
+}
+
+void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
+					   bool mstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 h2c_parameter[3] = {0};
+	u8 chnl;
+
+	if (!rtlpcipriv->bt_coexist.bt_coexistence)
+		return;
+
+	if (RT_MEDIA_CONNECT == mstatus)
+		h2c_parameter[0] = 0x1; /* 0: disconnected, 1:connected */
+	else
+		h2c_parameter[0] = 0x0;
+
+	if (mgnt_link_status_query(hw)) {
+		chnl = rtlphy->current_channel;
+		h2c_parameter[1] = chnl;
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+		h2c_parameter[2] = 0x30;
+	else
+		h2c_parameter[2] = 0x20;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], FW write 0x19 = 0x%x\n",
+		 h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
+
+}
+
+static bool rtl8723ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	if (rtlpriv->link_info.busytraffic ||
+		rtlpriv->link_info.rx_busy_traffic ||
+		rtlpriv->link_info.tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
+
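+/* Send five parameter bytes to the firmware via H2C command 0x3a
+ * (used for the PS-TDMA setting).
+ */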
+static void rtl8723ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
+				      u8 byte1, u8 byte2, u8 byte3,
+				      u8 byte4, u8 byte5)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[5] = {0};
+
+	h2c_parameter[0] = byte1;
+	h2c_parameter[1] = byte2;
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = byte5;
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], FW write 0x3a(4bytes) = 0x%x%8x\n",
+		 h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 |
+		 h2c_parameter[3]<<8 | h2c_parameter[4]);
+	rtl8723ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter);
+}
+
+static bool rtl8723ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "Need to decrease bt power\n");
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_DEC_BT_POWER;
+		return true;
+	}
+
+	rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
+	return false;
+}
+
+static bool rtl8723ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if ((rtlpcipriv->bt_coexist.previous_state ==
+	    rtlpcipriv->bt_coexist.cstate) &&
+	    (rtlpcipriv->bt_coexist.previous_state_h ==
+	    rtlpcipriv->bt_coexist.cstate_h)) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[DM][BT], Coexist state did not change!!\n");
+		return true;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[DM][BT], Coexist state changed!!\n");
+		return false;
+	}
+}
+
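+/* Program the coexistence table registers 0x6c0, 0x6c8 and 0x6cc. */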
+static void rtl8723ae_dm_bt_set_coex_table(struct ieee80211_hw *hw,
+					   u32 val_0x6c0, u32 val_0x6c8,
+					   u32 val_0x6cc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "set coex table, set 0x6c0 = 0x%x\n", val_0x6c0);
+	rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "set coex table, set 0x6c8 = 0x%x\n", val_0x6c8);
+	rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "set coex table, set 0x6cc = 0x%x\n", val_0x6cc);
+	rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
+}
+
+static void rtl8723ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool mode)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (BT_PTA_MODE_ON == mode) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on, ");
+		/*  Enable GPIO 0/1/2/3/8 pins for bt */
+		rtl_write_byte(rtlpriv, 0x40, 0x20);
+		rtlpcipriv->bt_coexist.hw_coexist_all_off = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
+		rtl_write_byte(rtlpriv, 0x40, 0x0);
+	}
+}
+
+static void rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw,
+						    u8 type)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
+		/* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu*/
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "Shrink RF Rx LPF corner!!\n");
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
+					0xf0ff7);
+		rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
+	} else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
+		/*Resume RF Rx LPF corner*/
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "Resume RF Rx LPF corner!!\n");
+		rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
+			rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
+	}
+}
+
+static void rtl8723ae_bt_set_penalty_tx_rate_adap(struct ieee80211_hw *hw,
+						  u8 ra_type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 tmu1;
+
+	tmu1 = rtl_read_byte(rtlpriv, 0x4fd);
+	tmu1 |= BIT(0);
+	if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "Tx rate adaptive, set low penalty!!\n");
+		tmu1 &= ~BIT(2);
+		rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
+	} else if (BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "Tx rate adaptive, set normal!!\n");
+		tmu1 |= BIT(2);
+	}
+	rtl_write_byte(rtlpriv, 0x4fd, tmu1);
+}
+
+static void rtl8723ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw,
+						 struct btdm_8723 *btdm)
+{
+	btdm->all_off = false;
+	btdm->agc_table_en = false;
+	btdm->adc_back_off_on = false;
+	btdm->b2_ant_hid_en = false;
+	btdm->low_penalty_rate_adaptive = false;
+	btdm->rf_rx_lpf_shrink = false;
+	btdm->reject_aggre_pkt = false;
+
+	btdm->tdma_on = false;
+	btdm->tdma_ant = TDMA_2ANT;
+	btdm->tdma_nav = TDMA_NAV_OFF;
+	btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF;
+	btdm->fw_dac_swing_lvl = 0x20;
+
+	btdm->tra_tdma_on = false;
+	btdm->tra_tdma_ant = TDMA_2ANT;
+	btdm->tra_tdma_nav = TDMA_NAV_OFF;
+	btdm->ignore_wlan_act = false;
+
+	btdm->ps_tdma_on = false;
+	btdm->ps_tdma_byte[0] = 0x0;
+	btdm->ps_tdma_byte[1] = 0x0;
+	btdm->ps_tdma_byte[2] = 0x0;
+	btdm->ps_tdma_byte[3] = 0x8;
+	btdm->ps_tdma_byte[4] = 0x0;
+
+	btdm->pta_on = true;
+	btdm->val_0x6c0 = 0x5a5aaaaa;
+	btdm->val_0x6c8 = 0xcc;
+	btdm->val_0x6cc = 0x3;
+
+	btdm->sw_dac_swing_on = false;
+	btdm->sw_dac_swing_lvl = 0xc0;
+	btdm->wlan_act_hi = 0x20;
+	btdm->wlan_act_lo = 0x10;
+	btdm->bt_retry_index = 2;
+
+	btdm->dec_bt_pwr = false;
+}
+
+static void dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw,
+						struct btdm_8723 *btdm)
+{
+	rtl8723ae_dm_bt_btdm_structure_reload(hw, btdm);
+	btdm->all_off = true;
+	btdm->pta_on = false;
+	btdm->wlan_act_hi = 0x10;
+}
+
+static bool rtl8723ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct btdm_8723 btdm8723;
+	bool common = false;
+
+	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);
+
+	if (!rtl8723ae_dm_bt_is_wifi_busy(hw)
+	    && !rtlpcipriv->bt_coexist.bt_busy) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
+		dm_bt_btdm_structure_reload_all_off(hw, &btdm8723);
+		common = true;
+	} else if (rtl8723ae_dm_bt_is_wifi_busy(hw)
+		   && !rtlpcipriv->bt_coexist.bt_busy) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "Wifi non-idle + Bt disabled/idle!!\n");
+		btdm8723.low_penalty_rate_adaptive = true;
+		btdm8723.rf_rx_lpf_shrink = false;
+		btdm8723.reject_aggre_pkt = false;
+
+		/* sw mechanism */
+		btdm8723.agc_table_en = false;
+		btdm8723.adc_back_off_on = false;
+		btdm8723.sw_dac_swing_on = false;
+
+		btdm8723.pta_on = true;
+		btdm8723.val_0x6c0 = 0x5a5aaaaa;
+		btdm8723.val_0x6c8 = 0xcccc;
+		btdm8723.val_0x6cc = 0x3;
+
+		btdm8723.tdma_on = false;
+		btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+		btdm8723.b2_ant_hid_en = false;
+
+		common = true;
+	} else if (rtlpcipriv->bt_coexist.bt_busy) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "Bt non-idle!\n");
+		if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi connection exist\n");
+			common = false;
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "No Wifi connection!\n");
+			btdm8723.rf_rx_lpf_shrink = true;
+			btdm8723.low_penalty_rate_adaptive = false;
+			btdm8723.reject_aggre_pkt = false;
+
+			/* sw mechanism */
+			btdm8723.agc_table_en = false;
+			btdm8723.adc_back_off_on = false;
+			btdm8723.sw_dac_swing_on = false;
+
+			btdm8723.pta_on = true;
+			btdm8723.val_0x6c0 = 0x55555555;
+			btdm8723.val_0x6c8 = 0x0000ffff;
+			btdm8723.val_0x6cc = 0x3;
+
+			btdm8723.tdma_on = false;
+			btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+			btdm8723.b2_ant_hid_en = false;
+
+			common = true;
+		}
+	}
+
+	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
+		btdm8723.dec_bt_pwr = true;
+
+	if (common)
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BTINFO_COMMON;
+
+	if (common && rtl8723ae_dm_bt_is_coexist_state_changed(hw))
+		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);
+
+	return common;
+}
+
+static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
+						       bool sw_dac_swing_on,
+						       u32 sw_dac_swing_lvl)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (sw_dac_swing_on) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
+		rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000,
+					 sw_dac_swing_lvl);
+		rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], SwDacSwing Off!\n");
+		rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+	}
+}
+
+static void rtl8723ae_dm_bt_set_fw_dec_bt_pwr(struct ieee80211_hw *hw,
+					      bool dec_bt_pwr)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+	if (dec_bt_pwr) {
+		h2c_parameter[0] |= BIT(1);
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	}
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], decrease Bt Power : %s, write 0x21 = 0x%x\n",
+		 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
+					    bool enable, bool dac_swing_on)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	if (enable) {
+		h2c_parameter[0] |= BIT(0);
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	}
+	if (dac_swing_on)
+		h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15 = 0x%x\n",
+		 (enable ? "ON!!" : "OFF!!"), (dac_swing_on ? "ON" : "OFF"),
+		 h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
+					     bool enable, u8 ant_num, u8 nav_en,
+					     u8 dac_swing_en)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 h2c_parameter[1] = {0};
+	u8 h2c_parameter1[1] = {0};
+
+	h2c_parameter[0] = 0;
+	h2c_parameter1[0] = 0;
+
+	if (enable) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], set BT PTA update manager to trigger update!!\n");
+		h2c_parameter1[0] |= BIT(0);
+
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], turn TDMA mode ON!!\n");
+		h2c_parameter[0] |= BIT(0);		/* function enable */
+		if (TDMA_1ANT == ant_num) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_1ANT\n");
+			h2c_parameter[0] |= BIT(1);
+		} else if (TDMA_2ANT == ant_num) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_2ANT\n");
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], Unknown Ant\n");
+		}
+
+		if (TDMA_NAV_OFF == nav_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_NAV_OFF\n");
+		} else if (TDMA_NAV_ON == nav_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_NAV_ON\n");
+			h2c_parameter[0] |= BIT(2);
+		}
+
+		if (TDMA_DAC_SWING_OFF == dac_swing_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_DAC_SWING_OFF\n");
+		} else if (TDMA_DAC_SWING_ON == dac_swing_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TDMA_DAC_SWING_ON\n");
+			h2c_parameter[0] |= BIT(4);
+		}
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], set BT PTA update manager to no update!!\n");
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], turn TDMA mode OFF!!\n");
+	}
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], FW2AntTDMA, write 0x26 = 0x%x\n",
+		 h2c_parameter1[0]);
+	rtl8723ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], FW2AntTDMA, write 0x14 = 0x%x\n", h2c_parameter[0]);
+	rtl8723ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw,
+						   bool enable)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	if (enable) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], BT Ignore Wlan_Act !!\n");
+		h2c_parameter[0] |= BIT(0);		/* function enable */
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], BT doesn't ignore Wlan_Act!!\n");
+	}
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25 = 0x%x\n",
+		 h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
+						 bool enable, u8 ant_num,
+						 u8 nav_en)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 h2c_parameter[2] = {0};
+
+	/* Only 8723 B cut should do this */
+	if (IS_VENDOR_8723_A_CUT(rtlhal->version)) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
+		return;
+	}
+
+	if (enable) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], turn TTDMA mode ON!!\n");
+		h2c_parameter[0] |= BIT(0);		/* function enable */
+		if (TDMA_1ANT == ant_num) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TTDMA_1ANT\n");
+			h2c_parameter[0] |= BIT(1);
+		} else if (TDMA_2ANT == ant_num) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TTDMA_2ANT\n");
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], Unknown Ant\n");
+		}
+
+		if (TDMA_NAV_OFF == nav_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TTDMA_NAV_OFF\n");
+		} else if (TDMA_NAV_ON == nav_en) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "[BTCoex], TTDMA_NAV_ON\n");
+			h2c_parameter[1] |= BIT(0);
+		}
+
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], turn TTDMA mode OFF!!\n");
+	}
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], FW Traditional TDMA, write 0x33 = 0x%x\n",
+		 h2c_parameter[0] << 8 | h2c_parameter[1]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
+						   u8 dac_swing_lvl)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = dac_swing_lvl;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], write 0x29 = 0x%x\n", h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw,
+					       bool enable)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+	if (enable) {
+		h2c_parameter[0] |= BIT(0);
+		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
+	}
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], Set BT HID information = 0x%x\n", enable);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], write 0x24 = 0x%x\n", h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
+						  u8 retry_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = retry_index;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], Set BT Retry Index=%d\n", retry_index);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], write 0x23 = 0x%x\n", h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
+					    u8 wlan_act_hi, u8 wlan_act_lo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter_hi[1] = {0};
+	u8 h2c_parameter_lo[1] = {0};
+
+	h2c_parameter_hi[0] = wlan_act_hi;
+	h2c_parameter_lo[0] = wlan_act_lo;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], Set WLAN_ACT Hi:Lo = 0x%x/0x%x\n", wlan_act_hi,
+		 wlan_act_lo);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], write 0x22 = 0x%x\n", h2c_parameter_hi[0]);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "[BTCoex], write 0x11 = 0x%x\n", h2c_parameter_lo[0]);
+
+	/* WLAN_ACT = High duration, unit:ms */
+	rtl8723ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi);
+	/*  WLAN_ACT = Low duration, unit:3*625us */
+	rtl8723ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo);
+}
+
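+/*
+ * Apply a new btdm_8723 coexistence setting. A setting identical to the
+ * one already programmed is skipped; otherwise the hw, sw and fw
+ * mechanisms are reprogrammed, keeping 2-Ant HID, TDMA and PS-TDMA
+ * mutually exclusive.
+ */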
+void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8723 *btdm)
+{
+	struct rtl_pci_priv	*rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv	*rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct btdm_8723 *btdm_8723 = &rtlhal->hal_coex_8723.btdm;
+	u8 i;
+	bool fw_current_inpsmode = false;
+	bool fw_ps_awake = true;
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				      (u8 *)(&fw_current_inpsmode));
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+				      (u8 *)(&fw_ps_awake));
+
+	/* check new setting is different than the old one,
+	 * if all the same, don't do the setting again.
+	 */
+	if (memcmp(btdm_8723, btdm, sizeof(struct btdm_8723)) == 0) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], the same coexist setting, return!!\n");
+		return;
+	} else {	/* save the new coexist setting */
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], UPDATE TO NEW COEX SETTING!!\n");
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bAllOff = 0x%x/ 0x%x\n",
+			 btdm_8723->all_off, btdm->all_off);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new agc_table_en = 0x%x/ 0x%x\n",
+			 btdm_8723->agc_table_en, btdm->agc_table_en);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new adc_back_off_on = 0x%x/ 0x%x\n",
+			 btdm_8723->adc_back_off_on, btdm->adc_back_off_on);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new b2_ant_hid_en = 0x%x/ 0x%x\n",
+			 btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bLowPenaltyRateAdaptive = 0x%x/ 0x%x\n",
+			 btdm_8723->low_penalty_rate_adaptive,
+			 btdm->low_penalty_rate_adaptive);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bRfRxLpfShrink = 0x%x/ 0x%x\n",
+			 btdm_8723->rf_rx_lpf_shrink, btdm->rf_rx_lpf_shrink);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bRejectAggrePkt = 0x%x/ 0x%x\n",
+			 btdm_8723->reject_aggre_pkt, btdm->reject_aggre_pkt);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new tdma_on = 0x%x/ 0x%x\n",
+			 btdm_8723->tdma_on, btdm->tdma_on);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new tdmaAnt = 0x%x/ 0x%x\n",
+			 btdm_8723->tdma_ant, btdm->tdma_ant);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new tdmaNav = 0x%x/ 0x%x\n",
+			 btdm_8723->tdma_nav, btdm->tdma_nav);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new tdma_dac_swing = 0x%x/ 0x%x\n",
+			 btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new fwDacSwingLvl = 0x%x/ 0x%x\n",
+			 btdm_8723->fw_dac_swing_lvl, btdm->fw_dac_swing_lvl);
+
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bTraTdmaOn = 0x%x/ 0x%x\n",
+			 btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new traTdmaAnt = 0x%x/ 0x%x\n",
+			 btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new traTdmaNav = 0x%x/ 0x%x\n",
+			 btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bPsTdmaOn = 0x%x/ 0x%x\n",
+			 btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
+		for (i = 0; i < 5; i++) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], original/new psTdmaByte[i] = 0x%x/ 0x%x\n",
+				 btdm_8723->ps_tdma_byte[i],
+				 btdm->ps_tdma_byte[i]);
+		}
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bIgnoreWlanAct = 0x%x/ 0x%x\n",
+			 btdm_8723->ignore_wlan_act, btdm->ignore_wlan_act);
+
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new bPtaOn = 0x%x/ 0x%x\n",
+			 btdm_8723->pta_on, btdm->pta_on);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new val_0x6c0 = 0x%x/ 0x%x\n",
+			 btdm_8723->val_0x6c0, btdm->val_0x6c0);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new val_0x6c8 = 0x%x/ 0x%x\n",
+			 btdm_8723->val_0x6c8, btdm->val_0x6c8);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new val_0x6cc = 0x%x/ 0x%x\n",
+			 btdm_8723->val_0x6cc, btdm->val_0x6cc);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new sw_dac_swing_on = 0x%x/ 0x%x\n",
+			 btdm_8723->sw_dac_swing_on, btdm->sw_dac_swing_on);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new sw_dac_swing_lvl = 0x%x/ 0x%x\n",
+			 btdm_8723->sw_dac_swing_lvl,
+			 btdm->sw_dac_swing_lvl);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new wlanActHi = 0x%x/ 0x%x\n",
+			 btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new wlanActLo = 0x%x/ 0x%x\n",
+			 btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], original/new btRetryIndex = 0x%x/ 0x%x\n",
+			btdm_8723->bt_retry_index, btdm->bt_retry_index);
+
+		memcpy(btdm_8723, btdm, sizeof(struct btdm_8723));
+	}
+	/*
+	 * When a BT operation (inquiry/paging/pairing) is in progress,
+	 * we only need to turn off TDMA.
+	 */
+
+	if (rtlpcipriv->bt_coexist.hold_for_bt_operation) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], set to ignore wlanAct for BT OP!!\n");
+		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, true);
+		return;
+	}
+
+	if (btdm->all_off) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], disable all coexist mechanisms!!\n");
+		rtl8723ae_btdm_coex_all_off(hw);
+		return;
+	}
+
+	rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, btdm->reject_aggre_pkt);
+
+	if (btdm->low_penalty_rate_adaptive)
+		rtl8723ae_bt_set_penalty_tx_rate_adap(hw,
+			BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
+	else
+		rtl8723ae_bt_set_penalty_tx_rate_adap(hw,
+			BT_TX_RATE_ADAPTIVE_NORMAL);
+
+	if (btdm->rf_rx_lpf_shrink)
+		rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw,
+					 BT_RF_RX_LPF_CORNER_SHRINK);
+	else
+		rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw,
+					 BT_RF_RX_LPF_CORNER_RESUME);
+
+	if (btdm->agc_table_en)
+		rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON);
+	else
+		rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+
+	if (btdm->adc_back_off_on)
+		rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_ON);
+	else
+		rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF);
+
+	rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, btdm->bt_retry_index);
+
+	rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, btdm->fw_dac_swing_lvl);
+	rtl8723ae_dm_bt_set_fw_wlan_act(hw, btdm->wlan_act_hi,
+				       btdm->wlan_act_lo);
+
+	rtl8723ae_dm_bt_set_coex_table(hw, btdm->val_0x6c0,
+		btdm->val_0x6c8, btdm->val_0x6cc);
+	rtl8723ae_dm_bt_set_hw_pta_mode(hw, btdm->pta_on);
+
+	/* Note: There is a constraint between TDMA and 2AntHID:
+	 * only one of 2AntHid and tdma can be turned on at a time.
+	 * Turn both mechanisms off first, then turn the selected one on.
+	 */
+	if (btdm->b2_ant_hid_en) {
+		/* turn off tdma */
+		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
+						    btdm->tra_tdma_ant,
+						    btdm->tra_tdma_nav);
+		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
+						btdm->tdma_nav,
+						btdm->tdma_dac_swing);
+
+		/* turn off Pstdma */
+		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
+						      btdm->ignore_wlan_act);
+		/* Antenna control by PTA, 0x870 = 0x300. */
+		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+
+		/* turn on 2AntHid */
+		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, true);
+		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+	} else if (btdm->tdma_on) {
+		/* turn off 2AntHid */
+		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		/* turn off pstdma */
+		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
+						      btdm->ignore_wlan_act);
+		/* Antenna control by PTA, 0x870 = 0x300. */
+		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+
+		/* turn on tdma */
+		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
+				 btdm->tra_tdma_ant, btdm->tra_tdma_nav);
+		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, true, btdm->tdma_ant,
+				 btdm->tdma_nav, btdm->tdma_dac_swing);
+	} else if (btdm->ps_tdma_on) {
+		/* turn off 2AntHid */
+		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		/* turn off tdma */
+		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
+				 btdm->tra_tdma_ant, btdm->tra_tdma_nav);
+		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
+				 btdm->tdma_nav, btdm->tdma_dac_swing);
+
+		/* turn on pstdma */
+		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
+				 btdm->ignore_wlan_act);
+		rtl8723ae_dm_bt_set_fw_3a(hw,
+			btdm->ps_tdma_byte[0],
+			btdm->ps_tdma_byte[1],
+			btdm->ps_tdma_byte[2],
+			btdm->ps_tdma_byte[3],
+			btdm->ps_tdma_byte[4]);
+	} else {
+		/* turn off 2AntHid */
+		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		/* turn off tdma */
+		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
+				 btdm->tra_tdma_ant, btdm->tra_tdma_nav);
+		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
+				 btdm->tdma_nav, btdm->tdma_dac_swing);
+
+		/* turn off pstdma */
+		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
+						      btdm->ignore_wlan_act);
+		/* Antenna control by PTA, 0x870 = 0x300. */
+		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+	}
+
+	/* Note:
+	 * Add a delay to make sure the sw DAC swing can be set
+	 * successfully, because rtl8723ae_dm_bt_set_fw_2_ant_hid()
+	 * and rtl8723ae_dm_bt_set_fw_tdma_ctrl() will overwrite
+	 * reg 0x880.
+	 */
+	mdelay(30);
+	rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw,
+		btdm->sw_dac_swing_on, btdm->sw_dac_swing_lvl);
+	rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, btdm->dec_bt_pwr);
+}
+
+/*============================================================
+ * extern function start with BTDM_
+ *============================================================
+ */
+static u32 rtl8723ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 counters = 0;
+
+	counters = rtlhal->hal_coex_8723.high_priority_tx +
+		   rtlhal->hal_coex_8723.high_priority_rx;
+	return counters;
+}
+
+static u32 rtl8723ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	return rtlhal->hal_coex_8723.low_priority_tx +
+	       rtlhal->hal_coex_8723.low_priority_rx;
+}
+
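+/*
+ * Classify the combined BT high/low priority Tx+Rx counters into one of
+ * four activity levels and record the level in cstate_h.
+ */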
+static u8 rtl8723ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u32 bt_tx_rx_cnt = 0;
+	u8 bt_tx_rx_cnt_lvl = 0;
+
+	bt_tx_rx_cnt = rtl8723ae_dm_bt_tx_rx_couter_h(hw) +
+		       rtl8723ae_dm_bt_tx_rx_couter_l(hw);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);
+
+	rtlpcipriv->bt_coexist.cstate_h &=
+		 ~(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1 |
+		  BT_COEX_STATE_BT_CNT_LEVEL_2);
+
+	if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], BT TxRx Counters at level 3\n");
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
+		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_3;
+	} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], BT TxRx Counters at level 2\n");
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
+		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_2;
+	} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], BT TxRx Counters at level 1\n");
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
+		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_1;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], BT TxRx Counters at level 0\n");
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
+		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_0;
+	}
+	return bt_tx_rx_cnt_lvl;
+}
+
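+/* 2-antenna coexistence strategy for HID/SCO/eSCO BT traffic. */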
+static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8723 btdm8723;
+	u8 bt_rssi_state, bt_rssi_state1;
+	u8 bt_tx_rx_cnt_lvl;
+
+	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);
+
+	btdm8723.rf_rx_lpf_shrink = true;
+	btdm8723.low_penalty_rate_adaptive = true;
+	btdm8723.reject_aggre_pkt = false;
+
+	bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+		/* coex table */
+		btdm8723.val_0x6c0 = 0x55555555;
+		btdm8723.val_0x6c8 = 0xffff;
+		btdm8723.val_0x6cc = 0x3;
+
+		/* sw mechanism */
+		btdm8723.agc_table_en = false;
+		btdm8723.adc_back_off_on = false;
+		btdm8723.sw_dac_swing_on = false;
+
+		/* fw mechanism */
+		btdm8723.ps_tdma_on = true;
+		if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BT TxRx Counters >= 1400\n");
+			btdm8723.ps_tdma_byte[0] = 0xa3;
+			btdm8723.ps_tdma_byte[1] = 0x5;
+			btdm8723.ps_tdma_byte[2] = 0x5;
+			btdm8723.ps_tdma_byte[3] = 0x2;
+			btdm8723.ps_tdma_byte[4] = 0x80;
+		} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+			btdm8723.ps_tdma_byte[0] = 0xa3;
+			btdm8723.ps_tdma_byte[1] = 0xa;
+			btdm8723.ps_tdma_byte[2] = 0xa;
+			btdm8723.ps_tdma_byte[3] = 0x2;
+			btdm8723.ps_tdma_byte[4] = 0x80;
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BT TxRx Counters < 1200\n");
+			btdm8723.ps_tdma_byte[0] = 0xa3;
+			btdm8723.ps_tdma_byte[1] = 0xf;
+			btdm8723.ps_tdma_byte[2] = 0xf;
+			btdm8723.ps_tdma_byte[3] = 0x2;
+			btdm8723.ps_tdma_byte[4] = 0x80;
+		}
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "HT20 or Legacy\n");
+		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
+								     47, 0);
+		bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2,
+								       27, 0);
+
+		/* coex table */
+		btdm8723.val_0x6c0 = 0x55555555;
+		btdm8723.val_0x6c8 = 0xffff;
+		btdm8723.val_0x6cc = 0x3;
+
+		/* sw mechanism */
+		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi high\n");
+			btdm8723.agc_table_en = true;
+			btdm8723.adc_back_off_on = true;
+			btdm8723.sw_dac_swing_on = false;
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi low\n");
+			btdm8723.agc_table_en = false;
+			btdm8723.adc_back_off_on = false;
+			btdm8723.sw_dac_swing_on = false;
+		}
+
+		/* fw mechanism */
+		btdm8723.ps_tdma_on = true;
+		if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi-1 high\n");
+			/* only when rssi is high do we need to do this;
+			 * when rssi is low, the value is modified by the fw
+			 */
+			rtl_write_byte(rtlpriv, 0x883, 0x40);
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x83;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x83;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x83;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi-1 low\n");
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x2;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x2;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x2;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	}
+
+	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
+		btdm8723.dec_bt_pwr = true;
+
+	/* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+		 rtlhal->hal_coex_8723.bt_inq_page_start_time,
+		 bt_tx_rx_cnt_lvl);
+	if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) ||
+	    (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+		btdm8723.ps_tdma_on = true;
+		btdm8723.ps_tdma_byte[0] = 0xa3;
+		btdm8723.ps_tdma_byte[1] = 0x5;
+		btdm8723.ps_tdma_byte[2] = 0x5;
+		btdm8723.ps_tdma_byte[3] = 0x2;
+		btdm8723.ps_tdma_byte[4] = 0x80;
+	}
+
+	if (rtl8723ae_dm_bt_is_coexist_state_changed(hw))
+		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);
+}
+
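+/* 2-antenna coexistence strategy for FTP/A2DP BT traffic. */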
+static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8723 btdm8723;
+	u8 bt_rssi_state, bt_rssi_state1;
+	u32 bt_tx_rx_cnt_lvl;
+
+	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);
+	btdm8723.rf_rx_lpf_shrink = true;
+	btdm8723.low_penalty_rate_adaptive = true;
+	btdm8723.reject_aggre_pkt = false;
+
+	bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
+								     37, 0);
+
+		/* coex table */
+		btdm8723.val_0x6c0 = 0x55555555;
+		btdm8723.val_0x6c8 = 0xffff;
+		btdm8723.val_0x6cc = 0x3;
+
+		/* sw mechanism */
+		btdm8723.agc_table_en = false;
+		btdm8723.adc_back_off_on = true;
+		btdm8723.sw_dac_swing_on = false;
+
+		/* fw mechanism */
+		btdm8723.ps_tdma_on = true;
+		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi high\n");
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi low\n");
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "HT20 or Legacy\n");
+		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
+								     47, 0);
+		bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2,
+								       27, 0);
+
+		/* coex table */
+		btdm8723.val_0x6c0 = 0x55555555;
+		btdm8723.val_0x6c8 = 0xffff;
+		btdm8723.val_0x6cc = 0x3;
+
+		/* sw mechanism */
+		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi high\n");
+			btdm8723.agc_table_en = true;
+			btdm8723.adc_back_off_on = true;
+			btdm8723.sw_dac_swing_on = false;
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi low\n");
+			btdm8723.agc_table_en = false;
+			btdm8723.adc_back_off_on = false;
+			btdm8723.sw_dac_swing_on = false;
+		}
+
+		/* fw mechanism */
+		btdm8723.ps_tdma_on = true;
+		if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi-1 high\n");
+			/* only when rssi is high do we need to do this;
+			 * when rssi is low, the value is modified by the fw
+			 */
+			rtl_write_byte(rtlpriv, 0x883, 0x40);
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x81;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "Wifi rssi-1 low\n");
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0x5;
+				btdm8723.ps_tdma_byte[2] = 0x5;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xa;
+				btdm8723.ps_tdma_byte[2] = 0xa;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+					 "[BTCoex], BT TxRx Counters < 1200\n");
+				btdm8723.ps_tdma_byte[0] = 0xa3;
+				btdm8723.ps_tdma_byte[1] = 0xf;
+				btdm8723.ps_tdma_byte[2] = 0xf;
+				btdm8723.ps_tdma_byte[3] = 0x0;
+				btdm8723.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	}
+
+	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
+		btdm8723.dec_bt_pwr = true;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+		 rtlhal->hal_coex_8723.bt_inq_page_start_time,
+		 bt_tx_rx_cnt_lvl);
+
+	if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) ||
+	    (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+		btdm8723.ps_tdma_on = true;
+		btdm8723.ps_tdma_byte[0] = 0xa3;
+		btdm8723.ps_tdma_byte[1] = 0x5;
+		btdm8723.ps_tdma_byte[2] = 0x5;
+		btdm8723.ps_tdma_byte[3] = 0x83;
+		btdm8723.ps_tdma_byte[4] = 0x80;
+	}
+
+	if (rtl8723ae_dm_bt_is_coexist_state_changed(hw))
+		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);
+}
+
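+/*
+ * Track BT inquiry/page activity reported via C2H: set the inquiry/page
+ * coexistence state when activity starts and clear it once it has
+ * lasted 10 seconds or more.
+ */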
+static void rtl8723ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u32 cur_time = jiffies;
+
+	if (rtlhal->hal_coex_8723.c2h_bt_inquiry_page) {
+		/* bt inquiry or page is started. */
+		if (rtlhal->hal_coex_8723.bt_inq_page_start_time == 0) {
+			rtlpcipriv->bt_coexist.cstate |=
+					 BT_COEX_STATE_BT_INQ_PAGE;
+			rtlhal->hal_coex_8723.bt_inq_page_start_time = cur_time;
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
+				 rtlhal->hal_coex_8723.bt_inq_page_start_time);
+		}
+	}
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
+		 rtlhal->hal_coex_8723.bt_inq_page_start_time, cur_time);
+
+	if (rtlhal->hal_coex_8723.bt_inq_page_start_time) {
+		if ((((long)cur_time -
+		    (long)rtlhal->hal_coex_8723.bt_inq_page_start_time) / HZ) >=
+		    10) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BT Inquiry/page >= 10sec!!!\n");
+			rtlhal->hal_coex_8723.bt_inq_page_start_time = 0;
+			rtlpcipriv->bt_coexist.cstate &=
+						 ~BT_COEX_STATE_BT_INQ_PAGE;
+		}
+	}
+}
+
+static void rtl8723ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	rtlpcipriv->bt_coexist.cstate &=
+		~(BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP |
+		BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
+
+	rtlpcipriv->bt_coexist.cstate &=
+		~(BT_COEX_STATE_BTINFO_COMMON |
+		BT_COEX_STATE_BTINFO_B_HID_SCOESCO |
+		BT_COEX_STATE_BTINFO_B_FTP_A2DP);
+}
+
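+/*
+ * Main 2-antenna coexistence handler: check the WiFi state, monitor BT
+ * inquiry/page activity and select a coexistence strategy from the BT
+ * profile bits reported in the C2H BT info.
+ */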
+static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 bt_retry_cnt;
+	u8 bt_info_original;
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "[BTCoex] Get bt info by fw!!\n");
+
+	_rtl8723_dm_bt_check_wifi_state(hw);
+
+	if (rtlhal->hal_coex_8723.c2h_bt_info_req_sent) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex] c2h for btInfo not rcvd yet!!\n");
+	}
+
+	bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt;
+	bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original;
+
+	/* when bt inquiry or page scan, we have to set h2c 0x25
+	 * ignore wlanact for continuous 4x2secs
+	 */
+	rtl8723ae_dm_bt_inq_page_monitor(hw);
+	rtl8723ae_dm_bt_reset_action_profile_state(hw);
+
+	if (rtl8723ae_dm_bt_is_2_ant_common_action(hw)) {
+		rtlpcipriv->bt_coexist.bt_profile_case = BT_COEX_MECH_COMMON;
+		rtlpcipriv->bt_coexist.bt_profile_action = BT_COEX_MECH_COMMON;
+
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "Action 2-Ant common.\n");
+	} else {
+		if ((bt_info_original & BTINFO_B_HID) ||
+		    (bt_info_original & BTINFO_B_SCO_BUSY) ||
+		    (bt_info_original & BTINFO_B_SCO_ESCO)) {
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+			rtlpcipriv->bt_coexist.bt_profile_case =
+					BT_COEX_MECH_HID_SCO_ESCO;
+			rtlpcipriv->bt_coexist.bt_profile_action =
+					BT_COEX_MECH_HID_SCO_ESCO;
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
+			rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw);
+		} else if ((bt_info_original & BTINFO_B_FTP) ||
+			   (bt_info_original & BTINFO_B_A2DP)) {
+			rtlpcipriv->bt_coexist.cstate |=
+					BT_COEX_STATE_BTINFO_B_FTP_A2DP;
+			rtlpcipriv->bt_coexist.bt_profile_case =
+					BT_COEX_MECH_FTP_A2DP;
+			rtlpcipriv->bt_coexist.bt_profile_action =
+					BT_COEX_MECH_FTP_A2DP;
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "BTInfo: bFTP|bA2DP\n");
+			rtl8723ae_dm_bt_2_ant_fta2dp(hw);
+		} else {
+			rtlpcipriv->bt_coexist.cstate |=
+					 BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+			rtlpcipriv->bt_coexist.bt_profile_case =
+					 BT_COEX_MECH_NONE;
+			rtlpcipriv->bt_coexist.bt_profile_action =
+					 BT_COEX_MECH_NONE;
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+				 "[BTCoex], BTInfo: undefined case!!!!\n");
+			rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw);
+		}
+	}
+}
+
+static void _rtl8723ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8723ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3);
+	rtl8723ae_dm_bt_set_hw_pta_mode(hw, true);
+}
+
+void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, false);
+	rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+	rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+	rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false,
+					     TDMA_2ANT, TDMA_NAV_OFF);
+	rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT,
+				TDMA_NAV_OFF, TDMA_DAC_SWING_OFF);
+	rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, 0);
+	rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
+	rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, 2);
+	rtl8723ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10);
+	rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, false);
+}
+
+void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+	rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF);
+	rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, false);
+
+	rtl8723ae_bt_set_penalty_tx_rate_adap(hw, BT_TX_RATE_ADAPTIVE_NORMAL);
+	rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+	rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0);
+}
+
+static void rtl8723ae_dm_bt_query_bt_information(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 h2c_parameter[1] = {0};
+
+	rtlhal->hal_coex_8723.c2h_bt_info_req_sent = true;
+
+	h2c_parameter[0] |=  BIT(0);
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "Query Bt information, write 0x38 = 0x%x\n",
+		 h2c_parameter[0]);
+
+	rtl8723ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
+}
+
+static void rtl8723ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u32 reg_htx_rx, reg_ltx_rx, u32_tmp;
+	u32 reg_htx, reg_hrx, reg_ltx, reg_lrx;
+
+	reg_htx_rx = REG_HIGH_PRIORITY_TXRX;
+	reg_ltx_rx = REG_LOW_PRIORITY_TXRX;
+
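+	/* Each counter register packs the Tx count in the low word and the
+	 * Rx count in the high word.
+	 */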
+	u32_tmp = rtl_read_dword(rtlpriv, reg_htx_rx);
+	reg_htx = u32_tmp & MASKLWORD;
+	reg_hrx = (u32_tmp & MASKHWORD)>>16;
+
+	u32_tmp = rtl_read_dword(rtlpriv, reg_ltx_rx);
+	reg_ltx = u32_tmp & MASKLWORD;
+	reg_lrx = (u32_tmp & MASKHWORD)>>16;
+
+	if (rtlpcipriv->bt_coexist.lps_counter > 1) {
+		reg_htx %= rtlpcipriv->bt_coexist.lps_counter;
+		reg_hrx %= rtlpcipriv->bt_coexist.lps_counter;
+		reg_ltx %= rtlpcipriv->bt_coexist.lps_counter;
+		reg_lrx %= rtlpcipriv->bt_coexist.lps_counter;
+	}
+
+	rtlhal->hal_coex_8723.high_priority_tx = reg_htx;
+	rtlhal->hal_coex_8723.high_priority_rx = reg_hrx;
+	rtlhal->hal_coex_8723.low_priority_tx = reg_ltx;
+	rtlhal->hal_coex_8723.low_priority_rx = reg_lrx;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+		 reg_htx_rx, reg_htx, reg_htx, reg_hrx, reg_hrx);
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+		 reg_ltx_rx, reg_ltx, reg_ltx, reg_lrx, reg_lrx);
+	rtlpcipriv->bt_coexist.lps_counter = 0;
+}
+
+static void rtl8723ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	bool bt_alive = true;
+
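+	/* Counters that are all zero, or stuck at 0xeaea/0xffff, mean BT is
+	 * not responding; two consecutive such reads mark BT as disabled.
+	 */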
+	if (rtlhal->hal_coex_8723.high_priority_tx == 0 &&
+	    rtlhal->hal_coex_8723.high_priority_rx == 0 &&
+	    rtlhal->hal_coex_8723.low_priority_tx == 0 &&
+	    rtlhal->hal_coex_8723.low_priority_rx == 0)
+		bt_alive = false;
+	if (rtlhal->hal_coex_8723.high_priority_tx == 0xeaea &&
+	    rtlhal->hal_coex_8723.high_priority_rx == 0xeaea &&
+	    rtlhal->hal_coex_8723.low_priority_tx == 0xeaea &&
+	    rtlhal->hal_coex_8723.low_priority_rx == 0xeaea)
+		bt_alive = false;
+	if (rtlhal->hal_coex_8723.high_priority_tx == 0xffff &&
+	    rtlhal->hal_coex_8723.high_priority_rx == 0xffff &&
+	    rtlhal->hal_coex_8723.low_priority_tx == 0xffff &&
+	    rtlhal->hal_coex_8723.low_priority_rx == 0xffff)
+		bt_alive = false;
+	if (bt_alive) {
+		rtlpcipriv->bt_coexist.bt_active_zero_cnt = 0;
+		rtlpcipriv->bt_coexist.cur_bt_disabled = false;
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "8723A BT is enabled !!\n");
+	} else {
+		rtlpcipriv->bt_coexist.bt_active_zero_cnt++;
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "8723A bt all counters = 0, %d times!!\n",
+			 rtlpcipriv->bt_coexist.bt_active_zero_cnt);
+		if (rtlpcipriv->bt_coexist.bt_active_zero_cnt >= 2) {
+			rtlpcipriv->bt_coexist.cur_bt_disabled = true;
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "8723A BT is disabled !!\n");
+		}
+	}
+	if (rtlpcipriv->bt_coexist.pre_bt_disabled !=
+		rtlpcipriv->bt_coexist.cur_bt_disabled) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "8723A BT is from %s to %s!!\n",
+			 (rtlpcipriv->bt_coexist.pre_bt_disabled ?
+			 "disabled" : "enabled"),
+			 (rtlpcipriv->bt_coexist.cur_bt_disabled ?
+			 "disabled" : "enabled"));
+		rtlpcipriv->bt_coexist.pre_bt_disabled
+			= rtlpcipriv->bt_coexist.cur_bt_disabled;
+	}
+}
+
+void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	rtl8723ae_dm_bt_query_bt_information(hw);
+	rtl8723ae_dm_bt_bt_hw_counters_monitor(hw);
+	rtl8723ae_dm_bt_bt_enable_disable_check(hw);
+
+	if (rtlpcipriv->bt_coexist.bt_ant_num == ANT_X2) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], 2 Ant mechanism\n");
+		_rtl8723ae_dm_bt_coexist_2_ant(hw);
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "[BTCoex], 1 Ant mechanism\n");
+		_rtl8723ae_dm_bt_coexist_1_ant(hw);
+	}
+
+	if (!rtl8723ae_dm_bt_is_same_coexist_state(hw)) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+			 rtlpcipriv->bt_coexist.previous_state_h,
+			 rtlpcipriv->bt_coexist.previous_state,
+			 rtlpcipriv->bt_coexist.cstate_h,
+			 rtlpcipriv->bt_coexist.cstate);
+		rtlpcipriv->bt_coexist.previous_state
+			= rtlpcipriv->bt_coexist.cstate;
+		rtlpcipriv->bt_coexist.previous_state_h
+			= rtlpcipriv->bt_coexist.cstate_h;
+	}
+}
+
+static void rtl8723ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
+					  u8 *tmbuf, u8 len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 bt_info;
+	u8 i;
+
+	rtlhal->hal_coex_8723.c2h_bt_info_req_sent = false;
+	rtlhal->hal_coex_8723.bt_retry_cnt = 0;
+	for (i = 0; i < len; i++) {
+		if (i == 0)
+			rtlhal->hal_coex_8723.c2h_bt_info_original = tmbuf[i];
+		else if (i == 1)
+			rtlhal->hal_coex_8723.bt_retry_cnt = tmbuf[i];
+		if (i == len-1) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "0x%2x]", tmbuf[i]);
+		} else {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "0x%2x, ", tmbuf[i]);
+		}
+	}
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+		 "BT info bt_info (Data)= 0x%x\n",
+		 rtlhal->hal_coex_8723.c2h_bt_info_original);
+	bt_info = rtlhal->hal_coex_8723.c2h_bt_info_original;
+
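+	/* Bit 2 of the BT info byte reports inquiry/page activity. */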
+	if (bt_info & BIT(2))
+		rtlhal->hal_coex_8723.c2h_bt_inquiry_page = true;
+	else
+		rtlhal->hal_coex_8723.c2h_bt_inquiry_page = false;
+
+	if (bt_info & BTINFO_B_CONNECTION) {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTC2H], BTInfo: bConnect=true\n");
+		rtlpcipriv->bt_coexist.bt_busy = true;
+		rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT_IDLE;
+	} else {
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+			 "[BTC2H], BTInfo: bConnect=false\n");
+		rtlpcipriv->bt_coexist.bt_busy = false;
+		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT_IDLE;
+	}
+}
+
+void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct c2h_evt_hdr c2h_event;
+	u8 *ptmbuf;
+	u8 index;
+	u8 u1tmp;
+
+	memset(&c2h_event, 0, sizeof(c2h_event));
+	u1tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
+	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+		 "&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1tmp);
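+	/* The first C2H byte packs the command ID in the low nibble and the
+	 * payload length in the high nibble.
+	 */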
+	c2h_event.cmd_id = u1tmp & 0xF;
+	c2h_event.cmd_len = (u1tmp & 0xF0) >> 4;
+	c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
+	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+		 "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+		 c2h_event.cmd_id, c2h_event.cmd_len, c2h_event.cmd_seq);
+	u1tmp = rtl_read_byte(rtlpriv, 0x01AF);
+	if (u1tmp == C2H_EVT_HOST_CLOSE) {
+		return;
+	} else if (u1tmp != C2H_EVT_FW_CLOSE) {
+		rtl_write_byte(rtlpriv, 0x1AF, 0x00);
+		return;
+	}
+	ptmbuf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
+	if (ptmbuf == NULL) {
+		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+			 "malloc cmd buf failed\n");
+		return;
+	}
+
+	/* Read the content */
+	for (index = 0; index < c2h_event.cmd_len; index++)
+		ptmbuf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL +
+				  2 + index);
+
+	switch (c2h_event.cmd_id) {
+	case C2H_BT_RSSI:
+		break;
+
+	case C2H_BT_OP_MODE:
+		break;
+
+	case BT_INFO:
+		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+			 "BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id);
+		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+			 "BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq);
+		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+			 "BT info Byte[2] (Data)= 0x%x\n", ptmbuf[0]);
+
+		rtl8723ae_dm_bt_parse_bt_info(hw, ptmbuf, c2h_event.cmd_len);
+		break;
+	default:
+		break;
+	}
+	kfree(ptmbuf);
+
+	rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
new file mode 100644
index 0000000..4325ecd
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
@@ -0,0 +1,151 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ****************************************************************************
+ */
+
+#ifndef __RTL8723E_HAL_BTC_H__
+#define __RTL8723E_HAL_BTC_H__
+
+#include "../wifi.h"
+#include "btc.h"
+#include "hal_bt_coexist.h"
+
+#define	BT_TXRX_CNT_THRES_1		1200
+#define	BT_TXRX_CNT_THRES_2		1400
+#define	BT_TXRX_CNT_THRES_3		3000
+#define	BT_TXRX_CNT_LEVEL_0		0	/* < 1200 */
+#define	BT_TXRX_CNT_LEVEL_1		1	/* >= 1200 && < 1400 */
+#define	BT_TXRX_CNT_LEVEL_2		2	/* >= 1400 */
+#define	BT_TXRX_CNT_LEVEL_3		3
+
+/* TDMA mode definition */
+#define	TDMA_2ANT		0
+#define	TDMA_1ANT		1
+#define	TDMA_NAV_OFF		0
+#define	TDMA_NAV_ON		1
+#define	TDMA_DAC_SWING_OFF	0
+#define	TDMA_DAC_SWING_ON	1
+
+/* PTA mode related definition */
+#define	BT_PTA_MODE_OFF		0
+#define	BT_PTA_MODE_ON		1
+
+/* Penalty Tx Rate Adaptive */
+#define	BT_TX_RATE_ADAPTIVE_NORMAL	0
+#define	BT_TX_RATE_ADAPTIVE_LOW_PENALTY	1
+
+/* RF Corner */
+#define	BT_RF_RX_LPF_CORNER_RESUME	0
+#define	BT_RF_RX_LPF_CORNER_SHRINK	1
+
+#define C2H_EVT_HOST_CLOSE		0x00
+#define C2H_EVT_FW_CLOSE		0xFF
+
+enum bt_traffic_mode {
+	BT_MOTOR_EXT_BE = 0x00,
+	BT_MOTOR_EXT_GUL = 0x01,
+	BT_MOTOR_EXT_GUB = 0x02,
+	BT_MOTOR_EXT_GULB = 0x03
+};
+
+enum bt_traffic_mode_profile {
+	BT_PROFILE_NONE,
+	BT_PROFILE_A2DP,
+	BT_PROFILE_PAN,
+	BT_PROFILE_HID,
+	BT_PROFILE_SCO
+};
+
+enum hci_ext_bt_operation {
+	HCI_BT_OP_NONE = 0x0,
+	HCI_BT_OP_INQUIRE_START	= 0x1,
+	HCI_BT_OP_INQUIRE_FINISH = 0x2,
+	HCI_BT_OP_PAGING_START = 0x3,
+	HCI_BT_OP_PAGING_SUCCESS = 0x4,
+	HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
+	HCI_BT_OP_PAIRING_START = 0x6,
+	HCI_BT_OP_PAIRING_FINISH = 0x7,
+	HCI_BT_OP_BT_DEV_ENABLE = 0x8,
+	HCI_BT_OP_BT_DEV_DISABLE = 0x9,
+	HCI_BT_OP_MAX,
+};
+
+enum bt_spec {
+	BT_SPEC_1_0_b = 0x00,
+	BT_SPEC_1_1 = 0x01,
+	BT_SPEC_1_2 = 0x02,
+	BT_SPEC_2_0_EDR = 0x03,
+	BT_SPEC_2_1_EDR = 0x04,
+	BT_SPEC_3_0_HS = 0x05,
+	BT_SPEC_4_0 = 0x06
+};
+
+struct c2h_evt_hdr {
+	u8 cmd_id;
+	u8 cmd_len;
+	u8 cmd_seq;
+};
+
+enum bt_state {
+	BT_INFO_STATE_DISABLED = 0,
+	BT_INFO_STATE_NO_CONNECTION = 1,
+	BT_INFO_STATE_CONNECT_IDLE = 2,
+	BT_INFO_STATE_INQ_OR_PAG = 3,
+	BT_INFO_STATE_ACL_ONLY_BUSY = 4,
+	BT_INFO_STATE_SCO_ONLY_BUSY = 5,
+	BT_INFO_STATE_ACL_SCO_BUSY = 6,
+	BT_INFO_STATE_HID_BUSY = 7,
+	BT_INFO_STATE_HID_SCO_BUSY = 8,
+	BT_INFO_STATE_MAX = 9
+};
+
+enum rtl8723ae_c2h_evt {
+	C2H_DBG = 0,
+	C2H_TSF = 1,
+	C2H_AP_RPT_RSP = 2,
+	C2H_CCX_TX_RPT = 3,	/* The FW notify the report of the specific */
+				/* tx packet. */
+	C2H_BT_RSSI = 4,
+	C2H_BT_OP_MODE = 5,
+	C2H_HW_INFO_EXCH = 10,
+	C2H_C2H_H2C_TEST = 11,
+	BT_INFO = 12,
+	MAX_C2HEVENT
+};
+
+void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw);
+void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
+			      struct btdm_8723 *p_btdm);
+void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw);
+void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
+					   bool mstatus);
+void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
new file mode 100644
index 0000000..0a8c038
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -0,0 +1,2380 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+#include "btc.h"
+
+static void _rtl8723ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+					u8 set_bits, u8 clear_bits)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpci->reg_bcn_ctrl_val |= set_bits;
+	rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl8723ae_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp1byte;
+
+	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+	tmp1byte &= ~(BIT(0));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723ae_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp1byte;
+
+	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+	tmp1byte |= BIT(1);
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723ae_enable_bcn_sufunc(struct ieee80211_hw *hw)
+{
+	_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8723ae_disable_bcn_sufunc(struct ieee80211_hw *hw)
+{
+	_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	switch (variable) {
+	case HW_VAR_RCR:
+		*((u32 *) (val)) = rtlpci->receive_config;
+		break;
+	case HW_VAR_RF_STATE:
+		*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+		break;
+	case HW_VAR_FWLPS_RF_ON:{
+		enum rf_pwrstate rfState;
+		u32 val_rcr;
+
+		rtlpriv->cfg->ops->get_hw_reg(hw,
+					      HW_VAR_RF_STATE,
+					      (u8 *) (&rfState));
+		if (rfState == ERFOFF) {
+			*((bool *) (val)) = true;
+		} else {
+			val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+			val_rcr &= 0x00070000;
+			if (val_rcr)
+				*((bool *) (val)) = false;
+			else
+				*((bool *) (val)) = true;
+		}
+		break; }
+	case HW_VAR_FW_PSMODE_STATUS:
+		*((bool *) (val)) = ppsc->fw_current_inpsmode;
+		break;
+	case HW_VAR_CORRECT_TSF:{
+		u64 tsf;
+		u32 *ptsf_low = (u32 *)&tsf;
+		u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+		*ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+		*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+		*((u64 *) (val)) = tsf;
+
+		break; }
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not process\n");
+		break;
+	}
+}
+
+void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	u8 idx;
+
+	switch (variable) {
+	case HW_VAR_ETHER_ADDR:
+		for (idx = 0; idx < ETH_ALEN; idx++) {
+			rtl_write_byte(rtlpriv, (REG_MACID + idx),
+				       val[idx]);
+		}
+		break;
+	case HW_VAR_BASIC_RATE:{
+		u16 rate_cfg = ((u16 *) val)[0];
+		u8 rate_index = 0;
+		rate_cfg = rate_cfg & 0x15f;
+		rate_cfg |= 0x01;
+		rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+		rtl_write_byte(rtlpriv, REG_RRSR + 1,
+			       (rate_cfg >> 8) & 0xff);
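+		/* Find the index of the highest configured basic rate to use
+		 * as the initial RTS rate.
+		 */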
+		while (rate_cfg > 0x1) {
+			rate_cfg = (rate_cfg >> 1);
+			rate_index++;
+		}
+		rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+			       rate_index);
+		break; }
+	case HW_VAR_BSSID:
+		for (idx = 0; idx < ETH_ALEN; idx++) {
+			rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+				       val[idx]);
+		}
+		break;
+	case HW_VAR_SIFS:
+		rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+		rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+		rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+		rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+		if (!mac->ht_enable)
+			rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+				       0x0e0e);
+		else
+			rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+				       *((u16 *) val));
+		break;
+	case HW_VAR_SLOT_TIME:{
+		u8 e_aci;
+
+		RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+			 "HW_VAR_SLOT_TIME %x\n", val[0]);
+
+		rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+		for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_AC_PARAM,
+						      (u8 *) (&e_aci));
+		}
+		break; }
+	case HW_VAR_ACK_PREAMBLE:{
+		u8 reg_tmp;
+		u8 short_preamble = (bool) (*(u8 *) val);
+		reg_tmp = (mac->cur_40_prime_sc) << 5;
+		if (short_preamble)
+			reg_tmp |= 0x80;
+
+		rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+		break; }
+	case HW_VAR_AMPDU_MIN_SPACE:{
+		u8 min_spacing_to_set;
+		u8 sec_min_space;
+
+		min_spacing_to_set = *((u8 *) val);
+		if (min_spacing_to_set <= 7) {
+			sec_min_space = 0;
+
+			if (min_spacing_to_set < sec_min_space)
+				min_spacing_to_set = sec_min_space;
+
+			mac->min_space_cfg = ((mac->min_space_cfg &
+					       0xf8) |
+					      min_spacing_to_set);
+
+			*val = min_spacing_to_set;
+
+			RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+				 "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+				  mac->min_space_cfg);
+
+			rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+				       mac->min_space_cfg);
+		}
+		break; }
+	case HW_VAR_SHORTGI_DENSITY:{
+		u8 density_to_set;
+
+		density_to_set = *((u8 *) val);
+		mac->min_space_cfg |= (density_to_set << 3);
+
+		RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+			 "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+			 mac->min_space_cfg);
+
+		rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+			       mac->min_space_cfg);
+
+		break; }
+	case HW_VAR_AMPDU_FACTOR:{
+		u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+		u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97};
+		u8 factor_toset;
+		u8 *p_regtoset = NULL;
+		u8 index;
+
+		if ((pcipriv->bt_coexist.bt_coexistence) &&
+		    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+			p_regtoset = regtoset_bt;
+		else
+			p_regtoset = regtoset_normal;
+
+		factor_toset = *((u8 *) val);
+		if (factor_toset <= 3) {
+			factor_toset = (1 << (factor_toset + 2));
+			if (factor_toset > 0xf)
+				factor_toset = 0xf;
+
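+			/* Clamp both nibbles of each aggregation-length byte
+			 * so neither exceeds the requested factor.
+			 */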
+			for (index = 0; index < 4; index++) {
+				if ((p_regtoset[index] & 0xf0) >
+				    (factor_toset << 4))
+					p_regtoset[index] =
+					    (p_regtoset[index] & 0x0f) |
+					    (factor_toset << 4);
+
+				if ((p_regtoset[index] & 0x0f) >
+				    factor_toset)
+					p_regtoset[index] =
+					    (p_regtoset[index] & 0xf0) |
+					    (factor_toset);
+
+				rtl_write_byte(rtlpriv,
+					       (REG_AGGLEN_LMT + index),
+					       p_regtoset[index]);
+
+			}
+
+			RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+				 "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+				 factor_toset);
+		}
+		break; }
+	case HW_VAR_AC_PARAM:{
+		u8 e_aci = *((u8 *) val);
+		rtl8723ae_dm_init_edca_turbo(hw);
+
+		if (rtlpci->acm_method != eAcmWay2_SW)
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_ACM_CTRL,
+						      (u8 *) (&e_aci));
+		break; }
+	case HW_VAR_ACM_CTRL:{
+		u8 e_aci = *((u8 *) val);
+		union aci_aifsn *p_aci_aifsn =
+		    (union aci_aifsn *)(&(mac->ac[0].aifs));
+		u8 acm = p_aci_aifsn->f.acm;
+		u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+		acm_ctrl |= ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+		if (acm) {
+			switch (e_aci) {
+			case AC0_BE:
+				acm_ctrl |= AcmHw_BeqEn;
+				break;
+			case AC2_VI:
+				acm_ctrl |= AcmHw_ViqEn;
+				break;
+			case AC3_VO:
+				acm_ctrl |= AcmHw_VoqEn;
+				break;
+			default:
+				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+					 acm);
+				break;
+			}
+		} else {
+			switch (e_aci) {
+			case AC0_BE:
+				acm_ctrl &= (~AcmHw_BeqEn);
+				break;
+			case AC2_VI:
+				acm_ctrl &= (~AcmHw_ViqEn);
+				break;
+			case AC3_VO:
+				acm_ctrl &= (~AcmHw_VoqEn);
+				break;
+			default:
+				RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+					 "switch case not processed\n");
+				break;
+			}
+		}
+
+		RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+			 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+			 acm_ctrl);
+		rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+		break; }
+	case HW_VAR_RCR:
+		rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+		rtlpci->receive_config = ((u32 *) (val))[0];
+		break;
+	case HW_VAR_RETRY_LIMIT:{
+		u8 retry_limit = ((u8 *) (val))[0];
+
+		rtl_write_word(rtlpriv, REG_RL,
+			       retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+			       retry_limit << RETRY_LIMIT_LONG_SHIFT);
+		break; }
+	case HW_VAR_DUAL_TSF_RST:
+		rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+		break;
+	case HW_VAR_EFUSE_BYTES:
+		rtlefuse->efuse_usedbytes = *((u16 *) val);
+		break;
+	case HW_VAR_EFUSE_USAGE:
+		rtlefuse->efuse_usedpercentage = *((u8 *) val);
+		break;
+	case HW_VAR_IO_CMD:
+		rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
+		break;
+	case HW_VAR_WPA_CONFIG:
+		rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+		break;
+	case HW_VAR_SET_RPWM:{
+		u8 rpwm_val;
+
+		rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+		udelay(1);
+
+		if (rpwm_val & BIT(7)) {
+			rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+				       (*(u8 *) val));
+		} else {
+			rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+				       ((*(u8 *) val) | BIT(7)));
+		}
+
+		break; }
+	case HW_VAR_H2C_FW_PWRMODE:{
+		u8 psmode = (*(u8 *) val);
+
+		if (psmode != FW_PS_ACTIVE_MODE)
+			rtl8723ae_dm_rf_saving(hw, true);
+
+		rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+		break; }
+	case HW_VAR_FW_PSMODE_STATUS:
+		ppsc->fw_current_inpsmode = *((bool *) val);
+		break;
+	case HW_VAR_H2C_FW_JOINBSSRPT:{
+		u8 mstatus = (*(u8 *) val);
+		u8 tmp_regcr, tmp_reg422;
+		bool recover = false;
+
+		if (mstatus == RT_MEDIA_CONNECT) {
+			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
+
+			tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+			rtl_write_byte(rtlpriv, REG_CR + 1,
+				       (tmp_regcr | BIT(0)));
+
+			_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+			_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+			tmp_reg422 = rtl_read_byte(rtlpriv,
+				     REG_FWHW_TXQ_CTRL + 2);
+			if (tmp_reg422 & BIT(6))
+				recover = true;
+			rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+				       tmp_reg422 & (~BIT(6)));
+
+			rtl8723ae_set_fw_rsvdpagepkt(hw, 0);
+
+			_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+			_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+			if (recover)
+				rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+					       tmp_reg422);
+
+			rtl_write_byte(rtlpriv, REG_CR + 1,
+				       (tmp_regcr & ~(BIT(0))));
+		}
+		rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+
+		break; }
+	case HW_VAR_AID:{
+		u16 u2btmp;
+		u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+		u2btmp &= 0xC000;
+		rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+				mac->assoc_id));
+		break; }
+	case HW_VAR_CORRECT_TSF:{
+		u8 btype_ibss = ((u8 *) (val))[0];
+
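+		/* For IBSS, stop beaconing while the TSF registers are
+		 * reloaded, then resume it afterwards.
+		 */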
+		if (btype_ibss == true)
+			_rtl8723ae_stop_tx_beacon(hw);
+
+		_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+		rtl_write_dword(rtlpriv, REG_TSFTR,
+				(u32) (mac->tsf & 0xffffffff));
+		rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+				(u32) ((mac->tsf >> 32) & 0xffffffff));
+
+		_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+		if (btype_ibss == true)
+			_rtl8723ae_resume_tx_beacon(hw);
+		break; }
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not processed\n");
+		break;
+	}
+}
+
+static bool _rtl8723ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool status = true;
+	long count = 0;
+	u32 value = _LLT_INIT_ADDR(address) |
+	    _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
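+	/* Poll until the hardware clears the op field, which indicates the
+	 * LLT write has completed.
+	 */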
+	do {
+		value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+		if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+			break;
+
+		if (count > POLLING_LLT_THRESHOLD) {
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "Failed to polling write LLT done at address %d!\n",
+				 address);
+			status = false;
+			break;
+		}
+	} while (++count);
+
+	return status;
+}
+
+static bool _rtl8723ae_llt_table_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	unsigned short i;
+	u8 txpktbuf_bndy;
+	u8 maxPage;
+	bool status;
+	u8 ubyte;
+
+	maxPage = 255;
+	txpktbuf_bndy = 246;
+
+	rtl_write_byte(rtlpriv, REG_CR, 0x8B);
+
+	rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000);
+
+	rtl_write_dword(rtlpriv, REG_RQPN, 0x80ac1c29);
+	rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x03);
+
+	rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy));
+	rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+	rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+	rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+	rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+	rtl_write_byte(rtlpriv, REG_PBP, 0x11);
+	rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
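+	/* Chain pages 0 .. (txpktbuf_bndy - 2) for the normal Tx buffer,
+	 * terminate that list at the boundary page, and link the remaining
+	 * pages into a ring buffer.
+	 */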
+	for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+		status = _rtl8723ae_llt_write(hw, i, i + 1);
+		if (true != status)
+			return status;
+	}
+
+	status = _rtl8723ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+	if (true != status)
+		return status;
+
+	for (i = txpktbuf_bndy; i < maxPage; i++) {
+		status = _rtl8723ae_llt_write(hw, i, (i + 1));
+		if (true != status)
+			return status;
+	}
+
+	status = _rtl8723ae_llt_write(hw, maxPage, txpktbuf_bndy);
+	if (true != status)
+		return status;
+
+	rtl_write_byte(rtlpriv, REG_CR, 0xff);
+	ubyte = rtl_read_byte(rtlpriv, REG_RQPN + 3);
+	rtl_write_byte(rtlpriv, REG_RQPN + 3, ubyte | BIT(7));
+
+	return true;
+}
+
+static void _rtl8723ae_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+
+	if (rtlpriv->rtlhal.up_first_time)
+		return;
+
+	if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+		rtl8723ae_sw_led_on(hw, pLed0);
+	else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+		rtl8723ae_sw_led_on(hw, pLed0);
+	else
+		rtl8723ae_sw_led_off(hw, pLed0);
+}
+
+static bool _rtl8712e_init_mac(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	unsigned char bytetmp;
+	unsigned short wordtmp;
+	u16 retry = 0;
+	u16 tmpu2b;
+	bool mac_func_enable;
+
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+	bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+	if (bytetmp == 0xFF)
+		mac_func_enable = true;
+	else
+		mac_func_enable = false;
+
+	/* HW Power on sequence */
+	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+		PWR_INTF_PCI_MSK, Rtl8723_NIC_ENABLE_FLOW))
+		return false;
+
+	bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2);
+	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+2, bytetmp | BIT(4));
+
+	/* eMAC time out function enable, 0x369[7]=1 */
+	bytetmp = rtl_read_byte(rtlpriv, 0x369);
+	rtl_write_byte(rtlpriv, 0x369, bytetmp | BIT(7));
+
+	/* ePHY reg 0x1e bit[4]=1 using MDIO interface,
+	 * we should do this before Enabling ASPM backdoor.
+	 */
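+	/* Retry the write until the value read back matches, at most 100
+	 * times.
+	 */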
+	do {
+		rtl_write_word(rtlpriv, 0x358, 0x5e);
+		udelay(100);
+		rtl_write_word(rtlpriv, 0x356, 0xc280);
+		rtl_write_word(rtlpriv, 0x354, 0xc290);
+		rtl_write_word(rtlpriv, 0x358, 0x3e);
+		udelay(100);
+		rtl_write_word(rtlpriv, 0x358, 0x5e);
+		udelay(100);
+		tmpu2b = rtl_read_word(rtlpriv, 0x356);
+		retry++;
+	} while (tmpu2b != 0xc290 && retry < 100);
+
+	if (retry >= 100) {
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+			 "InitMAC(): ePHY configure fail!!!\n");
+		return false;
+	}
+
+	rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+	rtl_write_word(rtlpriv, REG_CR + 1, 0x06);
+
+	if (!mac_func_enable) {
+		if (_rtl8723ae_llt_table_init(hw) == false)
+			return false;
+	}
+
+	rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+	rtl_write_byte(rtlpriv, REG_HISRE, 0xff);
+
+	rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff);
+
+	wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0xf;
+	wordtmp |= 0xF771;
+	rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+	rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+	rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+	rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+	rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
+			DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
+			DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_HQ_DESA,
+			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
+			DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_RX_DESA,
+			(u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
+			DMA_BIT_MASK(32));
+
+	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x74);
+
+	rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+	bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+	rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+	do {
+		retry++;
+		bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+	} while ((retry < 200) && (bytetmp & BIT(7)));
+
+	_rtl8723ae_gen_refresh_led_state(hw);
+
+	rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+	return true;
+}
+
+static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u8 reg_bw_opmode;
+	u32 reg_ratr, reg_prsr;
+
+	reg_bw_opmode = BW_OPMODE_20MHZ;
+	reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+	    RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+	reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+	rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
+
+	rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+	rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
+
+	rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+
+	rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0);
+
+	rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80);
+
+	rtl_write_word(rtlpriv, REG_RL, 0x0707);
+
+	rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802);
+
+	rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+
+	rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+	rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+	rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+	rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+
+	if ((pcipriv->bt_coexist.bt_coexistence) &&
+	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+		rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
+	else
+		rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
+
+	rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
+
+	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+
+	rtlpci->reg_bcn_ctrl_val = 0x1f;
+	rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+
+	rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+
+	if ((pcipriv->bt_coexist.bt_coexistence) &&
+	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
+		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
+		rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
+	} else {
+		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
+		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
+	}
+
+	if ((pcipriv->bt_coexist.bt_coexistence) &&
+	     (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+		rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
+	else
+		rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
+
+	rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+
+	rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
+	rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010);
+
+	rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010);
+
+	rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
+
+	rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
+	rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
+
+	rtl_write_dword(rtlpriv, 0x394, 0x1);
+}
+
+static void _rtl8723ae_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	rtl_write_byte(rtlpriv, 0x34b, 0x93);
+	rtl_write_word(rtlpriv, 0x350, 0x870c);
+	rtl_write_byte(rtlpriv, 0x352, 0x1);
+
+	if (ppsc->support_backdoor)
+		rtl_write_byte(rtlpriv, 0x349, 0x1b);
+	else
+		rtl_write_byte(rtlpriv, 0x349, 0x03);
+
+	rtl_write_word(rtlpriv, 0x350, 0x2718);
+	rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
+void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 sec_reg_value;
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+		 "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+		 rtlpriv->sec.pairwise_enc_algorithm,
+		 rtlpriv->sec.group_enc_algorithm);
+
+	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+			 "not open hw encryption\n");
+		return;
+	}
+
+	sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+
+	if (rtlpriv->sec.use_defaultkey) {
+		sec_reg_value |= SCR_TxUseDK;
+		sec_reg_value |= SCR_RxUseDK;
+	}
+
+	sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+	rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+
+	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+		 "The SECR-value %x\n", sec_reg_value);
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+
+}
+
+int rtl8723ae_hw_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	bool rtstatus = true;
+	int err;
+	u8 tmp_u1b;
+
+	rtlpriv->rtlhal.being_init_adapter = true;
+	rtlpriv->intf_ops->disable_aspm(hw);
+	rtstatus = _rtl8712e_init_mac(hw);
+	if (rtstatus != true) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+		err = 1;
+		return err;
+	}
+
+	err = rtl8723ae_download_fw(hw);
+	if (err) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "Failed to download FW. Init HW without FW now..\n");
+		err = 1;
+		rtlhal->fw_ready = false;
+		return err;
+	} else {
+		rtlhal->fw_ready = true;
+	}
+
+	rtlhal->last_hmeboxnum = 0;
+	rtl8723ae_phy_mac_config(hw);
+	/* Because the last function modifies RCR, update the cached
+	 * receive_config here or throughput will be unstable since the
+	 * cached value is wrong: RCR_ACRC32 makes Rx throughput unstable
+	 * and RCR_APP_ICV causes mac80211 to disassociate from Cisco 1252 APs.
+	 */
+	rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+	rtl8723ae_phy_bb_config(hw);
+	rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+	rtl8723ae_phy_rf_config(hw);
+	if (IS_VENDOR_UMC_A_CUT(rtlhal->version)) {
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+	} else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
+	}
+	rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+						 RF_CHNLBW, RFREG_OFFSET_MASK);
+	rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+						 RF_CHNLBW, RFREG_OFFSET_MASK);
+	rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+	rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+	rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+	_rtl8723ae_hw_configure(hw);
+	rtl_cam_reset_all_entry(hw);
+	rtl8723ae_enable_hw_security_config(hw);
+
+	ppsc->rfpwr_state = ERFON;
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+	_rtl8723ae_enable_aspm_back_door(hw);
+	rtlpriv->intf_ops->enable_aspm(hw);
+
+	rtl8723ae_bt_hw_init(hw);
+
+	if (ppsc->rfpwr_state == ERFON) {
+		rtl8723ae_phy_set_rfpath_switch(hw, 1);
+		if (rtlphy->iqk_initialized) {
+			rtl8723ae_phy_iq_calibrate(hw, true);
+		} else {
+			rtl8723ae_phy_iq_calibrate(hw, false);
+			rtlphy->iqk_initialized = true;
+		}
+
+		rtl8723ae_phy_lc_calibrate(hw);
+	}
+
+	tmp_u1b = efuse_read_1byte(hw, 0x1FA);
+	if (!(tmp_u1b & BIT(0))) {
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+	}
+
+	if (!(tmp_u1b & BIT(4))) {
+		tmp_u1b = rtl_read_byte(rtlpriv, 0x16) & 0x0F;
+		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
+		udelay(10);
+		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+	}
+	rtl8723ae_dm_init(hw);
+	rtlpriv->rtlhal.being_init_adapter = false;
+	return err;
+}
+
+static enum version_8723e _rtl8723ae_read_chip_version(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	enum version_8723e version = 0x0000;
+	u32 value32;
+
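+	/* TRP_VAUX_EN set indicates a test chip; otherwise this is a normal
+	 * mass production chip.
+	 */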
+	value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+	if (value32 & TRP_VAUX_EN) {
+		version = (enum version_8723e)(version |
+			  ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
+		/* RTL8723 with BT function. */
+		version = (enum version_8723e)(version |
+			  ((value32 & BT_FUNC) ? CHIP_8723 : 0));
+
+	} else {
+		/* Normal mass production chip. */
+		version = (enum version_8723e) NORMAL_CHIP;
+		version = (enum version_8723e)(version |
+			  ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
+		/* RTL8723 with BT function. */
+		version = (enum version_8723e)(version |
+			  ((value32 & BT_FUNC) ? CHIP_8723 : 0));
+		if (IS_CHIP_VENDOR_UMC(version))
+			version = (enum version_8723e)(version |
+			((value32 & CHIP_VER_RTL_MASK)));/* IC version (CUT) */
+		if (IS_8723_SERIES(version)) {
+			value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
+			/* ROM code version */
+			version = (enum version_8723e)(version |
+				  ((value32 & RF_RL_ID)>>20));
+		}
+	}
+
+	if (IS_8723_SERIES(version)) {
+		value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+		rtlphy->polarity_ctl = ((value32 & WL_HWPDN_SL) ?
+				       RT_POLARITY_HIGH_ACT :
+				       RT_POLARITY_LOW_ACT);
+	}
+	switch (version) {
+	case VERSION_TEST_UMC_CHIP_8723:
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
+		break;
+	case VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT:
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
+		break;
+	case VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT:
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Chip Version ID: Unknown. Bug?\n");
+		break;
+	}
+
+	if (IS_8723_SERIES(version))
+		rtlphy->rf_type = RF_1T1R;
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+		(rtlphy->rf_type == RF_2T2R) ? "RF_2T2R" : "RF_1T1R");
+
+	return version;
+}
+
+static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
+				     enum nl80211_iftype type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
+	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+	rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+	RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+		 "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
+
+	if (type == NL80211_IFTYPE_UNSPECIFIED ||
+	    type == NL80211_IFTYPE_STATION) {
+		_rtl8723ae_stop_tx_beacon(hw);
+		_rtl8723ae_enable_bcn_sufunc(hw);
+	} else if (type == NL80211_IFTYPE_ADHOC ||
+		type == NL80211_IFTYPE_AP) {
+		_rtl8723ae_resume_tx_beacon(hw);
+		_rtl8723ae_disable_bcn_sufunc(hw);
+	} else {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+			 type);
+	}
+
+	switch (type) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+		bt_msr |= MSR_NOLINK;
+		ledaction = LED_CTL_LINK;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Set Network type to NO LINK!\n");
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		bt_msr |= MSR_ADHOC;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Set Network type to Ad Hoc!\n");
+		break;
+	case NL80211_IFTYPE_STATION:
+		bt_msr |= MSR_INFRA;
+		ledaction = LED_CTL_LINK;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Set Network type to STA!\n");
+		break;
+	case NL80211_IFTYPE_AP:
+		bt_msr |= MSR_AP;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "Set Network type to AP!\n");
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Network type %d not supported!\n",
+			 type);
+		return 1;
+
+	}
+
+	rtl_write_byte(rtlpriv, (MSR), bt_msr);
+	rtlpriv->cfg->ops->led_control(hw, ledaction);
+	if ((bt_msr & 0x03) == MSR_AP)
+		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+	else
+		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+	return 0;
+}
+
+void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u32 reg_rcr = rtlpci->receive_config;
+
+	if (rtlpriv->psc.rfpwr_state != ERFON)
+		return;
+
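+	/* Toggle BSSID filtering of data and beacon frames in the RCR. */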
+	if (check_bssid == true) {
+		reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+					      (u8 *)(&reg_rcr));
+		_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+	} else if (check_bssid == false) {
+		reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+		_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+			HW_VAR_RCR, (u8 *) (&reg_rcr));
+	}
+}
+
+int rtl8723ae_set_network_type(struct ieee80211_hw *hw,
+			       enum nl80211_iftype type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (_rtl8723ae_set_media_status(hw, type))
+		return -EOPNOTSUPP;
+
+	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+		if (type != NL80211_IFTYPE_AP)
+			rtl8723ae_set_check_bssid(hw, true);
+	} else {
+		rtl8723ae_set_check_bssid(hw, false);
+	}
+	return 0;
+}
+
+/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
+void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl8723ae_dm_init_edca_turbo(hw);
+	switch (aci) {
+	case AC1_BK:
+		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+		break;
+	case AC0_BE:
+		/* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4ac_param); */
+		break;
+	case AC2_VI:
+		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+		break;
+	case AC3_VO:
+		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+		break;
+	default:
+		RT_ASSERT(false, "invalid aci: %d !\n", aci);
+		break;
+	}
+}
+
+void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	rtl_write_dword(rtlpriv, 0x3a8, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+	rtl_write_dword(rtlpriv, 0x3ac, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+	rtlpci->irq_enabled = true;
+}
+
+void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
+	rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
+	rtlpci->irq_enabled = false;
+	synchronize_irq(rtlpci->pdev->irq);
+}
+
+static void _rtl8723ae_poweroff_adapter(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 u1tmp;
+
+	/* Combo (PCIe + USB) Card and PCIe-MF Card */
+	/* 1. Run LPS WL RFOFF flow */
+	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+		PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW);
+
+	/* 2. 0x1F[7:0] = 0 */
+	/* turn off RF */
+	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+	if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
+		rtl8723ae_firmware_selfreset(hw);
+
+	/* Reset MCU. Suggested by Filen. */
+	u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1tmp & (~BIT(2))));
+
+	/* g.	MCUFWDL 0x80[1:0]=0	 */
+	/* reset MCU ready status */
+	rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+	/* HW card disable configuration. */
+	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+		PWR_INTF_PCI_MSK, Rtl8723_NIC_DISABLE_FLOW);
+
+	/* Reset MCU IO Wrapper */
+	u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1tmp & (~BIT(0))));
+	u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1tmp | BIT(0));
+
+	/* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+	/* lock ISO/CLK/Power control register */
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8723ae_card_disable(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	enum nl80211_iftype opmode;
+
+	mac->link_state = MAC80211_NOLINK;
+	opmode = NL80211_IFTYPE_UNSPECIFIED;
+	_rtl8723ae_set_media_status(hw, opmode);
+	if (rtlpci->driver_is_goingto_unload ||
+	    ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+		rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+	_rtl8723ae_poweroff_adapter(hw);
+
+	/* after power off we should do iqk again */
+	rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw,
+				    u32 *p_inta, u32 *p_intb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	*p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+}
+
+void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u16 bcn_interval, atim_window;
+
+	bcn_interval = mac->beacon_interval;
+	atim_window = 2;	/*FIX MERGE */
+	rtl8723ae_disable_interrupt(hw);
+	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+	rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+	rtl_write_byte(rtlpriv, 0x606, 0x30);
+	rtl8723ae_enable_interrupt(hw);
+}
+
+void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u16 bcn_interval = mac->beacon_interval;
+
+	RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+		 "beacon_interval:%d\n", bcn_interval);
+	rtl8723ae_disable_interrupt(hw);
+	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+	rtl8723ae_enable_interrupt(hw);
+}
+
+void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw,
+				     u32 add_msr, u32 rm_msr)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+		 "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+
+	if (add_msr)
+		rtlpci->irq_mask[0] |= add_msr;
+	if (rm_msr)
+		rtlpci->irq_mask[0] &= (~rm_msr);
+	rtl8723ae_disable_interrupt(hw);
+	rtl8723ae_enable_interrupt(hw);
+}
+
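+/* Map a zero-based channel index to its tx power group:
+ * 0-2 -> group 0, 3-8 -> group 1, 9-13 -> group 2.
+ */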
+static u8 _rtl8723ae_get_chnl_group(u8 chnl)
+{
+	u8 group;
+
+	if (chnl < 3)
+		group = 0;
+	else if (chnl < 9)
+		group = 1;
+	else
+		group = 2;
+	return group;
+}
+
+static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+						   bool autoload_fail,
+						   u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 rf_path, index, tempval;
+	u16 i;
+
+	for (rf_path = 0; rf_path < 1; rf_path++) {
+		for (i = 0; i < 3; i++) {
+			if (!autoload_fail) {
+				rtlefuse->eeprom_chnlarea_txpwr_cck
+				    [rf_path][i] =
+				    hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+				    [rf_path][i] =
+				    hwinfo[EEPROM_TXPOWERHT40_1S + rf_path *
+				    3 + i];
+			} else {
+				rtlefuse->eeprom_chnlarea_txpwr_cck
+				    [rf_path][i] =
+				    EEPROM_DEFAULT_TXPOWERLEVEL;
+				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+				    [rf_path][i] =
+				    EEPROM_DEFAULT_TXPOWERLEVEL;
+			}
+		}
+	}
+
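+	/* Each HT40 2S diff byte carries path A in the low nibble and path B
+	 * in the high nibble.
+	 */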
+	for (i = 0; i < 3; i++) {
+		if (!autoload_fail)
+			tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+		else
+			tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
+		    (tempval & 0xf);
+		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
+		    ((tempval & 0xf0) >> 4);
+	}
+
+	for (rf_path = 0; rf_path < 2; rf_path++)
+		for (i = 0; i < 3; i++)
+			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+				"RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+				i, rtlefuse->eeprom_chnlarea_txpwr_cck
+				[rf_path][i]);
+	for (rf_path = 0; rf_path < 2; rf_path++)
+		for (i = 0; i < 3; i++)
+			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+				"RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+				rf_path, i,
+				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+				[rf_path][i]);
+	for (rf_path = 0; rf_path < 2; rf_path++)
+		for (i = 0; i < 3; i++)
+			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+				"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+				rf_path, i,
+				rtlefuse->eprom_chnl_txpwr_ht40_2sdf
+				[rf_path][i]);
+
+	for (rf_path = 0; rf_path < 2; rf_path++) {
+		for (i = 0; i < 14; i++) {
+			index = _rtl8723ae_get_chnl_group((u8) i);
+
+			rtlefuse->txpwrlevel_cck[rf_path][i] =
+				rtlefuse->eeprom_chnlarea_txpwr_cck
+							[rf_path][index];
+			rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+							[rf_path][index];
+
+			if ((rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+			    [rf_path][index] -
+			    rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path]
+			    [index]) > 0) {
+				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+					rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+					[rf_path][index] -
+					rtlefuse->eprom_chnl_txpwr_ht40_2sdf
+					[rf_path][index];
+			} else {
+				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+			}
+		}
+
+		for (i = 0; i < 14; i++) {
+			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+				"RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+				"[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+				rtlefuse->txpwrlevel_cck[rf_path][i],
+				rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+				rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
+		}
+	}
+
+	for (i = 0; i < 3; i++) {
+		if (!autoload_fail) {
+			rtlefuse->eeprom_pwrlimit_ht40[i] =
+			    hwinfo[EEPROM_TXPWR_GROUP + i];
+			rtlefuse->eeprom_pwrlimit_ht20[i] =
+			    hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+		} else {
+			rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+			rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+		}
+	}
+
+	for (rf_path = 0; rf_path < 2; rf_path++) {
+		for (i = 0; i < 14; i++) {
+			index = _rtl8723ae_get_chnl_group((u8) i);
+
+			if (rf_path == RF90_PATH_A) {
+				rtlefuse->pwrgroup_ht20[rf_path][i] =
+				    (rtlefuse->eeprom_pwrlimit_ht20[index] &
+				    0xf);
+				rtlefuse->pwrgroup_ht40[rf_path][i] =
+				    (rtlefuse->eeprom_pwrlimit_ht40[index] &
+				    0xf);
+			} else if (rf_path == RF90_PATH_B) {
+				rtlefuse->pwrgroup_ht20[rf_path][i] =
+				    ((rtlefuse->eeprom_pwrlimit_ht20[index] &
+				    0xf0) >> 4);
+				rtlefuse->pwrgroup_ht40[rf_path][i] =
+				    ((rtlefuse->eeprom_pwrlimit_ht40[index] &
+				    0xf0) >> 4);
+			}
+
+			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+				"RF-%d pwrgroup_ht20[%d] = 0x%x\n", rf_path, i,
+				rtlefuse->pwrgroup_ht20[rf_path][i]);
+			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+				"RF-%d pwrgroup_ht40[%d] = 0x%x\n", rf_path, i,
+				rtlefuse->pwrgroup_ht40[rf_path][i]);
+		}
+	}
+
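+	/* HT20 and legacy-to-HT diffs hold one 4-bit value per path; the
+	 * HT20 diff is signed, so BIT(3) triggers a sign extension.
+	 */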
+	for (i = 0; i < 14; i++) {
+		index = _rtl8723ae_get_chnl_group((u8) i);
+
+		if (!autoload_fail)
+			tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+		else
+			tempval = EEPROM_DEFAULT_HT20_DIFF;
+
+		rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+		rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+		    ((tempval >> 4) & 0xF);
+
+		if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+			rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+
+		if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+			rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+
+		index = _rtl8723ae_get_chnl_group((u8) i);
+
+		if (!autoload_fail)
+			tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+		else
+			tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+
+		rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+		rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+		    ((tempval >> 4) & 0xF);
+	}
+
+	rtlefuse->legacy_ht_txpowerdiff =
+	    rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+
+	for (i = 0; i < 14; i++)
+		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+			"RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+			rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
+	for (i = 0; i < 14; i++)
+		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+			"RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+			rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
+	for (i = 0; i < 14; i++)
+		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+			"RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+			rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
+	for (i = 0; i < 14; i++)
+		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+			"RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+			rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
+
+	if (!autoload_fail)
+		rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+	else
+		rtlefuse->eeprom_regulatory = 0;
+	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+		"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+
+	if (!autoload_fail)
+		rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+	else
+		rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+		"TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+		rtlefuse->eeprom_tssi[RF90_PATH_A],
+		rtlefuse->eeprom_tssi[RF90_PATH_B]);
+
+	if (!autoload_fail)
+		tempval = hwinfo[EEPROM_THERMAL_METER];
+	else
+		tempval = EEPROM_DEFAULT_THERMALMETER;
+	rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+
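+	/* An all-ones (0x1f) reading means no valid thermal meter value */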
+	if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+		rtlefuse->apk_thermalmeterignore = true;
+
+	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+		"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+}
+
+static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
+					 bool pseudo_test)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u16 i, usvalue;
+	u8 hwinfo[HWSET_MAX_SIZE];
+	u16 eeprom_id;
+
+	if (pseudo_test) {
+		/* need add */
+		return;
+	}
+	if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+		rtl_efuse_shadow_map_update(hw);
+
+		memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+		       HWSET_MAX_SIZE);
+	} else if (rtlefuse->epromtype == EEPROM_93C46) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "RTL819X Not boot from eeprom, check it !!");
+	}
+
+	RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
+		      hwinfo, HWSET_MAX_SIZE);
+
+	eeprom_id = *((u16 *)&hwinfo[0]);
+	if (eeprom_id != RTL8190_EEPROM_ID) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+		rtlefuse->autoload_failflag = true;
+	} else {
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+		rtlefuse->autoload_failflag = false;
+	}
+
+	if (rtlefuse->autoload_failflag)
+		return;
+
+	rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+	rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+	rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+	rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROMId = 0x%4x\n", eeprom_id);
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+
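+	/* The MAC address is stored as three consecutive 16-bit words */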
+	for (i = 0; i < 6; i += 2) {
+		usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+		*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+	}
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+		 "dev_addr: %pM\n", rtlefuse->dev_addr);
+
+	_rtl8723ae_read_txpower_info_from_hwpg(hw,
+			rtlefuse->autoload_failflag, hwinfo);
+
+	rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
+			rtlefuse->autoload_failflag, hwinfo);
+
+	rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+	rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+	rtlefuse->txpwr_fromeprom = true;
+	rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+		 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+
+	/* set channel plan to world wide 13 */
+	rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+
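+	/* Derive the customer (OEM) ID from the PCI DID/SVID/SMID values */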
+	if (rtlhal->oem_id == RT_CID_DEFAULT) {
+		switch (rtlefuse->eeprom_oemid) {
+		case EEPROM_CID_DEFAULT:
+			if (rtlefuse->eeprom_did == 0x8176) {
+				if (CHK_SVID_SMID(0x10EC, 0x6151) ||
+				    CHK_SVID_SMID(0x10EC, 0x6152) ||
+				    CHK_SVID_SMID(0x10EC, 0x6154) ||
+				    CHK_SVID_SMID(0x10EC, 0x6155) ||
+				    CHK_SVID_SMID(0x10EC, 0x6177) ||
+				    CHK_SVID_SMID(0x10EC, 0x6178) ||
+				    CHK_SVID_SMID(0x10EC, 0x6179) ||
+				    CHK_SVID_SMID(0x10EC, 0x6180) ||
+				    CHK_SVID_SMID(0x10EC, 0x8151) ||
+				    CHK_SVID_SMID(0x10EC, 0x8152) ||
+				    CHK_SVID_SMID(0x10EC, 0x8154) ||
+				    CHK_SVID_SMID(0x10EC, 0x8155) ||
+				    CHK_SVID_SMID(0x10EC, 0x8181) ||
+				    CHK_SVID_SMID(0x10EC, 0x8182) ||
+				    CHK_SVID_SMID(0x10EC, 0x8184) ||
+				    CHK_SVID_SMID(0x10EC, 0x8185) ||
+				    CHK_SVID_SMID(0x10EC, 0x9151) ||
+				    CHK_SVID_SMID(0x10EC, 0x9152) ||
+				    CHK_SVID_SMID(0x10EC, 0x9154) ||
+				    CHK_SVID_SMID(0x10EC, 0x9155) ||
+				    CHK_SVID_SMID(0x10EC, 0x9181) ||
+				    CHK_SVID_SMID(0x10EC, 0x9182) ||
+				    CHK_SVID_SMID(0x10EC, 0x9184) ||
+				    CHK_SVID_SMID(0x10EC, 0x9185))
+					rtlhal->oem_id = RT_CID_TOSHIBA;
+				else if (rtlefuse->eeprom_svid == 0x1025)
+					rtlhal->oem_id = RT_CID_819x_Acer;
+				else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
+					 CHK_SVID_SMID(0x10EC, 0x6192) ||
+					 CHK_SVID_SMID(0x10EC, 0x6193) ||
+					 CHK_SVID_SMID(0x10EC, 0x7191) ||
+					 CHK_SVID_SMID(0x10EC, 0x7192) ||
+					 CHK_SVID_SMID(0x10EC, 0x7193) ||
+					 CHK_SVID_SMID(0x10EC, 0x8191) ||
+					 CHK_SVID_SMID(0x10EC, 0x8192) ||
+					 CHK_SVID_SMID(0x10EC, 0x8193))
+					rtlhal->oem_id = RT_CID_819x_SAMSUNG;
+				else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
+					 CHK_SVID_SMID(0x10EC, 0x9195) ||
+					 CHK_SVID_SMID(0x10EC, 0x7194) ||
+					 CHK_SVID_SMID(0x10EC, 0x8200) ||
+					 CHK_SVID_SMID(0x10EC, 0x8201) ||
+					 CHK_SVID_SMID(0x10EC, 0x8202) ||
+					 CHK_SVID_SMID(0x10EC, 0x9200))
+					rtlhal->oem_id = RT_CID_819x_Lenovo;
+				else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
+					 CHK_SVID_SMID(0x10EC, 0x9196))
+					rtlhal->oem_id = RT_CID_819x_CLEVO;
+				else if (CHK_SVID_SMID(0x1028, 0x8194) ||
+					 CHK_SVID_SMID(0x1028, 0x8198) ||
+					 CHK_SVID_SMID(0x1028, 0x9197) ||
+					 CHK_SVID_SMID(0x1028, 0x9198))
+					rtlhal->oem_id = RT_CID_819x_DELL;
+				else if (CHK_SVID_SMID(0x103C, 0x1629))
+					rtlhal->oem_id = RT_CID_819x_HP;
+				else if (CHK_SVID_SMID(0x1A32, 0x2315))
+					rtlhal->oem_id = RT_CID_819x_QMI;
+				else if (CHK_SVID_SMID(0x10EC, 0x8203))
+					rtlhal->oem_id = RT_CID_819x_PRONETS;
+				else if (CHK_SVID_SMID(0x1043, 0x84B5))
+					rtlhal->oem_id =
+						 RT_CID_819x_Edimax_ASUS;
+				else
+					rtlhal->oem_id = RT_CID_DEFAULT;
+			} else if (rtlefuse->eeprom_did == 0x8178) {
+				if (CHK_SVID_SMID(0x10EC, 0x6181) ||
+				    CHK_SVID_SMID(0x10EC, 0x6182) ||
+				    CHK_SVID_SMID(0x10EC, 0x6184) ||
+				    CHK_SVID_SMID(0x10EC, 0x6185) ||
+				    CHK_SVID_SMID(0x10EC, 0x7181) ||
+				    CHK_SVID_SMID(0x10EC, 0x7182) ||
+				    CHK_SVID_SMID(0x10EC, 0x7184) ||
+				    CHK_SVID_SMID(0x10EC, 0x7185) ||
+				    CHK_SVID_SMID(0x10EC, 0x8181) ||
+				    CHK_SVID_SMID(0x10EC, 0x8182) ||
+				    CHK_SVID_SMID(0x10EC, 0x8184) ||
+				    CHK_SVID_SMID(0x10EC, 0x8185) ||
+				    CHK_SVID_SMID(0x10EC, 0x9181) ||
+				    CHK_SVID_SMID(0x10EC, 0x9182) ||
+				    CHK_SVID_SMID(0x10EC, 0x9184) ||
+				    CHK_SVID_SMID(0x10EC, 0x9185))
+					rtlhal->oem_id = RT_CID_TOSHIBA;
+				else if (rtlefuse->eeprom_svid == 0x1025)
+					rtlhal->oem_id = RT_CID_819x_Acer;
+				else if (CHK_SVID_SMID(0x10EC, 0x8186))
+					rtlhal->oem_id = RT_CID_819x_PRONETS;
+				else if (CHK_SVID_SMID(0x1043, 0x8486))
+					rtlhal->oem_id =
+						     RT_CID_819x_Edimax_ASUS;
+				else
+					rtlhal->oem_id = RT_CID_DEFAULT;
+			} else {
+				rtlhal->oem_id = RT_CID_DEFAULT;
+			}
+			break;
+		case EEPROM_CID_TOSHIBA:
+			rtlhal->oem_id = RT_CID_TOSHIBA;
+			break;
+		case EEPROM_CID_CCX:
+			rtlhal->oem_id = RT_CID_CCX;
+			break;
+		case EEPROM_CID_QMI:
+			rtlhal->oem_id = RT_CID_819x_QMI;
+			break;
+		case EEPROM_CID_WHQL:
+			break;
+		default:
+			rtlhal->oem_id = RT_CID_DEFAULT;
+			break;
+		}
+	}
+}
+
+static void _rtl8723ae_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	switch (rtlhal->oem_id) {
+	case RT_CID_819x_HP:
+		pcipriv->ledctl.led_opendrain = true;
+		break;
+	case RT_CID_819x_Lenovo:
+	case RT_CID_DEFAULT:
+	case RT_CID_TOSHIBA:
+	case RT_CID_CCX:
+	case RT_CID_819x_Acer:
+	case RT_CID_WHQL:
+	default:
+		break;
+	}
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+		 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+}
+
+void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp_u1b;
+	u32 value32;
+
+	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST]);
+	value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST], value32);
+
+	rtlhal->version = _rtl8723ae_read_chip_version(hw);
+
+	if (get_rf_type(rtlphy) == RF_1T1R)
+		rtlpriv->dm.rfpath_rxenable[0] = true;
+	else
+		rtlpriv->dm.rfpath_rxenable[0] =
+		    rtlpriv->dm.rfpath_rxenable[1] = true;
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+		 rtlhal->version);
+
+	tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+	if (tmp_u1b & BIT(4)) {
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+		rtlefuse->epromtype = EEPROM_93C46;
+	} else {
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+		rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+	}
+	if (tmp_u1b & BIT(5)) {
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+		rtlefuse->autoload_failflag = false;
+		_rtl8723ae_read_adapter_info(hw, false);
+	} else {
+		rtlefuse->autoload_failflag = true;
+		_rtl8723ae_read_adapter_info(hw, false);
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+	}
+	_rtl8723ae_hal_customized_behavior(hw);
+}
+
+static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw,
+					    struct ieee80211_sta *sta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 ratr_value;
+	u8 ratr_index = 0;
+	u8 nmode = mac->ht_enable;
+	u8 mimo_ps = IEEE80211_SMPS_OFF;
+	u8 curtxbw_40mhz = mac->bw_40;
+	u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+				1 : 0;
+	u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+				1 : 0;
+	enum wireless_mode wirelessmode = mac->mode;
+
+	if (rtlhal->current_bandtype == BAND_ON_5G)
+		ratr_value = sta->supp_rates[1] << 4;
+	else
+		ratr_value = sta->supp_rates[0];
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		ratr_value = 0xfff;
+	ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+		       sta->ht_cap.mcs.rx_mask[0] << 12);
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		if (ratr_value & 0x0000000c)
+			ratr_value &= 0x0000000d;
+		else
+			ratr_value &= 0x0000000f;
+		break;
+	case WIRELESS_MODE_G:
+		ratr_value &= 0x00000FF5;
+		break;
+	case WIRELESS_MODE_N_24G:
+	case WIRELESS_MODE_N_5G:
+		nmode = 1;
+		if (mimo_ps == IEEE80211_SMPS_STATIC) {
+			ratr_value &= 0x0007F005;
+		} else {
+			u32 ratr_mask;
+
+			if (get_rf_type(rtlphy) == RF_1T2R ||
+			    get_rf_type(rtlphy) == RF_1T1R)
+				ratr_mask = 0x000ff005;
+			else
+				ratr_mask = 0x0f0ff005;
+
+			ratr_value &= ratr_mask;
+		}
+		break;
+	default:
+		if (rtlphy->rf_type == RF_1T2R)
+			ratr_value &= 0x000ff0ff;
+		else
+			ratr_value &= 0x0f0ff0ff;
+
+		break;
+	}
+
+	if ((pcipriv->bt_coexist.bt_coexistence) &&
+	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
+	    (pcipriv->bt_coexist.bt_cur_state) &&
+	    (pcipriv->bt_coexist.bt_ant_isolation) &&
+	    ((pcipriv->bt_coexist.bt_service == BT_SCO) ||
+	    (pcipriv->bt_coexist.bt_service == BT_BUSY)))
+		ratr_value &= 0x0fffcfc0;
+	else
+		ratr_value &= 0x0FFFFFFF;
+
+	if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+	   (!curtxbw_40mhz && curshortgi_20mhz)))
+		ratr_value |= 0x10000000;
+
+	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+		 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
+static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u8 rssi_level)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_sta_info *sta_entry = NULL;
+	u32 ratr_bitmap;
+	u8 ratr_index;
+	u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+				? 1 : 0;
+	u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+				1 : 0;
+	u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+				1 : 0;
+	enum wireless_mode wirelessmode = 0;
+	bool shortgi = false;
+	u8 rate_mask[5];
+	u8 macid = 0;
+	u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+	sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+	wirelessmode = sta_entry->wireless_mode;
+	if (mac->opmode == NL80211_IFTYPE_STATION)
+		curtxbw_40mhz = mac->bw_40;
+	else if (mac->opmode == NL80211_IFTYPE_AP ||
+		mac->opmode == NL80211_IFTYPE_ADHOC)
+		macid = sta->aid + 1;
+
+	if (rtlhal->current_bandtype == BAND_ON_5G)
+		ratr_bitmap = sta->supp_rates[1] << 4;
+	else
+		ratr_bitmap = sta->supp_rates[0];
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		ratr_bitmap = 0xfff;
+	ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+			sta->ht_cap.mcs.rx_mask[0] << 12);
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		ratr_index = RATR_INX_WIRELESS_B;
+		if (ratr_bitmap & 0x0000000c)
+			ratr_bitmap &= 0x0000000d;
+		else
+			ratr_bitmap &= 0x0000000f;
+		break;
+	case WIRELESS_MODE_G:
+		ratr_index = RATR_INX_WIRELESS_GB;
+
+		if (rssi_level == 1)
+			ratr_bitmap &= 0x00000f00;
+		else if (rssi_level == 2)
+			ratr_bitmap &= 0x00000ff0;
+		else
+			ratr_bitmap &= 0x00000ff5;
+		break;
+	case WIRELESS_MODE_A:
+		ratr_index = RATR_INX_WIRELESS_A;
+		ratr_bitmap &= 0x00000ff0;
+		break;
+	case WIRELESS_MODE_N_24G:
+	case WIRELESS_MODE_N_5G:
+		ratr_index = RATR_INX_WIRELESS_NGB;
+
+		if (mimo_ps == IEEE80211_SMPS_STATIC) {
+			if (rssi_level == 1)
+				ratr_bitmap &= 0x00070000;
+			else if (rssi_level == 2)
+				ratr_bitmap &= 0x0007f000;
+			else
+				ratr_bitmap &= 0x0007f005;
+		} else {
+			if (rtlphy->rf_type == RF_1T2R ||
+			    rtlphy->rf_type == RF_1T1R) {
+				if (curtxbw_40mhz) {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x000f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x000ff000;
+					else
+						ratr_bitmap &= 0x000ff015;
+				} else {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x000f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x000ff000;
+					else
+						ratr_bitmap &= 0x000ff005;
+				}
+			} else {
+				if (curtxbw_40mhz) {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x0f0f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x0f0ff000;
+					else
+						ratr_bitmap &= 0x0f0ff015;
+				} else {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x0f0f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x0f0ff000;
+					else
+						ratr_bitmap &= 0x0f0ff005;
+				}
+			}
+		}
+
+		if ((curtxbw_40mhz && curshortgi_40mhz) ||
+		    (!curtxbw_40mhz && curshortgi_20mhz)) {
+			if (macid == 0)
+				shortgi = true;
+			else if (macid == 1)
+				shortgi = false;
+		}
+		break;
+	default:
+		ratr_index = RATR_INX_WIRELESS_NGB;
+
+		if (rtlphy->rf_type == RF_1T2R)
+			ratr_bitmap &= 0x000ff0ff;
+		else
+			ratr_bitmap &= 0x0f0ff0ff;
+		break;
+	}
+	sta_entry->ratr_index = ratr_index;
+
+	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+		 "ratr_bitmap :%x\n", ratr_bitmap);
+	/* convert ratr_bitmap to le byte array */
+	rate_mask[0] = ratr_bitmap;
+	rate_mask[1] = (ratr_bitmap >>= 8);
+	rate_mask[2] = (ratr_bitmap >>= 8);
+	rate_mask[3] = ((ratr_bitmap >> 8) & 0x0f) | (ratr_index << 4);
+	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+		 "Rate_index:%x, ratr_bitmap: %*phC\n",
+		 ratr_index, 5, rate_mask);
+	rtl8723ae_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u8 rssi_level)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->dm.useramask)
+		rtl8723ae_update_hal_rate_mask(hw, sta, rssi_level);
+	else
+		rtl8723ae_update_hal_rate_table(hw, sta);
+}
+
+void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u16 sifs_timer;
+
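+	/* Refresh slot time; SIFS is 0x0a for legacy, 0x10 with HT */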
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+				      (u8 *)&mac->slot_time);
+	if (!mac->ht_enable)
+		sifs_timer = 0x0a0a;
+	else
+		sifs_timer = 0x1010;
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+	u8 u1tmp;
+	bool actuallyset = false;
+
+	if (rtlpriv->rtlhal.being_init_adapter)
+		return false;
+
+	if (ppsc->swrf_processing)
+		return false;
+
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
+	if (ppsc->rfchange_inprogress) {
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+		return false;
+	} else {
+		ppsc->rfchange_inprogress = true;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	}
+
+	cur_rfstate = ppsc->rfpwr_state;
+
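+	/* Configure the RF on/off GPIO as an input and sample its level */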
+	rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+		       rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
+
+	u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+	if (rtlphy->polarity_ctl)
+		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+	else
+		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+
+	if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
+		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+			 "GPIOChangeRF  - HW Radio ON, RF ON\n");
+
+		e_rfpowerstate_toset = ERFON;
+		ppsc->hwradiooff = false;
+		actuallyset = true;
+	} else if (!ppsc->hwradiooff &&
+		   (e_rfpowerstate_toset == ERFOFF)) {
+		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+			 "GPIOChangeRF  - HW Radio OFF, RF OFF\n");
+
+		e_rfpowerstate_toset = ERFOFF;
+		ppsc->hwradiooff = true;
+		actuallyset = true;
+	}
+
+	if (actuallyset) {
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		ppsc->rfchange_inprogress = false;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	} else {
+		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		ppsc->rfchange_inprogress = false;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	}
+
+	*valid = 1;
+	return !ppsc->hwradiooff;
+}
+
+void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+		       u8 *p_macaddr, bool is_group, u8 enc_algo,
+		       bool is_wepkey, bool clear_all)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 *macaddr = p_macaddr;
+	u32 entry_id = 0;
+	bool is_pairwise = false;
+	static u8 cam_const_addr[4][6] = {
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+	};
+	static u8 cam_const_broad[] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+
+	if (clear_all) {
+		u8 idx = 0;
+		u8 cam_offset = 0;
+		u8 clear_number = 5;
+
+		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+
+		for (idx = 0; idx < clear_number; idx++) {
+			rtl_cam_mark_invalid(hw, cam_offset + idx);
+			rtl_cam_empty_entry(hw, cam_offset + idx);
+
+			if (idx < 5) {
+				memset(rtlpriv->sec.key_buf[idx], 0,
+				       MAX_KEY_LEN);
+				rtlpriv->sec.key_len[idx] = 0;
+			}
+		}
+	} else {
+		switch (enc_algo) {
+		case WEP40_ENCRYPTION:
+			enc_algo = CAM_WEP40;
+			break;
+		case WEP104_ENCRYPTION:
+			enc_algo = CAM_WEP104;
+			break;
+		case TKIP_ENCRYPTION:
+			enc_algo = CAM_TKIP;
+			break;
+		case AESCCMP_ENCRYPTION:
+			enc_algo = CAM_AES;
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not processed\n");
+			enc_algo = CAM_TKIP;
+			break;
+		}
+
+		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+			macaddr = cam_const_addr[key_index];
+			entry_id = key_index;
+		} else {
+			if (is_group) {
+				macaddr = cam_const_broad;
+				entry_id = key_index;
+			} else {
+				if (mac->opmode == NL80211_IFTYPE_AP) {
+					entry_id = rtl_cam_get_free_entry(hw,
+								macaddr);
+					if (entry_id >=  TOTAL_CAM_ENTRY) {
+						RT_TRACE(rtlpriv, COMP_SEC,
+							 DBG_EMERG,
+							 "Can not find free hw security cam entry\n");
+						return;
+					}
+				} else {
+					entry_id = CAM_PAIRWISE_KEY_POSITION;
+				}
+
+				key_index = PAIRWISE_KEYIDX;
+				is_pairwise = true;
+			}
+		}
+
+		if (rtlpriv->sec.key_len[key_index] == 0) {
+			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+				 "delete one entry, entry_id is %d\n",
+				 entry_id);
+			if (mac->opmode == NL80211_IFTYPE_AP)
+				rtl_cam_del_entry(hw, p_macaddr);
+			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+		} else {
+			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+				 "add one entry\n");
+			if (is_pairwise) {
+				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+					 "set Pairwiase key\n");
+
+				rtl_cam_add_one_entry(hw, macaddr, key_index,
+					entry_id, enc_algo,
+					CAM_CONFIG_NO_USEDK,
+					rtlpriv->sec.key_buf[key_index]);
+			} else {
+				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+					 "set group key\n");
+
+				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+					rtl_cam_add_one_entry(hw,
+						rtlefuse->dev_addr,
+						PAIRWISE_KEYIDX,
+						CAM_PAIRWISE_KEY_POSITION,
+						enc_algo,
+						CAM_CONFIG_NO_USEDK,
+						rtlpriv->sec.key_buf
+						[entry_id]);
+				}
+
+				rtl_cam_add_one_entry(hw, macaddr, key_index,
+						entry_id, enc_algo,
+						CAM_CONFIG_NO_USEDK,
+						rtlpriv->sec.key_buf[entry_id]);
+			}
+
+		}
+	}
+}
+
+static void rtl8723ae_bt_var_init(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	pcipriv->bt_coexist.bt_coexistence =
+					pcipriv->bt_coexist.eeprom_bt_coexist;
+	pcipriv->bt_coexist.bt_ant_num =
+					pcipriv->bt_coexist.eeprom_bt_ant_num;
+	pcipriv->bt_coexist.bt_coexist_type =
+					pcipriv->bt_coexist.eeprom_bt_type;
+
+	pcipriv->bt_coexist.bt_ant_isolation =
+				pcipriv->bt_coexist.eeprom_bt_ant_isol;
+
+	pcipriv->bt_coexist.bt_radio_shared_type =
+				pcipriv->bt_coexist.eeprom_bt_radio_shared;
+
+	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		 "BT Coexistance = 0x%x\n",
+		 pcipriv->bt_coexist.bt_coexistence);
+
+	if (pcipriv->bt_coexist.bt_coexistence) {
+		pcipriv->bt_coexist.bt_busy_traffic = false;
+		pcipriv->bt_coexist.bt_traffic_mode_set = false;
+		pcipriv->bt_coexist.bt_non_traffic_mode_set = false;
+
+		pcipriv->bt_coexist.cstate = 0;
+		pcipriv->bt_coexist.previous_state = 0;
+
+		if (pcipriv->bt_coexist.bt_ant_num == ANT_X2) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_Ant_Num = Antx2\n");
+		} else if (pcipriv->bt_coexist.bt_ant_num == ANT_X1) {
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_Ant_Num = Antx1\n");
+		}
+
+		switch (pcipriv->bt_coexist.bt_coexist_type) {
+		case BT_2WIRE:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_2Wire\n");
+			break;
+		case BT_ISSC_3WIRE:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
+			break;
+		case BT_ACCEL:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_ACCEL\n");
+			break;
+		case BT_CSR_BC4:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
+			break;
+		case BT_CSR_BC8:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
+			break;
+		case BT_RTL8756:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = BT_RTL8756\n");
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+				 "BlueTooth BT_CoexistType = Unknown\n");
+			break;
+		}
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "BlueTooth BT_Ant_isolation = %d\n",
+			 pcipriv->bt_coexist.bt_ant_isolation);
+		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+			 "BT_RadioSharedType = 0x%x\n",
+			 pcipriv->bt_coexist.bt_radio_shared_type);
+		pcipriv->bt_coexist.bt_active_zero_cnt = 0;
+		pcipriv->bt_coexist.cur_bt_disabled = false;
+		pcipriv->bt_coexist.pre_bt_disabled = false;
+	}
+}
+
+void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+					      bool auto_load_fail, u8 *hwinfo)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value;
+	u32 tmpu_32;
+
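+	/* With a valid autoload, coexistence presence comes from
+	 * MULTI_FUNC_CTRL BIT(18) and the antenna setup from RF_OPTION4.
+	 */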
+	if (!auto_load_fail) {
+		tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+		if (tmpu_32 & BIT(18))
+			pcipriv->bt_coexist.eeprom_bt_coexist = 1;
+		else
+			pcipriv->bt_coexist.eeprom_bt_coexist = 0;
+		value = hwinfo[RF_OPTION4];
+		pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A;
+		pcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1);
+		pcipriv->bt_coexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4);
+		pcipriv->bt_coexist.eeprom_bt_radio_shared =
+				((value & 0x20) >> 5);
+	} else {
+		pcipriv->bt_coexist.eeprom_bt_coexist = 0;
+		pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A;
+		pcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
+		pcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
+		pcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
+	}
+
+	rtl8723ae_bt_var_init(hw);
+}
+
+void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+	/* 0:Low, 1:High, 2:From Efuse. */
+	pcipriv->bt_coexist.reg_bt_iso = 2;
+	/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+	pcipriv->bt_coexist.reg_bt_sco = 3;
+	/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+	pcipriv->bt_coexist.reg_bt_sco = 0;
+}
+
+void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723ae_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723ae_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
+	bool allow_all_da, bool write_into_reg)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	if (allow_all_da) /* Set BIT0 */
+		rtlpci->receive_config |= RCR_AAP;
+	else /* Clear BIT0 */
+		rtlpci->receive_config &= ~RCR_AAP;
+
+	if (write_into_reg)
+		rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+	RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+		 "receive_config=0x%08X, write_into_reg=%d\n",
+		 rtlpci->receive_config, write_into_reg);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
new file mode 100644
index 0000000..6fa24f79
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_HW_H__
+#define __RTL8723E_HW_H__
+
+#define CHK_SVID_SMID(_val1, _val2)				\
+	((rtlefuse->eeprom_svid == (_val1)) &&			\
+	 (rtlefuse->eeprom_smid == (_val2)))
+
+void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw,
+				    u32 *p_inta, u32 *p_intb);
+int rtl8723ae_hw_init(struct ieee80211_hw *hw);
+void rtl8723ae_card_disable(struct ieee80211_hw *hw);
+void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8723ae_set_network_type(struct ieee80211_hw *hw,
+			       enum nl80211_iftype type);
+void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw,
+				     u32 add_msr, u32 rm_msr);
+void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+				   struct ieee80211_sta *sta, u8 rssi_level);
+void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+		       u8 *p_macaddr, bool is_group, u8 enc_algo,
+		       bool is_wepkey, bool clear_all);
+
+void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+					      bool autoload_fail, u8 *hwinfo);
+void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
+void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
+void rtl8723ae_suspend(struct ieee80211_hw *hw);
+void rtl8723ae_resume(struct ieee80211_hw *hw);
+void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
+				  bool allow_all_da, bool write_into_reg);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
new file mode 100644
index 0000000..9c4e1d81
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
@@ -0,0 +1,151 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl8723ae_init_led(struct ieee80211_hw *hw,
+				struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+	pled->hw = hw;
+	pled->ledpin = ledpin;
+	pled->ledon = false;
+}
+
+void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 ledcfg;
+
+	RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+		 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+
+	ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+	switch (pled->ledpin) {
+	case LED_PIN_GPIO0:
+		break;
+	case LED_PIN_LED0:
+		rtl_write_byte(rtlpriv,
+			       REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+		break;
+	case LED_PIN_LED1:
+		rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not processed\n");
+		break;
+	}
+	pled->ledon = true;
+}
+
+void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u8 ledcfg;
+
+	RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+		 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+
+	ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+	switch (pled->ledpin) {
+	case LED_PIN_GPIO0:
+		break;
+	case LED_PIN_LED0:
+		ledcfg &= 0xf0;
+		if (pcipriv->ledctl.led_opendrain)
+			rtl_write_byte(rtlpriv, REG_LEDCFG2,
+				       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+		else
+			rtl_write_byte(rtlpriv, REG_LEDCFG2,
+				       (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+		break;
+	case LED_PIN_LED1:
+		ledcfg &= 0x0f;
+		rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not processed\n");
+		break;
+	}
+	pled->ledon = false;
+}
+
+void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+	_rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+	_rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8723ae_sw_led_control(struct ieee80211_hw *hw,
+				    enum led_ctl_mode ledaction)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+
+	switch (ledaction) {
+	case LED_CTL_POWER_ON:
+	case LED_CTL_LINK:
+	case LED_CTL_NO_LINK:
+		rtl8723ae_sw_led_on(hw, pLed0);
+		break;
+	case LED_CTL_POWER_OFF:
+		rtl8723ae_sw_led_off(hw, pLed0);
+		break;
+	default:
+		break;
+	}
+}
+
+void rtl8723ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
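+	/* Ignore LED requests while the RF is off for non-PS reasons */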
+	if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+	    (ledaction == LED_CTL_TX ||
+	     ledaction == LED_CTL_RX ||
+	     ledaction == LED_CTL_SITE_SURVEY ||
+	     ledaction == LED_CTL_LINK ||
+	     ledaction == LED_CTL_NO_LINK ||
+	     ledaction == LED_CTL_START_TO_LINK ||
+	     ledaction == LED_CTL_POWER_ON)) {
+		return;
+	}
+	RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+	_rtl8723ae_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h
new file mode 100644
index 0000000..2cb88e7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_LED_H__
+#define __RTL8723E_LED_H__
+
+void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723ae_led_control(struct ieee80211_hw *hw,
+			   enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
new file mode 100644
index 0000000..39cc793
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -0,0 +1,2044 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
+/* static forward definitions */
+static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+				  enum radio_path rfpath, u32 offset);
+static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+				    enum radio_path rfpath,
+				    u32 offset, u32 data);
+static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
+			       enum radio_path rfpath, u32 offset);
+static void _phy_rf_serial_write(struct ieee80211_hw *hw,
+				 enum radio_path rfpath, u32 offset, u32 data);
+static u32 _phy_calculate_bit_shift(u32 bitmask);
+static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
+static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
+static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
+static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
+static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
+static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+				      u32 cmdtableidx, u32 cmdtablesz,
+				      enum swchnlcmd_id cmdid,
+				      u32 para1, u32 para2,
+				      u32 msdelay);
+static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
+				      u8 *stage, u8 *step, u32 *delay);
+static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+				enum wireless_mode wirelessmode,
+				long power_indbm);
+static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+				  enum wireless_mode wirelessmode, u8 txpwridx);
+static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
+
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+			       u32 bitmask)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 returnvalue, originalvalue, bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+	bitshift = _phy_calculate_bit_shift(bitmask);
+	returnvalue = (originalvalue & bitmask) >> bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
+		 originalvalue);
+
+	return returnvalue;
+}
+
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+			      u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 originalvalue, bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
+		 bitmask, data);
+
+	if (bitmask != MASKDWORD) {
+		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+		bitshift = _phy_calculate_bit_shift(bitmask);
+		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+	}
+
+	rtl_write_dword(rtlpriv, regaddr, data);
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+		 regaddr, bitmask, data);
+}
+
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+			       enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 original_value, readback_value, bitshift;
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	unsigned long flags;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+		 regaddr, rfpath, bitmask);
+
+	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+	if (rtlphy->rf_mode != RF_OP_BY_FW)
+		original_value = _phy_rf_serial_read(hw, rfpath, regaddr);
+	else
+		original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
+
+	bitshift = _phy_calculate_bit_shift(bitmask);
+	readback_value = (original_value & bitmask) >> bitshift;
+
+	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+		 regaddr, rfpath, bitmask, original_value);
+
+	return readback_value;
+}
+
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+			      enum radio_path rfpath,
+			      u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 original_value, bitshift;
+	unsigned long flags;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+		 regaddr, bitmask, data, rfpath);
+
+	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+	if (rtlphy->rf_mode != RF_OP_BY_FW) {
+		if (bitmask != RFREG_OFFSET_MASK) {
+			original_value = _phy_rf_serial_read(hw, rfpath,
+							     regaddr);
+			bitshift = _phy_calculate_bit_shift(bitmask);
+			data = ((original_value & (~bitmask)) |
+			       (data << bitshift));
+		}
+
+		_phy_rf_serial_write(hw, rfpath, regaddr, data);
+	} else {
+		if (bitmask != RFREG_OFFSET_MASK) {
+			original_value = _phy_fw_rf_serial_read(hw, rfpath,
+								regaddr);
+			bitshift = _phy_calculate_bit_shift(bitmask);
+			data = ((original_value & (~bitmask)) |
+			       (data << bitshift));
+		}
+		_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+	}
+
+	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+		 regaddr, bitmask, data, rfpath);
+}
+
+static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+					    enum radio_path rfpath, u32 offset)
+{
+	RT_ASSERT(false, "deprecated!\n");
+	return 0;
+}
+
+static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+				    enum radio_path rfpath,
+				    u32 offset, u32 data)
+{
+	RT_ASSERT(false, "deprecated!\n");
+}
+
+static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
+			       enum radio_path rfpath, u32 offset)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+	u32 newoffset;
+	u32 tmplong, tmplong2;
+	u8 rfpi_enable = 0;
+	u32 retvalue;
+
+	offset &= 0x3f;
+	newoffset = offset;
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+		return 0xFFFFFFFF;
+	}
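+	/* LSSI read: program the read address, toggle the read edge, then
+	 * fetch the result from the PI or SI read-back register.
+	 */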
+	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+	if (rfpath == RF90_PATH_A)
+		tmplong2 = tmplong;
+	else
+		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+	    (newoffset << 23) | BLSSIREADEDGE;
+	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+		      tmplong & (~BLSSIREADEDGE));
+	mdelay(1);
+	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+	mdelay(1);
+	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+		      tmplong | BLSSIREADEDGE);
+	mdelay(1);
+	if (rfpath == RF90_PATH_A)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+						 BIT(8));
+	else if (rfpath == RF90_PATH_B)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+						 BIT(8));
+	if (rfpi_enable)
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+					 BLSSIREADBACKDATA);
+	else
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+					 BLSSIREADBACKDATA);
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+		 rfpath, pphyreg->rf_rb, retvalue);
+	return retvalue;
+}
+
+static void _phy_rf_serial_write(struct ieee80211_hw *hw,
+				 enum radio_path rfpath, u32 offset, u32 data)
+{
+	u32 data_and_addr;
+	u32 newoffset;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+		return;
+	}
+	offset &= 0x3f;
+	newoffset = offset;
+	data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+	rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+		 rfpath, pphyreg->rf3wire_offset, data_and_addr);
+}
+
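+/* Return the position of the lowest set bit in bitmask (32 if none) */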
+static u32 _phy_calculate_bit_shift(u32 bitmask)
+{
+	u32 i;
+
+	for (i = 0; i <= 31; i++) {
+		if (((bitmask >> i) & 0x1) == 1)
+			break;
+	}
+	return i;
+}
+
+static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
+{
+	rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+	rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
+	rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
+	rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+	rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
+	rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
+	rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
+	rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
+	rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
+	rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
+}
+
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool rtstatus = _phy_cfg_mac_w_header(hw);
+	rtl_write_byte(rtlpriv, 0x04CA, 0x0A);
+	return rtstatus;
+}
+
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
+{
+	bool rtstatus = true;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmpu1b;
+	u8 reg_hwparafile = 1;
+
+	_phy_init_bb_rf_reg_def(hw);
+
+	/* 1. 0x28[1] = 1 */
+	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
+	udelay(2);
+	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, (tmpu1b|BIT(1)));
+	udelay(2);
+	/* 2. 0x29[7:0] = 0xFF */
+	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL+1, 0xff);
+	udelay(2);
+
+	/* 3. 0x02[1:0] = 2b'11 */
+	tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, (tmpu1b |
+		       FEN_BB_GLB_RSTn | FEN_BBRSTB));
+
+	/* 4. 0x25[6] = 0 */
+	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+1);
+	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+1, (tmpu1b&(~BIT(6))));
+
+	/* 5. 0x24[20] = 0	Advised by SD3 Alex Wang. 2011.02.09. */
+	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2);
+	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, (tmpu1b&(~BIT(4))));
+
+	/* 6. 0x1f[7:0] = 0x07 */
+	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x07);
+
+	if (reg_hwparafile == 1)
+		rtstatus = _phy_bb8192c_config_parafile(hw);
+	return rtstatus;
+}
+
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw)
+{
+	return rtl8723ae_phy_rf6052_config(hw);
+}
+
+static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	bool rtstatus;
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
+	rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_PHY_REG);
+	if (!rtstatus) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+		return false;
+	}
+
+	if (rtlphy->rf_type == RF_1T2R) {
+		_rtl8723ae_phy_bb_config_1t(hw);
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+	}
+	if (!rtlefuse->autoload_failflag) {
+		rtlphy->pwrgroup_cnt = 0;
+		rtstatus = _phy_cfg_bb_w_pgheader(hw, BASEBAND_CONFIG_PHY_REG);
+	}
+	if (!rtstatus) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+		return false;
+	}
+	rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_AGC_TAB);
+	if (!rtstatus) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+		return false;
+	}
+	rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+					 RFPGA0_XA_HSSIPARAMETER2, 0x200));
+	return true;
+}
+
+static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+	u32 arraylength;
+	u32 *ptrarray;
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
+	arraylength = RTL8723E_MACARRAYLENGTH;
+	ptrarray = RTL8723EMAC_ARRAY;
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+		 "Img:RTL8192CEMAC_2T_ARRAY\n");
+	for (i = 0; i < arraylength; i = i + 2)
+		rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+	return true;
+}
+
+static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
+{
+	int i;
+	u32 *phy_regarray_table;
+	u32 *agctab_array_table;
+	u16 phy_reg_arraylen, agctab_arraylen;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	agctab_arraylen = RTL8723E_AGCTAB_1TARRAYLENGTH;
+	agctab_array_table = RTL8723EAGCTAB_1TARRAY;
+	phy_reg_arraylen = RTL8723E_PHY_REG_1TARRAY_LENGTH;
+	phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
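+		/* Addresses 0xfe-0xf9 in the table encode settling delays */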
+		for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+			if (phy_regarray_table[i] == 0xfe)
+				mdelay(50);
+			else if (phy_regarray_table[i] == 0xfd)
+				mdelay(5);
+			else if (phy_regarray_table[i] == 0xfc)
+				mdelay(1);
+			else if (phy_regarray_table[i] == 0xfb)
+				udelay(50);
+			else if (phy_regarray_table[i] == 0xfa)
+				udelay(5);
+			else if (phy_regarray_table[i] == 0xf9)
+				udelay(1);
+			rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+				      phy_regarray_table[i + 1]);
+			udelay(1);
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "The phy_regarray_table[0] is %x"
+				 " Rtl819XPHY_REGArray[1] is %x\n",
+				 phy_regarray_table[i],
+				 phy_regarray_table[i + 1]);
+		}
+	} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+		for (i = 0; i < agctab_arraylen; i = i + 2) {
+			rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+				      agctab_array_table[i + 1]);
+			udelay(1);
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "The agctab_array_table[0] is "
+				 "%x Rtl819XPHY_REGArray[1] is %x\n",
+				 agctab_array_table[i],
+				 agctab_array_table[i + 1]);
+		}
+	}
+	return true;
+}
+
+static void _st_pwrIdx_dfrate_off(struct ieee80211_hw *hw, u32 regaddr,
+				  u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	switch (regaddr) {
+	case RTXAGC_A_RATE18_06:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0]);
+		break;
+	case RTXAGC_A_RATE54_24:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1]);
+		break;
+	case RTXAGC_A_CCK1_MCS32:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6]);
+		break;
+	case RTXAGC_B_CCK11_A_CCK2_11:
+		if (bitmask == 0xffffff00) {
+			rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7] = data;
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+				 rtlphy->pwrgroup_cnt,
+				 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7]);
+		}
+		if (bitmask == 0x000000ff) {
+			rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15] = data;
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+				 rtlphy->pwrgroup_cnt,
+				 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15]);
+		}
+		break;
+	case RTXAGC_A_MCS03_MCS00:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2]);
+		break;
+	case RTXAGC_A_MCS07_MCS04:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3]);
+		break;
+	case RTXAGC_A_MCS11_MCS08:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4]);
+		break;
+	case RTXAGC_A_MCS15_MCS12:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5]);
+		break;
+	case RTXAGC_B_RATE18_06:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8]);
+		break;
+	case RTXAGC_B_RATE54_24:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9]);
+		break;
+	case RTXAGC_B_CCK1_55_MCS32:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14]);
+		break;
+	case RTXAGC_B_MCS03_MCS00:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10]);
+		break;
+	case RTXAGC_B_MCS07_MCS04:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11]);
+		break;
+	case RTXAGC_B_MCS11_MCS08:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12]);
+		break;
+	case RTXAGC_B_MCS15_MCS12:
+		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13] = data;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+			 rtlphy->pwrgroup_cnt,
+			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13]);
+		rtlphy->pwrgroup_cnt++;
+		break;
+	}
+}
+
+static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i;
+	u32 *phy_regarray_table_pg;
+	u16 phy_regarray_pg_len;
+
+	phy_regarray_pg_len = RTL8723E_PHY_REG_ARRAY_PGLENGTH;
+	phy_regarray_table_pg = RTL8723EPHY_REG_ARRAY_PG;
+
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
+		for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+			if (phy_regarray_table_pg[i] == 0xfe)
+				mdelay(50);
+			else if (phy_regarray_table_pg[i] == 0xfd)
+				mdelay(5);
+			else if (phy_regarray_table_pg[i] == 0xfc)
+				mdelay(1);
+			else if (phy_regarray_table_pg[i] == 0xfb)
+				udelay(50);
+			else if (phy_regarray_table_pg[i] == 0xfa)
+				udelay(5);
+			else if (phy_regarray_table_pg[i] == 0xf9)
+				udelay(1);
+
+			_st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
+					      phy_regarray_table_pg[i + 1],
+					      phy_regarray_table_pg[i + 2]);
+		}
+	} else {
+		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+			 "configtype != BaseBand_Config_PHY_REG\n");
+	}
+	return true;
+}
+
+bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					     enum radio_path rfpath)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i;
+	bool rtstatus = true;
+	u32 *radioa_array_table;
+	u32 *radiob_array_table;
+	u16 radioa_arraylen, radiob_arraylen;
+
+	radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH;
+	radioa_array_table = RTL8723E_RADIOA_1TARRAY;
+	radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
+	radiob_array_table = RTL8723E_RADIOB_1TARRAY;
+
+	rtstatus = true;
+
+	switch (rfpath) {
+	case RF90_PATH_A:
+		for (i = 0; i < radioa_arraylen; i = i + 2) {
+			if (radioa_array_table[i] == 0xfe)
+				mdelay(50);
+			else if (radioa_array_table[i] == 0xfd)
+				mdelay(5);
+			else if (radioa_array_table[i] == 0xfc)
+				mdelay(1);
+			else if (radioa_array_table[i] == 0xfb)
+				udelay(50);
+			else if (radioa_array_table[i] == 0xfa)
+				udelay(5);
+			else if (radioa_array_table[i] == 0xf9)
+				udelay(1);
+			else {
+				rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+					      RFREG_OFFSET_MASK,
+					      radioa_array_table[i + 1]);
+				udelay(1);
+			}
+		}
+		break;
+	case RF90_PATH_B:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not process\n");
+		break;
+	case RF90_PATH_C:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not process\n");
+		break;
+	case RF90_PATH_D:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not process\n");
+		break;
+	}
+	return true;
+}
+
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	rtlphy->default_initialgain[0] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[1] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[2] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[3] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+		 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+		  rtlphy->default_initialgain[0],
+		  rtlphy->default_initialgain[1],
+		  rtlphy->default_initialgain[2],
+		  rtlphy->default_initialgain[3]);
+
+	rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+					       ROFDM0_RXDETECTOR3, MASKBYTE0);
+	rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+					      ROFDM0_RXDETECTOR2, MASKDWORD);
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+		 "Default framesync (0x%x) = 0x%x\n",
+		 ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+
+static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+	rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+	rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+	rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+			    RFPGA0_XA_LSSIPARAMETER;
+	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+			    RFPGA0_XB_LSSIPARAMETER;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+	rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+	rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+	rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
+}
+
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 txpwr_level;
+	long txpwr_dbm;
+
+	txpwr_level = rtlphy->cur_cck_txpwridx;
+	txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
+	txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
+	    rtlefuse->legacy_ht_txpowerdiff;
+	if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
+		txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+						  txpwr_level);
+	txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+	if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
+	    txpwr_dbm)
+		txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+						  txpwr_level);
+	*powerlevel = txpwr_dbm;
+}
+
+static void _rtl8723ae_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+					 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 index = (channel - 1);
+
+	cckpowerlevel[RF90_PATH_A] =
+	    rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+	cckpowerlevel[RF90_PATH_B] =
+	    rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+	if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
+		ofdmpowerlevel[RF90_PATH_A] =
+		    rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+		ofdmpowerlevel[RF90_PATH_B] =
+		    rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+	} else if (get_rf_type(rtlphy) == RF_2T2R) {
+		ofdmpowerlevel[RF90_PATH_A] =
+		    rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+		ofdmpowerlevel[RF90_PATH_B] =
+		    rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+	}
+}
+
+static void _rtl8723ae_ccxpower_index_check(struct ieee80211_hw *hw,
+					    u8 channel, u8 *cckpowerlevel,
+					    u8 *ofdmpowerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+	rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+	if (rtlefuse->txpwr_fromeprom == false)
+		return;
+	_rtl8723ae_get_txpower_index(hw, channel, &cckpowerlevel[0],
+				     &ofdmpowerlevel[0]);
+	_rtl8723ae_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
+					&ofdmpowerlevel[0]);
+	rtl8723ae_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+	rtl8723ae_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
+}
+
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 idx;
+	u8 rf_path;
+	u8 ccktxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_B,
+					       power_indbm);
+	u8 ofdmtxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_N_24G,
+						power_indbm);
+	if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
+		ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
+	else
+		ofdmtxpwridx = 0;
+	RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
+		 "%ld dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+		 power_indbm, ccktxpwridx, ofdmtxpwridx);
+	for (idx = 0; idx < 14; idx++) {
+		for (rf_path = 0; rf_path < 2; rf_path++) {
+			rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
+			rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
+							    ofdmtxpwridx;
+			rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
+							    ofdmtxpwridx;
+		}
+	}
+	rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+	return true;
+}
+
+static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+				enum wireless_mode wirelessmode,
+				long power_indbm)
+{
+	u8 txpwridx;
+	long offset;
+
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		offset = -7;
+		break;
+	case WIRELESS_MODE_G:
+	case WIRELESS_MODE_N_24G:
+		offset = -8;
+		break;
+	default:
+		offset = -8;
+		break;
+	}
+
+	if ((power_indbm - offset) > 0)
+		txpwridx = (u8) ((power_indbm - offset) * 2);
+	else
+		txpwridx = 0;
+
+	if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
+		txpwridx = MAX_TXPWR_IDX_NMODE_92S;
+
+	return txpwridx;
+}
+
+static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+				  enum wireless_mode wirelessmode, u8 txpwridx)
+{
+	long offset;
+	long pwrout_dbm;
+
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		offset = -7;
+		break;
+	case WIRELESS_MODE_G:
+	case WIRELESS_MODE_N_24G:
+		offset = -8;
+		break;
+	default:
+		offset = -8;
+		break;
+	}
+	pwrout_dbm = txpwridx / 2 + offset;
+	return pwrout_dbm;
+}
+
+void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	enum io_type iotype;
+
+	if (!is_hal_stop(rtlhal)) {
+		switch (operation) {
+		case SCAN_OPT_BACKUP:
+			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *)&iotype);
+
+			break;
+		case SCAN_OPT_RESTORE:
+			iotype = IO_CMD_RESUME_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *)&iotype);
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "Unknown Scan Backup operation.\n");
+			break;
+		}
+	}
+}
+
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u8 reg_bw_opmode;
+	u8 reg_prsr_rsc;
+
+	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+		 "Switch to %s bandwidth\n",
+		 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+		 "20MHz" : "40MHz");
+
+	if (is_hal_stop(rtlhal)) {
+		rtlphy->set_bwmode_inprogress = false;
+		return;
+	}
+
+	reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+	reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+	switch (rtlphy->current_chan_bw) {
+	case HT_CHANNEL_WIDTH_20:
+		reg_bw_opmode |= BW_OPMODE_20MHZ;
+		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+		reg_prsr_rsc =
+		    (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+		rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+		break;
+	}
+
+	switch (rtlphy->current_chan_bw) {
+	case HT_CHANNEL_WIDTH_20:
+		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+		rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+
+		rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+			      (mac->cur_40_prime_sc >> 1));
+		rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+		rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+
+		rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+			      (mac->cur_40_prime_sc ==
+			       HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+		break;
+	}
+	rtl8723ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+	rtlphy->set_bwmode_inprogress = false;
+	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+}
+
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+			       enum nl80211_channel_type ch_type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp_bw = rtlphy->current_chan_bw;
+
+	if (rtlphy->set_bwmode_inprogress)
+		return;
+	rtlphy->set_bwmode_inprogress = true;
+	if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+		rtl8723ae_phy_set_bw_mode_callback(hw);
+	} else {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "FALSE driver sleep or unload\n");
+		rtlphy->set_bwmode_inprogress = false;
+		rtlphy->current_chan_bw = tmp_bw;
+	}
+}
+
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 delay;
+
+	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+		 "switch to channel%d\n", rtlphy->current_channel);
+	if (is_hal_stop(rtlhal))
+		return;
+	do {
+		if (!rtlphy->sw_chnl_inprogress)
+			break;
+		if (!_phy_sw_chnl_step_by_step
+		    (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
+		     &rtlphy->sw_chnl_step, &delay)) {
+			if (delay > 0)
+				mdelay(delay);
+			else
+				continue;
+		} else {
+			rtlphy->sw_chnl_inprogress = false;
+		}
+		break;
+	} while (true);
+	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+}
+
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (rtlphy->sw_chnl_inprogress)
+		return 0;
+	if (rtlphy->set_bwmode_inprogress)
+		return 0;
+	RT_ASSERT((rtlphy->current_channel <= 14),
+		  "WIRELESS_MODE_G but channel>14");
+	rtlphy->sw_chnl_inprogress = true;
+	rtlphy->sw_chnl_stage = 0;
+	rtlphy->sw_chnl_step = 0;
+	if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+		rtl8723ae_phy_sw_chnl_callback(hw);
+		RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+			 "sw_chnl_inprogress false schedule workitem\n");
+		rtlphy->sw_chnl_inprogress = false;
+	} else {
+		RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+			 "sw_chnl_inprogress false driver sleep or unload\n");
+		rtlphy->sw_chnl_inprogress = false;
+	}
+	return 1;
+}
+
+static void _rtl8723ae_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+		if (channel == 6 && rtlphy->current_chan_bw ==
+		    HT_CHANNEL_WIDTH_20)
+			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+				      0x00255);
+		else {
+			u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A,
+					   RF_RX_G1, RFREG_OFFSET_MASK);
+			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+				      backupRF0x1A);
+		}
+	}
+}
+
+static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
+				      u8 *stage, u8 *step, u32 *delay)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+	u32 precommoncmdcnt;
+	struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+	u32 postcommoncmdcnt;
+	struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+	u32 rfdependcmdcnt;
+	struct swchnlcmd *currentcmd = NULL;
+	u8 rfpath;
+	u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+	precommoncmdcnt = 0;
+	_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+				  MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
+				  0, 0, 0);
+	_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+				  MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+	postcommoncmdcnt = 0;
+
+	_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+				  MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+	rfdependcmdcnt = 0;
+
+	RT_ASSERT((channel >= 1 && channel <= 14),
+		  "illegal channel for Zebra: %d\n", channel);
+
+	_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+				  MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+				  RF_CHNLBW, channel, 10);
+
+	_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+				  MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
+
+	do {
+		switch (*stage) {
+		case 0:
+			currentcmd = &precommoncmd[*step];
+			break;
+		case 1:
+			currentcmd = &rfdependcmd[*step];
+			break;
+		case 2:
+			currentcmd = &postcommoncmd[*step];
+			break;
+		}
+
+		if (currentcmd->cmdid == CMDID_END) {
+			if ((*stage) == 2) {
+				return true;
+			} else {
+				(*stage)++;
+				(*step) = 0;
+				continue;
+			}
+		}
+
+		switch (currentcmd->cmdid) {
+		case CMDID_SET_TXPOWEROWER_LEVEL:
+			rtl8723ae_phy_set_txpower_level(hw, channel);
+			break;
+		case CMDID_WRITEPORT_ULONG:
+			rtl_write_dword(rtlpriv, currentcmd->para1,
+					currentcmd->para2);
+			break;
+		case CMDID_WRITEPORT_USHORT:
+			rtl_write_word(rtlpriv, currentcmd->para1,
+				       (u16) currentcmd->para2);
+			break;
+		case CMDID_WRITEPORT_UCHAR:
+			rtl_write_byte(rtlpriv, currentcmd->para1,
+				       (u8) currentcmd->para2);
+			break;
+		case CMDID_RF_WRITEREG:
+			for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+				rtlphy->rfreg_chnlval[rfpath] =
+				    ((rtlphy->rfreg_chnlval[rfpath] &
+				      0xfffffc00) | currentcmd->para2);
+
+				rtl_set_rfreg(hw, (enum radio_path)rfpath,
+					      currentcmd->para1,
+					      RFREG_OFFSET_MASK,
+					      rtlphy->rfreg_chnlval[rfpath]);
+			}
+			_rtl8723ae_phy_sw_rf_seting(hw, channel);
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not process\n");
+			break;
+		}
+
+		break;
+	} while (true);
+
+	(*delay) = currentcmd->msdelay;
+	(*step)++;
+	return false;
+}
+
+static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+				      u32 cmdtableidx, u32 cmdtablesz,
+				      enum swchnlcmd_id cmdid, u32 para1,
+				      u32 para2, u32 msdelay)
+{
+	struct swchnlcmd *pcmd;
+
+	if (cmdtable == NULL) {
+		RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+		return false;
+	}
+
+	if (cmdtableidx >= cmdtablesz)
+		return false;
+
+	pcmd = cmdtable + cmdtableidx;
+	pcmd->cmdid = cmdid;
+	pcmd->para1 = para1;
+	pcmd->para2 = para2;
+	pcmd->msdelay = msdelay;
+	return true;
+}
+
+static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+	u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+	u8 result = 0x00;
+
+	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
+	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+	rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
+		      config_pathb ? 0x28160202 : 0x28160502);
+
+	if (config_pathb) {
+		rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+		rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+		rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+		rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
+	}
+
+	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
+	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+	mdelay(IQK_DELAY_TIME);
+
+	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+	reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+	if (!(reg_eac & BIT(28)) &&
+	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+		result |= 0x01;
+	else
+		return result;
+
+	if (!(reg_eac & BIT(27)) &&
+	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+		result |= 0x02;
+	return result;
+}
+
+static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	u8 result = 0x00;
+
+	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+	mdelay(IQK_DELAY_TIME);
+	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+
+	if (!(reg_eac & BIT(31)) &&
+	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+		result |= 0x01;
+	else
+		return result;
+	if (!(reg_eac & BIT(30)) &&
+	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+		result |= 0x02;
+	return result;
+}
+
+static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
+				       long result[][8], u8 final_candidate,
+				       bool btxonly)
+{
+	u32 oldval_0, x, tx0_a, reg;
+	long y, tx0_c;
+
+	if (final_candidate == 0xFF) {
+		return;
+	} else if (iqk_ok) {
+		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+					  MASKDWORD) >> 22) & 0x3FF;
+		x = result[final_candidate][0];
+		if ((x & 0x00000200) != 0)
+			x = x | 0xFFFFFC00;
+		tx0_a = (x * oldval_0) >> 8;
+		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+			      ((x * oldval_0 >> 7) & 0x1));
+		y = result[final_candidate][1];
+		if ((y & 0x00000200) != 0)
+			y = y | 0xFFFFFC00;
+		tx0_c = (y * oldval_0) >> 8;
+		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+			      ((tx0_c & 0x3C0) >> 6));
+		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+			      (tx0_c & 0x3F));
+		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+			      ((y * oldval_0 >> 7) & 0x1));
+		if (btxonly)
+			return;
+		reg = result[final_candidate][2];
+		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+		reg = result[final_candidate][3] & 0x3F;
+		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+		reg = (result[final_candidate][3] >> 6) & 0xF;
+		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+	}
+}
+
+static void phy_save_adda_regs(struct ieee80211_hw *hw,
+					       u32 *addareg, u32 *addabackup,
+					       u32 registernum)
+{
+	u32 i;
+
+	for (i = 0; i < registernum; i++)
+		addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+
+static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
+			      u32 *macbackup)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+		macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+	macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
+				 u32 *addabackup, u32 registernum)
+{
+	u32 i;
+
+	for (i = 0; i < registernum; i++)
+		rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+
+static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
+				u32 *macbackup)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+		rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+	rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
+					u32 *addareg, bool is_patha_on,
+					bool is2t)
+{
+	u32 pathOn;
+	u32 i;
+
+	pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+	if (!is2t) {
+		pathOn = 0x0bdb25a0;
+		rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+	} else {
+		rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
+	}
+
+	for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+		rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
+}
+
+static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+						   u32 *macreg, u32 *macbackup)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i = 0;
+
+	rtl_write_byte(rtlpriv, macreg[i], 0x3F);
+
+	for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+		rtl_write_byte(rtlpriv, macreg[i],
+			       (u8) (macbackup[i] & (~BIT(3))));
+	rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+
+static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+	u32 mode;
+
+	mode = pi_mode ? 0x01000100 : 0x01000000;
+	rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+	rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+
+static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
+				u8 c1, u8 c2)
+{
+	u32 i, j, diff, simularity_bitmap, bound;
+
+	u8 final_candidate[2] = { 0xFF, 0xFF };
+	bool bresult = true;
+
+	bound = 4;
+
+	simularity_bitmap = 0;
+
+	for (i = 0; i < bound; i++) {
+		diff = (result[c1][i] > result[c2][i]) ?
+		    (result[c1][i] - result[c2][i]) :
+		    (result[c2][i] - result[c1][i]);
+
+		if (diff > MAX_TOLERANCE) {
+			if ((i == 2 || i == 6) && !simularity_bitmap) {
+				if (result[c1][i] + result[c1][i + 1] == 0)
+					final_candidate[(i / 4)] = c2;
+				else if (result[c2][i] + result[c2][i + 1] == 0)
+					final_candidate[(i / 4)] = c1;
+				else
+					simularity_bitmap = simularity_bitmap |
+					    (1 << i);
+			} else
+				simularity_bitmap =
+				    simularity_bitmap | (1 << i);
+		}
+	}
+
+	if (simularity_bitmap == 0) {
+		for (i = 0; i < (bound / 4); i++) {
+			if (final_candidate[i] != 0xFF) {
+				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+					result[3][j] =
+					    result[final_candidate[i]][j];
+				bresult = false;
+			}
+		}
+		return bresult;
+	} else if (!(simularity_bitmap & 0x0F)) {
+		for (i = 0; i < 4; i++)
+			result[3][i] = result[c1][i];
+		return false;
+	} else {
+		return false;
+	}
+
+}
+
+static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
+					long result[][8], u8 t, bool is2t)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 i;
+	u8 patha_ok, pathb_ok;
+	u32 adda_reg[IQK_ADDA_REG_NUM] = {
+		0x85c, 0xe6c, 0xe70, 0xe74,
+		0xe78, 0xe7c, 0xe80, 0xe84,
+		0xe88, 0xe8c, 0xed0, 0xed4,
+		0xed8, 0xedc, 0xee0, 0xeec
+	};
+	u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+		0x522, 0x550, 0x551, 0x040
+	};
+	const u32 retrycount = 2;
+	u32 bbvalue;
+
+	if (t == 0) {
+		bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+
+		phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
+		phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+	}
+	_rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t);
+	if (t == 0) {
+		rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+						 RFPGA0_XA_HSSIPARAMETER1,
+						 BIT(8));
+	}
+
+	if (!rtlphy->rfpi_enable)
+		_rtl8723ae_phy_pi_mode_switch(hw, true);
+	if (t == 0) {
+		rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
+		rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
+		rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
+	}
+	rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+	rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+	rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+	if (is2t) {
+		rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+		rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
+	}
+	_rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg,
+					    rtlphy->iqk_mac_backup);
+	rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
+	if (is2t)
+		rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
+	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+	rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+	rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
+	for (i = 0; i < retrycount; i++) {
+		patha_ok = _rtl8723ae_phy_path_a_iqk(hw, is2t);
+		if (patha_ok == 0x03) {
+			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+					0x3FF0000) >> 16;
+			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+					0x3FF0000) >> 16;
+			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
+					0x3FF0000) >> 16;
+			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
+					0x3FF0000) >> 16;
+			break;
+		} else if (i == (retrycount - 1) && patha_ok == 0x01)
+
+			result[t][0] = (rtl_get_bbreg(hw, 0xe94,
+					MASKDWORD) & 0x3FF0000) >> 16;
+		result[t][1] =
+		    (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
+
+	}
+
+	if (is2t) {
+		_rtl8723ae_phy_path_a_standby(hw);
+		_rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t);
+		for (i = 0; i < retrycount; i++) {
+			pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
+			if (pathb_ok == 0x03) {
+				result[t][4] =
+				    (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
+				     0x3FF0000) >> 16;
+				result[t][5] =
+				    (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+				     0x3FF0000) >> 16;
+				result[t][6] =
+				    (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
+				     0x3FF0000) >> 16;
+				result[t][7] =
+				    (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
+				     0x3FF0000) >> 16;
+				break;
+			} else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+				result[t][4] =
+				    (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
+				     0x3FF0000) >> 16;
+			}
+			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+					0x3FF0000) >> 16;
+		}
+	}
+	rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
+	rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
+	rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+	if (is2t)
+		rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+	if (t != 0) {
+		if (!rtlphy->rfpi_enable)
+			_rtl8723ae_phy_pi_mode_switch(hw, false);
+		phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
+		phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+	}
+}
+
+static void _rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmpreg;
+	u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+
+	tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+	if ((tmpreg & 0x70) != 0)
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+	else
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+	if ((tmpreg & 0x70) != 0) {
+		rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+		if (is2t)
+			rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+						  MASK12BITS);
+
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+			      (rf_a_mode & 0x8FFFF) | 0x10000);
+
+		if (is2t)
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+				      (rf_b_mode & 0x8FFFF) | 0x10000);
+	}
+	lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+
+	mdelay(100);
+
+	if ((tmpreg & 0x70) != 0) {
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+		if (is2t)
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+				      rf_b_mode);
+	} else {
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+	}
+}
+
+static void _rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+					     bool bmain, bool is2t)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (is_hal_stop(rtlhal)) {
+		rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
+		rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+	}
+	if (is2t) {
+		if (bmain)
+			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+				      BIT(5) | BIT(6), 0x1);
+		else
+			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+				      BIT(5) | BIT(6), 0x2);
+	} else {
+		if (bmain)
+			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
+		else
+			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
+
+	}
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	long result[4][8];
+	u8 i, final_candidate;
+	bool patha_ok, pathb_ok;
+	long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+	    reg_ecc, reg_tmp = 0;
+	bool is12simular, is13simular, is23simular;
+	bool start_conttx = false, singletone = false;
+	u32 iqk_bb_reg[10] = {
+		ROFDM0_XARXIQIMBALANCE,
+		ROFDM0_XBRXIQIMBALANCE,
+		ROFDM0_ECCATHRESHOLD,
+		ROFDM0_AGCRSSITABLE,
+		ROFDM0_XATXIQIMBALANCE,
+		ROFDM0_XBTXIQIMBALANCE,
+		ROFDM0_XCTXIQIMBALANCE,
+		ROFDM0_XCTXAFE,
+		ROFDM0_XDTXAFE,
+		ROFDM0_RXIQEXTANTA
+	};
+
+	if (recovery) {
+		phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+		return;
+	}
+	if (start_conttx || singletone)
+		return;
+	for (i = 0; i < 8; i++) {
+		result[0][i] = 0;
+		result[1][i] = 0;
+		result[2][i] = 0;
+		result[3][i] = 0;
+	}
+	final_candidate = 0xff;
+	patha_ok = false;
+	pathb_ok = false;
+	is12simular = false;
+	is23simular = false;
+	is13simular = false;
+	for (i = 0; i < 3; i++) {
+		_rtl8723ae_phy_iq_calibrate(hw, result, i, false);
+		if (i == 1) {
+			is12simular = phy_simularity_comp(hw, result, 0, 1);
+			if (is12simular) {
+				final_candidate = 0;
+				break;
+			}
+		}
+		if (i == 2) {
+			is13simular = phy_simularity_comp(hw, result, 0, 2);
+			if (is13simular) {
+				final_candidate = 0;
+				break;
+			}
+			is23simular = phy_simularity_comp(hw, result, 1, 2);
+			if (is23simular) {
+				final_candidate = 1;
+			} else {
+				for (i = 0; i < 8; i++)
+					reg_tmp += result[3][i];
+
+				if (reg_tmp != 0)
+					final_candidate = 3;
+				else
+					final_candidate = 0xFF;
+			}
+		}
+	}
+	for (i = 0; i < 4; i++) {
+		reg_e94 = result[i][0];
+		reg_e9c = result[i][1];
+		reg_ea4 = result[i][2];
+		reg_eac = result[i][3];
+		reg_eb4 = result[i][4];
+		reg_ebc = result[i][5];
+		reg_ec4 = result[i][6];
+		reg_ecc = result[i][7];
+	}
+	if (final_candidate != 0xff) {
+		rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
+		rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
+		reg_ea4 = result[final_candidate][2];
+		reg_eac = result[final_candidate][3];
+		rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
+		rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
+		reg_ec4 = result[final_candidate][6];
+		reg_ecc = result[final_candidate][7];
+		patha_ok = pathb_ok = true;
+	} else {
+		rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
+		rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
+	}
+	if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+		phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+					   final_candidate, (reg_ea4 == 0));
+	phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+}
+
+void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+	bool start_conttx = false, singletone = false;
+
+	if (start_conttx || singletone)
+		return;
+	_rtl8723ae_phy_lc_calibrate(hw, false);
+}
+
+void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+	_rtl8723ae_phy_set_rfpath_switch(hw, bmain, false);
+}
+
+bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	bool postprocessing = false;
+
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+		 "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+		 iotype, rtlphy->set_io_inprogress);
+	do {
+		switch (iotype) {
+		case IO_CMD_RESUME_DM_BY_SCAN:
+			RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+				 "[IO CMD] Resume DM after scan.\n");
+			postprocessing = true;
+			break;
+		case IO_CMD_PAUSE_DM_BY_SCAN:
+			RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+				 "[IO CMD] Pause DM before scan.\n");
+			postprocessing = true;
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not process\n");
+			break;
+		}
+	} while (false);
+	if (postprocessing && !rtlphy->set_io_inprogress) {
+		rtlphy->set_io_inprogress = true;
+		rtlphy->current_io_type = iotype;
+	} else {
+		return false;
+	}
+	rtl8723ae_phy_set_io(hw);
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
+	return true;
+}
+
+static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+		 "--->Cmd(%#x), set_io_inprogress(%d)\n",
+		 rtlphy->current_io_type, rtlphy->set_io_inprogress);
+	switch (rtlphy->current_io_type) {
+	case IO_CMD_RESUME_DM_BY_SCAN:
+		dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+		rtl8723ae_dm_write_dig(hw);
+		rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+		break;
+	case IO_CMD_PAUSE_DM_BY_SCAN:
+		rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue;
+		dm_digtable->cur_igvalue = 0x17;
+		rtl8723ae_dm_write_dig(hw);
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not process\n");
+		break;
+	}
+	rtlphy->set_io_inprogress = false;
+	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+		 "<---(%#x)\n", rtlphy->current_io_type);
+}
+
+static void rtl8723ae_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl8723ae_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 u4b_tmp;
+	u8 delay = 5;
+
+	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+	u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+	while (u4b_tmp != 0 && delay > 0) {
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+		u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+		delay--;
+	}
+	if (delay == 0) {
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+			 "Switch RF timeout!\n");
+		return;
+	}
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
+static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+					      enum rf_pwrstate rfpwr_state)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl8192_tx_ring *ring = NULL;
+	bool bresult = true;
+	u8 i, queue_id;
+
+	switch (rfpwr_state) {
+	case ERFON:
+		if ((ppsc->rfpwr_state == ERFOFF) &&
+		    RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+			bool rtstatus;
+			u32 InitializeCount = 0;
+			do {
+				InitializeCount++;
+				RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+					 "IPS Set eRf nic enable\n");
+				rtstatus = rtl_ps_enable_nic(hw);
+			} while ((rtstatus != true) && (InitializeCount < 10));
+			RT_CLEAR_PS_LEVEL(ppsc,
+					  RT_RF_OFF_LEVL_HALT_NIC);
+		} else {
+			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+				 "Set ERFON slept:%d ms\n",
+				 jiffies_to_msecs(jiffies -
+				 ppsc->last_sleep_jiffies));
+			ppsc->last_awake_jiffies = jiffies;
+			rtl8723ae_phy_set_rf_on(hw);
+		}
+		if (mac->link_state == MAC80211_LINKED) {
+			rtlpriv->cfg->ops->led_control(hw,
+					LED_CTL_LINK);
+		} else {
+			rtlpriv->cfg->ops->led_control(hw,
+					LED_CTL_NO_LINK);
+		}
+		break;
+	case ERFOFF:
+		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+				 "IPS Set eRf nic disable\n");
+			rtl_ps_disable_nic(hw);
+			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+		} else {
+			if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+				rtlpriv->cfg->ops->led_control(hw,
+					LED_CTL_NO_LINK);
+			} else {
+				rtlpriv->cfg->ops->led_control(hw,
+					LED_CTL_POWER_OFF);
+			}
+		}
+		break;
+	case ERFSLEEP:
+		if (ppsc->rfpwr_state == ERFOFF)
+			break;
+		for (queue_id = 0, i = 0;
+		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+			ring = &pcipriv->dev.tx_ring[queue_id];
+			if (skb_queue_len(&ring->queue) == 0) {
+				queue_id++;
+				continue;
+			} else {
+				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+					 (i + 1), queue_id,
+					 skb_queue_len(&ring->queue));
+
+				udelay(10);
+				i++;
+			}
+			if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+					 MAX_DOZE_WAITING_TIMES_9x,
+					 queue_id,
+					 skb_queue_len(&ring->queue));
+				break;
+			}
+		}
+		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+			 "Set ERFSLEEP awake:%d ms\n",
+			 jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
+		ppsc->last_sleep_jiffies = jiffies;
+		_rtl8723ae_phy_set_rf_sleep(hw);
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not processed\n");
+		bresult = false;
+		break;
+	}
+	if (bresult)
+		ppsc->rfpwr_state = rfpwr_state;
+	return bresult;
+}
+
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+				      enum rf_pwrstate rfpwr_state)
+{
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	bool bresult = false;
+
+	if (rfpwr_state == ppsc->rfpwr_state)
+		return bresult;
+	bresult = _rtl8723ae_phy_set_rf_power_state(hw, rfpwr_state);
+	return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
new file mode 100644
index 0000000..e7a59eb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT				16
+#define MAX_RFDEPENDCMD_CNT			16
+#define MAX_POSTCMD_CNT				16
+
+#define MAX_DOZE_WAITING_TIMES_9x		64
+
+#define RT_CANNOT_IO(hw)			false
+#define HIGHPOWER_RADIOA_ARRAYLEN		22
+
+#define MAX_TOLERANCE				5
+#define	IQK_DELAY_TIME				1
+
+#define	APK_BB_REG_NUM				5
+#define	APK_AFE_REG_NUM				16
+#define	APK_CURVE_REG_NUM			4
+#define	PATH_NUM				2
+
+#define LOOP_LIMIT				5
+#define MAX_STALL_TIME				50
+#define AntennaDiversityValue			0x80
+#define MAX_TXPWR_IDX_NMODE_92S			63
+#define Reset_Cnt_Limit				3
+
+#define IQK_MAC_REG_NUM				4
+
+#define RF6052_MAX_PATH				2
+
+#define CT_OFFSET_MAC_ADDR			0x16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX		0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX		0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF	0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF		0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF		0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET		0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET		0x72
+
+#define CT_OFFSET_CHANNEL_PLAH			0x75
+#define CT_OFFSET_THERMAL_METER			0x78
+#define CT_OFFSET_RF_OPTION			0x79
+#define CT_OFFSET_VERSION			0x7E
+#define CT_OFFSET_CUSTOMER_ID			0x7F
+
+#define RTL92C_MAX_PATH_NUM			2
+
+enum swchnlcmd_id {
+	CMDID_END,
+	CMDID_SET_TXPOWEROWER_LEVEL,
+	CMDID_BBREGWRITE10,
+	CMDID_WRITEPORT_ULONG,
+	CMDID_WRITEPORT_USHORT,
+	CMDID_WRITEPORT_UCHAR,
+	CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+	enum swchnlcmd_id cmdid;
+	u32 para1;
+	u32 para2;
+	u32 msdelay;
+};
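+
+/*
+ * Entries of this type are filled in by _phy_set_sw_chnl_cmdarray() and
+ * executed one step at a time by _phy_sw_chnl_step_by_step() in phy.c
+ * during a channel switch.
+ */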
+
+enum hw90_block_e {
+	HW90_BLOCK_MAC = 0,
+	HW90_BLOCK_PHY0 = 1,
+	HW90_BLOCK_PHY1 = 2,
+	HW90_BLOCK_RF = 3,
+	HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+	BASEBAND_CONFIG_PHY_REG = 0,
+	BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+	RA_OFFSET_LEGACY_OFDM1,
+	RA_OFFSET_LEGACY_OFDM2,
+	RA_OFFSET_HT_OFDM1,
+	RA_OFFSET_HT_OFDM2,
+	RA_OFFSET_HT_OFDM3,
+	RA_OFFSET_HT_OFDM4,
+	RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+	ANTENNA_NONE,
+	ANTENNA_D,
+	ANTENNA_C,
+	ANTENNA_CD,
+	ANTENNA_B,
+	ANTENNA_BD,
+	ANTENNA_BC,
+	ANTENNA_BCD,
+	ANTENNA_A,
+	ANTENNA_AD,
+	ANTENNA_AC,
+	ANTENNA_ACD,
+	ANTENNA_AB,
+	ANTENNA_ABD,
+	ANTENNA_ABC,
+	ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+	u32 r_tx_antenna:4;
+	u32 r_ant_l:4;
+	u32 r_ant_non_ht:4;
+	u32 r_ant_ht1:4;
+	u32 r_ant_ht2:4;
+	u32 r_ant_ht_s1:4;
+	u32 r_ant_non_ht_s1:4;
+	u32 ofdm_txsc:2;
+	u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+	u8 r_cckrx_enable_2:2;
+	u8 r_cckrx_enable:2;
+	u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+	u8 mac_addr[ETH_ALEN];
+	u8 cck_tx_power_idx[6];
+	u8 ht40_1s_tx_power_idx[6];
+	u8 ht40_2s_tx_power_idx_diff[3];
+	u8 ht20_tx_power_idx_diff[3];
+	u8 ofdm_tx_power_idx_diff[3];
+	u8 ht40_max_power_offset[3];
+	u8 ht20_max_power_offset[3];
+	u8 channel_plan;
+	u8 thermal_meter;
+	u8 rf_option[5];
+	u8 version;
+	u8 oem_id;
+	u8 regulatory;
+};
+
+struct tx_power_struct {
+	u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 legacy_ht_txpowerdiff;
+	u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 pwrgroup_cnt;
+	u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+				      u32 regaddr, u32 bitmask);
+extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+				     u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+				      enum radio_path rfpath, u32 regaddr,
+				      u32 bitmask);
+extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+				     enum radio_path rfpath, u32 regaddr,
+				     u32 bitmask, u32 data);
+extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+						 enum radio_path rfpath);
+extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+					    long *powerlevel);
+extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+					    u8 channel);
+extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+					     long power_indbm);
+extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
+						u8 operation);
+extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+				      enum nl80211_channel_type ch_type);
+extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					     enum radio_path rfpath);
+bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+					     enum rf_pwrstate rfpwr_state);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
new file mode 100644
index 0000000..df6ca9a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
@@ -0,0 +1,109 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+/* drivers should parse the arrays below and perform the corresponding actions */
+
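+/*
+ * Usage sketch: each flow table below is walked entry by entry until its
+ * terminating RTL8723A_TRANS_END step, and the register operation that each
+ * entry describes is applied.  The field names and helpers in this sketch
+ * (cmd, offset, msk, value, PWR_CMD_END, read_reg()/write_reg()) are
+ * illustrative assumptions; the real entry layout and parser are defined in
+ * pwrseqcmd.h and pwrseqcmd.c.
+ *
+ *	for (i = 0; cfg[i].cmd != PWR_CMD_END; i++) {
+ *		u8 val = read_reg(cfg[i].offset);
+ *
+ *		val &= ~cfg[i].msk;
+ *		val |= (cfg[i].value & cfg[i].msk);
+ *		write_reg(cfg[i].offset, val);
+ *	}
+ */
+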
+/*3 Power on  Array*/
+struct wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_CARDEMU_TO_ACT,
+	RTL8723A_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_ACT_TO_CARDEMU,
+	RTL8723A_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg
+rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+			  + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+			  + RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_ACT_TO_CARDEMU,
+	RTL8723A_TRANS_CARDEMU_TO_CARDDIS,
+	RTL8723A_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+					+ RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_CARDDIS_TO_CARDEMU,
+	RTL8723A_TRANS_CARDEMU_TO_ACT,
+	RTL8723A_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+					+ RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_ACT_TO_CARDEMU,
+	RTL8723A_TRANS_CARDEMU_TO_SUS,
+	RTL8723A_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+					+ RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_SUS_TO_CARDEMU,
+	RTL8723A_TRANS_CARDEMU_TO_ACT,
+	RTL8723A_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				+ RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+				+ RTL8723A_TRANS_END_STPS] = {
+	RTL8723A_TRANS_ACT_TO_CARDEMU,
+	RTL8723A_TRANS_CARDEMU_TO_PDN,
+	RTL8723A_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	/*FW behavior*/
+	RTL8723A_TRANS_ACT_TO_LPS,
+	RTL8723A_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS
+					+ RTL8723A_TRANS_END_STPS] = {
+	/*FW behavior*/
+	RTL8723A_TRANS_LPS_TO_ACT,
+	RTL8723A_TRANS_END
+};
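
Each flow table above is the concatenation of per-transition step macros terminated by RTL8723A_TRANS_END; the declared array sizes (sums of the *_STPS constants) are only upper bounds, and the parser walks entries until it hits the PWR_CMD_END sentinel rather than relying on the declared length. As an illustration only (not part of this patch), a helper that counts the entries the parser will actually visit could look like the sketch below, using nothing but the wlan_pwr_cfg accessors declared in pwrseqcmd.h:

	/* Hypothetical helper, for illustration only: count the entries of a
	 * flow table up to and including the PWR_CMD_END sentinel appended
	 * by RTL8723A_TRANS_END. */
	static u32 count_pwr_steps(const struct wlan_pwr_cfg *flow)
	{
		u32 n = 0;

		while (GET_PWR_CFG_CMD(flow[n]) != PWR_CMD_END)
			n++;

		return n + 1;	/* include the terminating entry */
	}
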
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
new file mode 100644
index 0000000..7a46f9f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
@@ -0,0 +1,322 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_PWRSEQ_H__
+#define __RTL8723E_PWRSEQ_H__
+
+#include "pwrseqcmd.h"
+/*
+	Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
+	There are 6 HW Power States:
+	0: POFF--Power Off
+	1: PDN--Power Down
+	2: CARDEMU--Card Emulation
+	3: ACT--Active Mode
+	4: LPS--Low Power State
+	5: SUS--Suspend
+
+	The transitions between the different states are defined below
+	TRANS_CARDEMU_TO_ACT
+	TRANS_ACT_TO_CARDEMU
+	TRANS_CARDEMU_TO_SUS
+	TRANS_SUS_TO_CARDEMU
+	TRANS_CARDEMU_TO_PDN
+	TRANS_ACT_TO_LPS
+	TRANS_LPS_TO_ACT
+
+	TRANS_END
+*/
+
+#define	RTL8723A_TRANS_CARDEMU_TO_ACT_STPS	10
+#define	RTL8723A_TRANS_ACT_TO_CARDEMU_STPS	10
+#define	RTL8723A_TRANS_CARDEMU_TO_SUS_STPS	10
+#define	RTL8723A_TRANS_SUS_TO_CARDEMU_STPS	10
+#define	RTL8723A_TRANS_CARDEMU_TO_PDN_STPS	10
+#define	RTL8723A_TRANS_PDN_TO_CARDEMU_STPS	10
+#define	RTL8723A_TRANS_ACT_TO_LPS_STPS		15
+#define	RTL8723A_TRANS_LPS_TO_ACT_STPS		15
+#define	RTL8723A_TRANS_END_STPS			1
+
+
+#define RTL8723A_TRANS_CARDEMU_TO_ACT					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
+	 *  comments here*/						\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0},		\
+		/* disable SW LPS 0x04[10]=0*/				\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},	\
+		/* wait till 0x04[17] = 1    power ready*/		\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},	\
+		/* release WLON reset  0x04[16]=1*/			\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},		\
+		/* disable HWPDN 0x04[15]=0*/				\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0},	\
+	/* disable WL suspend*/						\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},	\
+		/* polling until return 0*/				\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}
+
+#define RTL8723A_TRANS_ACT_TO_CARDEMU					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
+	 *  comments here*/						\
+	{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},		\
+		/*0x1F[7:0] = 0 turn off RF*/				\
+	{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},		\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}
+
+#define RTL8723A_TRANS_CARDEMU_TO_SUS					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
+	 *  comments here*/						\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3),		\
+		(BIT(4)|BIT(3))},					\
+		/*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK |	\
+		PWR_INTF_SDIO_MSK,					\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},\
+		 /*0x04[12:11] = 2b'01 enable WL suspend*/		\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
+		PWR_BASEADDR_MAC,					\
+		PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)},		\
+		 /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO,					\
+		PWR_CMD_WRITE, BIT(0), BIT(0)},				\
+		/*Set SDIO suspend local register*/			\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO,					\
+		PWR_CMD_POLLING, BIT(1), 0}				\
+		/*wait power state to suspend*/
+
+#define RTL8723A_TRANS_SUS_TO_CARDEMU					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},		\
+		/*Set SDIO suspend local register*/			\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},	\
+		/*wait power state to suspend*/				\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}	\
+		/*0x04[12:11] = 2b'00 disable WL suspend*/
+
+#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS				\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,			\
+	PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,				\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},\
+		/*0x04[12:11] = 2b'01 enable WL suspend*/		\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)},	\
+		/*0x04[10] = 1, enable SW LPS*/				\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)},	\
+		/*Set SDIO suspend local register*/			\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}		\
+		/*wait power state to suspend*/
+
+#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU				\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},		\
+		/*Set SDIO suspend local register*/			\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},	\
+		/*wait power state to suspend*/				\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},	\
+		/*0x04[12:11] = 2b'00 disable WL suspend*/	\
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}		\
+		/*PCIe DMA start*/
+
+#define RTL8723A_TRANS_CARDEMU_TO_PDN					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},		\
+		/* 0x04[16] = 0*/\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}	\
+		/* 0x04[15] = 1*/
+
+#define RTL8723A_TRANS_PDN_TO_CARDEMU					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}		\
+		/* 0x04[15] = 0*/
+
+#define RTL8723A_TRANS_ACT_TO_LPS					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},		\
+		/*PCIe DMA stop*/					\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},		\
+		/*Tx Pause*/						\
+	{0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},		\
+		/*Should be zero if no packet is transmitting*/		\
+	{0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},		\
+		/*Should be zero if no packet is transmitting*/		\
+	{0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},		\
+		/*Should be zero if no packet is transmitting*/		\
+	{0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},		\
+		/*Should be zero if no packet is transmitting*/		\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},		\
+		/*CCK and OFDM are disabled, and clocks are gated*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},	\
+		/*Delay 1us*/						\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},		\
+		/*Whole BB is reset*/					\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},		\
+		/*Reset MAC TRX*/					\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},		\
+		/*check if removed later*/				\
+	{0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}	\
+		/*Respond TxOK to scheduler*/
+
+#define RTL8723A_TRANS_LPS_TO_ACT					\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
+		 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84},		\
+		 /*SDIO RPWM*/						\
+	{0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},		\
+		/*USB RPWM*/						\
+	{0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},		\
+		/*PCIe RPWM*/						\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS},	\
+		/*Delay*/						\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},		\
+		/* 0x08[4] = 0 switch TSF to 40M*/			\
+	{0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0},		\
+		/*Polling 0x109[7]=0  TSF in 40M*/			\
+	{0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0},	\
+		/*.	0x29[7:6] = 2b'00	 enable BB clock*/	\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},	\
+		/*.	0x101[1] = 1*/					\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},		\
+		/* 0x100[7:0] = 0xFF enable WMAC TRX*/			\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0),		\
+		BIT(1)|BIT(0)},						\
+		/* 0x02[1:0] = 2b'11  enable BB macro*/			\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+		PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}		\
+		/*.	0x522 = 0*/
+
+#define RTL8723A_TRANS_END						\
+	/* format */							\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
+	{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
+	0, PWR_CMD_END, 0, 0}
+
+extern struct
+wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS
+				    + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				     + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+					+ RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+					+ RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				       + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+				       + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				   + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
+				   + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				  + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
+				  + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
+				 + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
+				 + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS
+				     + RTL8723A_TRANS_END_STPS];
+extern struct
+wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS
+				     + RTL8723A_TRANS_END_STPS];
+
+/* RTL8723 Power Configuration CMDs for PCIe interface */
+#define Rtl8723_NIC_PWR_ON_FLOW		rtl8723A_power_on_flow
+#define Rtl8723_NIC_RF_OFF_FLOW		rtl8723A_radio_off_flow
+#define Rtl8723_NIC_DISABLE_FLOW	rtl8723A_card_disable_flow
+#define Rtl8723_NIC_ENABLE_FLOW		rtl8723A_card_enable_flow
+#define Rtl8723_NIC_SUSPEND_FLOW	rtl8723A_suspend_flow
+#define Rtl8723_NIC_RESUME_FLOW		rtl8723A_resume_flow
+#define Rtl8723_NIC_PDN_FLOW		rtl8723A_hwpdn_flow
+#define Rtl8723_NIC_LPS_ENTER_FLOW	rtl8723A_enter_lps_flow
+#define Rtl8723_NIC_LPS_LEAVE_FLOW	rtl8723A_leave_lps_flow
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c
new file mode 100644
index 0000000..2044b59
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c
@@ -0,0 +1,129 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+/*	Description:
+ *		Parse the Power Configuration command table for the
+ *		RTL8723/RTL8188E series ICs and carry out each step.
+ *	Assumption:
+ *		The table follows the specific format released by HW SD.
+ */
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+			      u8 faversion, u8 interface_type,
+			      struct wlan_pwr_cfg pwrcfgcmd[])
+{
+	struct wlan_pwr_cfg cfg_cmd = {0};
+	bool polling_bit = false;
+	u32 ary_idx = 0;
+	u8 value = 0;
+	u32 offset = 0;
+	u32 polling_count = 0;
+	u32 max_polling_cnt = 5000;
+
+	do {
+		cfg_cmd = pwrcfgcmd[ary_idx];
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			"rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
+			"interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+			GET_PWR_CFG_OFFSET(cfg_cmd),
+					   GET_PWR_CFG_CUT_MASK(cfg_cmd),
+			GET_PWR_CFG_FAB_MASK(cfg_cmd),
+					     GET_PWR_CFG_INTF_MASK(cfg_cmd),
+			GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+			GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+
+		if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
+		    (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
+		    (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
+			switch (GET_PWR_CFG_CMD(cfg_cmd)) {
+			case PWR_CMD_READ:
+				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+					"rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
+				break;
+			case PWR_CMD_WRITE:
+				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+					"rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+				offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+				/*Read the value from system register*/
+				value = rtl_read_byte(rtlpriv, offset);
+				value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
+				value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
+					  GET_PWR_CFG_MASK(cfg_cmd));
+
+				/*Write the value back to system register*/
+				rtl_write_byte(rtlpriv, offset, value);
+				break;
+			case PWR_CMD_POLLING:
+				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+					"rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
+				polling_bit = false;
+				offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+				do {
+					value = rtl_read_byte(rtlpriv, offset);
+
+					value &= GET_PWR_CFG_MASK(cfg_cmd);
+					if (value ==
+					    (GET_PWR_CFG_VALUE(cfg_cmd)
+					    & GET_PWR_CFG_MASK(cfg_cmd)))
+						polling_bit = true;
+					else
+						udelay(10);
+
+					if (polling_count++ > max_polling_cnt)
+						return false;
+				} while (!polling_bit);
+				break;
+			case PWR_CMD_DELAY:
+				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+					"rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+				if (GET_PWR_CFG_VALUE(cfg_cmd) ==
+				    PWRSEQ_DELAY_US)
+					udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+				else
+					mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+				break;
+			case PWR_CMD_END:
+				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+				return true;
+			default:
+				RT_ASSERT(false,
+					 "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+				break;
+			}
+
+		}
+		ary_idx++;
+	} while (1);
+
+	return true;
+}
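
The table-driven format that rtl_hal_pwrseqcmdparsing() walks (masked write, masked poll, delay, end) can be exercised outside the driver. The following stand-alone user-space sketch is not part of this patch and uses hypothetical step values; it simulates register I/O with a plain array but follows the same control flow as the parser above:

	/* Illustration only: simulate the write / poll / delay / end walk. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	enum { CMD_WRITE, CMD_POLLING, CMD_DELAY, CMD_END };

	struct step {
		uint16_t offset;
		uint8_t cmd;
		uint8_t msk;
		uint8_t value;
	};

	static uint8_t regs[0x100];	/* simulated register space */

	static bool run_sequence(const struct step *seq)
	{
		for (size_t i = 0; ; i++) {
			const struct step *s = &seq[i];

			switch (s->cmd) {
			case CMD_WRITE:	/* read-modify-write under the mask */
				regs[s->offset] = (regs[s->offset] & ~s->msk) |
						  (s->value & s->msk);
				break;
			case CMD_POLLING:
				/* the driver retries with udelay() up to a
				 * limit; the simulated registers either match
				 * now or never will */
				if ((regs[s->offset] & s->msk) !=
				    (s->value & s->msk))
					return false;
				break;
			case CMD_DELAY:
				/* udelay()/mdelay() in the real parser */
				break;
			case CMD_END:
				return true;
			}
		}
	}

	int main(void)
	{
		const struct step power_on[] = {
			{ 0x05, CMD_WRITE,   0x04, 0x00 },	/* clear a bit */
			{ 0x05, CMD_POLLING, 0x04, 0x00 },	/* confirm it */
			{ 0x00, CMD_END,     0x00, 0x00 },
		};

		printf("sequence %s\n",
		       run_sequence(power_on) ? "completed" : "failed");
		return 0;
	}
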
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h
new file mode 100644
index 0000000..6e0f3ea
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_PWRSEQCMD_H__
+#define __RTL8723E_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------
+ * 3 The value of cmd: 4 bits
+ *---------------------------------------------
+ */
+#define    PWR_CMD_READ		0x00
+#define    PWR_CMD_WRITE	0x01
+#define    PWR_CMD_POLLING	0x02
+#define    PWR_CMD_DELAY	0x03
+#define    PWR_CMD_END		0x04
+
+/* define the base address of each block */
+#define   PWR_BASEADDR_MAC	0x00
+#define   PWR_BASEADDR_USB	0x01
+#define   PWR_BASEADDR_PCIE	0x02
+#define   PWR_BASEADDR_SDIO	0x03
+
+#define	PWR_INTF_SDIO_MSK	BIT(0)
+#define	PWR_INTF_USB_MSK	BIT(1)
+#define	PWR_INTF_PCI_MSK	BIT(2)
+#define	PWR_INTF_ALL_MSK	(BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define	PWR_FAB_TSMC_MSK	BIT(0)
+#define	PWR_FAB_UMC_MSK		BIT(1)
+#define	PWR_FAB_ALL_MSK		(BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define	PWR_CUT_TESTCHIP_MSK	BIT(0)
+#define	PWR_CUT_A_MSK		BIT(1)
+#define	PWR_CUT_B_MSK		BIT(2)
+#define	PWR_CUT_C_MSK		BIT(3)
+#define	PWR_CUT_D_MSK		BIT(4)
+#define	PWR_CUT_E_MSK		BIT(5)
+#define	PWR_CUT_F_MSK		BIT(6)
+#define	PWR_CUT_G_MSK		BIT(7)
+#define	PWR_CUT_ALL_MSK		0xFF
+
+enum pwrseq_delay_unit {
+	PWRSEQ_DELAY_US,
+	PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+	u16 offset;
+	u8 cut_msk;
+	u8 fab_msk:4;
+	u8 interface_msk:4;
+	u8 base:4;
+	u8 cmd:4;
+	u8 msk;
+	u8 value;
+};
+
+#define	GET_PWR_CFG_OFFSET(__PWR_CMD)	(__PWR_CMD.offset)
+#define	GET_PWR_CFG_CUT_MASK(__PWR_CMD)	(__PWR_CMD.cut_msk)
+#define	GET_PWR_CFG_FAB_MASK(__PWR_CMD)	(__PWR_CMD.fab_msk)
+#define	GET_PWR_CFG_INTF_MASK(__PWR_CMD)	(__PWR_CMD.interface_msk)
+#define	GET_PWR_CFG_BASE(__PWR_CMD)	(__PWR_CMD.base)
+#define	GET_PWR_CFG_CMD(__PWR_CMD)	(__PWR_CMD.cmd)
+#define	GET_PWR_CFG_MASK(__PWR_CMD)	(__PWR_CMD.msk)
+#define	GET_PWR_CFG_VALUE(__PWR_CMD)	(__PWR_CMD.value)
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+			      u8 fab_version, u8 interface_type,
+			      struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
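
The cut/fab/interface masks above let one table serve several chip revisions and buses: an entry is executed only when all three of its masks intersect the caller's values. As a sketch only (the real call sites live in the HAL code, which is not part of this hunk, and rtlpriv is assumed to be in scope), a PCIe power-on path would be expected to invoke the parser roughly as follows, using the Rtl8723_NIC_PWR_ON_FLOW alias from pwrseq.h:

	/* Sketch, not part of this patch: run the power-on table for a PCIe
	 * part.  Entries whose cut/fab/interface masks do not match are
	 * skipped; the walk stops at the PWR_CMD_END entry appended by
	 * RTL8723A_TRANS_END. */
	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
				      PWR_INTF_PCI_MSK, Rtl8723_NIC_PWR_ON_FLOW))
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "power-on flow failed\n");
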
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
new file mode 100644
index 0000000..199da36
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
@@ -0,0 +1,2097 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_REG_H__
+#define __RTL8723E_REG_H__
+
+#define REG_SYS_ISO_CTRL			0x0000
+#define REG_SYS_FUNC_EN				0x0002
+#define REG_APS_FSMCO				0x0004
+#define REG_SYS_CLKR				0x0008
+#define REG_9346CR				0x000A
+#define REG_EE_VPD				0x000C
+#define REG_AFE_MISC				0x0010
+#define REG_SPS0_CTRL				0x0011
+#define REG_SPS_OCP_CFG				0x0018
+#define REG_RSV_CTRL				0x001C
+#define REG_RF_CTRL				0x001F
+#define REG_LDOA15_CTRL				0x0020
+#define REG_LDOV12D_CTRL			0x0021
+#define REG_LDOHCI12_CTRL			0x0022
+#define REG_LPLDO_CTRL				0x0023
+#define REG_AFE_XTAL_CTRL			0x0024
+#define REG_AFE_PLL_CTRL			0x0028
+#define REG_EFUSE_CTRL				0x0030
+#define REG_EFUSE_TEST				0x0034
+#define REG_PWR_DATA				0x0038
+#define REG_CAL_TIMER				0x003C
+#define REG_ACLK_MON				0x003E
+#define REG_GPIO_MUXCFG				0x0040
+#define REG_GPIO_IO_SEL				0x0042
+#define REG_MAC_PINMUX_CFG			0x0043
+#define REG_GPIO_PIN_CTRL			0x0044
+#define REG_GPIO_INTM				0x0048
+#define REG_LEDCFG0				0x004C
+#define REG_LEDCFG1				0x004D
+#define REG_LEDCFG2				0x004E
+#define REG_LEDCFG3				0x004F
+#define REG_FSIMR				0x0050
+#define REG_FSISR				0x0054
+#define REG_GPIO_PIN_CTRL_2			0x0060
+#define REG_GPIO_IO_SEL_2			0x0062
+#define REG_MULTI_FUNC_CTRL			0x0068
+
+#define REG_MCUFWDL				0x0080
+
+#define REG_HMEBOX_EXT_0			0x0088
+#define REG_HMEBOX_EXT_1			0x008A
+#define REG_HMEBOX_EXT_2			0x008C
+#define REG_HMEBOX_EXT_3			0x008E
+
+#define REG_BIST_SCAN				0x00D0
+#define REG_BIST_RPT				0x00D4
+#define REG_BIST_ROM_RPT			0x00D8
+#define REG_USB_SIE_INTF			0x00E0
+#define REG_PCIE_MIO_INTF			0x00E4
+#define REG_PCIE_MIO_INTD			0x00E8
+#define REG_SYS_CFG				0x00F0
+#define REG_GPIO_OUTSTS				0x00F4
+
+#define REG_CR					0x0100
+#define REG_PBP					0x0104
+#define REG_TRXDMA_CTRL				0x010C
+#define REG_TRXFF_BNDY				0x0114
+#define REG_TRXFF_STATUS			0x0118
+#define REG_RXFF_PTR				0x011C
+#define REG_HIMR				0x0120
+#define REG_HISR				0x0124
+#define REG_HIMRE				0x0128
+#define REG_HISRE				0x012C
+#define REG_CPWM				0x012F
+#define REG_FWIMR				0x0130
+#define REG_FWISR				0x0134
+#define REG_PKTBUF_DBG_CTRL			0x0140
+#define REG_PKTBUF_DBG_DATA_L			0x0144
+#define REG_PKTBUF_DBG_DATA_H			0x0148
+
+#define REG_TC0_CTRL				0x0150
+#define REG_TC1_CTRL				0x0154
+#define REG_TC2_CTRL				0x0158
+#define REG_TC3_CTRL				0x015C
+#define REG_TC4_CTRL				0x0160
+#define REG_TCUNIT_BASE				0x0164
+#define REG_MBIST_START				0x0174
+#define REG_MBIST_DONE				0x0178
+#define REG_MBIST_FAIL				0x017C
+#define REG_C2HEVT_MSG_NORMAL			0x01A0
+#define REG_C2HEVT_MSG_TEST			0x01B8
+#define REG_MCUTST_1				0x01c0
+#define REG_FMETHR				0x01C8
+#define REG_HMETFR				0x01CC
+#define REG_HMEBOX_0				0x01D0
+#define REG_HMEBOX_1				0x01D4
+#define REG_HMEBOX_2				0x01D8
+#define REG_HMEBOX_3				0x01DC
+
+#define REG_LLT_INIT				0x01E0
+#define REG_BB_ACCEESS_CTRL			0x01E8
+#define REG_BB_ACCESS_DATA			0x01EC
+
+#define REG_RQPN				0x0200
+#define REG_FIFOPAGE				0x0204
+#define REG_TDECTRL				0x0208
+#define REG_TXDMA_OFFSET_CHK			0x020C
+#define REG_TXDMA_STATUS			0x0210
+#define REG_RQPN_NPQ				0x0214
+
+#define REG_RXDMA_AGG_PG_TH			0x0280
+#define REG_RXPKT_NUM				0x0284
+#define REG_RXDMA_STATUS			0x0288
+
+#define	REG_PCIE_CTRL_REG			0x0300
+#define	REG_INT_MIG				0x0304
+#define	REG_BCNQ_DESA				0x0308
+#define	REG_HQ_DESA				0x0310
+#define	REG_MGQ_DESA				0x0318
+#define	REG_VOQ_DESA				0x0320
+#define	REG_VIQ_DESA				0x0328
+#define	REG_BEQ_DESA				0x0330
+#define	REG_BKQ_DESA				0x0338
+#define	REG_RX_DESA				0x0340
+#define	REG_DBI					0x0348
+#define	REG_MDIO				0x0354
+#define	REG_DBG_SEL				0x0360
+#define	REG_PCIE_HRPWM				0x0361
+#define	REG_PCIE_HCPWM				0x0363
+#define	REG_UART_CTRL				0x0364
+#define	REG_UART_TX_DESA			0x0370
+#define	REG_UART_RX_DESA			0x0378
+
+#define	REG_HDAQ_DESA_NODEF			0x0000
+#define	REG_CMDQ_DESA_NODEF			0x0000
+
+#define REG_VOQ_INFORMATION			0x0400
+#define REG_VIQ_INFORMATION			0x0404
+#define REG_BEQ_INFORMATION			0x0408
+#define REG_BKQ_INFORMATION			0x040C
+#define REG_MGQ_INFORMATION			0x0410
+#define REG_HGQ_INFORMATION			0x0414
+#define REG_BCNQ_INFORMATION			0x0418
+
+#define REG_CPU_MGQ_INFORMATION			0x041C
+#define REG_FWHW_TXQ_CTRL			0x0420
+#define REG_HWSEQ_CTRL				0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY			0x0424
+#define REG_TXPKTBUF_MGQ_BDNY			0x0425
+#define REG_MULTI_BCNQ_EN			0x0426
+#define REG_MULTI_BCNQ_OFFSET			0x0427
+#define REG_SPEC_SIFS				0x0428
+#define REG_RL					0x042A
+#define REG_DARFRC				0x0430
+#define REG_RARFRC				0x0438
+#define REG_RRSR				0x0440
+#define REG_ARFR0				0x0444
+#define REG_ARFR1				0x0448
+#define REG_ARFR2				0x044C
+#define REG_ARFR3				0x0450
+#define REG_AGGLEN_LMT				0x0458
+#define REG_AMPDU_MIN_SPACE			0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD		0x045D
+#define REG_FAST_EDCA_CTRL			0x0460
+#define REG_RD_RESP_PKT_TH			0x0463
+#define REG_INIRTS_RATE_SEL			0x0480
+#define REG_INIDATA_RATE_SEL			0x0484
+#define REG_POWER_STATUS			0x04A4
+#define REG_POWER_STAGE1			0x04B4
+#define REG_POWER_STAGE2			0x04B8
+#define REG_PKT_LIFE_TIME			0x04C0
+#define REG_STBC_SETTING			0x04C4
+#define REG_PROT_MODE_CTRL			0x04C8
+#define REG_BAR_MODE_CTRL			0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT			0x04CF
+#define REG_NQOS_SEQ				0x04DC
+#define REG_QOS_SEQ				0x04DE
+#define REG_NEED_CPU_HANDLE			0x04E0
+#define REG_PKT_LOSE_RPT			0x04E1
+#define REG_PTCL_ERR_STATUS			0x04E2
+#define REG_DUMMY				0x04FC
+
+#define REG_EDCA_VO_PARAM			0x0500
+#define REG_EDCA_VI_PARAM			0x0504
+#define REG_EDCA_BE_PARAM			0x0508
+#define REG_EDCA_BK_PARAM			0x050C
+#define REG_BCNTCFG				0x0510
+#define REG_PIFS				0x0512
+#define REG_RDG_PIFS				0x0513
+#define REG_SIFS_CTX				0x0514
+#define REG_SIFS_TRX				0x0516
+#define REG_AGGR_BREAK_TIME			0x051A
+#define REG_SLOT				0x051B
+#define REG_TX_PTCL_CTRL			0x0520
+#define REG_TXPAUSE				0x0522
+#define REG_DIS_TXREQ_CLR			0x0523
+#define REG_RD_CTRL				0x0524
+#define REG_TBTT_PROHIBIT			0x0540
+#define REG_RD_NAV_NXT				0x0544
+#define REG_NAV_PROT_LEN			0x0546
+#define REG_BCN_CTRL				0x0550
+#define REG_USTIME_TSF				0x0551
+#define REG_MBID_NUM				0x0552
+#define REG_DUAL_TSF_RST			0x0553
+#define REG_BCN_INTERVAL			0x0554
+#define REG_MBSSID_BCN_SPACE			0x0554
+#define REG_DRVERLYINT				0x0558
+#define REG_BCNDMATIM				0x0559
+#define REG_ATIMWND				0x055A
+#define REG_BCN_MAX_ERR				0x055D
+#define REG_RXTSF_OFFSET_CCK			0x055E
+#define REG_RXTSF_OFFSET_OFDM			0x055F
+#define REG_TSFTR				0x0560
+#define REG_INIT_TSFTR				0x0564
+#define REG_PSTIMER				0x0580
+#define REG_TIMER0				0x0584
+#define REG_TIMER1				0x0588
+#define REG_ACMHWCTRL				0x05C0
+#define REG_ACMRSTCTRL				0x05C1
+#define REG_ACMAVG				0x05C2
+#define REG_VO_ADMTIME				0x05C4
+#define REG_VI_ADMTIME				0x05C6
+#define REG_BE_ADMTIME				0x05C8
+#define REG_EDCA_RANDOM_GEN			0x05CC
+#define REG_SCH_TXCMD				0x05D0
+
+#define REG_APSD_CTRL				0x0600
+#define REG_BWOPMODE				0x0603
+#define REG_TCR					0x0604
+#define REG_RCR					0x0608
+#define REG_RX_PKT_LIMIT			0x060C
+#define REG_RX_DLK_TIME				0x060D
+#define REG_RX_DRVINFO_SZ			0x060F
+
+#define REG_MACID				0x0610
+#define REG_BSSID				0x0618
+#define REG_MAR					0x0620
+#define REG_MBIDCAMCFG				0x0628
+
+#define REG_USTIME_EDCA				0x0638
+#define REG_MAC_SPEC_SIFS			0x063A
+#define REG_RESP_SIFS_CCK			0x063C
+#define REG_RESP_SIFS_OFDM			0x063E
+#define REG_ACKTO				0x0640
+#define REG_CTS2TO				0x0641
+#define REG_EIFS				0x0642
+
+#define REG_NAV_CTRL				0x0650
+#define REG_BACAMCMD				0x0654
+#define REG_BACAMCONTENT			0x0658
+#define REG_LBDLY				0x0660
+#define REG_FWDLY				0x0661
+#define REG_RXERR_RPT				0x0664
+#define REG_WMAC_TRXPTCL_CTL			0x0668
+
+#define REG_CAMCMD				0x0670
+#define REG_CAMWRITE				0x0674
+#define REG_CAMREAD				0x0678
+#define REG_CAMDBG				0x067C
+#define REG_SECCFG				0x0680
+
+#define REG_WOW_CTRL				0x0690
+#define REG_PSSTATUS				0x0691
+#define REG_PS_RX_INFO				0x0692
+#define REG_LPNAV_CTRL				0x0694
+#define REG_WKFMCAM_CMD				0x0698
+#define REG_WKFMCAM_RWD				0x069C
+#define REG_RXFLTMAP0				0x06A0
+#define REG_RXFLTMAP1				0x06A2
+#define REG_RXFLTMAP2				0x06A4
+#define REG_BCN_PSR_RPT				0x06A8
+#define REG_CALB32K_CTRL			0x06AC
+#define REG_PKT_MON_CTRL			0x06B4
+#define REG_BT_COEX_TABLE			0x06C0
+#define REG_WMAC_RESP_TXINFO			0x06D8
+
+#define REG_USB_INFO				0xFE17
+#define REG_USB_SPECIAL_OPTION			0xFE55
+#define REG_USB_DMA_AGG_TO			0xFE5B
+#define REG_USB_AGG_TO				0xFE5C
+#define REG_USB_AGG_TH				0xFE5D
+
+#define REG_TEST_USB_TXQS			0xFE48
+#define REG_TEST_SIE_VID			0xFE60
+#define REG_TEST_SIE_PID			0xFE62
+#define REG_TEST_SIE_OPTIONAL			0xFE64
+#define REG_TEST_SIE_CHIRP_K			0xFE65
+#define REG_TEST_SIE_PHY			0xFE66
+#define REG_TEST_SIE_MAC_ADDR			0xFE70
+#define REG_TEST_SIE_STRING			0xFE80
+
+#define REG_NORMAL_SIE_VID			0xFE60
+#define REG_NORMAL_SIE_PID			0xFE62
+#define REG_NORMAL_SIE_OPTIONAL			0xFE64
+#define REG_NORMAL_SIE_EP			0xFE65
+#define REG_NORMAL_SIE_PHY			0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR			0xFE70
+#define REG_NORMAL_SIE_STRING			0xFE80
+
+#define	CR9346					REG_9346CR
+#define	MSR					(REG_CR + 2)
+#define	ISR					REG_HISR
+#define	TSFR					REG_TSFTR
+
+#define	MACIDR0					REG_MACID
+#define	MACIDR4					(REG_MACID + 4)
+
+#define PBP					REG_PBP
+
+#define	IDR0					MACIDR0
+#define	IDR4					MACIDR4
+
+#define	UNUSED_REGISTER				0x1BF
+#define	DCAM					UNUSED_REGISTER
+#define	PSR					UNUSED_REGISTER
+#define BBADDR					UNUSED_REGISTER
+#define	PHYDATAR				UNUSED_REGISTER
+
+#define	INVALID_BBRF_VALUE			0x12345678
+
+#define	MAX_MSS_DENSITY_2T			0x13
+#define	MAX_MSS_DENSITY_1T			0x0A
+
+#define	CMDEEPROM_EN				BIT(5)
+#define	CMDEEPROM_SEL				BIT(4)
+#define	CMD9346CR_9356SEL			BIT(4)
+#define	AUTOLOAD_EEPROM				(CMDEEPROM_EN|CMDEEPROM_SEL)
+#define	AUTOLOAD_EFUSE				CMDEEPROM_EN
+
+#define	GPIOSEL_GPIO				0
+#define	GPIOSEL_ENBT				BIT(5)
+
+#define	GPIO_IN					REG_GPIO_PIN_CTRL
+#define	GPIO_OUT				(REG_GPIO_PIN_CTRL+1)
+#define	GPIO_IO_SEL				(REG_GPIO_PIN_CTRL+2)
+#define	GPIO_MOD				(REG_GPIO_PIN_CTRL+3)
+
+#define	MSR_NOLINK				0x00
+#define	MSR_ADHOC				0x01
+#define	MSR_INFRA				0x02
+#define	MSR_AP					0x03
+
+#define	RRSR_RSC_OFFSET				21
+#define	RRSR_SHORT_OFFSET			23
+#define	RRSR_RSC_BW_40M				0x600000
+#define	RRSR_RSC_UPSUBCHNL			0x400000
+#define	RRSR_RSC_LOWSUBCHNL			0x200000
+#define	RRSR_SHORT				0x800000
+#define	RRSR_1M					BIT(0)
+#define	RRSR_2M					BIT(1)
+#define	RRSR_5_5M				BIT(2)
+#define	RRSR_11M				BIT(3)
+#define	RRSR_6M					BIT(4)
+#define	RRSR_9M					BIT(5)
+#define	RRSR_12M				BIT(6)
+#define	RRSR_18M				BIT(7)
+#define	RRSR_24M				BIT(8)
+#define	RRSR_36M				BIT(9)
+#define	RRSR_48M				BIT(10)
+#define	RRSR_54M				BIT(11)
+#define	RRSR_MCS0				BIT(12)
+#define	RRSR_MCS1				BIT(13)
+#define	RRSR_MCS2				BIT(14)
+#define	RRSR_MCS3				BIT(15)
+#define	RRSR_MCS4				BIT(16)
+#define	RRSR_MCS5				BIT(17)
+#define	RRSR_MCS6				BIT(18)
+#define	RRSR_MCS7				BIT(19)
+#define	BRSR_ACKSHORTPMB			BIT(23)
+
+#define	RATR_1M					0x00000001
+#define	RATR_2M					0x00000002
+#define	RATR_55M				0x00000004
+#define	RATR_11M				0x00000008
+#define	RATR_6M					0x00000010
+#define	RATR_9M					0x00000020
+#define	RATR_12M				0x00000040
+#define	RATR_18M				0x00000080
+#define	RATR_24M				0x00000100
+#define	RATR_36M				0x00000200
+#define	RATR_48M				0x00000400
+#define	RATR_54M				0x00000800
+#define	RATR_MCS0				0x00001000
+#define	RATR_MCS1				0x00002000
+#define	RATR_MCS2				0x00004000
+#define	RATR_MCS3				0x00008000
+#define	RATR_MCS4				0x00010000
+#define	RATR_MCS5				0x00020000
+#define	RATR_MCS6				0x00040000
+#define	RATR_MCS7				0x00080000
+#define	RATR_MCS8				0x00100000
+#define	RATR_MCS9				0x00200000
+#define	RATR_MCS10				0x00400000
+#define	RATR_MCS11				0x00800000
+#define	RATR_MCS12				0x01000000
+#define	RATR_MCS13				0x02000000
+#define	RATR_MCS14				0x04000000
+#define	RATR_MCS15				0x08000000
+
+#define	RATE_ALL_CCK		(RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define	RATE_ALL_OFDM_AG	(RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+				RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define	RATE_ALL_OFDM_1SS	(RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+				RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+				RATR_MCS6 | RATR_MCS7)
+#define	RATE_ALL_OFDM_2SS	(RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+				RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
+				RATR_MCS14 | RATR_MCS15)
+
+#define	BW_OPMODE_20MHZ				BIT(2)
+#define	BW_OPMODE_5G				BIT(1)
+#define	BW_OPMODE_11J				BIT(0)
+
+#define	CAM_VALID				BIT(15)
+#define	CAM_NOTVALID				0x0000
+#define	CAM_USEDK				BIT(5)
+
+#define	CAM_NONE				0x0
+#define	CAM_WEP40				0x01
+#define	CAM_TKIP				0x02
+#define	CAM_AES					0x04
+#define	CAM_WEP104				0x05
+
+#define	TOTAL_CAM_ENTRY				32
+#define	HALF_CAM_ENTRY				16
+
+#define	CAM_WRITE				BIT(16)
+#define	CAM_READ				0x00000000
+#define	CAM_POLLINIG				BIT(31)
+
+#define	SCR_USEDK				0x01
+#define	SCR_TXSEC_ENABLE			0x02
+#define	SCR_RXSEC_ENABLE			0x04
+
+#define	WOW_PMEN				BIT(0)
+#define	WOW_WOMEN				BIT(1)
+#define	WOW_MAGIC				BIT(2)
+#define	WOW_UWF					BIT(3)
+
+#define	IMR8190_DISABLED			0x0
+#define	IMR_BCNDMAINT6				BIT(31)
+#define	IMR_BCNDMAINT5				BIT(30)
+#define	IMR_BCNDMAINT4				BIT(29)
+#define	IMR_BCNDMAINT3				BIT(28)
+#define	IMR_BCNDMAINT2				BIT(27)
+#define	IMR_BCNDMAINT1				BIT(26)
+#define	IMR_BCNDOK8				BIT(25)
+#define	IMR_BCNDOK7				BIT(24)
+#define	IMR_BCNDOK6				BIT(23)
+#define	IMR_BCNDOK5				BIT(22)
+#define	IMR_BCNDOK4				BIT(21)
+#define	IMR_BCNDOK3				BIT(20)
+#define	IMR_BCNDOK2				BIT(19)
+#define	IMR_BCNDOK1				BIT(18)
+#define	IMR_TIMEOUT2				BIT(17)
+#define	IMR_TIMEOUT1				BIT(16)
+#define	IMR_TXFOVW				BIT(15)
+#define	IMR_PSTIMEOUT				BIT(14)
+#define	IMR_BCNINT				BIT(13)
+#define	IMR_RXFOVW				BIT(12)
+#define	IMR_RDU					BIT(11)
+#define	IMR_ATIMEND				BIT(10)
+#define	IMR_BDOK				BIT(9)
+#define	IMR_HIGHDOK				BIT(8)
+#define	IMR_TBDOK				BIT(7)
+#define	IMR_MGNTDOK				BIT(6)
+#define	IMR_TBDER				BIT(5)
+#define	IMR_BKDOK				BIT(4)
+#define	IMR_BEDOK				BIT(3)
+#define	IMR_VIDOK				BIT(2)
+#define	IMR_VODOK				BIT(1)
+#define	IMR_ROK					BIT(0)
+
+#define	IMR_TXERR				BIT(11)
+#define	IMR_RXERR				BIT(10)
+#define	IMR_CPWM				BIT(8)
+#define	IMR_OCPINT				BIT(1)
+#define	IMR_WLANOFF				BIT(0)
+
+/* 8723E series PCIE Host IMR/ISR bit */
+/* IMR DW0 Bit 0-31 */
+#define	PHIMR_TIMEOUT2				BIT(31)
+#define	PHIMR_TIMEOUT1				BIT(30)
+#define	PHIMR_PSTIMEOUT				BIT(29)
+#define	PHIMR_GTINT4				BIT(28)
+#define	PHIMR_GTINT3				BIT(27)
+#define	PHIMR_TXBCNERR				BIT(26)
+#define	PHIMR_TXBCNOK				BIT(25)
+#define	PHIMR_TSF_BIT32_TOGGLE			BIT(24)
+#define	PHIMR_BCNDMAINT3			BIT(23)
+#define	PHIMR_BCNDMAINT2			BIT(22)
+#define	PHIMR_BCNDMAINT1			BIT(21)
+#define	PHIMR_BCNDMAINT0			BIT(20)
+#define	PHIMR_BCNDOK3				BIT(19)
+#define	PHIMR_BCNDOK2				BIT(18)
+#define	PHIMR_BCNDOK1				BIT(17)
+#define	PHIMR_BCNDOK0				BIT(16)
+#define	PHIMR_HSISR_IND_ON			BIT(15)
+#define	PHIMR_BCNDMAINT_E			BIT(14)
+#define	PHIMR_ATIMEND_E				BIT(13)
+#define	PHIMR_ATIM_CTW_END			BIT(12)
+#define	PHIMR_HISRE_IND				BIT(11)
+#define	PHIMR_C2HCMD				BIT(10)
+#define	PHIMR_CPWM2				BIT(9)
+#define	PHIMR_CPWM				BIT(8)
+#define	PHIMR_HIGHDOK				BIT(7)
+#define	PHIMR_MGNTDOK				BIT(6)
+#define	PHIMR_BKDOK				BIT(5)
+#define	PHIMR_BEDOK				BIT(4)
+#define	PHIMR_VIDOK				BIT(3)
+#define	PHIMR_VODOK				BIT(2)
+#define	PHIMR_RDU				BIT(1)
+#define	PHIMR_ROK				BIT(0)
+
+/* PCIE Host Interrupt Status Extension bit */
+#define	PHIMR_BCNDMAINT7			BIT(23)
+#define	PHIMR_BCNDMAINT6			BIT(22)
+#define	PHIMR_BCNDMAINT5			BIT(21)
+#define	PHIMR_BCNDMAINT4			BIT(20)
+#define	PHIMR_BCNDOK7				BIT(19)
+#define	PHIMR_BCNDOK6				BIT(18)
+#define	PHIMR_BCNDOK5				BIT(17)
+#define	PHIMR_BCNDOK4				BIT(16)
+/* bit12-15: RSVD */
+#define	PHIMR_TXERR				BIT(11)
+#define	PHIMR_RXERR				BIT(10)
+#define	PHIMR_TXFOVW				BIT(9)
+#define	PHIMR_RXFOVW				BIT(8)
+/* bit2-7: RSV */
+#define	PHIMR_OCPINT				BIT(1)
+
+#define	HWSET_MAX_SIZE				256
+#define EFUSE_MAX_SECTION			32
+#define EFUSE_REAL_CONTENT_LEN			512
+#define EFUSE_OOB_PROTECT_BYTES			15
+
+#define	EEPROM_DEFAULT_TSSI			0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF		0x0
+#define EEPROM_DEFAULT_CRYSTALCAP		0x5
+#define EEPROM_DEFAULT_BOARDTYPE		0x02
+#define EEPROM_DEFAULT_TXPOWER			0x1010
+#define	EEPROM_DEFAULT_HT2T_TXPWR		0x10
+
+#define	EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF	0x3
+#define	EEPROM_DEFAULT_THERMALMETER		0x12
+#define	EEPROM_DEFAULT_ANTTXPOWERDIFF		0x0
+#define	EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP	0x5
+#define	EEPROM_DEFAULT_TXPOWERLEVEL		0x22
+#define	EEPROM_DEFAULT_HT40_2SDIFF		0x0
+#define EEPROM_DEFAULT_HT20_DIFF		2
+#define	EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF	0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET	0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET	0
+
+
+#define EEPROM_DEFAULT_PID			0x1234
+#define EEPROM_DEFAULT_VID			0x5678
+#define EEPROM_DEFAULT_CUSTOMERID		0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID		0xCD
+#define EEPROM_DEFAULT_VERSION			0
+
+#define	EEPROM_CHANNEL_PLAN_FCC			0x0
+#define	EEPROM_CHANNEL_PLAN_IC			0x1
+#define	EEPROM_CHANNEL_PLAN_ETSI		0x2
+#define	EEPROM_CHANNEL_PLAN_SPAIN		0x3
+#define	EEPROM_CHANNEL_PLAN_FRANCE		0x4
+#define	EEPROM_CHANNEL_PLAN_MKK			0x5
+#define	EEPROM_CHANNEL_PLAN_MKK1		0x6
+#define	EEPROM_CHANNEL_PLAN_ISRAEL		0x7
+#define	EEPROM_CHANNEL_PLAN_TELEC		0x8
+#define	EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN	0x9
+#define	EEPROM_CHANNEL_PLAN_WORLD_WIDE_13	0xA
+#define	EEPROM_CHANNEL_PLAN_NCC			0xB
+#define	EEPROM_CHANNEL_PLAN_BY_HW_MASK		0x80
+
+#define EEPROM_CID_DEFAULT			0x0
+#define EEPROM_CID_TOSHIBA			0x4
+#define	EEPROM_CID_CCX				0x10
+#define	EEPROM_CID_QMI				0x0D
+#define EEPROM_CID_WHQL				0xFE
+
+#define	RTL8192_EEPROM_ID			0x8129
+
+#define RTL8190_EEPROM_ID			0x8129
+#define EEPROM_HPON				0x02
+#define EEPROM_CLK				0x06
+#define EEPROM_TESTR				0x08
+
+#define EEPROM_VID				0x49
+#define EEPROM_DID				0x4B
+#define EEPROM_SVID				0x4D
+#define EEPROM_SMID				0x4F
+
+#define EEPROM_MAC_ADDR				0x67
+
+#define EEPROM_CCK_TX_PWR_INX			0x5A
+#define EEPROM_HT40_1S_TX_PWR_INX		0x60
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF		0x66
+#define EEPROM_HT20_TX_PWR_INX_DIFF		0x69
+#define EEPROM_OFDM_TX_PWR_INX_DIFF		0x6C
+#define EEPROM_HT40_MAX_PWR_OFFSET		0x25
+#define EEPROM_HT20_MAX_PWR_OFFSET		0x22
+
+#define EEPROM_THERMAL_METER			0x2a
+#define EEPROM_XTAL_K				0x78
+#define EEPROM_RF_OPT1				0x79
+#define EEPROM_RF_OPT2				0x7A
+#define EEPROM_RF_OPT3				0x7B
+#define EEPROM_RF_OPT4				0x7C
+#define EEPROM_CHANNEL_PLAN			0x28
+#define EEPROM_VERSION				0x30
+#define EEPROM_CUSTOMER_ID			0x31
+
+#define EEPROM_PWRDIFF				0x54
+
+#define EEPROM_TXPOWERCCK			0x10
+#define	EEPROM_TXPOWERHT40_1S			0x16
+#define	EEPROM_TXPOWERHT40_2SDIFF		0x66
+#define EEPROM_TXPOWERHT20DIFF			0x1C
+#define EEPROM_TXPOWER_OFDMDIFF			0x1F
+
+#define	EEPROM_TXPWR_GROUP			0x22
+
+#define EEPROM_TSSI_A				0x29
+#define EEPROM_TSSI_B				0x77
+
+#define EEPROM_CHANNELPLAN			0x28
+
+#define RF_OPTION1				0x2B
+#define RF_OPTION2				0x2C
+#define RF_OPTION3				0x2D
+#define RF_OPTION4				0x2E
+
+#define	STOPBECON				BIT(6)
+#define	STOPHIGHT				BIT(5)
+#define	STOPMGT					BIT(4)
+#define	STOPVO					BIT(3)
+#define	STOPVI					BIT(2)
+#define	STOPBE					BIT(1)
+#define	STOPBK					BIT(0)
+
+#define	RCR_APPFCS				BIT(31)
+#define	RCR_APP_MIC				BIT(30)
+#define	RCR_APP_ICV				BIT(29)
+#define	RCR_APP_PHYST_RXFF			BIT(28)
+#define	RCR_APP_BA_SSN				BIT(27)
+#define	RCR_ENMBID				BIT(24)
+#define	RCR_LSIGEN				BIT(23)
+#define	RCR_MFBEN				BIT(22)
+#define	RCR_HTC_LOC_CTRL			BIT(14)
+#define	RCR_AMF					BIT(13)
+#define	RCR_ACF					BIT(12)
+#define	RCR_ADF					BIT(11)
+#define	RCR_AICV				BIT(9)
+#define	RCR_ACRC32				BIT(8)
+#define	RCR_CBSSID_BCN				BIT(7)
+#define	RCR_CBSSID_DATA				BIT(6)
+#define	RCR_CBSSID				RCR_CBSSID_DATA
+#define	RCR_APWRMGT				BIT(5)
+#define	RCR_ADD3				BIT(4)
+#define	RCR_AB					BIT(3)
+#define	RCR_AM					BIT(2)
+#define	RCR_APM					BIT(1)
+#define	RCR_AAP					BIT(0)
+#define	RCR_MXDMA_OFFSET			8
+#define	RCR_FIFO_OFFSET				13
+
+#define RSV_CTRL				0x001C
+#define RD_CTRL					0x0524
+
+#define REG_USB_INFO				0xFE17
+#define REG_USB_SPECIAL_OPTION			0xFE55
+#define REG_USB_DMA_AGG_TO			0xFE5B
+#define REG_USB_AGG_TO				0xFE5C
+#define REG_USB_AGG_TH				0xFE5D
+
+#define REG_USB_VID				0xFE60
+#define REG_USB_PID				0xFE62
+#define REG_USB_OPTIONAL			0xFE64
+#define REG_USB_CHIRP_K				0xFE65
+#define REG_USB_PHY				0xFE66
+#define REG_USB_MAC_ADDR			0xFE70
+#define REG_USB_HRPWM				0xFE58
+#define REG_USB_HCPWM				0xFE57
+
+#define SW18_FPWM				BIT(3)
+
+#define ISO_MD2PP				BIT(0)
+#define ISO_UA2USB				BIT(1)
+#define ISO_UD2CORE				BIT(2)
+#define ISO_PA2PCIE				BIT(3)
+#define ISO_PD2CORE				BIT(4)
+#define ISO_IP2MAC				BIT(5)
+#define ISO_DIOP				BIT(6)
+#define ISO_DIOE				BIT(7)
+#define ISO_EB2CORE				BIT(8)
+#define ISO_DIOR				BIT(9)
+
+#define PWC_EV25V				BIT(14)
+#define PWC_EV12V				BIT(15)
+
+#define FEN_BBRSTB				BIT(0)
+#define FEN_BB_GLB_RSTn				BIT(1)
+#define FEN_USBA				BIT(2)
+#define FEN_UPLL				BIT(3)
+#define FEN_USBD				BIT(4)
+#define FEN_DIO_PCIE				BIT(5)
+#define FEN_PCIEA				BIT(6)
+#define FEN_PPLL				BIT(7)
+#define FEN_PCIED				BIT(8)
+#define FEN_DIOE				BIT(9)
+#define FEN_CPUEN				BIT(10)
+#define FEN_DCORE				BIT(11)
+#define FEN_ELDR				BIT(12)
+#define FEN_DIO_RF				BIT(13)
+#define FEN_HWPDN				BIT(14)
+#define FEN_MREGEN				BIT(15)
+
+#define PFM_LDALL				BIT(0)
+#define PFM_ALDN				BIT(1)
+#define PFM_LDKP				BIT(2)
+#define PFM_WOWL				BIT(3)
+#define EnPDN					BIT(4)
+#define PDN_PL					BIT(5)
+#define APFM_ONMAC				BIT(8)
+#define APFM_OFF				BIT(9)
+#define APFM_RSM				BIT(10)
+#define AFSM_HSUS				BIT(11)
+#define AFSM_PCIE				BIT(12)
+#define APDM_MAC				BIT(13)
+#define APDM_HOST				BIT(14)
+#define APDM_HPDN				BIT(15)
+#define RDY_MACON				BIT(16)
+#define SUS_HOST				BIT(17)
+#define ROP_ALD					BIT(20)
+#define ROP_PWR					BIT(21)
+#define ROP_SPS					BIT(22)
+#define SOP_MRST				BIT(25)
+#define SOP_FUSE				BIT(26)
+#define SOP_ABG					BIT(27)
+#define SOP_AMB					BIT(28)
+#define SOP_RCK					BIT(29)
+#define SOP_A8M					BIT(30)
+#define XOP_BTCK				BIT(31)
+
+#define ANAD16V_EN				BIT(0)
+#define ANA8M					BIT(1)
+#define MACSLP					BIT(4)
+#define LOADER_CLK_EN				BIT(5)
+#define _80M_SSC_DIS				BIT(7)
+#define _80M_SSC_EN_HO				BIT(8)
+#define PHY_SSC_RSTB				BIT(9)
+#define SEC_CLK_EN				BIT(10)
+#define MAC_CLK_EN				BIT(11)
+#define SYS_CLK_EN				BIT(12)
+#define RING_CLK_EN				BIT(13)
+
+#define	BOOT_FROM_EEPROM			BIT(4)
+#define	EEPROM_EN				BIT(5)
+
+#define AFE_BGEN				BIT(0)
+#define AFE_MBEN				BIT(1)
+#define MAC_ID_EN				BIT(7)
+
+#define WLOCK_ALL				BIT(0)
+#define WLOCK_00				BIT(1)
+#define WLOCK_04				BIT(2)
+#define WLOCK_08				BIT(3)
+#define WLOCK_40				BIT(4)
+#define R_DIS_PRST_0				BIT(5)
+#define R_DIS_PRST_1				BIT(6)
+#define LOCK_ALL_EN				BIT(7)
+
+#define RF_EN					BIT(0)
+#define RF_RSTB					BIT(1)
+#define RF_SDMRSTB				BIT(2)
+
+#define LDA15_EN				BIT(0)
+#define LDA15_STBY				BIT(1)
+#define LDA15_OBUF				BIT(2)
+#define LDA15_REG_VOS				BIT(3)
+#define _LDA15_VOADJ(x)				(((x) & 0x7) << 4)
+
+#define LDV12_EN				BIT(0)
+#define LDV12_SDBY				BIT(1)
+#define LPLDO_HSM				BIT(2)
+#define LPLDO_LSM_DIS				BIT(3)
+#define _LDV12_VADJ(x)				(((x) & 0xF) << 4)
+
+#define XTAL_EN					BIT(0)
+#define XTAL_BSEL				BIT(1)
+#define _XTAL_BOSC(x)				(((x) & 0x3) << 2)
+#define _XTAL_CADJ(x)				(((x) & 0xF) << 4)
+#define XTAL_GATE_USB				BIT(8)
+#define _XTAL_USB_DRV(x)			(((x) & 0x3) << 9)
+#define XTAL_GATE_AFE				BIT(11)
+#define _XTAL_AFE_DRV(x)			(((x) & 0x3) << 12)
+#define XTAL_RF_GATE				BIT(14)
+#define _XTAL_RF_DRV(x)				(((x) & 0x3) << 15)
+#define XTAL_GATE_DIG				BIT(17)
+#define _XTAL_DIG_DRV(x)			(((x) & 0x3) << 18)
+#define XTAL_BT_GATE				BIT(20)
+#define _XTAL_BT_DRV(x)				(((x) & 0x3) << 21)
+#define _XTAL_GPIO(x)				(((x) & 0x7) << 23)
+
+#define CKDLY_AFE				BIT(26)
+#define CKDLY_USB				BIT(27)
+#define CKDLY_DIG				BIT(28)
+#define CKDLY_BT				BIT(29)
+
+#define APLL_EN					BIT(0)
+#define APLL_320_EN				BIT(1)
+#define APLL_FREF_SEL				BIT(2)
+#define APLL_EDGE_SEL				BIT(3)
+#define APLL_WDOGB				BIT(4)
+#define APLL_LPFEN				BIT(5)
+
+#define APLL_REF_CLK_13MHZ			0x1
+#define APLL_REF_CLK_19_2MHZ			0x2
+#define APLL_REF_CLK_20MHZ			0x3
+#define APLL_REF_CLK_25MHZ			0x4
+#define APLL_REF_CLK_26MHZ			0x5
+#define APLL_REF_CLK_38_4MHZ			0x6
+#define APLL_REF_CLK_40MHZ			0x7
+
+#define APLL_320EN				BIT(14)
+#define APLL_80EN				BIT(15)
+#define APLL_1MEN				BIT(24)
+
+#define ALD_EN					BIT(18)
+#define EF_PD					BIT(19)
+#define EF_FLAG					BIT(31)
+
+#define EF_TRPT					BIT(7)
+#define LDOE25_EN				BIT(31)
+
+#define RSM_EN					BIT(0)
+#define Timer_EN				BIT(4)
+
+#define TRSW0EN					BIT(2)
+#define TRSW1EN					BIT(3)
+#define EROM_EN					BIT(4)
+#define EnBT					BIT(5)
+#define EnUart					BIT(8)
+#define Uart_910				BIT(9)
+#define EnPMAC					BIT(10)
+#define SIC_SWRST				BIT(11)
+#define EnSIC					BIT(12)
+#define SIC_23					BIT(13)
+#define EnHDP					BIT(14)
+#define SIC_LBK					BIT(15)
+
+#define LED0PL					BIT(4)
+#define LED1PL					BIT(12)
+#define LED0DIS					BIT(7)
+
+#define MCUFWDL_EN				BIT(0)
+#define MCUFWDL_RDY				BIT(1)
+#define FWDL_ChkSum_rpt				BIT(2)
+#define MACINI_RDY				BIT(3)
+#define BBINI_RDY				BIT(4)
+#define RFINI_RDY				BIT(5)
+#define WINTINI_RDY				BIT(6)
+#define CPRST					BIT(23)
+
+#define XCLK_VLD				BIT(0)
+#define ACLK_VLD				BIT(1)
+#define UCLK_VLD				BIT(2)
+#define PCLK_VLD				BIT(3)
+#define PCIRSTB					BIT(4)
+#define V15_VLD					BIT(5)
+#define TRP_B15V_EN				BIT(7)
+#define SIC_IDLE				BIT(8)
+#define BD_MAC2					BIT(9)
+#define BD_MAC1					BIT(10)
+#define IC_MACPHY_MODE				BIT(11)
+#define BT_FUNC					BIT(16)
+#define VENDOR_ID				BIT(19)
+#define PAD_HWPD_IDN				BIT(22)
+#define TRP_VAUX_EN				BIT(23)
+#define TRP_BT_EN				BIT(24)
+#define BD_PKG_SEL				BIT(25)
+#define BD_HCI_SEL				BIT(26)
+#define TYPE_ID					BIT(27)
+
+#define CHIP_VER_RTL_MASK			0xF000
+#define CHIP_VER_RTL_SHIFT			12
+
+#define REG_LBMODE				(REG_CR + 3)
+
+#define HCI_TXDMA_EN				BIT(0)
+#define HCI_RXDMA_EN				BIT(1)
+#define TXDMA_EN				BIT(2)
+#define RXDMA_EN				BIT(3)
+#define PROTOCOL_EN				BIT(4)
+#define SCHEDULE_EN				BIT(5)
+#define MACTXEN					BIT(6)
+#define MACRXEN					BIT(7)
+#define ENSWBCN					BIT(8)
+#define ENSEC					BIT(9)
+
+#define _NETTYPE(x)				(((x) & 0x3) << 16)
+#define MASK_NETTYPE				0x30000
+#define NT_NO_LINK				0x0
+#define NT_LINK_AD_HOC				0x1
+#define NT_LINK_AP				0x2
+#define NT_AS_AP				0x3
+
+#define _LBMODE(x)				(((x) & 0xF) << 24)
+#define MASK_LBMODE				0xF000000
+#define LOOPBACK_NORMAL				0x0
+#define LOOPBACK_IMMEDIATELY			0xB
+#define LOOPBACK_MAC_DELAY			0x3
+#define LOOPBACK_PHY				0x1
+#define LOOPBACK_DMA				0x7
+
+#define GET_RX_PAGE_SIZE(value)			((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value)			(((value) & 0xF0) >> 4)
+#define _PSRX_MASK				0xF
+#define _PSTX_MASK				0xF0
+#define _PSRX(x)				(x)
+#define _PSTX(x)				((x) << 4)
+
+#define PBP_64					0x0
+#define PBP_128					0x1
+#define PBP_256					0x2
+#define PBP_512					0x3
+#define PBP_1024				0x4
+
+#define RXDMA_ARBBW_EN				BIT(0)
+#define RXSHFT_EN				BIT(1)
+#define RXDMA_AGG_EN				BIT(2)
+#define QS_VO_QUEUE				BIT(8)
+#define QS_VI_QUEUE				BIT(9)
+#define QS_BE_QUEUE				BIT(10)
+#define QS_BK_QUEUE				BIT(11)
+#define QS_MANAGER_QUEUE			BIT(12)
+#define QS_HIGH_QUEUE				BIT(13)
+
+#define HQSEL_VOQ				BIT(0)
+#define HQSEL_VIQ				BIT(1)
+#define HQSEL_BEQ				BIT(2)
+#define HQSEL_BKQ				BIT(3)
+#define HQSEL_MGTQ				BIT(4)
+#define HQSEL_HIQ				BIT(5)
+
+#define _TXDMA_HIQ_MAP(x)			(((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x)			(((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x)			(((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x)			(((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x)			(((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x)			(((x)&0x3) << 4)
+
+#define QUEUE_LOW				1
+#define QUEUE_NORMAL				2
+#define QUEUE_HIGH				3
+
+#define _LLT_NO_ACTIVE				0x0
+#define _LLT_WRITE_ACCESS			0x1
+#define _LLT_READ_ACCESS			0x2
+
+#define _LLT_INIT_DATA(x)			((x) & 0xFF)
+#define _LLT_INIT_ADDR(x)			(((x) & 0xFF) << 8)
+#define _LLT_OP(x)				(((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x)			(((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK			(BIT(31) | BIT(30))
+#define BB_WRITE_EN				BIT(30)
+#define BB_READ_EN				BIT(31)
+
+#define _HPQ(x)					((x) & 0xFF)
+#define _LPQ(x)					(((x) & 0xFF) << 8)
+#define _PUBQ(x)				(((x) & 0xFF) << 16)
+#define _NPQ(x)					((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS				BIT(24)
+#define LPQ_PUBLIC_DIS				BIT(25)
+#define LD_RQPN					BIT(31)
+
+#define BCN_VALID				BIT(16)
+#define BCN_HEAD(x)				(((x) & 0xFF) << 8)
+#define	BCN_HEAD_MASK				0xFF00
+
+#define BLK_DESC_NUM_SHIFT			4
+#define BLK_DESC_NUM_MASK			0xF
+
+#define DROP_DATA_EN				BIT(9)
+
+#define EN_AMPDU_RTY_NEW			BIT(7)
+
+#define _INIRTSMCS_SEL(x)			((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x)			((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x)			(((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL			0xFFFFF
+
+#define _RRSC_BITMAP(x)				((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x)				(((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED			0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL		0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL		0x2
+#define RRSR_RSC_DUPLICATE_MODE			0x3
+
+#define USE_SHORT_G1				BIT(20)
+
+#define _AGGLMT_MCS0(x)				((x) & 0xF)
+#define _AGGLMT_MCS1(x)				(((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x)				(((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x)				(((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x)				(((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x)				(((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x)				(((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x)				(((x) & 0xF) << 28)
+
+#define	RETRY_LIMIT_SHORT_SHIFT			8
+#define	RETRY_LIMIT_LONG_SHIFT			0
+
+#define _DARF_RC1(x)				((x) & 0x1F)
+#define _DARF_RC2(x)				(((x) & 0x1F) << 8)
+#define _DARF_RC3(x)				(((x) & 0x1F) << 16)
+#define _DARF_RC4(x)				(((x) & 0x1F) << 24)
+#define _DARF_RC5(x)				((x) & 0x1F)
+#define _DARF_RC6(x)				(((x) & 0x1F) << 8)
+#define _DARF_RC7(x)				(((x) & 0x1F) << 16)
+#define _DARF_RC8(x)				(((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x)				((x) & 0x1F)
+#define _RARF_RC2(x)				(((x) & 0x1F) << 8)
+#define _RARF_RC3(x)				(((x) & 0x1F) << 16)
+#define _RARF_RC4(x)				(((x) & 0x1F) << 24)
+#define _RARF_RC5(x)				((x) & 0x1F)
+#define _RARF_RC6(x)				(((x) & 0x1F) << 8)
+#define _RARF_RC7(x)				(((x) & 0x1F) << 16)
+#define _RARF_RC8(x)				(((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET		16
+#define AC_PARAM_ECW_MAX_OFFSET			12
+#define AC_PARAM_ECW_MIN_OFFSET			8
+#define AC_PARAM_AIFS_OFFSET			0
+
+#define _AIFS(x)				(x)
+#define _ECW_MAX_MIN(x)				((x) << 8)
+#define _TXOP_LIMIT(x)				((x) << 16)
+
+#define _BCNIFS(x)				((x) & 0xFF)
+#define _BCNECW(x)				((((x) & 0xF)) << 8)
+
+#define _LRL(x)					((x) & 0x3F)
+#define _SRL(x)					(((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x)			((x) & 0xFF)
+#define _SIFS_CCK_TRX(x)			(((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x)			((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x)			(((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x)			(((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN			BIT(11)
+
+#define EN_MBSSID				BIT(1)
+#define EN_TXBCN_RPT				BIT(2)
+#define	EN_BCN_FUNCTION				BIT(3)
+
+#define TSFTR_RST				BIT(0)
+#define TSFTR1_RST				BIT(1)
+
+#define STOP_BCNQ				BIT(6)
+
+#define	DIS_TSF_UDT0_NORMAL_CHIP		BIT(4)
+#define	DIS_TSF_UDT0_TEST_CHIP			BIT(5)
+
+#define	AcmHw_HwEn				BIT(0)
+#define	AcmHw_BeqEn				BIT(1)
+#define	AcmHw_ViqEn				BIT(2)
+#define	AcmHw_VoqEn				BIT(3)
+#define	AcmHw_BeqStatus				BIT(4)
+#define	AcmHw_ViqStatus				BIT(5)
+#define	AcmHw_VoqStatus				BIT(6)
+
+#define APSDOFF					BIT(6)
+#define APSDOFF_STATUS				BIT(7)
+
+#define BW_20MHZ				BIT(2)
+
+#define RATE_BITMAP_ALL				0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M			0xFFFF1
+
+#define TSFRST					BIT(0)
+#define DIS_GCLK				BIT(1)
+#define PAD_SEL					BIT(2)
+#define PWR_ST					BIT(6)
+#define PWRBIT_OW_EN				BIT(7)
+#define ACRC					BIT(8)
+#define CFENDFORM				BIT(9)
+#define ICV					BIT(10)
+
+#define AAP					BIT(0)
+#define APM					BIT(1)
+#define AM					BIT(2)
+#define AB					BIT(3)
+#define ADD3					BIT(4)
+#define APWRMGT					BIT(5)
+#define CBSSID					BIT(6)
+#define CBSSID_DATA				BIT(6)
+#define CBSSID_BCN				BIT(7)
+#define ACRC32					BIT(8)
+#define AICV					BIT(9)
+#define ADF					BIT(11)
+#define ACF					BIT(12)
+#define AMF					BIT(13)
+#define HTC_LOC_CTRL				BIT(14)
+#define UC_DATA_EN				BIT(16)
+#define BM_DATA_EN				BIT(17)
+#define MFBEN					BIT(22)
+#define LSIGEN					BIT(23)
+#define EnMBID					BIT(24)
+#define APP_BASSN				BIT(27)
+#define APP_PHYSTS				BIT(28)
+#define APP_ICV					BIT(29)
+#define APP_MIC					BIT(30)
+#define APP_FCS					BIT(31)
+
+#define _MIN_SPACE(x)				((x) & 0x7)
+#define _SHORT_GI_PADDING(x)			(((x) & 0x1F) << 3)
+
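+/*
+ * RX error report register layout: bits [19:0] hold the selected counter
+ * value (RXERR_COUNTER_MASK), bit 27 resets the counters and bits [31:28]
+ * select which RXERR_TYPE_* counter is reported.
+ */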
+#define RXERR_TYPE_OFDM_PPDU			0
+#define RXERR_TYPE_OFDM_FALSE_ALARM		1
+#define	RXERR_TYPE_OFDM_MPDU_OK			2
+#define RXERR_TYPE_OFDM_MPDU_FAIL		3
+#define RXERR_TYPE_CCK_PPDU			4
+#define RXERR_TYPE_CCK_FALSE_ALARM		5
+#define RXERR_TYPE_CCK_MPDU_OK			6
+#define RXERR_TYPE_CCK_MPDU_FAIL		7
+#define RXERR_TYPE_HT_PPDU			8
+#define RXERR_TYPE_HT_FALSE_ALARM		9
+#define RXERR_TYPE_HT_MPDU_TOTAL		10
+#define RXERR_TYPE_HT_MPDU_OK			11
+#define RXERR_TYPE_HT_MPDU_FAIL			12
+#define RXERR_TYPE_RX_FULL_DROP			15
+
+#define RXERR_COUNTER_MASK			0xFFFFF
+#define RXERR_RPT_RST				BIT(27)
+#define _RXERR_RPT_SEL(type)			((type) << 28)
+
+#define	SCR_TxUseDK				BIT(0)
+#define	SCR_RxUseDK				BIT(1)
+#define	SCR_TxEncEnable				BIT(2)
+#define	SCR_RxDecEnable				BIT(3)
+#define	SCR_SKByA2				BIT(4)
+#define	SCR_NoSKMC				BIT(5)
+#define SCR_TXBCUSEDK				BIT(6)
+#define SCR_RXBCUSEDK				BIT(7)
+
+#define USB_IS_HIGH_SPEED			0
+#define USB_IS_FULL_SPEED			1
+#define USB_SPEED_MASK				BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK			0xF
+#define USB_NORMAL_SIE_EP_SHIFT			4
+
+#define USB_TEST_EP_MASK			0x30
+#define USB_TEST_EP_SHIFT			4
+
+#define USB_AGG_EN				BIT(3)
+
+#define MAC_ADDR_LEN				6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER		255
+
+#define POLLING_LLT_THRESHOLD			20
+#define POLLING_READY_TIMEOUT_COUNT		1000
+
+#define	MAX_MSS_DENSITY_2T			0x13
+#define	MAX_MSS_DENSITY_1T			0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK		(BIT(7) | BIT(6))
+#define EPROM_CMD_CONFIG			0x3
+#define EPROM_CMD_LOAD				1
+
+#define	HWSET_MAX_SIZE_92S			HWSET_MAX_SIZE
+
+#define	HAL_8192C_HW_GPIO_WPS_BIT		BIT(2)
+
+#define	RPMAC_RESET				0x100
+#define	RPMAC_TXSTART				0x104
+#define	RPMAC_TXLEGACYSIG			0x108
+#define	RPMAC_TXHTSIG1				0x10c
+#define	RPMAC_TXHTSIG2				0x110
+#define	RPMAC_PHYDEBUG				0x114
+#define	RPMAC_TXPACKETNUM			0x118
+#define	RPMAC_TXIDLE				0x11c
+#define	RPMAC_TXMACHEADER0			0x120
+#define	RPMAC_TXMACHEADER1			0x124
+#define	RPMAC_TXMACHEADER2			0x128
+#define	RPMAC_TXMACHEADER3			0x12c
+#define	RPMAC_TXMACHEADER4			0x130
+#define	RPMAC_TXMACHEADER5			0x134
+#define	RPMAC_TXDADATYPE			0x138
+#define	RPMAC_TXRANDOMSEED			0x13c
+#define	RPMAC_CCKPLCPPREAMBLE			0x140
+#define	RPMAC_CCKPLCPHEADER			0x144
+#define	RPMAC_CCKCRC16				0x148
+#define	RPMAC_OFDMRXCRC32OK			0x170
+#define	RPMAC_OFDMRXCRC32Er			0x174
+#define	RPMAC_OFDMRXPARITYER			0x178
+#define	RPMAC_OFDMRXCRC8ER			0x17c
+#define	RPMAC_CCKCRXRC16ER			0x180
+#define	RPMAC_CCKCRXRC32ER			0x184
+#define	RPMAC_CCKCRXRC32OK			0x188
+#define	RPMAC_TXSTATUS				0x18c
+
+#define	RFPGA0_RFMOD				0x800
+
+#define	RFPGA0_TXINFO				0x804
+#define	RFPGA0_PSDFUNCTION			0x808
+
+#define	RFPGA0_TXGAINSTAGE			0x80c
+
+#define	RFPGA0_RFTIMING1			0x810
+#define	RFPGA0_RFTIMING2			0x814
+
+#define	RFPGA0_XA_HSSIPARAMETER1		0x820
+#define	RFPGA0_XA_HSSIPARAMETER2		0x824
+#define	RFPGA0_XB_HSSIPARAMETER1		0x828
+#define	RFPGA0_XB_HSSIPARAMETER2		0x82c
+
+#define	RFPGA0_XA_LSSIPARAMETER			0x840
+#define	RFPGA0_XB_LSSIPARAMETER			0x844
+
+#define	RFPGA0_RFWAKEUPPARAMETER		0x850
+#define	RFPGA0_RFSLEEPUPPARAMETER		0x854
+
+#define	RFPGA0_XAB_SWITCHCONTROL		0x858
+#define	RFPGA0_XCD_SWITCHCONTROL		0x85c
+
+#define	RFPGA0_XA_RFINTERFACEOE			0x860
+#define	RFPGA0_XB_RFINTERFACEOE			0x864
+
+#define	RFPGA0_XAB_RFINTERFACESW		0x870
+#define	RFPGA0_XCD_RFINTERFACESW		0x874
+
+#define	rFPGA0_XAB_RFPARAMETER			0x878
+#define	rFPGA0_XCD_RFPARAMETER			0x87c
+
+#define	RFPGA0_ANALOGPARAMETER1			0x880
+#define	RFPGA0_ANALOGPARAMETER2			0x884
+#define	RFPGA0_ANALOGPARAMETER3			0x888
+#define	RFPGA0_ANALOGPARAMETER4			0x88c
+
+#define	RFPGA0_XA_LSSIREADBACK			0x8a0
+#define	RFPGA0_XB_LSSIREADBACK			0x8a4
+#define	RFPGA0_XC_LSSIREADBACK			0x8a8
+#define	RFPGA0_XD_LSSIREADBACK			0x8ac
+
+#define	RFPGA0_PSDREPORT			0x8b4
+#define	TRANSCEIVEA_HSPI_READBACK		0x8b8
+#define	TRANSCEIVEB_HSPI_READBACK		0x8bc
+#define	RFPGA0_XAB_RFINTERFACERB		0x8e0
+#define	RFPGA0_XCD_RFINTERFACERB		0x8e4
+
+#define	RFPGA1_RFMOD				0x900
+
+#define	RFPGA1_TXBLOCK				0x904
+#define	RFPGA1_DEBUGSELECT			0x908
+#define	RFPGA1_TXINFO				0x90c
+
+#define	RCCK0_SYSTEM				0xa00
+
+#define	RCCK0_AFESETTING			0xa04
+#define	RCCK0_CCA				0xa08
+
+#define	RCCK0_RXAGC1				0xa0c
+#define	RCCK0_RXAGC2				0xa10
+
+#define	RCCK0_RXHP				0xa14
+
+#define	RCCK0_DSPPARAMETER1			0xa18
+#define	RCCK0_DSPPARAMETER2			0xa1c
+
+#define	RCCK0_TXFILTER1				0xa20
+#define	RCCK0_TXFILTER2				0xa24
+#define	RCCK0_DEBUGPORT				0xa28
+#define	RCCK0_FALSEALARMREPORT			0xa2c
+#define	RCCK0_TRSSIREPORT			0xa50
+#define	RCCK0_RXREPORT				0xa54
+#define	RCCK0_FACOUNTERLOWER			0xa5c
+#define	RCCK0_FACOUNTERUPPER			0xa58
+
+#define	ROFDM0_LSTF				0xc00
+
+#define	ROFDM0_TRXPATHENABLE			0xc04
+#define	ROFDM0_TRMUXPAR				0xc08
+#define	ROFDM0_TRSWISOLATION			0xc0c
+
+#define	ROFDM0_XARXAFE				0xc10
+#define	ROFDM0_XARXIQIMBALANCE			0xc14
+#define	ROFDM0_XBRXAFE				0xc18
+#define	ROFDM0_XBRXIQIMBALANCE			0xc1c
+#define	ROFDM0_XCRXAFE				0xc20
+#define	ROFDM0_XCRXIQIMBANLANCE			0xc24
+#define	ROFDM0_XDRXAFE				0xc28
+#define	ROFDM0_XDRXIQIMBALANCE			0xc2c
+
+#define	ROFDM0_RXDETECTOR1			0xc30
+#define	ROFDM0_RXDETECTOR2			0xc34
+#define	ROFDM0_RXDETECTOR3			0xc38
+#define	ROFDM0_RXDETECTOR4			0xc3c
+
+#define	ROFDM0_RXDSP				0xc40
+#define	ROFDM0_CFOANDDAGC			0xc44
+#define	ROFDM0_CCADROPTHRESHOLD			0xc48
+#define	ROFDM0_ECCATHRESHOLD			0xc4c
+
+#define	ROFDM0_XAAGCCORE1			0xc50
+#define	ROFDM0_XAAGCCORE2			0xc54
+#define	ROFDM0_XBAGCCORE1			0xc58
+#define	ROFDM0_XBAGCCORE2			0xc5c
+#define	ROFDM0_XCAGCCORE1			0xc60
+#define	ROFDM0_XCAGCCORE2			0xc64
+#define	ROFDM0_XDAGCCORE1			0xc68
+#define	ROFDM0_XDAGCCORE2			0xc6c
+
+#define	ROFDM0_AGCPARAMETER1			0xc70
+#define	ROFDM0_AGCPARAMETER2			0xc74
+#define	ROFDM0_AGCRSSITABLE			0xc78
+#define	ROFDM0_HTSTFAGC				0xc7c
+
+#define	ROFDM0_XATXIQIMBALANCE			0xc80
+#define	ROFDM0_XATXAFE				0xc84
+#define	ROFDM0_XBTXIQIMBALANCE			0xc88
+#define	ROFDM0_XBTXAFE				0xc8c
+#define	ROFDM0_XCTXIQIMBALANCE			0xc90
+#define	ROFDM0_XCTXAFE				0xc94
+#define	ROFDM0_XDTXIQIMBALANCE			0xc98
+#define	ROFDM0_XDTXAFE				0xc9c
+
+#define ROFDM0_RXIQEXTANTA			0xca0
+
+#define	ROFDM0_RXHPPARAMETER			0xce0
+#define	ROFDM0_TXPSEUDONOISEWGT			0xce4
+#define	ROFDM0_FRAMESYNC			0xcf0
+#define	ROFDM0_DFSREPORT			0xcf4
+#define	ROFDM0_TXCOEFF1				0xca4
+#define	ROFDM0_TXCOEFF2				0xca8
+#define	ROFDM0_TXCOEFF3				0xcac
+#define	ROFDM0_TXCOEFF4				0xcb0
+#define	ROFDM0_TXCOEFF5				0xcb4
+#define	ROFDM0_TXCOEFF6				0xcb8
+
+#define	ROFDM1_LSTF				0xd00
+#define	ROFDM1_TRXPATHENABLE			0xd04
+
+#define	ROFDM1_CF0				0xd08
+#define	ROFDM1_CSI1				0xd10
+#define	ROFDM1_SBD				0xd14
+#define	ROFDM1_CSI2				0xd18
+#define	ROFDM1_CFOTRACKING			0xd2c
+#define	ROFDM1_TRXMESAURE1			0xd34
+#define	ROFDM1_INTFDET				0xd3c
+#define	ROFDM1_PSEUDONOISESTATEAB		0xd50
+#define	ROFDM1_PSEUDONOISESTATECD		0xd54
+#define	ROFDM1_RXPSEUDONOISEWGT			0xd58
+
+#define	ROFDM_PHYCOUNTER1			0xda0
+#define	ROFDM_PHYCOUNTER2			0xda4
+#define	ROFDM_PHYCOUNTER3			0xda8
+
+#define	ROFDM_SHORTCFOAB			0xdac
+#define	ROFDM_SHORTCFOCD			0xdb0
+#define	ROFDM_LONGCFOAB				0xdb4
+#define	ROFDM_LONGCFOCD				0xdb8
+#define	ROFDM_TAILCF0AB				0xdbc
+#define	ROFDM_TAILCF0CD				0xdc0
+#define	ROFDM_PWMEASURE1			0xdc4
+#define	ROFDM_PWMEASURE2			0xdc8
+#define	ROFDM_BWREPORT				0xdcc
+#define	ROFDM_AGCREPORT				0xdd0
+#define	ROFDM_RXSNR				0xdd4
+#define	ROFDM_RXEVMCSI				0xdd8
+#define	ROFDM_SIGREPORT				0xddc
+
+#define	RTXAGC_A_RATE18_06			0xe00
+#define	RTXAGC_A_RATE54_24			0xe04
+#define	RTXAGC_A_CCK1_MCS32			0xe08
+#define	RTXAGC_A_MCS03_MCS00			0xe10
+#define	RTXAGC_A_MCS07_MCS04			0xe14
+#define	RTXAGC_A_MCS11_MCS08			0xe18
+#define	RTXAGC_A_MCS15_MCS12			0xe1c
+
+#define	RTXAGC_B_RATE18_06			0x830
+#define	RTXAGC_B_RATE54_24			0x834
+#define	RTXAGC_B_CCK1_55_MCS32			0x838
+#define	RTXAGC_B_MCS03_MCS00			0x83c
+#define	RTXAGC_B_MCS07_MCS04			0x848
+#define	RTXAGC_B_MCS11_MCS08			0x84c
+#define	RTXAGC_B_MCS15_MCS12			0x868
+#define	RTXAGC_B_CCK11_A_CCK2_11		0x86c
+
+#define	RZEBRA1_HSSIENABLE			0x0
+#define	RZEBRA1_TRXENABLE1			0x1
+#define	RZEBRA1_TRXENABLE2			0x2
+#define	RZEBRA1_AGC				0x4
+#define	RZEBRA1_CHARGEPUMP			0x5
+#define	RZEBRA1_CHANNEL				0x7
+
+#define	RZEBRA1_TXGAIN				0x8
+#define	RZEBRA1_TXLPF				0x9
+#define	RZEBRA1_RXLPF				0xb
+#define	RZEBRA1_RXHPFCORNER			0xc
+
+#define	RGLOBALCTRL				0
+#define	RRTL8256_TXLPF				19
+#define	RRTL8256_RXLPF				11
+#define	RRTL8258_TXLPF				0x11
+#define	RRTL8258_RXLPF				0x13
+#define	RRTL8258_RSSILPF			0xa
+
+#define	RF_AC					0x00
+
+#define	RF_IQADJ_G1				0x01
+#define	RF_IQADJ_G2				0x02
+#define	RF_POW_TRSW				0x05
+
+#define	RF_GAIN_RX				0x06
+#define	RF_GAIN_TX				0x07
+
+#define	RF_TXM_IDAC				0x08
+#define	RF_BS_IQGEN				0x0F
+
+#define	RF_MODE1				0x10
+#define	RF_MODE2				0x11
+
+#define	RF_RX_AGC_HP				0x12
+#define	RF_TX_AGC				0x13
+#define	RF_BIAS					0x14
+#define	RF_IPA					0x15
+#define	RF_POW_ABILITY				0x17
+#define	RF_MODE_AG				0x18
+#define	RRFCHANNEL				0x18
+#define	RF_CHNLBW				0x18
+#define	RF_TOP					0x19
+
+#define	RF_RX_G1				0x1A
+#define	RF_RX_G2				0x1B
+
+#define	RF_RX_BB2				0x1C
+#define	RF_RX_BB1				0x1D
+
+#define	RF_RCK1					0x1E
+#define	RF_RCK2					0x1F
+
+#define	RF_TX_G1				0x20
+#define	RF_TX_G2				0x21
+#define	RF_TX_G3				0x22
+
+#define	RF_TX_BB1				0x23
+#define	RF_T_METER				0x24
+
+#define	RF_SYN_G1				0x25
+#define	RF_SYN_G2				0x26
+#define	RF_SYN_G3				0x27
+#define	RF_SYN_G4				0x28
+#define	RF_SYN_G5				0x29
+#define	RF_SYN_G6				0x2A
+#define	RF_SYN_G7				0x2B
+#define	RF_SYN_G8				0x2C
+
+#define	RF_RCK_OS				0x30
+#define	RF_TXPA_G1				0x31
+#define	RF_TXPA_G2				0x32
+#define	RF_TXPA_G3				0x33
+
+#define	BBBRESETB				0x100
+#define	BGLOBALRESETB				0x200
+#define	BOFDMTXSTART				0x4
+#define	BCCKTXSTART				0x8
+#define	BCRC32DEBUG				0x100
+#define	BPMACLOOPBACK				0x10
+#define	BTXLSIG					0xffffff
+#define	BOFDMTXRATE				0xf
+#define	BOFDMTXRESERVED				0x10
+#define	BOFDMTXLENGTH				0x1ffe0
+#define	BOFDMTXPARITY				0x20000
+#define	BTXHTSIG1				0xffffff
+#define	BTXHTMCSRATE				0x7f
+#define	BTXHTBW					0x80
+#define	BTXHTLENGTH				0xffff00
+#define	BTXHTSIG2				0xffffff
+#define	BTXHTSMOOTHING				0x1
+#define	BTXHTSOUNDING				0x2
+#define	BTXHTRESERVED				0x4
+#define	BTXHTAGGREATION				0x8
+#define	BTXHTSTBC				0x30
+#define	BTXHTADVANCECODING			0x40
+#define	BTXHTSHORTGI				0x80
+#define	BTXHTNUMBERHT_LTF			0x300
+#define	BTXHTCRC8				0x3fc00
+#define	BCOUNTERRESET				0x10000
+#define	BNUMOFOFDMTX				0xffff
+#define	BNUMOFCCKTX				0xffff0000
+#define	BTXIDLEINTERVAL				0xffff
+#define	BOFDMSERVICE				0xffff0000
+#define	BTXMACHEADER				0xffffffff
+#define	BTXDATAINIT				0xff
+#define	BTXHTMODE				0x100
+#define	BTXDATATYPE				0x30000
+#define	BTXRANDOMSEED				0xffffffff
+#define	BCCKTXPREAMBLE				0x1
+#define	BCCKTXSFD				0xffff0000
+#define	BCCKTXSIG				0xff
+#define	BCCKTXSERVICE				0xff00
+#define	BCCKLENGTHEXT				0x8000
+#define	BCCKTXLENGHT				0xffff0000
+#define	BCCKTXCRC16				0xffff
+#define	BCCKTXSTATUS				0x1
+#define	BOFDMTXSTATUS				0x2
+#define IS_BB_REG_OFFSET_92S(_Offset)	\
+	(((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+#define	BRFMOD					0x1
+#define	BJAPANMODE				0x2
+#define	BCCKTXSC				0x30
+#define	BCCKEN					0x1000000
+#define	BOFDMEN					0x2000000
+
+#define	BOFDMRXADCPHASE				0x10000
+#define	BOFDMTXDACPHASE				0x40000
+#define	BXATXAGC				0x3f
+
+#define	BXBTXAGC				0xf00
+#define	BXCTXAGC				0xf000
+#define	BXDTXAGC				0xf0000
+
+#define	BPASTART				0xf0000000
+#define	BTRSTART				0x00f00000
+#define	BRFSTART				0x0000f000
+#define	BBBSTART				0x000000f0
+#define	BBBCCKSTART				0x0000000f
+#define	BPAEND					0xf
+#define	BTREND					0x0f000000
+#define	BRFEND					0x000f0000
+#define	BCCAMASK				0x000000f0
+#define	BR2RCCAMASK				0x00000f00
+#define	BHSSI_R2TDELAY				0xf8000000
+#define	BHSSI_T2RDELAY				0xf80000
+#define	BCONTXHSSI				0x400
+#define	BIGFROMCCK				0x200
+#define	BAGCADDRESS				0x3f
+#define	BRXHPTX					0x7000
+#define	BRXHP2RX				0x38000
+#define	BRXHPCCKINI				0xc0000
+#define	BAGCTXCODE				0xc00000
+#define	BAGCRXCODE				0x300000
+
+#define	B3WIREDATALENGTH			0x800
+#define	B3WIREADDREAALENGTH			0x400
+
+#define	B3WIRERFPOWERDOWN			0x1
+#define	B5GPAPEPOLARITY				0x40000000
+#define	B2GPAPEPOLARITY				0x80000000
+#define	BRFSW_TXDEFAULTANT			0x3
+#define	BRFSW_TXOPTIONANT			0x30
+#define	BRFSW_RXDEFAULTANT			0x300
+#define	BRFSW_RXOPTIONANT			0x3000
+#define	BRFSI_3WIREDATA				0x1
+#define	BRFSI_3WIRECLOCK			0x2
+#define	BRFSI_3WIRELOAD				0x4
+#define	BRFSI_3WIRERW				0x8
+#define	BRFSI_3WIRE				0xf
+
+#define	BRFSI_RFENV				0x10
+
+#define	BRFSI_TRSW				0x20
+#define	BRFSI_TRSWB				0x40
+#define	BRFSI_ANTSW				0x100
+#define	BRFSI_ANTSWB				0x200
+#define	BRFSI_PAPE				0x400
+#define	BRFSI_PAPE5G				0x800
+#define	BBANDSELECT				0x1
+#define	BHTSIG2_GI				0x80
+#define	BHTSIG2_SMOOTHING			0x01
+#define	BHTSIG2_SOUNDING			0x02
+#define	BHTSIG2_AGGREATON			0x08
+#define	BHTSIG2_STBC				0x30
+#define	BHTSIG2_ADVCODING			0x40
+#define	BHTSIG2_NUMOFHTLTF			0x300
+#define	BHTSIG2_CRC8				0x3fc
+#define	BHTSIG1_MCS				0x7f
+#define	BHTSIG1_BANDWIDTH			0x80
+#define	BHTSIG1_HTLENGTH			0xffff
+#define	BLSIG_RATE				0xf
+#define	BLSIG_RESERVED				0x10
+#define	BLSIG_LENGTH				0x1fffe
+#define	BLSIG_PARITY				0x20
+#define	BCCKRXPHASE				0x4
+
+#define	BLSSIREADADDRESS			0x7f800000
+#define	BLSSIREADEDGE				0x80000000
+
+#define	BLSSIREADBACKDATA			0xfffff
+
+#define	BLSSIREADOKFLAG				0x1000
+#define	BCCKSAMPLERATE				0x8
+#define	BREGULATOR0STANDBY			0x1
+#define	BREGULATORPLLSTANDBY			0x2
+#define	BREGULATOR1STANDBY			0x4
+#define	BPLLPOWERUP				0x8
+#define	BDPLLPOWERUP				0x10
+#define	BDA10POWERUP				0x20
+#define	BAD7POWERUP				0x200
+#define	BDA6POWERUP				0x2000
+#define	BXTALPOWERUP				0x4000
+#define	B40MDCLKPOWERUP				0x8000
+#define	BDA6DEBUGMODE				0x20000
+#define	BDA6SWING				0x380000
+
+#define	BADCLKPHASE				0x4000000
+#define	B80MCLKDELAY				0x18000000
+#define	BAFEWATCHDOGENABLE			0x20000000
+
+#define	BXTALCAP01				0xc0000000
+#define	BXTALCAP23				0x3
+#define	BXTALCAP92X				0x0f000000
+#define BXTALCAP				0x0f000000
+
+#define	BINTDIFCLKENABLE			0x400
+#define	BEXTSIGCLKENABLE			0x800
+#define	BBANDGAP_MBIAS_POWERUP			0x10000
+#define	BAD11SH_GAIN				0xc0000
+#define	BAD11NPUT_RANGE				0x700000
+#define	BAD110P_CURRENT				0x3800000
+#define	BLPATH_LOOPBACK				0x4000000
+#define	BQPATH_LOOPBACK				0x8000000
+#define	BAFE_LOOPBACK				0x10000000
+#define	BDA10_SWING				0x7e0
+#define	BDA10_REVERSE				0x800
+#define	BDA_CLK_SOURCE				0x1000
+#define	BDA7INPUT_RANGE				0x6000
+#define	BDA7_GAIN				0x38000
+#define	BDA7OUTPUT_CM_MODE			0x40000
+#define	BDA7INPUT_CM_MODE			0x380000
+#define	BDA7CURRENT				0xc00000
+#define	BREGULATOR_ADJUST			0x7000000
+#define	BAD11POWERUP_ATTX			0x1
+#define	BDA10PS_ATTX				0x10
+#define	BAD11POWERUP_ATRX			0x100
+#define	BDA10PS_ATRX				0x1000
+#define	BCCKRX_AGC_FORMAT			0x200
+#define	BPSDFFT_SAMPLE_POINT			0xc000
+#define	BPSD_AVERAGE_NUM			0x3000
+#define	BIQPATH_CONTROL				0xc00
+#define	BPSD_FREQ				0x3ff
+#define	BPSD_ANTENNA_PATH			0x30
+#define	BPSD_IQ_SWITCH				0x40
+#define	BPSD_RX_TRIGGER				0x400000
+#define	BPSD_TX_TRIGGER				0x80000000
+#define	BPSD_SINE_TONE_SCALE			0x7f000000
+#define	BPSD_REPORT				0xffff
+
+#define	BOFDM_TXSC				0x30000000
+#define	BCCK_TXON				0x1
+#define	BOFDM_TXON				0x2
+#define	BDEBUG_PAGE				0xfff
+#define	BDEBUG_ITEM				0xff
+#define	BANTL					0x10
+#define	BANT_NONHT				0x100
+#define	BANT_HT1				0x1000
+#define	BANT_HT2				0x10000
+#define	BANT_HT1S1				0x100000
+#define	BANT_NONHTS1				0x1000000
+
+#define	BCCK_BBMODE				0x3
+#define	BCCK_TXPOWERSAVING			0x80
+#define	BCCK_RXPOWERSAVING			0x40
+
+#define	BCCK_SIDEBAND				0x10
+
+#define	BCCK_SCRAMBLE				0x8
+#define	BCCK_ANTDIVERSITY			0x8000
+#define	BCCK_CARRIER_RECOVERY			0x4000
+#define	BCCK_TXRATE				0x3000
+#define	BCCK_DCCANCEL				0x0800
+#define	BCCK_ISICANCEL				0x0400
+#define	BCCK_MATCH_FILTER			0x0200
+#define	BCCK_EQUALIZER				0x0100
+#define	BCCK_PREAMBLE_DETECT			0x800000
+#define	BCCK_FAST_FALSECCAi			0x400000
+#define	BCCK_CH_ESTSTARTi			0x300000
+#define	BCCK_CCA_COUNTi				0x080000
+#define	BCCK_CS_LIM				0x070000
+#define	BCCK_BIST_MODEi				0x80000000
+#define	BCCK_CCAMASK				0x40000000
+#define	BCCK_TX_DAC_PHASE			0x4
+#define	BCCK_RX_ADC_PHASE			0x20000000
+#define	BCCKR_CP_MODE				0x0100
+#define	BCCK_TXDC_OFFSET			0xf0
+#define	BCCK_RXDC_OFFSET			0xf
+#define	BCCK_CCA_MODE				0xc000
+#define	BCCK_FALSECS_LIM			0x3f00
+#define	BCCK_CS_RATIO				0xc00000
+#define	BCCK_CORGBIT_SEL			0x300000
+#define	BCCK_PD_LIM				0x0f0000
+#define	BCCK_NEWCCA				0x80000000
+#define	BCCK_RXHP_OF_IG				0x8000
+#define	BCCK_RXIG				0x7f00
+#define	BCCK_LNA_POLARITY			0x800000
+#define	BCCK_RX1ST_BAIN				0x7f0000
+#define	BCCK_RF_EXTEND				0x20000000
+#define	BCCK_RXAGC_SATLEVEL			0x1f000000
+#define	BCCK_RXAGC_SATCOUNT			0xe0
+#define	bCCKRxRFSettle				0x1f
+#define	BCCK_FIXED_RXAGC			0x8000
+#define	BCCK_ANTENNA_POLARITY			0x2000
+#define	BCCK_TXFILTER_TYPE			0x0c00
+#define	BCCK_RXAGC_REPORTTYPE			0x0300
+#define	BCCK_RXDAGC_EN				0x80000000
+#define	BCCK_RXDAGC_PERIOD			0x20000000
+#define	BCCK_RXDAGC_SATLEVEL			0x1f000000
+#define	BCCK_TIMING_RECOVERY			0x800000
+#define	BCCK_TXC0				0x3f0000
+#define	BCCK_TXC1				0x3f000000
+#define	BCCK_TXC2				0x3f
+#define	BCCK_TXC3				0x3f00
+#define	BCCK_TXC4				0x3f0000
+#define	BCCK_TXC5				0x3f000000
+#define	BCCK_TXC6				0x3f
+#define	BCCK_TXC7				0x3f00
+#define	BCCK_DEBUGPORT				0xff0000
+#define	BCCK_DAC_DEBUG				0x0f000000
+#define	BCCK_FALSEALARM_ENABLE			0x8000
+#define	BCCK_FALSEALARM_READ			0x4000
+#define	BCCK_TRSSI				0x7f
+#define	BCCK_RXAGC_REPORT			0xfe
+#define	BCCK_RXREPORT_ANTSEL			0x80000000
+#define	BCCK_RXREPORT_MFOFF			0x40000000
+#define	BCCK_RXREPORT_SQLOSS			0x20000000
+#define	BCCK_RXREPORT_PKTLOSS			0x10000000
+#define	BCCK_RXREPORT_LOCKEDBIT			0x08000000
+#define	BCCK_RXREPORT_RATEERROR			0x04000000
+#define	BCCK_RXREPORT_RXRATE			0x03000000
+#define	BCCK_RXFA_COUNTER_LOWER			0xff
+#define	BCCK_RXFA_COUNTER_UPPER			0xff000000
+#define	BCCK_RXHPAGC_START			0xe000
+#define	BCCK_RXHPAGC_FINAL			0x1c00
+#define	BCCK_RXFALSEALARM_ENABLE		0x8000
+#define	BCCK_FACOUNTER_FREEZE			0x4000
+#define	BCCK_TXPATH_SEL				0x10000000
+#define	BCCK_DEFAULT_RXPATH			0xc000000
+#define	BCCK_OPTION_RXPATH			0x3000000
+
+#define	BNUM_OFSTF				0x3
+#define	BSHIFT_L				0xc0
+#define	BGI_TH					0xc
+#define	BRXPATH_A				0x1
+#define	BRXPATH_B				0x2
+#define	BRXPATH_C				0x4
+#define	BRXPATH_D				0x8
+#define	BTXPATH_A				0x1
+#define	BTXPATH_B				0x2
+#define	BTXPATH_C				0x4
+#define	BTXPATH_D				0x8
+#define	BTRSSI_FREQ				0x200
+#define	BADC_BACKOFF				0x3000
+#define	BDFIR_BACKOFF				0xc000
+#define	BTRSSI_LATCH_PHASE			0x10000
+#define	BRX_LDC_OFFSET				0xff
+#define	BRX_QDC_OFFSET				0xff00
+#define	BRX_DFIR_MODE				0x1800000
+#define	BRX_DCNF_TYPE				0xe000000
+#define	BRXIQIMB_A				0x3ff
+#define	BRXIQIMB_B				0xfc00
+#define	BRXIQIMB_C				0x3f0000
+#define	BRXIQIMB_D				0xffc00000
+#define	BDC_DC_NOTCH				0x60000
+#define	BRXNB_NOTCH				0x1f000000
+#define	BPD_TH					0xf
+#define	BPD_TH_OPT2				0xc000
+#define	BPWED_TH				0x700
+#define	BIFMF_WIN_L				0x800
+#define	BPD_OPTION				0x1000
+#define	BMF_WIN_L				0xe000
+#define	BBW_SEARCH_L				0x30000
+#define	BWIN_ENH_L				0xc0000
+#define	BBW_TH					0x700000
+#define	BED_TH2					0x3800000
+#define	BBW_OPTION				0x4000000
+#define	BRADIO_TH				0x18000000
+#define	BWINDOW_L				0xe0000000
+#define	BSBD_OPTION				0x1
+#define	BFRAME_TH				0x1c
+#define	BFS_OPTION				0x60
+#define	BDC_SLOPE_CHECK				0x80
+#define	BFGUARD_COUNTER_DC_L			0xe00
+#define	BFRAME_WEIGHT_SHORT			0x7000
+#define	BSUB_TUNE				0xe00000
+#define	BFRAME_DC_LENGTH			0xe000000
+#define	BSBD_START_OFFSET			0x30000000
+#define	BFRAME_TH_2				0x7
+#define	BFRAME_GI2_TH				0x38
+#define	BGI2_SYNC_EN				0x40
+#define	BSARCH_SHORT_EARLY			0x300
+#define	BSARCH_SHORT_LATE			0xc00
+#define	BSARCH_GI2_LATE				0x70000
+#define	BCFOANTSUM				0x1
+#define	BCFOACC					0x2
+#define	BCFOSTARTOFFSET				0xc
+#define	BCFOLOOPBACK				0x70
+#define	BCFOSUMWEIGHT				0x80
+#define	BDAGCENABLE				0x10000
+#define	BTXIQIMB_A				0x3ff
+#define	BTXIQIMB_b				0xfc00
+#define	BTXIQIMB_C				0x3f0000
+#define	BTXIQIMB_D				0xffc00000
+#define	BTXIDCOFFSET				0xff
+#define	BTXIQDCOFFSET				0xff00
+#define	BTXDFIRMODE				0x10000
+#define	BTXPESUDO_NOISEON			0x4000000
+#define	BTXPESUDO_NOISE_A			0xff
+#define	BTXPESUDO_NOISE_B			0xff00
+#define	BTXPESUDO_NOISE_C			0xff0000
+#define	BTXPESUDO_NOISE_D			0xff000000
+#define	BCCA_DROPOPTION				0x20000
+#define	BCCA_DROPTHRES				0xfff00000
+#define	BEDCCA_H				0xf
+#define	BEDCCA_L				0xf0
+#define	BLAMBDA_ED				0x300
+#define	BRX_INITIALGAIN				0x7f
+#define	BRX_ANTDIV_EN				0x80
+#define	BRX_AGC_ADDRESS_FOR_LNA			0x7f00
+#define	BRX_HIGHPOWER_FLOW			0x8000
+#define	BRX_AGC_FREEZE_THRES			0xc0000
+#define	BRX_FREEZESTEP_AGC1			0x300000
+#define	BRX_FREEZESTEP_AGC2			0xc00000
+#define	BRX_FREEZESTEP_AGC3			0x3000000
+#define	BRX_FREEZESTEP_AGC0			0xc000000
+#define	BRXRSSI_CMP_EN				0x10000000
+#define	BRXQUICK_AGCEN				0x20000000
+#define	BRXAGC_FREEZE_THRES_MODE		0x40000000
+#define	BRX_OVERFLOW_CHECKTYPE			0x80000000
+#define	BRX_AGCSHIFT				0x7f
+#define	BTRSW_TRI_ONLY				0x80
+#define	BPOWER_THRES				0x300
+#define	BRXAGC_EN				0x1
+#define	BRXAGC_TOGETHER_EN			0x2
+#define	BRXAGC_MIN				0x4
+#define	BRXHP_INI				0x7
+#define	BRXHP_TRLNA				0x70
+#define	BRXHP_RSSI				0x700
+#define	BRXHP_BBP1				0x7000
+#define	BRXHP_BBP2				0x70000
+#define	BRXHP_BBP3				0x700000
+#define	BRSSI_H					0x7f0000
+#define	BRSSI_GEN				0x7f000000
+#define	BRXSETTLE_TRSW				0x7
+#define	BRXSETTLE_LNA				0x38
+#define	BRXSETTLE_RSSI				0x1c0
+#define	BRXSETTLE_BBP				0xe00
+#define	BRXSETTLE_RXHP				0x7000
+#define	BRXSETTLE_ANTSW_RSSI			0x38000
+#define	BRXSETTLE_ANTSW				0xc0000
+#define	BRXPROCESS_TIME_DAGC			0x300000
+#define	BRXSETTLE_HSSI				0x400000
+#define	BRXPROCESS_TIME_BBPPW			0x800000
+#define	BRXANTENNA_POWER_SHIFT			0x3000000
+#define	BRSSI_TABLE_SELECT			0xc000000
+#define	BRXHP_FINAL				0x7000000
+#define	BRXHPSETTLE_BBP				0x7
+#define	BRXHTSETTLE_HSSI			0x8
+#define	BRXHTSETTLE_RXHP			0x70
+#define	BRXHTSETTLE_BBPPW			0x80
+#define	BRXHTSETTLE_IDLE			0x300
+#define	BRXHTSETTLE_RESERVED			0x1c00
+#define	BRXHT_RXHP_EN				0x8000
+#define	BRXAGC_FREEZE_THRES			0x30000
+#define	BRXAGC_TOGETHEREN			0x40000
+#define	BRXHTAGC_MIN				0x80000
+#define	BRXHTAGC_EN				0x100000
+#define	BRXHTDAGC_EN				0x200000
+#define	BRXHT_RXHP_BBP				0x1c00000
+#define	BRXHT_RXHP_FINAL			0xe0000000
+#define	BRXPW_RADIO_TH				0x3
+#define	BRXPW_RADIO_EN				0x4
+#define	BRXMF_HOLD				0x3800
+#define	BRXPD_DELAY_TH1				0x38
+#define	BRXPD_DELAY_TH2				0x1c0
+#define	BRXPD_DC_COUNT_MAX			0x600
+#define	BRXPD_DELAY_TH				0x8000
+#define	BRXPROCESS_DELAY			0xf0000
+#define	BRXSEARCHRANGE_GI2_EARLY		0x700000
+#define	BRXFRAME_FUARD_COUNTER_L		0x3800000
+#define	BRXSGI_GUARD_L				0xc000000
+#define	BRXSGI_SEARCH_L				0x30000000
+#define	BRXSGI_TH				0xc0000000
+#define	BDFSCNT0				0xff
+#define	BDFSCNT1				0xff00
+#define	BDFSFLAG				0xf0000
+#define	BMF_WEIGHT_SUM				0x300000
+#define	BMINIDX_TH				0x7f000000
+#define	BDAFORMAT				0x40000
+#define	BTXCH_EMU_ENABLE			0x01000000
+#define	BTRSW_ISOLATION_A			0x7f
+#define	BTRSW_ISOLATION_B			0x7f00
+#define	BTRSW_ISOLATION_C			0x7f0000
+#define	BTRSW_ISOLATION_D			0x7f000000
+#define	BEXT_LNA_GAIN				0x7c00
+
+#define	BSTBC_EN				0x4
+#define	BANTENNA_MAPPING			0x10
+#define	BNSS					0x20
+#define	BCFO_ANTSUM_ID				0x200
+#define	BPHY_COUNTER_RESET			0x8000000
+#define	BCFO_REPORT_GET				0x4000000
+#define	BOFDM_CONTINUE_TX			0x10000000
+#define	BOFDM_SINGLE_CARRIER			0x20000000
+#define	BOFDM_SINGLE_TONE			0x40000000
+#define	BHT_DETECT				0x100
+#define	BCFOEN					0x10000
+#define	BCFOVALUE				0xfff00000
+#define	BSIGTONE_RE				0x3f
+#define	BSIGTONE_IM				0x7f00
+#define	BCOUNTER_CCA				0xffff
+#define	BCOUNTER_PARITYFAIL			0xffff0000
+#define	BCOUNTER_RATEILLEGAL			0xffff
+#define	BCOUNTER_CRC8FAIL			0xffff0000
+#define	BCOUNTER_MCSNOSUPPORT			0xffff
+#define	BCOUNTER_FASTSYNC			0xffff
+#define	BSHORTCFO				0xfff
+#define	BSHORTCFOT_LENGTH			12
+#define	BSHORTCFOF_LENGTH			11
+#define	BLONGCFO				0x7ff
+#define	BLONGCFOT_LENGTH			11
+#define	BLONGCFOF_LENGTH			11
+#define	BTAILCFO				0x1fff
+#define	BTAILCFOT_LENGTH			13
+#define	BTAILCFOF_LENGTH			12
+#define	BNOISE_EN_PWDB				0xffff
+#define	BCC_POWER_DB				0xffff0000
+#define	BMOISE_PWDB				0xffff
+#define	BPOWERMEAST_LENGTH			10
+#define	BPOWERMEASF_LENGTH			3
+#define	BRX_HT_BW				0x1
+#define	BRXSC					0x6
+#define	BRX_HT					0x8
+#define	BNB_INTF_DET_ON				0x1
+#define	BINTF_WIN_LEN_CFG			0x30
+#define	BNB_INTF_TH_CFG				0x1c0
+#define	BRFGAIN					0x3f
+#define	BTABLESEL				0x40
+#define	BTRSW					0x80
+#define	BRXSNR_A				0xff
+#define	BRXSNR_B				0xff00
+#define	BRXSNR_C				0xff0000
+#define	BRXSNR_D				0xff000000
+#define	BSNR_EVMT_LENGTH			8
+#define	BSNR_EVMF_LENGTH			1
+#define	BCSI1ST					0xff
+#define	BCSI2ND					0xff00
+#define	BRXEVM1ST				0xff0000
+#define	BRXEVM2ND				0xff000000
+#define	BSIGEVM					0xff
+#define	BPWDB					0xff00
+#define	BSGIEN					0x10000
+
+#define	BSFACTOR_QMA1				0xf
+#define	BSFACTOR_QMA2				0xf0
+#define	BSFACTOR_QMA3				0xf00
+#define	BSFACTOR_QMA4				0xf000
+#define	BSFACTOR_QMA5				0xf0000
+#define	BSFACTOR_QMA6				0xf0000
+#define	BSFACTOR_QMA7				0xf00000
+#define	BSFACTOR_QMA8				0xf000000
+#define	BSFACTOR_QMA9				0xf0000000
+#define	BCSI_SCHEME				0x100000
+
+#define	BNOISE_LVL_TOP_SET			0x3
+#define	BCHSMOOTH				0x4
+#define	BCHSMOOTH_CFG1				0x38
+#define	BCHSMOOTH_CFG2				0x1c0
+#define	BCHSMOOTH_CFG3				0xe00
+#define	BCHSMOOTH_CFG4				0x7000
+#define	BMRCMODE				0x800000
+#define	BTHEVMCFG				0x7000000
+
+#define	BLOOP_FIT_TYPE				0x1
+#define	BUPD_CFO				0x40
+#define	BUPD_CFO_OFFDATA			0x80
+#define	BADV_UPD_CFO				0x100
+#define	BADV_TIME_CTRL				0x800
+#define	BUPD_CLKO				0x1000
+#define	BFC					0x6000
+#define	BTRACKING_MODE				0x8000
+#define	BPHCMP_ENABLE				0x10000
+#define	BUPD_CLKO_LTF				0x20000
+#define	BCOM_CH_CFO				0x40000
+#define	BCSI_ESTI_MODE				0x80000
+#define	BADV_UPD_EQZ				0x100000
+#define	BUCHCFG					0x7000000
+#define	BUPDEQZ					0x8000000
+
+#define	BRX_PESUDO_NOISE_ON			0x20000000
+#define	BRX_PESUDO_NOISE_A			0xff
+#define	BRX_PESUDO_NOISE_B			0xff00
+#define	BRX_PESUDO_NOISE_C			0xff0000
+#define	BRX_PESUDO_NOISE_D			0xff000000
+#define	BRX_PESUDO_NOISESTATE_A			0xffff
+#define	BRX_PESUDO_NOISESTATE_B			0xffff0000
+#define	BRX_PESUDO_NOISESTATE_C			0xffff
+#define	BRX_PESUDO_NOISESTATE_D			0xffff0000
+
+#define	BZEBRA1_HSSIENABLE			0x8
+#define	BZEBRA1_TRXCONTROL			0xc00
+#define	BZEBRA1_TRXGAINSETTING			0x07f
+#define	BZEBRA1_RXCOUNTER			0xc00
+#define	BZEBRA1_TXCHANGEPUMP			0x38
+#define	BZEBRA1_RXCHANGEPUMP			0x7
+#define	BZEBRA1_CHANNEL_NUM			0xf80
+#define	BZEBRA1_TXLPFBW				0x400
+#define	BZEBRA1_RXLPFBW				0x600
+
+#define	BRTL8256REG_MODE_CTRL1			0x100
+#define	BRTL8256REG_MODE_CTRL0			0x40
+#define	BRTL8256REG_TXLPFBW			0x18
+#define	BRTL8256REG_RXLPFBW			0x600
+
+#define	BRTL8258_TXLPFBW			0xc
+#define	BRTL8258_RXLPFBW			0xc00
+#define	BRTL8258_RSSILPFBW			0xc0
+
+#define	BBYTE0					0x1
+#define	BBYTE1					0x2
+#define	BBYTE2					0x4
+#define	BBYTE3					0x8
+#define	BWORD0					0x3
+#define	BWORD1					0xc
+#define	BWORD					0xf
+
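+/*
+ * The MASK* values below are passed as the bitmask argument of
+ * rtl_set_bbreg(), and RFREG_OFFSET_MASK as the mask for rtl_set_rfreg(),
+ * selecting which bits of the target register a read-modify-write touches.
+ */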
+#define	MASKBYTE0				0xff
+#define	MASKBYTE1				0xff00
+#define	MASKBYTE2				0xff0000
+#define	MASKBYTE3				0xff000000
+#define	MASKHWORD				0xffff0000
+#define	MASKLWORD				0x0000ffff
+#define	MASKDWORD				0xffffffff
+#define	MASK12BITS				0xfff
+#define	MASKH4BITS				0xf0000000
+#define MASKOFDM_D				0xffc00000
+#define	MASKCCK					0x3f3f3f3f
+
+#define	MASK4BITS				0x0f
+#define	MASK20BITS				0xfffff
+#define RFREG_OFFSET_MASK			0xfffff
+
+#define	BENABLE					0x1
+#define	BDISABLE				0x0
+
+#define	LEFT_ANTENNA				0x0
+#define	RIGHT_ANTENNA				0x1
+
+#define	TCHECK_TXSTATUS				500
+#define	TUPDATE_RXCOUNTER			100
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EFUSE_SEL(x)				(((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK				0x300
+#define EFUSE_WIFI_SEL_0			0x0
+
+/* Enable GPIO[9] as WiFi HW PDn source*/
+#define	WL_HWPDN_EN				BIT(0)
+/* WiFi HW PDn polarity control*/
+#define	WL_HWPDN_SL				BIT(1)
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
new file mode 100644
index 0000000..50dd2fb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
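+	/*
+	 * Bits [11:10] of the RF channel/bandwidth register are cleared
+	 * first; for 20 MHz operation bit 10 (0x0400) is then set again,
+	 * while for 20/40 MHz both bits stay cleared.
+	 */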
+	switch (bandwidth) {
+	case HT_CHANNEL_WIDTH_20:
+		rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+					     0xfffff3ff) | 0x0400);
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+			      rtlphy->rfreg_chnlval[0]);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+					     0xfffff3ff));
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+			      rtlphy->rfreg_chnlval[0]);
+		break;
+	default:
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "unknown bandwidth: %#X\n", bandwidth);
+		break;
+	}
+}
+
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					  u8 *ppowerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u32 tx_agc[2] = {0, 0}, tmpval;
+	bool turbo_scanoff = false;
+	u8 idx1, idx2;
+	u8 *ptr;
+
+	if (rtlefuse->eeprom_regulatory != 0)
+		turbo_scanoff = true;
+
+	if (mac->act_scanning) {
+		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+		if (turbo_scanoff) {
+			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+				tx_agc[idx1] = ppowerlevel[idx1] |
+				    (ppowerlevel[idx1] << 8) |
+				    (ppowerlevel[idx1] << 16) |
+				    (ppowerlevel[idx1] << 24);
+			}
+		}
+	} else {
+		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+			tx_agc[idx1] = ppowerlevel[idx1] |
+				       (ppowerlevel[idx1] << 8) |
+				       (ppowerlevel[idx1] << 16) |
+				       (ppowerlevel[idx1] << 24);
+		}
+
+		if (rtlefuse->eeprom_regulatory == 0) {
+			tmpval = (rtlphy->mcs_offset[0][6]) +
+				(rtlphy->mcs_offset[0][7] << 8);
+			tx_agc[RF90_PATH_A] += tmpval;
+
+			tmpval = (rtlphy->mcs_offset[0][14]) +
+			    (rtlphy->mcs_offset[0][15] << 24);
+			tx_agc[RF90_PATH_B] += tmpval;
+		}
+	}
+
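+	/* Clamp every per-rate byte of the CCK TX AGC words to the RF6052 limit. */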
+	for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+		ptr = (u8 *) (&(tx_agc[idx1]));
+		for (idx2 = 0; idx2 < 4; idx2++) {
+			if (*ptr > RF6052_MAX_TX_PWR)
+				*ptr = RF6052_MAX_TX_PWR;
+			ptr++;
+		}
+	}
+
+	tmpval = tx_agc[RF90_PATH_A] & 0xff;
+	rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+		RTXAGC_A_CCK1_MCS32);
+
+	tmpval = tx_agc[RF90_PATH_A] >> 8;
+
+	tmpval = tmpval & 0xff00ffff;
+
+	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+		RTXAGC_B_CCK11_A_CCK2_11);
+
+	tmpval = tx_agc[RF90_PATH_B] >> 24;
+	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+		RTXAGC_B_CCK11_A_CCK2_11);
+
+	tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+	rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		"CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+		RTXAGC_B_CCK1_55_MCS32);
+}
+
+static void rtl8723ae_phy_get_power_base(struct ieee80211_hw *hw,
+					 u8 *ppowerlevel, u8 channel,
+					 u32 *ofdmbase, u32 *mcsbase)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u32 powerBase0, powerBase1;
+	u8 legacy_pwrdiff, ht20_pwrdiff;
+	u8 i, powerlevel[2];
+
+	for (i = 0; i < 2; i++) {
+		powerlevel[i] = ppowerlevel[i];
+		legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+		powerBase0 = powerlevel[i] + legacy_pwrdiff;
+
+		powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+		    (powerBase0 << 8) | powerBase0;
+		*(ofdmbase + i) = powerBase0;
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			" [OFDM power base index rf(%c) = 0x%x]\n",
+			((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+			ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+			powerlevel[i] += ht20_pwrdiff;
+		}
+		powerBase1 = powerlevel[i];
+		powerBase1 = (powerBase1 << 24) |
+		    (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+
+		*(mcsbase + i) = powerBase1;
+
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			" [MCS power base index rf(%c) = 0x%x]\n",
+			((i == 0) ? 'A' : 'B'), *(mcsbase + i));
+	}
+}
+
+static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw,
+					   u8 channel, u8 index,
+					   u32 *powerBase0,
+					   u32 *powerBase1,
+					   u32 *p_outwriteval)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 i, chnlgroup = 0, pwr_diff_limit[4];
+	u32 writeVal, customer_limit, rf;
+
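+	/*
+	 * eeprom_regulatory selects how the per-rate value is built:
+	 * 0 - Realtek defined offsets (best performance),
+	 * 1 - Realtek regulatory, channel-group based offsets,
+	 * 2 - power base value only,
+	 * 3 - customer defined per-channel limits.
+	 */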
+	for (rf = 0; rf < 2; rf++) {
+		switch (rtlefuse->eeprom_regulatory) {
+		case 0:
+			chnlgroup = 0;
+
+			writeVal = rtlphy->mcs_offset[chnlgroup]
+				   [index + (rf ? 8 : 0)] +
+				   ((index < 2) ? powerBase0[rf] :
+				   powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				"RTK better performance, writeVal(%c) = 0x%x\n",
+				((rf == 0) ? 'A' : 'B'), writeVal);
+			break;
+		case 1:
+			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+				writeVal = ((index < 2) ? powerBase0[rf] :
+					    powerBase1[rf]);
+
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					"Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n",
+					((rf == 0) ? 'A' : 'B'), writeVal);
+			} else {
+				if (rtlphy->pwrgroup_cnt == 1)
+					chnlgroup = 0;
+				if (rtlphy->pwrgroup_cnt >= 3) {
+					if (channel <= 3)
+						chnlgroup = 0;
+					else if (channel >= 4 && channel <= 9)
+						chnlgroup = 1;
+					else if (channel > 9)
+						chnlgroup = 2;
+					if (rtlphy->current_chan_bw ==
+					    HT_CHANNEL_WIDTH_20)
+						chnlgroup++;
+					else
+						chnlgroup += 4;
+				}
+
+				writeVal = rtlphy->mcs_offset[chnlgroup]
+				    [index + (rf ? 8 : 0)] + ((index < 2) ?
+							      powerBase0[rf] :
+							      powerBase1[rf]);
+
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					"Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
+					((rf == 0) ? 'A' : 'B'), writeVal);
+			}
+			break;
+		case 2:
+			writeVal =
+			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				"Better regulatory, writeVal(%c) = 0x%x\n",
+				((rf == 0) ? 'A' : 'B'), writeVal);
+			break;
+		case 3:
+			chnlgroup = 0;
+
+			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					"customer's limit, 40MHz rf(%c) = 0x%x\n",
+					((rf == 0) ? 'A' : 'B'),
+					rtlefuse->pwrgroup_ht40[rf][channel-1]);
+			} else {
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					"customer's limit, 20MHz rf(%c) = 0x%x\n",
+					((rf == 0) ? 'A' : 'B'),
+					rtlefuse->pwrgroup_ht20[rf][channel-1]);
+			}
+			for (i = 0; i < 4; i++) {
+				pwr_diff_limit[i] =
+					(u8) ((rtlphy->mcs_offset
+					[chnlgroup][index + (rf ? 8 : 0)] &
+					(0x7f << (i * 8))) >> (i * 8));
+
+				if (rtlphy->current_chan_bw ==
+				    HT_CHANNEL_WIDTH_20_40) {
+					if (pwr_diff_limit[i] >
+					    rtlefuse->
+					    pwrgroup_ht40[rf][channel - 1])
+						pwr_diff_limit[i] =
+						    rtlefuse->pwrgroup_ht40[rf]
+						    [channel - 1];
+				} else {
+					if (pwr_diff_limit[i] >
+					    rtlefuse->
+					    pwrgroup_ht20[rf][channel - 1])
+						pwr_diff_limit[i] =
+						    rtlefuse->pwrgroup_ht20[rf]
+						    [channel - 1];
+				}
+			}
+
+			customer_limit = (pwr_diff_limit[3] << 24) |
+			    (pwr_diff_limit[2] << 16) |
+			    (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				"Customer's limit rf(%c) = 0x%x\n",
+				((rf == 0) ? 'A' : 'B'), customer_limit);
+
+			writeVal = customer_limit +
+			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				"Customer, writeVal rf(%c)= 0x%x\n",
+				((rf == 0) ? 'A' : 'B'), writeVal);
+			break;
+		default:
+			chnlgroup = 0;
+			writeVal = rtlphy->mcs_offset[chnlgroup][index +
+			    (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] :
+			    powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				"RTK better performance, writeVal rf(%c) = 0x%x\n",
+				((rf == 0) ? 'A' : 'B'), writeVal);
+			break;
+		}
+
+		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+			writeVal = writeVal - 0x06060606;
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_BT2)
+			writeVal = writeVal - 0x0c0c0c0c;
+		*(p_outwriteval + rf) = writeVal;
+	}
+}
+
+static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw,
+					    u8 index, u32 *pValue)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	u16 regoffset_a[6] = {
+		RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+		RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+		RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+	};
+	u16 regoffset_b[6] = {
+		RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+		RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+		RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+	};
+	u8 i, rf, pwr_val[4];
+	u32 writeVal;
+	u16 regoffset;
+
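+	/* Clamp each power byte to RF6052_MAX_TX_PWR, then write the packed
+	 * 32-bit TX AGC word to the per-path rate register.
+	 */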
+	for (rf = 0; rf < 2; rf++) {
+		writeVal = pValue[rf];
+		for (i = 0; i < 4; i++) {
+			pwr_val[i] = (u8) ((writeVal & (0x7f <<
+							(i * 8))) >> (i * 8));
+
+			if (pwr_val[i] > RF6052_MAX_TX_PWR)
+				pwr_val[i] = RF6052_MAX_TX_PWR;
+		}
+		writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+		    (pwr_val[1] << 8) | pwr_val[0];
+
+		if (rf == 0)
+			regoffset = regoffset_a[index];
+		else
+			regoffset = regoffset_b[index];
+		rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			"Set 0x%x = %08x\n", regoffset, writeVal);
+
+		if (((get_rf_type(rtlphy) == RF_2T2R) &&
+		     (regoffset == RTXAGC_A_MCS15_MCS12 ||
+		      regoffset == RTXAGC_B_MCS15_MCS12)) ||
+		    ((get_rf_type(rtlphy) != RF_2T2R) &&
+		     (regoffset == RTXAGC_A_MCS07_MCS04 ||
+		      regoffset == RTXAGC_B_MCS07_MCS04))) {
+
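+			/* For the highest configured MCS group, also write the
+			 * top power byte, reduced by 6 before each write, to
+			 * the three byte registers starting at 0xc90 (path A)
+			 * or 0xc98 (path B).
+			 */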
+			writeVal = pwr_val[3];
+			if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+			    regoffset == RTXAGC_A_MCS07_MCS04)
+				regoffset = 0xc90;
+			if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+			    regoffset == RTXAGC_B_MCS07_MCS04)
+				regoffset = 0xc98;
+
+			for (i = 0; i < 3; i++) {
+				writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+				rtl_write_byte(rtlpriv, (u32) (regoffset + i),
+					       (u8) writeVal);
+			}
+		}
+	}
+}
+
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					   u8 *ppowerlevel, u8 channel)
+{
+	u32 writeVal[2], powerBase0[2], powerBase1[2];
+	u8 index;
+
+	rtl8723ae_phy_get_power_base(hw, ppowerlevel,
+				  channel, &powerBase0[0], &powerBase1[0]);
+
+	for (index = 0; index < 6; index++) {
+		rtl8723ae_get_txpwr_val_by_reg(hw, channel, index,
+					      &powerBase0[0],
+					      &powerBase1[0],
+					      &writeVal[0]);
+
+		_rtl8723ae_write_ofdm_power_reg(hw, index, &writeVal[0]);
+	}
+}
+
+static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 u4_regvalue = 0;
+	u8 rfpath;
+	bool rtstatus = true;
+	struct bb_reg_def *pphyreg;
+
+	for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+
+		pphyreg = &rtlphy->phyreg_def[rfpath];
+
+		switch (rfpath) {
+		case RF90_PATH_A:
+		case RF90_PATH_C:
+			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+						    BRFSI_RFENV);
+			break;
+		case RF90_PATH_B:
+		case RF90_PATH_D:
+			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+						    BRFSI_RFENV << 16);
+			break;
+		}
+
+		rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+		udelay(1);
+
+		rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+		udelay(1);
+
+		rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+			      B3WIREADDREAALENGTH, 0x0);
+		udelay(1);
+
+		rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+		udelay(1);
+
+		switch (rfpath) {
+		case RF90_PATH_A:
+			rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw,
+						(enum radio_path)rfpath);
+			break;
+		case RF90_PATH_B:
+			rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw,
+						(enum radio_path)rfpath);
+			break;
+		case RF90_PATH_C:
+			break;
+		case RF90_PATH_D:
+			break;
+		}
+		switch (rfpath) {
+		case RF90_PATH_A:
+		case RF90_PATH_C:
+			rtl_set_bbreg(hw, pphyreg->rfintfs,
+				      BRFSI_RFENV, u4_regvalue);
+			break;
+		case RF90_PATH_B:
+		case RF90_PATH_D:
+			rtl_set_bbreg(hw, pphyreg->rfintfs,
+				      BRFSI_RFENV << 16, u4_regvalue);
+			break;
+		}
+		if (!rtstatus) {
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "Radio[%d] Fail!!", rfpath);
+			return false;
+		}
+	}
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+	return rtstatus;
+}
+
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (rtlphy->rf_type == RF_1T1R)
+		rtlphy->num_total_rfpath = 1;
+	else
+		rtlphy->num_total_rfpath = 2;
+
+	return _rtl8723ae_phy_rf6052_config_parafile(hw);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
new file mode 100644
index 0000000..d0f9dd7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_RF_H__
+#define __RTL8723E_RF_H__
+
+#define RF6052_MAX_TX_PWR		0x3F
+
+extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+					    u8 bandwidth);
+extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					      u8 *ppowerlevel);
+extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					       u8 *ppowerlevel, u8 channel);
+extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
new file mode 100644
index 0000000..0afdc24
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -0,0 +1,387 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "hal_btc.h"
+
+static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	/* Close ASPM for AMD by default */
+	rtlpci->const_amdpci_aspm = 0;
+
+	/* ASPM PS mode.
+	 * 0 - Disable ASPM,
+	 * 1 - Enable ASPM without Clock Req,
+	 * 2 - Enable ASPM with Clock Req,
+	 * 3 - Always Enable ASPM with Clock Req,
+	 * 4 - Always Enable ASPM without Clock Req.
+	 * Default: RTL8192CE uses 3, RTL8192E uses 2.
+	 */
+	rtlpci->const_pci_aspm = 3;
+
+	/*Setting for PCI-E device */
+	rtlpci->const_devicepci_aspm_setting = 0x03;
+
+	/*Setting for PCI-E bridge */
+	rtlpci->const_hostpci_aspm_setting = 0x02;
+
+	/* Behavior when the radio is turned off in HW/SW.
+	 * 0 - Default,
+	 * 1 - From ASPM setting without low MAC power,
+	 * 2 - From ASPM setting with low MAC power,
+	 * 3 - Bus D3.
+	 * Default: RTL8192CE uses 0, RTL8192SE uses 2.
+	 */
+	rtlpci->const_hwsw_rfoff_d3 = 0;
+
+	/* This setting applies to devices with a backdoor ASPM
+	 * configuration, such as an EPHY setting.
+	 * 0 - ASPM not supported,
+	 * 1 - ASPM supported,
+	 * 2 - According to chipset.
+	 */
+	rtlpci->const_support_pciaspm = 1;
+}
+
+int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	int err;
+
+	rtl8723ae_bt_reg_init(hw);
+	rtlpriv->dm.dm_initialgain_enable = 1;
+	rtlpriv->dm.dm_flag = 0;
+	rtlpriv->dm.disable_framebursting = 0;
+	rtlpriv->dm.thermalvalue = 0;
+	rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
+
+	/* The shared code can handle 5 GHz, but this chip supports only the
+	 * 2.4 GHz band with a single MAC / single PHY.
+	 */
+	rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+	rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
+	rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+	rtlpci->receive_config = (RCR_APPFCS |
+				  RCR_APP_MIC |
+				  RCR_APP_ICV |
+				  RCR_APP_PHYST_RXFF |
+				  RCR_HTC_LOC_CTRL |
+				  RCR_AMF |
+				  RCR_ACF |
+				  RCR_ADF |
+				  RCR_AICV |
+				  RCR_AB |
+				  RCR_AM |
+				  RCR_APM |
+				  0);
+
+	rtlpci->irq_mask[0] =
+	    (u32) (PHIMR_ROK |
+		   PHIMR_RDU |
+		   PHIMR_VODOK |
+		   PHIMR_VIDOK |
+		   PHIMR_BEDOK |
+		   PHIMR_BKDOK |
+		   PHIMR_MGNTDOK |
+		   PHIMR_HIGHDOK |
+		   PHIMR_C2HCMD |
+		   PHIMR_HISRE_IND |
+		   PHIMR_TSF_BIT32_TOGGLE |
+		   PHIMR_TXBCNOK |
+		   PHIMR_PSTIMEOUT |
+		   0);
+
+	rtlpci->irq_mask[1] = (u32)(PHIMR_RXFOVW | 0);
+
+	/* for debug level */
+	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+	/* for LPS & IPS */
+	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+	rtlpriv->psc.reg_fwctrl_lps = 3;
+	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+	/* ASPM can be disabled entirely by setting
+	 * const_support_pciaspm = 0.
+	 */
+	rtl8723ae_init_aspm_vars(hw);
+
+	if (rtlpriv->psc.reg_fwctrl_lps == 1)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+	/* for firmware buf */
+	rtlpriv->rtlhal.pfirmware = vmalloc(0x6000);
+	if (!rtlpriv->rtlhal.pfirmware) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Can't alloc buffer for fw.\n");
+		return 1;
+	}
+
+	if (IS_VENDOR_8723_A_CUT(rtlhal->version))
+		rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
+	else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+		rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+
+	rtlpriv->max_fw_size = 0x6000;
+	pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
+	err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+				      rtlpriv->io.dev, GFP_KERNEL, hw,
+				      rtl_fw_cb);
+	if (err) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Failed to request firmware!\n");
+		return 1;
+	}
+	return 0;
+}
+
+void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->rtlhal.pfirmware) {
+		vfree(rtlpriv->rtlhal.pfirmware);
+		rtlpriv->rtlhal.pfirmware = NULL;
+	}
+}
+
+static struct rtl_hal_ops rtl8723ae_hal_ops = {
+	.init_sw_vars = rtl8723ae_init_sw_vars,
+	.deinit_sw_vars = rtl8723ae_deinit_sw_vars,
+	.read_eeprom_info = rtl8723ae_read_eeprom_info,
+	.interrupt_recognized = rtl8723ae_interrupt_recognized,
+	.hw_init = rtl8723ae_hw_init,
+	.hw_disable = rtl8723ae_card_disable,
+	.hw_suspend = rtl8723ae_suspend,
+	.hw_resume = rtl8723ae_resume,
+	.enable_interrupt = rtl8723ae_enable_interrupt,
+	.disable_interrupt = rtl8723ae_disable_interrupt,
+	.set_network_type = rtl8723ae_set_network_type,
+	.set_chk_bssid = rtl8723ae_set_check_bssid,
+	.set_qos = rtl8723ae_set_qos,
+	.set_bcn_reg = rtl8723ae_set_beacon_related_registers,
+	.set_bcn_intv = rtl8723ae_set_beacon_interval,
+	.update_interrupt_mask = rtl8723ae_update_interrupt_mask,
+	.get_hw_reg = rtl8723ae_get_hw_reg,
+	.set_hw_reg = rtl8723ae_set_hw_reg,
+	.update_rate_tbl = rtl8723ae_update_hal_rate_tbl,
+	.fill_tx_desc = rtl8723ae_tx_fill_desc,
+	.fill_tx_cmddesc = rtl8723ae_tx_fill_cmddesc,
+	.query_rx_desc = rtl8723ae_rx_query_desc,
+	.set_channel_access = rtl8723ae_update_channel_access_setting,
+	.radio_onoff_checking = rtl8723ae_gpio_radio_on_off_checking,
+	.set_bw_mode = rtl8723ae_phy_set_bw_mode,
+	.switch_channel = rtl8723ae_phy_sw_chnl,
+	.dm_watchdog = rtl8723ae_dm_watchdog,
+	.scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+	.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
+	.led_control = rtl8723ae_led_control,
+	.set_desc = rtl8723ae_set_desc,
+	.get_desc = rtl8723ae_get_desc,
+	.tx_polling = rtl8723ae_tx_polling,
+	.enable_hw_sec = rtl8723ae_enable_hw_security_config,
+	.set_key = rtl8723ae_set_key,
+	.init_sw_leds = rtl8723ae_init_sw_leds,
+	.allow_all_destaddr = rtl8723ae_allow_all_destaddr,
+	.get_bbreg = rtl8723ae_phy_query_bb_reg,
+	.set_bbreg = rtl8723ae_phy_set_bb_reg,
+	.get_rfreg = rtl8723ae_phy_query_rf_reg,
+	.set_rfreg = rtl8723ae_phy_set_rf_reg,
+	.c2h_command_handle = rtl_8723e_c2h_command_handle,
+	.bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
+	.bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
+};
+
+static struct rtl_mod_params rtl8723ae_mod_params = {
+	.sw_crypto = false,
+	.inactiveps = true,
+	.swctrl_lps = false,
+	.fwctrl_lps = true,
+	.debug = DBG_EMERG,
+};
+
+static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
+	.bar_id = 2,
+	.write_readback = true,
+	.name = "rtl8723ae_pci",
+	.fw_name = "rtlwifi/rtl8723aefw.bin",
+	.ops = &rtl8723ae_hal_ops,
+	.mod_params = &rtl8723ae_mod_params,
+	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+	.maps[SYS_CLK] = REG_SYS_CLKR,
+	.maps[MAC_RCR_AM] = AM,
+	.maps[MAC_RCR_AB] = AB,
+	.maps[MAC_RCR_ACRC32] = ACRC32,
+	.maps[MAC_RCR_ACF] = ACF,
+	.maps[MAC_RCR_AAP] = AAP,
+	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
+	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+	.maps[EFUSE_CLK] = 0,
+	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+	.maps[EFUSE_ANA8M] = ANA8M,
+	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+	.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+	.maps[RWCAM] = REG_CAMCMD,
+	.maps[WCAMI] = REG_CAMWRITE,
+	.maps[RCAMO] = REG_CAMREAD,
+	.maps[CAMDBG] = REG_CAMDBG,
+	.maps[SECR] = REG_SECCFG,
+	.maps[SEC_CAM_NONE] = CAM_NONE,
+	.maps[SEC_CAM_WEP40] = CAM_WEP40,
+	.maps[SEC_CAM_TKIP] = CAM_TKIP,
+	.maps[SEC_CAM_AES] = CAM_AES,
+	.maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+	.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+	.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+	.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+	.maps[RTL_IMR_TXFOVW] = PHIMR_TXFOVW,
+	.maps[RTL_IMR_PSTIMEOUT] = PHIMR_PSTIMEOUT,
+	.maps[RTL_IMR_BcnInt] = PHIMR_BCNDMAINT0,
+	.maps[RTL_IMR_RXFOVW] = PHIMR_RXFOVW,
+	.maps[RTL_IMR_RDU] = PHIMR_RDU,
+	.maps[RTL_IMR_ATIMEND] = PHIMR_ATIMEND_E,
+	.maps[RTL_IMR_BDOK] = PHIMR_BCNDOK0,
+	.maps[RTL_IMR_MGNTDOK] = PHIMR_MGNTDOK,
+	.maps[RTL_IMR_TBDER] = PHIMR_TXBCNERR,
+	.maps[RTL_IMR_HIGHDOK] = PHIMR_HIGHDOK,
+	.maps[RTL_IMR_TBDOK] = PHIMR_TXBCNOK,
+	.maps[RTL_IMR_BKDOK] = PHIMR_BKDOK,
+	.maps[RTL_IMR_BEDOK] = PHIMR_BEDOK,
+	.maps[RTL_IMR_VIDOK] = PHIMR_VIDOK,
+	.maps[RTL_IMR_VODOK] = PHIMR_VODOK,
+	.maps[RTL_IMR_ROK] = PHIMR_ROK,
+	.maps[RTL_IBSS_INT_MASKS] = (PHIMR_BCNDMAINT0 |
+				     PHIMR_TXBCNOK | PHIMR_TXBCNERR),
+	.maps[RTL_IMR_C2HCMD] = PHIMR_C2HCMD,
+
+
+	.maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
+	.maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
+	.maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
+	.maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
+	.maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
+	.maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
+	.maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
+	.maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
+	.maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
+	.maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
+	.maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
+	.maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
+
+	.maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
+	.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+};
+
+static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
+	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
+	{},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8723ae_pci_ids);
+
+MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger	<Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
+
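+/* Each module_param_named() below exposes one rtl8723ae_mod_params field;
+ * mode 0444 makes the value readable through sysfs but settable only when
+ * the module is loaded.
+ */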
+module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
+module_param_named(ips, rtl8723ae_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl8723ae_mod_params.swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8723ae_mod_params.fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
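+/* All PM transitions reuse the generic rtlwifi PCI suspend/resume helpers. */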
+static const struct dev_pm_ops rtlwifi_pm_ops = {
+	.suspend = rtl_pci_suspend,
+	.resume = rtl_pci_resume,
+	.freeze = rtl_pci_suspend,
+	.thaw = rtl_pci_resume,
+	.poweroff = rtl_pci_suspend,
+	.restore = rtl_pci_resume,
+};
+
+static struct pci_driver rtl8723ae_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = rtl8723ae_pci_ids,
+	.probe = rtl_pci_probe,
+	.remove = rtl_pci_disconnect,
+	.driver.pm = &rtlwifi_pm_ops,
+};
+
+module_pci_driver(rtl8723ae_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
new file mode 100644
index 0000000..fc4fde5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_SW_H__
+#define __RTL8723E_SW_H__
+
+int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8723ae_init_var_map(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c
new file mode 100644
index 0000000..9b0b50c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c
@@ -0,0 +1,738 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
+u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH] = {
+	0x800, 0x80040000,
+	0x804, 0x00000003,
+	0x808, 0x0000fc00,
+	0x80c, 0x0000000a,
+	0x810, 0x10005388,
+	0x814, 0x020c3d10,
+	0x818, 0x02200385,
+	0x81c, 0x00000000,
+	0x820, 0x01000100,
+	0x824, 0x00390004,
+	0x828, 0x00000000,
+	0x82c, 0x00000000,
+	0x830, 0x00000000,
+	0x834, 0x00000000,
+	0x838, 0x00000000,
+	0x83c, 0x00000000,
+	0x840, 0x00010000,
+	0x844, 0x00000000,
+	0x848, 0x00000000,
+	0x84c, 0x00000000,
+	0x850, 0x00000000,
+	0x854, 0x00000000,
+	0x858, 0x569a569a,
+	0x85c, 0x001b25a4,
+	0x860, 0x66f60110,
+	0x864, 0x061f0130,
+	0x868, 0x00000000,
+	0x86c, 0x32323200,
+	0x870, 0x07000760,
+	0x874, 0x22004000,
+	0x878, 0x00000808,
+	0x87c, 0x00000000,
+	0x880, 0xc0083070,
+	0x884, 0x000004d5,
+	0x888, 0x00000000,
+	0x88c, 0xccc000c0,
+	0x890, 0x00000800,
+	0x894, 0xfffffffe,
+	0x898, 0x40302010,
+	0x89c, 0x00706050,
+	0x900, 0x00000000,
+	0x904, 0x00000023,
+	0x908, 0x00000000,
+	0x90c, 0x81121111,
+	0xa00, 0x00d047c8,
+	0xa04, 0x80ff000c,
+	0xa08, 0x8c838300,
+	0xa0c, 0x2e68120f,
+	0xa10, 0x9500bb78,
+	0xa14, 0x11144028,
+	0xa18, 0x00881117,
+	0xa1c, 0x89140f00,
+	0xa20, 0x1a1b0000,
+	0xa24, 0x090e1317,
+	0xa28, 0x00000204,
+	0xa2c, 0x00d30000,
+	0xa70, 0x101fbf00,
+	0xa74, 0x00000007,
+	0xa78, 0x00000900,
+	0xc00, 0x48071d40,
+	0xc04, 0x03a05611,
+	0xc08, 0x000000e4,
+	0xc0c, 0x6c6c6c6c,
+	0xc10, 0x08800000,
+	0xc14, 0x40000100,
+	0xc18, 0x08800000,
+	0xc1c, 0x40000100,
+	0xc20, 0x00000000,
+	0xc24, 0x00000000,
+	0xc28, 0x00000000,
+	0xc2c, 0x00000000,
+	0xc30, 0x69e9ac44,
+	0xc34, 0x469652cf,
+	0xc38, 0x49795994,
+	0xc3c, 0x0a97971c,
+	0xc40, 0x1f7c403f,
+	0xc44, 0x000100b7,
+	0xc48, 0xec020107,
+	0xc4c, 0x007f037f,
+	0xc50, 0x69543420,
+	0xc54, 0x43bc0094,
+	0xc58, 0x69543420,
+	0xc5c, 0x433c0094,
+	0xc60, 0x00000000,
+	0xc64, 0x7116848b,
+	0xc68, 0x47c00bff,
+	0xc6c, 0x00000036,
+	0xc70, 0x2c7f000d,
+	0xc74, 0x018610db,
+	0xc78, 0x0000001f,
+	0xc7c, 0x00b91612,
+	0xc80, 0x40000100,
+	0xc84, 0x20f60000,
+	0xc88, 0x40000100,
+	0xc8c, 0x20200000,
+	0xc90, 0x00121820,
+	0xc94, 0x00000000,
+	0xc98, 0x00121820,
+	0xc9c, 0x00007f7f,
+	0xca0, 0x00000000,
+	0xca4, 0x00000080,
+	0xca8, 0x00000000,
+	0xcac, 0x00000000,
+	0xcb0, 0x00000000,
+	0xcb4, 0x00000000,
+	0xcb8, 0x00000000,
+	0xcbc, 0x28000000,
+	0xcc0, 0x00000000,
+	0xcc4, 0x00000000,
+	0xcc8, 0x00000000,
+	0xccc, 0x00000000,
+	0xcd0, 0x00000000,
+	0xcd4, 0x00000000,
+	0xcd8, 0x64b22427,
+	0xcdc, 0x00766932,
+	0xce0, 0x00222222,
+	0xce4, 0x00000000,
+	0xce8, 0x37644302,
+	0xcec, 0x2f97d40c,
+	0xd00, 0x00080740,
+	0xd04, 0x00020401,
+	0xd08, 0x0000907f,
+	0xd0c, 0x20010201,
+	0xd10, 0xa0633333,
+	0xd14, 0x3333bc43,
+	0xd18, 0x7a8f5b6b,
+	0xd2c, 0xcc979975,
+	0xd30, 0x00000000,
+	0xd34, 0x80608000,
+	0xd38, 0x00000000,
+	0xd3c, 0x00027293,
+	0xd40, 0x00000000,
+	0xd44, 0x00000000,
+	0xd48, 0x00000000,
+	0xd4c, 0x00000000,
+	0xd50, 0x6437140a,
+	0xd54, 0x00000000,
+	0xd58, 0x00000000,
+	0xd5c, 0x30032064,
+	0xd60, 0x4653de68,
+	0xd64, 0x04518a3c,
+	0xd68, 0x00002101,
+	0xd6c, 0x2a201c16,
+	0xd70, 0x1812362e,
+	0xd74, 0x322c2220,
+	0xd78, 0x000e3c24,
+	0xe00, 0x2a2a2a2a,
+	0xe04, 0x2a2a2a2a,
+	0xe08, 0x03902a2a,
+	0xe10, 0x2a2a2a2a,
+	0xe14, 0x2a2a2a2a,
+	0xe18, 0x2a2a2a2a,
+	0xe1c, 0x2a2a2a2a,
+	0xe28, 0x00000000,
+	0xe30, 0x1000dc1f,
+	0xe34, 0x10008c1f,
+	0xe38, 0x02140102,
+	0xe3c, 0x681604c2,
+	0xe40, 0x01007c00,
+	0xe44, 0x01004800,
+	0xe48, 0xfb000000,
+	0xe4c, 0x000028d1,
+	0xe50, 0x1000dc1f,
+	0xe54, 0x10008c1f,
+	0xe58, 0x02140102,
+	0xe5c, 0x28160d05,
+	0xe60, 0x00000008,
+	0xe68, 0x001b25a4,
+	0xe6c, 0x631b25a0,
+	0xe70, 0x631b25a0,
+	0xe74, 0x081b25a0,
+	0xe78, 0x081b25a0,
+	0xe7c, 0x081b25a0,
+	0xe80, 0x081b25a0,
+	0xe84, 0x631b25a0,
+	0xe88, 0x081b25a0,
+	0xe8c, 0x631b25a0,
+	0xed0, 0x631b25a0,
+	0xed4, 0x631b25a0,
+	0xed8, 0x631b25a0,
+	0xedc, 0x001b25a0,
+	0xee0, 0x001b25a0,
+	0xeec, 0x6b1b25a0,
+	0xf14, 0x00000003,
+	0xf4c, 0x00000000,
+	0xf00, 0x00000300,
+};
+
+u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH] = {
+	0xe00, 0xffffffff, 0x0a0c0c0c,
+	0xe04, 0xffffffff, 0x02040608,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x0a0c0d0e,
+	0xe14, 0xffffffff, 0x02040608,
+	0xe18, 0xffffffff, 0x0a0c0d0e,
+	0xe1c, 0xffffffff, 0x02040608,
+	0x830, 0xffffffff, 0x0a0c0c0c,
+	0x834, 0xffffffff, 0x02040608,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x0a0c0d0e,
+	0x848, 0xffffffff, 0x02040608,
+	0x84c, 0xffffffff, 0x0a0c0d0e,
+	0x868, 0xffffffff, 0x02040608,
+	0xe00, 0xffffffff, 0x00000000,
+	0xe04, 0xffffffff, 0x00000000,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x00000000,
+	0xe14, 0xffffffff, 0x00000000,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x00000000,
+	0x834, 0xffffffff, 0x00000000,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x00000000,
+	0x848, 0xffffffff, 0x00000000,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+	0xe00, 0xffffffff, 0x04040404,
+	0xe04, 0xffffffff, 0x00020204,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x06060606,
+	0xe14, 0xffffffff, 0x00020406,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x04040404,
+	0x834, 0xffffffff, 0x00020204,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x06060606,
+	0x848, 0xffffffff, 0x00020406,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+	0xe00, 0xffffffff, 0x00000000,
+	0xe04, 0xffffffff, 0x00000000,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x00000000,
+	0xe14, 0xffffffff, 0x00000000,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x00000000,
+	0x834, 0xffffffff, 0x00000000,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x00000000,
+	0x848, 0xffffffff, 0x00000000,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+	0xe00, 0xffffffff, 0x00000000,
+	0xe04, 0xffffffff, 0x00000000,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x00000000,
+	0xe14, 0xffffffff, 0x00000000,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x00000000,
+	0x834, 0xffffffff, 0x00000000,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x00000000,
+	0x848, 0xffffffff, 0x00000000,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+	0xe00, 0xffffffff, 0x04040404,
+	0xe04, 0xffffffff, 0x00020204,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x00000000,
+	0xe14, 0xffffffff, 0x00000000,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x04040404,
+	0x834, 0xffffffff, 0x00020204,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x00000000,
+	0x848, 0xffffffff, 0x00000000,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+	0xe00, 0xffffffff, 0x00000000,
+	0xe04, 0xffffffff, 0x00000000,
+	0xe08, 0x0000ff00, 0x00000000,
+	0x86c, 0xffffff00, 0x00000000,
+	0xe10, 0xffffffff, 0x00000000,
+	0xe14, 0xffffffff, 0x00000000,
+	0xe18, 0xffffffff, 0x00000000,
+	0xe1c, 0xffffffff, 0x00000000,
+	0x830, 0xffffffff, 0x00000000,
+	0x834, 0xffffffff, 0x00000000,
+	0x838, 0xffffff00, 0x00000000,
+	0x86c, 0x000000ff, 0x00000000,
+	0x83c, 0xffffffff, 0x00000000,
+	0x848, 0xffffffff, 0x00000000,
+	0x84c, 0xffffffff, 0x00000000,
+	0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH] = {
+	0x000, 0x00030159,
+	0x001, 0x00031284,
+	0x002, 0x00098000,
+	0x003, 0x00018c63,
+	0x004, 0x000210e7,
+	0x009, 0x0002044f,
+	0x00a, 0x0001a3f1,
+	0x00b, 0x00014787,
+	0x00c, 0x000896fe,
+	0x00d, 0x0000e02c,
+	0x00e, 0x00039ce7,
+	0x00f, 0x00000451,
+	0x019, 0x00000000,
+	0x01a, 0x00030355,
+	0x01b, 0x00060a00,
+	0x01c, 0x000fc378,
+	0x01d, 0x000a1250,
+	0x01e, 0x0004445f,
+	0x01f, 0x00080001,
+	0x020, 0x0000b614,
+	0x021, 0x0006c000,
+	0x022, 0x00000000,
+	0x023, 0x00001558,
+	0x024, 0x00000060,
+	0x025, 0x00000483,
+	0x026, 0x0004f000,
+	0x027, 0x000ec7d9,
+	0x028, 0x00057730,
+	0x029, 0x00004783,
+	0x02a, 0x00000001,
+	0x02b, 0x00021334,
+	0x02a, 0x00000000,
+	0x02b, 0x00000054,
+	0x02a, 0x00000001,
+	0x02b, 0x00000808,
+	0x02b, 0x00053333,
+	0x02c, 0x0000000c,
+	0x02a, 0x00000002,
+	0x02b, 0x00000808,
+	0x02b, 0x0005b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000003,
+	0x02b, 0x00000808,
+	0x02b, 0x00063333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000004,
+	0x02b, 0x00000808,
+	0x02b, 0x0006b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000005,
+	0x02b, 0x00000808,
+	0x02b, 0x00073333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000006,
+	0x02b, 0x00000709,
+	0x02b, 0x0005b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000007,
+	0x02b, 0x00000709,
+	0x02b, 0x00063333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000008,
+	0x02b, 0x0000060a,
+	0x02b, 0x0004b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x00000009,
+	0x02b, 0x0000060a,
+	0x02b, 0x00053333,
+	0x02c, 0x0000000d,
+	0x02a, 0x0000000a,
+	0x02b, 0x0000060a,
+	0x02b, 0x0005b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x0000000b,
+	0x02b, 0x0000060a,
+	0x02b, 0x00063333,
+	0x02c, 0x0000000d,
+	0x02a, 0x0000000c,
+	0x02b, 0x0000060a,
+	0x02b, 0x0006b333,
+	0x02c, 0x0000000d,
+	0x02a, 0x0000000d,
+	0x02b, 0x0000060a,
+	0x02b, 0x00073333,
+	0x02c, 0x0000000d,
+	0x02a, 0x0000000e,
+	0x02b, 0x0000050b,
+	0x02b, 0x00066666,
+	0x02c, 0x0000001a,
+	0x02a, 0x000e0000,
+	0x010, 0x0004000f,
+	0x011, 0x000e31fc,
+	0x010, 0x0006000f,
+	0x011, 0x000ff9f8,
+	0x010, 0x0002000f,
+	0x011, 0x000203f9,
+	0x010, 0x0003000f,
+	0x011, 0x000ff500,
+	0x010, 0x00000000,
+	0x011, 0x00000000,
+	0x010, 0x0008000f,
+	0x011, 0x0003f100,
+	0x010, 0x0009000f,
+	0x011, 0x00023100,
+	0x012, 0x00032000,
+	0x012, 0x00071000,
+	0x012, 0x000b0000,
+	0x012, 0x000fc000,
+	0x013, 0x000287b3,
+	0x013, 0x000244b7,
+	0x013, 0x000204ab,
+	0x013, 0x0001c49f,
+	0x013, 0x00018493,
+	0x013, 0x0001429b,
+	0x013, 0x00010299,
+	0x013, 0x0000c29c,
+	0x013, 0x000081a0,
+	0x013, 0x000040ac,
+	0x013, 0x00000020,
+	0x014, 0x0001944c,
+	0x014, 0x00059444,
+	0x014, 0x0009944c,
+	0x014, 0x000d9444,
+	0x015, 0x0000f424,
+	0x015, 0x0004f407,
+	0x015, 0x0008f424,
+	0x015, 0x000cf424,
+	0x016, 0x00000339,
+	0x016, 0x00040339,
+	0x016, 0x00080339,
+	0x016, 0x000c0336,
+	0x000, 0x00010159,
+	0x018, 0x0000f401,
+	0x0fe, 0x00000000,
+	0x0fe, 0x00000000,
+	0x01f, 0x00080003,
+	0x0fe, 0x00000000,
+	0x0fe, 0x00000000,
+	0x01e, 0x00044457,
+	0x01f, 0x00080000,
+	0x000, 0x00030159,
+};
+
+
+u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = {
+	0x0,
+};
+
+
+u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = {
+	0x420, 0x00000080,
+	0x423, 0x00000000,
+	0x430, 0x00000000,
+	0x431, 0x00000000,
+	0x432, 0x00000000,
+	0x433, 0x00000001,
+	0x434, 0x00000004,
+	0x435, 0x00000005,
+	0x436, 0x00000006,
+	0x437, 0x00000007,
+	0x438, 0x00000000,
+	0x439, 0x00000000,
+	0x43a, 0x00000000,
+	0x43b, 0x00000001,
+	0x43c, 0x00000004,
+	0x43d, 0x00000005,
+	0x43e, 0x00000006,
+	0x43f, 0x00000007,
+	0x440, 0x0000005d,
+	0x441, 0x00000001,
+	0x442, 0x00000000,
+	0x444, 0x00000015,
+	0x445, 0x000000f0,
+	0x446, 0x0000000f,
+	0x447, 0x00000000,
+	0x458, 0x00000041,
+	0x459, 0x000000a8,
+	0x45a, 0x00000072,
+	0x45b, 0x000000b9,
+	0x460, 0x00000066,
+	0x461, 0x00000066,
+	0x462, 0x00000008,
+	0x463, 0x00000003,
+	0x4c8, 0x000000ff,
+	0x4c9, 0x00000008,
+	0x4cc, 0x000000ff,
+	0x4cd, 0x000000ff,
+	0x4ce, 0x00000001,
+	0x500, 0x00000026,
+	0x501, 0x000000a2,
+	0x502, 0x0000002f,
+	0x503, 0x00000000,
+	0x504, 0x00000028,
+	0x505, 0x000000a3,
+	0x506, 0x0000005e,
+	0x507, 0x00000000,
+	0x508, 0x0000002b,
+	0x509, 0x000000a4,
+	0x50a, 0x0000005e,
+	0x50b, 0x00000000,
+	0x50c, 0x0000004f,
+	0x50d, 0x000000a4,
+	0x50e, 0x00000000,
+	0x50f, 0x00000000,
+	0x512, 0x0000001c,
+	0x514, 0x0000000a,
+	0x515, 0x00000010,
+	0x516, 0x0000000a,
+	0x517, 0x00000010,
+	0x51a, 0x00000016,
+	0x524, 0x0000000f,
+	0x525, 0x0000004f,
+	0x546, 0x00000040,
+	0x547, 0x00000000,
+	0x550, 0x00000010,
+	0x551, 0x00000010,
+	0x559, 0x00000002,
+	0x55a, 0x00000002,
+	0x55d, 0x000000ff,
+	0x605, 0x00000030,
+	0x608, 0x0000000e,
+	0x609, 0x0000002a,
+	0x652, 0x00000020,
+	0x63c, 0x0000000a,
+	0x63d, 0x0000000e,
+	0x63e, 0x0000000a,
+	0x63f, 0x0000000e,
+	0x66e, 0x00000005,
+	0x700, 0x00000021,
+	0x701, 0x00000043,
+	0x702, 0x00000065,
+	0x703, 0x00000087,
+	0x708, 0x00000021,
+	0x709, 0x00000043,
+	0x70a, 0x00000065,
+	0x70b, 0x00000087,
+};
+
+u32 RTL8723EAGCTAB_1TARRAY[RTL8723E_AGCTAB_1TARRAYLENGTH] = {
+	0xc78, 0x7b000001,
+	0xc78, 0x7b010001,
+	0xc78, 0x7b020001,
+	0xc78, 0x7b030001,
+	0xc78, 0x7b040001,
+	0xc78, 0x7b050001,
+	0xc78, 0x7a060001,
+	0xc78, 0x79070001,
+	0xc78, 0x78080001,
+	0xc78, 0x77090001,
+	0xc78, 0x760a0001,
+	0xc78, 0x750b0001,
+	0xc78, 0x740c0001,
+	0xc78, 0x730d0001,
+	0xc78, 0x720e0001,
+	0xc78, 0x710f0001,
+	0xc78, 0x70100001,
+	0xc78, 0x6f110001,
+	0xc78, 0x6e120001,
+	0xc78, 0x6d130001,
+	0xc78, 0x6c140001,
+	0xc78, 0x6b150001,
+	0xc78, 0x6a160001,
+	0xc78, 0x69170001,
+	0xc78, 0x68180001,
+	0xc78, 0x67190001,
+	0xc78, 0x661a0001,
+	0xc78, 0x651b0001,
+	0xc78, 0x641c0001,
+	0xc78, 0x631d0001,
+	0xc78, 0x621e0001,
+	0xc78, 0x611f0001,
+	0xc78, 0x60200001,
+	0xc78, 0x49210001,
+	0xc78, 0x48220001,
+	0xc78, 0x47230001,
+	0xc78, 0x46240001,
+	0xc78, 0x45250001,
+	0xc78, 0x44260001,
+	0xc78, 0x43270001,
+	0xc78, 0x42280001,
+	0xc78, 0x41290001,
+	0xc78, 0x402a0001,
+	0xc78, 0x262b0001,
+	0xc78, 0x252c0001,
+	0xc78, 0x242d0001,
+	0xc78, 0x232e0001,
+	0xc78, 0x222f0001,
+	0xc78, 0x21300001,
+	0xc78, 0x20310001,
+	0xc78, 0x06320001,
+	0xc78, 0x05330001,
+	0xc78, 0x04340001,
+	0xc78, 0x03350001,
+	0xc78, 0x02360001,
+	0xc78, 0x01370001,
+	0xc78, 0x00380001,
+	0xc78, 0x00390001,
+	0xc78, 0x003a0001,
+	0xc78, 0x003b0001,
+	0xc78, 0x003c0001,
+	0xc78, 0x003d0001,
+	0xc78, 0x003e0001,
+	0xc78, 0x003f0001,
+	0xc78, 0x7b400001,
+	0xc78, 0x7b410001,
+	0xc78, 0x7b420001,
+	0xc78, 0x7b430001,
+	0xc78, 0x7b440001,
+	0xc78, 0x7b450001,
+	0xc78, 0x7a460001,
+	0xc78, 0x79470001,
+	0xc78, 0x78480001,
+	0xc78, 0x77490001,
+	0xc78, 0x764a0001,
+	0xc78, 0x754b0001,
+	0xc78, 0x744c0001,
+	0xc78, 0x734d0001,
+	0xc78, 0x724e0001,
+	0xc78, 0x714f0001,
+	0xc78, 0x70500001,
+	0xc78, 0x6f510001,
+	0xc78, 0x6e520001,
+	0xc78, 0x6d530001,
+	0xc78, 0x6c540001,
+	0xc78, 0x6b550001,
+	0xc78, 0x6a560001,
+	0xc78, 0x69570001,
+	0xc78, 0x68580001,
+	0xc78, 0x67590001,
+	0xc78, 0x665a0001,
+	0xc78, 0x655b0001,
+	0xc78, 0x645c0001,
+	0xc78, 0x635d0001,
+	0xc78, 0x625e0001,
+	0xc78, 0x615f0001,
+	0xc78, 0x60600001,
+	0xc78, 0x49610001,
+	0xc78, 0x48620001,
+	0xc78, 0x47630001,
+	0xc78, 0x46640001,
+	0xc78, 0x45650001,
+	0xc78, 0x44660001,
+	0xc78, 0x43670001,
+	0xc78, 0x42680001,
+	0xc78, 0x41690001,
+	0xc78, 0x406a0001,
+	0xc78, 0x266b0001,
+	0xc78, 0x256c0001,
+	0xc78, 0x246d0001,
+	0xc78, 0x236e0001,
+	0xc78, 0x226f0001,
+	0xc78, 0x21700001,
+	0xc78, 0x20710001,
+	0xc78, 0x06720001,
+	0xc78, 0x05730001,
+	0xc78, 0x04740001,
+	0xc78, 0x03750001,
+	0xc78, 0x02760001,
+	0xc78, 0x01770001,
+	0xc78, 0x00780001,
+	0xc78, 0x00790001,
+	0xc78, 0x007a0001,
+	0xc78, 0x007b0001,
+	0xc78, 0x007c0001,
+	0xc78, 0x007d0001,
+	0xc78, 0x007e0001,
+	0xc78, 0x007f0001,
+	0xc78, 0x3800001e,
+	0xc78, 0x3801001e,
+	0xc78, 0x3802001e,
+	0xc78, 0x3803001e,
+	0xc78, 0x3804001e,
+	0xc78, 0x3805001e,
+	0xc78, 0x3806001e,
+	0xc78, 0x3807001e,
+	0xc78, 0x3808001e,
+	0xc78, 0x3c09001e,
+	0xc78, 0x3e0a001e,
+	0xc78, 0x400b001e,
+	0xc78, 0x440c001e,
+	0xc78, 0x480d001e,
+	0xc78, 0x4c0e001e,
+	0xc78, 0x500f001e,
+	0xc78, 0x5210001e,
+	0xc78, 0x5611001e,
+	0xc78, 0x5a12001e,
+	0xc78, 0x5e13001e,
+	0xc78, 0x6014001e,
+	0xc78, 0x6015001e,
+	0xc78, 0x6016001e,
+	0xc78, 0x6217001e,
+	0xc78, 0x6218001e,
+	0xc78, 0x6219001e,
+	0xc78, 0x621a001e,
+	0xc78, 0x621b001e,
+	0xc78, 0x621c001e,
+	0xc78, 0x621d001e,
+	0xc78, 0x621e001e,
+	0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h
new file mode 100644
index 0000000..f5ce713
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_TABLE__H_
+#define __RTL8723E_TABLE__H_
+
+#include <linux/types.h>
+
+#define RTL8723E_PHY_REG_1TARRAY_LENGTH		372
+extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH];
+#define RTL8723E_PHY_REG_ARRAY_PGLENGTH		336
+extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH];
+#define Rtl8723ERADIOA_1TARRAYLENGTH		 282
+extern u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH];
+#define RTL8723E_RADIOB_1TARRAYLENGTH		1
+extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH];
+#define RTL8723E_MACARRAYLENGTH			172
+extern u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH];
+#define RTL8723E_AGCTAB_1TARRAYLENGTH		320
+extern u32 RTL8723EAGCTAB_1TARRAY[RTL8723E_AGCTAB_1TARRAYLENGTH];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
new file mode 100644
index 0000000..87331d8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -0,0 +1,670 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+
+static u8 _rtl8723ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+	__le16 fc = rtl_get_fc(skb);
+
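+	/* Beacons and management/control frames go to dedicated firmware
+	 * queues; everything else keeps the TID that mac80211 placed in
+	 * skb->priority.
+	 */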
+	if (unlikely(ieee80211_is_beacon(fc)))
+		return QSLT_BEACON;
+	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+		return QSLT_MGNT;
+
+	return skb->priority;
+}
+
+static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw,
+			struct rtl_stats *pstatus, u8 *pdesc,
+			struct rx_fwinfo_8723e *p_drvinfo,
+			bool bpacket_match_bssid,
+			bool bpacket_toself, bool packet_beacon)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+	struct phy_sts_cck_8723e_t *cck_buf;
+	s8 rx_pwr_all, rx_pwr[4];
+	u8 rf_rx_num = 0, evm, pwdb_all;
+	u8 i, max_spatial_stream;
+	u32 rssi, total_rssi = 0;
+	bool is_cck = pstatus->is_cck;
+
+	/* Record it for next packet processing */
+	pstatus->packet_matchbssid = bpacket_match_bssid;
+	pstatus->packet_toself = bpacket_toself;
+	pstatus->packet_beacon = packet_beacon;
+	pstatus->rx_mimo_sig_qual[0] = -1;
+	pstatus->rx_mimo_sig_qual[1] = -1;
+
+	if (is_cck) {
+		u8 report, cck_highpwr;
+
+		/* The CCK driver-info structure is not the same as the OFDM one. */
+		cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
+
+		/* (1) Hardware does not provide RSSI for CCK.
+		 * (2) PWDB: average PWDB calculated by the
+		 * hardware (used for rate adaptation).
+		 */
+		if (ppsc->rfpwr_state == ERFON)
+			cck_highpwr = (u8) rtl_get_bbreg(hw,
+						 RFPGA0_XA_HSSIPARAMETER2,
+						 BIT(9));
+		else
+			cck_highpwr = false;
+
+		if (!cck_highpwr) {
+			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+			report = cck_buf->cck_agc_rpt & 0xc0;
+			report = report >> 6;
+			switch (report) {
+			case 0x3:
+				rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+				break;
+			case 0x2:
+				rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+				break;
+			case 0x1:
+				rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+				break;
+			case 0x0:
+				rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+				break;
+			}
+		} else {
+			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+			report = p_drvinfo->cfosho[0] & 0x60;
+			report = report >> 5;
+			switch (report) {
+			case 0x3:
+				rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+				break;
+			case 0x2:
+				rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+				break;
+			case 0x1:
+				rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+				break;
+			case 0x0:
+				rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+				break;
+			}
+		}
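+		/* Example (derived from the code above): with cck_highpwr
+		 * clear and cck_agc_rpt == 0xc5, report is 0x3 and the gain
+		 * bits are 0x04, so rx_pwr_all = -46 - 4 = -50 dBm.
+		 */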
+
+		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		/* CCK gain is smaller than OFDM/MCS gain, so add the
+		 * difference; empirically the value is 6.
+		 */
+		pwdb_all += 6;
+		if (pwdb_all > 100)
+			pwdb_all = 100;
+		/* Adjust the offset so that the gain index matches
+		 * the OFDM one.
+		 */
+		if (pwdb_all > 34 && pwdb_all <= 42)
+			pwdb_all -= 2;
+		else if (pwdb_all > 26 && pwdb_all <= 34)
+			pwdb_all -= 6;
+		else if (pwdb_all > 14 && pwdb_all <= 26)
+			pwdb_all -= 8;
+		else if (pwdb_all > 4 && pwdb_all <= 14)
+			pwdb_all -= 4;
+
+		pstatus->rx_pwdb_all = pwdb_all;
+		pstatus->recvsignalpower = rx_pwr_all;
+
+		/* (3) Get Signal Quality (EVM) */
+		if (bpacket_match_bssid) {
+			u8 sq;
+
+			if (pstatus->rx_pwdb_all > 40) {
+				sq = 100;
+			} else {
+				sq = cck_buf->sq_rpt;
+				if (sq > 64)
+					sq = 0;
+				else if (sq < 20)
+					sq = 100;
+				else
+					sq = ((64 - sq) * 100) / 44;
+			}
+
+			pstatus->signalquality = sq;
+			pstatus->rx_mimo_sig_qual[0] = sq;
+			pstatus->rx_mimo_sig_qual[1] = -1;
+		}
+	} else {
+		rtlpriv->dm.rfpath_rxenable[0] =
+		    rtlpriv->dm.rfpath_rxenable[1] = true;
+
+		/* (1) Get RSSI for HT rate */
+		for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+
+			/* Count the RF RX paths that are enabled. */
+			if (rtlpriv->dm.rfpath_rxenable[i])
+				rf_rx_num++;
+
+			rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;
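+			/* e.g. a gain_trsw[i] of 0x23 gives
+			 * (0x23 & 0x3f) * 2 - 110 = -40 dBm (illustrative only).
+			 */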
+
+			/* Translate dBm to a percentage. */
+			rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+			total_rssi += rssi;
+
+			/* Get the Rx SNR value in dB */
+			rtlpriv->stats.rx_snr_db[i] = (p_drvinfo->rxsnr[i] / 2);
+
+			/* Record Signal Strength for next packet */
+			if (bpacket_match_bssid)
+				pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+		}
+
+		/* (2) PWDB: average PWDB calculated by the
+		 * hardware (used for rate adaptation).
+		 */
+		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		pstatus->rx_pwdb_all = pwdb_all;
+		pstatus->rxpower = rx_pwr_all;
+		pstatus->recvsignalpower = rx_pwr_all;
+
+		/* (3) EVM of HT rate */
+		if (pstatus->is_ht && pstatus->rate >= DESC92_RATEMCS8 &&
+		    pstatus->rate <= DESC92_RATEMCS15)
+			max_spatial_stream = 2;
+		else
+			max_spatial_stream = 1;
+
+		for (i = 0; i < max_spatial_stream; i++) {
+			evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+			if (bpacket_match_bssid) {
+				/* Fill the value in the RFD; only the
+				 * first spatial stream is used.
+				 */
+				if (i == 0)
+					pstatus->signalquality = (evm & 0xff);
+				pstatus->rx_mimo_sig_qual[i] = (evm & 0xff);
+			}
+		}
+	}
+
+	/* Signal strength for the UI BSS list, scaled to a
+	 * 0-100 percentage.
+	 */
+	if (is_cck)
+		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+			pwdb_all));
+	else if (rf_rx_num != 0)
+		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+			total_rssi /= rf_rx_num));
+}
+
+static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+		struct sk_buff *skb, struct rtl_stats *pstatus,
+		u8 *pdesc, struct rx_fwinfo_8723e *p_drvinfo)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct ieee80211_hdr *hdr;
+	u8 *tmp_buf;
+	u8 *praddr;
+	u8 *psaddr;
+	__le16 fc;
+	u16 type;
+	bool packet_matchbssid, packet_toself, packet_beacon;
+
+	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+	hdr = (struct ieee80211_hdr *)tmp_buf;
+	fc = hdr->frame_control;
+	type = WLAN_FC_GET_TYPE(fc);
+	praddr = hdr->addr1;
+	psaddr = ieee80211_get_SA(hdr);
+
+	packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+			    (!compare_ether_addr(mac->bssid,
+			    (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ?
+			    hdr->addr1 : (le16_to_cpu(fc) &
+			    IEEE80211_FCTL_FROMDS) ?
+			    hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) &&
+			    (!pstatus->crc) && (!pstatus->icv));
+
+	packet_toself = packet_matchbssid &&
+	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+
+	packet_beacon = ieee80211_is_beacon(fc);
+
+	_rtl8723ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+				   packet_matchbssid, packet_toself,
+				   packet_beacon);
+
+	rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
+bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
+			     struct rtl_stats *status,
+			     struct ieee80211_rx_status *rx_status,
+			     u8 *pdesc, struct sk_buff *skb)
+{
+	struct rx_fwinfo_8723e *p_drvinfo;
+	struct ieee80211_hdr *hdr;
+	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+	status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+	status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+				   RX_DRV_INFO_SIZE_UNIT;
+	status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+	status->icv = (u16) GET_RX_DESC_ICV(pdesc);
+	status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+	status->hwerror = (status->crc | status->icv);
+	status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+	status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+	status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+	status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+	status->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+				 && (GET_RX_DESC_FAGGR(pdesc) == 1));
+	status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+	status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+	status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+	status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
+
+	rx_status->freq = hw->conf.channel->center_freq;
+	rx_status->band = hw->conf.channel->band;
+
+	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
+		+ status->rx_bufshift);
+
+	if (status->crc)
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (status->rx_is40Mhzpacket)
+		rx_status->flag |= RX_FLAG_40MHZ;
+
+	if (status->is_ht)
+		rx_status->flag |= RX_FLAG_HT;
+
+	rx_status->flag |= RX_FLAG_MACTIME_START;
+
+	/* The hardware sets status->decrypted true if it finds that the
+	 * frame is an open data frame or a management frame. It does not
+	 * decrypt a robust management frame (IEEE 802.11w) but still
+	 * reports it as decrypted, so clear the flag for such frames and
+	 * let mac80211 decrypt them in software.
+	 */
+	if (status->decrypted) {
+		if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+			(ieee80211_has_protected(hdr->frame_control)))
+			rx_status->flag &= ~RX_FLAG_DECRYPTED;
+		else
+			rx_status->flag |= RX_FLAG_DECRYPTED;
+	}
+
+	/* rate_idx: index of the data rate into the band's supported
+	 * rates, or the MCS index if HT rates are used (RX_FLAG_HT).
+	 */
+	rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+						   status->rate, false);
+
+	rx_status->mactime = status->timestamp_low;
+	if (phystatus) {
+		p_drvinfo = (struct rx_fwinfo_8723e *)(skb->data +
+			     status->rx_bufshift);
+
+		_rtl8723ae_translate_rx_signal_stuff(hw,
+			   skb, status, pdesc, p_drvinfo);
+	}
+
+	/*rx_status->qual = status->signal; */
+	rx_status->signal = status->recvsignalpower + 10;
+	/*rx_status->noise = -status->noise; */
+
+	return true;
+}
+
+void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
+			    struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_sta *sta,
+			    struct sk_buff *skb, u8 hw_queue,
+			    struct rtl_tcb_desc *ptcdesc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	bool defaultadapter = true;
+	u8 *pdesc = (u8 *) pdesc_tx;
+	u16 seq_number;
+	__le16 fc = hdr->frame_control;
+	u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
+	bool firstseg = ((hdr->seq_ctrl &
+			    cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+	bool lastseg = ((hdr->frame_control &
+			   cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+	dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+					    skb->data, skb->len,
+					    PCI_DMA_TODEVICE);
+	u8 bw_40 = 0;
+
+	if (mac->opmode == NL80211_IFTYPE_STATION) {
+		bw_40 = mac->bw_40;
+	} else if (mac->opmode == NL80211_IFTYPE_AP ||
+		mac->opmode == NL80211_IFTYPE_ADHOC) {
+		if (sta)
+			bw_40 = sta->ht_cap.cap &
+				IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+
+	seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+	rtl_get_tcb_desc(hw, info, sta, skb, ptcdesc);
+
+	CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723e));
+
+	if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+		firstseg = true;
+		lastseg = true;
+	}
+
+	if (firstseg) {
+		SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+		SET_TX_DESC_TX_RATE(pdesc, ptcdesc->hw_rate);
+
+		if (ptcdesc->use_shortgi || ptcdesc->use_shortpreamble)
+			SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+
+		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+			SET_TX_DESC_AGG_BREAK(pdesc, 1);
+			SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+		}
+		SET_TX_DESC_SEQ(pdesc, seq_number);
+
+		SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcdesc->rts_enable &&
+						!ptcdesc->
+						cts_enable) ? 1 : 0));
+		SET_TX_DESC_HW_RTS_ENABLE(pdesc,
+					  ((ptcdesc->rts_enable
+					    || ptcdesc->cts_enable) ? 1 : 0));
+		SET_TX_DESC_CTS2SELF(pdesc, ((ptcdesc->cts_enable) ? 1 : 0));
+		SET_TX_DESC_RTS_STBC(pdesc, ((ptcdesc->rts_stbc) ? 1 : 0));
+
+		SET_TX_DESC_RTS_RATE(pdesc, ptcdesc->rts_rate);
+		SET_TX_DESC_RTS_BW(pdesc, 0);
+		SET_TX_DESC_RTS_SC(pdesc, ptcdesc->rts_sc);
+		SET_TX_DESC_RTS_SHORT(pdesc,
+				      ((ptcdesc->rts_rate <= DESC92_RATE54M) ?
+				       (ptcdesc->rts_use_shortpreamble ? 1 : 0)
+				       : (ptcdesc->rts_use_shortgi ? 1 : 0)));
+
+		if (bw_40) {
+			if (ptcdesc->packet_bw) {
+				SET_TX_DESC_DATA_BW(pdesc, 1);
+				SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+			} else {
+				SET_TX_DESC_DATA_BW(pdesc, 0);
+				SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+							 mac->cur_40_prime_sc);
+			}
+		} else {
+			SET_TX_DESC_DATA_BW(pdesc, 0);
+			SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+		}
+
+		SET_TX_DESC_LINIP(pdesc, 0);
+		SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+
+		if (sta) {
+			u8 ampdu_density = sta->ht_cap.ampdu_density;
+			SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+		}
+
+		if (info->control.hw_key) {
+			struct ieee80211_key_conf *keyconf =
+			    info->control.hw_key;
+
+			switch (keyconf->cipher) {
+			case WLAN_CIPHER_SUITE_WEP40:
+			case WLAN_CIPHER_SUITE_WEP104:
+			case WLAN_CIPHER_SUITE_TKIP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+				break;
+			case WLAN_CIPHER_SUITE_CCMP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+				break;
+			default:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+				break;
+			}
+		}
+
+		SET_TX_DESC_PKT_ID(pdesc, 0);
+		SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+
+		SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+		SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+		SET_TX_DESC_DISABLE_FB(pdesc, 0);
+		SET_TX_DESC_USE_RATE(pdesc, ptcdesc->use_driver_rate ? 1 : 0);
+
+		if (ieee80211_is_data_qos(fc)) {
+			if (mac->rdg_en) {
+				RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+					 "Enable RDG function.\n");
+				SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+				SET_TX_DESC_HTC(pdesc, 1);
+			}
+		}
+	}
+
+	SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+	SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+
+	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+
+	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+
+	if (rtlpriv->dm.useramask) {
+		SET_TX_DESC_RATE_ID(pdesc, ptcdesc->ratr_index);
+		SET_TX_DESC_MACID(pdesc, ptcdesc->mac_id);
+	} else {
+		SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcdesc->ratr_index);
+		SET_TX_DESC_MACID(pdesc, ptcdesc->ratr_index);
+	}
+
+	if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
+		SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1);
+
+		if (!defaultadapter)
+			SET_TX_DESC_HWSEQ_SEL_8723(pdesc, 1);
+	}
+
+	SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+	    is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+		SET_TX_DESC_BMC(pdesc, 1);
+	}
+
+	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+}
+
+void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
+			      u8 *pdesc, bool firstseg,
+			      bool lastseg, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+	u8 fw_queue = QSLT_BEACON;
+	dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+					    skb->data, skb->len,
+					    PCI_DMA_TODEVICE);
+	__le16 fc = hdr->frame_control;
+
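+	/* Frames sent through this path (firmware H2C commands and beacons)
+	 * always go out as a single segment on the beacon queue at the
+	 * fixed 1M rate.
+	 */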
+	CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+	if (firstseg)
+		SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+	SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+
+	SET_TX_DESC_SEQ(pdesc, 0);
+
+	SET_TX_DESC_LINIP(pdesc, 0);
+
+	SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+
+	SET_TX_DESC_FIRST_SEG(pdesc, 1);
+	SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+
+	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+
+	SET_TX_DESC_RATE_ID(pdesc, 7);
+	SET_TX_DESC_MACID(pdesc, 0);
+
+	SET_TX_DESC_OWN(pdesc, 1);
+
+	SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+
+	SET_TX_DESC_FIRST_SEG(pdesc, 1);
+	SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+	SET_TX_DESC_OFFSET(pdesc, 0x20);
+
+	SET_TX_DESC_USE_RATE(pdesc, 1);
+
+	if (!ieee80211_is_data_qos(fc)) {
+		SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1);
+		/* SET_TX_DESC_HWSEQ_EN(pdesc, 1); */
+		/* SET_TX_DESC_PKT_ID(pdesc, 8); */
+	}
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+		      "H2C Tx Cmd Content\n",
+		      pdesc, TX_DESC_SIZE);
+}
+
+void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+{
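+	/* Translate the generic HW_DESC_* field names used by the shared
+	 * rtlwifi PCI code into writes of the 8723AE descriptor layout.
+	 */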
+	if (istx) {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			SET_TX_DESC_OWN(pdesc, 1);
+			break;
+		case HW_DESC_TX_NEXTDESC_ADDR:
+			SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+			break;
+		default:
+			RT_ASSERT(false, "ERR txdesc :%d not process\n",
+				  desc_name);
+			break;
+		}
+	} else {
+		switch (desc_name) {
+		case HW_DESC_RXOWN:
+			SET_RX_DESC_OWN(pdesc, 1);
+			break;
+		case HW_DESC_RXBUFF_ADDR:
+			SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+			break;
+		case HW_DESC_RXPKT_LEN:
+			SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+			break;
+		case HW_DESC_RXERO:
+			SET_RX_DESC_EOR(pdesc, 1);
+			break;
+		default:
+			RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+				  desc_name);
+			break;
+		}
+	}
+}
+
+u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+	u32 ret = 0;
+
+	if (istx) {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			ret = GET_TX_DESC_OWN(pdesc);
+			break;
+		case HW_DESC_TXBUFF_ADDR:
+			ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+			break;
+		default:
+			RT_ASSERT(false, "ERR txdesc :%d not process\n",
+				  desc_name);
+			break;
+		}
+	} else {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			ret = GET_RX_DESC_OWN(pdesc);
+			break;
+		case HW_DESC_RXPKT_LEN:
+			ret = GET_RX_DESC_PKT_LEN(pdesc);
+			break;
+		default:
+			RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+				  desc_name);
+			break;
+		}
+	}
+	return ret;
+}
+
+void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
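+	/* Kick TX DMA polling for the queue: BIT(4) is the beacon queue,
+	 * otherwise the bit index matches the hardware queue number.
+	 */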
+	if (hw_queue == BEACON_QUEUE) {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+	} else {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+			       BIT(0) << (hw_queue));
+	}
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
new file mode 100644
index 0000000..ad05b54
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
@@ -0,0 +1,725 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723E_TRX_H__
+#define __RTL8723E_TRX_H__
+
+#define TX_DESC_SIZE				64
+#define TX_DESC_AGGR_SUBFRAME_SIZE		32
+
+#define RX_DESC_SIZE				32
+#define RX_DRV_INFO_SIZE_UNIT			8
+
+#define	TX_DESC_NEXT_DESC_OFFSET		40
+#define USB_HWDESC_HEADER_LEN			32
+#define CRCLENGTH				4
+
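+/* Each SET_/GET_ accessor below touches one field of the little-endian
+ * descriptor: the pointer argument is offset to the dword holding the
+ * field, followed by the starting bit, the field width and (for SET_)
+ * the value.
+ */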
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val)			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
+#define SET_TX_DESC_BK(__pdesc, __val)			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
+
+#define GET_TX_DESC_MACID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+#define GET_TX_DESC_AGG_ENABLE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
+#define GET_TX_DESC_AGG_BREAK(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
+#define GET_TX_DESC_RDG_ENABLE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
+#define GET_TX_DESC_QUEUE_SEL(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
+#define GET_TX_DESC_RDG_NAV_EXT(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_TX_DESC_PIFS(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_TX_DESC_RATE_ID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_TX_DESC_NAV_USE_HDR(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_TX_DESC_EN_DESC_ID(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_TX_DESC_SEC_TYPE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
+#define GET_TX_DESC_PKT_OFFSET(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
+
+#define SET_TX_DESC_RTS_RC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
+#define SET_TX_DESC_DATA_RC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_CCX(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_ANTSEL_A(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
+#define SET_TX_DESC_ANTSEL_B(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
+#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
+#define SET_TX_DESC_TX_ANTL(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
+#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
+#define GET_TX_DESC_DATA_RC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
+#define GET_TX_DESC_BAR_RTY_TH(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
+#define GET_TX_DESC_MORE_FRAG(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
+#define GET_TX_DESC_RAW(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
+#define GET_TX_DESC_CCX(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
+#define GET_TX_DESC_AMPDU_DENSITY(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
+#define GET_TX_DESC_ANTSEL_A(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
+#define GET_TX_DESC_ANTSEL_B(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
+#define GET_TX_DESC_TX_ANT_CCK(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
+#define GET_TX_DESC_TX_ANTL(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
+#define GET_TX_DESC_TX_ANT_HT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
+
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
+#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
+#define SET_TX_DESC_SEQ(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
+#define SET_TX_DESC_PKT_ID(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
+
+#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
+#define GET_TX_DESC_TAIL_PAGE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
+#define GET_TX_DESC_SEQ(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
+#define GET_TX_DESC_PKT_ID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
+
+/* For RTL8723 */
+#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val)
+#define SET_TX_DESC_HWSEQ_EN_8723(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL_8723(__pTxDesc, __Value)	\
+	SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 6, 2, __Value)
+
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
+#define SET_TX_DESC_AP_DCFE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
+#define SET_TX_DESC_QOS(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
+#define SET_TX_DESC_PORT_ID(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
+#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
+#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
+#define SET_TX_DESC_TX_STBC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
+#define SET_TX_DESC_DATA_SHORT(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
+#define SET_TX_DESC_RTS_BW(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
+#define SET_TX_DESC_RTS_STBC(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RATE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
+#define GET_TX_DESC_AP_DCFE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
+#define GET_TX_DESC_QOS(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
+#define GET_TX_DESC_HWSEQ_EN(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
+#define GET_TX_DESC_USE_RATE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
+#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
+#define GET_TX_DESC_DISABLE_FB(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
+#define GET_TX_DESC_CTS2SELF(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
+#define GET_TX_DESC_RTS_ENABLE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
+#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
+#define GET_TX_DESC_PORT_ID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
+#define GET_TX_DESC_WAIT_DCTS(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
+#define GET_TX_DESC_CTS2AP_EN(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
+#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
+#define GET_TX_DESC_TX_STBC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
+#define GET_TX_DESC_DATA_SHORT(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
+#define GET_TX_DESC_DATA_BW(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
+#define GET_TX_DESC_RTS_SHORT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
+#define GET_TX_DESC_RTS_BW(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
+#define GET_TX_DESC_RTS_SC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
+#define GET_TX_DESC_RTS_STBC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
+#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
+
+#define GET_TX_DESC_TX_RATE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
+#define GET_TX_DESC_DATA_SHORTGI(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
+#define GET_TX_DESC_CCX_TAG(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
+#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
+#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
+#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
+#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
+#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
+
+#define SET_TX_DESC_TXAGC_A(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
+#define SET_TX_DESC_TXAGC_B(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
+#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
+
+#define GET_TX_DESC_TXAGC_A(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
+#define GET_TX_DESC_TXAGC_B(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
+#define GET_TX_DESC_USE_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
+#define GET_TX_DESC_MAX_AGG_NUM(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
+#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
+#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
+#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
+#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
+#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
+#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
+#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
+#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
+#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
+	SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
+#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
+	SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val)			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val)			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
+#define GET_RX_DESC_HWRSVD(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
+#define GET_RX_DESC_PAGGR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+#define GET_RX_DESC_SEQ(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
+#define GET_RX_DESC_NEXT_IND(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
+#define GET_RX_DESC_RSVD(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
+
+#define GET_RX_DESC_RXMCS(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
+#define GET_RX_DESC_RXHT(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_DESC_SPLCP(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_DESC_HWPC_ERR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
+
+#define GET_RX_DESC_IV1(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
+#define GET_RX_DESC_TSFL(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc)			\
+	LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val)		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)	\
+do {							\
+	if (_size > TX_DESC_NEXT_DESC_OFFSET)		\
+		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
+	else						\
+		memset(__pdesc, 0, _size);		\
+} while (0)
+
+#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs)		\
+	((rxmcs) == DESC92_RATE1M ||			\
+	 (rxmcs) == DESC92_RATE2M ||			\
+	 (rxmcs) == DESC92_RATE5_5M ||			\
+	 (rxmcs) == DESC92_RATE11M)
+
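SET_BITS_TO_LE_4BYTE() and LE_BITS_TO_4BYTE() are not part of this patch (they live in the shared rtlwifi headers), but the accessor macros above only make sense once you know they are assumed to do a read-modify-write of a bit field inside a little-endian 32-bit word at the given byte offset. A rough standalone sketch of that assumed behaviour, not the driver's actual helpers:

	#include <stdint.h>

	/* Assumed semantics: a field of width 'bits' at bit offset 'shift'
	 * inside the little-endian dword starting at byte pointer p.
	 */
	static uint32_t le_bits_to_4byte(const uint8_t *p, int shift, int bits)
	{
		uint32_t dword = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
				 ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
		uint32_t mask = (bits == 32) ? 0xffffffffu : ((1u << bits) - 1);

		return (dword >> shift) & mask;
	}

	static void set_bits_to_le_4byte(uint8_t *p, int shift, int bits,
					 uint32_t val)
	{
		uint32_t mask = (bits == 32) ? 0xffffffffu : ((1u << bits) - 1);
		uint32_t dword = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
				 ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

		dword = (dword & ~(mask << shift)) | ((val & mask) << shift);
		p[0] = dword & 0xff;
		p[1] = (dword >> 8) & 0xff;
		p[2] = (dword >> 16) & 0xff;
		p[3] = (dword >> 24) & 0xff;
	}

	int main(void)
	{
		uint8_t desc[40] = { 0 };

		/* Equivalent of SET_TX_DESC_RTS_RATE(desc, 8) above:
		 * 5-bit field at bit 0 of the dword at desc + 16.
		 */
		set_bits_to_le_4byte(desc + 16, 0, 5, 8);
		return le_bits_to_4byte(desc + 16, 0, 5) == 8 ? 0 : 1;
	}
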
+struct rx_fwinfo_8723e {
+	u8 gain_trsw[4];
+	u8 pwdb_all;
+	u8 cfosho[4];
+	u8 cfotail[4];
+	char rxevm[2];
+	char rxsnr[4];
+	u8 pdsnr[2];
+	u8 csi_current[2];
+	u8 csi_target[2];
+	u8 sigevm;
+	u8 max_ex_pwr;
+	u8 ex_intf_flag:1;
+	u8 sgi_en:1;
+	u8 rxsc:2;
+	u8 reserve:4;
+} __packed;
+
+struct tx_desc_8723e {
+	u32 pktsize:16;
+	u32 offset:8;
+	u32 bmc:1;
+	u32 htc:1;
+	u32 lastseg:1;
+	u32 firstseg:1;
+	u32 linip:1;
+	u32 noacm:1;
+	u32 gf:1;
+	u32 own:1;
+
+	u32 macid:5;
+	u32 agg_en:1;
+	u32 bk:1;
+	u32 rdg_en:1;
+	u32 queuesel:5;
+	u32 rd_nav_ext:1;
+	u32 lsig_txop_en:1;
+	u32 pifs:1;
+	u32 rateid:4;
+	u32 nav_usehdr:1;
+	u32 en_descid:1;
+	u32 sectype:2;
+	u32 pktoffset:8;
+
+	u32 rts_rc:6;
+	u32 data_rc:6;
+	u32 rsvd0:2;
+	u32 bar_retryht:2;
+	u32 rsvd1:1;
+	u32 morefrag:1;
+	u32 raw:1;
+	u32 ccx:1;
+	u32 ampdudensity:3;
+	u32 rsvd2:1;
+	u32 ant_sela:1;
+	u32 ant_selb:1;
+	u32 txant_cck:2;
+	u32 txant_l:2;
+	u32 txant_ht:2;
+
+	u32 nextheadpage:8;
+	u32 tailpage:8;
+	u32 seq:12;
+	u32 pktid:4;
+
+	u32 rtsrate:5;
+	u32 apdcfe:1;
+	u32 qos:1;
+	u32 hwseq_enable:1;
+	u32 userrate:1;
+	u32 dis_rtsfb:1;
+	u32 dis_datafb:1;
+	u32 cts2self:1;
+	u32 rts_en:1;
+	u32 hwrts_en:1;
+	u32 portid:1;
+	u32 rsvd3:3;
+	u32 waitdcts:1;
+	u32 cts2ap_en:1;
+	u32 txsc:2;
+	u32 stbc:2;
+	u32 txshort:1;
+	u32 txbw:1;
+	u32 rtsshort:1;
+	u32 rtsbw:1;
+	u32 rtssc:2;
+	u32 rtsstbc:2;
+
+	u32 txrate:6;
+	u32 shortgi:1;
+	u32 ccxt:1;
+	u32 txrate_fb_lmt:5;
+	u32 rtsrate_fb_lmt:4;
+	u32 retrylmt_en:1;
+	u32 txretrylmt:6;
+	u32 usb_txaggnum:8;
+
+	u32 txagca:5;
+	u32 txagcb:5;
+	u32 usemaxlen:1;
+	u32 maxaggnum:5;
+	u32 mcsg1maxlen:4;
+	u32 mcsg2maxlen:4;
+	u32 mcsg3maxlen:4;
+	u32 mcs7sgimaxlen:4;
+
+	u32 txbuffersize:16;
+	u32 mcsg4maxlen:4;
+	u32 mcsg5maxlen:4;
+	u32 mcsg6maxlen:4;
+	u32 mcsg15sgimaxlen:4;
+
+	u32 txbuffaddr;
+	u32 txbufferaddr64;
+	u32 nextdescaddress;
+	u32 nextdescaddress64;
+
+	u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8723e {
+	u32 length:14;
+	u32 crc32:1;
+	u32 icverror:1;
+	u32 drv_infosize:4;
+	u32 security:3;
+	u32 qos:1;
+	u32 shift:2;
+	u32 phystatus:1;
+	u32 swdec:1;
+	u32 lastseg:1;
+	u32 firstseg:1;
+	u32 eor:1;
+	u32 own:1;
+
+	u32 macid:5;
+	u32 tid:4;
+	u32 hwrsvd:5;
+	u32 paggr:1;
+	u32 faggr:1;
+	u32 a1_fit:4;
+	u32 a2_fit:4;
+	u32 pam:1;
+	u32 pwr:1;
+	u32 moredata:1;
+	u32 morefrag:1;
+	u32 type:2;
+	u32 mc:1;
+	u32 bc:1;
+
+	u32 seq:12;
+	u32 frag:4;
+	u32 nextpktlen:14;
+	u32 nextind:1;
+	u32 rsvd:1;
+
+	u32 rxmcs:6;
+	u32 rxht:1;
+	u32 amsdu:1;
+	u32 splcp:1;
+	u32 bandwidth:1;
+	u32 htc:1;
+	u32 tcpchk_rpt:1;
+	u32 ipcchk_rpt:1;
+	u32 tcpchk_valid:1;
+	u32 hwpcerr:1;
+	u32 hwpcind:1;
+	u32 iv0:16;
+
+	u32 iv1;
+
+	u32 tsfl;
+
+	u32 bufferaddress;
+	u32 bufferaddress64;
+
+} __packed;
+
+void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
+			    struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_sta *sta,
+			    struct sk_buff *skb, u8 hw_queue,
+			    struct rtl_tcb_desc *ptcb_desc);
+bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
+			     struct rtl_stats *status,
+			     struct ieee80211_rx_status *rx_status,
+			     u8 *pdesc, struct sk_buff *skb);
+void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+			       bool b_firstseg, bool b_lastseg,
+			       struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
new file mode 100644
index 0000000..8ed3174
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -0,0 +1,268 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "stats.h"
+#include <linux/export.h>
+
+u8 rtl_query_rxpwrpercentage(char antpower)
+{
+	if ((antpower <= -100) || (antpower >= 20))
+		return 0;
+	else if (antpower >= 0)
+		return 100;
+	else
+		return 100 + antpower;
+}
+EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
+
+u8 rtl_evm_db_to_percentage(char value)
+{
+	char ret_val;
+	ret_val = value;
+
+	if (ret_val >= 0)
+		ret_val = 0;
+	if (ret_val <= -33)
+		ret_val = -33;
+	ret_val = 0 - ret_val;
+	ret_val *= 3;
+	if (ret_val == 99)
+		ret_val = 100;
+
+	return ret_val;
+}
+EXPORT_SYMBOL(rtl_evm_db_to_percentage);
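As a quick sanity check of the mapping above: an EVM of -25 dB stays inside the (-33, 0) range, is negated to 25 and scaled by 3 to give 75%; anything at or below -33 dB clamps to 99 and is then rounded up to 100%, while anything at or above 0 dB reports 0%.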
+
+static long rtl_translate_todbm(struct ieee80211_hw *hw,
+				u8 signal_strength_index)
+{
+	long signal_power;
+
+	signal_power = (long)((signal_strength_index + 1) >> 1);
+	signal_power -= 95;
+	return signal_power;
+}
+
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
+{
+	long retsig;
+
+	if (currsig >= 61 && currsig <= 100)
+		retsig = 90 + ((currsig - 60) / 4);
+	else if (currsig >= 41 && currsig <= 60)
+		retsig = 78 + ((currsig - 40) / 2);
+	else if (currsig >= 31 && currsig <= 40)
+		retsig = 66 + (currsig - 30);
+	else if (currsig >= 21 && currsig <= 30)
+		retsig = 54 + (currsig - 20);
+	else if (currsig >= 5 && currsig <= 20)
+		retsig = 42 + (((currsig - 5) * 2) / 3);
+	else if (currsig == 4)
+		retsig = 36;
+	else if (currsig == 3)
+		retsig = 27;
+	else if (currsig == 2)
+		retsig = 18;
+	else if (currsig == 1)
+		retsig = 9;
+	else
+		retsig = currsig;
+
+	return retsig;
+}
+EXPORT_SYMBOL(rtl_signal_scale_mapping);
+
+static void rtl_process_ui_rssi(struct ieee80211_hw *hw,
+				struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 rfpath;
+	u32 last_rssi, tmpval;
+
+	rtlpriv->stats.rssi_calculate_cnt++;
+
+	if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+		rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
+		last_rssi = rtlpriv->stats.ui_rssi.elements[
+			rtlpriv->stats.ui_rssi.index];
+		rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+	}
+	rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
+	rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
+	    pstatus->signalstrength;
+	if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+		rtlpriv->stats.ui_rssi.index = 0;
+	tmpval = rtlpriv->stats.ui_rssi.total_val /
+		rtlpriv->stats.ui_rssi.total_num;
+	rtlpriv->stats.signal_strength = rtl_translate_todbm(hw,
+		(u8) tmpval);
+	pstatus->rssi = rtlpriv->stats.signal_strength;
+
+	if (pstatus->is_cck)
+		return;
+
+	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+	     rfpath++) {
+		if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    pstatus->rx_mimo_signalstrength[rfpath];
+
+		}
+		if (pstatus->rx_mimo_signalstrength[rfpath] >
+		    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+			      (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_signalstrength[rfpath])) /
+			    (RX_SMOOTH_FACTOR);
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+		} else {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+			      (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_signalstrength[rfpath])) /
+			    (RX_SMOOTH_FACTOR);
+		}
+	}
+}
+
+static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
+					  struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int weighting = 0;
+
+	if (rtlpriv->stats.recv_signal_power == 0)
+		rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
+	if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
+		weighting = 5;
+	else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
+		weighting = (-5);
+	rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+		5 + pstatus->recvsignalpower + weighting) / 6;
+}
+
+static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_sta_info *drv_priv = NULL;
+	struct ieee80211_sta *sta = NULL;
+	long undec_sm_pwdb;
+
+	rcu_read_lock();
+	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+		sta = rtl_find_sta(hw, pstatus->psaddr);
+
+	/* adhoc or ap mode */
+	if (sta) {
+		drv_priv = (struct rtl_sta_info *) sta->drv_priv;
+		undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
+	} else {
+		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+	}
+
+	if (undec_sm_pwdb < 0)
+		undec_sm_pwdb = pstatus->rx_pwdb_all;
+	if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
+		undec_sm_pwdb = (((undec_sm_pwdb) *
+		      (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+		undec_sm_pwdb = undec_sm_pwdb + 1;
+	} else {
+		undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+	}
+
+	if (sta) {
+		drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
+	} else {
+		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
+	}
+	rcu_read_unlock();
+
+	rtl_update_rxsignalstatistics(hw, pstatus);
+}
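rtl_process_ui_rssi() and rtl_process_pwdb() above share the same exponential smoothing step, new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR, adding one when the sample is above the running value so that integer division does not keep the average pinned below a rising signal. A minimal standalone sketch of that update (RX_SMOOTH_FACTOR is 20, from stats.h below), for reference only:

	#include <stdio.h>

	#define RX_SMOOTH_FACTOR 20

	/* One step of the rolling average used for per-path RSSI and PWDB. */
	static long rx_smooth(long old, long sample)
	{
		long val = (old * (RX_SMOOTH_FACTOR - 1) + sample) /
			   RX_SMOOTH_FACTOR;

		/* When the input rises, integer division would otherwise keep
		 * the average stuck below the new level, hence the +1.
		 */
		if (sample > old)
			val += 1;
		return val;
	}

	int main(void)
	{
		long avg = 40;

		for (int i = 0; i < 5; i++)
			avg = rx_smooth(avg, 80);
		printf("avg after 5 samples at 80: %ld\n", avg); /* climbs toward 80 */
		return 0;
	}
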
+
+static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
+					struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 last_evm, n_stream, tmpval;
+
+	if (pstatus->signalquality == 0)
+		return;
+
+	if (rtlpriv->stats.ui_link_quality.total_num++ >=
+	    PHY_LINKQUALITY_SLID_WIN_MAX) {
+		rtlpriv->stats.ui_link_quality.total_num =
+		    PHY_LINKQUALITY_SLID_WIN_MAX;
+		last_evm = rtlpriv->stats.ui_link_quality.elements[
+			rtlpriv->stats.ui_link_quality.index];
+		rtlpriv->stats.ui_link_quality.total_val -= last_evm;
+	}
+	rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
+	rtlpriv->stats.ui_link_quality.elements[
+		rtlpriv->stats.ui_link_quality.index++] =
+						 pstatus->signalquality;
+	if (rtlpriv->stats.ui_link_quality.index >=
+	    PHY_LINKQUALITY_SLID_WIN_MAX)
+		rtlpriv->stats.ui_link_quality.index = 0;
+	tmpval = rtlpriv->stats.ui_link_quality.total_val /
+	    rtlpriv->stats.ui_link_quality.total_num;
+	rtlpriv->stats.signal_quality = tmpval;
+	rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+	for (n_stream = 0; n_stream < 2; n_stream++) {
+		if (pstatus->rx_mimo_sig_qual[n_stream] != -1) {
+			if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
+				rtlpriv->stats.rx_evm_percentage[n_stream] =
+				    pstatus->rx_mimo_sig_qual[n_stream];
+			}
+			rtlpriv->stats.rx_evm_percentage[n_stream] =
+			    ((rtlpriv->stats.rx_evm_percentage[n_stream]
+			      * (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_sig_qual[n_stream] * 1)) /
+			    (RX_SMOOTH_FACTOR);
+		}
+	}
+}
+
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+	struct rtl_stats *pstatus)
+{
+
+	if (!pstatus->packet_matchbssid)
+		return;
+
+	rtl_process_ui_rssi(hw, pstatus);
+	rtl_process_pwdb(hw, pstatus);
+	rtl_process_ui_link_quality(hw, pstatus);
+}
+EXPORT_SYMBOL(rtl_process_phyinfo);
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/rtlwifi/stats.h
new file mode 100644
index 0000000..0dbdc52
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/stats.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_STATS_H__
+#define __RTL_STATS_H__
+
+#define	PHY_RSSI_SLID_WIN_MAX			100
+#define	PHY_LINKQUALITY_SLID_WIN_MAX		20
+#define	PHY_BEACON_RSSI_SLID_WIN_MAX		10
+
+/* Rx smooth factor */
+#define	RX_SMOOTH_FACTOR			20
+
+u8 rtl_query_rxpwrpercentage(char antpower);
+u8 rtl_evm_db_to_percentage(char value);
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+	struct rtl_stats *pstatus);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f1b6bc6..21a5f4f 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -198,15 +198,15 @@
 	u32 rftxgain_stage;
 	u32 rfhssi_para1;
 	u32 rfhssi_para2;
-	u32 rfswitch_control;
+	u32 rfsw_ctrl;
 	u32 rfagc_control1;
 	u32 rfagc_control2;
-	u32 rfrxiq_imbalance;
+	u32 rfrxiq_imbal;
 	u32 rfrx_afe;
-	u32 rftxiq_imbalance;
+	u32 rftxiq_imbal;
 	u32 rftx_afe;
-	u32 rflssi_readback;
-	u32 rflssi_readbackpi;
+	u32 rf_rb;		/* rflssi_readback */
+	u32 rf_rbpi;		/* rflssi_readbackpi */
 };
 
 enum io_type {
@@ -350,6 +350,11 @@
 	RT_CID_819x_WNC_COREGA = 31,
 	RT_CID_819x_Foxcoon = 32,
 	RT_CID_819x_DELL = 33,
+	RT_CID_819x_PRONETS = 34,
+	RT_CID_819x_Edimax_ASUS = 35,
+	RT_CID_NETGEAR = 36,
+	RT_CID_PLANEX = 37,
+	RT_CID_CC_C = 38,
 };
 
 enum hw_descs {
@@ -505,6 +510,7 @@
 	RTL_IMR_ROK,		/*Receive DMA OK Interrupt */
 	RTL_IBSS_INT_MASKS,	/*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
 				 * RTL_IMR_TBDER) */
+	RTL_IMR_C2HCMD,		/*fw interrupt*/
 
 	/*CCK Rates, TxHT = 0 */
 	RTL_RC_CCK_RATE1M,
@@ -661,6 +667,11 @@
 	ACT_DELBA = 2,
 };
 
+enum rt_polarity_ctl {
+	RT_POLARITY_LOW_ACT = 0,
+	RT_POLARITY_HIGH_ACT = 1,
+};
+
 struct octet_string {
 	u8 *octet;
 	u16 length;
@@ -885,7 +896,7 @@
 	u8 pwrgroup_cnt;
 	u8 cck_high_power;
 	/* MAX_PG_GROUP groups of pwr diff by rates */
-	u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+	u32 mcs_offset[MAX_PG_GROUP][16];
 	u8 default_initialgain[4];
 
 	/* the current Tx power level */
@@ -903,6 +914,8 @@
 	u8 num_total_rfpath;
 	struct phy_parameters hwparam_tables[MAX_TAB];
 	u16 rf_pathmap;
+
+	enum rt_polarity_ctl polarity_ctl;
 };
 
 #define MAX_TID_COUNT				9
@@ -933,7 +946,7 @@
 };
 
 struct rssi_sta {
-	long undecorated_smoothed_pwdb;
+	long undec_sm_pwdb;
 };
 
 struct rtl_sta_info {
@@ -1042,13 +1055,64 @@
 	/*QOS & EDCA */
 	struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
 	struct rtl_qos_parameters ac[AC_MAX];
+
+	/* counters */
+	u64 last_txok_cnt;
+	u64 last_rxok_cnt;
+	u32 last_bt_edca_ul;
+	u32 last_bt_edca_dl;
+};
+
+struct btdm_8723 {
+	bool all_off;
+	bool agc_table_en;
+	bool adc_back_off_on;
+	bool b2_ant_hid_en;
+	bool low_penalty_rate_adaptive;
+	bool rf_rx_lpf_shrink;
+	bool reject_aggre_pkt;
+	bool tra_tdma_on;
+	u8 tra_tdma_nav;
+	u8 tra_tdma_ant;
+	bool tdma_on;
+	u8 tdma_ant;
+	u8 tdma_nav;
+	u8 tdma_dac_swing;
+	u8 fw_dac_swing_lvl;
+	bool ps_tdma_on;
+	u8 ps_tdma_byte[5];
+	bool pta_on;
+	u32 val_0x6c0;
+	u32 val_0x6c8;
+	u32 val_0x6cc;
+	bool sw_dac_swing_on;
+	u32 sw_dac_swing_lvl;
+	u32 wlan_act_hi;
+	u32 wlan_act_lo;
+	u32 bt_retry_index;
+	bool dec_bt_pwr;
+	bool ignore_wlan_act;
+};
+
+struct bt_coexist_8723 {
+	u32 high_priority_tx;
+	u32 high_priority_rx;
+	u32 low_priority_tx;
+	u32 low_priority_rx;
+	u8 c2h_bt_info;
+	bool c2h_bt_info_req_sent;
+	bool c2h_bt_inquiry_page;
+	u32 bt_inq_page_start_time;
+	u8 bt_retry_cnt;
+	u8 c2h_bt_info_original;
+	u8 bt_inquiry_page_cnt;
+	struct btdm_8723 btdm;
 };
 
 struct rtl_hal {
 	struct ieee80211_hw *hw;
-
+	struct bt_coexist_8723 hal_coex_8723;
 	bool up_first_time;
-	bool first_init;
 	bool being_init_adapter;
 	bool bbrf_ready;
 
@@ -1131,9 +1195,9 @@
 
 struct rtl_dm {
 	/*PHY status for Dynamic Management */
-	long entry_min_undecoratedsmoothed_pwdb;
-	long undecorated_smoothed_pwdb;	/*out dm */
-	long entry_max_undecoratedsmoothed_pwdb;
+	long entry_min_undec_sm_pwdb;
+	long undec_sm_pwdb;	/*out dm */
+	long entry_max_undec_sm_pwdb;
 	bool dm_initialgain_enable;
 	bool dynamic_txpower_enable;
 	bool current_turbo_edca;
@@ -1209,7 +1273,7 @@
 	u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
 	u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
 	u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
-	u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+	u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
 	u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
 	u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER];	/*For HT 40MHZ pwr */
 	u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER];	/*For HT 40MHZ pwr */
@@ -1312,6 +1376,7 @@
 };
 
 struct rtl_stats {
+	u8 psaddr[ETH_ALEN];
 	u32 mac_time[2];
 	s8 rssi;
 	u8 signal;
@@ -1351,7 +1416,7 @@
 	bool rx_is40Mhzpacket;
 	u32 rx_pwdb_all;
 	u8 rx_mimo_signalstrength[4];	/*in 0~100 index */
-	s8 rx_mimo_signalquality[2];
+	s8 rx_mimo_sig_qual[2];
 	bool packet_matchbssid;
 	bool is_cck;
 	bool is_ht;
@@ -1503,6 +1568,10 @@
 	void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
 	void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
 	void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
+	void (*c2h_command_handle) (struct ieee80211_hw *hw);
+	void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
+					     bool mstate);
+	void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
 };
 
 struct rtl_intf_ops {
@@ -1679,7 +1748,7 @@
 	u32 rssi_highthresh;
 	u32 fa_lowthresh;
 	u32 fa_highthresh;
-	long last_min_undecorated_pwdb_for_dm;
+	long last_min_undec_pwdb_for_dm;
 	long rssi_highpower_lowthresh;
 	long rssi_highpower_highthresh;
 	u32 recover_cnt;
@@ -1692,15 +1761,15 @@
 	u8 dig_twoport_algorithm;
 	u8 dig_dbgmode;
 	u8 dig_slgorithm_switch;
-	u8 cursta_connectstate;
-	u8 presta_connectstate;
-	u8 curmultista_connectstate;
-	char backoff_val;
-	char backoff_val_range_max;
-	char backoff_val_range_min;
+	u8 cursta_cstate;
+	u8 presta_cstate;
+	u8 curmultista_cstate;
+	char back_val;
+	char back_range_max;
+	char back_range_min;
 	u8 rx_gain_range_max;
 	u8 rx_gain_range_min;
-	u8 min_undecorated_pwdb_for_dm;
+	u8 min_undec_pwdb_for_dm;
 	u8 rssi_val_min;
 	u8 pre_cck_pd_state;
 	u8 cur_cck_pd_state;
@@ -1712,10 +1781,10 @@
 	u8 forbidden_igi;
 	u8 dig_state;
 	u8 dig_highpwrstate;
-	u8 cur_sta_connectstate;
-	u8 pre_sta_connectstate;
-	u8 cur_ap_connectstate;
-	u8 pre_ap_connectstate;
+	u8 cur_sta_cstate;
+	u8 pre_sta_cstate;
+	u8 cur_ap_cstate;
+	u8 pre_ap_cstate;
 	u8 cur_pd_thstate;
 	u8 pre_pd_thstate;
 	u8 cur_cs_ratiostate;
@@ -1781,9 +1850,22 @@
 	struct dig_t dm_digtable;
 	struct ps_t dm_pstable;
 
-	/* data buffer pointer for USB reads */
-	__le32 *usb_data;
-	int usb_data_index;
+	/* section shared by individual drivers */
+	union {
+		struct {	/* data buffer pointer for USB reads */
+			__le32 *usb_data;
+			int usb_data_index;
+			bool initialized;
+		};
+		struct {	/* section for 8723ae */
+			bool reg_init;	/* true if regs saved */
+			u32 reg_874;
+			u32 reg_c70;
+			u32 reg_85c;
+			u32 reg_a74;
+			bool bt_operation_on;
+		};
+	};
 
 	/*This must be the last item so
 	   that it points to the data allocated
@@ -1815,6 +1897,7 @@
 	BT_CSR_BC4 = 3,
 	BT_CSR_BC8 = 4,
 	BT_RTL8756 = 5,
+	BT_RTL8723A = 6,
 };
 
 enum bt_cur_state {
@@ -1846,7 +1929,7 @@
 	u8 eeprom_bt_coexist;
 	u8 eeprom_bt_type;
 	u8 eeprom_bt_ant_num;
-	u8 eeprom_bt_ant_isolation;
+	u8 eeprom_bt_ant_isol;
 	u8 eeprom_bt_radio_shared;
 
 	u8 bt_coexistence;
@@ -1873,13 +1956,27 @@
 
 	bool fw_coexist_all_off;
 	bool sw_coexist_all_off;
-	u32 current_state;
+	bool hw_coexist_all_off;
+	u32 cstate;
 	u32 previous_state;
+	u32 cstate_h;
+	u32 previous_state_h;
+
 	u8 bt_pre_rssi_state;
+	u8 bt_pre_rssi_state1;
 
 	u8 reg_bt_iso;
 	u8 reg_bt_sco;
+	bool balance_on;
+	u8 bt_active_zero_cnt;
+	bool cur_bt_disabled;
+	bool pre_bt_disabled;
 
+	u8 bt_profile_case;
+	u8 bt_profile_action;
+	bool bt_busy;
+	bool hold_for_bt_operation;
+	u8 lps_counter;
 };
 
 
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 6af3526..23289d4 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -81,7 +81,7 @@
 	status->freq = ieee80211_channel_to_frequency(desc->channel,
 						      status->band);
 
-	status->flag |= RX_FLAG_MACTIME_MPDU;
+	status->flag |= RX_FLAG_MACTIME_START;
 
 	if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
 		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 25530c8..4f1a05b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -677,7 +677,7 @@
 	memset(data, 0, sizeof(*data));
 	data->cur_vif = cur_vif;
 
-	ieee80211_iterate_active_interfaces(hw,
+	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 					    wl12xx_vif_count_iter, data);
 }
 
@@ -3791,7 +3791,7 @@
 
 	/* Handle HT information change */
 	if ((changed & BSS_CHANGED_HT) &&
-	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
 		ret = wl1271_acx_set_ht_information(wl, wlvif,
 					bss_conf->ht_operation_mode);
 		if (ret < 0) {
@@ -3905,7 +3905,8 @@
 			u32 rates;
 			int ieoffset;
 			wlvif->aid = bss_conf->aid;
-			wlvif->channel_type = bss_conf->channel_type;
+			wlvif->channel_type =
+				cfg80211_get_chandef_type(&bss_conf->chandef);
 			wlvif->beacon_int = bss_conf->beacon_int;
 			do_join = true;
 			set_assoc = true;
@@ -4071,7 +4072,7 @@
 	/* Handle new association with HT. Do this after join. */
 	if (sta_exists) {
 		if ((changed & BSS_CHANGED_HT) &&
-		    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+		    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
 			ret = wl1271_acx_set_ht_capabilities(wl,
 							     &sta_ht_cap,
 							     true,
@@ -4098,7 +4099,7 @@
 
 	/* Handle HT information change. Done after join. */
 	if ((changed & BSS_CHANGED_HT) &&
-	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
 		ret = wl1271_acx_set_ht_information(wl, wlvif,
 					bss_conf->ht_operation_mode);
 		if (ret < 0) {
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bf05831..36c3590 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,7 @@
 # Makefile for nfc devices
 #
 
-obj-$(CONFIG_PN544_HCI_NFC)	+= pn544_hci.o
+obj-$(CONFIG_PN544_HCI_NFC)	+= pn544/
 obj-$(CONFIG_NFC_PN533)		+= pn533.o
 obj-$(CONFIG_NFC_WILINK)	+= nfcwilink.o
 
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 30ae18a..ada681b 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -84,6 +84,10 @@
 #define PN533_LISTEN_TIME 2
 
 /* frame definitions */
+#define PN533_NORMAL_FRAME_MAX_LEN 262  /* 6   (PREAMBLE, SOF, LEN, LCS, TFI)
+					   254 (DATA)
+					   2   (DCS, postamble) */
+
 #define PN533_FRAME_TAIL_SIZE 2
 #define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
 				PN533_FRAME_TAIL_SIZE)
@@ -1166,8 +1170,7 @@
 		pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
 }
 
-static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
-				     u8 *params, int params_len)
+static int pn533_start_poll_complete(struct pn533 *dev, u8 *params, int params_len)
 {
 	struct pn533_poll_response *resp;
 	int rc;
@@ -1305,8 +1308,7 @@
 }
 
 #define ATR_REQ_GB_OFFSET 17
-static int pn533_init_target_complete(struct pn533 *dev, void *arg,
-				      u8 *params, int params_len)
+static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_len)
 {
 	struct pn533_cmd_init_target_response *resp;
 	u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
@@ -1403,9 +1405,9 @@
 	if (cur_mod->len == 0) {
 		del_timer(&dev->listen_timer);
 
-		return pn533_init_target_complete(dev, arg, params, params_len);
+		return pn533_init_target_complete(dev, params, params_len);
 	} else {
-		rc = pn533_start_poll_complete(dev, arg, params, params_len);
+		rc = pn533_start_poll_complete(dev, params, params_len);
 		if (!rc)
 			return rc;
 	}
@@ -2376,9 +2378,9 @@
 		goto error;
 	}
 
-	dev->in_frame = kmalloc(dev->in_maxlen, GFP_KERNEL);
+	dev->in_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
 	dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
-	dev->out_frame = kmalloc(dev->out_maxlen, GFP_KERNEL);
+	dev->out_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
 	dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
 
 	if (!dev->in_frame || !dev->out_frame ||
diff --git a/drivers/nfc/pn544/Makefile b/drivers/nfc/pn544/Makefile
new file mode 100644
index 0000000..72573388
--- /dev/null
+++ b/drivers/nfc/pn544/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for PN544 HCI based NFC driver
+#
+
+obj-$(CONFIG_PN544_HCI_NFC)	+= pn544_i2c.o
+
+pn544_i2c-y		:= pn544.o i2c.o
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
new file mode 100644
index 0000000..7da9071
--- /dev/null
+++ b/drivers/nfc/pn544/i2c.c
@@ -0,0 +1,500 @@
+/*
+ * I2C Link Layer for PN544 HCI based Driver
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/platform_data/pn544.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "pn544.h"
+
+#define PN544_I2C_FRAME_HEADROOM 1
+#define PN544_I2C_FRAME_TAILROOM 2
+
+/* framing in HCI mode */
+#define PN544_HCI_I2C_LLC_LEN		1
+#define PN544_HCI_I2C_LLC_CRC		2
+#define PN544_HCI_I2C_LLC_LEN_CRC	(PN544_HCI_I2C_LLC_LEN + \
+					 PN544_HCI_I2C_LLC_CRC)
+#define PN544_HCI_I2C_LLC_MIN_SIZE	(1 + PN544_HCI_I2C_LLC_LEN_CRC)
+#define PN544_HCI_I2C_LLC_MAX_PAYLOAD	29
+#define PN544_HCI_I2C_LLC_MAX_SIZE	(PN544_HCI_I2C_LLC_LEN_CRC + 1 + \
+					 PN544_HCI_I2C_LLC_MAX_PAYLOAD)
+
+static struct i2c_device_id pn544_hci_i2c_id_table[] = {
+	{"pn544", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
+
+#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+
+struct pn544_i2c_phy {
+	struct i2c_client *i2c_dev;
+	struct nfc_hci_dev *hdev;
+
+	unsigned int gpio_en;
+	unsigned int gpio_irq;
+	unsigned int gpio_fw;
+	unsigned int en_polarity;
+
+	int powered;
+
+	int hard_fault;		/*
+				 * < 0 if hardware error occurred (e.g. i2c err)
+				 * and prevents normal operation.
+				 */
+};
+
+#define I2C_DUMP_SKB(info, skb)					\
+do {								\
+	pr_debug("%s:\n", info);				\
+	print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET,	\
+		       16, 1, (skb)->data, (skb)->len, 0);	\
+} while (0)
+
+static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
+{
+	int polarity, retry, ret;
+	char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+	int count = sizeof(rset_cmd);
+
+	pr_info(DRIVER_DESC ": %s\n", __func__);
+	dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+
+	/* Disable fw download */
+	gpio_set_value(phy->gpio_fw, 0);
+
+	for (polarity = 0; polarity < 2; polarity++) {
+		phy->en_polarity = polarity;
+		retry = 3;
+		while (retry--) {
+			/* power off */
+			gpio_set_value(phy->gpio_en, !phy->en_polarity);
+			usleep_range(10000, 15000);
+
+			/* power on */
+			gpio_set_value(phy->gpio_en, phy->en_polarity);
+			usleep_range(10000, 15000);
+
+			/* send reset */
+			dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
+			ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
+			if (ret == count) {
+				dev_info(&phy->i2c_dev->dev,
+					 "nfc_en polarity : active %s\n",
+					 (polarity == 0 ? "low" : "high"));
+				goto out;
+			}
+		}
+	}
+
+	dev_err(&phy->i2c_dev->dev,
+		"Could not detect nfc_en polarity, fallback to active high\n");
+
+out:
+	gpio_set_value(phy->gpio_en, !phy->en_polarity);
+}
+
+static int pn544_hci_i2c_enable(void *phy_id)
+{
+	struct pn544_i2c_phy *phy = phy_id;
+
+	pr_info(DRIVER_DESC ": %s\n", __func__);
+
+	gpio_set_value(phy->gpio_fw, 0);
+	gpio_set_value(phy->gpio_en, phy->en_polarity);
+	usleep_range(10000, 15000);
+
+	phy->powered = 1;
+
+	return 0;
+}
+
+static void pn544_hci_i2c_disable(void *phy_id)
+{
+	struct pn544_i2c_phy *phy = phy_id;
+
+	pr_info(DRIVER_DESC ": %s\n", __func__);
+
+	gpio_set_value(phy->gpio_fw, 0);
+	gpio_set_value(phy->gpio_en, !phy->en_polarity);
+	usleep_range(10000, 15000);
+
+	gpio_set_value(phy->gpio_en, phy->en_polarity);
+	usleep_range(10000, 15000);
+
+	gpio_set_value(phy->gpio_en, !phy->en_polarity);
+	usleep_range(10000, 15000);
+
+	phy->powered = 0;
+}
+
+static void pn544_hci_i2c_add_len_crc(struct sk_buff *skb)
+{
+	u16 crc;
+	int len;
+
+	len = skb->len + 2;
+	*skb_push(skb, 1) = len;
+
+	crc = crc_ccitt(0xffff, skb->data, skb->len);
+	crc = ~crc;
+	*skb_put(skb, 1) = crc & 0xff;
+	*skb_put(skb, 1) = crc >> 8;
+}
+
+static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb)
+{
+	skb_pull(skb, PN544_I2C_FRAME_HEADROOM);
+	skb_trim(skb, PN544_I2C_FRAME_TAILROOM);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb.
+ */
+static int pn544_hci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+	int r;
+	struct pn544_i2c_phy *phy = phy_id;
+	struct i2c_client *client = phy->i2c_dev;
+
+	if (phy->hard_fault != 0)
+		return phy->hard_fault;
+
+	usleep_range(3000, 6000);
+
+	pn544_hci_i2c_add_len_crc(skb);
+
+	I2C_DUMP_SKB("i2c frame written", skb);
+
+	r = i2c_master_send(client, skb->data, skb->len);
+
+	if (r == -EREMOTEIO) {	/* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, skb->data, skb->len);
+	}
+
+	if (r >= 0) {
+		if (r != skb->len)
+			r = -EREMOTEIO;
+		else
+			r = 0;
+	}
+
+	pn544_hci_i2c_remove_len_crc(skb);
+
+	return r;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+	int len;
+	u16 crc;
+
+	len = buf[0] + 1;
+	crc = crc_ccitt(0xffff, buf, len - 2);
+	crc = ~crc;
+
+	if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
+		pr_err(PN544_HCI_I2C_DRIVER_NAME
+		       ": CRC error 0x%x != 0x%x 0x%x\n",
+		       crc, buf[len - 1], buf[len - 2]);
+
+		pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	return 0;
+}
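For reference, the frame that pn544_hci_i2c_add_len_crc() builds and check_crc() verifies is: one length byte (payload length + 2), the payload, then the bit-inverted CRC-CCITT of everything so far, low byte first. A hedged userspace sketch of that layout, with a local CRC routine standing in for the kernel's crc_ccitt() and an arbitrary example payload:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Bit-reflected CRC-CCITT (poly 0x8408), same algorithm family as
	 * lib/crc-ccitt.c; used here only to make the sketch self-contained.
	 */
	static uint16_t crc_ccitt16(uint16_t crc, const uint8_t *buf, size_t len)
	{
		while (len--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
		}
		return crc;
	}

	/* Frame 'payload' into 'out': len byte, payload, ~CRC (LSB first).
	 * Returns the total frame length.
	 */
	static size_t pn544_frame(const uint8_t *payload, size_t plen, uint8_t *out)
	{
		uint16_t crc;

		out[0] = plen + 2;	/* bytes following the len byte */
		memcpy(out + 1, payload, plen);
		crc = (uint16_t)~crc_ccitt16(0xffff, out, plen + 1); /* len + payload */
		out[plen + 1] = crc & 0xff;
		out[plen + 2] = crc >> 8;
		return plen + 3;
	}

	int main(void)
	{
		uint8_t payload[] = { 0x81, 0x02, 0x03 };	/* illustrative only */
		uint8_t frame[32];
		size_t n = pn544_frame(payload, sizeof(payload), frame);

		for (size_t i = 0; i < n; i++)
			printf("%02x ", frame[i]);
		printf("\n");
		return 0;
	}
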
+
+/*
+ * Reads an SHDLC frame and returns it in a newly allocated sk_buff. Guarantees
+ * that the i2c bus will be flushed and that the next read will start on a new
+ * frame. The returned skb contains only the LLC header and payload.
+ * Returns:
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
+{
+	int r;
+	u8 len;
+	u8 tmp[PN544_HCI_I2C_LLC_MAX_SIZE - 1];
+	struct i2c_client *client = phy->i2c_dev;
+
+	r = i2c_master_recv(client, &len, 1);
+	if (r != 1) {
+		dev_err(&client->dev, "cannot read len byte\n");
+		return -EREMOTEIO;
+	}
+
+	if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
+	    (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
+		dev_err(&client->dev, "invalid len byte\n");
+		r = -EBADMSG;
+		goto flush;
+	}
+
+	*skb = alloc_skb(1 + len, GFP_KERNEL);
+	if (*skb == NULL) {
+		r = -ENOMEM;
+		goto flush;
+	}
+
+	*skb_put(*skb, 1) = len;
+
+	r = i2c_master_recv(client, skb_put(*skb, len), len);
+	if (r != len) {
+		kfree_skb(*skb);
+		return -EREMOTEIO;
+	}
+
+	I2C_DUMP_SKB("i2c frame read", *skb);
+
+	r = check_crc((*skb)->data, (*skb)->len);
+	if (r != 0) {
+		kfree_skb(*skb);
+		r = -EBADMSG;
+		goto flush;
+	}
+
+	skb_pull(*skb, 1);
+	skb_trim(*skb, (*skb)->len - 2);
+
+	usleep_range(3000, 6000);
+
+	return 0;
+
+flush:
+	if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+		r = -EREMOTEIO;
+
+	usleep_range(3000, 6000);
+
+	return r;
+}
+
+/*
+ * Reads an SHDLC frame from the chip. This is not as straightforward as it
+ * seems. There are cases where we could lose frame start synchronization.
+ * The frame format is len-data-crc, and corruption can occur anywhere while
+ * transiting on the i2c bus, such that we could read an invalid len.
+ * In order to recover synchronization with the next frame, we must be sure
+ * to read the real amount of data without using the len byte. We do this by
+ * assuming the following:
+ * - the chip will always present only one single complete frame on the bus
+ *   before triggering the interrupt
+ * - the chip will not present a new frame until we have completely read
+ *   the previous one (or until we have handled the interrupt).
+ * The tricky case is when we read a corrupted len that is less than the real
+ * len. We must detect this here in order to determine that we need to flush
+ * the bus. This is the reason why we check the crc here.
+ */
+static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+	struct pn544_i2c_phy *phy = phy_id;
+	struct i2c_client *client;
+	struct sk_buff *skb = NULL;
+	int r;
+
+	if (!phy || irq != phy->i2c_dev->irq) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	client = phy->i2c_dev;
+	dev_dbg(&client->dev, "IRQ\n");
+
+	if (phy->hard_fault != 0)
+		return IRQ_HANDLED;
+
+	r = pn544_hci_i2c_read(phy, &skb);
+	if (r == -EREMOTEIO) {
+		phy->hard_fault = r;
+
+		nfc_hci_recv_frame(phy->hdev, NULL);
+
+		return IRQ_HANDLED;
+	} else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+		return IRQ_HANDLED;
+	}
+
+	nfc_hci_recv_frame(phy->hdev, skb);
+
+	return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+	.write = pn544_hci_i2c_write,
+	.enable = pn544_hci_i2c_enable,
+	.disable = pn544_hci_i2c_disable,
+};
+
+static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
+				     const struct i2c_device_id *id)
+{
+	struct pn544_i2c_phy *phy;
+	struct pn544_nfc_platform_data *pdata;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+		return -ENODEV;
+	}
+
+	phy = kzalloc(sizeof(struct pn544_i2c_phy), GFP_KERNEL);
+	if (!phy) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544 i2c phy.\n");
+		r = -ENOMEM;
+		goto err_phy_alloc;
+	}
+
+	phy->i2c_dev = client;
+	i2c_set_clientdata(client, phy);
+
+	pdata = client->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&client->dev, "No platform data\n");
+		r = -EINVAL;
+		goto err_pdata;
+	}
+
+	if (pdata->request_resources == NULL) {
+		dev_err(&client->dev, "request_resources() missing\n");
+		r = -EINVAL;
+		goto err_pdata;
+	}
+
+	r = pdata->request_resources(client);
+	if (r) {
+		dev_err(&client->dev, "Cannot get platform resources\n");
+		goto err_pdata;
+	}
+
+	phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+	phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+	phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+
+	pn544_hci_i2c_platform_init(phy);
+
+	r = request_threaded_irq(client->irq, NULL, pn544_hci_i2c_irq_thread_fn,
+				 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				 PN544_HCI_I2C_DRIVER_NAME, phy);
+	if (r < 0) {
+		dev_err(&client->dev, "Unable to register IRQ handler\n");
+		goto err_rti;
+	}
+
+	r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+			    PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
+			    PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+	if (r < 0)
+		goto err_hci;
+
+	return 0;
+
+err_hci:
+	free_irq(client->irq, phy);
+
+err_rti:
+	if (pdata->free_resources != NULL)
+		pdata->free_resources();
+
+err_pdata:
+	kfree(phy);
+
+err_phy_alloc:
+	return r;
+}
+
+static __devexit int pn544_hci_i2c_remove(struct i2c_client *client)
+{
+	struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	pn544_hci_remove(phy->hdev);
+
+	if (phy->powered)
+		pn544_hci_i2c_disable(phy);
+
+	free_irq(client->irq, phy);
+	if (pdata->free_resources)
+		pdata->free_resources();
+
+	kfree(phy);
+
+	return 0;
+}
+
+static struct i2c_driver pn544_hci_i2c_driver = {
+	.driver = {
+		   .name = PN544_HCI_I2C_DRIVER_NAME,
+		  },
+	.probe = pn544_hci_i2c_probe,
+	.id_table = pn544_hci_i2c_id_table,
+	.remove = __devexit_p(pn544_hci_i2c_remove),
+};
+
+static int __init pn544_hci_i2c_init(void)
+{
+	int r;
+
+	pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+	r = i2c_add_driver(&pn544_hci_i2c_driver);
+	if (r) {
+		pr_err(PN544_HCI_I2C_DRIVER_NAME
+		       ": driver registration failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit pn544_hci_i2c_exit(void)
+{
+	i2c_del_driver(&pn544_hci_i2c_driver);
+}
+
+module_init(pn544_hci_i2c_init);
+module_exit(pn544_hci_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
new file mode 100644
index 0000000..cc666de
--- /dev/null
+++ b/drivers/nfc/pn544/pn544.c
@@ -0,0 +1,862 @@
+/*
+ * HCI based Driver for NXP PN544 NFC Chip
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "pn544.h"
+
+/* Timing restrictions (ms) */
+#define PN544_HCI_RESETVEN_TIME		30
+
+#define HCI_MODE 0
+#define FW_MODE 1
+
+enum pn544_state {
+	PN544_ST_COLD,
+	PN544_ST_FW_READY,
+	PN544_ST_READY,
+};
+
+#define FULL_VERSION_LEN 11
+
+/* Proprietary commands */
+#define PN544_WRITE		0x3f
+
+/* Proprietary gates, events, commands and registers */
+
+/* NFC_HCI_RF_READER_A_GATE additional registers and commands */
+#define PN544_RF_READER_A_AUTO_ACTIVATION			0x10
+#define PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION		0x12
+#define PN544_MIFARE_CMD					0x21
+
+/* Commands that apply to all RF readers */
+#define PN544_RF_READER_CMD_PRESENCE_CHECK	0x30
+#define PN544_RF_READER_CMD_ACTIVATE_NEXT	0x32
+
+/* NFC_HCI_ID_MGMT_GATE additional registers */
+#define PN544_ID_MGMT_FULL_VERSION_SW		0x10
+
+#define PN544_RF_READER_ISO15693_GATE		0x12
+
+#define PN544_RF_READER_F_GATE			0x14
+#define PN544_FELICA_ID				0x04
+#define PN544_FELICA_RAW			0x20
+
+#define PN544_RF_READER_JEWEL_GATE		0x15
+#define PN544_JEWEL_RAW_CMD			0x23
+
+#define PN544_RF_READER_NFCIP1_INITIATOR_GATE	0x30
+#define PN544_RF_READER_NFCIP1_TARGET_GATE	0x31
+
+#define PN544_SYS_MGMT_GATE			0x90
+#define PN544_SYS_MGMT_INFO_NOTIFICATION	0x02
+
+#define PN544_POLLING_LOOP_MGMT_GATE		0x94
+#define PN544_DEP_MODE				0x01
+#define PN544_DEP_ATR_REQ			0x02
+#define PN544_DEP_ATR_RES			0x03
+#define PN544_DEP_MERGE				0x0D
+#define PN544_PL_RDPHASES			0x06
+#define PN544_PL_EMULATION			0x07
+#define PN544_PL_NFCT_DEACTIVATED		0x09
+
+#define PN544_SWP_MGMT_GATE			0xA0
+
+#define PN544_NFC_WI_MGMT_GATE			0xA1
+
+#define PN544_HCI_EVT_SND_DATA			0x01
+#define PN544_HCI_EVT_ACTIVATED			0x02
+#define PN544_HCI_EVT_DEACTIVATED		0x03
+#define PN544_HCI_EVT_RCV_DATA			0x04
+#define PN544_HCI_EVT_CONTINUE_MI		0x05
+
+#define PN544_HCI_CMD_ATTREQUEST		0x12
+#define PN544_HCI_CMD_CONTINUE_ACTIVATION	0x13
+
+static struct nfc_hci_gate pn544_gates[] = {
+	{NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
+	{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
+	{NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
+	{NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE},
+	{PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE}
+};
+
+/* Largest headroom needed for outgoing custom commands */
+#define PN544_CMDS_HEADROOM	2
+
+struct pn544_hci_info {
+	struct nfc_phy_ops *phy_ops;
+	void *phy_id;
+
+	struct nfc_hci_dev *hdev;
+
+	enum pn544_state state;
+
+	struct mutex info_lock;
+
+	int async_cb_type;
+	data_exchange_cb_t async_cb;
+	void *async_cb_context;
+};
+
+static int pn544_hci_open(struct nfc_hci_dev *hdev)
+{
+	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+	int r = 0;
+
+	mutex_lock(&info->info_lock);
+
+	if (info->state != PN544_ST_COLD) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	r = info->phy_ops->enable(info->phy_id);
+
+	if (r == 0)
+		info->state = PN544_ST_READY;
+
+out:
+	mutex_unlock(&info->info_lock);
+	return r;
+}
+
+static void pn544_hci_close(struct nfc_hci_dev *hdev)
+{
+	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	mutex_lock(&info->info_lock);
+
+	if (info->state == PN544_ST_COLD)
+		goto out;
+
+	info->phy_ops->disable(info->phy_id);
+
+	info->state = PN544_ST_COLD;
+
+out:
+	mutex_unlock(&info->info_lock);
+}
+
+static int pn544_hci_ready(struct nfc_hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	static struct hw_config {
+		u8 adr[2];
+		u8 value;
+	} hw_config[] = {
+		{{0x9f, 0x9a}, 0x00},
+
+		{{0x98, 0x10}, 0xbc},
+
+		{{0x9e, 0x71}, 0x00},
+
+		{{0x98, 0x09}, 0x00},
+
+		{{0x9e, 0xb4}, 0x00},
+
+		{{0x9e, 0xd9}, 0xff},
+		{{0x9e, 0xda}, 0xff},
+		{{0x9e, 0xdb}, 0x23},
+		{{0x9e, 0xdc}, 0x21},
+		{{0x9e, 0xdd}, 0x22},
+		{{0x9e, 0xde}, 0x24},
+
+		{{0x9c, 0x01}, 0x08},
+
+		{{0x9e, 0xaa}, 0x01},
+
+		{{0x9b, 0xd1}, 0x0d},
+		{{0x9b, 0xd2}, 0x24},
+		{{0x9b, 0xd3}, 0x0a},
+		{{0x9b, 0xd4}, 0x22},
+		{{0x9b, 0xd5}, 0x08},
+		{{0x9b, 0xd6}, 0x1e},
+		{{0x9b, 0xdd}, 0x1c},
+
+		{{0x9b, 0x84}, 0x13},
+		{{0x99, 0x81}, 0x7f},
+		{{0x99, 0x31}, 0x70},
+
+		{{0x98, 0x00}, 0x3f},
+
+		{{0x9f, 0x09}, 0x00},
+
+		{{0x9f, 0x0a}, 0x05},
+
+		{{0x9e, 0xd1}, 0xa1},
+		{{0x99, 0x23}, 0x00},
+
+		{{0x9e, 0x74}, 0x80},
+
+		{{0x9f, 0x28}, 0x10},
+
+		{{0x9f, 0x35}, 0x14},
+
+		{{0x9f, 0x36}, 0x60},
+
+		{{0x9c, 0x31}, 0x00},
+
+		{{0x9c, 0x32}, 0xc8},
+
+		{{0x9c, 0x19}, 0x40},
+
+		{{0x9c, 0x1a}, 0x40},
+
+		{{0x9c, 0x0c}, 0x00},
+
+		{{0x9c, 0x0d}, 0x00},
+
+		{{0x9c, 0x12}, 0x00},
+
+		{{0x9c, 0x13}, 0x00},
+
+		{{0x98, 0xa2}, 0x0e},
+
+		{{0x98, 0x93}, 0x40},
+
+		{{0x98, 0x7d}, 0x02},
+		{{0x98, 0x7e}, 0x00},
+		{{0x9f, 0xc8}, 0x01},
+	};
+	struct hw_config *p = hw_config;
+	int count = ARRAY_SIZE(hw_config);
+	struct sk_buff *res_skb;
+	u8 param[4];
+	int r;
+
+	param[0] = 0;
+	while (count--) {
+		param[1] = p->adr[0];
+		param[2] = p->adr[1];
+		param[3] = p->value;
+
+		r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE,
+				     param, 4, &res_skb);
+		if (r < 0)
+			return r;
+
+		if (res_skb->len != 1) {
+			kfree_skb(res_skb);
+			return -EPROTO;
+		}
+
+		if (res_skb->data[0] != p->value) {
+			kfree_skb(res_skb);
+			return -EIO;
+		}
+
+		kfree_skb(res_skb);
+
+		p++;
+	}
+
+	param[0] = NFC_HCI_UICC_HOST_ID;
+	r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+			      NFC_HCI_ADMIN_WHITELIST, param, 1);
+	if (r < 0)
+		return r;
+
+	param[0] = 0x3d;
+	r = nfc_hci_set_param(hdev, PN544_SYS_MGMT_GATE,
+			      PN544_SYS_MGMT_INFO_NOTIFICATION, param, 1);
+	if (r < 0)
+		return r;
+
+	param[0] = 0x0;
+	r = nfc_hci_set_param(hdev, NFC_HCI_RF_READER_A_GATE,
+			      PN544_RF_READER_A_AUTO_ACTIVATION, param, 1);
+	if (r < 0)
+		return r;
+
+	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
+	if (r < 0)
+		return r;
+
+	param[0] = 0x1;
+	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+			      PN544_PL_NFCT_DEACTIVATED, param, 1);
+	if (r < 0)
+		return r;
+
+	param[0] = 0x0;
+	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+			      PN544_PL_RDPHASES, param, 1);
+	if (r < 0)
+		return r;
+
+	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+			      PN544_ID_MGMT_FULL_VERSION_SW, &skb);
+	if (r < 0)
+		return r;
+
+	if (skb->len != FULL_VERSION_LEN) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
+		       DUMP_PREFIX_NONE, 16, 1,
+		       skb->data, FULL_VERSION_LEN, false);
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	return info->phy_ops->write(info->phy_id, skb);
+}
+
+static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
+				u32 im_protocols, u32 tm_protocols)
+{
+	u8 phases = 0;
+	int r;
+	u8 duration[2];
+	u8 activated;
+	u8 i_mode = 0x3f; /* Enable all supported modes */
+	u8 t_mode = 0x0f;
+	u8 t_merge = 0x01; /* Enable merge by default */
+
+	pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
+		__func__, im_protocols, tm_protocols);
+
+	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
+	if (r < 0)
+		return r;
+
+	duration[0] = 0x18;
+	duration[1] = 0x6a;
+	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+			      PN544_PL_EMULATION, duration, 2);
+	if (r < 0)
+		return r;
+
+	activated = 0;
+	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+			      PN544_PL_NFCT_DEACTIVATED, &activated, 1);
+	if (r < 0)
+		return r;
+
+	if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
+			 NFC_PROTO_JEWEL_MASK))
+		phases |= 1;		/* Type A */
+	if (im_protocols & NFC_PROTO_FELICA_MASK) {
+		phases |= (1 << 2);	/* Type F 212 */
+		phases |= (1 << 3);	/* Type F 424 */
+	}
+
+	phases |= (1 << 5);		/* NFC active */
+
+	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+			      PN544_PL_RDPHASES, &phases, 1);
+	if (r < 0)
+		return r;
+
+	if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
+		hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
+							&hdev->gb_len);
+		pr_debug("local general bytes %p", hdev->gb);
+		if (hdev->gb == NULL || hdev->gb_len == 0) {
+			im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+			tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+		}
+	}
+
+	if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		r = nfc_hci_send_event(hdev,
+				PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+				NFC_HCI_EVT_END_OPERATION, NULL, 0);
+		if (r < 0)
+			return r;
+
+		r = nfc_hci_set_param(hdev,
+				PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+				PN544_DEP_MODE, &i_mode, 1);
+		if (r < 0)
+			return r;
+
+		r = nfc_hci_set_param(hdev,
+				PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+				PN544_DEP_ATR_REQ, hdev->gb, hdev->gb_len);
+		if (r < 0)
+			return r;
+
+		r = nfc_hci_send_event(hdev,
+				PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+				NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+		if (r < 0)
+			nfc_hci_send_event(hdev,
+					PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+					NFC_HCI_EVT_END_OPERATION, NULL, 0);
+	}
+
+	if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+				PN544_DEP_MODE, &t_mode, 1);
+		if (r < 0)
+			return r;
+
+		r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+				PN544_DEP_ATR_RES, hdev->gb, hdev->gb_len);
+		if (r < 0)
+			return r;
+
+		r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+				PN544_DEP_MERGE, &t_merge, 1);
+		if (r < 0)
+			return r;
+	}
+
+	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+			       NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+	if (r < 0)
+		nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+				   NFC_HCI_EVT_END_OPERATION, NULL, 0);
+
+	return r;
+}
+
+static int pn544_hci_dep_link_up(struct nfc_hci_dev *hdev,
+				struct nfc_target *target, u8 comm_mode,
+				u8 *gb, size_t gb_len)
+{
+	struct sk_buff *rgb_skb = NULL;
+	int r;
+
+	r = nfc_hci_get_param(hdev, target->hci_reader_gate,
+				PN544_DEP_ATR_RES, &rgb_skb);
+	if (r < 0)
+		return r;
+
+	if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) {
+		r = -EPROTO;
+		goto exit;
+	}
+	print_hex_dump(KERN_DEBUG, "remote gb: ", DUMP_PREFIX_OFFSET,
+			16, 1, rgb_skb->data, rgb_skb->len, true);
+
+	r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data,
+						rgb_skb->len);
+
+	if (r == 0)
+		r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode,
+					NFC_RF_INITIATOR);
+exit:
+	kfree_skb(rgb_skb);
+	return r;
+}
+
+static int pn544_hci_dep_link_down(struct nfc_hci_dev *hdev)
+{
+
+	return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+					NFC_HCI_EVT_END_OPERATION, NULL, 0);
+}
+
+static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+				      struct nfc_target *target)
+{
+	switch (gate) {
+	case PN544_RF_READER_F_GATE:
+		target->supported_protocols = NFC_PROTO_FELICA_MASK;
+		break;
+	case PN544_RF_READER_JEWEL_GATE:
+		target->supported_protocols = NFC_PROTO_JEWEL_MASK;
+		target->sens_res = 0x0c00;
+		break;
+	case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
+		target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+		break;
+	default:
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
+static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
+						u8 gate,
+						struct nfc_target *target)
+{
+	struct sk_buff *uid_skb;
+	int r = 0;
+
+	if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
+		return r;
+
+	if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		r = nfc_hci_send_cmd(hdev,
+			PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+			PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL);
+		if (r < 0)
+			return r;
+
+		target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
+	} else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+		if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
+		    target->nfcid1_len != 10)
+			return -EPROTO;
+
+		r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+				     PN544_RF_READER_CMD_ACTIVATE_NEXT,
+				     target->nfcid1, target->nfcid1_len, NULL);
+	} else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
+		r = nfc_hci_get_param(hdev, PN544_RF_READER_F_GATE,
+				      PN544_FELICA_ID, &uid_skb);
+		if (r < 0)
+			return r;
+
+		if (uid_skb->len != 8) {
+			kfree_skb(uid_skb);
+			return -EPROTO;
+		}
+
+		r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+				     PN544_RF_READER_CMD_ACTIVATE_NEXT,
+				     uid_skb->data, uid_skb->len, NULL);
+		kfree_skb(uid_skb);
+
+		r = nfc_hci_send_cmd(hdev,
+					PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+					PN544_HCI_CMD_CONTINUE_ACTIVATION,
+					NULL, 0, NULL);
+		if (r < 0)
+			return r;
+
+		target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
+		target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+	} else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
+		/*
+		 * TODO: other ISO 14443 targets may also require some kind of
+		 * continue activation; this is the only one seen so far.
+		 */
+		if (target->sens_res == 0x4403)	/* Type 4 Mifare DESFire */
+			r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+			      PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION,
+			      NULL, 0, NULL);
+	}
+
+	return r;
+}
+
+#define PN544_CB_TYPE_READER_F 1
+
+static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
+				       int err)
+{
+	struct pn544_hci_info *info = context;
+
+	switch (info->async_cb_type) {
+	case PN544_CB_TYPE_READER_F:
+		if (err == 0)
+			skb_pull(skb, 1);
+		info->async_cb(info->async_cb_context, skb, err);
+		break;
+	default:
+		if (err == 0)
+			kfree_skb(skb);
+		break;
+	}
+}
+
+#define MIFARE_CMD_AUTH_KEY_A	0x60
+#define MIFARE_CMD_AUTH_KEY_B	0x61
+#define MIFARE_CMD_HEADER	2
+#define MIFARE_UID_LEN		4
+#define MIFARE_KEY_LEN		6
+#define MIFARE_CMD_LEN		12
+/*
+ * Returns:
+ * <= 0: driver handled the data exchange
+ *    1: driver does not handle it specially; do standard processing
+ */
+static int pn544_hci_im_transceive(struct nfc_hci_dev *hdev,
+				   struct nfc_target *target,
+				   struct sk_buff *skb, data_exchange_cb_t cb,
+				   void *cb_context)
+{
+	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
+		target->hci_reader_gate);
+
+	switch (target->hci_reader_gate) {
+	case NFC_HCI_RF_READER_A_GATE:
+		if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+			/*
+			 * It seems that pn544 is inverting key and UID for
+			 * MIFARE authentication commands.
+			 */
+			if (skb->len == MIFARE_CMD_LEN &&
+			    (skb->data[0] == MIFARE_CMD_AUTH_KEY_A ||
+			     skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) {
+				u8 uid[MIFARE_UID_LEN];
+				u8 *data = skb->data + MIFARE_CMD_HEADER;
+
+				memcpy(uid, data + MIFARE_KEY_LEN,
+				       MIFARE_UID_LEN);
+				memmove(data + MIFARE_UID_LEN, data,
+					MIFARE_KEY_LEN);
+				memcpy(data, uid, MIFARE_UID_LEN);
+			}
+
+			return nfc_hci_send_cmd_async(hdev,
+						      target->hci_reader_gate,
+						      PN544_MIFARE_CMD,
+						      skb->data, skb->len,
+						      cb, cb_context);
+		} else
+			return 1;
+	case PN544_RF_READER_F_GATE:
+		*skb_push(skb, 1) = 0;
+		*skb_push(skb, 1) = 0;
+
+		info->async_cb_type = PN544_CB_TYPE_READER_F;
+		info->async_cb = cb;
+		info->async_cb_context = cb_context;
+
+		return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+					      PN544_FELICA_RAW, skb->data,
+					      skb->len,
+					      pn544_hci_data_exchange_cb, info);
+	case PN544_RF_READER_JEWEL_GATE:
+		return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+					      PN544_JEWEL_RAW_CMD, skb->data,
+					      skb->len, cb, cb_context);
+	case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
+		*skb_push(skb, 1) = 0;
+
+		return nfc_hci_send_event(hdev, target->hci_reader_gate,
+					PN544_HCI_EVT_SND_DATA, skb->data,
+					skb->len);
+	default:
+		return 1;
+	}
+}
+
+static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+	/* Set default false for multiple information chaining */
+	*skb_push(skb, 1) = 0;
+
+	return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+				PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
+}
+
+static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
+				   struct nfc_target *target)
+{
+	pr_debug("supported protocol %d", target->supported_protocols);
+	if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
+					NFC_PROTO_ISO14443_B_MASK)) {
+		return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+					PN544_RF_READER_CMD_PRESENCE_CHECK,
+					NULL, 0, NULL);
+	} else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+		if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
+		    target->nfcid1_len != 10)
+			return -EOPNOTSUPP;
+
+		 return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+				     PN544_RF_READER_CMD_ACTIVATE_NEXT,
+				     target->nfcid1, target->nfcid1_len, NULL);
+	} else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) {
+		return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+					PN544_JEWEL_RAW_CMD, NULL, 0, NULL);
+	} else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
+		return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+					PN544_FELICA_RAW, NULL, 0, NULL);
+	} else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+					PN544_HCI_CMD_ATTREQUEST,
+					NULL, 0, NULL);
+	}
+
+	return 0;
+}
+
+static void pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
+					u8 event, struct sk_buff *skb)
+{
+	struct sk_buff *rgb_skb = NULL;
+	int r = 0;
+
+	pr_debug("hci event %d", event);
+	switch (event) {
+	case PN544_HCI_EVT_ACTIVATED:
+		if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
+			nfc_hci_target_discovered(hdev, gate);
+		else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) {
+			r = nfc_hci_get_param(hdev, gate, PN544_DEP_ATR_REQ,
+						&rgb_skb);
+
+			if (r < 0)
+				goto exit;
+
+			nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+					NFC_COMM_PASSIVE, rgb_skb->data,
+					rgb_skb->len);
+
+			kfree_skb(rgb_skb);
+		}
+
+		break;
+	case PN544_HCI_EVT_DEACTIVATED:
+		nfc_hci_send_event(hdev, gate,
+			NFC_HCI_EVT_END_OPERATION, NULL, 0);
+		break;
+	case PN544_HCI_EVT_RCV_DATA:
+		if (skb->len < 2) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		if (skb->data[0] != 0) {
+			pr_debug("data0 %d", skb->data[0]);
+			r = -EPROTO;
+			goto exit;
+		}
+
+		skb_pull(skb, 2);
+		nfc_tm_data_received(hdev->ndev, skb);
+
+		return;
+	default:
+		break;
+	}
+
+exit:
+	kfree_skb(skb);
+}
+
+static struct nfc_hci_ops pn544_hci_ops = {
+	.open = pn544_hci_open,
+	.close = pn544_hci_close,
+	.hci_ready = pn544_hci_ready,
+	.xmit = pn544_hci_xmit,
+	.start_poll = pn544_hci_start_poll,
+	.dep_link_up = pn544_hci_dep_link_up,
+	.dep_link_down = pn544_hci_dep_link_down,
+	.target_from_gate = pn544_hci_target_from_gate,
+	.complete_target_discovered = pn544_hci_complete_target_discovered,
+	.im_transceive = pn544_hci_im_transceive,
+	.tm_send = pn544_hci_tm_send,
+	.check_presence = pn544_hci_check_presence,
+	.event_received = pn544_hci_event_received,
+};
+
+int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+		    int phy_headroom, int phy_tailroom, int phy_payload,
+		    struct nfc_hci_dev **hdev)
+{
+	struct pn544_hci_info *info;
+	u32 protocols;
+	struct nfc_hci_init_data init_data;
+	int r;
+
+	info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
+	if (!info) {
+		pr_err("Cannot allocate memory for pn544_hci_info.\n");
+		r = -ENOMEM;
+		goto err_info_alloc;
+	}
+
+	info->phy_ops = phy_ops;
+	info->phy_id = phy_id;
+	info->state = PN544_ST_COLD;
+	mutex_init(&info->info_lock);
+
+	init_data.gate_count = ARRAY_SIZE(pn544_gates);
+
+	memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates));
+
+	/*
+	 * TODO: Session id must include the driver name + some bus addr
+	 * persistent info to discriminate 2 identical chips
+	 */
+	strcpy(init_data.session_id, "ID544HCI");
+
+	protocols = NFC_PROTO_JEWEL_MASK |
+		    NFC_PROTO_MIFARE_MASK |
+		    NFC_PROTO_FELICA_MASK |
+		    NFC_PROTO_ISO14443_MASK |
+		    NFC_PROTO_ISO14443_B_MASK |
+		    NFC_PROTO_NFC_DEP_MASK;
+
+	info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
+					     protocols, llc_name,
+					     phy_headroom + PN544_CMDS_HEADROOM,
+					     phy_tailroom, phy_payload);
+	if (!info->hdev) {
+		pr_err("Cannot allocate nfc hdev.\n");
+		r = -ENOMEM;
+		goto err_alloc_hdev;
+	}
+
+	nfc_hci_set_clientdata(info->hdev, info);
+
+	r = nfc_hci_register_device(info->hdev);
+	if (r)
+		goto err_regdev;
+
+	*hdev = info->hdev;
+
+	return 0;
+
+err_regdev:
+	nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
+	kfree(info);
+
+err_info_alloc:
+	return r;
+}
+
+void pn544_hci_remove(struct nfc_hci_dev *hdev)
+{
+	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	nfc_hci_unregister_device(hdev);
+	nfc_hci_free_device(hdev);
+	kfree(info);
+}
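
A note on the MIFARE handling in pn544_hci_im_transceive() above: for authentication commands the driver swaps the 6-byte key and the 4-byte UID that follow the 2-byte command header before the frame is handed to the chip. The standalone user-space sketch below only replays that reordering on a dummy buffer; the command and block values are made up for illustration, and just the MIFARE_* layout constants are taken from the driver.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MIFARE_CMD_AUTH_KEY_A	0x60
#define MIFARE_CMD_HEADER	2
#define MIFARE_UID_LEN		4
#define MIFARE_KEY_LEN		6
#define MIFARE_CMD_LEN		12

/* Mirror of the key/UID reordering applied to MIFARE auth commands. */
static void swap_key_and_uid(uint8_t *cmd)
{
	uint8_t uid[MIFARE_UID_LEN];
	uint8_t *data = cmd + MIFARE_CMD_HEADER;

	memcpy(uid, data + MIFARE_KEY_LEN, MIFARE_UID_LEN);
	memmove(data + MIFARE_UID_LEN, data, MIFARE_KEY_LEN);
	memcpy(data, uid, MIFARE_UID_LEN);
}

int main(void)
{
	/* cmd byte, block number, key[6], uid[4] -- dummy values */
	uint8_t cmd[MIFARE_CMD_LEN] = {
		MIFARE_CMD_AUTH_KEY_A, 0x04,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* key */
		0xde, 0xad, 0xbe, 0xef,			/* uid */
	};
	int i;

	swap_key_and_uid(cmd);

	/* Prints: 60 04 de ad be ef ff ff ff ff ff ff */
	for (i = 0; i < MIFARE_CMD_LEN; i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}

The chip apparently expects the UID before the key in the authentication frame, which is why the driver reorders the usual host-side layout (key before UID) on the way out.
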
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
new file mode 100644
index 0000000..f47c645
--- /dev/null
+++ b/drivers/nfc/pn544/pn544.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 - 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_PN544_H_
+#define __LOCAL_PN544_H_
+
+#include <net/nfc/hci.h>
+
+#define DRIVER_DESC "HCI NFC driver for PN544"
+
+int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+		    int phy_headroom, int phy_tailroom, int phy_payload,
+		    struct nfc_hci_dev **hdev);
+void pn544_hci_remove(struct nfc_hci_dev *hdev);
+
+#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
deleted file mode 100644
index c9c8570..0000000
--- a/drivers/nfc/pn544_hci.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * HCI based Driver for NXP PN544 NFC Chip
- *
- * Copyright (C) 2012  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/crc-ccitt.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/miscdevice.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-
-#include <linux/nfc.h>
-#include <net/nfc/hci.h>
-#include <net/nfc/llc.h>
-
-#include <linux/nfc/pn544.h>
-
-#define DRIVER_DESC "HCI NFC driver for PN544"
-
-#define PN544_HCI_DRIVER_NAME "pn544_hci"
-
-/* Timing restrictions (ms) */
-#define PN544_HCI_RESETVEN_TIME		30
-
-static struct i2c_device_id pn544_hci_id_table[] = {
-	{"pn544", 0},
-	{}
-};
-
-MODULE_DEVICE_TABLE(i2c, pn544_hci_id_table);
-
-#define HCI_MODE 0
-#define FW_MODE 1
-
-/* framing in HCI mode */
-#define PN544_HCI_LLC_LEN		1
-#define PN544_HCI_LLC_CRC		2
-#define PN544_HCI_LLC_LEN_CRC		(PN544_HCI_LLC_LEN + PN544_HCI_LLC_CRC)
-#define PN544_HCI_LLC_MIN_SIZE		(1 + PN544_HCI_LLC_LEN_CRC)
-#define PN544_HCI_LLC_MAX_PAYLOAD	29
-#define PN544_HCI_LLC_MAX_SIZE		(PN544_HCI_LLC_LEN_CRC + 1 + \
-					 PN544_HCI_LLC_MAX_PAYLOAD)
-
-enum pn544_state {
-	PN544_ST_COLD,
-	PN544_ST_FW_READY,
-	PN544_ST_READY,
-};
-
-#define FULL_VERSION_LEN 11
-
-/* Proprietary commands */
-#define PN544_WRITE		0x3f
-
-/* Proprietary gates, events, commands and registers */
-
-/* NFC_HCI_RF_READER_A_GATE additional registers and commands */
-#define PN544_RF_READER_A_AUTO_ACTIVATION			0x10
-#define PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION		0x12
-#define PN544_MIFARE_CMD					0x21
-
-/* Commands that apply to all RF readers */
-#define PN544_RF_READER_CMD_PRESENCE_CHECK	0x30
-#define PN544_RF_READER_CMD_ACTIVATE_NEXT	0x32
-
-/* NFC_HCI_ID_MGMT_GATE additional registers */
-#define PN544_ID_MGMT_FULL_VERSION_SW		0x10
-
-#define PN544_RF_READER_ISO15693_GATE		0x12
-
-#define PN544_RF_READER_F_GATE			0x14
-#define PN544_FELICA_ID				0x04
-#define PN544_FELICA_RAW			0x20
-
-#define PN544_RF_READER_JEWEL_GATE		0x15
-#define PN544_JEWEL_RAW_CMD			0x23
-
-#define PN544_RF_READER_NFCIP1_INITIATOR_GATE	0x30
-#define PN544_RF_READER_NFCIP1_TARGET_GATE	0x31
-
-#define PN544_SYS_MGMT_GATE			0x90
-#define PN544_SYS_MGMT_INFO_NOTIFICATION	0x02
-
-#define PN544_POLLING_LOOP_MGMT_GATE		0x94
-#define PN544_PL_RDPHASES			0x06
-#define PN544_PL_EMULATION			0x07
-#define PN544_PL_NFCT_DEACTIVATED		0x09
-
-#define PN544_SWP_MGMT_GATE			0xA0
-
-#define PN544_NFC_WI_MGMT_GATE			0xA1
-
-static struct nfc_hci_gate pn544_gates[] = {
-	{NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE},
-	{PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE}
-};
-
-/* Largest headroom needed for outgoing custom commands */
-#define PN544_CMDS_HEADROOM	2
-#define PN544_FRAME_HEADROOM 1
-#define PN544_FRAME_TAILROOM 2
-
-struct pn544_hci_info {
-	struct i2c_client *i2c_dev;
-	struct nfc_hci_dev *hdev;
-
-	enum pn544_state state;
-
-	struct mutex info_lock;
-
-	unsigned int gpio_en;
-	unsigned int gpio_irq;
-	unsigned int gpio_fw;
-	unsigned int en_polarity;
-
-	int hard_fault;		/*
-				 * < 0 if hardware error occured (e.g. i2c err)
-				 * and prevents normal operation.
-				 */
-	int async_cb_type;
-	data_exchange_cb_t async_cb;
-	void *async_cb_context;
-};
-
-static void pn544_hci_platform_init(struct pn544_hci_info *info)
-{
-	int polarity, retry, ret;
-	char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
-	int count = sizeof(rset_cmd);
-
-	pr_info(DRIVER_DESC ": %s\n", __func__);
-	dev_info(&info->i2c_dev->dev, "Detecting nfc_en polarity\n");
-
-	/* Disable fw download */
-	gpio_set_value(info->gpio_fw, 0);
-
-	for (polarity = 0; polarity < 2; polarity++) {
-		info->en_polarity = polarity;
-		retry = 3;
-		while (retry--) {
-			/* power off */
-			gpio_set_value(info->gpio_en, !info->en_polarity);
-			usleep_range(10000, 15000);
-
-			/* power on */
-			gpio_set_value(info->gpio_en, info->en_polarity);
-			usleep_range(10000, 15000);
-
-			/* send reset */
-			dev_dbg(&info->i2c_dev->dev, "Sending reset cmd\n");
-			ret = i2c_master_send(info->i2c_dev, rset_cmd, count);
-			if (ret == count) {
-				dev_info(&info->i2c_dev->dev,
-					 "nfc_en polarity : active %s\n",
-					 (polarity == 0 ? "low" : "high"));
-				goto out;
-			}
-		}
-	}
-
-	dev_err(&info->i2c_dev->dev,
-		"Could not detect nfc_en polarity, fallback to active high\n");
-
-out:
-	gpio_set_value(info->gpio_en, !info->en_polarity);
-}
-
-static int pn544_hci_enable(struct pn544_hci_info *info, int mode)
-{
-	pr_info(DRIVER_DESC ": %s\n", __func__);
-
-	gpio_set_value(info->gpio_fw, 0);
-	gpio_set_value(info->gpio_en, info->en_polarity);
-	usleep_range(10000, 15000);
-
-	return 0;
-}
-
-static void pn544_hci_disable(struct pn544_hci_info *info)
-{
-	pr_info(DRIVER_DESC ": %s\n", __func__);
-
-	gpio_set_value(info->gpio_fw, 0);
-	gpio_set_value(info->gpio_en, !info->en_polarity);
-	usleep_range(10000, 15000);
-
-	gpio_set_value(info->gpio_en, info->en_polarity);
-	usleep_range(10000, 15000);
-
-	gpio_set_value(info->gpio_en, !info->en_polarity);
-	usleep_range(10000, 15000);
-}
-
-static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
-{
-	int r;
-
-	usleep_range(3000, 6000);
-
-	r = i2c_master_send(client, buf, len);
-
-	if (r == -EREMOTEIO) {	/* Retry, chip was in standby */
-		usleep_range(6000, 10000);
-		r = i2c_master_send(client, buf, len);
-	}
-
-	if (r >= 0) {
-		if (r != len)
-			return -EREMOTEIO;
-		else
-			return 0;
-	}
-
-	return r;
-}
-
-static int check_crc(u8 *buf, int buflen)
-{
-	int len;
-	u16 crc;
-
-	len = buf[0] + 1;
-	crc = crc_ccitt(0xffff, buf, len - 2);
-	crc = ~crc;
-
-	if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
-		pr_err(PN544_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
-		       crc, buf[len - 1], buf[len - 2]);
-
-		pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
-		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
-			       16, 2, buf, buflen, false);
-		return -EPERM;
-	}
-	return 0;
-}
-
-/*
- * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
- * that i2c bus will be flushed and that next read will start on a new frame.
- * returned skb contains only LLC header and payload.
- * returns:
- * -EREMOTEIO : i2c read error (fatal)
- * -EBADMSG : frame was incorrect and discarded
- * -ENOMEM : cannot allocate skb, frame dropped
- */
-static int pn544_hci_i2c_read(struct i2c_client *client, struct sk_buff **skb)
-{
-	int r;
-	u8 len;
-	u8 tmp[PN544_HCI_LLC_MAX_SIZE - 1];
-
-	r = i2c_master_recv(client, &len, 1);
-	if (r != 1) {
-		dev_err(&client->dev, "cannot read len byte\n");
-		return -EREMOTEIO;
-	}
-
-	if ((len < (PN544_HCI_LLC_MIN_SIZE - 1)) ||
-	    (len > (PN544_HCI_LLC_MAX_SIZE - 1))) {
-		dev_err(&client->dev, "invalid len byte\n");
-		r = -EBADMSG;
-		goto flush;
-	}
-
-	*skb = alloc_skb(1 + len, GFP_KERNEL);
-	if (*skb == NULL) {
-		r = -ENOMEM;
-		goto flush;
-	}
-
-	*skb_put(*skb, 1) = len;
-
-	r = i2c_master_recv(client, skb_put(*skb, len), len);
-	if (r != len) {
-		kfree_skb(*skb);
-		return -EREMOTEIO;
-	}
-
-	r = check_crc((*skb)->data, (*skb)->len);
-	if (r != 0) {
-		kfree_skb(*skb);
-		r = -EBADMSG;
-		goto flush;
-	}
-
-	skb_pull(*skb, 1);
-	skb_trim(*skb, (*skb)->len - 2);
-
-	usleep_range(3000, 6000);
-
-	return 0;
-
-flush:
-	if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
-		r = -EREMOTEIO;
-
-	usleep_range(3000, 6000);
-
-	return r;
-}
-
-/*
- * Reads an shdlc frame from the chip. This is not as straightforward as it
- * seems. There are cases where we could loose the frame start synchronization.
- * The frame format is len-data-crc, and corruption can occur anywhere while
- * transiting on i2c bus, such that we could read an invalid len.
- * In order to recover synchronization with the next frame, we must be sure
- * to read the real amount of data without using the len byte. We do this by
- * assuming the following:
- * - the chip will always present only one single complete frame on the bus
- *   before triggering the interrupt
- * - the chip will not present a new frame until we have completely read
- *   the previous one (or until we have handled the interrupt).
- * The tricky case is when we read a corrupted len that is less than the real
- * len. We must detect this here in order to determine that we need to flush
- * the bus. This is the reason why we check the crc here.
- */
-static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
-{
-	struct pn544_hci_info *info = dev_id;
-	struct i2c_client *client;
-	struct sk_buff *skb = NULL;
-	int r;
-
-	if (!info || irq != info->i2c_dev->irq) {
-		WARN_ON_ONCE(1);
-		return IRQ_NONE;
-	}
-
-	client = info->i2c_dev;
-	dev_dbg(&client->dev, "IRQ\n");
-
-	if (info->hard_fault != 0)
-		return IRQ_HANDLED;
-
-	r = pn544_hci_i2c_read(client, &skb);
-	if (r == -EREMOTEIO) {
-		info->hard_fault = r;
-
-		nfc_hci_recv_frame(info->hdev, NULL);
-
-		return IRQ_HANDLED;
-	} else if ((r == -ENOMEM) || (r == -EBADMSG)) {
-		return IRQ_HANDLED;
-	}
-
-	nfc_hci_recv_frame(info->hdev, skb);
-
-	return IRQ_HANDLED;
-}
-
-static int pn544_hci_open(struct nfc_hci_dev *hdev)
-{
-	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
-	int r = 0;
-
-	mutex_lock(&info->info_lock);
-
-	if (info->state != PN544_ST_COLD) {
-		r = -EBUSY;
-		goto out;
-	}
-
-	r = pn544_hci_enable(info, HCI_MODE);
-
-	if (r == 0)
-		info->state = PN544_ST_READY;
-
-out:
-	mutex_unlock(&info->info_lock);
-	return r;
-}
-
-static void pn544_hci_close(struct nfc_hci_dev *hdev)
-{
-	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
-
-	mutex_lock(&info->info_lock);
-
-	if (info->state == PN544_ST_COLD)
-		goto out;
-
-	pn544_hci_disable(info);
-
-	info->state = PN544_ST_COLD;
-
-out:
-	mutex_unlock(&info->info_lock);
-}
-
-static int pn544_hci_ready(struct nfc_hci_dev *hdev)
-{
-	struct sk_buff *skb;
-	static struct hw_config {
-		u8 adr[2];
-		u8 value;
-	} hw_config[] = {
-		{{0x9f, 0x9a}, 0x00},
-
-		{{0x98, 0x10}, 0xbc},
-
-		{{0x9e, 0x71}, 0x00},
-
-		{{0x98, 0x09}, 0x00},
-
-		{{0x9e, 0xb4}, 0x00},
-
-		{{0x9e, 0xd9}, 0xff},
-		{{0x9e, 0xda}, 0xff},
-		{{0x9e, 0xdb}, 0x23},
-		{{0x9e, 0xdc}, 0x21},
-		{{0x9e, 0xdd}, 0x22},
-		{{0x9e, 0xde}, 0x24},
-
-		{{0x9c, 0x01}, 0x08},
-
-		{{0x9e, 0xaa}, 0x01},
-
-		{{0x9b, 0xd1}, 0x0d},
-		{{0x9b, 0xd2}, 0x24},
-		{{0x9b, 0xd3}, 0x0a},
-		{{0x9b, 0xd4}, 0x22},
-		{{0x9b, 0xd5}, 0x08},
-		{{0x9b, 0xd6}, 0x1e},
-		{{0x9b, 0xdd}, 0x1c},
-
-		{{0x9b, 0x84}, 0x13},
-		{{0x99, 0x81}, 0x7f},
-		{{0x99, 0x31}, 0x70},
-
-		{{0x98, 0x00}, 0x3f},
-
-		{{0x9f, 0x09}, 0x00},
-
-		{{0x9f, 0x0a}, 0x05},
-
-		{{0x9e, 0xd1}, 0xa1},
-		{{0x99, 0x23}, 0x00},
-
-		{{0x9e, 0x74}, 0x80},
-
-		{{0x9f, 0x28}, 0x10},
-
-		{{0x9f, 0x35}, 0x14},
-
-		{{0x9f, 0x36}, 0x60},
-
-		{{0x9c, 0x31}, 0x00},
-
-		{{0x9c, 0x32}, 0xc8},
-
-		{{0x9c, 0x19}, 0x40},
-
-		{{0x9c, 0x1a}, 0x40},
-
-		{{0x9c, 0x0c}, 0x00},
-
-		{{0x9c, 0x0d}, 0x00},
-
-		{{0x9c, 0x12}, 0x00},
-
-		{{0x9c, 0x13}, 0x00},
-
-		{{0x98, 0xa2}, 0x0e},
-
-		{{0x98, 0x93}, 0x40},
-
-		{{0x98, 0x7d}, 0x02},
-		{{0x98, 0x7e}, 0x00},
-		{{0x9f, 0xc8}, 0x01},
-	};
-	struct hw_config *p = hw_config;
-	int count = ARRAY_SIZE(hw_config);
-	struct sk_buff *res_skb;
-	u8 param[4];
-	int r;
-
-	param[0] = 0;
-	while (count--) {
-		param[1] = p->adr[0];
-		param[2] = p->adr[1];
-		param[3] = p->value;
-
-		r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE,
-				     param, 4, &res_skb);
-		if (r < 0)
-			return r;
-
-		if (res_skb->len != 1) {
-			kfree_skb(res_skb);
-			return -EPROTO;
-		}
-
-		if (res_skb->data[0] != p->value) {
-			kfree_skb(res_skb);
-			return -EIO;
-		}
-
-		kfree_skb(res_skb);
-
-		p++;
-	}
-
-	param[0] = NFC_HCI_UICC_HOST_ID;
-	r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
-			      NFC_HCI_ADMIN_WHITELIST, param, 1);
-	if (r < 0)
-		return r;
-
-	param[0] = 0x3d;
-	r = nfc_hci_set_param(hdev, PN544_SYS_MGMT_GATE,
-			      PN544_SYS_MGMT_INFO_NOTIFICATION, param, 1);
-	if (r < 0)
-		return r;
-
-	param[0] = 0x0;
-	r = nfc_hci_set_param(hdev, NFC_HCI_RF_READER_A_GATE,
-			      PN544_RF_READER_A_AUTO_ACTIVATION, param, 1);
-	if (r < 0)
-		return r;
-
-	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
-			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
-	if (r < 0)
-		return r;
-
-	param[0] = 0x1;
-	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
-			      PN544_PL_NFCT_DEACTIVATED, param, 1);
-	if (r < 0)
-		return r;
-
-	param[0] = 0x0;
-	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
-			      PN544_PL_RDPHASES, param, 1);
-	if (r < 0)
-		return r;
-
-	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
-			      PN544_ID_MGMT_FULL_VERSION_SW, &skb);
-	if (r < 0)
-		return r;
-
-	if (skb->len != FULL_VERSION_LEN) {
-		kfree_skb(skb);
-		return -EINVAL;
-	}
-
-	print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
-		       DUMP_PREFIX_NONE, 16, 1,
-		       skb->data, FULL_VERSION_LEN, false);
-
-	kfree_skb(skb);
-
-	return 0;
-}
-
-static void pn544_hci_add_len_crc(struct sk_buff *skb)
-{
-	u16 crc;
-	int len;
-
-	len = skb->len + 2;
-	*skb_push(skb, 1) = len;
-
-	crc = crc_ccitt(0xffff, skb->data, skb->len);
-	crc = ~crc;
-	*skb_put(skb, 1) = crc & 0xff;
-	*skb_put(skb, 1) = crc >> 8;
-}
-
-static void pn544_hci_remove_len_crc(struct sk_buff *skb)
-{
-	skb_pull(skb, PN544_FRAME_HEADROOM);
-	skb_trim(skb, PN544_FRAME_TAILROOM);
-}
-
-static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
-{
-	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
-	struct i2c_client *client = info->i2c_dev;
-	int r;
-
-	if (info->hard_fault != 0)
-		return info->hard_fault;
-
-	pn544_hci_add_len_crc(skb);
-	r = pn544_hci_i2c_write(client, skb->data, skb->len);
-	pn544_hci_remove_len_crc(skb);
-
-	return r;
-}
-
-static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
-				u32 im_protocols, u32 tm_protocols)
-{
-	u8 phases = 0;
-	int r;
-	u8 duration[2];
-	u8 activated;
-
-	pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
-		__func__, im_protocols, tm_protocols);
-
-	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
-			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
-	if (r < 0)
-		return r;
-
-	duration[0] = 0x18;
-	duration[1] = 0x6a;
-	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
-			      PN544_PL_EMULATION, duration, 2);
-	if (r < 0)
-		return r;
-
-	activated = 0;
-	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
-			      PN544_PL_NFCT_DEACTIVATED, &activated, 1);
-	if (r < 0)
-		return r;
-
-	if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
-			 NFC_PROTO_JEWEL_MASK))
-		phases |= 1;		/* Type A */
-	if (im_protocols & NFC_PROTO_FELICA_MASK) {
-		phases |= (1 << 2);	/* Type F 212 */
-		phases |= (1 << 3);	/* Type F 424 */
-	}
-
-	phases |= (1 << 5);		/* NFC active */
-
-	r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
-			      PN544_PL_RDPHASES, &phases, 1);
-	if (r < 0)
-		return r;
-
-	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
-			       NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
-	if (r < 0)
-		nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
-				   NFC_HCI_EVT_END_OPERATION, NULL, 0);
-
-	return r;
-}
-
-static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
-				      struct nfc_target *target)
-{
-	switch (gate) {
-	case PN544_RF_READER_F_GATE:
-		target->supported_protocols = NFC_PROTO_FELICA_MASK;
-		break;
-	case PN544_RF_READER_JEWEL_GATE:
-		target->supported_protocols = NFC_PROTO_JEWEL_MASK;
-		target->sens_res = 0x0c00;
-		break;
-	default:
-		return -EPROTO;
-	}
-
-	return 0;
-}
-
-static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
-						u8 gate,
-						struct nfc_target *target)
-{
-	struct sk_buff *uid_skb;
-	int r = 0;
-
-	if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
-		if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
-		    target->nfcid1_len != 10)
-			return -EPROTO;
-
-		r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
-				     PN544_RF_READER_CMD_ACTIVATE_NEXT,
-				     target->nfcid1, target->nfcid1_len, NULL);
-	} else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
-		r = nfc_hci_get_param(hdev, PN544_RF_READER_F_GATE,
-				      PN544_FELICA_ID, &uid_skb);
-		if (r < 0)
-			return r;
-
-		if (uid_skb->len != 8) {
-			kfree_skb(uid_skb);
-			return -EPROTO;
-		}
-
-		r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
-				     PN544_RF_READER_CMD_ACTIVATE_NEXT,
-				     uid_skb->data, uid_skb->len, NULL);
-		kfree_skb(uid_skb);
-	} else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
-		/*
-		 * TODO: maybe other ISO 14443 require some kind of continue
-		 * activation, but for now we've seen only this one below.
-		 */
-		if (target->sens_res == 0x4403)	/* Type 4 Mifare DESFire */
-			r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
-			      PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION,
-			      NULL, 0, NULL);
-	}
-
-	return r;
-}
-
-#define PN544_CB_TYPE_READER_F 1
-
-static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
-				       int err)
-{
-	struct pn544_hci_info *info = context;
-
-	switch (info->async_cb_type) {
-	case PN544_CB_TYPE_READER_F:
-		if (err == 0)
-			skb_pull(skb, 1);
-		info->async_cb(info->async_cb_context, skb, err);
-		break;
-	default:
-		if (err == 0)
-			kfree_skb(skb);
-		break;
-	}
-}
-
-#define MIFARE_CMD_AUTH_KEY_A	0x60
-#define MIFARE_CMD_AUTH_KEY_B	0x61
-#define MIFARE_CMD_HEADER	2
-#define MIFARE_UID_LEN		4
-#define MIFARE_KEY_LEN		6
-#define MIFARE_CMD_LEN		12
-/*
- * Returns:
- * <= 0: driver handled the data exchange
- *    1: driver doesn't especially handle, please do standard processing
- */
-static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
-				   struct nfc_target *target,
-				   struct sk_buff *skb, data_exchange_cb_t cb,
-				   void *cb_context)
-{
-	struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
-
-	pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
-		target->hci_reader_gate);
-
-	switch (target->hci_reader_gate) {
-	case NFC_HCI_RF_READER_A_GATE:
-		if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
-			/*
-			 * It seems that pn544 is inverting key and UID for
-			 * MIFARE authentication commands.
-			 */
-			if (skb->len == MIFARE_CMD_LEN &&
-			    (skb->data[0] == MIFARE_CMD_AUTH_KEY_A ||
-			     skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) {
-				u8 uid[MIFARE_UID_LEN];
-				u8 *data = skb->data + MIFARE_CMD_HEADER;
-
-				memcpy(uid, data + MIFARE_KEY_LEN,
-				       MIFARE_UID_LEN);
-				memmove(data + MIFARE_UID_LEN, data,
-					MIFARE_KEY_LEN);
-				memcpy(data, uid, MIFARE_UID_LEN);
-			}
-
-			return nfc_hci_send_cmd_async(hdev,
-						      target->hci_reader_gate,
-						      PN544_MIFARE_CMD,
-						      skb->data, skb->len,
-						      cb, cb_context);
-		} else
-			return 1;
-	case PN544_RF_READER_F_GATE:
-		*skb_push(skb, 1) = 0;
-		*skb_push(skb, 1) = 0;
-
-		info->async_cb_type = PN544_CB_TYPE_READER_F;
-		info->async_cb = cb;
-		info->async_cb_context = cb_context;
-
-		return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
-					      PN544_FELICA_RAW, skb->data,
-					      skb->len,
-					      pn544_hci_data_exchange_cb, info);
-	case PN544_RF_READER_JEWEL_GATE:
-		return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
-					      PN544_JEWEL_RAW_CMD, skb->data,
-					      skb->len, cb, cb_context);
-	default:
-		return 1;
-	}
-}
-
-static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
-				   struct nfc_target *target)
-{
-	return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-				PN544_RF_READER_CMD_PRESENCE_CHECK,
-				NULL, 0, NULL);
-}
-
-static struct nfc_hci_ops pn544_hci_ops = {
-	.open = pn544_hci_open,
-	.close = pn544_hci_close,
-	.hci_ready = pn544_hci_ready,
-	.xmit = pn544_hci_xmit,
-	.start_poll = pn544_hci_start_poll,
-	.target_from_gate = pn544_hci_target_from_gate,
-	.complete_target_discovered = pn544_hci_complete_target_discovered,
-	.data_exchange = pn544_hci_data_exchange,
-	.check_presence = pn544_hci_check_presence,
-};
-
-static int __devinit pn544_hci_probe(struct i2c_client *client,
-				     const struct i2c_device_id *id)
-{
-	struct pn544_hci_info *info;
-	struct pn544_nfc_platform_data *pdata;
-	int r = 0;
-	u32 protocols;
-	struct nfc_hci_init_data init_data;
-
-	dev_dbg(&client->dev, "%s\n", __func__);
-	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
-		return -ENODEV;
-	}
-
-	info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
-	if (!info) {
-		dev_err(&client->dev,
-			"Cannot allocate memory for pn544_hci_info.\n");
-		r = -ENOMEM;
-		goto err_info_alloc;
-	}
-
-	info->i2c_dev = client;
-	info->state = PN544_ST_COLD;
-	mutex_init(&info->info_lock);
-	i2c_set_clientdata(client, info);
-
-	pdata = client->dev.platform_data;
-	if (pdata == NULL) {
-		dev_err(&client->dev, "No platform data\n");
-		r = -EINVAL;
-		goto err_pdata;
-	}
-
-	if (pdata->request_resources == NULL) {
-		dev_err(&client->dev, "request_resources() missing\n");
-		r = -EINVAL;
-		goto err_pdata;
-	}
-
-	r = pdata->request_resources(client);
-	if (r) {
-		dev_err(&client->dev, "Cannot get platform resources\n");
-		goto err_pdata;
-	}
-
-	info->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
-	info->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
-	info->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
-
-	pn544_hci_platform_init(info);
-
-	r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
-				 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-				 PN544_HCI_DRIVER_NAME, info);
-	if (r < 0) {
-		dev_err(&client->dev, "Unable to register IRQ handler\n");
-		goto err_rti;
-	}
-
-	init_data.gate_count = ARRAY_SIZE(pn544_gates);
-
-	memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates));
-
-	/*
-	 * TODO: Session id must include the driver name + some bus addr
-	 * persistent info to discriminate 2 identical chips
-	 */
-	strcpy(init_data.session_id, "ID544HCI");
-
-	protocols = NFC_PROTO_JEWEL_MASK |
-		    NFC_PROTO_MIFARE_MASK |
-		    NFC_PROTO_FELICA_MASK |
-		    NFC_PROTO_ISO14443_MASK |
-		    NFC_PROTO_ISO14443_B_MASK |
-		    NFC_PROTO_NFC_DEP_MASK;
-
-	info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
-					     protocols, LLC_SHDLC_NAME,
-					     PN544_FRAME_HEADROOM +
-					     PN544_CMDS_HEADROOM,
-					     PN544_FRAME_TAILROOM,
-					     PN544_HCI_LLC_MAX_PAYLOAD);
-	if (!info->hdev) {
-		dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
-		r = -ENOMEM;
-		goto err_alloc_hdev;
-	}
-
-	nfc_hci_set_clientdata(info->hdev, info);
-
-	r = nfc_hci_register_device(info->hdev);
-	if (r)
-		goto err_regdev;
-
-	return 0;
-
-err_regdev:
-	nfc_hci_free_device(info->hdev);
-
-err_alloc_hdev:
-	free_irq(client->irq, info);
-
-err_rti:
-	if (pdata->free_resources != NULL)
-		pdata->free_resources();
-
-err_pdata:
-	kfree(info);
-
-err_info_alloc:
-	return r;
-}
-
-static __devexit int pn544_hci_remove(struct i2c_client *client)
-{
-	struct pn544_hci_info *info = i2c_get_clientdata(client);
-	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
-	dev_dbg(&client->dev, "%s\n", __func__);
-
-	nfc_hci_free_device(info->hdev);
-
-	if (info->state != PN544_ST_COLD) {
-		if (pdata->disable)
-			pdata->disable();
-	}
-
-	free_irq(client->irq, info);
-	if (pdata->free_resources)
-		pdata->free_resources();
-
-	kfree(info);
-
-	return 0;
-}
-
-static struct i2c_driver pn544_hci_driver = {
-	.driver = {
-		   .name = PN544_HCI_DRIVER_NAME,
-		  },
-	.probe = pn544_hci_probe,
-	.id_table = pn544_hci_id_table,
-	.remove = __devexit_p(pn544_hci_remove),
-};
-
-static int __init pn544_hci_init(void)
-{
-	int r;
-
-	pr_debug(DRIVER_DESC ": %s\n", __func__);
-
-	r = i2c_add_driver(&pn544_hci_driver);
-	if (r) {
-		pr_err(PN544_HCI_DRIVER_NAME ": driver registration failed\n");
-		return r;
-	}
-
-	return 0;
-}
-
-static void __exit pn544_hci_exit(void)
-{
-	i2c_del_driver(&pn544_hci_driver);
-}
-
-module_init(pn544_hci_init);
-module_exit(pn544_hci_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 258ca59..982d16b 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -6,7 +6,6 @@
 
 config PPS
 	tristate "PPS support"
-	depends on EXPERIMENTAL
 	---help---
 	  PPS (Pulse Per Second) is a special pulse provided by some GPS
 	  antennae. Userland can use it to get a high-precision time
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ffdf712..1ea6f1d 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -4,13 +4,9 @@
 
 menu "PTP clock support"
 
-comment "Enable Device Drivers -> PPS to see the PTP clock options."
-	depends on PPS=n
-
 config PTP_1588_CLOCK
 	tristate "PTP clock support"
-	depends on EXPERIMENTAL
-	depends on PPS
+	select PPS
 	help
 	  The IEEE 1588 standard defines a method to precisely
 	  synchronize distributed clocks over Ethernet networks. The
@@ -29,8 +25,9 @@
 
 config PTP_1588_CLOCK_GIANFAR
 	tristate "Freescale eTSEC as PTP clock"
-	depends on PTP_1588_CLOCK
 	depends on GIANFAR
+	select PTP_1588_CLOCK
+	default y
 	help
 	  This driver adds support for using the eTSEC as a PTP
 	  clock. This clock is only useful if your PTP programs are
@@ -42,8 +39,9 @@
 
 config PTP_1588_CLOCK_IXP46X
 	tristate "Intel IXP46x as PTP clock"
-	depends on PTP_1588_CLOCK
 	depends on IXP4XX_ETH
+	select PTP_1588_CLOCK
+	default y
 	help
 	  This driver adds support for using the IXP46X as a PTP
 	  clock. This clock is only useful if your PTP programs are
@@ -54,13 +52,13 @@
 	  will be called ptp_ixp46x.
 
 comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
-	depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n)
+	depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
 
 config DP83640_PHY
 	tristate "Driver for the National Semiconductor DP83640 PHYTER"
-	depends on PTP_1588_CLOCK
 	depends on NETWORK_PHY_TIMESTAMPING
 	depends on PHYLIB
+	select PTP_1588_CLOCK
 	---help---
 	  Supports the DP83640 PHYTER with IEEE 1588 features.
 
@@ -74,8 +72,7 @@
 
 config PTP_1588_CLOCK_PCH
 	tristate "Intel PCH EG20T as PTP clock"
-	depends on PTP_1588_CLOCK
-	depends on PCH_GBE
+	select PTP_1588_CLOCK
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
 	  clock. The hardware supports time stamping of PTP packets
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index e7f301da2..34a0c60 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -21,6 +21,7 @@
 #include <linux/posix-clock.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include "ptp_private.h"
 
@@ -33,9 +34,13 @@
 {
 	struct ptp_clock_caps caps;
 	struct ptp_clock_request req;
+	struct ptp_sys_offset *sysoff = NULL;
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 	struct ptp_clock_info *ops = ptp->info;
+	struct ptp_clock_time *pct;
+	struct timespec ts;
 	int enable, err = 0;
+	unsigned int i;
 
 	switch (cmd) {
 
@@ -88,10 +93,45 @@
 		err = ops->enable(ops, &req, enable);
 		break;
 
+	case PTP_SYS_OFFSET:
+		sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
+		if (!sysoff) {
+			err = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(sysoff, (void __user *)arg,
+				   sizeof(*sysoff))) {
+			err = -EFAULT;
+			break;
+		}
+		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
+			err = -EINVAL;
+			break;
+		}
+		pct = &sysoff->ts[0];
+		for (i = 0; i < sysoff->n_samples; i++) {
+			getnstimeofday(&ts);
+			pct->sec = ts.tv_sec;
+			pct->nsec = ts.tv_nsec;
+			pct++;
+			ptp->info->gettime(ptp->info, &ts);
+			pct->sec = ts.tv_sec;
+			pct->nsec = ts.tv_nsec;
+			pct++;
+		}
+		getnstimeofday(&ts);
+		pct->sec = ts.tv_sec;
+		pct->nsec = ts.tv_nsec;
+		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
+			err = -EFAULT;
+		break;
+
 	default:
 		err = -ENOTTY;
 		break;
 	}
+
+	kfree(sysoff);
 	return err;
 }
 
@@ -104,20 +144,23 @@
 	return queue_cnt(&ptp->tsevq) ? POLLIN : 0;
 }
 
+#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
+
 ssize_t ptp_read(struct posix_clock *pc,
 		 uint rdflags, char __user *buf, size_t cnt)
 {
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 	struct timestamp_event_queue *queue = &ptp->tsevq;
-	struct ptp_extts_event event[PTP_BUF_TIMESTAMPS];
+	struct ptp_extts_event *event;
 	unsigned long flags;
 	size_t qcnt, i;
+	int result;
 
 	if (cnt % sizeof(struct ptp_extts_event) != 0)
 		return -EINVAL;
 
-	if (cnt > sizeof(event))
-		cnt = sizeof(event);
+	if (cnt > EXTTS_BUFSIZE)
+		cnt = EXTTS_BUFSIZE;
 
 	cnt = cnt / sizeof(struct ptp_extts_event);
 
@@ -135,6 +178,12 @@
 		return -ENODEV;
 	}
 
+	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
+	if (!event) {
+		mutex_unlock(&ptp->tsevq_mux);
+		return -ENOMEM;
+	}
+
 	spin_lock_irqsave(&queue->lock, flags);
 
 	qcnt = queue_cnt(queue);
@@ -153,8 +202,10 @@
 
 	mutex_unlock(&ptp->tsevq_mux);
 
+	result = cnt;
 	if (copy_to_user(buf, event, cnt))
-		return -EFAULT;
+		result = -EFAULT;
 
-	return cnt;
+	kfree(event);
+	return result;
 }
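
The PTP_SYS_OFFSET handler added above fills ts[] with alternating readings (system time, PHC time, system time, ...), ending on a system-time reading, so userspace can bracket each PHC sample between two system clock samples. A rough user-space sketch of consuming that data follows; the /dev/ptp0 path and the sample count are placeholders, and PTP_SYS_OFFSET and struct ptp_sys_offset are assumed to come from the matching linux/ptp_clock.h uapi header.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset sysoff;
	long long t1, phc, t2;
	unsigned int i;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);		/* placeholder clock device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&sysoff, 0, sizeof(sysoff));
	sysoff.n_samples = 5;			/* must not exceed PTP_MAX_SAMPLES */

	if (ioctl(fd, PTP_SYS_OFFSET, &sysoff)) {
		perror("PTP_SYS_OFFSET");
		close(fd);
		return 1;
	}

	/* ts[] layout: sys, phc, sys, phc, ..., sys (2 * n_samples + 1 entries) */
	for (i = 0; i < sysoff.n_samples; i++) {
		t1  = sysoff.ts[2 * i].sec * 1000000000LL + sysoff.ts[2 * i].nsec;
		phc = sysoff.ts[2 * i + 1].sec * 1000000000LL + sysoff.ts[2 * i + 1].nsec;
		t2  = sysoff.ts[2 * i + 2].sec * 1000000000LL + sysoff.ts[2 * i + 2].nsec;

		/* PHC minus the midpoint of the surrounding system readings */
		printf("sample %u: phc - sys = %lld ns\n", i, phc - (t1 + t2) / 2);
	}

	close(fd);
	return 0;
}

Taking several samples lets a caller keep the one with the tightest t2 - t1 bracket and discard readings disturbed by preemption.
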
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a0a4afe..5c70a65 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3018,10 +3018,8 @@
 {
 	struct claw_privbk *priv;
 
-	BUG_ON(!cgdev);
 	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
 	priv = dev_get_drvdata(&cgdev->dev);
-	BUG_ON(!priv);
 	dev_info(&cgdev->dev, " will be removed.\n");
 	if (cgdev->state == CCWGROUP_ONLINE)
 		claw_shutdown_device(cgdev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 98ea9cc..817b689 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1691,8 +1691,6 @@
 {
 	struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
 
-	BUG_ON(priv == NULL);
-
 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
 			"removing device %p, proto : %d",
 			cgdev, priv->protocol);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 05b734a..2dbc77b 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1367,7 +1367,6 @@
 	struct mpc_group *grp;
 	struct channel *wch;
 
-	BUG_ON(dev == NULL);
 	CTCM_PR_DEBUG("Enter %s: %s\n",	__func__, dev->name);
 
 	priv  = dev->ml_priv;
@@ -1472,8 +1471,6 @@
 	struct channel *wch;
 	struct channel *rch;
 
-	BUG_ON(dev == NULL);
-
 	priv = dev->ml_priv;
 	grp = priv->mpcg;
 	wch = priv->channel[CTCM_WRITE];
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fa7adad..480fbea 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -933,6 +933,7 @@
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
+void qeth_trace_features(struct qeth_card *);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d6ba00..638a57f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,13 +73,13 @@
 	if (card->info.guestlan) {
 		switch (card->info.type) {
 		case QETH_CARD_TYPE_OSD:
-			return " Guest LAN QDIO";
+			return " Virtual NIC QDIO";
 		case QETH_CARD_TYPE_IQD:
-			return " Guest LAN Hiper";
+			return " Virtual NIC Hiper";
 		case QETH_CARD_TYPE_OSM:
-			return " Guest LAN QDIO - OSM";
+			return " Virtual NIC QDIO - OSM";
 		case QETH_CARD_TYPE_OSX:
-			return " Guest LAN QDIO - OSX";
+			return " Virtual NIC QDIO - OSX";
 		default:
 			return " unknown";
 		}
@@ -108,13 +108,13 @@
 	if (card->info.guestlan) {
 		switch (card->info.type) {
 		case QETH_CARD_TYPE_OSD:
-			return "GuestLAN QDIO";
+			return "Virt.NIC QDIO";
 		case QETH_CARD_TYPE_IQD:
-			return "GuestLAN Hiper";
+			return "Virt.NIC Hiper";
 		case QETH_CARD_TYPE_OSM:
-			return "GuestLAN OSM";
+			return "Virt.NIC OSM";
 		case QETH_CARD_TYPE_OSX:
-			return "GuestLAN OSX";
+			return "Virt.NIC OSX";
 		default:
 			return "unknown";
 		}
@@ -383,7 +383,7 @@
 				qeth_release_skbs(c);
 
 				c = f->next_pending;
-				BUG_ON(head->next_pending != f);
+				WARN_ON_ONCE(head->next_pending != f);
 				head->next_pending = c;
 				kmem_cache_free(qeth_qdio_outbuf_cache, f);
 			} else {
@@ -415,13 +415,12 @@
 	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
 	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
 
-	BUG_ON(buffer == NULL);
-
 	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
 			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
 		notification = TX_NOTIFY_OK;
 	} else {
-		BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
+		WARN_ON_ONCE(atomic_read(&buffer->state) !=
+							QETH_QDIO_BUF_PENDING);
 		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
 		notification = TX_NOTIFY_DELAYED_OK;
 	}
@@ -1131,7 +1130,7 @@
 		notify_general_error = 1;
 
 	/* release may never happen from within CQ tasklet scope */
-	BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
 	skb = skb_dequeue(&buf->skb_list);
 	while (skb) {
@@ -2280,7 +2279,6 @@
 		unsigned long data)
 {
 	struct qeth_cmd_buffer *iob;
-	int rc = 0;
 
 	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
 
@@ -2296,7 +2294,7 @@
 		iob->rc = -EMLINK;
 	}
 	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
-	return rc;
+	return 0;
 }
 
 static int qeth_ulp_setup(struct qeth_card *card)
@@ -2401,7 +2399,7 @@
 		card->qdio.out_qs[i]->queue_no = i;
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+			WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
 			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
 				goto out_freeoutqbufs;
 		}
@@ -2969,9 +2967,6 @@
 	} else
 		QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
 					"\n", dev_name(&card->gdev->dev));
-	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
-	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
-	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
 	return 0;
 }
 
@@ -3569,7 +3564,7 @@
 		if (queue->bufstates &&
 		    (queue->bufstates[bidx].flags &
 		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
-			BUG_ON(card->options.cq != QETH_CQ_ENABLED);
+			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
 
 			if (atomic_cmpxchg(&buffer->state,
 					   QETH_QDIO_BUF_PRIMED,
@@ -3583,7 +3578,6 @@
 			QETH_CARD_TEXT(queue->card, 5, "aob");
 			QETH_CARD_TEXT_(queue->card, 5, "%lx",
 					virt_to_phys(buffer->aob));
-			BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
 			if (qeth_init_qdio_out_buf(queue, bidx)) {
 				QETH_CARD_TEXT(card, 2, "outofbuf");
 				qeth_schedule_recovery(card);
@@ -4731,6 +4725,19 @@
 	kfree(card);
 }
 
+void qeth_trace_features(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 2, "features");
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs);
+	QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support);
+}
+EXPORT_SYMBOL_GPL(qeth_trace_features);
+
 static struct ccw_device_id qeth_ids[] = {
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
 					.driver_info = QETH_CARD_TYPE_OSD},
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index fddb626..7319555 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -411,7 +411,7 @@
 	unsigned int len;
 
 	*done = 0;
-	BUG_ON(!budget);
+	WARN_ON_ONCE(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
 			&card->qdio.in_q->bufs[card->rx.b_index],
@@ -973,7 +973,6 @@
 	int rc = 0;
 	enum qeth_card_states recover_flag;
 
-	BUG_ON(!card);
 	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -986,6 +985,7 @@
 		rc = -ENODEV;
 		goto out_remove;
 	}
+	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l2_setup_netdev(card)) {
 		rc = -ENODEV;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5ba3906..6e5eef0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1939,7 +1939,7 @@
 	__u16 magic;
 
 	*done = 0;
-	BUG_ON(!budget);
+	WARN_ON_ONCE(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
 			&card->qdio.in_q->bufs[card->rx.b_index],
@@ -3334,7 +3334,6 @@
 	int rc = 0;
 	enum qeth_card_states recover_flag;
 
-	BUG_ON(!card);
 	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -3347,6 +3346,7 @@
 		rc = -ENODEV;
 		goto out_remove;
 	}
+	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l3_setup_netdev(card)) {
 		rc = -ENODEV;
@@ -3714,9 +3714,9 @@
 {
 
 	QETH_DBF_TEXT(SETUP, 5, "unregnot");
-	BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
+	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
 #ifdef CONFIG_QETH_IPV6
-	BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
+	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
 #endif /* QETH_IPV6 */
 }
 
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 266aa16..19396dc 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -37,6 +37,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index b58fef7..d7d5804 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -346,6 +346,8 @@
 			chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0);
 		}
 		break;
+	case 43222:
+		break;
 	default:
 		ssb_printk(KERN_ERR PFX
 			   "ERROR: PLL init unknown for device %04X\n",
@@ -434,6 +436,7 @@
 		 min_msk = 0xCBB;
 		 break;
 	case 0x4322:
+	case 43222:
 		/* We keep the default settings:
 		 * min_msk = 0xCBB
 		 * max_msk = 0x7FFFF
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index c625086..b918ba9 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -192,9 +192,10 @@
 
 	/* When there is no chipcommon on the bus there is 4MB flash */
 	if (!bus->chipco.dev) {
-		mcore->flash_buswidth = 2;
-		mcore->flash_window = SSB_FLASH1;
-		mcore->flash_window_size = SSB_FLASH1_SZ;
+		mcore->pflash.present = true;
+		mcore->pflash.buswidth = 2;
+		mcore->pflash.window = SSB_FLASH1;
+		mcore->pflash.window_size = SSB_FLASH1_SZ;
 		return;
 	}
 
@@ -206,13 +207,14 @@
 		break;
 	case SSB_CHIPCO_FLASHT_PARA:
 		pr_debug("Found parallel flash\n");
-		mcore->flash_window = SSB_FLASH2;
-		mcore->flash_window_size = SSB_FLASH2_SZ;
+		mcore->pflash.present = true;
+		mcore->pflash.window = SSB_FLASH2;
+		mcore->pflash.window_size = SSB_FLASH2_SZ;
 		if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
 		               & SSB_CHIPCO_CFG_DS16) == 0)
-			mcore->flash_buswidth = 1;
+			mcore->pflash.buswidth = 1;
 		else
-			mcore->flash_buswidth = 2;
+			mcore->pflash.buswidth = 2;
 		break;
 	}
 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7f93f34..67898fa 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -42,6 +42,21 @@
 #define VHOST_MAX_PEND 128
 #define VHOST_GOODCOPY_LEN 256
 
+/*
+ * For transmit, used buffer len is unused; we override it to track buffer
+ * status internally; used for zerocopy tx only.
+ */
+/* Lower device DMA failed */
+#define VHOST_DMA_FAILED_LEN	3
+/* Lower device DMA done */
+#define VHOST_DMA_DONE_LEN	2
+/* Lower device DMA in progress */
+#define VHOST_DMA_IN_PROGRESS	1
+/* Buffer unused */
+#define VHOST_DMA_CLEAR_LEN	0
+
+#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+
 enum {
 	VHOST_NET_VQ_RX = 0,
 	VHOST_NET_VQ_TX = 1,
@@ -62,8 +77,33 @@
 	 * We only do this when socket buffer fills up.
 	 * Protected by tx vq lock. */
 	enum vhost_net_poll_state tx_poll_state;
+	/* Number of TX recently submitted.
+	 * Protected by tx vq lock. */
+	unsigned tx_packets;
+	/* Number of times zerocopy TX recently failed.
+	 * Protected by tx vq lock. */
+	unsigned tx_zcopy_err;
 };
 
+static void vhost_net_tx_packet(struct vhost_net *net)
+{
+	++net->tx_packets;
+	if (net->tx_packets < 1024)
+		return;
+	net->tx_packets = 0;
+	net->tx_zcopy_err = 0;
+}
+
+static void vhost_net_tx_err(struct vhost_net *net)
+{
+	++net->tx_zcopy_err;
+}
+
+static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
+{
+	return net->tx_packets / 64 >= net->tx_zcopy_err;
+}
+
 static bool vhost_sock_zcopy(struct socket *sock)
 {
 	return unlikely(experimental_zcopytx) &&
@@ -126,6 +166,55 @@
 	net->tx_poll_state = VHOST_NET_POLL_STARTED;
 }
 
+/* The lower device driver may complete DMA out of order for some reason.
+ * upend_idx tracks the end of the used idx, done_idx tracks its head.
+ * Once the lower device has completed DMA contiguously, we signal the
+ * used idx to the KVM guest.
+ */
+static int vhost_zerocopy_signal_used(struct vhost_net *net,
+				      struct vhost_virtqueue *vq)
+{
+	int i;
+	int j = 0;
+
+	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
+			vhost_net_tx_err(net);
+		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
+			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+			vhost_add_used_and_signal(vq->dev, vq,
+						  vq->heads[i].id, 0);
+			++j;
+		} else
+			break;
+	}
+	if (j)
+		vq->done_idx = i;
+	return j;
+}
+
+static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+{
+	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+	struct vhost_virtqueue *vq = ubufs->vq;
+	int cnt = atomic_read(&ubufs->kref.refcount);
+
+	/*
+	 * Trigger polling thread if guest stopped submitting new buffers:
+	 * in this case, the refcount after decrement will eventually reach 1
+	 * so here it is 2.
+ * We also trigger polling periodically after every 16 packets
+ * (the value 16 here is more or less arbitrary; it is tuned to trigger
+ * less than 10% of the time).
+	 */
+	if (cnt <= 2 || !(cnt % 16))
+		vhost_poll_queue(&vq->poll);
+	/* set len to mark this desc's buffers as having completed DMA */
+	vq->heads[ubuf->desc].len = success ?
+		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+	vhost_ubuf_put(ubufs);
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
@@ -172,7 +261,7 @@
 	for (;;) {
 		/* Release DMAs done buffers first */
 		if (zcopy)
-			vhost_zerocopy_signal_used(vq);
+			vhost_zerocopy_signal_used(net, vq);
 
 		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
 					 ARRAY_SIZE(vq->iov),
@@ -227,7 +316,8 @@
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
 		if (zcopy) {
 			vq->heads[vq->upend_idx].id = head;
-			if (len < VHOST_GOODCOPY_LEN) {
+			if (!vhost_net_tx_select_zcopy(net) ||
+			    len < VHOST_GOODCOPY_LEN) {
 				/* copy don't need to wait for DMA done */
 				vq->heads[vq->upend_idx].len =
 							VHOST_DMA_DONE_LEN;
@@ -237,7 +327,8 @@
 			} else {
 				struct ubuf_info *ubuf = &vq->ubuf_info[head];
 
-				vq->heads[vq->upend_idx].len = len;
+				vq->heads[vq->upend_idx].len =
+					VHOST_DMA_IN_PROGRESS;
 				ubuf->callback = vhost_zerocopy_callback;
 				ubuf->ctx = vq->ubufs;
 				ubuf->desc = vq->upend_idx;
@@ -268,8 +359,9 @@
 		if (!zcopy)
 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
 		else
-			vhost_zerocopy_signal_used(vq);
+			vhost_zerocopy_signal_used(net, vq);
 		total_len += len;
+		vhost_net_tx_packet(net);
 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
 			vhost_poll_queue(&vq->poll);
 			break;
@@ -594,9 +686,18 @@
 	struct vhost_net *n = f->private_data;
 	struct socket *tx_sock;
 	struct socket *rx_sock;
+	int i;
 
 	vhost_net_stop(n, &tx_sock, &rx_sock);
 	vhost_net_flush(n);
+	vhost_dev_stop(&n->dev);
+	for (i = 0; i < n->dev.nvqs; ++i) {
+		/* Wait for all lower device DMAs done. */
+		if (n->dev.vqs[i].ubufs)
+			vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
+
+		vhost_zerocopy_signal_used(n, &n->dev.vqs[i]);
+	}
 	vhost_dev_cleanup(&n->dev, false);
 	if (tx_sock)
 		fput(tx_sock->file);
@@ -729,7 +830,7 @@
 	if (oldubufs) {
 		vhost_ubuf_put_and_wait(oldubufs);
 		mutex_lock(&vq->mutex);
-		vhost_zerocopy_signal_used(vq);
+		vhost_zerocopy_signal_used(n, vq);
 		mutex_unlock(&vq->mutex);
 	}
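
The counters added above drive vhost_net_tx_select_zcopy(): zerocopy stays enabled while recent DMA failures remain at or below roughly 1/64 of transmitted packets, and both counters reset every 1024 packets. A minimal standalone sketch of that heuristic follows; the struct and function names are hypothetical, not part of the driver.

/* Standalone sketch of the zerocopy selection heuristic; names are made up. */
struct zcopy_stats {
	unsigned packets;	/* TX packets in the current window */
	unsigned zcopy_err;	/* zerocopy DMA failures in the window */
};

static int zcopy_allowed(const struct zcopy_stats *s)
{
	/* e.g. with a 1024-packet window, up to 16 failures are tolerated */
	return s->packets / 64 >= s->zcopy_err;
}

static void zcopy_account_packet(struct zcopy_stats *s)
{
	if (++s->packets < 1024)
		return;
	s->packets = 0;		/* reset the window periodically */
	s->zcopy_err = 0;
}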
 
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index aa31692..ef88844 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -34,7 +34,6 @@
 #include <linux/ctype.h>
 #include <linux/compat.h>
 #include <linux/eventfd.h>
-#include <linux/vhost.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
@@ -895,6 +894,7 @@
 		vhost_scsi_clear_endpoint(s, &backend);
 	}
 
+	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
 	kfree(s);
 	return 0;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dedaf81..5a3d0f1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -26,10 +26,6 @@
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 
-#include <linux/net.h>
-#include <linux/if_packet.h>
-#include <linux/if_arp.h>
-
 #include "vhost.h"
 
 enum {
@@ -414,32 +410,7 @@
 	return 0;
 }
 
-/* In case of DMA done not in order in lower device driver for some reason.
- * upend_idx is used to track end of used idx, done_idx is used to track head
- * of used idx. Once lower device DMA done contiguously, we will signal KVM
- * guest used idx.
- */
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
-{
-	int i;
-	int j = 0;
-
-	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
-		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
-			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
-			vhost_add_used_and_signal(vq->dev, vq,
-						  vq->heads[i].id, 0);
-			++j;
-		} else
-			break;
-	}
-	if (j)
-		vq->done_idx = i;
-	return j;
-}
-
-/* Caller should have device mutex if and only if locked is set */
-void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
+void vhost_dev_stop(struct vhost_dev *dev)
 {
 	int i;
 
@@ -448,13 +419,15 @@
 			vhost_poll_stop(&dev->vqs[i].poll);
 			vhost_poll_flush(&dev->vqs[i].poll);
 		}
-		/* Wait for all lower device DMAs done. */
-		if (dev->vqs[i].ubufs)
-			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
+	}
+}
 
-		/* Signal guest as appropriate. */
-		vhost_zerocopy_signal_used(&dev->vqs[i]);
+/* Caller should have device mutex if and only if locked is set */
+void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
+{
+	int i;
 
+	for (i = 0; i < dev->nvqs; ++i) {
 		if (dev->vqs[i].error_ctx)
 			eventfd_ctx_put(dev->vqs[i].error_ctx);
 		if (dev->vqs[i].error)
@@ -1599,14 +1572,3 @@
 	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
 	kfree(ubufs);
 }
-
-void vhost_zerocopy_callback(struct ubuf_info *ubuf)
-{
-	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
-	struct vhost_virtqueue *vq = ubufs->vq;
-
-	vhost_poll_queue(&vq->poll);
-	/* set len = 1 to mark this desc buffers done DMA */
-	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
-	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
-}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1125af3..5e19e3d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -7,17 +7,11 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/file.h>
-#include <linux/skbuff.h>
 #include <linux/uio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
 
-/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
- * done */
-#define VHOST_DMA_DONE_LEN	1
-#define VHOST_DMA_CLEAR_LEN	0
-
 struct vhost_device;
 
 struct vhost_work;
@@ -70,6 +64,8 @@
 void vhost_ubuf_put(struct vhost_ubuf_ref *);
 void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
 
+struct ubuf_info;
+
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
@@ -167,6 +163,7 @@
 long vhost_dev_check_owner(struct vhost_dev *);
 long vhost_dev_reset_owner(struct vhost_dev *);
 void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
 int vhost_vq_access_ok(struct vhost_virtqueue *vq);
 int vhost_log_access_ok(struct vhost_dev *);
@@ -191,8 +188,6 @@
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(struct ubuf_info *);
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
 
 #define vq_err(vq, fmt, ...) do {                                  \
 		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index a781bdf..701580d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -378,12 +378,13 @@
 	return -EACCES;
 }
 
-static int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
+static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op)
 {
+	struct ctl_table_root *root = head->root;
 	int mode;
 
 	if (root->permissions)
-		mode = root->permissions(root, current->nsproxy, table);
+		mode = root->permissions(head, table);
 	else
 		mode = table->mode;
 
@@ -491,7 +492,7 @@
 	 * and won't be until we finish.
 	 */
 	error = -EPERM;
-	if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
+	if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
 		goto out;
 
 	/* if that can happen at all, it should be -EINVAL, not -EISDIR */
@@ -717,7 +718,7 @@
 	if (!table) /* global root - r-xr-xr-x */
 		error = mask & MAY_WRITE ? -EACCES : 0;
 	else /* Use the permissions on the sysctl table entry */
-		error = sysctl_perm(head->root, table, mask & ~MAY_NOT_BLOCK);
+		error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK);
 
 	sysctl_head_finish(head);
 	return error;
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4180eb7..93b1e09 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -157,6 +157,7 @@
 
 /* Chip IDs of SoCs */
 #define BCMA_CHIP_ID_BCM4706	0x5300
+#define  BCMA_PKG_ID_BCM4706L	1
 #define BCMA_CHIP_ID_BCM4716	0x4716
 #define  BCMA_PKG_ID_BCM4716	8
 #define  BCMA_PKG_ID_BCM4717	9
@@ -166,7 +167,11 @@
 #define BCMA_CHIP_ID_BCM4749	0x4749
 #define BCMA_CHIP_ID_BCM5356	0x5356
 #define BCMA_CHIP_ID_BCM5357	0x5357
+#define  BCMA_PKG_ID_BCM5358	9
+#define  BCMA_PKG_ID_BCM47186	10
+#define  BCMA_PKG_ID_BCM5357	11
 #define BCMA_CHIP_ID_BCM53572	53572
+#define  BCMA_PKG_ID_BCM47188	9
 
 struct bcma_device {
 	struct bcma_bus *bus;
@@ -251,7 +256,7 @@
 	u8 num;
 
 	struct bcma_drv_cc drv_cc;
-	struct bcma_drv_pci drv_pci;
+	struct bcma_drv_pci drv_pci[2];
 	struct bcma_drv_mips drv_mips;
 	struct bcma_drv_gmac_cmn drv_gmac_cmn;
 
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1cf1749..145f3c5 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -510,6 +510,7 @@
 
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 struct bcma_pflash {
+	bool present;
 	u8 buswidth;
 	u32 window;
 	u32 window_size;
@@ -532,6 +533,7 @@
 
 struct bcma_nflash {
 	bool present;
+	bool boot;		/* This is the flash the SoC boots from */
 
 	struct mtd_info *mtd;
 };
@@ -552,6 +554,7 @@
 	u32 capabilities;
 	u32 capabilities_ext;
 	u8 setup_done:1;
+	u8 early_setup_done:1;
 	/* Fast Powerup Delay constant */
 	u16 fast_pwrup_delay;
 	struct bcma_chipcommon_pmu pmu;
@@ -583,6 +586,7 @@
 	bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
 
 extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 
 extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
 extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
@@ -606,6 +610,7 @@
 
 /* PMU support */
 extern void bcma_pmu_init(struct bcma_drv_cc *cc);
+extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
 
 extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
 				  u32 value);
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index c004364..0baf8a5 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -35,13 +35,16 @@
 struct bcma_drv_mips {
 	struct bcma_device *core;
 	u8 setup_done:1;
+	u8 early_setup_done:1;
 	unsigned int assigned_irqs;
 };
 
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
 #else
 static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
 #endif
 
 extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 6c9cb93..7e8104b 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -85,6 +85,9 @@
 							 * (2 ZettaBytes), high 32 bits
 							 */
 
-#define BCMA_SFLASH			0x1c000000
+#define BCMA_SOC_FLASH1			0x1fc00000	/* MIPS Flash Region 1 */
+#define BCMA_SOC_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define BCMA_SOC_FLASH2			0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define BCMA_SOC_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
 
 #endif /* LINUX_BCMA_REGS_H_ */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index b006ba0..243eea1 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -51,6 +51,26 @@
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+/**
+ * is_link_local_ether_addr - Determine if given Ethernet address is link-local
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * IEEE 802.1Q 8.6.3 Frame filtering.
+ */
+static inline bool is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+
 /**
  * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
  * @addr: Pointer to a six-byte array containing the Ethernet address
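
The new is_link_local_ether_addr() helper matches 01:80:c2:00:00:00 through 01:80:c2:00:00:0f using three 16-bit XORs and a mask on the final word. As a hedged illustration, a bridge-style receive path might use it as below; the surrounding function is hypothetical.

/* Hypothetical rx-path check: 802.1Q reserved link-local destinations
 * (01:80:c2:00:00:0X) must be consumed locally and never forwarded. */
static bool example_may_forward(const struct sk_buff *skb)
{
	const struct ethhdr *eth = eth_hdr(skb);

	return !is_link_local_ether_addr(eth->h_dest);
}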
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 24d251f..c45eabc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -45,6 +45,7 @@
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
 
 #ifdef CONFIG_BPF_JIT
 extern void bpf_jit_compile(struct sk_filter *fp);
@@ -123,6 +124,8 @@
 	BPF_S_ANC_CPU,
 	BPF_S_ANC_ALU_XOR_X,
 	BPF_S_ANC_SECCOMP_LD_W,
+	BPF_S_ANC_VLAN_TAG,
+	BPF_S_ANC_VLAN_TAG_PRESENT,
 };
 
 #endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
index 1fb2644..e69de29 100644
--- a/include/linux/hdlc/Kbuild
+++ b/include/linux/hdlc/Kbuild
@@ -1 +0,0 @@
-header-y += ioctl.h
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 2385119..f9c5a78 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -131,6 +131,8 @@
 
 #define IEEE80211_MAX_MESH_ID_LEN	32
 
+#define IEEE80211_NUM_TIDS		16
+
 #define IEEE80211_QOS_CTL_LEN		2
 /* 1d tag mask */
 #define IEEE80211_QOS_CTL_TAG1D_MASK		0x0007
@@ -666,6 +668,21 @@
 } __attribute__ ((packed));
 
 /**
+ * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
+ *
+ * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ *	additional mesh peerings with other mesh STAs
+ * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
+ *	is ongoing
+ */
+enum mesh_config_capab_flags {
+	IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS		= 0x01,
+	IEEE80211_MESHCONF_CAPAB_FORWARDING		= 0x08,
+	IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING		= 0x20,
+};
+
+/**
  * struct ieee80211_rann_ie
  *
  * This structure refers to "Root Announcement information element"
@@ -905,6 +922,38 @@
 	} u;
 } __packed;
 
+/*
+ * Peer-to-Peer IE attribute related definitions.
+ */
+/**
+ * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
+ */
+enum ieee80211_p2p_attr_id {
+	IEEE80211_P2P_ATTR_STATUS = 0,
+	IEEE80211_P2P_ATTR_MINOR_REASON,
+	IEEE80211_P2P_ATTR_CAPABILITY,
+	IEEE80211_P2P_ATTR_DEVICE_ID,
+	IEEE80211_P2P_ATTR_GO_INTENT,
+	IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
+	IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+	IEEE80211_P2P_ATTR_GROUP_BSSID,
+	IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
+	IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
+	IEEE80211_P2P_ATTR_MANAGABILITY,
+	IEEE80211_P2P_ATTR_CHANNEL_LIST,
+	IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+	IEEE80211_P2P_ATTR_DEVICE_INFO,
+	IEEE80211_P2P_ATTR_GROUP_INFO,
+	IEEE80211_P2P_ATTR_GROUP_ID,
+	IEEE80211_P2P_ATTR_INTERFACE,
+	IEEE80211_P2P_ATTR_OPER_CHANNEL,
+	IEEE80211_P2P_ATTR_INVITE_FLAGS,
+	/* 19 - 220: Reserved */
+	IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
+
+	IEEE80211_P2P_ATTR_MAX
+};
+
 /**
  * struct ieee80211_bar - HT Block Ack Request
  *
@@ -1107,20 +1156,6 @@
 #define WLAN_HT_SMPS_CONTROL_STATIC	1
 #define WLAN_HT_SMPS_CONTROL_DYNAMIC	3
 
-#define VHT_MCS_SUPPORTED_SET_SIZE      8
-
-struct ieee80211_vht_capabilities {
-	__le32 vht_capabilities_info;
-	u8 vht_supported_mcs_set[VHT_MCS_SUPPORTED_SET_SIZE];
-} __packed;
-
-struct ieee80211_vht_operation {
-	u8 vht_op_info_chwidth;
-	u8 vht_op_info_chan_center_freq_seg1_idx;
-	u8 vht_op_info_chan_center_freq_seg2_idx;
-	__le16 vht_basic_mcs_set;
-} __packed;
-
 /**
  * struct ieee80211_vht_mcs_info - VHT MCS information
  * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
@@ -1128,11 +1163,13 @@
  *	STA can receive. Rate expressed in units of 1 Mbps.
  *	If this field is 0 this value should not be used to
  *	consider the highest RX data rate supported.
+ *	The top 3 bits of this field are reserved.
  * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
  * @tx_highest: Indicates highest long GI VHT PPDU data rate
  *	STA can transmit. Rate expressed in units of 1 Mbps.
  *	If this field is 0 this value should not be used to
  *	consider the highest TX data rate supported.
+ *	The top 3 bits of this field are reserved.
  */
 struct ieee80211_vht_mcs_info {
 	__le16 rx_mcs_map;
@@ -1141,6 +1178,58 @@
 	__le16 tx_highest;
 } __packed;
 
+/**
+ * enum ieee80211_vht_mcs_support - VHT MCS support definitions
+ * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ *	number of streams
+ * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
+ * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the @rx_mcs_map
+ * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_vht_mcs_support {
+	IEEE80211_VHT_MCS_SUPPORT_0_7	= 0,
+	IEEE80211_VHT_MCS_SUPPORT_0_8	= 1,
+	IEEE80211_VHT_MCS_SUPPORT_0_9	= 2,
+	IEEE80211_VHT_MCS_NOT_SUPPORTED	= 3,
+};
+
+/**
+ * struct ieee80211_vht_cap - VHT capabilities
+ *
+ * This structure is the "VHT capabilities element" as
+ * described in 802.11ac D3.0 8.4.2.160
+ * @vht_cap_info: VHT capability info
+ * @supp_mcs: VHT MCS supported rates
+ */
+struct ieee80211_vht_cap {
+	__le32 vht_cap_info;
+	struct ieee80211_vht_mcs_info supp_mcs;
+} __packed;
+
+/**
+ * struct ieee80211_vht_operation - VHT operation IE
+ *
+ * This structure is the "VHT operation element" as
+ * described in 802.11ac D3.0 8.4.2.161
+ * @chan_width: Operating channel width
+ * @center_freq_seg1_idx: center freq segment 1 index
+ * @center_freq_seg2_idx: center freq segment 2 index
+ * @basic_mcs_set: VHT Basic MCS rate set
+ */
+struct ieee80211_vht_operation {
+	u8 chan_width;
+	u8 center_freq_seg1_idx;
+	u8 center_freq_seg2_idx;
+	__le16 basic_mcs_set;
+} __packed;
+
+
 #define IEEE80211_VHT_MCS_ZERO_TO_SEVEN_SUPPORT 0
 #define IEEE80211_VHT_MCS_ZERO_TO_EIGHT_SUPPORT 1
 #define IEEE80211_VHT_MCS_ZERO_TO_NINE_SUPPORT  2
@@ -1440,8 +1529,6 @@
 
 	WLAN_EID_RSN = 48,
 	WLAN_EID_MMIE = 76,
-	WLAN_EID_WPA = 221,
-	WLAN_EID_GENERIC = 221,
 	WLAN_EID_VENDOR_SPECIFIC = 221,
 	WLAN_EID_QOS_PARAMETER = 222,
 
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 1cc595a..f4e56ec 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -4,5 +4,22 @@
 #include <linux/ip.h>
 #include <linux/in6.h>
 #include <uapi/linux/if_tunnel.h>
+#include <linux/u64_stats_sync.h>
+
+/*
+ * Locking : hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(pos, start) \
+	for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
+
+/* often-modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
 #endif /* _IF_TUNNEL_H_ */
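
With struct pcpu_tstats now shared from this header, a tunnel's hot-path counters are updated under u64_stats_sync. A minimal sketch, assuming the caller already owns a per-cpu pcpu_tstats allocation; the function name is hypothetical.

/* Sketch of a per-cpu tunnel stats update using the shared pcpu_tstats. */
static void example_tunnel_rx_account(struct pcpu_tstats __percpu *stats,
				      unsigned int len)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}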
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index bcba48a..5e11905 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -47,6 +47,7 @@
 	__s32		disable_ipv6;
 	__s32		accept_dad;
 	__s32		force_tllao;
+	__s32           ndisc_notify;
 	void		*sysctl;
 };
 
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 06177ba10..e83512f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -282,6 +282,25 @@
 	return cmp1.tv64 == cmp2.tv64;
 }
 
+/**
+ * ktime_compare - Compares two ktime_t variables for less, greater or equal
+ * @cmp1:	comparable1
+ * @cmp2:	comparable2
+ *
+ * Returns ...
+ *   cmp1  < cmp2: return <0
+ *   cmp1 == cmp2: return 0
+ *   cmp1  > cmp2: return >0
+ */
+static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
+{
+	if (cmp1.tv64 < cmp2.tv64)
+		return -1;
+	if (cmp1.tv64 > cmp2.tv64)
+		return 1;
+	return 0;
+}
+
 static inline s64 ktime_to_us(const ktime_t kt)
 {
 	struct timeval tv = ktime_to_timeval(kt);
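
ktime_compare() follows the memcmp() convention (negative, zero, positive), so callers can build ordering helpers directly on it. A small hedged example; the helper name is made up.

/* Pick the earlier of two absolute expiry times using ktime_compare(). */
static ktime_t example_earliest_expiry(ktime_t a, ktime_t b)
{
	return ktime_compare(a, b) < 0 ? a : b;
}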
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index de20120..adfe8c0 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -15,6 +15,7 @@
 
 #define MICREL_PHY_ID_MASK	0x00fffff0
 
+#define PHY_ID_KSZ8873MLL	0x000e7237
 #define PHY_ID_KSZ9021		0x00221610
 #define PHY_ID_KS8737		0x00221720
 #define PHY_ID_KSZ8021		0x00221555
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f8eda02..e9929ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -887,6 +887,10 @@
  *		       struct net_device *dev, int idx)
  *	Used to add FDB entries to dump requests. Implementers should add
  *	entries to skb and update idx with the number of entries.
+ *
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
+ * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
+ *			     struct net_device *dev)
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -998,6 +1002,12 @@
 						struct netlink_callback *cb,
 						struct net_device *dev,
 						int idx);
+
+	int			(*ndo_bridge_setlink)(struct net_device *dev,
+						      struct nlmsghdr *nlh);
+	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
+						      u32 pid, u32 seq,
+						      struct net_device *dev);
 };
 
 /*
@@ -1499,16 +1509,25 @@
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	bool			(*id_match)(struct packet_type *ptype,
+					    struct sock *sk);
+	void			*af_packet_priv;
+	struct list_head	list;
+};
+
+struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb);
-	bool			(*id_match)(struct packet_type *ptype,
-					    struct sock *sk);
-	void			*af_packet_priv;
-	struct list_head	list;
+};
+
+struct packet_offload {
+	__be16			 type;	/* This is really htons(ether_type). */
+	struct offload_callbacks callbacks;
+	struct list_head	 list;
 };
 
 #include <linux/notifier.h>
@@ -1548,6 +1567,8 @@
 
 extern rwlock_t				dev_base_lock;		/* Device list lock */
 
+extern seqlock_t	devnet_rename_seq;	/* Device rename lock */
+
 
 #define for_each_netdev(net, d)		\
 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
@@ -1605,6 +1626,9 @@
 extern void		dev_add_pack(struct packet_type *pt);
 extern void		dev_remove_pack(struct packet_type *pt);
 extern void		__dev_remove_pack(struct packet_type *pt);
+extern void		dev_add_offload(struct packet_offload *po);
+extern void		dev_remove_offload(struct packet_offload *po);
+extern void		__dev_remove_offload(struct packet_offload *po);
 
 extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
 						      unsigned short mask);
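
GSO/GRO hooks move out of struct packet_type into struct packet_offload, registered with dev_add_offload(). A hedged sketch of the new registration shape follows; the example_* callbacks named below are placeholders standing in for a protocol's real handlers, not existing kernel functions.

/* Sketch: registering protocol offload hooks under the new split layout. */
static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment	= example_gso_segment,	/* placeholder handlers */
		.gso_send_check	= example_gso_send_check,
		.gro_receive	= example_gro_receive,
		.gro_complete	= example_gro_complete,
	},
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);
	return 0;
}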
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
deleted file mode 100644
index 9890bba..0000000
--- a/include/linux/nfc/pn544.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Driver include for the PN544 NFC chip.
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PN544_H_
-#define _PN544_H_
-
-#include <linux/i2c.h>
-
-#define PN544_DRIVER_NAME	"pn544"
-#define PN544_MAXWINDOW_SIZE	7
-#define PN544_WINDOW_SIZE	4
-#define PN544_RETRIES		10
-#define PN544_MAX_I2C_TRANSFER	0x0400
-#define PN544_MSG_MAX_SIZE	0x21 /* at normal HCI mode */
-
-/* ioctl */
-#define PN544_CHAR_BASE		'P'
-#define PN544_IOR(num, dtype)	_IOR(PN544_CHAR_BASE, num, dtype)
-#define PN544_IOW(num, dtype)	_IOW(PN544_CHAR_BASE, num, dtype)
-#define PN544_GET_FW_MODE	PN544_IOW(1, unsigned int)
-#define PN544_SET_FW_MODE	PN544_IOW(2, unsigned int)
-#define PN544_GET_DEBUG		PN544_IOW(3, unsigned int)
-#define PN544_SET_DEBUG		PN544_IOW(4, unsigned int)
-
-/* Timing restrictions (ms) */
-#define PN544_RESETVEN_TIME	30 /* 7 */
-#define PN544_PVDDVEN_TIME	0
-#define PN544_VBATVEN_TIME	0
-#define PN544_GPIO4VEN_TIME	0
-#define PN544_WAKEUP_ACK	5
-#define PN544_WAKEUP_GUARD	(PN544_WAKEUP_ACK + 1)
-#define PN544_INACTIVITY_TIME	1000
-#define PN544_INTERFRAME_DELAY	200 /* us */
-#define PN544_BAUDRATE_CHANGE	150 /* us */
-
-/* Debug bits */
-#define PN544_DEBUG_BUF		0x01
-#define PN544_DEBUG_READ	0x02
-#define PN544_DEBUG_WRITE	0x04
-#define PN544_DEBUG_IRQ		0x08
-#define PN544_DEBUG_CALLS	0x10
-#define PN544_DEBUG_MODE	0x20
-
-/* Normal (HCI) mode */
-#define PN544_LLC_HCI_OVERHEAD	3 /* header + crc (to length) */
-#define PN544_LLC_MIN_SIZE	(1 + PN544_LLC_HCI_OVERHEAD) /* length + */
-#define PN544_LLC_MAX_DATA	(PN544_MSG_MAX_SIZE - 2)
-#define PN544_LLC_MAX_HCI_SIZE	(PN544_LLC_MAX_DATA - 2)
-
-struct pn544_llc_packet {
-	unsigned char length; /* of rest of packet */
-	unsigned char header;
-	unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */
-};
-
-/* Firmware upgrade mode */
-#define PN544_FW_HEADER_SIZE	3
-/* max fw transfer is 1024bytes, but I2C limits it to 0xC0 */
-#define PN544_MAX_FW_DATA	(PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE)
-
-struct pn544_fw_packet {
-	unsigned char command; /* status in answer */
-	unsigned char length[2]; /* big-endian order (msf) */
-	unsigned char data[PN544_MAX_FW_DATA];
-};
-
-#ifdef __KERNEL__
-enum {
-	NFC_GPIO_ENABLE,
-	NFC_GPIO_FW_RESET,
-	NFC_GPIO_IRQ
-};
-
-/* board config */
-struct pn544_nfc_platform_data {
-	int (*request_resources) (struct i2c_client *client);
-	void (*free_resources) (void);
-	void (*enable) (int fw);
-	int (*test) (void);
-	void (*disable) (void);
-	int (*get_gpio)(int type);
-};
-#endif /* __KERNEL__ */
-
-#endif /* _PN544_H_ */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index ee21795..9cbd670 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -604,6 +604,20 @@
 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 
 /**
+ * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @subvend: the 16 bit PCI Subvendor ID
+ * @subdev: the 16 bit PCI Subdevice ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device with subsystem information.
+ */
+#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = (subvend), .subdevice = (subdev)
+
+/**
  * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
  * @dev_class: the class, subclass, prog-if triple for this device
  * @dev_class_mask: the class mask for this device
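
PCI_DEVICE_SUB pins the subsystem IDs in addition to vendor/device, which matters when one silicon ID ships on several board variants. A hedged example table; the numeric IDs below are placeholders, and the exact-subsystem entry is listed before the catch-all so it matches first.

/* Sketch of an ID table mixing an exact-subsystem match with a catch-all. */
static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE_SUB(0x8086, 0x1234, 0x1028, 0x0001) }, /* one board variant */
	{ PCI_DEVICE(0x8086, 0x1234) },                      /* any other subsystem */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);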
diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h
index c4e23d0..24368a2 100644
--- a/include/linux/platform_data/cpsw.h
+++ b/include/linux/platform_data/cpsw.h
@@ -18,9 +18,7 @@
 #include <linux/if_ether.h>
 
 struct cpsw_slave_data {
-	u32		slave_reg_ofs;
-	u32		sliver_reg_ofs;
-	const char	*phy_id;
+	char		phy_id[MII_BUS_ID_SIZE];
 	int		phy_if;
 	u8		mac_addr[ETH_ALEN];
 };
@@ -28,27 +26,14 @@
 struct cpsw_platform_data {
 	u32	ss_reg_ofs;	/* Subsystem control register offset */
 	u32	channels;	/* number of cpdma channels (symmetric) */
-	u32	cpdma_reg_ofs;	/* cpdma register offset */
-	u32	cpdma_sram_ofs;	/* cpdma sram offset */
-
 	u32	slaves;		/* number of slave cpgmac ports */
 	struct cpsw_slave_data	*slave_data;
-
-	u32	ale_reg_ofs;	/* address lookup engine reg offset */
+	u32	cpts_active_slave; /* time stamping slave */
+	u32	cpts_clock_mult;  /* convert input clock ticks to nanoseconds */
+	u32	cpts_clock_shift; /* convert input clock ticks to nanoseconds */
 	u32	ale_entries;	/* ale table size */
-
-	u32	host_port_reg_ofs; /* cpsw cpdma host port registers */
-	u32     host_port_num; /* The port number for the host port */
-
-	u32	hw_stats_reg_ofs;  /* cpsw hardware statistics counters */
-
-	u32	bd_ram_ofs;   /* embedded buffer descriptor RAM offset*/
 	u32	bd_ram_size;  /*buffer descriptor ram size */
-	u32	hw_ram_addr; /*if the HW address for BD RAM is different */
-	bool	no_bd_ram; /* no embedded BD ram*/
-
 	u32	rx_descs;	/* Number of Rx Descriptios */
-
 	u32	mac_control;	/* Mac control register */
 };
 
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index b081c72..044a124 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -12,6 +12,7 @@
 	u32		phy_mask;
 	int		phy_irq_pin;	/* PHY IRQ */
 	u8		is_rmii;	/* using RMII interface? */
+	u8		rev_eth_addr;	/* reverse Ethernet address byte order */
 };
 
 #endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h
new file mode 100644
index 0000000..713bfd7
--- /dev/null
+++ b/include/linux/platform_data/pn544.h
@@ -0,0 +1,44 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PN544_H_
+#define _PN544_H_
+
+#include <linux/i2c.h>
+
+enum {
+	NFC_GPIO_ENABLE,
+	NFC_GPIO_FW_RESET,
+	NFC_GPIO_IRQ
+};
+
+/* board config */
+struct pn544_nfc_platform_data {
+	int (*request_resources) (struct i2c_client *client);
+	void (*free_resources) (void);
+	void (*enable) (int fw);
+	int (*test) (void);
+	void (*disable) (void);
+	int (*get_gpio)(int type);
+};
+
+#endif /* _PN544_H_ */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7002bbf..489dd7bb 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -69,4 +69,7 @@
 			     struct netlink_callback *cb,
 			     struct net_device *dev,
 			     int idx);
+
+extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev, u16 mode);
 #endif	/* __LINUX_RTNETLINK_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6a2c34e..f2af494 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -235,11 +235,13 @@
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
+ * The zerocopy_success argument is true if zero copy transmit occurred,
+ * false on data copy or out of memory error caused by data copy attempt.
  * The ctx field is used to track device context.
  * The desc field is used to track userspace buffer index.
  */
 struct ubuf_info {
-	void (*callback)(struct ubuf_info *);
+	void (*callback)(struct ubuf_info *, bool zerocopy_success);
 	void *ctx;
 	unsigned long desc;
 };
@@ -566,6 +568,7 @@
 }
 
 extern void kfree_skb(struct sk_buff *skb);
+extern void skb_tx_error(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
@@ -643,7 +646,7 @@
 extern void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
-	if (!skb->rxhash)
+	if (!skb->l4_rxhash)
 		__skb_get_rxhash(skb);
 
 	return skb->rxhash;
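
The zerocopy completion callback now reports whether the transmit actually stayed zero-copy, which is what lets vhost-net count failures above. A hedged example of a callback conforming to the new prototype; everything named example_* is hypothetical.

/* Hypothetical callback with the new ubuf_info prototype: the bool says
 * whether the skb went out zero-copy or fell back to a data copy. */
static void example_zerocopy_done(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long desc = ubuf->desc;	/* userspace buffer index */

	pr_debug("buffer %lu completed, zerocopy=%d\n", desc, zerocopy_success);
}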
diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h
index 5f44e97..07a9c7a 100644
--- a/include/linux/ssb/ssb_driver_mips.h
+++ b/include/linux/ssb/ssb_driver_mips.h
@@ -13,6 +13,12 @@
 	unsigned int reg_shift;
 };
 
+struct ssb_pflash {
+	bool present;
+	u8 buswidth;
+	u32 window;
+	u32 window_size;
+};
 
 struct ssb_mipscore {
 	struct ssb_device *dev;
@@ -20,9 +26,7 @@
 	int nr_serial_ports;
 	struct ssb_serial_port serial_ports[4];
 
-	u8 flash_buswidth;
-	u32 flash_window;
-	u32 flash_window_size;
+	struct ssb_pflash pflash;
 };
 
 extern void ssb_mipscore_init(struct ssb_mipscore *mcore);
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index a052501..6ecfa02 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -485,7 +485,7 @@
 #define  SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT	4
 #define  SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL	0x0020
 #define  SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT	5
-#define SSB_SPROM8_TEMPDELTA		0x00BA
+#define SSB_SPROM8_TEMPDELTA		0x00BC
 #define  SSB_SPROM8_TEMPDELTA_PHYCAL	0x00ff
 #define  SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT	0
 #define  SSB_SPROM8_TEMPDELTA_PERIOD	0x0f00
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index a1547ea..de5b2f8 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -104,6 +104,7 @@
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
+	int riwt_off;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
 	int (*init)(struct platform_device *pdev);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index cd844a6..14a8ff2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -158,8 +158,7 @@
 	struct ctl_table_set default_set;
 	struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
 					   struct nsproxy *namespaces);
-	int (*permissions)(struct ctl_table_root *root,
-			struct nsproxy *namespaces, struct ctl_table *table);
+	int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
 };
 
 /* struct ctl_path describes where in the hierarchy a table is added */
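
The permissions hook now receives the ctl_table_header, from which the root (and, through it, any namespace information) can be recovered. A minimal hedged sketch of a callback on the new prototype; the function name is made up.

/* Sketch of a permissions callback on the new prototype; it simply falls
 * back to the mode stored in the table entry. */
static int example_permissions(struct ctl_table_header *head,
			       struct ctl_table *table)
{
	struct ctl_table_root *root = head->root;	/* root reached via head */

	(void)root;		/* a real hook would map this to a namespace */
	return table->mode;
}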
diff --git a/include/linux/timecompare.h b/include/linux/timecompare.h
deleted file mode 100644
index 546e223..0000000
--- a/include/linux/timecompare.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Utility code which helps transforming between two different time
- * bases, called "source" and "target" time in this code.
- *
- * Source time has to be provided via the timecounter API while target
- * time is accessed via a function callback whose prototype
- * intentionally matches ktime_get() and ktime_get_real(). These
- * interfaces where chosen like this so that the code serves its
- * initial purpose without additional glue code.
- *
- * This purpose is synchronizing a hardware clock in a NIC with system
- * time, in order to implement the Precision Time Protocol (PTP,
- * IEEE1588) with more accurate hardware assisted time stamping.  In
- * that context only synchronization against system time (=
- * ktime_get_real()) is currently needed. But this utility code might
- * become useful in other situations, which is why it was written as
- * general purpose utility code.
- *
- * The source timecounter is assumed to return monotonically
- * increasing time (but this code does its best to compensate if that
- * is not the case) whereas target time may jump.
- *
- * The target time corresponding to a source time is determined by
- * reading target time, reading source time, reading target time
- * again, then assuming that average target time corresponds to source
- * time. In other words, the assumption is that reading the source
- * time is slow and involves equal time for sending the request and
- * receiving the reply, whereas reading target time is assumed to be
- * fast.
- *
- * Copyright (C) 2009 Intel Corporation.
- * Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef _LINUX_TIMECOMPARE_H
-#define _LINUX_TIMECOMPARE_H
-
-#include <linux/clocksource.h>
-#include <linux/ktime.h>
-
-/**
- * struct timecompare - stores state and configuration for the two clocks
- *
- * Initialize to zero, then set source/target/num_samples.
- *
- * Transformation between source time and target time is done with:
- * target_time = source_time + offset +
- *               (source_time - last_update) * skew /
- *               TIMECOMPARE_SKEW_RESOLUTION
- *
- * @source:          used to get source time stamps via timecounter_read()
- * @target:          function returning target time (for example, ktime_get
- *                   for monotonic time, or ktime_get_real for wall clock)
- * @num_samples:     number of times that source time and target time are to
- *                   be compared when determining their offset
- * @offset:          (target time - source time) at the time of the last update
- * @skew:            average (target time - source time) / delta source time *
- *                   TIMECOMPARE_SKEW_RESOLUTION
- * @last_update:     last source time stamp when time offset was measured
- */
-struct timecompare {
-	struct timecounter *source;
-	ktime_t (*target)(void);
-	int num_samples;
-
-	s64 offset;
-	s64 skew;
-	u64 last_update;
-};
-
-/**
- * timecompare_transform - transform source time stamp into target time base
- * @sync:            context for time sync
- * @source_tstamp:   the result of timecounter_read() or
- *                   timecounter_cyc2time()
- */
-extern ktime_t timecompare_transform(struct timecompare *sync,
-				     u64 source_tstamp);
-
-/**
- * timecompare_offset - measure current (target time - source time) offset
- * @sync:            context for time sync
- * @offset:          average offset during sample period returned here
- * @source_tstamp:   average source time during sample period returned here
- *
- * Returns number of samples used. Might be zero (= no result) in the
- * unlikely case that target time was monotonically decreasing for all
- * samples (= broken).
- */
-extern int timecompare_offset(struct timecompare *sync,
-			      s64 *offset,
-			      u64 *source_tstamp);
-
-extern void __timecompare_update(struct timecompare *sync,
-				 u64 source_tstamp);
-
-/**
- * timecompare_update - update offset and skew by measuring current offset
- * @sync:            context for time sync
- * @source_tstamp:   the result of timecounter_read() or
- *                   timecounter_cyc2time(), pass zero to force update
- *
- * Updates are only done at most once per second.
- */
-static inline void timecompare_update(struct timecompare *sync,
-				      u64 source_tstamp)
-{
-	if (!source_tstamp ||
-	    (s64)(source_tstamp - sync->last_update) >= NSEC_PER_SEC)
-		__timecompare_update(sync, source_tstamp);
-}
-
-#endif /* _LINUX_TIMECOMPARE_H */
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
new file mode 100644
index 0000000..3b8f9d4
--- /dev/null
+++ b/include/linux/usb/cdc_ncm.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) ST-Ericsson 2010-2012
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define CDC_NCM_COMM_ALTSETTING_NCM		0
+#define CDC_NCM_COMM_ALTSETTING_MBIM		1
+
+#define CDC_NCM_DATA_ALTSETTING_NCM		1
+#define CDC_NCM_DATA_ALTSETTING_MBIM		2
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
+
+/* Maximum NTB length */
+#define	CDC_NCM_NTB_MAX_SIZE_TX			32768	/* bytes */
+#define	CDC_NCM_NTB_MAX_SIZE_RX			32768	/* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define	CDC_NCM_MIN_DATAGRAM_SIZE		1514	/* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 8.1.3 */
+#define CDC_MBIM_MIN_DATAGRAM_SIZE		2048	/* bytes */
+
+#define	CDC_NCM_MIN_TX_PKT			512	/* bytes */
+
+/* Default value for MaxDatagramSize */
+#define	CDC_NCM_MAX_DATAGRAM_SIZE		8192	/* bytes */
+
+/*
+ * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
+ * the last NULL entry.
+ */
+#define	CDC_NCM_DPT_DATAGRAMS_MAX		40
+
+/* Restart the timer, if amount of datagrams is less than given value */
+#define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
+#define	CDC_NCM_TIMER_PENDING_CNT		2
+#define CDC_NCM_TIMER_INTERVAL			(400UL * NSEC_PER_USEC)
+
+/* The following macro defines the minimum header space */
+#define	CDC_NCM_MIN_HDR_SIZE \
+	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
+	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+
+#define CDC_NCM_NDP_SIZE \
+	(sizeof(struct usb_cdc_ncm_ndp16) +				\
+	      (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+
+#define cdc_ncm_comm_intf_is_mbim(x)  ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
+				       (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
+#define cdc_ncm_data_intf_is_mbim(x)  ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
+
+struct cdc_ncm_ctx {
+	struct usb_cdc_ncm_ntb_parameters ncm_parm;
+	struct hrtimer tx_timer;
+	struct tasklet_struct bh;
+
+	const struct usb_cdc_ncm_desc *func_desc;
+	const struct usb_cdc_mbim_desc   *mbim_desc;
+	const struct usb_cdc_header_desc *header_desc;
+	const struct usb_cdc_union_desc *union_desc;
+	const struct usb_cdc_ether_desc *ether_desc;
+
+	struct net_device *netdev;
+	struct usb_device *udev;
+	struct usb_host_endpoint *in_ep;
+	struct usb_host_endpoint *out_ep;
+	struct usb_host_endpoint *status_ep;
+	struct usb_interface *intf;
+	struct usb_interface *control;
+	struct usb_interface *data;
+
+	struct sk_buff *tx_curr_skb;
+	struct sk_buff *tx_rem_skb;
+	__le32 tx_rem_sign;
+
+	spinlock_t mtx;
+	atomic_t stop;
+
+	u32 tx_timer_pending;
+	u32 tx_curr_frame_num;
+	u32 rx_speed;
+	u32 tx_speed;
+	u32 rx_max;
+	u32 tx_max;
+	u32 max_datagram_size;
+	u16 tx_max_datagrams;
+	u16 tx_remainder;
+	u16 tx_modulus;
+	u16 tx_ndp_modulus;
+	u16 tx_seq;
+	u16 rx_seq;
+	u16 connected;
+};
+
+extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
+extern int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+extern int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index ddbbb7d..9bbeabf 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -163,6 +163,16 @@
 extern void usbnet_disconnect(struct usb_interface *);
 extern void usbnet_device_suggests_idle(struct usbnet *dev);
 
+extern int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, void *data, u16 size);
+extern int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, const void *data, u16 size);
+extern int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, void *data, u16 size);
+extern int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, const void *data, u16 size);
+extern int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
+		    u16 value, u16 index, const void *data, u16 size);
 
 /* Drivers that reuse some of the standard USB CDC infrastructure
  * (notably, using multiple interfaces according to the CDC
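
The new usbnet_read_cmd()/usbnet_write_cmd() helpers give mini-drivers a common wrapper around control transfers. A hedged example of a register read built on them; the command byte and register layout are made up.

/* Hypothetical vendor-register read using the new usbnet control helper. */
static int example_read_reg(struct usbnet *dev, u16 reg, __le16 *value)
{
	return usbnet_read_cmd(dev, 0x01 /* made-up vendor command */,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0, reg, value, sizeof(*value));
}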
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b5f8988..0a996a3 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -53,7 +53,6 @@
 	struct path		path;
 	struct mutex		readlock;
 	struct sock		*peer;
-	struct sock		*other;
 	struct list_head	link;
 	atomic_long_t		inflight;
 	spinlock_t		lock;
diff --git a/include/net/bluetooth/a2mp.h b/include/net/bluetooth/a2mp.h
index 6a76e0a..42f2176 100644
--- a/include/net/bluetooth/a2mp.h
+++ b/include/net/bluetooth/a2mp.h
@@ -19,13 +19,25 @@
 
 #define A2MP_FEAT_EXT	0x8000
 
+enum amp_mgr_state {
+	READ_LOC_AMP_INFO,
+	READ_LOC_AMP_ASSOC,
+	READ_LOC_AMP_ASSOC_FINAL,
+};
+
 struct amp_mgr {
+	struct list_head	list;
 	struct l2cap_conn	*l2cap_conn;
 	struct l2cap_chan	*a2mp_chan;
+	struct l2cap_chan	*bredr_chan;
 	struct kref		kref;
 	__u8			ident;
 	__u8			handle;
+	enum amp_mgr_state	state;
 	unsigned long		flags;
+
+	struct list_head	amp_ctrls;
+	struct mutex		amp_ctrls_lock;
 };
 
 struct a2mp_cmd {
@@ -118,9 +130,19 @@
 #define A2MP_STATUS_PHYS_LINK_EXISTS		0x05
 #define A2MP_STATUS_SECURITY_VIOLATION		0x06
 
-void amp_mgr_get(struct amp_mgr *mgr);
+extern struct list_head amp_mgr_list;
+extern struct mutex amp_mgr_list_lock;
+
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
 int amp_mgr_put(struct amp_mgr *mgr);
+u8 __next_ident(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
 				       struct sk_buff *skb);
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state);
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data);
+void a2mp_discover_amp(struct l2cap_chan *chan);
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
 
 #endif /* __A2MP_H */
diff --git a/include/net/bluetooth/amp.h b/include/net/bluetooth/amp.h
new file mode 100644
index 0000000..7ea3db7
--- /dev/null
+++ b/include/net/bluetooth/amp.h
@@ -0,0 +1,54 @@
+/*
+   Copyright (c) 2011,2012 Intel Corp.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 and
+   only version 2 as published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+*/
+
+#ifndef __AMP_H
+#define __AMP_H
+
+struct amp_ctrl {
+	struct list_head	list;
+	struct kref		kref;
+	__u8			id;
+	__u16			assoc_len_so_far;
+	__u16			assoc_rem_len;
+	__u16			assoc_len;
+	__u8			*assoc;
+};
+
+int amp_ctrl_put(struct amp_ctrl *ctrl);
+void amp_ctrl_get(struct amp_ctrl *ctrl);
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
+void amp_ctrl_list_flush(struct amp_mgr *mgr);
+
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+			     u8 remote_id, bool out);
+
+int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
+
+void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+				   struct hci_conn *hcon);
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+			struct hci_conn *hcon);
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+			struct hci_conn *hcon);
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
+
+#endif /* __AMP_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ede0369..2554b3f 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -180,7 +180,6 @@
 }
 
 void baswap(bdaddr_t *dst, bdaddr_t *src);
-char *batostr(bdaddr_t *ba);
 
 /* Common socket structures and functions */
 
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 76b2b6b..45eee08 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -33,6 +33,8 @@
 #define HCI_LINK_KEY_SIZE	16
 #define HCI_AMP_LINK_KEY_SIZE	(2 * HCI_LINK_KEY_SIZE)
 
+#define HCI_MAX_AMP_ASSOC_SIZE	672
+
 /* HCI dev events */
 #define HCI_DEV_REG			1
 #define HCI_DEV_UNREG			2
@@ -113,6 +115,7 @@
 	HCI_SSP_ENABLED,
 	HCI_HS_ENABLED,
 	HCI_LE_ENABLED,
+	HCI_LE_PERIPHERAL,
 	HCI_CONNECTABLE,
 	HCI_DISCOVERABLE,
 	HCI_LINK_SECURITY,
@@ -151,7 +154,7 @@
 #define HCI_DISCONN_TIMEOUT	msecs_to_jiffies(2000)	/* 2 seconds */
 #define HCI_PAIRING_TIMEOUT	msecs_to_jiffies(60000)	/* 60 seconds */
 #define HCI_INIT_TIMEOUT	msecs_to_jiffies(10000)	/* 10 seconds */
-#define HCI_CMD_TIMEOUT		msecs_to_jiffies(1000)	/* 1 second */
+#define HCI_CMD_TIMEOUT		msecs_to_jiffies(2000)	/* 2 seconds */
 #define HCI_ACL_TX_TIMEOUT	msecs_to_jiffies(45000)	/* 45 seconds */
 #define HCI_AUTO_OFF_TIMEOUT	msecs_to_jiffies(2000)	/* 2 seconds */
 
@@ -196,6 +199,7 @@
 #define ACL_START_NO_FLUSH	0x00
 #define ACL_CONT		0x01
 #define ACL_START		0x02
+#define ACL_COMPLETE		0x03
 #define ACL_ACTIVE_BCAST	0x04
 #define ACL_PICO_BCAST		0x08
 
@@ -205,6 +209,7 @@
 #define ESCO_LINK	0x02
 /* Low Energy links do not have defined link type. Use invented one */
 #define LE_LINK		0x80
+#define AMP_LINK	0x81
 
 /* LMP features */
 #define LMP_3SLOT	0x01
@@ -314,6 +319,9 @@
 #define HCI_FLOW_CTL_MODE_PACKET_BASED	0x00
 #define HCI_FLOW_CTL_MODE_BLOCK_BASED	0x01
 
+/* The core spec defines 127 as the "not available" value */
+#define HCI_TX_POWER_INVALID	127
+
 /* Extended Inquiry Response field types */
 #define EIR_FLAGS		0x01 /* flags */
 #define EIR_UUID16_SOME		0x02 /* 16-bit UUID, more available */
@@ -330,6 +338,13 @@
 #define EIR_SSP_RAND_R		0x0F /* Simple Pairing Randomizer R */
 #define EIR_DEVICE_ID		0x10 /* device ID */
 
+/* Low Energy Advertising Flags */
+#define LE_AD_LIMITED		0x01 /* Limited Discoverable */
+#define LE_AD_GENERAL		0x02 /* General Discoverable */
+#define LE_AD_NO_BREDR		0x04 /* BR/EDR not supported */
+#define LE_AD_SIM_LE_BREDR_CTRL	0x08 /* Simultaneous LE & BR/EDR Controller */
+#define LE_AD_SIM_LE_BREDR_HOST	0x10 /* Simultaneous LE & BR/EDR Host */
+
 /* -----  HCI Commands ---- */
 #define HCI_OP_NOP			0x0000
 
@@ -556,12 +571,46 @@
 	__u8     key[HCI_AMP_LINK_KEY_SIZE];
 } __packed;
 
-#define HCI_OP_DISCONN_PHY_LINK	0x0437
+#define HCI_OP_DISCONN_PHY_LINK		0x0437
 struct hci_cp_disconn_phy_link {
 	__u8     phy_handle;
 	__u8     reason;
 } __packed;
 
+struct ext_flow_spec {
+	__u8       id;
+	__u8       stype;
+	__le16     msdu;
+	__le32     sdu_itime;
+	__le32     acc_lat;
+	__le32     flush_to;
+} __packed;
+
+#define HCI_OP_CREATE_LOGICAL_LINK	0x0438
+#define HCI_OP_ACCEPT_LOGICAL_LINK	0x0439
+struct hci_cp_create_accept_logical_link {
+	__u8                  phy_handle;
+	struct ext_flow_spec  tx_flow_spec;
+	struct ext_flow_spec  rx_flow_spec;
+} __packed;
+
+#define HCI_OP_DISCONN_LOGICAL_LINK	0x043a
+struct hci_cp_disconn_logical_link {
+	__le16   log_handle;
+} __packed;
+
+#define HCI_OP_LOGICAL_LINK_CANCEL	0x043b
+struct hci_cp_logical_link_cancel {
+	__u8     phy_handle;
+	__u8     flow_spec_id;
+} __packed;
+
+struct hci_rp_logical_link_cancel {
+	__u8     status;
+	__u8     phy_handle;
+	__u8     flow_spec_id;
+} __packed;
+
 #define HCI_OP_SNIFF_MODE		0x0803
 struct hci_cp_sniff_mode {
 	__le16   handle;
@@ -894,6 +943,22 @@
 	__u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_READ_ADV_TX_POWER	0x2007
+struct hci_rp_le_read_adv_tx_power {
+	__u8	status;
+	__s8	tx_power;
+} __packed;
+
+#define HCI_MAX_AD_LENGTH		31
+
+#define HCI_OP_LE_SET_ADV_DATA		0x2008
+struct hci_cp_le_set_adv_data {
+	__u8	length;
+	__u8	data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_ADV_ENABLE	0x200a
+
 #define HCI_OP_LE_SET_SCAN_PARAM	0x200b
 struct hci_cp_le_set_scan_param {
 	__u8    type;
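The LE advertising additions above (HCI_MAX_AD_LENGTH, HCI_OP_LE_SET_ADV_DATA and the LE_AD_* flags) are easiest to read next to the AD element format they carry. Below is a minimal sketch of packing a single "Flags" element into the fixed 31-byte command buffer; it is illustrative only, not part of this patch, and the helper name is made up.

#include <linux/string.h>
#include <net/bluetooth/hci.h>

/* Pack one AD element: <len><type><payload>, where len covers type + payload. */
static u8 example_build_adv_flags(struct hci_cp_le_set_adv_data *cp, u8 flags)
{
	memset(cp, 0, sizeof(*cp));

	cp->data[0] = 2;		/* type byte + one payload byte */
	cp->data[1] = EIR_FLAGS;	/* 0x01, defined above */
	cp->data[2] = flags;		/* e.g. LE_AD_GENERAL | LE_AD_NO_BREDR */

	cp->length = 3;			/* bytes used, <= HCI_MAX_AD_LENGTH */
	return cp->length;
}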
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e7d4546..ef5b85d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -73,6 +73,7 @@
 struct hci_conn_hash {
 	struct list_head list;
 	unsigned int     acl_num;
+	unsigned int     amp_num;
 	unsigned int     sco_num;
 	unsigned int     le_num;
 };
@@ -124,6 +125,14 @@
 
 #define HCI_MAX_SHORT_NAME_LENGTH	10
 
+struct amp_assoc {
+	__u16	len;
+	__u16	offset;
+	__u16	rem_len;
+	__u16	len_so_far;
+	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
+};
+
 #define NUM_REASSEMBLY 4
 struct hci_dev {
 	struct list_head list;
@@ -177,6 +186,8 @@
 	__u32		amp_max_flush_to;
 	__u32		amp_be_flush_to;
 
+	struct amp_assoc	loc_assoc;
+
 	__u8		flow_ctl_mode;
 
 	unsigned int	auto_accept_delay;
@@ -252,8 +263,6 @@
 
 	struct sk_buff_head	driver_init;
 
-	void			*core_data;
-
 	atomic_t		promisc;
 
 	struct dentry		*debugfs;
@@ -269,6 +278,10 @@
 	struct work_struct	le_scan;
 	struct le_scan_params	le_scan_params;
 
+	__s8			adv_tx_power;
+	__u8			adv_data[HCI_MAX_AD_LENGTH];
+	__u8			adv_data_len;
+
 	int (*open)(struct hci_dev *hdev);
 	int (*close)(struct hci_dev *hdev);
 	int (*flush)(struct hci_dev *hdev);
@@ -277,6 +290,8 @@
 	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
 };
 
+#define HCI_PHY_HANDLE(handle)	(handle & 0xff)
+
 struct hci_conn {
 	struct list_head list;
 
@@ -310,6 +325,7 @@
 
 	__u8		remote_cap;
 	__u8		remote_auth;
+	__u8		remote_id;
 	bool		flush_key;
 
 	unsigned int	sent;
@@ -339,10 +355,11 @@
 
 struct hci_chan {
 	struct list_head list;
-
+	__u16 handle;
 	struct hci_conn *conn;
 	struct sk_buff_head data_q;
 	unsigned int	sent;
+	__u8		state;
 };
 
 extern struct list_head hci_dev_list;
@@ -438,6 +455,9 @@
 	case ACL_LINK:
 		h->acl_num++;
 		break;
+	case AMP_LINK:
+		h->amp_num++;
+		break;
 	case LE_LINK:
 		h->le_num++;
 		break;
@@ -459,6 +479,9 @@
 	case ACL_LINK:
 		h->acl_num--;
 		break;
+	case AMP_LINK:
+		h->amp_num--;
+		break;
 	case LE_LINK:
 		h->le_num--;
 		break;
@@ -475,6 +498,8 @@
 	switch (type) {
 	case ACL_LINK:
 		return h->acl_num;
+	case AMP_LINK:
+		return h->amp_num;
 	case LE_LINK:
 		return h->le_num;
 	case SCO_LINK:
@@ -556,6 +581,7 @@
 struct hci_chan *hci_chan_create(struct hci_conn *conn);
 void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 			     __u8 dst_type, __u8 sec_level, __u8 auth_type);
@@ -584,7 +610,10 @@
 
 	if (atomic_dec_and_test(&conn->refcnt)) {
 		unsigned long timeo;
-		if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+
+		switch (conn->type) {
+		case ACL_LINK:
+		case LE_LINK:
 			del_timer(&conn->idle_timer);
 			if (conn->state == BT_CONNECTED) {
 				timeo = conn->disc_timeout;
@@ -593,12 +622,20 @@
 			} else {
 				timeo = msecs_to_jiffies(10);
 			}
-		} else {
+			break;
+
+		case AMP_LINK:
+			timeo = conn->disc_timeout;
+			break;
+
+		default:
 			timeo = msecs_to_jiffies(10);
+			break;
 		}
+
 		cancel_delayed_work(&conn->disc_work);
 		queue_delayed_work(conn->hdev->workqueue,
-					&conn->disc_work, timeo);
+				   &conn->disc_work, timeo);
 	}
 }
 
@@ -650,7 +687,7 @@
 }
 
 struct hci_dev *hci_dev_get(int index);
-struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
 
 struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
@@ -699,6 +736,8 @@
 								u8 *randomizer);
 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
+int hci_update_ad(struct hci_dev *hdev);
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct sk_buff *skb);
@@ -715,18 +754,29 @@
 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
 
 /* ----- LMP capabilities ----- */
-#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
 #define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
+#define lmp_hold_capable(dev)      ((dev)->features[0] & LMP_HOLD)
 #define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
-#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
+#define lmp_park_capable(dev)      ((dev)->features[1] & LMP_PARK)
+#define lmp_inq_rssi_capable(dev)  ((dev)->features[3] & LMP_RSSI_INQ)
 #define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
+#define lmp_bredr_capable(dev)     (!((dev)->features[4] & LMP_NO_BREDR))
+#define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
+#define lmp_pause_enc_capable(dev) ((dev)->features[5] & LMP_PAUSE_ENC)
+#define lmp_ext_inq_capable(dev)   ((dev)->features[6] & LMP_EXT_INQ)
+#define lmp_le_br_capable(dev)     ((dev)->features[6] & LMP_SIMUL_LE_BR)
 #define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
 #define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
-#define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)
-#define lmp_bredr_capable(dev)     (!((dev)->features[4] & LMP_NO_BREDR))
+#define lmp_lsto_capable(dev)      ((dev)->features[7] & LMP_LSTO)
+#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[7] & LMP_INQ_TX_PWR)
+#define lmp_ext_feat_capable(dev)  ((dev)->features[7] & LMP_EXTFEATURES)
 
 /* ----- Extended LMP capabilities ----- */
+#define lmp_host_ssp_capable(dev)  ((dev)->host_features[0] & LMP_HOST_SSP)
 #define lmp_host_le_capable(dev)   ((dev)->host_features[0] & LMP_HOST_LE)
+#define lmp_host_le_br_capable(dev) ((dev)->host_features[0] & LMP_HOST_LE_BREDR)
 
 /* ----- HCI protocols ----- */
 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -789,6 +839,10 @@
 		sco_disconn_cfm(conn, reason);
 		break;
 
+	/* L2CAP is handled on the BR/EDR channel */
+	case AMP_LINK:
+		break;
+
 	default:
 		BT_ERR("unknown link type %d", conn->type);
 		break;
@@ -841,7 +895,7 @@
 
 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
 {
-	struct list_head *p;
+	struct hci_cb *cb;
 	__u8 encrypt;
 
 	hci_proto_auth_cfm(conn, status);
@@ -852,8 +906,7 @@
 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
 
 	read_lock(&hci_cb_list_lock);
-	list_for_each(p, &hci_cb_list) {
-		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+	list_for_each_entry(cb, &hci_cb_list, list) {
 		if (cb->security_cfm)
 			cb->security_cfm(conn, status, encrypt);
 	}
@@ -863,7 +916,7 @@
 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
 								__u8 encrypt)
 {
-	struct list_head *p;
+	struct hci_cb *cb;
 
 	if (conn->sec_level == BT_SECURITY_SDP)
 		conn->sec_level = BT_SECURITY_LOW;
@@ -874,8 +927,7 @@
 	hci_proto_encrypt_cfm(conn, status, encrypt);
 
 	read_lock(&hci_cb_list_lock);
-	list_for_each(p, &hci_cb_list) {
-		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+	list_for_each_entry(cb, &hci_cb_list, list) {
 		if (cb->security_cfm)
 			cb->security_cfm(conn, status, encrypt);
 	}
@@ -884,11 +936,10 @@
 
 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
 {
-	struct list_head *p;
+	struct hci_cb *cb;
 
 	read_lock(&hci_cb_list_lock);
-	list_for_each(p, &hci_cb_list) {
-		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+	list_for_each_entry(cb, &hci_cb_list, list) {
 		if (cb->key_change_cfm)
 			cb->key_change_cfm(conn, status);
 	}
@@ -898,11 +949,10 @@
 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
 								__u8 role)
 {
-	struct list_head *p;
+	struct hci_cb *cb;
 
 	read_lock(&hci_cb_list_lock);
-	list_for_each(p, &hci_cb_list) {
-		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+	list_for_each_entry(cb, &hci_cb_list, list) {
 		if (cb->role_switch_cfm)
 			cb->role_switch_cfm(conn, status, role);
 	}
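The confirmation-callback hunks above all make the same mechanical change: the open-coded list_for_each() plus list_entry() pair becomes list_for_each_entry(), which folds the container_of() step into the iterator itself. A generic sketch of the two forms (struct and function names are made up, not from this patch):

#include <linux/list.h>

struct item {
	struct list_head list;
	int value;
};

/* old style: iterate over list_head pointers, then convert each one */
static int example_sum_old(struct list_head *head)
{
	struct list_head *p;
	int sum = 0;

	list_for_each(p, head) {
		struct item *it = list_entry(p, struct item, list);
		sum += it->value;
	}
	return sum;
}

/* new style: the iterator yields the containing struct directly */
static int example_sum_new(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	list_for_each_entry(it, head, list)
		sum += it->value;
	return sum;
}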
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 7ed8e35..f57fab0 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -32,13 +32,14 @@
 /* L2CAP defaults */
 #define L2CAP_DEFAULT_MTU		672
 #define L2CAP_DEFAULT_MIN_MTU		48
-#define L2CAP_DEFAULT_FLUSH_TO		0xffff
+#define L2CAP_DEFAULT_FLUSH_TO		0xFFFF
+#define L2CAP_EFS_DEFAULT_FLUSH_TO	0xFFFFFFFF
 #define L2CAP_DEFAULT_TX_WINDOW		63
 #define L2CAP_DEFAULT_EXT_WINDOW	0x3FFF
 #define L2CAP_DEFAULT_MAX_TX		3
 #define L2CAP_DEFAULT_RETRANS_TO	2000    /* 2 seconds */
 #define L2CAP_DEFAULT_MONITOR_TO	12000   /* 12 seconds */
-#define L2CAP_DEFAULT_MAX_PDU_SIZE	1009    /* Sized for 3-DH5 packet */
+#define L2CAP_DEFAULT_MAX_PDU_SIZE	1492    /* Sized for AMP packet */
 #define L2CAP_DEFAULT_ACK_TO		200
 #define L2CAP_DEFAULT_MAX_SDU_SIZE	0xFFFF
 #define L2CAP_DEFAULT_SDU_ITIME		0xFFFFFFFF
@@ -51,6 +52,8 @@
 #define L2CAP_ENC_TIMEOUT		msecs_to_jiffies(5000)
 #define L2CAP_CONN_TIMEOUT		msecs_to_jiffies(40000)
 #define L2CAP_INFO_TIMEOUT		msecs_to_jiffies(4000)
+#define L2CAP_MOVE_TIMEOUT		msecs_to_jiffies(4000)
+#define L2CAP_MOVE_ERTX_TIMEOUT		msecs_to_jiffies(60000)
 
 #define L2CAP_A2MP_DEFAULT_MTU		670
 
@@ -433,6 +436,8 @@
 	struct sock *sk;
 
 	struct l2cap_conn	*conn;
+	struct hci_conn		*hs_hcon;
+	struct hci_chan		*hs_hchan;
 	struct kref	kref;
 
 	__u8		state;
@@ -476,6 +481,12 @@
 	unsigned long	conn_state;
 	unsigned long	flags;
 
+	__u8		remote_amp_id;
+	__u8		local_amp_id;
+	__u8		move_id;
+	__u8		move_state;
+	__u8		move_role;
+
 	__u16		next_tx_seq;
 	__u16		expected_ack_seq;
 	__u16		expected_tx_seq;
@@ -538,6 +549,7 @@
 	void			(*state_change) (struct l2cap_chan *chan,
 						 int state);
 	void			(*ready) (struct l2cap_chan *chan);
+	void			(*defer) (struct l2cap_chan *chan);
 	struct sk_buff		*(*alloc_skb) (struct l2cap_chan *chan,
 					       unsigned long len, int nb);
 };
@@ -640,6 +652,9 @@
 enum {
 	L2CAP_RX_STATE_RECV,
 	L2CAP_RX_STATE_SREJ_SENT,
+	L2CAP_RX_STATE_MOVE,
+	L2CAP_RX_STATE_WAIT_P,
+	L2CAP_RX_STATE_WAIT_F,
 };
 
 enum {
@@ -670,6 +685,25 @@
 	L2CAP_EV_RECV_FRAME,
 };
 
+enum {
+	L2CAP_MOVE_ROLE_NONE,
+	L2CAP_MOVE_ROLE_INITIATOR,
+	L2CAP_MOVE_ROLE_RESPONDER,
+};
+
+enum {
+	L2CAP_MOVE_STABLE,
+	L2CAP_MOVE_WAIT_REQ,
+	L2CAP_MOVE_WAIT_RSP,
+	L2CAP_MOVE_WAIT_RSP_SUCCESS,
+	L2CAP_MOVE_WAIT_CONFIRM,
+	L2CAP_MOVE_WAIT_CONFIRM_RSP,
+	L2CAP_MOVE_WAIT_LOGICAL_COMP,
+	L2CAP_MOVE_WAIT_LOGICAL_CFM,
+	L2CAP_MOVE_WAIT_LOCAL_BUSY,
+	L2CAP_MOVE_WAIT_PREPARE,
+};
+
 void l2cap_chan_hold(struct l2cap_chan *c);
 void l2cap_chan_put(struct l2cap_chan *c);
 
@@ -745,6 +779,10 @@
 {
 }
 
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+{
+}
+
 extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
@@ -767,6 +805,12 @@
 void l2cap_chan_set_defaults(struct l2cap_chan *chan);
 int l2cap_ertm_init(struct l2cap_chan *chan);
 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
 void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_send_conn_req(struct l2cap_chan *chan);
+void l2cap_move_start(struct l2cap_chan *chan);
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+		       u8 status);
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
 
 #endif /* __L2CAP_H */
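struct l2cap_ops gains a defer() callback above, and l2cap_chan_no_defer() is the matching empty stub for channel owners that have nothing to postpone at connect time. A hedged sketch of wiring it up (example_setup_ops is hypothetical; the rest of the ops table is assumed to be filled in elsewhere):

#include <net/bluetooth/l2cap.h>

/* Give a channel's ops table a no-op defer callback. */
static void example_setup_ops(struct l2cap_ops *ops)
{
	ops->defer = l2cap_chan_no_defer;
}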
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 7d5b600..e78db2c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -306,6 +306,88 @@
 };
 
 /**
+ * struct cfg80211_chan_def - channel definition
+ * @chan: the (control) channel
+ * @width: channel width
+ * @center_freq1: center frequency of first segment
+ * @center_freq2: center frequency of second segment
+ *	(only with 80+80 MHz)
+ */
+struct cfg80211_chan_def {
+	struct ieee80211_channel *chan;
+	enum nl80211_chan_width width;
+	u32 center_freq1;
+	u32 center_freq2;
+};
+
+/**
+ * cfg80211_get_chandef_type - return old channel type from chandef
+ * @chandef: the channel definition
+ *
+ * Returns the old channel type (NOHT, HT20, HT40+/-) from a given
+ * chandef, which must have a bandwidth allowing this conversion.
+ */
+static inline enum nl80211_channel_type
+cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef)
+{
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		return NL80211_CHAN_NO_HT;
+	case NL80211_CHAN_WIDTH_20:
+		return NL80211_CHAN_HT20;
+	case NL80211_CHAN_WIDTH_40:
+		if (chandef->center_freq1 > chandef->chan->center_freq)
+			return NL80211_CHAN_HT40PLUS;
+		return NL80211_CHAN_HT40MINUS;
+	default:
+		WARN_ON(1);
+		return NL80211_CHAN_NO_HT;
+	}
+}
+
+/**
+ * cfg80211_chandef_create - create channel definition using channel type
+ * @chandef: the channel definition struct to fill
+ * @channel: the control channel
+ * @chantype: the channel type
+ *
+ * Given a channel type, create a channel definition.
+ */
+void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
+			     struct ieee80211_channel *channel,
+			     enum nl80211_channel_type chantype);
+
+/**
+ * cfg80211_chandef_identical - check if two channel definitions are identical
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Returns %true if the channels defined by the channel definitions are
+ * identical, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
+			   const struct cfg80211_chan_def *chandef2)
+{
+	return (chandef1->chan == chandef2->chan &&
+		chandef1->width == chandef2->width &&
+		chandef1->center_freq1 == chandef2->center_freq1 &&
+		chandef1->center_freq2 == chandef2->center_freq2);
+}
+
+/**
+ * cfg80211_chandef_compatible - check if two channel definitions are compatible
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Returns %NULL if the given channel definitions are incompatible,
+ * chandef1 or chandef2 otherwise.
+ */
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
+			    const struct cfg80211_chan_def *chandef2);
+
+/**
  * enum survey_info_flags - survey information flags
  *
  * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -426,8 +508,7 @@
  *
  * Used to configure an AP interface.
  *
- * @channel: the channel to start the AP on
- * @channel_type: the channel type to use
+ * @chandef: defines the channel to use
  * @beacon: beacon data
  * @beacon_interval: beacon interval
  * @dtim_period: DTIM period
@@ -441,8 +522,7 @@
  * @inactivity_timeout: time in seconds to determine station's inactivity.
  */
 struct cfg80211_ap_settings {
-	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
 
 	struct cfg80211_beacon_data beacon;
 
@@ -498,6 +578,7 @@
  * @plink_action: plink action to take
  * @plink_state: set the peer link state for a station
  * @ht_capa: HT capabilities of station
+ * @vht_capa: VHT capabilities of station
  * @uapsd_queues: bitmap of queues configured for uapsd. same format
  *	as the AC bitmap in the QoS info field
  * @max_sp: max Service Period. same format as the MAX_SP in the
@@ -517,6 +598,7 @@
 	u8 plink_action;
 	u8 plink_state;
 	struct ieee80211_ht_cap *ht_capa;
+	struct ieee80211_vht_cap *vht_capa;
 	u8 uapsd_queues;
 	u8 max_sp;
 };
@@ -580,16 +662,24 @@
  * Used by the driver to indicate the specific rate transmission
  * type for 802.11n transmissions.
  *
- * @RATE_INFO_FLAGS_MCS: @tx_bitrate_mcs filled
- * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 Mhz width transmission
+ * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
+ * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
+ * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 MHz width transmission
+ * @RATE_INFO_FLAGS_80_MHZ_WIDTH: 80 MHz width transmission
+ * @RATE_INFO_FLAGS_80P80_MHZ_WIDTH: 80+80 MHz width transmission
+ * @RATE_INFO_FLAGS_160_MHZ_WIDTH: 160 MHz width transmission
  * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
- * @RATE_INFO_FLAGS_60G: 60gHz MCS
+ * @RATE_INFO_FLAGS_60G: 60GHz MCS
  */
 enum rate_info_flags {
-	RATE_INFO_FLAGS_MCS		= 1<<0,
-	RATE_INFO_FLAGS_40_MHZ_WIDTH	= 1<<1,
-	RATE_INFO_FLAGS_SHORT_GI	= 1<<2,
-	RATE_INFO_FLAGS_60G		= 1<<3,
+	RATE_INFO_FLAGS_MCS			= BIT(0),
+	RATE_INFO_FLAGS_VHT_MCS			= BIT(1),
+	RATE_INFO_FLAGS_40_MHZ_WIDTH		= BIT(2),
+	RATE_INFO_FLAGS_80_MHZ_WIDTH		= BIT(3),
+	RATE_INFO_FLAGS_80P80_MHZ_WIDTH		= BIT(4),
+	RATE_INFO_FLAGS_160_MHZ_WIDTH		= BIT(5),
+	RATE_INFO_FLAGS_SHORT_GI		= BIT(6),
+	RATE_INFO_FLAGS_60G			= BIT(7),
 };
 
 /**
@@ -600,11 +690,13 @@
  * @flags: bitflag of flags from &enum rate_info_flags
  * @mcs: mcs index if struct describes a 802.11n bitrate
  * @legacy: bitrate in 100kbit/s for 802.11abg
+ * @nss: number of streams (VHT only)
  */
 struct rate_info {
 	u8 flags;
 	u8 mcs;
 	u16 legacy;
+	u8 nss;
 };
 
 /**
@@ -907,8 +999,7 @@
 
 /**
  * struct mesh_setup - 802.11s mesh setup configuration
- * @channel: the channel to start the mesh network on
- * @channel_type: the channel type to use
+ * @chandef: defines the channel to use
  * @mesh_id: the mesh ID
  * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
  * @sync_method: which synchronization method to use
@@ -923,8 +1014,7 @@
  * These parameters are fixed when the mesh is created.
  */
 struct mesh_setup {
-	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
 	const u8 *mesh_id;
 	u8 mesh_id_len;
 	u8 sync_method;
@@ -1000,8 +1090,10 @@
  * @n_channels: total number of channels to scan
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
+ * @flags: bit field of flags controlling operation
  * @rates: bitmap of rates to advertise for each band
  * @wiphy: the wiphy this was for
+ * @scan_start: time (in jiffies) when the scan started
  * @wdev: the wireless device to scan for
  * @aborted: (internal) scan request was notified as aborted
  * @no_cck: used to send probe requests at non CCK rate in 2GHz band
@@ -1012,6 +1104,7 @@
 	u32 n_channels;
 	const u8 *ie;
 	size_t ie_len;
+	u32 flags;
 
 	u32 rates[IEEE80211_NUM_BANDS];
 
@@ -1019,6 +1112,7 @@
 
 	/* internal */
 	struct wiphy *wiphy;
+	unsigned long scan_start;
 	bool aborted;
 	bool no_cck;
 
@@ -1044,6 +1138,7 @@
  * @interval: interval between each scheduled scan cycle
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
+ * @flags: bit field of flags controlling operation
  * @match_sets: sets of parameters to be matched for a scan result
  * 	entry to be considered valid and to be passed to the host
  * 	(others are filtered out).
@@ -1061,6 +1156,7 @@
 	u32 interval;
 	const u8 *ie;
 	size_t ie_len;
+	u32 flags;
 	struct cfg80211_match_set *match_sets;
 	int n_match_sets;
 	s32 rssi_thold;
@@ -1068,6 +1164,7 @@
 	/* internal */
 	struct wiphy *wiphy;
 	struct net_device *dev;
+	unsigned long scan_start;
 
 	/* keep last */
 	struct ieee80211_channel *channels[0];
@@ -1152,6 +1249,9 @@
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
  * @key: WEP key for shared key authentication
+ * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
+ *	Authentication transaction sequence number field.
+ * @sae_data_len: Length of sae_data buffer in octets
  */
 struct cfg80211_auth_request {
 	struct cfg80211_bss *bss;
@@ -1160,6 +1260,8 @@
 	enum nl80211_auth_type auth_type;
 	const u8 *key;
 	u8 key_len, key_idx;
+	const u8 *sae_data;
+	size_t sae_data_len;
 };
 
 /**
@@ -1252,8 +1354,7 @@
  * @ssid_len: The length of the SSID, will always be non-zero.
  * @bssid: Fixed BSSID requested, maybe be %NULL, if set do not
  *	search for IBSSs with a different BSSID.
- * @channel: The channel to use if no IBSS can be found to join.
- * @channel_type: channel type (HT mode)
+ * @chandef: defines the channel to use if no other IBSS to join can be found
  * @channel_fixed: The channel should be fixed -- do not search for
  *	IBSSs to join on other channels.
  * @ie: information element(s) to include in the beacon
@@ -1271,8 +1372,7 @@
 struct cfg80211_ibss_params {
 	u8 *ssid;
 	u8 *bssid;
-	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
 	u8 *ie;
 	u8 ssid_len, ie_len;
 	u16 beacon_interval;
@@ -1531,13 +1631,19 @@
  *	to a merge.
  * @leave_ibss: Leave the IBSS.
  *
+ * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or
+ *	MESH mode)
+ *
  * @set_wiphy_params: Notify that wiphy parameters have changed;
  *	@changed bitfield (see &enum wiphy_params_flags) describes which values
  *	have changed. The actual parameter values are available in
  *	struct wiphy. If returning an error, no value should be changed.
  *
  * @set_tx_power: set the transmit power according to the parameters,
- *	the power passed is in mBm, to get dBm use MBM_TO_DBM().
+ *	the power passed is in mBm, to get dBm use MBM_TO_DBM(). The
+ *	wdev may be %NULL if power was set for the wiphy, and will
+ *	always be %NULL unless the driver supports per-vif TX power
+ *	(as advertised by the nl80211 feature flag.)
  * @get_tx_power: store the current TX power into the dbm variable;
  *	return 0 if successful
  *
@@ -1708,8 +1814,7 @@
 					     struct ieee80211_channel *chan);
 
 	int	(*set_monitor_channel)(struct wiphy *wiphy,
-				       struct ieee80211_channel *chan,
-				       enum nl80211_channel_type channel_type);
+				       struct cfg80211_chan_def *chandef);
 
 	int	(*scan)(struct wiphy *wiphy,
 			struct cfg80211_scan_request *request);
@@ -1732,11 +1837,15 @@
 			     struct cfg80211_ibss_params *params);
 	int	(*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
 
+	int	(*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
+				  int rate[IEEE80211_NUM_BANDS]);
+
 	int	(*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
 
-	int	(*set_tx_power)(struct wiphy *wiphy,
+	int	(*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
 				enum nl80211_tx_power_setting type, int mbm);
-	int	(*get_tx_power)(struct wiphy *wiphy, int *dbm);
+	int	(*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+				int *dbm);
 
 	int	(*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
 				const u8 *addr);
@@ -1767,7 +1876,6 @@
 	int	(*remain_on_channel)(struct wiphy *wiphy,
 				     struct wireless_dev *wdev,
 				     struct ieee80211_channel *chan,
-				     enum nl80211_channel_type channel_type,
 				     unsigned int duration,
 				     u64 *cookie);
 	int	(*cancel_remain_on_channel)(struct wiphy *wiphy,
@@ -1776,10 +1884,8 @@
 
 	int	(*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev,
 			  struct ieee80211_channel *chan, bool offchan,
-			  enum nl80211_channel_type channel_type,
-			  bool channel_type_valid, unsigned int wait,
-			  const u8 *buf, size_t len, bool no_cck,
-			  bool dont_wait_for_ack, u64 *cookie);
+			  unsigned int wait, const u8 *buf, size_t len,
+			  bool no_cck, bool dont_wait_for_ack, u64 *cookie);
 	int	(*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
 				       struct wireless_dev *wdev,
 				       u64 cookie);
@@ -1834,10 +1940,9 @@
 	void	(*get_et_strings)(struct wiphy *wiphy, struct net_device *dev,
 				  u32 sset, u8 *data);
 
-	struct ieee80211_channel *
-		(*get_channel)(struct wiphy *wiphy,
+	int	(*get_channel)(struct wiphy *wiphy,
 			       struct wireless_dev *wdev,
-			       enum nl80211_channel_type *type);
+			       struct cfg80211_chan_def *chandef);
 
 	int	(*start_p2p_device)(struct wiphy *wiphy,
 				    struct wireless_dev *wdev);
@@ -2445,8 +2550,7 @@
 	spinlock_t event_lock;
 
 	struct cfg80211_internal_bss *current_bss; /* associated / joined */
-	struct ieee80211_channel *preset_chan;
-	enum nl80211_channel_type preset_chantype;
+	struct cfg80211_chan_def preset_chandef;
 
 	/* for AP and mesh channel tracking */
 	struct ieee80211_channel *channel;
@@ -3326,14 +3430,12 @@
  * @wdev: wireless device
  * @cookie: the request cookie
  * @chan: The current channel (from remain_on_channel request)
- * @channel_type: Channel type
  * @duration: Duration in milliseconds that the driver intents to remain on the
  *	channel
  * @gfp: allocation flags
  */
 void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
 			       struct ieee80211_channel *chan,
-			       enum nl80211_channel_type channel_type,
 			       unsigned int duration, gfp_t gfp);
 
 /**
@@ -3341,12 +3443,10 @@
  * @wdev: wireless device
  * @cookie: the request cookie
  * @chan: The current channel (from remain_on_channel request)
- * @channel_type: Channel type
  * @gfp: allocation flags
  */
 void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
 					struct ieee80211_channel *chan,
-					enum nl80211_channel_type channel_type,
 					gfp_t gfp);
 
 
@@ -3536,7 +3636,6 @@
  * @len: length of the frame
  * @freq: frequency the frame was received on
  * @sig_dbm: signal strength in mBm, or 0 if unknown
- * @gfp: allocation flags
  *
  * Use this function to report to userspace when a beacon was
  * received. It is not useful to call this when there is no
@@ -3544,31 +3643,47 @@
  */
 void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 				 const u8 *frame, size_t len,
-				 int freq, int sig_dbm, gfp_t gfp);
+				 int freq, int sig_dbm);
 
 /**
- * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
+ * cfg80211_reg_can_beacon - check if beaconing is allowed
  * @wiphy: the wiphy
- * @chan: main channel
- * @channel_type: HT mode
+ * @chandef: the channel definition
  *
  * This function returns true if there is no secondary channel or the secondary
- * channel can be used for beaconing (i.e. is not a radar channel etc.)
+ * channel(s) can be used for beaconing (i.e. is not a radar channel etc.)
  */
-bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
-				  struct ieee80211_channel *chan,
-				  enum nl80211_channel_type channel_type);
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+			     struct cfg80211_chan_def *chandef);
 
 /*
  * cfg80211_ch_switch_notify - update wdev channel and notify userspace
  * @dev: the device which switched channels
- * @freq: new channel frequency (in MHz)
- * @type: channel type
+ * @chandef: the new channel definition
  *
  * Acquires wdev_lock, so must only be called from sleepable driver context!
  */
-void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
-			       enum nl80211_channel_type type);
+void cfg80211_ch_switch_notify(struct net_device *dev,
+			       struct cfg80211_chan_def *chandef);
+
+/*
+ * cfg80211_tdls_oper_request - request userspace to perform TDLS operation
+ * @dev: the device on which the operation is requested
+ * @peer: the MAC address of the peer device
+ * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or
+ *	NL80211_TDLS_TEARDOWN)
+ * @reason_code: the reason code for teardown request
+ * @gfp: allocation flags
+ *
+ * This function is used to request that userspace perform a TDLS operation
+ * that requires knowledge of keys, i.e., link setup or teardown when the AP
+ * connection uses encryption. This is an optional mechanism for the driver to
+ * use if it can automatically determine when a TDLS link could be useful
+ * (e.g., based on traffic and signal strength for a peer).
+ */
+void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
+				enum nl80211_tdls_operation oper,
+				u16 reason_code, gfp_t gfp);
 
 /*
  * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
@@ -3594,6 +3709,26 @@
  */
 void cfg80211_unregister_wdev(struct wireless_dev *wdev);
 
+/**
+ * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
+ * @ies: the input IE buffer
+ * @len: the input length
+ * @attr: the attribute ID to find
+ * @buf: output buffer, can be %NULL if the data isn't needed, e.g.
+ *	if the function is only called to get the needed buffer size
+ * @bufsize: size of the output buffer
+ *
+ * The function finds a given P2P attribute in the (vendor) IEs and
+ * copies its contents to the given buffer.
+ *
+ * The return value is a negative error code (-%EILSEQ or -%ENOENT) if
+ * the data is malformed or the attribute can't be found (respectively),
+ * or the length of the found attribute (which can be zero).
+ */
+int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
+			  enum ieee80211_p2p_attr_id attr,
+			  u8 *buf, unsigned int bufsize);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
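struct cfg80211_chan_def introduced above replaces the (channel, channel_type) pair throughout the API. A short sketch of the round trip between the legacy HT channel types and a chandef; it assumes chan is a valid control channel obtained elsewhere, and the round-trip property follows from the conversion rules documented above.

#include <net/cfg80211.h>

static bool example_ht40plus_roundtrip(struct cfg80211_chan_def *chandef,
				       struct ieee80211_channel *chan)
{
	/* build a chandef from the legacy channel type */
	cfg80211_chandef_create(chandef, chan, NL80211_CHAN_HT40PLUS);

	/* for HT40+ the 40 MHz center lies above the control channel,
	 * so converting back should yield HT40+ again */
	return cfg80211_get_chandef_type(chandef) == NL80211_CHAN_HT40PLUS;
}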
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index b6a6eeb..2581638 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -24,12 +24,12 @@
 	u32 classid;
 };
 
-extern void sock_update_classid(struct sock *sk);
+extern void sock_update_classid(struct sock *sk, struct task_struct *task);
 
 #if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
 static inline u32 task_cls_classid(struct task_struct *p)
 {
-	int classid;
+	u32 classid;
 
 	if (in_interrupt())
 		return 0;
@@ -61,7 +61,7 @@
 }
 #endif
 #else /* !CGROUP_NET_CLS_CGROUP */
-static inline void sock_update_classid(struct sock *sk)
+static inline void sock_update_classid(struct sock *sk, struct task_struct *task)
 {
 }
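sock_update_classid() now takes the task whose cgroup classid should be copied instead of implicitly using current. A minimal sketch of a caller; passing current is the common case at socket creation, but that choice is an assumption here, not something this header mandates.

#include <linux/sched.h>
#include <net/cls_cgroup.h>

static void example_init_sock_classid(struct sock *sk)
{
	/* tag the socket with the classid of the creating task */
	sock_update_classid(sk, current);
}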
 
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index bc1b0fd..652d3d3 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -31,6 +31,8 @@
 #include <net/ip.h>
 #include <asm/checksum.h>
 #include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
 
 #ifndef _HAVE_ARCH_IPV6_CSUM
 
@@ -91,4 +93,37 @@
 }
 
 #endif
+
+static __inline__ __sum16 tcp_v6_check(int len,
+				   const struct in6_addr *saddr,
+				   const struct in6_addr *daddr,
+				   __wsum base)
+{
+	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
+}
+
+static inline void __tcp_v6_send_check(struct sk_buff *skb,
+				       const struct in6_addr *saddr,
+				       const struct in6_addr *daddr)
+{
+	struct tcphdr *th = tcp_hdr(skb);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+		skb->csum_start = skb_transport_header(skb) - skb->head;
+		skb->csum_offset = offsetof(struct tcphdr, check);
+	} else {
+		th->check = tcp_v6_check(skb->len, saddr, daddr,
+					 csum_partial(th, th->doff << 2,
+						      skb->csum));
+	}
+}
+
+static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+}
+
 #endif
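The helpers added above centralize the TCP-over-IPv6 pseudo-header checksum. A sketch of the fully software-computed case in isolation (the TCP header and payload are assumed to be contiguous and already filled in):

#include <net/ip6_checksum.h>

static void example_fill_tcp6_csum(struct tcphdr *th, int tcplen,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr)
{
	th->check = 0;
	/* sum the TCP header + payload, then fold in the IPv6
	 * pseudo-header via tcp_v6_check()/csum_ipv6_magic() */
	th->check = tcp_v6_check(tcplen, saddr, daddr,
				 csum_partial(th, tcplen, 0));
}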
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 8a2a203..fdc48a9 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -47,6 +47,8 @@
 	unsigned long	fc_expires;
 	struct nlattr	*fc_mx;
 	int		fc_mx_len;
+	int		fc_mp_len;
+	struct nlattr	*fc_mp;
 
 	struct nl_info	fc_nlinfo;
 };
@@ -99,6 +101,14 @@
 
 	struct in6_addr			rt6i_gateway;
 
+	/* Multipath routes:
+	 * siblings is a list of rt6_info that have the same metric/weight,
+	 * destination, but not the same gateway. nsiblings is just a cache
+	 * to speed up lookup.
+	 */
+	struct list_head		rt6i_siblings;
+	unsigned int			rt6i_nsiblings;
+
 	atomic_t			rt6i_ref;
 
 	/* These are in a separate cache line. */
@@ -107,7 +117,6 @@
 	struct rt6key			rt6i_src;
 	struct rt6key			rt6i_prefsrc;
 	u32				rt6i_metric;
-	u32				rt6i_peer_genid;
 
 	struct inet6_dev		*rt6i_idev;
 	unsigned long			_rt6i_peer;
@@ -203,6 +212,15 @@
 	dst_hold(new);
 }
 
+static inline void ip6_rt_put(struct rt6_info *rt)
+{
+	/* dst_release() accepts a NULL parameter.
+	 * We rely on dst being the first member of struct rt6_info.
+	 */
+	BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0);
+	dst_release(&rt->dst);
+}
+
 struct fib6_walker_t {
 	struct list_head lh;
 	struct fib6_node *root, *node;
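ip6_rt_put() above saves callers from reaching into rt->dst, and rt6i_siblings links the equal-cost multipath routes that share metric and destination. A sketch that walks the siblings of a held route and then drops the reference (locking is ignored; this is illustrative only):

#include <net/ip6_fib.h>

static unsigned int example_count_siblings(struct rt6_info *rt)
{
	struct rt6_info *sibling;
	unsigned int n = 0;

	list_for_each_entry(sibling, &rt->rt6i_siblings, rt6i_siblings)
		n++;

	/* equivalent to dst_release(&rt->dst) */
	ip6_rt_put(rt);

	/* n should match the cached rt6i_nsiblings */
	return n;
}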
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5fa2af0..27d8318 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -1,9 +1,6 @@
 #ifndef _NET_IP6_ROUTE_H
 #define _NET_IP6_ROUTE_H
 
-#define IP6_RT_PRIO_USER	1024
-#define IP6_RT_PRIO_ADDRCONF	256
-
 struct route_info {
 	__u8			type;
 	__u8			length;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ee75ccd..68c69d5 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -22,7 +22,10 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>			/* for struct ipv6hdr */
 #include <net/ipv6.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS_IPV6)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
 #endif
 #include <net/net_namespace.h>		/* Netw namespace */
@@ -103,30 +106,117 @@
 /* Connections' size value needed by ip_vs_ctl.c */
 extern int ip_vs_conn_tab_size;
 
-
 struct ip_vs_iphdr {
-	int len;
-	__u8 protocol;
+	__u32 len;	/* IPv4: where L4 starts;
+			   IPv6: where the L4 transport header starts */
+	__u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
+	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
+	__s16 protocol;
+	__s32 flags;
 	union nf_inet_addr saddr;
 	union nf_inet_addr daddr;
 };
 
+/* Dependency on module: nf_defrag_ipv6 */
+#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+{
+	return skb->nfct_reasm;
+}
+static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+				      int len, void *buffer,
+				      const struct ip_vs_iphdr *ipvsh)
+{
+	if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
+		return skb_header_pointer(skb_nfct_reasm(skb),
+					  ipvsh->thoff_reasm, len, buffer);
+
+	return skb_header_pointer(skb, offset, len, buffer);
+}
+#else
+static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
+{
+	return NULL;
+}
+static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+				      int len, void *buffer,
+				      const struct ip_vs_iphdr *ipvsh)
+{
+	return skb_header_pointer(skb, offset, len, buffer);
+}
+#endif
+
 static inline void
-ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
+ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
+{
+	const struct iphdr *iph = nh;
+
+	iphdr->len	= iph->ihl * 4;
+	iphdr->fragoffs	= 0;
+	iphdr->protocol	= iph->protocol;
+	iphdr->saddr.ip	= iph->saddr;
+	iphdr->daddr.ip	= iph->daddr;
+}
+
+/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
+ * IPv6 requires some extra work, as finding the proper header position
+ * depends on the IPv6 extension headers.
+ */
+static inline void
+ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		const struct ipv6hdr *iph = nh;
-		iphdr->len = sizeof(struct ipv6hdr);
-		iphdr->protocol = iph->nexthdr;
+		const struct ipv6hdr *iph =
+			(struct ipv6hdr *)skb_network_header(skb);
+		iphdr->saddr.in6 = iph->saddr;
+		iphdr->daddr.in6 = iph->daddr;
+		/* ipv6_find_hdr() updates len, flags, thoff_reasm */
+		iphdr->thoff_reasm = 0;
+		iphdr->len	 = 0;
+		iphdr->flags	 = 0;
+		iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
+						 &iphdr->fragoffs,
+						 &iphdr->flags);
+		/* get proto from the re-assembled packet and its offset */
+		if (skb_nfct_reasm(skb))
+			iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
+							&iphdr->thoff_reasm,
+							-1, NULL, NULL);
+
+	} else
+#endif
+	{
+		const struct iphdr *iph =
+			(struct iphdr *)skb_network_header(skb);
+		iphdr->len	= iph->ihl * 4;
+		iphdr->fragoffs	= 0;
+		iphdr->protocol	= iph->protocol;
+		iphdr->saddr.ip	= iph->saddr;
+		iphdr->daddr.ip	= iph->daddr;
+	}
+}
+
+/* This function is a faster version of ip_vs_fill_iph_skb().
+ * It only populates {s,d}addr (and avoids calling ipv6_find_hdr()).
+ * This is used by some of the ip_vs_*_schedule() functions.
+ * (Mostly done to avoid ABI breakage of external schedulers)
+ */
+static inline void
+ip_vs_fill_iph_addr_only(int af, const struct sk_buff *skb,
+			 struct ip_vs_iphdr *iphdr)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6) {
+		const struct ipv6hdr *iph =
+			(struct ipv6hdr *)skb_network_header(skb);
 		iphdr->saddr.in6 = iph->saddr;
 		iphdr->daddr.in6 = iph->daddr;
 	} else
 #endif
 	{
-		const struct iphdr *iph = nh;
-		iphdr->len = iph->ihl * 4;
-		iphdr->protocol = iph->protocol;
+		const struct iphdr *iph =
+			(struct iphdr *)skb_network_header(skb);
 		iphdr->saddr.ip = iph->saddr;
 		iphdr->daddr.ip = iph->daddr;
 	}
@@ -165,7 +255,7 @@
 	int len;
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
-		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6]",
+		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
 			       &addr->in6) + 1;
 	else
 #endif
@@ -398,27 +488,26 @@
 
 	int (*conn_schedule)(int af, struct sk_buff *skb,
 			     struct ip_vs_proto_data *pd,
-			     int *verdict, struct ip_vs_conn **cpp);
+			     int *verdict, struct ip_vs_conn **cpp,
+			     struct ip_vs_iphdr *iph);
 
 	struct ip_vs_conn *
 	(*conn_in_get)(int af,
 		       const struct sk_buff *skb,
 		       const struct ip_vs_iphdr *iph,
-		       unsigned int proto_off,
 		       int inverse);
 
 	struct ip_vs_conn *
 	(*conn_out_get)(int af,
 			const struct sk_buff *skb,
 			const struct ip_vs_iphdr *iph,
-			unsigned int proto_off,
 			int inverse);
 
-	int (*snat_handler)(struct sk_buff *skb,
-			    struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
 
-	int (*dnat_handler)(struct sk_buff *skb,
-			    struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
 
 	int (*csum_check)(int af, struct sk_buff *skb,
 			  struct ip_vs_protocol *pp);
@@ -518,7 +607,7 @@
 	   NF_ACCEPT can be returned when destination is local.
 	 */
 	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
-			   struct ip_vs_protocol *pp);
+			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
 
 	/* Note: we can group the following members into a structure,
 	   in order to save more space, and the following members are
@@ -769,13 +858,11 @@
 
 	struct ip_vs_conn *
 	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
-		       const struct iphdr *iph, unsigned int proto_off,
-		       int inverse);
+		       const struct iphdr *iph, int inverse);
 
 	struct ip_vs_conn *
 	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
-			const struct iphdr *iph, unsigned int proto_off,
-			int inverse);
+			const struct iphdr *iph, int inverse);
 
 	int (*state_transition)(struct ip_vs_conn *cp, int direction,
 				const struct sk_buff *skb,
@@ -1074,14 +1161,12 @@
 
 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
 					    const struct ip_vs_iphdr *iph,
-					    unsigned int proto_off,
 					    int inverse);
 
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
 					     const struct ip_vs_iphdr *iph,
-					     unsigned int proto_off,
 					     int inverse);
 
 /* put back the conn without restarting its timer */
@@ -1254,9 +1339,10 @@
 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
 extern struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-	       struct ip_vs_proto_data *pd, int *ignored);
+	       struct ip_vs_proto_data *pd, int *ignored,
+	       struct ip_vs_iphdr *iph);
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-			struct ip_vs_proto_data *pd);
+			struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
 
 extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
@@ -1315,33 +1401,38 @@
 /*
  *	Various IPVS packet transmitters (from ip_vs_xmit.c)
  */
-extern int ip_vs_null_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_bypass_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_nat_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_tunnel_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_dr_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_icmp_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
- int offset, unsigned int hooknum);
+extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			     struct ip_vs_protocol *pp,
+			     struct ip_vs_iphdr *iph);
+extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			     struct ip_vs_protocol *pp,
+			     struct ip_vs_iphdr *iph);
+extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+			   struct ip_vs_protocol *pp, int offset,
+			   unsigned int hooknum, struct ip_vs_iphdr *iph);
 extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_nat_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_tunnel_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_dr_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_icmp_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
- int offset, unsigned int hooknum);
+extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+				struct ip_vs_protocol *pp,
+				struct ip_vs_iphdr *iph);
+extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+			     struct ip_vs_protocol *pp,
+			     struct ip_vs_iphdr *iph);
+extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+				struct ip_vs_protocol *pp,
+				struct ip_vs_iphdr *iph);
+extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+			    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+			      struct ip_vs_protocol *pp, int offset,
+			      unsigned int hooknum, struct ip_vs_iphdr *iph);
 #endif
 
 #ifdef CONFIG_SYSCTL
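Most of the IPVS signature changes above come down to parsing the packet once into struct ip_vs_iphdr and passing that around instead of a raw protocol offset. A sketch of the new calling pattern (error handling and the netfilter hook boilerplate are omitted):

#include <net/ip_vs.h>

static struct ip_vs_conn *example_lookup(int af, const struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;

	/* parse the network header once; for IPv6 this walks the
	 * extension headers and records the fragment offset too */
	ip_vs_fill_iph_skb(af, skb, &iph);

	/* connection lookup now takes the parsed header */
	return ip_vs_conn_in_get_proto(af, skb, &iph, 0 /* !inverse */);
}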
diff --git a/include/net/ipip.h b/include/net/ipip.h
index ddc077c..21947cf 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -48,25 +48,27 @@
 	struct rcu_head			rcu_head;
 };
 
-#define __IPTUNNEL_XMIT(stats1, stats2) do {				\
-	int err;							\
-	int pkt_len = skb->len - skb_transport_offset(skb);		\
-									\
-	skb->ip_summed = CHECKSUM_NONE;					\
-	ip_select_ident(iph, &rt->dst, NULL);				\
-									\
-	err = ip_local_out(skb);					\
-	if (likely(net_xmit_eval(err) == 0)) {				\
-		u64_stats_update_begin(&(stats1)->syncp);		\
-		(stats1)->tx_bytes += pkt_len;				\
-		(stats1)->tx_packets++;					\
-		u64_stats_update_end(&(stats1)->syncp);			\
-	} else {							\
-		(stats2)->tx_errors++;					\
-		(stats2)->tx_aborted_errors++;				\
-	}								\
-} while (0)
+static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int err;
+	struct iphdr *iph = ip_hdr(skb);
+	int pkt_len = skb->len - skb_transport_offset(skb);
+	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 
-#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
+	nf_reset(skb);
+	skb->ip_summed = CHECKSUM_NONE;
+	ip_select_ident(iph, skb_dst(skb), NULL);
+
+	err = ip_local_out(skb);
+	if (likely(net_xmit_eval(err) == 0)) {
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+	}
+}
 
 #endif
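The __IPTUNNEL_XMIT() macro becomes the static inline iptunnel_xmit() above, which charges the device's per-CPU tstats itself. A sketch of the tail of a tunnel transmit path using it; building the outer IP header and setting skb_dst() are assumed to have happened already.

#include <linux/netdevice.h>
#include <net/ipip.h>

static netdev_tx_t example_tunnel_xmit_tail(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* outer iphdr already pushed and skb_dst() set by the caller */
	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;
}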
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82558c8..db7680a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -144,6 +144,39 @@
 };
 
 /**
+ * enum ieee80211_chanctx_change - change flag for channel context
+ * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
+ */
+enum ieee80211_chanctx_change {
+	IEEE80211_CHANCTX_CHANGE_WIDTH		= BIT(0),
+	IEEE80211_CHANCTX_CHANGE_RX_CHAINS	= BIT(1),
+};
+
+/**
+ * struct ieee80211_chanctx_conf - channel context that vifs may be tuned to
+ *
+ * This is the driver-visible part. The ieee80211_chanctx
+ * that contains it is visible in mac80211 only.
+ *
+ * @def: the channel definition
+ * @rx_chains_static: The number of RX chains that must always be
+ *	active on the channel to receive MIMO transmissions
+ * @rx_chains_dynamic: The number of RX chains that must be enabled
+ *	after RTS/CTS handshake to receive SMPS MIMO transmissions;
+ *	this will always be >= @rx_chains_static.
+ * @drv_priv: data area for driver use, will always be aligned to
+ *	sizeof(void *), size is determined in hw information.
+ */
+struct ieee80211_chanctx_conf {
+	struct cfg80211_chan_def def;
+
+	u8 rx_chains_static, rx_chains_dynamic;
+
+	u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
+};
+
+/**
  * enum ieee80211_bss_change - BSS change notification flags
  *
  * These flags are used with the bss_info_changed() callback
@@ -172,6 +205,9 @@
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
  * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
  * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
+ * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
+ * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
+ *	changed (currently only in P2P client mode; GO mode will be added later)
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
@@ -192,6 +228,8 @@
 	BSS_CHANGED_SSID		= 1<<15,
 	BSS_CHANGED_AP_PROBE_RESP	= 1<<16,
 	BSS_CHANGED_PS			= 1<<17,
+	BSS_CHANGED_TXPOWER		= 1<<18,
+	BSS_CHANGED_P2P_PS		= 1<<19,
 
 	/* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -223,6 +261,7 @@
  * @assoc: association status
  * @ibss_joined: indicates whether this station is part of an IBSS
  *	or not
+ * @ibss_creator: indicates if a new IBSS network is being created
  * @aid: association ID number, valid only when @assoc is true
  * @use_cts_prot: use CTS protection
  * @use_short_preamble: use 802.11b short preamble;
@@ -247,9 +286,8 @@
  * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
  * @bssid: The BSSID for this BSS
  * @enable_beacon: whether beaconing should be enabled or not
- * @channel_type: Channel type for this BSS -- the hardware might be
- *	configured for HT40+ while this BSS only uses no-HT, for
- *	example.
+ * @chandef: Channel definition for this BSS -- the hardware might be
+ *	configured for a higher bandwidth than this BSS uses, for example.
  * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
  *	This field is only valid when the channel type is one of the HT types.
  * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
@@ -273,11 +311,15 @@
  * @ssid: The SSID of the current vif. Only valid in AP-mode.
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
+ * @txpower: TX power in dBm
+ * @p2p_ctwindow: P2P CTWindow, only for P2P client interfaces
+ * @p2p_oppps: P2P opportunistic PS is enabled
  */
 struct ieee80211_bss_conf {
 	const u8 *bssid;
 	/* association related data */
 	bool assoc, ibss_joined;
+	bool ibss_creator;
 	u16 aid;
 	/* erp related data */
 	bool use_cts_prot;
@@ -294,7 +336,7 @@
 	u16 ht_operation_mode;
 	s32 cqm_rssi_thold;
 	u32 cqm_rssi_hyst;
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
 	__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
 	u8 arp_addr_cnt;
 	bool arp_filter_enabled;
@@ -304,6 +346,9 @@
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	size_t ssid_len;
 	bool hidden_ssid;
+	int txpower;
+	u8 p2p_ctwindow;
+	bool p2p_oppps;
 };
 
 /**
@@ -454,9 +499,14 @@
  *	This is set if the current BSS requires ERP protection.
  * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
  * @IEEE80211_TX_RC_MCS: HT rate.
+ * @IEEE80211_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is split
+ *	into the higher 4 bits (Nss) and the lower 4 bits (MCS number)
  * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in
  *	Greenfield mode.
  * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz.
+ * @IEEE80211_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @IEEE80211_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ *	(80+80 isn't supported yet)
  * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the
  *	adjacent 20 MHz channels, if the current channel type is
  *	NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
@@ -467,12 +517,15 @@
 	IEEE80211_TX_RC_USE_CTS_PROTECT		= BIT(1),
 	IEEE80211_TX_RC_USE_SHORT_PREAMBLE	= BIT(2),
 
-	/* rate index is an MCS rate number instead of an index */
+	/* rate index is an HT/VHT MCS instead of an index */
 	IEEE80211_TX_RC_MCS			= BIT(3),
 	IEEE80211_TX_RC_GREEN_FIELD		= BIT(4),
 	IEEE80211_TX_RC_40_MHZ_WIDTH		= BIT(5),
 	IEEE80211_TX_RC_DUP_DATA		= BIT(6),
 	IEEE80211_TX_RC_SHORT_GI		= BIT(7),
+	IEEE80211_TX_RC_VHT_MCS			= BIT(8),
+	IEEE80211_TX_RC_80_MHZ_WIDTH		= BIT(9),
+	IEEE80211_TX_RC_160_MHZ_WIDTH		= BIT(10),
 };
 
 
@@ -515,10 +568,32 @@
  */
 struct ieee80211_tx_rate {
 	s8 idx;
-	u8 count;
-	u8 flags;
+	u16 count:5,
+	    flags:11;
 } __packed;
 
+#define IEEE80211_MAX_TX_RETRY		31
+
+static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
+					  u8 mcs, u8 nss)
+{
+	WARN_ON(mcs & ~0xF);
+	WARN_ON(nss & ~0x7);
+	rate->idx = (nss << 4) | mcs;
+}
+
+static inline u8
+ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *rate)
+{
+	return rate->idx & 0xF;
+}
+
+static inline u8
+ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
+{
+	return rate->idx >> 4;
+}
+
 /**
  * struct ieee80211_tx_info - skb transmit information
  *
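The VHT rate helpers just added pack the stream count into the upper nibble of idx and the MCS into the lower nibble. A sketch of filling a TX rate entry with them (the values are arbitrary; this is not code from the patch):

#include <net/mac80211.h>

static void example_set_vht_rate(struct ieee80211_tx_rate *rate)
{
	/* VHT MCS 7 with 2 spatial streams, 80 MHz, short GI */
	ieee80211_rate_set_vht(rate, 7, 2);
	rate->flags = IEEE80211_TX_RC_VHT_MCS |
		      IEEE80211_TX_RC_80_MHZ_WIDTH |
		      IEEE80211_TX_RC_SHORT_GI;
	rate->count = 1;

	WARN_ON(ieee80211_rate_get_vht_mcs(rate) != 7);
	WARN_ON(ieee80211_rate_get_vht_nss(rate) != 2);
}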
@@ -663,13 +738,20 @@
  *	the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
  *	the frame.
- * @RX_FLAG_MACTIME_MPDU: The timestamp passed in the RX status (@mactime
+ * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
  *	field) is valid and contains the time the first symbol of the MPDU
  *	was received. This is useful in monitor mode and for proper IBSS
  *	merging.
+ * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
+ *	field) is valid and contains the time the last symbol of the MPDU
+ *	(including FCS) was received.
  * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
+ * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
  * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
+ * @RX_FLAG_80MHZ: 80 MHz was used
+ * @RX_FLAG_80P80MHZ: 80+80 MHz was used
+ * @RX_FLAG_160MHZ: 160 MHz was used
  * @RX_FLAG_SHORT_GI: Short guard interval was used
  * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
  *	Valid only for data frames (mainly A-MPDU)
@@ -697,7 +779,7 @@
 	RX_FLAG_IV_STRIPPED		= BIT(4),
 	RX_FLAG_FAILED_FCS_CRC		= BIT(5),
 	RX_FLAG_FAILED_PLCP_CRC 	= BIT(6),
-	RX_FLAG_MACTIME_MPDU		= BIT(7),
+	RX_FLAG_MACTIME_START		= BIT(7),
 	RX_FLAG_SHORTPRE		= BIT(8),
 	RX_FLAG_HT			= BIT(9),
 	RX_FLAG_40MHZ			= BIT(10),
@@ -711,6 +793,11 @@
 	RX_FLAG_AMPDU_IS_LAST		= BIT(18),
 	RX_FLAG_AMPDU_DELIM_CRC_ERROR	= BIT(19),
 	RX_FLAG_AMPDU_DELIM_CRC_KNOWN	= BIT(20),
+	RX_FLAG_MACTIME_END		= BIT(21),
+	RX_FLAG_VHT			= BIT(22),
+	RX_FLAG_80MHZ			= BIT(23),
+	RX_FLAG_80P80MHZ		= BIT(24),
+	RX_FLAG_160MHZ			= BIT(25),
 };
 
 /**
@@ -731,25 +818,39 @@
  *	@IEEE80211_HW_SIGNAL_*
  * @antenna: antenna used
  * @rate_idx: index of data rate into band's supported rates or MCS index if
- *	HT rates are use (RX_FLAG_HT)
+ *	HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
+ * @vht_nss: number of streams (VHT only)
  * @flag: %RX_FLAG_*
  * @rx_flags: internal RX flags for mac80211
  * @ampdu_reference: A-MPDU reference number, must be a different value for
  *	each A-MPDU but the same for each subframe within one A-MPDU
  * @ampdu_delimiter_crc: A-MPDU delimiter CRC
+ * @vendor_radiotap_bitmap: radiotap vendor namespace presence bitmap
+ * @vendor_radiotap_len: radiotap vendor namespace length
+ * @vendor_radiotap_align: radiotap vendor namespace alignment. Note
+ *	that the actual data must be at the start of the SKB data
+ *	already.
+ * @vendor_radiotap_oui: radiotap vendor namespace OUI
+ * @vendor_radiotap_subns: radiotap vendor sub namespace
  */
 struct ieee80211_rx_status {
 	u64 mactime;
 	u32 device_timestamp;
 	u32 ampdu_reference;
 	u32 flag;
+	u32 vendor_radiotap_bitmap;
+	u16 vendor_radiotap_len;
 	u16 freq;
 	u8 rate_idx;
+	u8 vht_nss;
 	u8 rx_flags;
 	u8 band;
 	u8 antenna;
 	s8 signal;
 	u8 ampdu_delimiter_crc;
+	u8 vendor_radiotap_align;
+	u8 vendor_radiotap_oui[3];
+	u8 vendor_radiotap_subns;
 };
 
 /**
@@ -794,6 +895,8 @@
  * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
  * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
  * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
+ *	Note that this is only valid if channel contexts are not used,
+ *	otherwise each channel context has the number of chains listed.
  */
 enum ieee80211_conf_changed {
 	IEEE80211_CONF_CHANGE_SMPS		= BIT(1),
@@ -845,7 +948,8 @@
  *	powersave documentation below. This variable is valid only when
  *	the CONF_PS flag is set.
  *
- * @power_level: requested transmit power (in dBm)
+ * @power_level: requested transmit power (in dBm); backward-compatibility
+ *	value only, set to the minimum over all interfaces
  *
  * @channel: the channel to tune to
  * @channel_type: the channel (HT) type
@@ -859,7 +963,9 @@
  *
  * @smps_mode: spatial multiplexing powersave mode; note that
  *	%IEEE80211_SMPS_STATIC is used when the device is not
- *	configured for an HT channel
+ *	configured for an HT channel.
+ *	Note that this is only valid if channel contexts are not used,
+ *	otherwise each channel context has the number of chains listed.
  */
 struct ieee80211_conf {
 	u32 flags;
@@ -931,6 +1037,11 @@
  *	at runtime, mac80211 will never touch this field
  * @hw_queue: hardware queue for each AC
  * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
+ * @chanctx_conf: The channel context this interface is assigned to, or %NULL
+ *	when it is not assigned. This pointer is RCU-protected due to the TX
+ *	path needing to access it; even though the netdev carrier will always
+ *	be off when it is %NULL there can still be races and packets could be
+ *	processed after it switches back to %NULL.
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void *).
  */
@@ -943,6 +1054,8 @@
 	u8 cab_queue;
 	u8 hw_queue[IEEE80211_NUM_ACS];
 
+	struct ieee80211_chanctx_conf __rcu *chanctx_conf;
+
 	u32 driver_flags;
 
 	/* must be last */
@@ -1076,6 +1189,8 @@
  * @aid: AID we assigned to the station if we're an AP
  * @supp_rates: Bitmap of supported rates (per band)
  * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities
+ * @vht_cap: VHT capabilities of this STA; the capabilities of the remote
+ *	STA are not restricted and are taken as-is
  * @wme: indicates whether the STA supports WME. Only valid during AP-mode.
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void *), size is determined in hw information.
@@ -1088,6 +1203,7 @@
 	u8 addr[ETH_ALEN];
 	u16 aid;
 	struct ieee80211_sta_ht_cap ht_cap;
+	struct ieee80211_sta_vht_cap vht_cap;
 	bool wme;
 	u8 uapsd_queues;
 	u8 max_sp;
@@ -1325,6 +1441,8 @@
  *	within &struct ieee80211_vif.
  * @sta_data_size: size (in bytes) of the drv_priv data area
  *	within &struct ieee80211_sta.
+ * @chanctx_data_size: size (in bytes) of the drv_priv data area
+ *	within &struct ieee80211_chanctx_conf.
  *
  * @max_rates: maximum number of alternate rate retry stages the hw
  *	can handle.
@@ -1369,6 +1487,7 @@
 	int channel_change_time;
 	int vif_data_size;
 	int sta_data_size;
+	int chanctx_data_size;
 	int napi_weight;
 	u16 queues;
 	u16 max_listen_interval;
@@ -2126,6 +2245,14 @@
  * @sta_remove: Notifies low level driver about removal of an associated
  *	station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
  *
+ * @sta_add_debugfs: Drivers can use this callback to add debugfs files
+ *	when a station is added to mac80211's station list. This callback
+ *	and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
+ *	conditional. This callback can sleep.
+ *
+ * @sta_remove_debugfs: Remove the debugfs files which were added using
+ *	@sta_add_debugfs. This callback can sleep.
+ *
  * @sta_notify: Notifies low level driver about power state transition of an
  *	associated station, AP,  IBSS/WDS/mesh peer etc. For a VIF operating
  *	in AP mode, this callback will not be called when the flag
@@ -2317,6 +2444,27 @@
  *	The callback will be called before each transmission and upon return
  *	mac80211 will transmit the frame right away.
  *	The callback is optional and can (should!) sleep.
+ *
+ * @add_chanctx: Notifies device driver about new channel context creation.
+ * @remove_chanctx: Notifies device driver about channel context destruction.
+ * @change_chanctx: Notifies device driver about channel context changes that
+ *	may happen when combining different virtual interfaces on the same
+ *	channel context with different settings
+ * @assign_vif_chanctx: Notifies device driver about channel context being bound
+ *	to vif. Possible use is for hw queue remapping.
+ * @unassign_vif_chanctx: Notifies device driver about channel context being
+ *	unbound from vif.
+ * @start_ap: Start operation on the AP interface; this is called after all the
+ *	information in bss_conf is set and the beacon can be retrieved. A channel
+ *	context is bound before this is called. Note that if the driver uses
+ *	software scan or ROC, this (and @stop_ap) isn't called when the AP is
+ *	just "paused" for scanning/ROC, which is indicated by the beacon being
+ *	disabled/enabled via @bss_info_changed.
+ * @stop_ap: Stop operation on the AP interface.
+ *
+ * @restart_complete: Called after a call to ieee80211_restart_hw(), when the
+ *	reconfiguration has completed. This can help the driver implement the
+ *	reconfiguration step. This callback may sleep.
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw,
@@ -2342,6 +2490,9 @@
 				 struct ieee80211_bss_conf *info,
 				 u32 changed);
 
+	int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+	void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+
 	u64 (*prepare_multicast)(struct ieee80211_hw *hw,
 				 struct netdev_hw_addr_list *mc_list);
 	void (*configure_filter)(struct ieee80211_hw *hw,
@@ -2383,6 +2534,16 @@
 		       struct ieee80211_sta *sta);
 	int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			  struct ieee80211_sta *sta);
+#ifdef CONFIG_MAC80211_DEBUGFS
+	void (*sta_add_debugfs)(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				struct dentry *dir);
+	void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   struct dentry *dir);
+#endif
 	void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum sta_notify_cmd, struct ieee80211_sta *sta);
 	int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2424,8 +2585,8 @@
 	int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
 
 	int (*remain_on_channel)(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
 				 struct ieee80211_channel *chan,
-				 enum nl80211_channel_type channel_type,
 				 int duration);
 	int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
 	int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
@@ -2461,6 +2622,22 @@
 
 	void	(*mgd_prepare_tx)(struct ieee80211_hw *hw,
 				  struct ieee80211_vif *vif);
+
+	int (*add_chanctx)(struct ieee80211_hw *hw,
+			   struct ieee80211_chanctx_conf *ctx);
+	void (*remove_chanctx)(struct ieee80211_hw *hw,
+			       struct ieee80211_chanctx_conf *ctx);
+	void (*change_chanctx)(struct ieee80211_hw *hw,
+			       struct ieee80211_chanctx_conf *ctx,
+			       u32 changed);
+	int (*assign_vif_chanctx)(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_chanctx_conf *ctx);
+	void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_chanctx_conf *ctx);
+
+	void (*restart_complete)(struct ieee80211_hw *hw);
 };
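
A driver opting into channel contexts wires up the new callbacks roughly as follows (a skeleton only; the drv_* names are hypothetical):

static int drv_add_chanctx(struct ieee80211_hw *hw,
			   struct ieee80211_chanctx_conf *ctx)
{
	/* program the hardware for the context's channel; ctx->drv_priv
	 * (sized by hw->chanctx_data_size) is available for driver state */
	return 0;
}

static void drv_remove_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *ctx)
{
	/* release whatever drv_add_chanctx set up */
}

static const struct ieee80211_ops drv_ops = {
	/* ... the usual mandatory callbacks ... */
	.add_chanctx	= drv_add_chanctx,
	.remove_chanctx	= drv_remove_chanctx,
};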
 
 /**
@@ -3145,6 +3322,19 @@
 			    struct sk_buff *skb, u8 *p2k);
 
 /**
+ * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
+ *
+ * This function computes the two AES-CMAC sub-keys, based on the
+ * previously installed master key.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @k1: a buffer to be filled with the 1st sub-key
+ * @k2: a buffer to be filled with the 2nd sub-key
+ */
+void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
+					u8 *k1, u8 *k2);
+
+/**
  * struct ieee80211_key_seq - key sequence counter
  *
  * @tkip: TKIP data, containing IV32 and IV16 in host byte order
@@ -3294,6 +3484,21 @@
 void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
 
 /**
+ * enum ieee80211_interface_iteration_flags - interface iteration flags
+ * @IEEE80211_IFACE_ITER_NORMAL: Iterate over all interfaces that have
+ *	been added to the driver; however, note that during hardware
+ *	reconfiguration (after restart_hw) it will iterate over a new
+ *	interface and over all the existing interfaces even if they
+ *	haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
+ *	interfaces, even if they haven't been re-added to the driver yet.
+ */
+enum ieee80211_interface_iteration_flags {
+	IEEE80211_IFACE_ITER_NORMAL	= 0,
+	IEEE80211_IFACE_ITER_RESUME_ALL	= BIT(0),
+};
+
+/**
  * ieee80211_iterate_active_interfaces - iterate active interfaces
  *
  * This function iterates over the interfaces associated with a given
@@ -3301,13 +3506,15 @@
  * This function allows the iterator function to sleep, when the iterator
  * function is atomic @ieee80211_iterate_active_interfaces_atomic can
  * be used.
- * Does not iterate over a new interface during add_interface()
+ * Does not iterate over a new interface during add_interface().
  *
  * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
  * @iterator: the iterator function to call
  * @data: first argument of the iterator function
  */
 void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
+					 u32 iter_flags,
 					 void (*iterator)(void *data, u8 *mac,
 						struct ieee80211_vif *vif),
 					 void *data);
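
With the extra argument, existing callers just pass IEEE80211_IFACE_ITER_NORMAL; a driver that wants to reprogram every interface during resume could pass IEEE80211_IFACE_ITER_RESUME_ALL instead, e.g. (a sketch; drv_restore_vif is hypothetical):

static void drv_restore_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	/* reprogram per-interface hardware state */
}

	/* in the driver's resume/reconfiguration path: */
	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
					    drv_restore_vif, NULL);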
@@ -3319,13 +3526,15 @@
  * hardware that are currently active and calls the callback for them.
  * This function requires the iterator callback function to be atomic,
  * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
- * Does not iterate over a new interface during add_interface()
+ * Does not iterate over a new interface during add_interface().
  *
  * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
  * @iterator: the iterator function to call, cannot sleep
  * @data: first argument of the iterator function
  */
 void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+						u32 iter_flags,
 						void (*iterator)(void *data,
 						    u8 *mac,
 						    struct ieee80211_vif *vif),
@@ -3524,6 +3733,27 @@
 			 void *iter_data);
 
 /**
+ * ieee80211_iter_chan_contexts_atomic - iterate channel contexts
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @iter: iterator function
+ * @iter_data: data passed to iterator function
+ *
+ * Iterate all active channel contexts. This function is atomic and
+ * doesn't acquire any locks internally that might be held in other
+ * places while calling into the driver.
+ *
+ * The iterator will not find a context that's being added (during
+ * the driver callback to add it) but will find it while it's being
+ * removed.
+ */
+void ieee80211_iter_chan_contexts_atomic(
+	struct ieee80211_hw *hw,
+	void (*iter)(struct ieee80211_hw *hw,
+		     struct ieee80211_chanctx_conf *chanctx_conf,
+		     void *data),
+	void *iter_data);
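
Usage mirrors the interface iterators, e.g. counting the active contexts (a sketch):

static void drv_count_ctx(struct ieee80211_hw *hw,
			  struct ieee80211_chanctx_conf *conf, void *data)
{
	(*(int *)data)++;
}

	int n_ctx = 0;

	ieee80211_iter_chan_contexts_atomic(hw, drv_count_ctx, &n_ctx);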
+
+/**
  * ieee80211_ap_probereq_get - retrieve a Probe Request template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 95e6466..c5a43f5 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -23,6 +23,7 @@
 #endif
 #include <net/netns/xfrm.h>
 
+struct user_namespace;
 struct proc_dir_entry;
 struct net_device;
 struct sock;
@@ -53,6 +54,8 @@
 	struct list_head	cleanup_list;	/* namespaces on death row */
 	struct list_head	exit_list;	/* Use only net_mutex */
 
+	struct user_namespace   *user_ns;	/* Owning user namespace */
+
 	struct proc_dir_entry 	*proc_net;
 	struct proc_dir_entry 	*proc_net_stat;
 
@@ -126,16 +129,21 @@
 /* Init's network namespace */
 extern struct net init_net;
 
-#ifdef CONFIG_NET
-extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns);
+#ifdef CONFIG_NET_NS
+extern struct net *copy_net_ns(unsigned long flags,
+	struct user_namespace *user_ns, struct net *old_net);
 
-#else /* CONFIG_NET */
-static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns)
+#else /* CONFIG_NET_NS */
+#include <linux/sched.h>
+#include <linux/nsproxy.h>
+static inline struct net *copy_net_ns(unsigned long flags,
+	struct user_namespace *user_ns, struct net *old_net)
 {
-	/* There is nothing to copy so this is a noop */
-	return net_ns;
+	if (flags & CLONE_NEWNET)
+		return ERR_PTR(-EINVAL);
+	return old_net;
 }
-#endif /* CONFIG_NET */
+#endif /* CONFIG_NET_NS */
 
 
 extern struct list_head net_namespace_list;
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 5e5eb1f..3573a81 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -62,6 +62,9 @@
 	/* Whether Cookie Preservative is enabled(1) or not(0) */
 	int cookie_preserve_enable;
 
+	/* The namespace default hmac alg */
+	char *sctp_hmac_alg;
+
 	/* Valid.Cookie.Life	    - 60  seconds  */
 	unsigned int valid_cookie_life;
 
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index e900072..671953e 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -24,6 +24,12 @@
 
 #include <net/nfc/nfc.h>
 
+struct nfc_phy_ops {
+	int (*write)(void *dev_id, struct sk_buff *skb);
+	int (*enable)(void *dev_id);
+	void (*disable)(void *dev_id);
+};
+
 struct nfc_hci_dev;
 
 struct nfc_hci_ops {
@@ -38,15 +44,21 @@
 	int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
 	int (*start_poll) (struct nfc_hci_dev *hdev,
 			   u32 im_protocols, u32 tm_protocols);
+	int (*dep_link_up)(struct nfc_hci_dev *hdev, struct nfc_target *target,
+			   u8 comm_mode, u8 *gb, size_t gb_len);
+	int (*dep_link_down)(struct nfc_hci_dev *hdev);
 	int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
 				 struct nfc_target *target);
 	int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
 					   struct nfc_target *target);
-	int (*data_exchange) (struct nfc_hci_dev *hdev,
+	int (*im_transceive) (struct nfc_hci_dev *hdev,
 			      struct nfc_target *target, struct sk_buff *skb,
 			      data_exchange_cb_t cb, void *cb_context);
+	int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
 	int (*check_presence)(struct nfc_hci_dev *hdev,
 			      struct nfc_target *target);
+	void (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+				struct sk_buff *skb);
 };
 
 /* Pipes */
@@ -114,6 +126,9 @@
 	int async_cb_type;
 	data_exchange_cb_t async_cb;
 	void *async_cb_context;
+
+	u8 *gb;
+	size_t gb_len;
 };
 
 /* hci device allocation */
@@ -134,6 +149,8 @@
 
 void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
 
+int nfc_hci_result_to_errno(u8 result);
+
 /* Host IDs */
 #define NFC_HCI_HOST_CONTROLLER_ID	0x00
 #define NFC_HCI_TERMINAL_HOST_ID	0x01
@@ -219,5 +236,7 @@
 			  const u8 *param, size_t param_len);
 int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
 		       const u8 *param, size_t param_len);
+int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
+u32 nfc_hci_sak_to_protocol(u8 sak);
 
 #endif /* __NET_HCI_H */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f05b106..fce80b2 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -95,7 +95,7 @@
 };
 
 struct nfc_dev {
-	unsigned int idx;
+	int idx;
 	u32 target_next_idx;
 	struct nfc_target *targets;
 	int n_targets;
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 929528c..047c047 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -25,9 +25,11 @@
 #define _PROTOCOL_H
 
 #include <linux/in6.h>
+#include <linux/skbuff.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
+#include <linux/netdevice.h>
 
 /* This is one larger than the largest protocol value that can be
  * found in an ipv4 or ipv6 header.  Since in both cases the protocol
@@ -40,12 +42,6 @@
 	void			(*early_demux)(struct sk_buff *skb);
 	int			(*handler)(struct sk_buff *skb);
 	void			(*err_handler)(struct sk_buff *skb, u32 info);
-	int			(*gso_send_check)(struct sk_buff *skb);
-	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
-					       netdev_features_t features);
-	struct sk_buff	      **(*gro_receive)(struct sk_buff **head,
-					       struct sk_buff *skb);
-	int			(*gro_complete)(struct sk_buff *skb);
 	unsigned int		no_policy:1,
 				netns_ok:1;
 };
@@ -60,23 +56,20 @@
 			       struct inet6_skb_parm *opt,
 			       u8 type, u8 code, int offset,
 			       __be32 info);
-
-	int	(*gso_send_check)(struct sk_buff *skb);
-	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-				       netdev_features_t features);
-	struct sk_buff **(*gro_receive)(struct sk_buff **head,
-					struct sk_buff *skb);
-	int	(*gro_complete)(struct sk_buff *skb);
-
 	unsigned int	flags;	/* INET6_PROTO_xxx */
 };
 
 #define INET6_PROTO_NOPOLICY	0x1
 #define INET6_PROTO_FINAL	0x2
-/* This should be set for any extension header which is compatible with GSO. */
-#define INET6_PROTO_GSO_EXTHDR	0x4
 #endif
 
+struct net_offload {
+	struct offload_callbacks callbacks;
+	unsigned int		 flags;	/* Flags used by IPv6 for now */
+};
+/* This should be set for any extension header which is compatible with GSO. */
+#define INET6_PROTO_GSO_EXTHDR	0x1
+
 /* This is used to register socket interfaces for IP protocols.  */
 struct inet_protosw {
 	struct list_head list;
@@ -96,6 +89,8 @@
 #define INET_PROTOSW_ICSK      0x04  /* Is this an inet_connection_sock? */
 
 extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
 
 #if IS_ENABLED(CONFIG_IPV6)
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
@@ -103,6 +98,8 @@
 
 extern int	inet_add_protocol(const struct net_protocol *prot, unsigned char num);
 extern int	inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+extern int	inet_add_offload(const struct net_offload *prot, unsigned char num);
+extern int	inet_del_offload(const struct net_offload *prot, unsigned char num);
 extern void	inet_register_protosw(struct inet_protosw *p);
 extern void	inet_unregister_protosw(struct inet_protosw *p);
 
@@ -112,5 +109,7 @@
 extern int	inet6_register_protosw(struct inet_protosw *p);
 extern void	inet6_unregister_protosw(struct inet_protosw *p);
 #endif
+extern int	inet6_add_offload(const struct net_offload *prot, unsigned char num);
+extern int	inet6_del_offload(const struct net_offload *prot, unsigned char num);
 
 #endif	/* _PROTOCOL_H */
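
With the GSO/GRO hooks removed from struct net_protocol and struct inet6_protocol, a protocol that still needs them now registers a struct net_offload separately; a rough sketch (foo_* and IPPROTO_FOO are placeholders):

static const struct net_offload foo_offload = {
	.callbacks = {
		.gso_segment	= foo_gso_segment,
		.gro_receive	= foo_gro_receive,
		.gro_complete	= foo_gro_complete,
	},
};

	/* at init time, alongside inet_add_protocol(): */
	if (inet_add_offload(&foo_offload, IPPROTO_FOO) < 0)
		pr_err("foo: cannot register offload\n");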
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b01d8dd..a51dbd1 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -49,13 +49,16 @@
 					   struct request_sock *req);
 };
 
+extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+
 /* struct request_sock - mini sock to represent a connection request
  */
 struct request_sock {
 	struct request_sock		*dl_next; /* Must be first member! */
 	u16				mss;
-	u8				retrans;
-	u8				cookie_ts; /* syncookie: encode tcpopts in timestamp */
+	u8				num_retrans; /* number of retransmits */
+	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
+	u8				num_timeout:7; /* number of timeouts */
 	/* The following two fields can be easily recomputed I think -AK */
 	u32				window_clamp; /* window clamp at creation time */
 	u32				rcv_wnd;	  /* rcv_wnd offered first time */
@@ -231,7 +234,7 @@
 {
 	struct listen_sock *lopt = queue->listen_opt;
 
-	if (req->retrans == 0)
+	if (req->num_timeout == 0)
 		--lopt->qlen_young;
 
 	return --lopt->qlen;
@@ -269,7 +272,8 @@
 	struct listen_sock *lopt = queue->listen_opt;
 
 	req->expires = jiffies + timeout;
-	req->retrans = 0;
+	req->num_retrans = 0;
+	req->num_timeout = 0;
 	req->sk = NULL;
 	req->dl_next = lopt->syn_table[hash];
 
diff --git a/include/net/route.h b/include/net/route.h
index bc40b63..2ea40c1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -198,10 +198,13 @@
 extern void fib_add_ifaddr(struct in_ifaddr *);
 extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
-static inline void ip_rt_put(struct rtable * rt)
+static inline void ip_rt_put(struct rtable *rt)
 {
-	if (rt)
-		dst_release(&rt->dst);
+	/* dst_release() accepts a NULL parameter.
+	 * We rely on dst being the first member of struct rtable
+	 */
+	BUILD_BUG_ON(offsetof(struct rtable, dst) != 0);
+	dst_release(&rt->dst);
 }
 
 #define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 712b3be..3524727 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -130,8 +130,6 @@
 	__be16 err;
 	sctp_state_t state;
 	sctp_event_timeout_t to;
-	unsigned long zero;
-	void *ptr;
 	struct sctp_chunk *chunk;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
@@ -154,23 +152,15 @@
  * which takes an __s32 and returns a sctp_arg_t containing the
  * __s32.  So, after foo = SCTP_I32(arg), foo.i32 == arg.
  */
-static inline sctp_arg_t SCTP_NULL(void)
-{
-	sctp_arg_t retval; retval.ptr = NULL; return retval;
-}
-static inline sctp_arg_t SCTP_NOFORCE(void)
-{
-	sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 0; return retval;
-}
-static inline sctp_arg_t SCTP_FORCE(void)
-{
-	sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 1; return retval;
-}
 
 #define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
 static inline sctp_arg_t	\
 SCTP_## name (type arg)		\
-{ sctp_arg_t retval = {.zero = 0UL}; retval.elt = arg; return retval; }
+{ sctp_arg_t retval;\
+  memset(&retval, 0, sizeof(sctp_arg_t));\
+  retval.elt = arg;\
+  return retval;\
+}
 
 SCTP_ARG_CONSTRUCTOR(I32,	__s32, i32)
 SCTP_ARG_CONSTRUCTOR(U32,	__u32, u32)
@@ -181,7 +171,6 @@
 SCTP_ARG_CONSTRUCTOR(PERR,      __be16, err)	/* protocol error */
 SCTP_ARG_CONSTRUCTOR(STATE,	sctp_state_t, state)
 SCTP_ARG_CONSTRUCTOR(TO,	sctp_event_timeout_t, to)
-SCTP_ARG_CONSTRUCTOR(PTR,	void *, ptr)
 SCTP_ARG_CONSTRUCTOR(CHUNK,	struct sctp_chunk *, chunk)
 SCTP_ARG_CONSTRUCTOR(ASOC,	struct sctp_association *, asoc)
 SCTP_ARG_CONSTRUCTOR(TRANSPORT,	struct sctp_transport *, transport)
@@ -192,6 +181,23 @@
 SCTP_ARG_CONSTRUCTOR(SACKH,	sctp_sackhdr_t *, sackh)
 SCTP_ARG_CONSTRUCTOR(DATAMSG,	struct sctp_datamsg *, msg)
 
+static inline sctp_arg_t SCTP_FORCE(void)
+{
+	return SCTP_I32(1);
+}
+
+static inline sctp_arg_t SCTP_NOFORCE(void)
+{
+	return SCTP_I32(0);
+}
+
+static inline sctp_arg_t SCTP_NULL(void)
+{
+	sctp_arg_t retval;
+	memset(&retval, 0, sizeof(sctp_arg_t));
+	return retval;
+}
+
 typedef struct {
 	sctp_arg_t obj;
 	sctp_verb_t verb;
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index d053d2e..c29707d 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -312,14 +312,6 @@
 				 * functions simpler to write.
 				 */
 
-#if defined (CONFIG_SCTP_HMAC_MD5)
-#define SCTP_COOKIE_HMAC_ALG "hmac(md5)"
-#elif defined (CONFIG_SCTP_HMAC_SHA1)
-#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)"
-#else
-#define SCTP_COOKIE_HMAC_ALG NULL
-#endif
-
 /* These return values describe the success or failure of a number of
  * routines which form the lower interface to SCTP_outqueue.
  */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index b5887e1..2a82d13 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -234,6 +234,8 @@
 struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   struct sctp_paramhdr *);
+struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *,
+						   const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
 				  const struct sctp_transport *);
 struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 64158aa..2b2f61d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -177,6 +177,7 @@
 
 	/* Access to HMAC transform. */
 	struct crypto_hash *hmac;
+	char *sctp_hmac_alg;
 
 	/* What is our base endpointer? */
 	struct sctp_endpoint *ep;
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 2e5ee0d..ff1b8ba7 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -72,7 +72,7 @@
 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Perform partial delivery. */
-void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
 
 /* Abort the partial delivery. */
 void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6feeccd..3202bde 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -369,7 +369,6 @@
 extern void tcp_v4_early_demux(struct sk_buff *skb);
 extern int tcp_v4_rcv(struct sk_buff *skb);
 
-extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);
 extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		       size_t size);
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index b1bea03..2d32d07 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -43,6 +43,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER	26
 #define SO_DETACH_FILTER	27
+#define SO_GET_FILTER		SO_ATTACH_FILTER
 
 #define SO_PEERNAME		28
 #define SO_TIMESTAMP		29
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 3d79224..9cfde69 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -127,7 +127,9 @@
 #define SKF_AD_RXHASH	32
 #define SKF_AD_CPU	36
 #define SKF_AD_ALU_XOR_X	40
-#define SKF_AD_MAX	44
+#define SKF_AD_VLAN_TAG	44
+#define SKF_AD_VLAN_TAG_PRESENT 48
+#define SKF_AD_MAX	52
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
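
The new ancillary offsets are loaded from classic BPF just like the existing ones (SKF_AD_OFF is defined earlier in this header); for example, a filter that accepts only VLAN-tagged packets could look like this (a sketch):

struct sock_filter vlan_only[] = {
	/* A = "VLAN tag present" ancillary value */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
	/* no tag -> drop, tag -> accept */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
};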
 
diff --git a/include/uapi/linux/hdlc/Kbuild b/include/uapi/linux/hdlc/Kbuild
index aafaa5a..8c1d2cb 100644
--- a/include/uapi/linux/hdlc/Kbuild
+++ b/include/uapi/linux/hdlc/Kbuild
@@ -1 +1,2 @@
 # UAPI Header export list
+header-y += ioctl.h
diff --git a/include/linux/hdlc/ioctl.h b/include/uapi/linux/hdlc/ioctl.h
similarity index 96%
rename from include/linux/hdlc/ioctl.h
rename to include/uapi/linux/hdlc/ioctl.h
index 5839723..04bc027 100644
--- a/include/linux/hdlc/ioctl.h
+++ b/include/uapi/linux/hdlc/ioctl.h
@@ -34,13 +34,15 @@
 #define LMI_CCITT		3 /* ITU-T Annex A */
 #define LMI_CISCO		4 /* The "original" LMI, aka Gang of Four */
 
-typedef struct { 
+#ifndef __ASSEMBLY__
+
+typedef struct {
 	unsigned int clock_rate; /* bits per second */
 	unsigned int clock_type; /* internal, external, TX-internal etc. */
 	unsigned short loopback;
 } sync_serial_settings;          /* V.35, V.24, X.21 */
 
-typedef struct { 
+typedef struct {
 	unsigned int clock_rate; /* bits per second */
 	unsigned int clock_type; /* internal, external, TX-internal etc. */
 	unsigned short loopback;
@@ -78,4 +80,5 @@
 
 /* PPP doesn't need any info now - supply length = 0 to ioctl */
 
+#endif /* __ASSEMBLY__ */
 #endif /* __HDLC_IOCTL_H__ */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index a8fe954..b388579 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -97,5 +97,23 @@
 	__u16 unused;
 };
 
+/* Bridge Flags */
+#define BRIDGE_FLAGS_MASTER	1	/* Bridge command to/from master */
+#define BRIDGE_FLAGS_SELF	2	/* Bridge command to/from lowerdev */
 
+#define BRIDGE_MODE_VEB		0	/* Default loopback mode */
+#define BRIDGE_MODE_VEPA	1	/* 802.1Qbg defined VEPA mode */
+
+/* Bridge management nested attributes
+ * [IFLA_AF_SPEC] = {
+ *     [IFLA_BRIDGE_FLAGS]
+ *     [IFLA_BRIDGE_MODE]
+ * }
+ */
+enum {
+	IFLA_BRIDGE_FLAGS,
+	IFLA_BRIDGE_MODE,
+	__IFLA_BRIDGE_MAX,
+};
+#define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
 #endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 0343e1f..67fb87c 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -48,6 +48,7 @@
 #define	ETH_P_BPQ	0x08FF		/* G8BPQ AX.25 Ethernet Packet	[ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_IEEEPUP	0x0a00		/* Xerox IEEE802.3 PUP packet */
 #define ETH_P_IEEEPUPAT	0x0a01		/* Xerox IEEE802.3 PUP Addr Trans packet */
+#define ETH_P_BATMAN	0x4305		/* B.A.T.M.A.N.-Advanced packet [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_DEC       0x6000          /* DEC Assigned proto           */
 #define ETH_P_DNA_DL    0x6001          /* DEC DNA Dump/Load            */
 #define ETH_P_DNA_RC    0x6002          /* DEC DNA Remote Console       */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 5c80cb1..bb58aeb 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -205,6 +205,23 @@
 
 #define IFLA_INET6_MAX	(__IFLA_INET6_MAX - 1)
 
+enum {
+	BRIDGE_MODE_UNSPEC,
+	BRIDGE_MODE_HAIRPIN,
+};
+
+enum {
+	IFLA_BRPORT_UNSPEC,
+	IFLA_BRPORT_STATE,	/* Spanning tree state     */
+	IFLA_BRPORT_PRIORITY,	/* "             priority  */
+	IFLA_BRPORT_COST,	/* "             cost      */
+	IFLA_BRPORT_MODE,	/* mode (hairpin)          */
+	IFLA_BRPORT_GUARD,	/* bpdu guard              */
+	IFLA_BRPORT_PROTECT,	/* root port protection    */
+	__IFLA_BRPORT_MAX
+};
+#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
+
 struct ifla_cacheinfo {
 	__u32	max_reasm_len;
 	__u32	tstamp;		/* ipv6InterfaceTable updated timestamp */
@@ -285,6 +302,10 @@
 	IFLA_VXLAN_AGEING,
 	IFLA_VXLAN_LIMIT,
 	IFLA_VXLAN_PORT_RANGE,
+	IFLA_VXLAN_PROXY,
+	IFLA_VXLAN_RSC,
+	IFLA_VXLAN_L2MISS,
+	IFLA_VXLAN_L3MISS,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index f379929..f9a6037 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -50,6 +50,7 @@
 #define PACKET_TX_TIMESTAMP		16
 #define PACKET_TIMESTAMP		17
 #define PACKET_FANOUT			18
+#define PACKET_TX_HAS_OFF		19
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 25a585c..958497a 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -34,6 +34,7 @@
 #define TUN_ONE_QUEUE	0x0080
 #define TUN_PERSIST 	0x0100	
 #define TUN_VNET_HDR 	0x0200
+#define TUN_TAP_MQ      0x0400
 
 /* Ioctl defines */
 #define TUNSETNOCSUM  _IOW('T', 200, int) 
@@ -53,6 +54,7 @@
 #define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog)
 #define TUNGETVNETHDRSZ _IOR('T', 215, int)
 #define TUNSETVNETHDRSZ _IOW('T', 216, int)
+#define TUNSETQUEUE  _IOW('T', 217, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
@@ -61,6 +63,9 @@
 #define IFF_ONE_QUEUE	0x2000
 #define IFF_VNET_HDR	0x4000
 #define IFF_TUN_EXCL	0x8000
+#define IFF_MULTI_QUEUE 0x0100
+#define IFF_ATTACH_QUEUE 0x0200
+#define IFF_DETACH_QUEUE 0x0400
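
In userspace the multiqueue support is driven through TUNSETIFF with IFF_MULTI_QUEUE (one fd per queue) and the new TUNSETQUEUE ioctl with IFF_ATTACH_QUEUE/IFF_DETACH_QUEUE; a rough sketch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tap_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* call again with the same name for another queue */
}

A queue can later be taken out of service and re-enabled without closing the fd by setting ifr_flags to IFF_DETACH_QUEUE or IFF_ATTACH_QUEUE and issuing ioctl(fd, TUNSETQUEUE, &ifr).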
 
 /* Features for GSO (TUNSETOFFLOAD). */
 #define TUN_F_CSUM	0x01	/* You can hand me unchecksummed packets. */
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 5db5942..aee73d0 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -37,6 +37,26 @@
 	struct iphdr		iph;
 };
 
+enum {
+	IFLA_IPTUN_UNSPEC,
+	IFLA_IPTUN_LINK,
+	IFLA_IPTUN_LOCAL,
+	IFLA_IPTUN_REMOTE,
+	IFLA_IPTUN_TTL,
+	IFLA_IPTUN_TOS,
+	IFLA_IPTUN_ENCAP_LIMIT,
+	IFLA_IPTUN_FLOWINFO,
+	IFLA_IPTUN_FLAGS,
+	IFLA_IPTUN_PROTO,
+	IFLA_IPTUN_PMTUDISC,
+	IFLA_IPTUN_6RD_PREFIX,
+	IFLA_IPTUN_6RD_RELAY_PREFIX,
+	IFLA_IPTUN_6RD_PREFIXLEN,
+	IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
+	__IFLA_IPTUN_MAX,
+};
+#define IFLA_IPTUN_MAX	(__IFLA_IPTUN_MAX - 1)
+
 /* SIT-mode i_flags */
 #define	SIT_ISATAP	0x0001
 
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 1e31599..f79c372 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -240,6 +240,7 @@
  *
  * IP6T_SO_GET_REVISION_MATCH	68
  * IP6T_SO_GET_REVISION_TARGET	69
+ * IP6T_SO_ORIGINAL_DST		80
  */
 
 /* RFC5014: Source address selection */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 8c469af..bbde90f 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -109,9 +109,10 @@
 	INET_DIAG_TOS,
 	INET_DIAG_TCLASS,
 	INET_DIAG_SKMEMINFO,
+	INET_DIAG_SHUTDOWN,
 };
 
-#define INET_DIAG_MAX INET_DIAG_SKMEMINFO
+#define INET_DIAG_MAX INET_DIAG_SHUTDOWN
 
 
 /* INET_DIAG_MEM */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index a6d7d1c..5a2991c 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -157,6 +157,7 @@
 	DEVCONF_DISABLE_IPV6,
 	DEVCONF_ACCEPT_DAD,
 	DEVCONF_FORCE_TLLAO,
+	DEVCONF_NDISC_NOTIFY,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 0459664..2be7bd1 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -55,4 +55,7 @@
 #define RTMSG_NEWROUTE		0x21
 #define RTMSG_DELROUTE		0x22
 
+#define IP6_RT_PRIO_USER	1024
+#define IP6_RT_PRIO_ADDRCONF	256
+
 #endif /* _UAPI_LINUX_IPV6_ROUTE_H */
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
new file mode 100644
index 0000000..75dcbc5
--- /dev/null
+++ b/include/uapi/linux/netconf.h
@@ -0,0 +1,23 @@
+#ifndef _UAPI_LINUX_NETCONF_H_
+#define _UAPI_LINUX_NETCONF_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+struct netconfmsg {
+	__u8	ncm_family;
+};
+
+enum {
+	NETCONFA_UNSPEC,
+	NETCONFA_IFINDEX,
+	NETCONFA_FORWARDING,
+	NETCONFA_RP_FILTER,
+	__NETCONFA_MAX
+};
+#define NETCONFA_MAX	(__NETCONFA_MAX - 1)
+
+#define NETCONFA_IFINDEX_ALL		-1
+#define NETCONFA_IFINDEX_DEFAULT	-2
+
+#endif /* _UAPI_LINUX_NETCONF_H_ */
diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
index bf1ef65..649c680 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
@@ -178,6 +178,9 @@
 #define IP6T_SO_GET_REVISION_TARGET	(IP6T_BASE_CTL + 5)
 #define IP6T_SO_GET_MAX			IP6T_SO_GET_REVISION_TARGET
 
+/* obtain original address if REDIRECT'd connection */
+#define IP6T_SO_ORIGINAL_DST            80
+
 /* ICMP matching stuff */
 struct ip6t_icmp {
 	__u8 type;				/* type to match */
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index d908d17..0e63cee 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -60,6 +60,13 @@
  *      target mode.
  * @NFC_EVENT_DEVICE_DEACTIVATED: event emitted when the adapter is deactivated
  *      from target mode.
+ * @NFC_CMD_LLC_GET_PARAMS: request LTO, RW, and MIUX parameters for a device
+ * @NFC_CMD_LLC_SET_PARAMS: set one or more of LTO, RW, and MIUX parameters for
+ *	a device. LTO must be set before the link is up, otherwise -EINPROGRESS
+ *	is returned. RW and MIUX can be set at any time and will be passed in
+ *	subsequent CONNECT and CC messages.
+ *	If one of the passed parameters is wrong none is set and -EINVAL is
+ *	returned.
  */
 enum nfc_commands {
 	NFC_CMD_UNSPEC,
@@ -77,6 +84,8 @@
 	NFC_EVENT_TARGET_LOST,
 	NFC_EVENT_TM_ACTIVATED,
 	NFC_EVENT_TM_DEACTIVATED,
+	NFC_CMD_LLC_GET_PARAMS,
+	NFC_CMD_LLC_SET_PARAMS,
 /* private: internal use only */
 	__NFC_CMD_AFTER_LAST
 };
@@ -102,6 +111,9 @@
  * @NFC_ATTR_RF_MODE: Initiator or target
  * @NFC_ATTR_IM_PROTOCOLS: Initiator mode protocols to poll for
  * @NFC_ATTR_TM_PROTOCOLS: Target mode protocols to listen for
+ * @NFC_ATTR_LLC_PARAM_LTO: Link TimeOut parameter
+ * @NFC_ATTR_LLC_PARAM_RW: Receive Window size parameter
+ * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter
  */
 enum nfc_attrs {
 	NFC_ATTR_UNSPEC,
@@ -119,6 +131,9 @@
 	NFC_ATTR_DEVICE_POWERED,
 	NFC_ATTR_IM_PROTOCOLS,
 	NFC_ATTR_TM_PROTOCOLS,
+	NFC_ATTR_LLC_PARAM_LTO,
+	NFC_ATTR_LLC_PARAM_RW,
+	NFC_ATTR_LLC_PARAM_MIUX,
 /* private: internal use only */
 	__NFC_ATTR_AFTER_LAST
 };
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 7df9b500..33a4174 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -118,8 +118,9 @@
  *	to get a list of all present wiphys.
  * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
  *	%NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
- *	%NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ,
- *	%NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT,
+ *	%NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ (and the
+ *	attributes determining the channel width; this is used for setting the
+ *	monitor mode channel), %NL80211_ATTR_WIPHY_RETRY_SHORT,
  *	%NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
  *	and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD.
  *	However, for setting the channel, see %NL80211_CMD_SET_CHANNEL
@@ -171,7 +172,7 @@
  *	%NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
  *	%NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT.
  *	The channel to use can be set on the interface or be given using the
- *	%NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_WIPHY_CHANNEL_TYPE attrs.
+ *	%NL80211_ATTR_WIPHY_FREQ and the attributes determining channel width.
  * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
  * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface
  * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP
@@ -401,8 +402,7 @@
  *	a response while being associated to an AP on another channel.
  *	%NL80211_ATTR_IFINDEX is used to specify which interface (and thus
  *	radio) is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the
- *	frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be
- *	optionally used to specify additional channel parameters.
+ *	frequency for the operation.
  *	%NL80211_ATTR_DURATION is used to specify the duration in milliseconds
  *	to remain on the channel. This command is also used as an event to
  *	notify when the requested duration starts (it may take a while for the
@@ -440,12 +440,11 @@
  *	as an event indicating reception of a frame that was not processed in
  *	kernel code, but is for us (i.e., which may need to be processed in a
  *	user space application). %NL80211_ATTR_FRAME is used to specify the
- *	frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and
- *	optionally %NL80211_ATTR_WIPHY_CHANNEL_TYPE) is used to indicate on
- *	which channel the frame is to be transmitted or was received. If this
- *	channel is not the current channel (remain-on-channel or the
- *	operational channel) the device will switch to the given channel and
- *	transmit the frame, optionally waiting for a response for the time
+ *	frame contents (including header). %NL80211_ATTR_WIPHY_FREQ is used
+ *	to indicate on which channel the frame is to be transmitted or was
+ *	received. If this channel is not the current channel (remain-on-channel
+ *	or the operational channel) the device will switch to the given channel
+ *	and transmit the frame, optionally waiting for a response for the time
  *	specified using %NL80211_ATTR_DURATION. When called, this operation
  *	returns a cookie (%NL80211_ATTR_COOKIE) that will be included with the
  *	TX status event pertaining to the TX request.
@@ -473,8 +472,8 @@
  *	command is used as an event to indicate the that a trigger level was
  *	reached.
  * @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
- *	and %NL80211_ATTR_WIPHY_CHANNEL_TYPE) the given interface (identifed
- *	by %NL80211_ATTR_IFINDEX) shall operate on.
+ *	and the attributes determining channel width) the given interface
+ *	(identified by %NL80211_ATTR_IFINDEX) shall operate on.
  *	In case multiple channels are supported by the device, the mechanism
  *	with which it switches channels is implementation-defined.
  *	When a monitor interface is given, it can only switch channel while
@@ -526,6 +525,12 @@
  *	of PMKSA caching dandidates.
  *
  * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
+ *	In addition, this can be used as an event to request userspace to take
+ *	actions on TDLS links (set up a new link or tear down an existing one).
+ *	In such events, %NL80211_ATTR_TDLS_OPERATION indicates the requested
+ *	operation, %NL80211_ATTR_MAC contains the peer MAC address, and
+ *	%NL80211_ATTR_REASON_CODE the reason code to be used (only with
+ *	%NL80211_TDLS_TEARDOWN).
  * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
  *
  * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
@@ -562,8 +567,8 @@
  *
  * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels
  *	independently of the userspace SME, send this event indicating
- *	%NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
- *	%NL80211_ATTR_WIPHY_CHANNEL_TYPE.
+ *	%NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the
+ *	attributes determining channel width.
  *
  * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
  *	its %NL80211_ATTR_WDEV identifier. It must have been created with
@@ -578,6 +583,9 @@
  *	station, due to particular reason. %NL80211_ATTR_CONN_FAILED_REASON
  *	is used for this.
  *
+ * @NL80211_CMD_SET_MCAST_RATE: Change the rate used to send multicast frames
+ *	for IBSS or MESH vif.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -726,6 +734,8 @@
 
 	NL80211_CMD_CONN_FAILED,
 
+	NL80211_CMD_SET_MCAST_RATE,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -762,14 +772,26 @@
  *	/sys/class/ieee80211/<phyname>/index
  * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
  * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters
- * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz
+ * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz,
+ *	defines the channel together with the (deprecated)
+ *	%NL80211_ATTR_WIPHY_CHANNEL_TYPE attribute or the attributes
+ *	%NL80211_ATTR_CHANNEL_WIDTH and if needed %NL80211_ATTR_CENTER_FREQ1
+ *	and %NL80211_ATTR_CENTER_FREQ2
+ * @NL80211_ATTR_CHANNEL_WIDTH: u32 attribute containing one of the values
+ *	of &enum nl80211_chan_width, describing the channel width. See the
+ *	documentation of the enum for more information.
+ * @NL80211_ATTR_CENTER_FREQ1: Center frequency of the first part of the
+ *	channel, used for anything but 20 MHz bandwidth
+ * @NL80211_ATTR_CENTER_FREQ2: Center frequency of the second part of the
+ *	channel, used only for 80+80 MHz bandwidth
  * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ
- *	if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included):
+ *	if HT20 or HT40 are to be used (i.e., HT disabled if not included):
  *	NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including
  *		this attribute)
  *	NL80211_CHAN_HT20 = HT20 only
  *	NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel
  *	NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel
+ *	This attribute is now deprecated.
  * @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is
  *	less than or equal to the RTS threshold; allowed range: 1..255;
  *	dot11ShortRetryLimit; u8
@@ -1273,6 +1295,14 @@
  *	the connection request from a station. nl80211_connect_failed_reason
  *	enum has different reasons of connection failure.
  *
+ * @NL80211_ATTR_SAE_DATA: SAE elements in Authentication frames. This starts
+ *	with the Authentication transaction sequence number field.
+ *
+ * @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from
+ *	association request when used with NL80211_CMD_NEW_STATION)
+ *
+ * @NL80211_ATTR_SCAN_FLAGS: scan request control flags (u32)
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1530,6 +1560,16 @@
 
 	NL80211_ATTR_CONN_FAILED_REASON,
 
+	NL80211_ATTR_SAE_DATA,
+
+	NL80211_ATTR_VHT_CAPABILITY,
+
+	NL80211_ATTR_SCAN_FLAGS,
+
+	NL80211_ATTR_CHANNEL_WIDTH,
+	NL80211_ATTR_CENTER_FREQ1,
+	NL80211_ATTR_CENTER_FREQ2,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -1573,6 +1613,7 @@
 #define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY	16
 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY	24
 #define NL80211_HT_CAPABILITY_LEN		26
+#define NL80211_VHT_CAPABILITY_LEN		12
 
 #define NL80211_MAX_NR_CIPHER_SUITES		5
 #define NL80211_MAX_NR_AKM_SUITES		2
@@ -1693,10 +1734,15 @@
  * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved
  * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s)
  * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8)
- * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 Mhz dualchannel bitrate
+ * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 MHz dualchannel bitrate
  * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval
  * @NL80211_RATE_INFO_BITRATE32: total bitrate (u32, 100kbit/s)
  * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined
+ * @NL80211_RATE_INFO_VHT_MCS: MCS index for VHT (u8)
+ * @NL80211_RATE_INFO_VHT_NSS: number of streams in VHT (u8)
+ * @NL80211_RATE_INFO_80_MHZ_WIDTH: 80 MHz VHT rate
+ * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: 80+80 MHz VHT rate
+ * @NL80211_RATE_INFO_160_MHZ_WIDTH: 160 MHz VHT rate
  * @__NL80211_RATE_INFO_AFTER_LAST: internal use
  */
 enum nl80211_rate_info {
@@ -1706,6 +1752,11 @@
 	NL80211_RATE_INFO_40_MHZ_WIDTH,
 	NL80211_RATE_INFO_SHORT_GI,
 	NL80211_RATE_INFO_BITRATE32,
+	NL80211_RATE_INFO_VHT_MCS,
+	NL80211_RATE_INFO_VHT_NSS,
+	NL80211_RATE_INFO_80_MHZ_WIDTH,
+	NL80211_RATE_INFO_80P80_MHZ_WIDTH,
+	NL80211_RATE_INFO_160_MHZ_WIDTH,
 
 	/* keep last */
 	__NL80211_RATE_INFO_AFTER_LAST,
@@ -2414,6 +2465,15 @@
 #define NL80211_TXQ_Q_BE	NL80211_AC_BE
 #define NL80211_TXQ_Q_BK	NL80211_AC_BK
 
+/**
+ * enum nl80211_channel_type - channel type
+ * @NL80211_CHAN_NO_HT: 20 MHz, non-HT channel
+ * @NL80211_CHAN_HT20: 20 MHz HT channel
+ * @NL80211_CHAN_HT40MINUS: HT40 channel, secondary channel
+ *	below the control channel
+ * @NL80211_CHAN_HT40PLUS: HT40 channel, secondary channel
+ *	above the control channel
+ */
 enum nl80211_channel_type {
 	NL80211_CHAN_NO_HT,
 	NL80211_CHAN_HT20,
@@ -2422,6 +2482,32 @@
 };
 
 /**
+ * enum nl80211_chan_width - channel width definitions
+ *
+ * These values are used with the %NL80211_ATTR_CHANNEL_WIDTH
+ * attribute.
+ *
+ * @NL80211_CHAN_WIDTH_20_NOHT: 20 MHz, non-HT channel
+ * @NL80211_CHAN_WIDTH_20: 20 MHz HT channel
+ * @NL80211_CHAN_WIDTH_40: 40 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ *	attribute must be provided as well
+ * @NL80211_CHAN_WIDTH_80: 80 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ *	attribute must be provided as well
+ * @NL80211_CHAN_WIDTH_80P80: 80+80 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ *	and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well
+ * @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ *	attribute must be provided as well
+ */
+enum nl80211_chan_width {
+	NL80211_CHAN_WIDTH_20_NOHT,
+	NL80211_CHAN_WIDTH_20,
+	NL80211_CHAN_WIDTH_40,
+	NL80211_CHAN_WIDTH_80,
+	NL80211_CHAN_WIDTH_80P80,
+	NL80211_CHAN_WIDTH_160,
+};
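
As a worked example, 80 MHz operation with channel 36 as the control channel is described by %NL80211_ATTR_WIPHY_FREQ = 5180, %NL80211_ATTR_CHANNEL_WIDTH = NL80211_CHAN_WIDTH_80 and %NL80211_ATTR_CENTER_FREQ1 = 5210 (the center of the 80 MHz block spanning channels 36-48); with libnl this would be added to the message roughly as follows (a sketch, assuming msg is an nl80211 command already set up with genlmsg_put()):

	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 5180);
	nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, NL80211_CHAN_WIDTH_80);
	nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, 5210);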
+
+/**
  * enum nl80211_bss - netlink attributes for a BSS
  *
  * @__NL80211_BSS_INVALID: invalid
@@ -2489,6 +2575,7 @@
  * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only)
  * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
  * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
+ * @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals
  * @__NL80211_AUTHTYPE_NUM: internal
  * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
  * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -2500,6 +2587,7 @@
 	NL80211_AUTHTYPE_SHARED_KEY,
 	NL80211_AUTHTYPE_FT,
 	NL80211_AUTHTYPE_NETWORK_EAP,
+	NL80211_AUTHTYPE_SAE,
 
 	/* keep last */
 	__NL80211_AUTHTYPE_NUM,
@@ -3028,6 +3116,16 @@
  *	in the interface combinations, even when it's only used for scan
  *	and remain-on-channel. This could be due to, for example, the
  *	remain-on-channel implementation requiring a channel context.
+ * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
+ *	equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
+ *	mode
+ * @NL80211_FEATURE_LOW_PRIORITY_SCAN: This driver supports low priority scan
+ * @NL80211_FEATURE_SCAN_FLUSH: Scan flush is supported
+ * @NL80211_FEATURE_AP_SCAN: Support scanning using an AP vif
+ * @NL80211_FEATURE_VIF_TXPOWER: The driver supports per-vif TX power setting
+ * @NL80211_FEATURE_NEED_OBSS_SCAN: The driver expects userspace to perform
+ *	OBSS scans and generate 20/40 BSS coex reports. This flag is used only
+ *	for drivers implementing the CONNECT API, for AUTH/ASSOC it is implied.
  */
 enum nl80211_feature_flags {
 	NL80211_FEATURE_SK_TX_STATUS			= 1 << 0,
@@ -3035,6 +3133,12 @@
 	NL80211_FEATURE_INACTIVITY_TIMER		= 1 << 2,
 	NL80211_FEATURE_CELL_BASE_REG_HINTS		= 1 << 3,
 	NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL	= 1 << 4,
+	NL80211_FEATURE_SAE				= 1 << 5,
+	NL80211_FEATURE_LOW_PRIORITY_SCAN		= 1 << 6,
+	NL80211_FEATURE_SCAN_FLUSH			= 1 << 7,
+	NL80211_FEATURE_AP_SCAN				= 1 << 8,
+	NL80211_FEATURE_VIF_TXPOWER			= 1 << 9,
+	NL80211_FEATURE_NEED_OBSS_SCAN			= 1 << 10,
 };
 
 /**
@@ -3069,4 +3173,25 @@
 	NL80211_CONN_FAIL_BLOCKED_CLIENT,
 };
 
+/**
+ * enum nl80211_scan_flags -  scan request control flags
+ *
+ * Scan request control flags are used to control the handling
+ * of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN
+ * requests.
+ *
+ * @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority
+ * @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning
+ * @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured
+ *	as AP and the beaconing has already been configured. This attribute is
+ *	dangerous because will destroy stations performance as a lot of frames
+ *	will be lost while scanning off-channel, therefore it must be used only
+ *	when really needed
+ */
+enum nl80211_scan_flags {
+	NL80211_SCAN_FLAG_LOW_PRIORITY			= 1<<0,
+	NL80211_SCAN_FLAG_FLUSH				= 1<<1,
+	NL80211_SCAN_FLAG_AP				= 1<<2,
+};
+
 #endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 94e981f..b65c834 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -67,12 +67,26 @@
 	unsigned int rsv[4];          /* Reserved for future use. */
 };
 
+#define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */
+
+struct ptp_sys_offset {
+	unsigned int n_samples; /* Desired number of measurements. */
+	unsigned int rsv[3];    /* Reserved for future use. */
+	/*
+	 * Array of interleaved system/phc time stamps. The kernel
+	 * will provide 2*n_samples + 1 time stamps, with the last
+	 * one as a system time stamp.
+	 */
+	struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
+};
+
 #define PTP_CLK_MAGIC '='
 
 #define PTP_CLOCK_GETCAPS  _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
 #define PTP_EXTTS_REQUEST  _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request)
 #define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
 #define PTP_ENABLE_PPS     _IOW(PTP_CLK_MAGIC, 4, int)
+#define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
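
A user of the new ioctl (on a /dev/ptpN descriptor, here fd) asks for n_samples measurements and gets back 2*n_samples + 1 interleaved timestamps (sys, phc, sys, phc, ..., sys); the PHC-to-system offset can then be estimated against the midpoint of the two surrounding system stamps, roughly (a sketch):

	struct ptp_sys_offset sysoff;
	unsigned int i;

	memset(&sysoff, 0, sizeof(sysoff));
	sysoff.n_samples = 5;	/* at most PTP_MAX_SAMPLES */
	if (ioctl(fd, PTP_SYS_OFFSET, &sysoff) < 0)
		return -1;

	for (i = 0; i < sysoff.n_samples; i++) {
		struct ptp_clock_time *t = &sysoff.ts[2 * i];
		long long sys1 = t[0].sec * 1000000000LL + t[0].nsec;
		long long phc  = t[1].sec * 1000000000LL + t[1].nsec;
		long long sys2 = t[2].sec * 1000000000LL + t[2].nsec;
		long long offset = phc - (sys1 + sys2) / 2;

		/* e.g. keep the sample with the smallest sys2 - sys1 window */
	}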
 
 struct ptp_extts_event {
 	struct ptp_clock_time t; /* Time event occured. */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fcd768b..3dee071 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -120,6 +120,11 @@
 	RTM_SETDCB,
 #define RTM_SETDCB RTM_SETDCB
 
+	RTM_NEWNETCONF = 80,
+#define RTM_NEWNETCONF RTM_NEWNETCONF
+	RTM_GETNETCONF = 82,
+#define RTM_GETNETCONF RTM_GETNETCONF
+
 	__RTM_MAX,
 #define RTM_MAX		(((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -587,6 +592,10 @@
 #define RTNLGRP_PHONET_ROUTE	RTNLGRP_PHONET_ROUTE
 	RTNLGRP_DCB,
 #define RTNLGRP_DCB		RTNLGRP_DCB
+	RTNLGRP_IPV4_NETCONF,
+#define RTNLGRP_IPV4_NETCONF	RTNLGRP_IPV4_NETCONF
+	RTNLGRP_IPV6_NETCONF,
+#define RTNLGRP_IPV6_NETCONF	RTNLGRP_IPV6_NETCONF
 	__RTNLGRP_MAX
 };
 #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b1d2bf1..b8a2494 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -37,6 +37,7 @@
 	UNIX_DIAG_ICONS,
 	UNIX_DIAG_RQLEN,
 	UNIX_DIAG_MEMINFO,
+	UNIX_DIAG_SHUTDOWN,
 
 	UNIX_DIAG_MAX,
 };
diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h
index 81a9279..f35aa0a 100644
--- a/include/uapi/linux/usb/cdc.h
+++ b/include/uapi/linux/usb/cdc.h
@@ -19,6 +19,7 @@
 #define USB_CDC_SUBCLASS_OBEX			0x0b
 #define USB_CDC_SUBCLASS_EEM			0x0c
 #define USB_CDC_SUBCLASS_NCM			0x0d
+#define USB_CDC_SUBCLASS_MBIM			0x0e
 
 #define USB_CDC_PROTO_NONE			0
 
@@ -33,6 +34,7 @@
 #define USB_CDC_PROTO_EEM			7
 
 #define USB_CDC_NCM_PROTO_NTB			1
+#define USB_CDC_MBIM_PROTO_NTB			2
 
 /*-------------------------------------------------------------------------*/
 
@@ -53,6 +55,7 @@
 #define USB_CDC_DMM_TYPE		0x14
 #define USB_CDC_OBEX_TYPE		0x15
 #define USB_CDC_NCM_TYPE		0x1a
+#define USB_CDC_MBIM_TYPE		0x1b
 
 /* "Header Functional Descriptor" from CDC spec  5.2.3.1 */
 struct usb_cdc_header_desc {
@@ -187,6 +190,21 @@
 	__le16	bcdNcmVersion;
 	__u8	bmNetworkCapabilities;
 } __attribute__ ((packed));
+
+/* "MBIM Control Model Functional Descriptor" */
+struct usb_cdc_mbim_desc {
+	__u8	bLength;
+	__u8	bDescriptorType;
+	__u8	bDescriptorSubType;
+
+	__le16	bcdMBIMVersion;
+	__le16  wMaxControlMessage;
+	__u8    bNumberFilters;
+	__u8    bMaxFilterSize;
+	__le16  wMaxSegmentSize;
+	__u8    bmNetworkCapabilities;
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -332,6 +350,11 @@
 #define USB_CDC_NCM_NDP32_CRC_SIGN	0x316D636E /* ncm1 */
 #define USB_CDC_NCM_NDP32_NOCRC_SIGN	0x306D636E /* ncm0 */
 
+#define USB_CDC_MBIM_NDP16_IPS_SIGN     0x00535049 /* IPS<sessionID> : IPS0 for now */
+#define USB_CDC_MBIM_NDP32_IPS_SIGN     0x00737069 /* ips<sessionID> : ips0 for now */
+#define USB_CDC_MBIM_NDP16_DSS_SIGN     0x00535344 /* DSS<sessionID> */
+#define USB_CDC_MBIM_NDP32_DSS_SIGN     0x00737364 /* dss<sessionID> */
+
 /* 16-bit NCM Datagram Pointer Entry */
 struct usb_cdc_ncm_dpe16 {
 	__le16	wDatagramIndex;
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index b576f7f..7e1c3de 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -90,7 +90,7 @@
 		goto out_pid;
 	}
 
-	new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
+	new_nsp->net_ns = copy_net_ns(flags, task_cred_xxx(tsk, user_ns), tsk->nsproxy->net_ns);
 	if (IS_ERR(new_nsp->net_ns)) {
 		err = PTR_ERR(new_nsp->net_ns);
 		goto out_net;
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b..ff7d9d2 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)		+= clockevents.o
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
deleted file mode 100644
index a9ae369..0000000
--- a/kernel/time/timecompare.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2009 Intel Corporation.
- * Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/timecompare.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/math64.h>
-#include <linux/kernel.h>
-
-/*
- * fixed point arithmetic scale factor for skew
- *
- * Usually one would measure skew in ppb (parts per billion, 1e9), but
- * using a factor of 2 simplifies the math.
- */
-#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
-
-ktime_t timecompare_transform(struct timecompare *sync,
-			      u64 source_tstamp)
-{
-	u64 nsec;
-
-	nsec = source_tstamp + sync->offset;
-	nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
-		TIMECOMPARE_SKEW_RESOLUTION;
-
-	return ns_to_ktime(nsec);
-}
-EXPORT_SYMBOL_GPL(timecompare_transform);
-
-int timecompare_offset(struct timecompare *sync,
-		       s64 *offset,
-		       u64 *source_tstamp)
-{
-	u64 start_source = 0, end_source = 0;
-	struct {
-		s64 offset;
-		s64 duration_target;
-	} buffer[10], sample, *samples;
-	int counter = 0, i;
-	int used;
-	int index;
-	int num_samples = sync->num_samples;
-
-	if (num_samples > ARRAY_SIZE(buffer)) {
-		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
-		if (!samples) {
-			samples = buffer;
-			num_samples = ARRAY_SIZE(buffer);
-		}
-	} else {
-		samples = buffer;
-	}
-
-	/* run until we have enough valid samples, but do not try forever */
-	i = 0;
-	counter = 0;
-	while (1) {
-		u64 ts;
-		ktime_t start, end;
-
-		start = sync->target();
-		ts = timecounter_read(sync->source);
-		end = sync->target();
-
-		if (!i)
-			start_source = ts;
-
-		/* ignore negative durations */
-		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
-		if (sample.duration_target >= 0) {
-			/*
-			 * assume symetric delay to and from source:
-			 * average target time corresponds to measured
-			 * source time
-			 */
-			sample.offset =
-				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
-				ts;
-
-			/* simple insertion sort based on duration */
-			index = counter - 1;
-			while (index >= 0) {
-				if (samples[index].duration_target <
-				    sample.duration_target)
-					break;
-				samples[index + 1] = samples[index];
-				index--;
-			}
-			samples[index + 1] = sample;
-			counter++;
-		}
-
-		i++;
-		if (counter >= num_samples || i >= 100000) {
-			end_source = ts;
-			break;
-		}
-	}
-
-	*source_tstamp = (end_source + start_source) / 2;
-
-	/* remove outliers by only using 75% of the samples */
-	used = counter * 3 / 4;
-	if (!used)
-		used = counter;
-	if (used) {
-		/* calculate average */
-		s64 off = 0;
-		for (index = 0; index < used; index++)
-			off += samples[index].offset;
-		*offset = div_s64(off, used);
-	}
-
-	if (samples && samples != buffer)
-		kfree(samples);
-
-	return used;
-}
-EXPORT_SYMBOL_GPL(timecompare_offset);
-
-void __timecompare_update(struct timecompare *sync,
-			  u64 source_tstamp)
-{
-	s64 offset;
-	u64 average_time;
-
-	if (!timecompare_offset(sync, &offset, &average_time))
-		return;
-
-	if (!sync->last_update) {
-		sync->last_update = average_time;
-		sync->offset = offset;
-		sync->skew = 0;
-	} else {
-		s64 delta_nsec = average_time - sync->last_update;
-
-		/* avoid division by negative or small deltas */
-		if (delta_nsec >= 10000) {
-			s64 delta_offset_nsec = offset - sync->offset;
-			s64 skew; /* delta_offset_nsec *
-				     TIMECOMPARE_SKEW_RESOLUTION /
-				     delta_nsec */
-			u64 divisor;
-
-			/* div_s64() is limited to 32 bit divisor */
-			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
-			divisor = delta_nsec;
-			while (unlikely(divisor >= ((s64)1) << 32)) {
-				/* divide both by 2; beware, right shift
-				   of negative value has undefined
-				   behavior and can only be used for
-				   the positive divisor */
-				skew = div_s64(skew, 2);
-				divisor >>= 1;
-			}
-			skew = div_s64(skew, divisor);
-
-			/*
-			 * Calculate new overall skew as 4/16 the
-			 * old value and 12/16 the new one. This is
-			 * a rather arbitrary tradeoff between
-			 * only using the latest measurement (0/16 and
-			 * 16/16) and even more weight on past measurements.
-			 */
-#define TIMECOMPARE_NEW_SKEW_PER_16 12
-			sync->skew =
-				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
-					sync->skew +
-					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
-					16);
-			sync->last_update = average_time;
-			sync->offset = offset;
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(__timecompare_update);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ee07072..afba51e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -294,7 +294,7 @@
 	else
 		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
 
@@ -529,7 +529,7 @@
 	switch (args.cmd) {
 	case SET_VLAN_INGRESS_PRIORITY_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		vlan_dev_set_ingress_priority(dev,
 					      args.u.skb_priority,
@@ -539,7 +539,7 @@
 
 	case SET_VLAN_EGRESS_PRIORITY_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = vlan_dev_set_egress_priority(dev,
 						   args.u.skb_priority,
@@ -548,7 +548,7 @@
 
 	case SET_VLAN_FLAG_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = vlan_dev_change_flags(dev,
 					    args.vlan_qos ? args.u.flag : 0,
@@ -557,7 +557,7 @@
 
 	case SET_VLAN_NAME_TYPE_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		if ((args.u.name_type >= 0) &&
 		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
@@ -573,14 +573,14 @@
 
 	case ADD_VLAN_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = register_vlan_device(dev, args.u.VID);
 		break;
 
 	case DEL_VLAN_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		unregister_vlan_dev(dev, NULL);
 		err = 0;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4024424..4a6d31a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -409,7 +409,7 @@
 	return err;
 }
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 				   struct scatterlist *sgl, unsigned int sgc)
 {
@@ -531,6 +531,10 @@
 	.parse	 = eth_header_parse,
 };
 
+static struct device_type vlan_type = {
+	.name	= "vlan",
+};
+
 static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
@@ -564,7 +568,7 @@
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
 #endif
 
@@ -579,6 +583,8 @@
 
 	dev->netdev_ops = &vlan_netdev_ops;
 
+	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
 
@@ -741,7 +747,7 @@
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 	.ndo_get_stats64	= vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4819d315..8eb6fbe8 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -74,6 +74,7 @@
 	struct br2684_filter filter;
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
 	unsigned int copies_needed, copies_failed;
+	atomic_t qspace;
 };
 
 struct br2684_dev {
@@ -181,18 +182,15 @@
 static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
 {
 	struct br2684_vcc *brvcc = BR2684_VCC(vcc);
-	struct net_device *net_dev = skb->dev;
 
-	pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
+	pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
 	brvcc->old_pop(vcc, skb);
 
-	if (!net_dev)
-		return;
-
-	if (atm_may_send(vcc, 0))
-		netif_wake_queue(net_dev);
-
+	/* If the queue space just went up from zero, wake */
+	if (atomic_inc_return(&brvcc->qspace) == 1)
+		netif_wake_queue(brvcc->device);
 }
+
 /*
  * Send a packet out a particular vcc.  Not to useful right now, but paves
  * the way for multiple vcc's per itf.  Returns true if we can send,
@@ -256,16 +254,19 @@
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
-	atmvcc->send(atmvcc, skb);
 
-	if (!atm_may_send(atmvcc, 0)) {
+	if (atomic_dec_return(&brvcc->qspace) < 1) {
+		/* No more please! */
 		netif_stop_queue(brvcc->device);
-		/*check for race with br2684_pop*/
-		if (atm_may_send(atmvcc, 0))
-			netif_start_queue(brvcc->device);
+		/* We might have raced with br2684_pop() */
+		if (unlikely(atomic_read(&brvcc->qspace) > 0))
+			netif_wake_queue(brvcc->device);
 	}
 
-	return 1;
+	/* If this fails immediately, the skb will be freed and br2684_pop()
+	   will wake the queue if appropriate. Just return an error so that
+	   the stats are updated correctly */
+	return !atmvcc->send(atmvcc, skb);
 }
 
 static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
@@ -504,6 +505,13 @@
 	brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
 	if (!brvcc)
 		return -ENOMEM;
+	/*
+	 * Allow two packets in the ATM queue. One actually being sent, and one
+	 * for the ATM 'TX done' handler to send. It shouldn't take long to get
+	 * the next one from the netdev queue, when we need it. More than that
+	 * would be bufferbloat.
+	 */
+	atomic_set(&brvcc->qspace, 2);
 	write_lock_irq(&devs_lock);
 	net_dev = br2684_find_dev(&be.ifspec);
 	if (net_dev == NULL) {
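
For reference, the flow-control scheme introduced above (the atomic qspace counter replacing the atm_may_send() checks) can be modelled in user space with C11 atomics. This is an illustrative sketch only: stop_queue()/wake_queue() stand in for netif_stop_queue()/netif_wake_queue(), and the initial value of 2 mirrors the setting added in br2684_regvcc().

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for netif_stop_queue()/netif_wake_queue(). */
	static bool queue_stopped;
	static void stop_queue(void) { queue_stopped = true; }
	static void wake_queue(void) { queue_stopped = false; }

	/* Two packets may be in flight, as set in br2684_regvcc(). */
	static atomic_int qspace = 2;

	/* Transmit path: mirrors "atomic_dec_return(&brvcc->qspace) < 1". */
	static void xmit_one(void)
	{
		if (atomic_fetch_sub(&qspace, 1) - 1 < 1) {
			stop_queue();
			/* re-check in case the completion path raced with us */
			if (atomic_load(&qspace) > 0)
				wake_queue();
		}
	}

	/* TX-done path: mirrors "atomic_inc_return(&brvcc->qspace) == 1". */
	static void tx_done_one(void)
	{
		if (atomic_fetch_add(&qspace, 1) + 1 == 1)
			wake_queue();
	}

	int main(void)
	{
		xmit_one();		/* 2 -> 1: queue stays awake */
		xmit_one();		/* 1 -> 0: queue is stopped  */
		printf("stopped after two sends: %d\n", queue_stopped);
		tx_done_one();		/* 0 -> 1: queue is woken    */
		printf("stopped after one completion: %d\n", queue_stopped);
		return 0;
	}

Counting queue slots this way also removes the reliance on skb->dev in the pop path, which the old code had to guard against being NULL.
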
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 53f5244..8d8afb1 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -6,6 +6,7 @@
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
 	depends on NET
 	select CRC16
+	select LIBCRC32C
         default n
 	help
           B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -25,6 +26,16 @@
 	  more than one mesh node in the same LAN, you can safely remove
 	  this feature and save some space.
 
+config BATMAN_ADV_DAT
+	bool "Distributed ARP Table"
+	depends on BATMAN_ADV && INET
+	default n
+	help
+	  This option enables DAT (Distributed ARP Table), a DHT based
+	  mechanism that increases ARP reliability on sparse wireless
+	  mesh networks. If you think that your network does not need
+	  this option you can safely remove it and save some space.
+
 config BATMAN_ADV_DEBUG
 	bool "B.A.T.M.A.N. debugging"
 	depends on BATMAN_ADV
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 8676d2b..e45e3b4 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@
 batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += debugfs.o
+batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b02b75d..9f3925a 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -57,20 +57,22 @@
 static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff;
 	uint32_t random_seqno;
 	int res = -ENOMEM;
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
-	atomic_set(&hard_iface->seqno, random_seqno);
+	atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
-	hard_iface->packet_len = BATADV_OGM_HLEN;
-	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
-
-	if (!hard_iface->packet_buff)
+	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+	if (!ogm_buff)
 		goto out;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	hard_iface->bat_iv.ogm_buff = ogm_buff;
+
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
 	batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
 	batadv_ogm_packet->header.ttl = 2;
@@ -87,15 +89,16 @@
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
-	kfree(hard_iface->packet_buff);
-	hard_iface->packet_buff = NULL;
+	kfree(hard_iface->bat_iv.ogm_buff);
+	hard_iface->bat_iv.ogm_buff = NULL;
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	memcpy(batadv_ogm_packet->orig,
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(batadv_ogm_packet->prev_sender,
@@ -106,8 +109,9 @@
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
 	batadv_ogm_packet->header.ttl = BATADV_TTL;
 }
@@ -407,9 +411,11 @@
 
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
 	    (packet_len < BATADV_MAX_AGGREGATION_BYTES))
-		skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
+		skb_size = BATADV_MAX_AGGREGATION_BYTES;
 	else
-		skb_size = packet_len + ETH_HLEN;
+		skb_size = packet_len;
+
+	skb_size += ETH_HLEN + NET_IP_ALIGN;
 
 	forw_packet_aggr->skb = dev_alloc_skb(skb_size);
 	if (!forw_packet_aggr->skb) {
@@ -418,7 +424,7 @@
 		kfree(forw_packet_aggr);
 		goto out;
 	}
-	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
+	skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
 
 	INIT_HLIST_NODE(&forw_packet_aggr->list);
 
@@ -590,8 +596,10 @@
 static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if;
+	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
 	int vis_server, tt_num_changes = 0;
 	uint32_t seqno;
 	uint8_t bandwidth;
@@ -600,17 +608,16 @@
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 
 	if (hard_iface == primary_if)
-		tt_num_changes = batadv_tt_append_diff(bat_priv,
-						       &hard_iface->packet_buff,
-						       &hard_iface->packet_len,
+		tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
+						       ogm_buff_len,
 						       BATADV_OGM_HLEN);
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
 
 	/* change sequence number to network order */
-	seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
-	atomic_inc(&hard_iface->seqno);
+	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
 	batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
 	batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
@@ -631,8 +638,8 @@
 	}
 
 	batadv_slide_own_bcast_window(hard_iface);
-	batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
-				hard_iface->packet_len, hard_iface, 1,
+	batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
+				hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
 				batadv_iv_ogm_emit_send_time(bat_priv));
 
 	if (primary_if)
@@ -1015,7 +1022,7 @@
 		return;
 
 	/* could be changed by schedule_own_packet() */
-	if_incoming_seqno = atomic_read(&if_incoming->seqno);
+	if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
 
 	if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
 		has_directlink_flag = 1;
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index aea174c..5453b17 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -79,20 +79,17 @@
 	 * or the old packet got delayed somewhere in the network. The
 	 * packet should be dropped without calling this function if the
 	 * seqno window is protected.
+	 *
+	 * seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE
+	 * or
+	 * seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE
 	 */
-	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
-	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		   "Other host probably restarted!\n");
 
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Other host probably restarted!\n");
+	bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+	if (set_mark)
+		batadv_set_bit(seq_bits, 0);
 
-		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-		if (set_mark)
-			batadv_set_bit(seq_bits, 0);
-
-		return 1;
-	}
-
-	/* never reached */
-	return 0;
+	return 1;
 }
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index fd8d5af..5aebe93 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -40,15 +40,11 @@
 /* return the index of the claim */
 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 {
-	const unsigned char *key = data;
+	struct batadv_claim *claim = (struct batadv_claim *)data;
 	uint32_t hash = 0;
-	size_t i;
 
-	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
+	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
+	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
@@ -61,15 +57,11 @@
 static inline uint32_t batadv_choose_backbone_gw(const void *data,
 						 uint32_t size)
 {
-	const unsigned char *key = data;
+	struct batadv_claim *claim = (struct batadv_claim *)data;
 	uint32_t hash = 0;
-	size_t i;
 
-	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
+	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
+	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
@@ -85,8 +77,15 @@
 {
 	const void *data1 = container_of(node, struct batadv_backbone_gw,
 					 hash_entry);
+	const struct batadv_backbone_gw *gw1 = data1, *gw2 = data2;
 
-	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+	if (!batadv_compare_eth(gw1->orig, gw2->orig))
+		return 0;
+
+	if (gw1->vid != gw2->vid)
+		return 0;
+
+	return 1;
 }
 
 /* compares address and vid of two claims */
@@ -95,8 +94,15 @@
 {
 	const void *data1 = container_of(node, struct batadv_claim,
 					 hash_entry);
+	const struct batadv_claim *cl1 = data1, *cl2 = data2;
 
-	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+	if (!batadv_compare_eth(cl1->addr, cl2->addr))
+		return 0;
+
+	if (cl1->vid != cl2->vid)
+		return 0;
+
+	return 1;
 }
 
 /* free a backbone gw */
@@ -362,7 +368,7 @@
  */
 static struct batadv_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
-			   short vid)
+			   short vid, bool own_backbone)
 {
 	struct batadv_backbone_gw *entry;
 	struct batadv_orig_node *orig_node;
@@ -386,6 +392,7 @@
 	entry->crc = BATADV_BLA_CRC_INIT;
 	entry->bat_priv = bat_priv;
 	atomic_set(&entry->request_sent, 0);
+	atomic_set(&entry->wait_periods, 0);
 	memcpy(entry->orig, orig, ETH_ALEN);
 
 	/* one for the hash, one for returning */
@@ -409,6 +416,16 @@
 					  "became a backbone gateway");
 		batadv_orig_node_free_ref(orig_node);
 	}
+
+	if (own_backbone) {
+		batadv_bla_send_announce(bat_priv, entry);
+
+		/* this will be decreased in the worker thread */
+		atomic_inc(&entry->request_sent);
+		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
+		atomic_inc(&bat_priv->bla.num_requests);
+	}
+
 	return entry;
 }
 
@@ -424,7 +441,7 @@
 
 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
 						 primary_if->net_dev->dev_addr,
-						 vid);
+						 vid, true);
 	if (unlikely(!backbone_gw))
 		return;
 
@@ -632,7 +649,8 @@
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 		return 0;
 
-	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+						 false);
 
 	if (unlikely(!backbone_gw))
 		return 1;
@@ -730,7 +748,8 @@
 
 	/* register the gateway if not yet available, and add the claim. */
 
-	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+						 false);
 
 	if (unlikely(!backbone_gw))
 		return 1;
@@ -1140,6 +1159,24 @@
 			backbone_gw->lasttime = jiffies;
 
 			batadv_bla_send_announce(bat_priv, backbone_gw);
+
+			/* request_sent is only set after creation to avoid
+			 * problems when we are not yet known as backbone gw
+			 * in the backbone.
+			 *
+			 * We can reset this now after we waited some periods
+			 * to give bridge forward delays and bla group forming
+			 * some grace time.
+			 */
+
+			if (atomic_read(&backbone_gw->request_sent) == 0)
+				continue;
+
+			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
+				continue;
+
+			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
+			atomic_set(&backbone_gw->request_sent, 0);
 		}
 		rcu_read_unlock();
 	}
@@ -1212,8 +1249,7 @@
 /**
  * batadv_bla_check_bcast_duplist
  * @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: encapsulated broadcast frame plus batman header
- * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
+ * @skb: contains the bcast_packet to be checked
  *
  * check if it is on our broadcast list. Another gateway might
  * have sent the same packet because it is connected to the same backbone,
@@ -1225,20 +1261,17 @@
  * the same host however as this might be intended.
  */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct batadv_bcast_packet *bcast_packet,
-				   int bcast_packet_len)
+				   struct sk_buff *skb)
 {
-	int i, length, curr, ret = 0;
-	uint8_t *content;
-	uint16_t crc;
+	int i, curr, ret = 0;
+	__be32 crc;
+	struct batadv_bcast_packet *bcast_packet;
 	struct batadv_bcast_duplist_entry *entry;
 
-	length = bcast_packet_len - sizeof(*bcast_packet);
-	content = (uint8_t *)bcast_packet;
-	content += sizeof(*bcast_packet);
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
 	/* calculate the crc ... */
-	crc = crc16(0, content, length);
+	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
 
 	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
 
@@ -1585,23 +1618,11 @@
 	struct hlist_head *head;
 	uint32_t i;
 	bool is_own;
-	int ret = 0;
 	uint8_t *primary_addr;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
@@ -1628,7 +1649,7 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
@@ -1643,23 +1664,11 @@
 	int secs, msecs;
 	uint32_t i;
 	bool is_own;
-	int ret = 0;
 	uint8_t *primary_addr;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
@@ -1693,5 +1702,5 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
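
As a side note on the claim/backbone hashing change above: a minimal user-space sketch of the same idea, assuming batadv_hash_bytes() performs the one-at-a-time accumulation that the removed open-coded loops did (hash += byte; hash += hash << 10; hash ^= hash >> 6) before the common finalization. The MAC address, vid and table size below are made-up example values.

	#include <stdint.h>
	#include <stdio.h>

	/* One-at-a-time accumulation over an arbitrary byte buffer. */
	static uint32_t hash_bytes(uint32_t hash, const void *data, size_t len)
	{
		const uint8_t *key = data;
		size_t i;

		for (i = 0; i < len; i++) {
			hash += key[i];
			hash += hash << 10;
			hash ^= hash >> 6;
		}
		return hash;
	}

	int main(void)
	{
		uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
		short vid = 42;
		uint32_t hash = 0;

		/* Hash the claim fields one by one instead of assuming they
		 * are laid out back to back in memory, as the old memcmp/
		 * byte-loop code did.
		 */
		hash = hash_bytes(hash, addr, sizeof(addr));
		hash = hash_bytes(hash, &vid, sizeof(vid));

		/* common finalization, as in batadv_choose_claim() */
		hash += hash << 3;
		hash ^= hash >> 11;
		hash += hash << 15;

		printf("bucket for 1024 slots: %u\n", (unsigned)(hash % 1024));
		return 0;
	}
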
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 789cb73..196d9a0 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -31,8 +31,7 @@
 					     void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct batadv_bcast_packet *bcast_packet,
-				   int hdr_size);
+				   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
 				    struct batadv_hard_iface *oldif);
@@ -81,8 +80,7 @@
 
 static inline int
 batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-			       struct batadv_bcast_packet *bcast_packet,
-			       int hdr_size)
+			       struct sk_buff *skb)
 {
 	return 0;
 }
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 391d4fb..6f58ddd 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -31,6 +31,7 @@
 #include "vis.h"
 #include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
 
 static struct dentry *batadv_debugfs;
 
@@ -99,15 +100,17 @@
 
 static int batadv_log_open(struct inode *inode, struct file *file)
 {
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
 	nonseekable_open(inode, file);
 	file->private_data = inode->i_private;
-	batadv_inc_module_count();
 	return 0;
 }
 
 static int batadv_log_release(struct inode *inode, struct file *file)
 {
-	batadv_dec_module_count();
+	module_put(THIS_MODULE);
 	return 0;
 }
 
@@ -278,6 +281,19 @@
 
 #endif
 
+#ifdef CONFIG_BATMAN_ADV_DAT
+/**
+ * batadv_dat_cache_open - Prepare file handler for reads from dat_cache
+ * @inode: inode which was opened
+ * @file: file handle to be initialized
+ */
+static int batadv_dat_cache_open(struct inode *inode, struct file *file)
+{
+	struct net_device *net_dev = (struct net_device *)inode->i_private;
+	return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
+}
+#endif
+
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -307,7 +323,17 @@
 		}					\
 };
 
+/* the following attributes are general and therefore they will be directly
+ * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
+ */
 static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
+
+static struct batadv_debuginfo *batadv_general_debuginfos[] = {
+	&batadv_debuginfo_routing_algos,
+	NULL,
+};
+
+/* The following attributes are per soft interface */
 static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
 static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
 static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
@@ -317,6 +343,9 @@
 static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
 			batadv_bla_backbone_table_open);
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
+#endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
 			batadv_transtable_local_open);
 static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
@@ -329,6 +358,9 @@
 	&batadv_debuginfo_bla_claim_table,
 	&batadv_debuginfo_bla_backbone_table,
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+	&batadv_debuginfo_dat_cache,
+#endif
 	&batadv_debuginfo_transtable_local,
 	&batadv_debuginfo_vis_data,
 	NULL,
@@ -336,7 +368,7 @@
 
 void batadv_debugfs_init(void)
 {
-	struct batadv_debuginfo *bat_debug;
+	struct batadv_debuginfo **bat_debug;
 	struct dentry *file;
 
 	batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
@@ -344,17 +376,23 @@
 		batadv_debugfs = NULL;
 
 	if (!batadv_debugfs)
-		goto out;
+		goto err;
 
-	bat_debug = &batadv_debuginfo_routing_algos;
-	file = debugfs_create_file(bat_debug->attr.name,
-				   S_IFREG | bat_debug->attr.mode,
-				   batadv_debugfs, NULL, &bat_debug->fops);
-	if (!file)
-		pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+	for (bat_debug = batadv_general_debuginfos; *bat_debug; ++bat_debug) {
+		file = debugfs_create_file(((*bat_debug)->attr).name,
+					   S_IFREG | ((*bat_debug)->attr).mode,
+					   batadv_debugfs, NULL,
+					   &(*bat_debug)->fops);
+		if (!file) {
+			pr_err("Can't add general debugfs file: %s\n",
+			       ((*bat_debug)->attr).name);
+			goto err;
+		}
+	}
 
-out:
 	return;
+err:
+	debugfs_remove_recursive(batadv_debugfs);
 }
 
 void batadv_debugfs_destroy(void)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
new file mode 100644
index 0000000..8e1d89d
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.c
@@ -0,0 +1,1066 @@
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+
+#include "main.h"
+#include "hash.h"
+#include "distributed-arp-table.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "send.h"
+#include "types.h"
+#include "translation-table.h"
+#include "unicast.h"
+
+static void batadv_dat_purge(struct work_struct *work);
+
+/**
+ * batadv_dat_start_timer - initialise the DAT periodic worker
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
+			   msecs_to_jiffies(10000));
+}
+
+/**
+ * batadv_dat_entry_free_ref - decrements the dat_entry refcounter and possibly
+ * frees it
+ * @dat_entry: the entry to free
+ */
+static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
+{
+	if (atomic_dec_and_test(&dat_entry->refcount))
+		kfree_rcu(dat_entry, rcu);
+}
+
+/**
+ * batadv_dat_to_purge - checks whether a dat_entry has to be purged or not
+ * @dat_entry: the entry to check
+ *
+ * Returns true if the entry has to be purged now, false otherwise
+ */
+static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
+{
+	return batadv_has_timed_out(dat_entry->last_update,
+				    BATADV_DAT_ENTRY_TIMEOUT);
+}
+
+/**
+ * __batadv_dat_purge - delete entries from the DAT local storage
+ * @bat_priv: the bat priv with all the soft interface information
+ * @to_purge: function in charge of deciding whether an entry has to be purged
+ *	      or not. It takes the dat_entry as argument and returns a boolean
+ *	      value: true if the entry has to be deleted, false otherwise
+ *
+ * Loops over each entry in the DAT local storage and deletes it if and only if
+ * the to_purge function passed as argument returns true
+ */
+static void __batadv_dat_purge(struct batadv_priv *bat_priv,
+			       bool (*to_purge)(struct batadv_dat_entry *))
+{
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct batadv_dat_entry *dat_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	uint32_t i;
+
+	if (!bat_priv->dat.hash)
+		return;
+
+	for (i = 0; i < bat_priv->dat.hash->size; i++) {
+		head = &bat_priv->dat.hash->table[i];
+		list_lock = &bat_priv->dat.hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
+					  hash_entry) {
+			/* if a helper function has been passed as parameter,
+			 * ask it if the entry has to be purged or not
+			 */
+			if (to_purge && !to_purge(dat_entry))
+				continue;
+
+			hlist_del_rcu(node);
+			batadv_dat_entry_free_ref(dat_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+}
+
+/**
+ * batadv_dat_purge - periodic task that deletes old entries from the local DAT
+ * hash table
+ * @work: kernel work struct
+ */
+static void batadv_dat_purge(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_priv_dat *priv_dat;
+	struct batadv_priv *bat_priv;
+
+	delayed_work = container_of(work, struct delayed_work, work);
+	priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
+	bat_priv = container_of(priv_dat, struct batadv_priv, dat);
+
+	__batadv_dat_purge(bat_priv, batadv_dat_to_purge);
+	batadv_dat_start_timer(bat_priv);
+}
+
+/**
+ * batadv_compare_dat - comparing function used in the local DAT hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Returns 1 if the two entries are the same, 0 otherwise
+ */
+static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct batadv_dat_entry,
+					 hash_entry);
+
+	return (memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0);
+}
+
+/**
+ * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the hw_src field in the ARP packet
+ */
+static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+{
+	uint8_t *addr;
+
+	addr = (uint8_t *)(skb->data + hdr_size);
+	addr += ETH_HLEN + sizeof(struct arphdr);
+
+	return addr;
+}
+
+/**
+ * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the ip_src field in the ARP packet
+ */
+static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
+{
+	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
+}
+
+/**
+ * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the hw_dst field in the ARP packet
+ */
+static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+{
+	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
+}
+
+/**
+ * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the ip_dst field in the ARP packet
+ */
+static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
+{
+	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
+}
+
+/**
+ * batadv_hash_dat - compute the hash value for an IP address
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Returns the selected index in the hash table for the given data
+ */
+static uint32_t batadv_hash_dat(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < 4; i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+/**
+ * batadv_dat_entry_hash_find - looks for a given dat_entry in the local hash
+ * table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: search key
+ *
+ * Returns the dat_entry if found, NULL otherwise
+ */
+static struct batadv_dat_entry *
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+	struct batadv_hashtable *hash = bat_priv->dat.hash;
+	uint32_t index;
+
+	if (!hash)
+		return NULL;
+
+	index = batadv_hash_dat(&ip, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+		if (dat_entry->ip != ip)
+			continue;
+
+		if (!atomic_inc_not_zero(&dat_entry->refcount))
+			continue;
+
+		dat_entry_tmp = dat_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return dat_entry_tmp;
+}
+
+/**
+ * batadv_dat_entry_add - add a new dat entry or update it if it already exists
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: ipv4 to add/edit
+ * @mac_addr: mac address to assign to the given ipv4
+ */
+static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
+				 uint8_t *mac_addr)
+{
+	struct batadv_dat_entry *dat_entry;
+	int hash_added;
+
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+	/* if this entry is already known, just update it */
+	if (dat_entry) {
+		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
+			memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+		dat_entry->last_update = jiffies;
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "Entry updated: %pI4 %pM\n", &dat_entry->ip,
+			   dat_entry->mac_addr);
+		goto out;
+	}
+
+	dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
+	if (!dat_entry)
+		goto out;
+
+	dat_entry->ip = ip;
+	memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+	dat_entry->last_update = jiffies;
+	atomic_set(&dat_entry->refcount, 2);
+
+	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
+				     batadv_hash_dat, &dat_entry->ip,
+				     &dat_entry->hash_entry);
+
+	if (unlikely(hash_added != 0)) {
+		/* remove the reference for the hash */
+		batadv_dat_entry_free_ref(dat_entry);
+		goto out;
+	}
+
+	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
+		   &dat_entry->ip, dat_entry->mac_addr);
+
+out:
+	if (dat_entry)
+		batadv_dat_entry_free_ref(dat_entry);
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+/**
+ * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: ARP packet
+ * @type: ARP type
+ * @hdr_size: size of the possible header before the ARP packet
+ * @msg: message to print together with the debugging information
+ */
+static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			   uint16_t type, int hdr_size, char *msg)
+{
+	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+	struct batadv_bcast_packet *bcast_pkt;
+	uint8_t *orig_addr;
+	__be32 ip_src, ip_dst;
+
+	if (msg)
+		batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
+
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+	batadv_dbg(BATADV_DBG_DAT, bat_priv,
+		   "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
+		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
+		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
+
+	if (hdr_size == 0)
+		return;
+
+	/* if the ARP packet is encapsulated in a batman packet, let's print
+	 * some debug messages
+	 */
+	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+
+	switch (unicast_4addr_packet->u.header.packet_type) {
+	case BATADV_UNICAST:
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a UNICAST packet\n");
+		break;
+	case BATADV_UNICAST_4ADDR:
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
+			   unicast_4addr_packet->src);
+		switch (unicast_4addr_packet->subtype) {
+		case BATADV_P_DAT_DHT_PUT:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
+			break;
+		case BATADV_P_DAT_DHT_GET:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
+			break;
+		case BATADV_P_DAT_CACHE_REPLY:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv,
+				   "* type: DAT_CACHE_REPLY\n");
+			break;
+		case BATADV_P_DATA:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
+			break;
+		default:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
+				   unicast_4addr_packet->u.header.packet_type);
+		}
+		break;
+	case BATADV_BCAST:
+		bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
+		orig_addr = bcast_pkt->orig;
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a BCAST packet (src: %pM)\n",
+			   orig_addr);
+		break;
+	default:
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within an unknown packet type (0x%x)\n",
+			   unicast_4addr_packet->u.header.packet_type);
+	}
+}
+
+#else
+
+static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			   uint16_t type, int hdr_size, char *msg)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+/**
+ * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * @res: the array with the already selected candidates
+ * @select: number of already selected candidates
+ * @tmp_max: address of the currently evaluated node
+ * @max: current round max address
+ * @last_max: address of the last selected candidate
+ * @candidate: orig_node under evaluation
+ * @max_orig_node: last selected candidate
+ *
+ * Returns true if the node has been elected as next candidate, false otherwise
+ */
+static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
+					 int select, batadv_dat_addr_t tmp_max,
+					 batadv_dat_addr_t max,
+					 batadv_dat_addr_t last_max,
+					 struct batadv_orig_node *candidate,
+					 struct batadv_orig_node *max_orig_node)
+{
+	bool ret = false;
+	int j;
+
+	/* Check if this node has already been selected... */
+	for (j = 0; j < select; j++)
+		if (res[j].orig_node == candidate)
+			break;
+	/* ..and possibly skip it */
+	if (j < select)
+		goto out;
+	/* sanity check: has it already been selected? This should not happen */
+	if (tmp_max > last_max)
+		goto out;
+	/* check if during this iteration an originator with a closer dht
+	 * address has already been found
+	 */
+	if (tmp_max < max)
+		goto out;
+	/* this is a hash collision with the temporarily selected node. Choose
+	 * the one with the lowest address
+	 */
+	if ((tmp_max == max) &&
+	    (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
+		goto out;
+
+	ret = true;
+out:
+	return ret;
+}
+
+/**
+ * batadv_choose_next_candidate - select the next DHT candidate
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cands: candidates array
+ * @select: number of candidates already present in the array
+ * @ip_key: key to look up in the DHT
+ * @last_max: pointer where the address of the selected candidate will be saved
+ */
+static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+					 struct batadv_dat_candidate *cands,
+					 int select, batadv_dat_addr_t ip_key,
+					 batadv_dat_addr_t *last_max)
+{
+	batadv_dat_addr_t max = 0, tmp_max = 0;
+	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int i;
+
+	/* if no node is eligible as candidate, leave the candidate type as
+	 * NOT_FOUND
+	 */
+	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;
+
+	/* iterate over the originator list and find the node with closest
+	 * dat_address which has not been selected yet
+	 */
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+			/* the dht space is a ring and addresses are unsigned */
+			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
+				  ip_key;
+
+			if (!batadv_is_orig_node_eligible(cands, select,
+							  tmp_max, max,
+							  *last_max, orig_node,
+							  max_orig_node))
+				continue;
+
+			if (!atomic_inc_not_zero(&orig_node->refcount))
+				continue;
+
+			max = tmp_max;
+			if (max_orig_node)
+				batadv_orig_node_free_ref(max_orig_node);
+			max_orig_node = orig_node;
+		}
+		rcu_read_unlock();
+	}
+	if (max_orig_node) {
+		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
+		cands[select].orig_node = max_orig_node;
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
+			   select, max_orig_node->orig, max_orig_node->dat_addr,
+			   max);
+	}
+	*last_max = max;
+}
+
+/**
+ * batadv_dat_select_candidates - select the nodes to which the DHT message
+ * has to be sent
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip_dst: ipv4 to look up in the DHT
+ *
+ * An originator O is selected if and only if its DHT_ID value is one of the
+ * three closest values (from the LEFT, with wrap-around if needed) to the
+ * hash value of the key. ip_dst is the key.
+ *
+ * Returns the candidate array of size BATADV_DAT_CANDIDATES_NUM
+ */
+static struct batadv_dat_candidate *
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+{
+	int select;
+	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+	struct batadv_dat_candidate *res;
+
+	if (!bat_priv->orig_hash)
+		return NULL;
+
+	res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC);
+	if (!res)
+		return NULL;
+
+	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst,
+						    BATADV_DAT_ADDR_MAX);
+
+	batadv_dbg(BATADV_DBG_DAT, bat_priv,
+		   "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst,
+		   ip_key);
+
+	for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
+		batadv_choose_next_candidate(bat_priv, res, select, ip_key,
+					     &last_max);
+
+	return res;
+}
+
+/**
+ * batadv_dat_send_data - send a payload to the selected candidates
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @ip: the DHT key
+ * @packet_subtype: unicast4addr packet subtype to use
+ *
+ * In this function the skb is copied by means of pskb_copy() and is sent as a
+ * unicast packet to each of the selected candidates
+ *
+ * Returns true if the packet is sent to at least one candidate, false otherwise
+ */
+static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+				 struct sk_buff *skb, __be32 ip,
+				 int packet_subtype)
+{
+	int i;
+	bool ret = false;
+	int send_status;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct sk_buff *tmp_skb;
+	struct batadv_dat_candidate *cand;
+
+	cand = batadv_dat_select_candidates(bat_priv, ip);
+	if (!cand)
+		goto out;
+
+	batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
+
+	for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
+		if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
+			continue;
+
+		neigh_node = batadv_orig_node_get_router(cand[i].orig_node);
+		if (!neigh_node)
+			goto free_orig;
+
+		tmp_skb = pskb_copy(skb, GFP_ATOMIC);
+		if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
+						      cand[i].orig_node,
+						      packet_subtype)) {
+			kfree_skb(tmp_skb);
+			goto free_neigh;
+		}
+
+		send_status = batadv_send_skb_packet(tmp_skb,
+						     neigh_node->if_incoming,
+						     neigh_node->addr);
+		if (send_status == NET_XMIT_SUCCESS) {
+			/* count the sent packet */
+			switch (packet_subtype) {
+			case BATADV_P_DAT_DHT_GET:
+				batadv_inc_counter(bat_priv,
+						   BATADV_CNT_DAT_GET_TX);
+				break;
+			case BATADV_P_DAT_DHT_PUT:
+				batadv_inc_counter(bat_priv,
+						   BATADV_CNT_DAT_PUT_TX);
+				break;
+			}
+
+			/* packet sent to a candidate: return true */
+			ret = true;
+		}
+free_neigh:
+		batadv_neigh_node_free_ref(neigh_node);
+free_orig:
+		batadv_orig_node_free_ref(cand[i].orig_node);
+	}
+
+out:
+	kfree(cand);
+	return ret;
+}
+
+/**
+ * batadv_dat_hash_free - free the local DAT hash table
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
+{
+	if (!bat_priv->dat.hash)
+		return;
+
+	__batadv_dat_purge(bat_priv, NULL);
+
+	batadv_hash_destroy(bat_priv->dat.hash);
+
+	bat_priv->dat.hash = NULL;
+}
+
+/**
+ * batadv_dat_init - initialise the DAT internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+int batadv_dat_init(struct batadv_priv *bat_priv)
+{
+	if (bat_priv->dat.hash)
+		return 0;
+
+	bat_priv->dat.hash = batadv_hash_new(1024);
+
+	if (!bat_priv->dat.hash)
+		return -ENOMEM;
+
+	batadv_dat_start_timer(bat_priv);
+
+	return 0;
+}
+
+/**
+ * batadv_dat_free - free the DAT internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_dat_free(struct batadv_priv *bat_priv)
+{
+	cancel_delayed_work_sync(&bat_priv->dat.work);
+
+	batadv_dat_hash_free(bat_priv);
+}
+
+/**
+ * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * @seq: seq file to print on
+ * @offset: not used
+ */
+int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hashtable *hash = bat_priv->dat.hash;
+	struct batadv_dat_entry *dat_entry;
+	struct batadv_hard_iface *primary_if;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	unsigned long last_seen_jiffies;
+	int last_seen_msecs, last_seen_secs, last_seen_mins;
+	uint32_t i;
+
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
+		goto out;
+
+	seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
+	seq_printf(seq, "          %-7s          %-13s %5s\n", "IPv4", "MAC",
+		   "last-seen");
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+			last_seen_jiffies = jiffies - dat_entry->last_update;
+			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+			last_seen_mins = last_seen_msecs / 60000;
+			last_seen_msecs = last_seen_msecs % 60000;
+			last_seen_secs = last_seen_msecs / 1000;
+
+			seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
+				   &dat_entry->ip, dat_entry->mac_addr,
+				   last_seen_mins, last_seen_secs);
+		}
+		rcu_read_unlock();
+	}
+
+out:
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	return 0;
+}
+
+/**
+ * batadv_arp_get_type - parse an ARP packet and gets the type
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to analyse
+ * @hdr_size: size of the possible header before the ARP packet in the skb
+ *
+ * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise
+ */
+static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb, int hdr_size)
+{
+	struct arphdr *arphdr;
+	struct ethhdr *ethhdr;
+	__be32 ip_src, ip_dst;
+	uint16_t type = 0;
+
+	/* pull the ethernet header */
+	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
+		goto out;
+
+	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
+
+	if (ethhdr->h_proto != htons(ETH_P_ARP))
+		goto out;
+
+	/* pull the ARP payload */
+	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
+				    arp_hdr_len(skb->dev))))
+		goto out;
+
+	arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
+
+	/* Check whether the ARP packet carries valid IP information */
+	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+		goto out;
+
+	if (arphdr->ar_pro != htons(ETH_P_IP))
+		goto out;
+
+	if (arphdr->ar_hln != ETH_ALEN)
+		goto out;
+
+	if (arphdr->ar_pln != 4)
+		goto out;
+
+	/* Check for bad reply/request. If the ARP message is not sane, DAT
+	 * will simply ignore it
+	 */
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
+	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+		goto out;
+
+	type = ntohs(arphdr->ar_op);
+out:
+	return type;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * answer using DAT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ *
+ * Returns true if the message has been sent to the dht candidates, false
+ * otherwise. In case of true the message has to be enqueued to permit the
+ * fallback
+ */
+bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb)
+{
+	uint16_t type = 0;
+	__be32 ip_dst, ip_src;
+	uint8_t *hw_src;
+	bool ret = false;
+	struct batadv_dat_entry *dat_entry = NULL;
+	struct sk_buff *skb_new;
+	struct batadv_hard_iface *primary_if = NULL;
+
+	if (!atomic_read(&bat_priv->distributed_arp_table))
+		goto out;
+
+	type = batadv_arp_get_type(bat_priv, skb, 0);
+	/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
+	 * message to the selected DHT candidates
+	 */
+	if (type != ARPOP_REQUEST)
+		goto out;
+
+	batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
+
+	ip_src = batadv_arp_ip_src(skb, 0);
+	hw_src = batadv_arp_hw_src(skb, 0);
+	ip_dst = batadv_arp_ip_dst(skb, 0);
+
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	if (dat_entry) {
+		primary_if = batadv_primary_if_get_selected(bat_priv);
+		if (!primary_if)
+			goto out;
+
+		skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
+				     primary_if->soft_iface, ip_dst, hw_src,
+				     dat_entry->mac_addr, hw_src);
+		if (!skb_new)
+			goto out;
+
+		skb_reset_mac_header(skb_new);
+		skb_new->protocol = eth_type_trans(skb_new,
+						   primary_if->soft_iface);
+		bat_priv->stats.rx_packets++;
+		bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+		primary_if->soft_iface->last_rx = jiffies;
+
+		netif_rx(skb_new);
+		batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
+		ret = true;
+	} else {
+		/* Send the request on the DHT */
+		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+					   BATADV_P_DAT_DHT_GET);
+	}
+out:
+	if (dat_entry)
+		batadv_dat_entry_free_ref(dat_entry);
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	return ret;
+}
+
+/**
+ * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * answer using the local DAT storage
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ * @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the request has been answered, false otherwise
+ */
+bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb, int hdr_size)
+{
+	uint16_t type;
+	__be32 ip_src, ip_dst;
+	uint8_t *hw_src;
+	struct sk_buff *skb_new;
+	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_dat_entry *dat_entry = NULL;
+	bool ret = false;
+	int err;
+
+	if (!atomic_read(&bat_priv->distributed_arp_table))
+		goto out;
+
+	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+	if (type != ARPOP_REQUEST)
+		goto out;
+
+	hw_src = batadv_arp_hw_src(skb, hdr_size);
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+		       "Parsing incoming ARP REQUEST");
+
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	if (!dat_entry)
+		goto out;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
+			     primary_if->soft_iface, ip_dst, hw_src,
+			     dat_entry->mac_addr, hw_src);
+
+	if (!skb_new)
+		goto out;
+
+	/* to preserve backwards compatibility, here the node has to answer
+	 * using the same packet type it received for the request: a node that
+	 * is not using the 4addr packet format may not support it.
+	 */
+	if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
+		err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
+						    BATADV_P_DAT_CACHE_REPLY);
+	else
+		err = batadv_unicast_send_skb(bat_priv, skb_new);
+
+	if (!err) {
+		batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
+		ret = true;
+	}
+out:
+	if (dat_entry)
+		batadv_dat_entry_free_ref(dat_entry);
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	if (ret)
+		kfree_skb(skb);
+	return ret;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ */
+void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb)
+{
+	uint16_t type;
+	__be32 ip_src, ip_dst;
+	uint8_t *hw_src, *hw_dst;
+
+	if (!atomic_read(&bat_priv->distributed_arp_table))
+		return;
+
+	type = batadv_arp_get_type(bat_priv, skb, 0);
+	if (type != ARPOP_REPLY)
+		return;
+
+	batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
+
+	hw_src = batadv_arp_hw_src(skb, 0);
+	ip_src = batadv_arp_ip_src(skb, 0);
+	hw_dst = batadv_arp_hw_dst(skb, 0);
+	ip_dst = batadv_arp_ip_dst(skb, 0);
+
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+
+	/* Send the ARP reply to the candidates for both the IP addresses that
+	 * the node got within the ARP reply
+	 */
+	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+}
+
+/**
+ * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
+ * DAT storage only
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ * @hdr_size: size of the encapsulation header
+ */
+bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb, int hdr_size)
+{
+	uint16_t type;
+	__be32 ip_src, ip_dst;
+	uint8_t *hw_src, *hw_dst;
+	bool ret = false;
+
+	if (!atomic_read(&bat_priv->distributed_arp_table))
+		goto out;
+
+	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+	if (type != ARPOP_REPLY)
+		goto out;
+
+	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+		       "Parsing incoming ARP REPLY");
+
+	hw_src = batadv_arp_hw_src(skb, hdr_size);
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+	/* Update our internal cache with both the IP addresses the node got
+	 * within the ARP reply
+	 */
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+
+	/* if this REPLY is directed to a client of mine, let's deliver the
+	 * packet to the interface
+	 */
+	ret = !batadv_is_my_client(bat_priv, hw_dst);
+out:
+	/* if ret == false -> packet has to be delivered to the interface */
+	return ret;
+}
+
+/**
+ * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
+ * (because the node has already got the reply via DAT) or not
+ * @bat_priv: the bat priv with all the soft interface information
+ * @forw_packet: the broadcast packet
+ *
+ * Returns true if the node can drop the packet, false otherwise
+ */
+bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+				      struct batadv_forw_packet *forw_packet)
+{
+	uint16_t type;
+	__be32 ip_dst;
+	struct batadv_dat_entry *dat_entry = NULL;
+	bool ret = false;
+	const size_t bcast_len = sizeof(struct batadv_bcast_packet);
+
+	if (!atomic_read(&bat_priv->distributed_arp_table))
+		goto out;
+
+	/* If this packet is an ARP_REQUEST and the node already has the
+	 * information it is asking for, then the packet can be dropped
+	 */
+	if (forw_packet->num_packets)
+		goto out;
+
+	type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
+	if (type != ARPOP_REQUEST)
+		goto out;
+
+	ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	/* check if the node already got this entry */
+	if (!dat_entry) {
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "ARP Request for %pI4: fallback\n", &ip_dst);
+		goto out;
+	}
+
+	batadv_dbg(BATADV_DBG_DAT, bat_priv,
+		   "ARP Request for %pI4: fallback prevented\n", &ip_dst);
+	ret = true;
+
+out:
+	if (dat_entry)
+		batadv_dat_entry_free_ref(dat_entry);
+	return ret;
+}
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
new file mode 100644
index 0000000..d060c03
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.h
@@ -0,0 +1,167 @@
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _NET_BATMAN_ADV_ARP_H_
+#define _NET_BATMAN_ADV_ARP_H_
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+
+#include "types.h"
+#include "originator.h"
+
+#include <linux/if_arp.h>
+
+#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
+
+bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb);
+bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb, int hdr_size);
+void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb);
+bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb, int hdr_size);
+bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+				      struct batadv_forw_packet *forw_packet);
+
+/**
+ * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * @orig_node: the node to assign the DAT address to
+ */
+static inline void
+batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
+{
+	uint32_t addr;
+
+	addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
+	orig_node->dat_addr = (batadv_dat_addr_t)addr;
+}
+
+/**
+ * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: a pointer to the primary interface
+ */
+static inline void
+batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
+			 struct batadv_hard_iface *primary_if)
+{
+	uint32_t addr;
+
+	addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
+				  BATADV_DAT_ADDR_MAX);
+
+	bat_priv->dat.addr = (batadv_dat_addr_t)addr;
+}
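
Both inline helpers map a 6-byte MAC address into the DAT identifier space; a
minimal sketch of the derivation (hypothetical values, assuming
batadv_choose_orig() reduces the hashed address modulo the size passed as its
second argument):

	uint8_t mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	batadv_dat_addr_t dat_addr;

	/* same reduction the two init helpers above apply to orig addresses */
	dat_addr = (batadv_dat_addr_t)batadv_choose_orig(mac, BATADV_DAT_ADDR_MAX);
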
+
+int batadv_dat_init(struct batadv_priv *bat_priv);
+void batadv_dat_free(struct batadv_priv *bat_priv);
+int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
+
+/**
+ * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * @bat_priv: the bat priv with all the soft interface information
+ * @subtype: the 4addr subtype of the packet to be counted
+ *
+ * Updates the ethtool statistics for the received packet if it is a DAT subtype
+ */
+static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
+					  uint8_t subtype)
+{
+	switch (subtype) {
+	case BATADV_P_DAT_DHT_GET:
+		batadv_inc_counter(bat_priv,
+				   BATADV_CNT_DAT_GET_RX);
+		break;
+	case BATADV_P_DAT_DHT_PUT:
+		batadv_inc_counter(bat_priv,
+				   BATADV_CNT_DAT_PUT_RX);
+		break;
+	}
+}
+
+#else
+
+static inline bool
+batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+				      struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool
+batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+				      struct sk_buff *skb, int hdr_size)
+{
+	return false;
+}
+
+static inline void
+batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb)
+{
+}
+
+static inline bool
+batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb, int hdr_size)
+{
+	return false;
+}
+
+static inline bool
+batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+				 struct batadv_forw_packet *forw_packet)
+{
+	return false;
+}
+
+static inline void
+batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
+{
+}
+
+static inline void batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
+					    struct batadv_hard_iface *iface)
+{
+}
+
+static inline void batadv_arp_change_timeout(struct net_device *soft_iface,
+					     const char *name)
+{
+}
+
+static inline int batadv_dat_init(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_dat_free(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
+					  uint8_t subtype)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+#endif /* _NET_BATMAN_ADV_ARP_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 15d67ab..dd07c7e 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -477,22 +477,11 @@
 	struct batadv_hard_iface *primary_if;
 	struct batadv_gw_node *gw_node;
 	struct hlist_node *node;
-	int gw_count = 0, ret = 0;
+	int gw_count = 0;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	seq_printf(seq,
 		   "      %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
@@ -519,7 +508,7 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index d112fd6..365ed74 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,6 +18,7 @@
  */
 
 #include "main.h"
+#include "distributed-arp-table.h"
 #include "hard-interface.h"
 #include "soft-interface.h"
 #include "send.h"
@@ -58,6 +59,45 @@
 	return hard_iface;
 }
 
+/**
+ * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * @net_dev: the device to check
+ *
+ * If the user creates any virtual device on top of a batman-adv interface, it
+ * is important to prevent this new interface from being used to create a new
+ * mesh network (this behaviour would lead to a batman-over-batman
+ * configuration). This function recursively checks all the parents of the
+ * device passed as argument, looking for a batman-adv soft interface.
+ *
+ * Returns true if the device is descendant of a batman-adv mesh interface (or
+ * if it is a batman-adv interface itself), false otherwise
+ */
+static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
+{
+	struct net_device *parent_dev;
+	bool ret;
+
+	/* check if this is a batman-adv mesh interface */
+	if (batadv_softif_is_valid(net_dev))
+		return true;
+
+	/* no more parents - stop the recursion */
+	if (net_dev->iflink == net_dev->ifindex)
+		return false;
+
+	/* recurse over the parent device */
+	parent_dev = dev_get_by_index(&init_net, net_dev->iflink);
+	/* if we got a NULL parent_dev there is something broken */
+	if (WARN(!parent_dev, "Cannot find parent device"))
+		return false;
+
+	ret = batadv_is_on_batman_iface(parent_dev);
+
+	if (parent_dev)
+		dev_put(parent_dev);
+	return ret;
+}
+
 static int batadv_is_valid_iface(const struct net_device *net_dev)
 {
 	if (net_dev->flags & IFF_LOOPBACK)
@@ -70,7 +110,7 @@
 		return 0;
 
 	/* no batman over batman */
-	if (batadv_softif_is_valid(net_dev))
+	if (batadv_is_on_batman_iface(net_dev))
 		return 0;
 
 	return 1;
@@ -109,6 +149,8 @@
 	if (!primary_if)
 		goto out;
 
+	batadv_dat_init_own_addr(bat_priv, primary_if);
+
 	skb = bat_priv->vis.my_info->skb_packet;
 	vis_packet = (struct batadv_vis_packet *)skb->data;
 	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -450,8 +492,8 @@
 	/* This can't be called via a bat_priv callback because
 	 * we have no bat_priv yet.
 	 */
-	atomic_set(&hard_iface->seqno, 1);
-	hard_iface->packet_buff = NULL;
+	atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
+	hard_iface->bat_iv.ogm_buff = NULL;
 
 	return hard_iface;
 
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 977de9c..e053339 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -82,6 +82,28 @@
 }
 
 /**
+ *	batadv_hash_bytes - hash some bytes and add them to the previous hash
+ *	@hash: previous hash value
+ *	@data: data to be hashed
+ *	@size: number of bytes to be hashed
+ *
+ *	Returns the new hash value.
+ */
+static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data,
+					 uint32_t size)
+{
+	const unsigned char *key = data;
+	int i;
+
+	for (i = 0; i < size; i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+	return hash;
+}
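
batadv_hash_bytes() only performs the incremental mixing step; a sketch of how
a hash table "choose" callback might build on it (example_choose_ipv4 is a
hypothetical helper, assuming the usual one-at-a-time finalisation before
folding into a bucket index):

	static inline uint32_t example_choose_ipv4(void *data, uint32_t size)
	{
		uint32_t hash = 0;

		/* feed the 4-byte IPv4 address through the incremental hash */
		hash = batadv_hash_bytes(hash, data, 4);

		/* finalise and fold the result into [0, size) */
		hash += (hash << 3);
		hash ^= (hash >> 11);
		hash += (hash << 15);

		return hash % size;
	}
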
+
+/**
  *	batadv_hash_add - adds data to the hashtable
  *	@hash: storage hash table
  *	@compare: callback to determine if 2 hash elements are identical
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index bde3cf747..87ca809 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -42,12 +42,16 @@
 	unsigned int i;
 	struct batadv_socket_client *socket_client;
 
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
 	nonseekable_open(inode, file);
 
 	socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
-
-	if (!socket_client)
+	if (!socket_client) {
+		module_put(THIS_MODULE);
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
 		if (!batadv_socket_client_hash[i]) {
@@ -59,6 +63,7 @@
 	if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
 		pr_err("Error - can't add another packet client: maximum number of clients reached\n");
 		kfree(socket_client);
+		module_put(THIS_MODULE);
 		return -EXFULL;
 	}
 
@@ -71,7 +76,6 @@
 
 	file->private_data = socket_client;
 
-	batadv_inc_module_count();
 	return 0;
 }
 
@@ -96,7 +100,7 @@
 	spin_unlock_bh(&socket_client->lock);
 
 	kfree(socket_client);
-	batadv_dec_module_count();
+	module_put(THIS_MODULE);
 
 	return 0;
 }
@@ -173,13 +177,13 @@
 	if (len >= sizeof(struct batadv_icmp_packet_rr))
 		packet_len = sizeof(struct batadv_icmp_packet_rr);
 
-	skb = dev_alloc_skb(packet_len + ETH_HLEN);
+	skb = dev_alloc_skb(packet_len + ETH_HLEN + NET_IP_ALIGN);
 	if (!skb) {
 		len = -ENOMEM;
 		goto out;
 	}
 
-	skb_reserve(skb, ETH_HLEN);
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 	icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
 
 	if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b4aa470..f65a222 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -17,6 +17,8 @@
  * 02110-1301, USA
  */
 
+#include <linux/crc32c.h>
+#include <linux/highmem.h>
 #include "main.h"
 #include "sysfs.h"
 #include "debugfs.h"
@@ -29,6 +31,7 @@
 #include "hard-interface.h"
 #include "gateway_client.h"
 #include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
 #include "vis.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -128,6 +131,10 @@
 	if (ret < 0)
 		goto err;
 
+	ret = batadv_dat_init(bat_priv);
+	if (ret < 0)
+		goto err;
+
 	atomic_set(&bat_priv->gw.reselect, 0);
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
@@ -155,21 +162,13 @@
 
 	batadv_bla_free(bat_priv);
 
+	batadv_dat_free(bat_priv);
+
 	free_percpu(bat_priv->bat_counters);
 
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 }
 
-void batadv_inc_module_count(void)
-{
-	try_module_get(THIS_MODULE);
-}
-
-void batadv_dec_module_count(void)
-{
-	module_put(THIS_MODULE);
-}
-
 int batadv_is_my_mac(const uint8_t *addr)
 {
 	const struct batadv_hard_iface *hard_iface;
@@ -188,6 +187,42 @@
 	return 0;
 }
 
+/**
+ * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ *  function that requires the primary interface
+ * @seq: debugfs table seq_file struct
+ *
+ * Returns primary interface if found or NULL otherwise.
+ */
+struct batadv_hard_iface *
+batadv_seq_print_text_primary_if_get(struct seq_file *seq)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hard_iface *primary_if;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+
+	if (!primary_if) {
+		seq_printf(seq,
+			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+			   net_dev->name);
+		goto out;
+	}
+
+	if (primary_if->if_status == BATADV_IF_ACTIVE)
+		goto out;
+
+	seq_printf(seq,
+		   "BATMAN mesh %s disabled - primary interface not active\n",
+		   net_dev->name);
+	batadv_hardif_free_ref(primary_if);
+	primary_if = NULL;
+
+out:
+	return primary_if;
+}
+
 static int batadv_recv_unhandled_packet(struct sk_buff *skb,
 					struct batadv_hard_iface *recv_if)
 {
@@ -274,6 +309,8 @@
 
 	/* batman icmp packet */
 	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+	/* unicast with 4 addresses packet */
+	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
 	/* unicast packet */
 	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
 	/* fragmented unicast packet */
@@ -385,6 +422,38 @@
 	return 0;
 }
 
+/**
+ * batadv_skb_crc32 - calculate the CRC32 of the whole packet, skipping the
+ *  bytes in front of the payload pointer
+ * @skb: skb pointing to fragmented socket buffers
+ * @payload_ptr: pointer to the position inside the head buffer of the skb
+ *  marking the start of the data to be CRC'ed
+ *
+ * payload_ptr must always point to an address in the skb head buffer and not
+ * to a fragment.
+ *
+ * Returns the CRC32C checksum of the data starting at payload_ptr, in network
+ * byte order.
+ */
+__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
+{
+	u32 crc = 0;
+	unsigned int from;
+	unsigned int to = skb->len;
+	struct skb_seq_state st;
+	const u8 *data;
+	unsigned int len;
+	unsigned int consumed = 0;
+
+	from = (unsigned int)(payload_ptr - skb->data);
+
+	skb_prepare_seq_read(skb, from, to, &st);
+	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
+		crc = crc32c(crc, data, len);
+		consumed += len;
+	}
+	skb_abort_seq_read(&st);
+
+	return htonl(crc);
+}
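
A minimal usage sketch (hypothetical caller, e.g. checksumming a broadcast
payload while skipping the batman-adv header in front of it):

	/* assumed context: skb carries a complete batadv_bcast_packet */
	u8 *payload_ptr = skb->data + sizeof(struct batadv_bcast_packet);
	__be32 crc = batadv_skb_crc32(skb, payload_ptr);
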
+
 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
 {
 	struct batadv_algo_ops *bat_algo_ops;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d57b746..2f85577 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.4.0"
+#define BATADV_SOURCE_VERSION "2012.5.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -44,6 +44,7 @@
 #define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
 #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
@@ -73,6 +74,11 @@
 
 #define BATADV_LOG_BUF_LEN 8192	  /* has to be a power of 2 */
 
+/* msecs after which an ARP_REQUEST is broadcast as a fallback */
+#define ARP_REQ_DELAY 250
+/* number of originators to contact for any PUT/GET DHT operation */
+#define BATADV_DAT_CANDIDATES_NUM 3
+
 #define BATADV_VIS_INTERVAL 5000	/* 5 seconds */
 
 /* how much worse secondary interfaces may be to be considered as bonding
@@ -89,6 +95,7 @@
 #define BATADV_BLA_PERIOD_LENGTH	10000	/* 10 seconds */
 #define BATADV_BLA_BACKBONE_TIMEOUT	(BATADV_BLA_PERIOD_LENGTH * 3)
 #define BATADV_BLA_CLAIM_TIMEOUT	(BATADV_BLA_PERIOD_LENGTH * 10)
+#define BATADV_BLA_WAIT_PERIODS		3
 
 #define BATADV_DUPLIST_SIZE		16
 #define BATADV_DUPLIST_TIMEOUT		500	/* 500 ms */
@@ -117,6 +124,9 @@
 
 #define BATADV_GW_THRESHOLD	50
 
+#define BATADV_DAT_CANDIDATE_NOT_FOUND	0
+#define BATADV_DAT_CANDIDATE_ORIG	1
+
 /* Debug Messages */
 #ifdef pr_fmt
 #undef pr_fmt
@@ -150,9 +160,9 @@
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-void batadv_inc_module_count(void);
-void batadv_dec_module_count(void);
 int batadv_is_my_mac(const uint8_t *addr);
+struct batadv_hard_iface *
+batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev);
@@ -164,14 +174,24 @@
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
+__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
-/* all messages related to routing / flooding / broadcasting / etc */
+/**
+ * enum batadv_dbg_level - available log levels
+ * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
+ * @BATADV_DBG_ROUTES: route added / changed / deleted
+ * @BATADV_DBG_TT: translation table messages
+ * @BATADV_DBG_BLA: bridge loop avoidance messages
+ * @BATADV_DBG_DAT: ARP snooping and DAT related messages
+ * @BATADV_DBG_ALL: the union of all the above log levels
+ */
 enum batadv_dbg_level {
 	BATADV_DBG_BATMAN = BIT(0),
-	BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
-	BATADV_DBG_TT	  = BIT(2), /* translation table operations */
-	BATADV_DBG_BLA    = BIT(3), /* bridge loop avoidance */
-	BATADV_DBG_ALL    = 15,
+	BATADV_DBG_ROUTES = BIT(1),
+	BATADV_DBG_TT	  = BIT(2),
+	BATADV_DBG_BLA    = BIT(3),
+	BATADV_DBG_DAT    = BIT(4),
+	BATADV_DBG_ALL    = 31,
 };
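
The new BATADV_DBG_DAT level gates the ARP snooping and DHT messages added in
distributed-arp-table.c and is used like the existing levels (enabling it at
runtime presumably means setting bit 4 of the log_level mask); for example:

	batadv_dbg(BATADV_DBG_DAT, bat_priv,
		   "ARP Request for %pI4: fallback prevented\n", &ip_dst);
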
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ac9bdf8..8c32cf1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -18,6 +18,7 @@
  */
 
 #include "main.h"
+#include "distributed-arp-table.h"
 #include "originator.h"
 #include "hash.h"
 #include "translation-table.h"
@@ -220,9 +221,9 @@
 	atomic_set(&orig_node->refcount, 2);
 
 	orig_node->tt_initialised = false;
-	orig_node->tt_poss_change = false;
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
+	batadv_dat_init_orig_node_addr(orig_node);
 	orig_node->router = NULL;
 	orig_node->tt_crc = 0;
 	atomic_set(&orig_node->last_ttvn, 0);
@@ -415,23 +416,10 @@
 	int last_seen_msecs;
 	unsigned long last_seen_jiffies;
 	uint32_t i;
-	int ret = 0;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
@@ -485,7 +473,7 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 2d23a14..1c5454d 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -23,14 +23,29 @@
 #define BATADV_ETH_P_BATMAN  0x4305 /* unofficial/not registered Ethertype */
 
 enum batadv_packettype {
-	BATADV_IV_OGM	    = 0x01,
-	BATADV_ICMP	    = 0x02,
-	BATADV_UNICAST	    = 0x03,
-	BATADV_BCAST	    = 0x04,
-	BATADV_VIS	    = 0x05,
-	BATADV_UNICAST_FRAG = 0x06,
-	BATADV_TT_QUERY	    = 0x07,
-	BATADV_ROAM_ADV	    = 0x08,
+	BATADV_IV_OGM		= 0x01,
+	BATADV_ICMP		= 0x02,
+	BATADV_UNICAST		= 0x03,
+	BATADV_BCAST		= 0x04,
+	BATADV_VIS		= 0x05,
+	BATADV_UNICAST_FRAG	= 0x06,
+	BATADV_TT_QUERY		= 0x07,
+	BATADV_ROAM_ADV		= 0x08,
+	BATADV_UNICAST_4ADDR	= 0x09,
+};
+
+/**
+ * enum batadv_subtype - packet subtype for unicast4addr
+ * @BATADV_P_DATA: user payload
+ * @BATADV_P_DAT_DHT_GET: DHT request message
+ * @BATADV_P_DAT_DHT_PUT: DHT store message
+ * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
+ */
+enum batadv_subtype {
+	BATADV_P_DATA			= 0x01,
+	BATADV_P_DAT_DHT_GET		= 0x02,
+	BATADV_P_DAT_DHT_PUT		= 0x03,
+	BATADV_P_DAT_CACHE_REPLY	= 0x04,
 };
 
 /* this file is included by batctl which needs these defines */
@@ -106,13 +121,16 @@
 	uint8_t magic[3];	/* FF:43:05 */
 	uint8_t type;		/* bla_claimframe */
 	__be16 group;		/* group id */
-} __packed;
+};
 
 struct batadv_header {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
 	uint8_t  ttl;
-} __packed;
+	/* the parent struct has to add a byte after the header to make
+	 * everything 4 bytes aligned again
+	 */
+};
 
 struct batadv_ogm_packet {
 	struct batadv_header header;
@@ -137,7 +155,7 @@
 	__be16   seqno;
 	uint8_t  uid;
 	uint8_t  reserved;
-} __packed;
+};
 
 #define BATADV_RR_LEN 16
 
@@ -153,13 +171,44 @@
 	uint8_t  uid;
 	uint8_t  rr_cur;
 	uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
-} __packed;
+};
+
+/* All packet headers in front of an ethernet header have to be completely
+ * divisible by 2 but not by 4 to make the payload after the ethernet
+ * header again 4 bytes boundary aligned.
+ *
+ * A packing of 2 is necessary to avoid extra padding at the end of the struct
+ * caused by a structure member which is larger than two bytes. Otherwise
+ * the structure would not fulfill the previously mentioned rule to avoid the
+ * misalignment of the payload after the ethernet header. It may also lead to
+ * leakage of information when the padding is not initialized before sending.
+ */
+#pragma pack(2)
 
 struct batadv_unicast_packet {
 	struct batadv_header header;
 	uint8_t  ttvn; /* destination translation table version number */
 	uint8_t  dest[ETH_ALEN];
-} __packed;
+	/* "4 bytes boundary + 2 bytes" long to make the payload after the
+	 * following ethernet header again 4 bytes boundary aligned
+	 */
+};
+
+/**
+ * struct batadv_unicast_4addr_packet - extended unicast packet
+ * @u: common unicast packet header
+ * @src: address of the source
+ * @subtype: packet subtype
+ */
+struct batadv_unicast_4addr_packet {
+	struct batadv_unicast_packet u;
+	uint8_t src[ETH_ALEN];
+	uint8_t subtype;
+	uint8_t reserved;
+	/* "4 bytes boundary + 2 bytes" long to make the payload after the
+	 * following ethernet header again 4 bytes boundary aligned
+	 */
+};
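
The "4 bytes boundary + 2 bytes" rule stated above can be checked at compile
time; a sketch (not part of the patch, assuming <linux/bug.h> is available):

	/* both unicast headers must end 2 bytes past a 4-byte boundary so
	 * that the inner ethernet header re-aligns the payload
	 */
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) % 4 != 2);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) % 4 != 2);
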
 
 struct batadv_unicast_frag_packet {
 	struct batadv_header header;
@@ -176,7 +225,12 @@
 	uint8_t  reserved;
 	__be32   seqno;
 	uint8_t  orig[ETH_ALEN];
-} __packed;
+	/* "4 bytes boundary + 2 bytes" long to make the payload after the
+	 * following ethernet header again 4 bytes boundary aligned
+	 */
+};
+
+#pragma pack()
 
 struct batadv_vis_packet {
 	struct batadv_header header;
@@ -187,7 +241,7 @@
 	uint8_t  vis_orig[ETH_ALEN];	/* originator reporting its neighbors */
 	uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
 	uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
-} __packed;
+};
 
 struct batadv_tt_query_packet {
 	struct batadv_header header;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 376b4cc..1aa1722 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -28,6 +28,7 @@
 #include "vis.h"
 #include "unicast.h"
 #include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
@@ -284,7 +285,6 @@
 {
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *router = NULL;
 	struct batadv_icmp_packet_rr *icmp_packet;
 	int ret = NET_RX_DROP;
 
@@ -306,10 +306,6 @@
 	if (!orig_node)
 		goto out;
 
-	router = batadv_orig_node_get_router(orig_node);
-	if (!router)
-		goto out;
-
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
@@ -321,14 +317,12 @@
 	icmp_packet->msg_type = BATADV_ECHO_REPLY;
 	icmp_packet->header.ttl = BATADV_TTL;
 
-	batadv_send_skb_packet(skb, router->if_incoming, router->addr);
-	ret = NET_RX_SUCCESS;
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+		ret = NET_RX_SUCCESS;
 
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	if (router)
-		batadv_neigh_node_free_ref(router);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 	return ret;
@@ -339,7 +333,6 @@
 {
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *router = NULL;
 	struct batadv_icmp_packet *icmp_packet;
 	int ret = NET_RX_DROP;
 
@@ -361,10 +354,6 @@
 	if (!orig_node)
 		goto out;
 
-	router = batadv_orig_node_get_router(orig_node);
-	if (!router)
-		goto out;
-
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
@@ -376,14 +365,12 @@
 	icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
 	icmp_packet->header.ttl = BATADV_TTL;
 
-	batadv_send_skb_packet(skb, router->if_incoming, router->addr);
-	ret = NET_RX_SUCCESS;
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+		ret = NET_RX_SUCCESS;
 
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	if (router)
-		batadv_neigh_node_free_ref(router);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 	return ret;
@@ -397,7 +384,6 @@
 	struct batadv_icmp_packet_rr *icmp_packet;
 	struct ethhdr *ethhdr;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *router = NULL;
 	int hdr_size = sizeof(struct batadv_icmp_packet);
 	int ret = NET_RX_DROP;
 
@@ -446,10 +432,6 @@
 	if (!orig_node)
 		goto out;
 
-	router = batadv_orig_node_get_router(orig_node);
-	if (!router)
-		goto out;
-
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
@@ -460,12 +442,10 @@
 	icmp_packet->header.ttl--;
 
 	/* route it */
-	batadv_send_skb_packet(skb, router->if_incoming, router->addr);
-	ret = NET_RX_SUCCESS;
+	if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
+		ret = NET_RX_SUCCESS;
 
 out:
-	if (router)
-		batadv_neigh_node_free_ref(router);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 	return ret;
@@ -549,25 +529,18 @@
 		if (tmp_neigh_node->if_incoming == recv_if)
 			continue;
 
+		if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
+			continue;
+
 		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
 			continue;
 
-		/* if we don't have a router yet
-		 * or this one is better, choose it.
-		 */
-		if ((!router) ||
-		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
-			/* decrement refcount of
-			 * previously selected router
-			 */
-			if (router)
-				batadv_neigh_node_free_ref(router);
+		/* decrement refcount of previously selected router */
+		if (router)
+			batadv_neigh_node_free_ref(router);
 
-			router = tmp_neigh_node;
-			atomic_inc_not_zero(&router->refcount);
-		}
-
-		batadv_neigh_node_free_ref(tmp_neigh_node);
+		/* we found a better router (or at least one valid router) */
+		router = tmp_neigh_node;
 	}
 
 	/* use the first candidate if nothing was found. */
@@ -687,21 +660,8 @@
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_roam_adv_packet *roam_adv_packet;
 	struct batadv_orig_node *orig_node;
-	struct ethhdr *ethhdr;
 
-	/* drop packet if it has not necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb,
-				    sizeof(struct batadv_roam_adv_packet))))
-		goto out;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with unicast indication but broadcast recipient */
-	if (is_broadcast_ether_addr(ethhdr->h_dest))
-		goto out;
-
-	/* packet with broadcast sender address */
-	if (is_broadcast_ether_addr(ethhdr->h_source))
+	if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0)
 		goto out;
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
@@ -730,12 +690,6 @@
 			     BATADV_TT_CLIENT_ROAM,
 			     atomic_read(&orig_node->last_ttvn) + 1);
 
-	/* Roaming phase starts: I have new information but the ttvn has not
-	 * been incremented yet. This flag will make me check all the incoming
-	 * packets for the correct destination.
-	 */
-	bat_priv->tt.poss_change = true;
-
 	batadv_orig_node_free_ref(orig_node);
 out:
 	/* returning NET_RX_DROP will make the caller function kfree the skb */
@@ -907,8 +861,8 @@
 			   skb->len + ETH_HLEN);
 
 	/* route it */
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = NET_RX_SUCCESS;
+	if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
+		ret = NET_RX_SUCCESS;
 
 out:
 	if (neigh_node)
@@ -918,80 +872,161 @@
 	return ret;
 }
 
+/**
+ * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * @bat_priv: the bat priv with all the soft interface information
+ * @unicast_packet: the unicast header to be updated
+ * @dst_addr: the payload destination
+ *
+ * Search the translation table for dst_addr and update the unicast header with
+ * the new corresponding information (originator address where the destination
+ * client currently is and its known TTVN)
+ *
+ * Returns true if the packet header has been updated, false otherwise
+ */
+static bool
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+			      struct batadv_unicast_packet *unicast_packet,
+			      uint8_t *dst_addr)
+{
+	struct batadv_orig_node *orig_node = NULL;
+	struct batadv_hard_iface *primary_if = NULL;
+	bool ret = false;
+	uint8_t *orig_addr, orig_ttvn;
+
+	if (batadv_is_my_client(bat_priv, dst_addr)) {
+		primary_if = batadv_primary_if_get_selected(bat_priv);
+		if (!primary_if)
+			goto out;
+		orig_addr = primary_if->net_dev->dev_addr;
+		orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	} else {
+		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
+		if (!orig_node)
+			goto out;
+
+		if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
+			goto out;
+
+		orig_addr = orig_node->orig;
+		orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	}
+
+	/* update the packet header */
+	memcpy(unicast_packet->dest, orig_addr, ETH_ALEN);
+	unicast_packet->ttvn = orig_ttvn;
+
+	ret = true;
+out:
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	if (orig_node)
+		batadv_orig_node_free_ref(orig_node);
+
+	return ret;
+}
+
 static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 				     struct sk_buff *skb) {
-	uint8_t curr_ttvn;
+	uint8_t curr_ttvn, old_ttvn;
 	struct batadv_orig_node *orig_node;
 	struct ethhdr *ethhdr;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_unicast_packet *unicast_packet;
-	bool tt_poss_change;
 	int is_old_ttvn;
 
-	/* I could need to modify it */
-	if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0)
+	/* check if there is enough data before accessing it */
+	if (!pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN))
+		return 0;
+
+	/* create a copy of the skb (in case of re-routing) to modify it. */
+	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
 		return 0;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
 
-	if (batadv_is_my_mac(unicast_packet->dest)) {
-		tt_poss_change = bat_priv->tt.poss_change;
-		curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-	} else {
+	/* check if the destination client was served by this node and is now
+	 * roaming. In this case, the node has received a ROAM_ADV message and
+	 * knows the new destination in the mesh to re-route the packet to
+	 */
+	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
+		if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+						  ethhdr->h_dest))
+			net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
+						 bat_priv,
+						 "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
+						 unicast_packet->dest,
+						 ethhdr->h_dest);
+		/* at this point the mesh destination should have been
+		 * substituted with the originator address found in the global
+		 * table. If not, let the packet go untouched anyway because
+		 * there is nothing the node can do
+		 */
+		return 1;
+	}
+
+	/* retrieve the TTVN known by this node for the packet destination. This
+	 * value is used later to check if the node which sent (or re-routed
+	 * last time) the packet had updated information or not
+	 */
+	curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	if (!batadv_is_my_mac(unicast_packet->dest)) {
 		orig_node = batadv_orig_hash_find(bat_priv,
 						  unicast_packet->dest);
-
+		/* if it is not possible to find the orig_node representing the
+		 * destination, the packet can immediately be dropped as it will
+		 * not be possible to deliver it
+		 */
 		if (!orig_node)
 			return 0;
 
 		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-		tt_poss_change = orig_node->tt_poss_change;
 		batadv_orig_node_free_ref(orig_node);
 	}
 
-	/* Check whether I have to reroute the packet */
+	/* check if the TTVN contained in the packet is fresher than what the
+	 * node knows
+	 */
 	is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
-	if (is_old_ttvn || tt_poss_change) {
-		/* check if there is enough data before accessing it */
-		if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
-				  ETH_HLEN) < 0)
-			return 0;
+	if (!is_old_ttvn)
+		return 1;
 
-		ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
-
-		/* we don't have an updated route for this client, so we should
-		 * not try to reroute the packet!!
-		 */
-		if (batadv_tt_global_client_is_roaming(bat_priv,
-						       ethhdr->h_dest))
-			return 1;
-
-		orig_node = batadv_transtable_search(bat_priv, NULL,
-						     ethhdr->h_dest);
-
-		if (!orig_node) {
-			if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
-				return 0;
-			primary_if = batadv_primary_if_get_selected(bat_priv);
-			if (!primary_if)
-				return 0;
-			memcpy(unicast_packet->dest,
-			       primary_if->net_dev->dev_addr, ETH_ALEN);
-			batadv_hardif_free_ref(primary_if);
-		} else {
-			memcpy(unicast_packet->dest, orig_node->orig,
-			       ETH_ALEN);
-			curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-			batadv_orig_node_free_ref(orig_node);
-		}
-
-		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
-			   "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
-			   unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
-			   unicast_packet->dest);
-
-		unicast_packet->ttvn = curr_ttvn;
+	old_ttvn = unicast_packet->ttvn;
+	/* the packet was forged based on outdated network information. Its
+	 * destination can possibly be updated and forwarded towards the new
+	 * target host
+	 */
+	if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+					  ethhdr->h_dest)) {
+		net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
+					 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
+					 unicast_packet->dest, ethhdr->h_dest,
+					 old_ttvn, curr_ttvn);
+		return 1;
 	}
+
+	/* the packet has not been re-routed: either the destination is
+	 * currently served by this node or there is no destination at all and
+	 * it is possible to drop the packet
+	 */
+	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
+		return 0;
+
+	/* update the header in order to let the packet be delivered to this
+	 * node's soft interface
+	 */
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		return 0;
+
+	memcpy(unicast_packet->dest, primary_if->net_dev->dev_addr, ETH_ALEN);
+
+	batadv_hardif_free_ref(primary_if);
+
+	unicast_packet->ttvn = curr_ttvn;
+
 	return 1;
 }
 
@@ -1000,7 +1035,19 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_unicast_packet *unicast_packet;
+	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+	uint8_t *orig_addr;
+	struct batadv_orig_node *orig_node = NULL;
 	int hdr_size = sizeof(*unicast_packet);
+	bool is4addr;
+
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+
+	is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+	/* the caller function should have already pulled 2 bytes */
+	if (is4addr)
+		hdr_size = sizeof(*unicast_4addr_packet);
 
 	if (batadv_check_unicast_packet(skb, hdr_size) < 0)
 		return NET_RX_DROP;
@@ -1008,12 +1055,28 @@
 	if (!batadv_check_unicast_ttvn(bat_priv, skb))
 		return NET_RX_DROP;
 
-	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
 	/* packet for me */
 	if (batadv_is_my_mac(unicast_packet->dest)) {
+		if (is4addr) {
+			batadv_dat_inc_counter(bat_priv,
+					       unicast_4addr_packet->subtype);
+			orig_addr = unicast_4addr_packet->src;
+			orig_node = batadv_orig_hash_find(bat_priv, orig_addr);
+		}
+
+		if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
+							  hdr_size))
+			goto rx_success;
+		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
+							hdr_size))
+			goto rx_success;
+
 		batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
-				    NULL);
+				    orig_node);
+
+rx_success:
+		if (orig_node)
+			batadv_orig_node_free_ref(orig_node);
 
 		return NET_RX_SUCCESS;
 	}
@@ -1050,8 +1113,17 @@
 		if (!new_skb)
 			return NET_RX_SUCCESS;
 
+		if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
+							  hdr_size))
+			goto rx_success;
+		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
+							hdr_size))
+			goto rx_success;
+
 		batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
 				    sizeof(struct batadv_unicast_packet), NULL);
+
+rx_success:
 		return NET_RX_SUCCESS;
 	}
 
@@ -1124,14 +1196,8 @@
 
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
-	/* keep skb linear for crc calculation */
-	if (skb_linearize(skb) < 0)
-		goto out;
-
-	bcast_packet = (struct batadv_bcast_packet *)skb->data;
-
 	/* check whether this has been sent by another originator before */
-	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
+	if (batadv_bla_check_bcast_duplist(bat_priv, skb))
 		goto out;
 
 	/* rebroadcast packet */
@@ -1143,9 +1209,16 @@
 	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
 		goto out;
 
+	if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
+		goto rx_success;
+	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
+		goto rx_success;
+
 	/* broadcast for me */
 	batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
 			    orig_node);
+
+rx_success:
 	ret = NET_RX_SUCCESS;
 	goto out;
 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 570a8bc..c7f7023 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -18,6 +18,7 @@
  */
 
 #include "main.h"
+#include "distributed-arp-table.h"
 #include "send.h"
 #include "routing.h"
 #include "translation-table.h"
@@ -77,6 +78,39 @@
 	return NET_XMIT_DROP;
 }
 
+/**
+ * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * @skb: Packet to be transmitted.
+ * @orig_node: Final destination of the packet.
+ * @recv_if: Interface used when receiving the packet (can be NULL).
+ *
+ * Looks up the best next-hop towards the passed originator and passes the
+ * skb on for preparation of the MAC header. If the packet originated from
+ * this host, NULL can be passed as recv_if and no interface alternating is
+ * attempted.
+ *
+ * Returns true on success; false otherwise.
+ */
+bool batadv_send_skb_to_orig(struct sk_buff *skb,
+			     struct batadv_orig_node *orig_node,
+			     struct batadv_hard_iface *recv_if)
+{
+	struct batadv_priv *bat_priv = orig_node->bat_priv;
+	struct batadv_neigh_node *neigh_node;
+
+	/* batadv_find_router() increases neigh_nodes refcount if found. */
+	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
+	if (!neigh_node)
+		return false;
+
+	/* route it */
+	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+
+	batadv_neigh_node_free_ref(neigh_node);
+
+	return true;
+}
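
A caller sketch matching the routing.c conversions above (recv_if is NULL when
the packet originates locally, so no interface alternating is attempted):

	/* e.g. transmitting a locally generated ICMP echo reply */
	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
		ret = NET_RX_SUCCESS;
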
+
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -209,6 +243,9 @@
 	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
 
+	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
+		goto out;
+
 	/* rebroadcast packet */
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 643329b..0078dec 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -23,6 +23,9 @@
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
 			   const uint8_t *dst_addr);
+bool batadv_send_skb_to_orig(struct sk_buff *skb,
+			     struct batadv_orig_node *orig_node,
+			     struct batadv_hard_iface *recv_if);
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				    const struct sk_buff *skb,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index ce0684a..54800c7 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -20,6 +20,7 @@
 #include "main.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
+#include "distributed-arp-table.h"
 #include "routing.h"
 #include "send.h"
 #include "debugfs.h"
@@ -146,13 +147,16 @@
 	struct batadv_bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
 	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
-	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
-						   0x00};
+	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+						   0x00, 0x00};
+	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
+						    0x00, 0x00};
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
 	short vid __maybe_unused = -1;
 	bool do_bcast = false;
 	uint32_t seqno;
+	unsigned long brd_delay = 1;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -180,10 +184,16 @@
 
 	/* don't accept stp packets. STP does not help in meshes.
 	 * better use the bridge loop avoidance ...
+	 *
+	 * The same goes for ECTP sent at least by some Cisco Switches,
+	 * it might confuse the mesh when used with bridge loop avoidance.
 	 */
 	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
 		goto dropped;
 
+	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
+		goto dropped;
+
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		do_bcast = true;
 
@@ -216,6 +226,13 @@
 		if (!primary_if)
 			goto dropped;
 
+		/* in case of an ARP request, we do not immediately broadcast
+		 * the packet, instead we first wait for DAT to try to
+		 * retrieve the correct ARP entry
+		 */
+		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
+			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
+
 		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 			goto dropped;
 
@@ -237,7 +254,7 @@
 		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
 		bcast_packet->seqno = htonl(seqno);
 
-		batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
+		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
 
 		/* a copy is stored in the bcast list, therefore removing
 		 * the original skb.
@@ -252,7 +269,12 @@
 				goto dropped;
 		}
 
-		ret = batadv_unicast_send_skb(skb, bat_priv);
+		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
+			goto dropped;
+
+		batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
+
+		ret = batadv_unicast_send_skb(bat_priv, skb);
 		if (ret != 0)
 			goto dropped_freed;
 	}
@@ -347,7 +369,51 @@
 	return;
 }
 
+/* batman-adv network devices have devices nesting below it and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key batadv_netdev_xmit_lock_key;
+static struct lock_class_key batadv_netdev_addr_lock_key;
+
+/**
+ * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * @dev: device which owns the tx queue
+ * @txq: tx queue to modify
+ * @_unused: always NULL
+ */
+static void batadv_set_lockdep_class_one(struct net_device *dev,
+					 struct netdev_queue *txq,
+					 void *_unused)
+{
+	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
+}
+
+/**
+ * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * @dev: network device to modify
+ */
+static void batadv_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
+	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
+}
+
+/**
+ * batadv_softif_init - Late stage initialization of soft interface
+ * @dev: registered network device to modify
+ *
+ * Returns error code on failures
+ */
+static int batadv_softif_init(struct net_device *dev)
+{
+	batadv_set_lockdep_class(dev);
+
+	return 0;
+}
+
 static const struct net_device_ops batadv_netdev_ops = {
+	.ndo_init = batadv_softif_init,
 	.ndo_open = batadv_interface_open,
 	.ndo_stop = batadv_interface_release,
 	.ndo_get_stats = batadv_interface_stats,
@@ -414,6 +480,9 @@
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
 	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+#ifdef CONFIG_BATMAN_ADV_DAT
+	atomic_set(&bat_priv->distributed_arp_table, 1);
+#endif
 	atomic_set(&bat_priv->ap_isolation, 0);
 	atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
@@ -436,7 +505,6 @@
 #endif
 	bat_priv->tt.last_changeset = NULL;
 	bat_priv->tt.last_changeset_len = 0;
-	bat_priv->tt.poss_change = false;
 
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
@@ -556,6 +624,13 @@
 	{ "tt_response_rx" },
 	{ "tt_roam_adv_tx" },
 	{ "tt_roam_adv_rx" },
+#ifdef CONFIG_BATMAN_ADV_DAT
+	{ "dat_get_tx" },
+	{ "dat_get_rx" },
+	{ "dat_put_tx" },
+	{ "dat_put_rx" },
+	{ "dat_cached_reply_tx" },
+#endif
 };
 
 static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 66518c7..84a55cb 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -20,6 +20,7 @@
 #include "main.h"
 #include "sysfs.h"
 #include "translation-table.h"
+#include "distributed-arp-table.h"
 #include "originator.h"
 #include "hard-interface.h"
 #include "gateway_common.h"
@@ -122,55 +123,6 @@
 			   batadv_store_##_name)
 
 
-#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)	\
-ssize_t batadv_store_##_name(struct kobject *kobj,			\
-			     struct attribute *attr, char *buff,	\
-			     size_t count)				\
-{									\
-	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);	\
-	struct batadv_hard_iface *hard_iface;				\
-	ssize_t length;							\
-									\
-	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
-	if (!hard_iface)						\
-		return 0;						\
-									\
-	length = __batadv_store_uint_attr(buff, count, _min, _max,	\
-					  _post_func, attr,		\
-					  &hard_iface->_name, net_dev);	\
-									\
-	batadv_hardif_free_ref(hard_iface);				\
-	return length;							\
-}
-
-#define BATADV_ATTR_HIF_SHOW_UINT(_name)				\
-ssize_t batadv_show_##_name(struct kobject *kobj,			\
-			    struct attribute *attr, char *buff)		\
-{									\
-	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);	\
-	struct batadv_hard_iface *hard_iface;				\
-	ssize_t length;							\
-									\
-	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
-	if (!hard_iface)						\
-		return 0;						\
-									\
-	length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
-									\
-	batadv_hardif_free_ref(hard_iface);				\
-	return length;							\
-}
-
-/* Use this, if you are going to set [name] in hard_iface to an
- * unsigned integer value
- */
-#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func)	\
-	static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
-	static BATADV_ATTR_HIF_SHOW_UINT(_name)				\
-	static BATADV_ATTR(_name, _mode, batadv_show_##_name,		\
-			   batadv_store_##_name)
-
-
 static int batadv_store_bool_attr(char *buff, size_t count,
 				  struct net_device *net_dev,
 				  const char *attr_name, atomic_t *attr)
@@ -469,6 +421,9 @@
 #ifdef CONFIG_BATMAN_ADV_BLA
 BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
+#endif
 BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
 BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
@@ -494,6 +449,9 @@
 #ifdef CONFIG_BATMAN_ADV_BLA
 	&batadv_attr_bridge_loop_avoidance,
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+	&batadv_attr_distributed_arp_table,
+#endif
 	&batadv_attr_fragmentation,
 	&batadv_attr_ap_isolation,
 	&batadv_attr_vis_mode,
@@ -730,7 +688,7 @@
 			enum batadv_uev_action action, const char *data)
 {
 	int ret = -ENOMEM;
-	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_hard_iface *primary_if;
 	struct kobject *bat_kobj;
 	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
 
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index baae715..22457a7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -238,92 +238,134 @@
 	return 0;
 }
 
+static void batadv_tt_global_free(struct batadv_priv *bat_priv,
+				  struct batadv_tt_global_entry *tt_global,
+				  const char *message)
+{
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Deleting global tt entry %pM: %s\n",
+		   tt_global->common.addr, message);
+
+	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
+			   batadv_choose_orig, tt_global->common.addr);
+	batadv_tt_global_entry_free_ref(tt_global);
+}
+
 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 			 int ifindex)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-	struct batadv_tt_local_entry *tt_local_entry = NULL;
-	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_local_entry *tt_local;
+	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added;
+	bool roamed_back = false;
 
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+	tt_local = batadv_tt_local_hash_find(bat_priv, addr);
+	tt_global = batadv_tt_global_hash_find(bat_priv, addr);
 
-	if (tt_local_entry) {
-		tt_local_entry->last_seen = jiffies;
-		/* possibly unset the BATADV_TT_CLIENT_PENDING flag */
-		tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
-		goto out;
+	if (tt_local) {
+		tt_local->last_seen = jiffies;
+		if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
+			batadv_dbg(BATADV_DBG_TT, bat_priv,
+				   "Re-adding pending client %pM\n", addr);
+			/* whatever the reason why the PENDING flag was set,
+			 * this is a client which was enqueued to be removed in
+			 * this orig_interval. Since it popped up again, the
+			 * flag can be reset like it was never enqueued
+			 */
+			tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING;
+			goto add_event;
+		}
+
+		if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
+			batadv_dbg(BATADV_DBG_TT, bat_priv,
+				   "Roaming client %pM came back to its original location\n",
+				   addr);
+			/* the ROAM flag is set because this client roamed away
+			 * and the node got a roaming_advertisement message. Now
+			 * that the client popped up again at its original
+			 * location the flag can be unset
+			 */
+			tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM;
+			roamed_back = true;
+		}
+		goto check_roaming;
 	}
 
-	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
-	if (!tt_local_entry)
+	tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
+	if (!tt_local)
 		goto out;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
 		   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
-	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
-	tt_local_entry->common.flags = BATADV_NO_FLAGS;
+	memcpy(tt_local->common.addr, addr, ETH_ALEN);
+	tt_local->common.flags = BATADV_NO_FLAGS;
 	if (batadv_is_wifi_iface(ifindex))
-		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
-	atomic_set(&tt_local_entry->common.refcount, 2);
-	tt_local_entry->last_seen = jiffies;
-	tt_local_entry->common.added_at = tt_local_entry->last_seen;
+		tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+	atomic_set(&tt_local->common.refcount, 2);
+	tt_local->last_seen = jiffies;
+	tt_local->common.added_at = tt_local->last_seen;
 
 	/* the batman interface mac address should never be purged */
 	if (batadv_compare_eth(addr, soft_iface->dev_addr))
-		tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
+		tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
 	/* The local entry has to be marked as NEW to avoid to send it in
 	 * a full table response going out before the next ttvn increment
 	 * (consistency check)
 	 */
-	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
+	tt_local->common.flags |= BATADV_TT_CLIENT_NEW;
 
 	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
-				     batadv_choose_orig,
-				     &tt_local_entry->common,
-				     &tt_local_entry->common.hash_entry);
+				     batadv_choose_orig, &tt_local->common,
+				     &tt_local->common.hash_entry);
 
 	if (unlikely(hash_added != 0)) {
 		/* remove the reference for the hash */
-		batadv_tt_local_entry_free_ref(tt_local_entry);
+		batadv_tt_local_entry_free_ref(tt_local);
 		goto out;
 	}
 
-	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
+add_event:
+	batadv_tt_local_event(bat_priv, addr, tt_local->common.flags);
 
-	/* remove address from global hash if present */
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
-
-	/* Check whether it is a roaming! */
-	if (tt_global_entry) {
+check_roaming:
+	/* Check whether it is a roaming, but don't do anything if the roaming
+	 * process has already been handled
+	 */
+	if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
 		/* These nodes are probably going to update their tt table */
-		head = &tt_global_entry->orig_list;
+		head = &tt_global->orig_list;
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
-			orig_entry->orig_node->tt_poss_change = true;
-
-			batadv_send_roam_adv(bat_priv,
-					     tt_global_entry->common.addr,
+			batadv_send_roam_adv(bat_priv, tt_global->common.addr,
 					     orig_entry->orig_node);
 		}
 		rcu_read_unlock();
-		/* The global entry has to be marked as ROAMING and
-		 * has to be kept for consistency purpose
-		 */
-		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
-		tt_global_entry->roam_at = jiffies;
+		if (roamed_back) {
+			batadv_tt_global_free(bat_priv, tt_global,
+					      "Roaming canceled");
+			tt_global = NULL;
+		} else {
+			/* The global entry has to be marked as ROAMING and
+			 * has to be kept for consistency purpose
+			 */
+			tt_global->common.flags |= BATADV_TT_CLIENT_ROAM;
+			tt_global->roam_at = jiffies;
+		}
 	}
+
 out:
-	if (tt_local_entry)
-		batadv_tt_local_entry_free_ref(tt_local_entry);
-	if (tt_global_entry)
-		batadv_tt_global_entry_free_ref(tt_global_entry);
+	if (tt_local)
+		batadv_tt_local_entry_free_ref(tt_local);
+	if (tt_global)
+		batadv_tt_global_entry_free_ref(tt_global);
 }
 
 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
@@ -434,22 +476,10 @@
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
-	int ret = 0;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	seq_printf(seq,
 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
@@ -479,7 +509,7 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 static void
@@ -501,24 +531,57 @@
 		   tt_local_entry->common.addr, message);
 }
 
-void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
-			    const char *message, bool roaming)
+/**
+ * batadv_tt_local_remove - logically remove an entry from the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the MAC address of the client to remove
+ * @message: message to append to the log on deletion
+ * @roaming: true if the deletion is due to a roaming event
+ *
+ * Returns the flags assigned to the local entry before being deleted
+ */
+uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
+				const uint8_t *addr, const char *message,
+				bool roaming)
 {
-	struct batadv_tt_local_entry *tt_local_entry = NULL;
-	uint16_t flags;
+	struct batadv_tt_local_entry *tt_local_entry;
+	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
 	if (!tt_local_entry)
 		goto out;
 
-	flags = BATADV_TT_CLIENT_DEL;
-	if (roaming)
-		flags |= BATADV_TT_CLIENT_ROAM;
+	curr_flags = tt_local_entry->common.flags;
 
-	batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
+	flags = BATADV_TT_CLIENT_DEL;
+	/* if this global entry addition is due to a roaming, the node has to
+	 * mark the local entry as "roamed" in order to correctly reroute
+	 * packets later
+	 */
+	if (roaming) {
+		flags |= BATADV_TT_CLIENT_ROAM;
+		/* mark the local client as ROAMed */
+		tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
+	}
+
+	if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) {
+		batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags,
+					    message);
+		goto out;
+	}
+	/* if this client has been added right now, it is possible to
+	 * immediately purge it
+	 */
+	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
+			      curr_flags | BATADV_TT_CLIENT_DEL);
+	hlist_del_rcu(&tt_local_entry->common.hash_entry);
+	batadv_tt_local_entry_free_ref(tt_local_entry);
+
 out:
 	if (tt_local_entry)
 		batadv_tt_local_entry_free_ref(tt_local_entry);
+
+	return curr_flags;
 }
 
 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
@@ -721,12 +784,23 @@
 			 const unsigned char *tt_addr, uint8_t flags,
 			 uint8_t ttvn)
 {
-	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry;
+	struct batadv_tt_local_entry *tt_local_entry;
 	int ret = 0;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
+	uint16_t local_flags;
 
 	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
+
+	/* if the node already has a local client for this entry, it has to wait
+	 * for a roaming advertisement instead of manually messing up the global
+	 * table
+	 */
+	if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry &&
+	    !(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW))
+		goto out;
 
 	if (!tt_global_entry) {
 		tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
@@ -738,6 +812,12 @@
 
 		common->flags = flags;
 		tt_global_entry->roam_at = 0;
+		/* node must store current time in case of roaming. This is
+		 * needed to purge this entry out on timeout (if nobody claims
+		 * it)
+		 */
+		if (flags & BATADV_TT_CLIENT_ROAM)
+			tt_global_entry->roam_at = jiffies;
 		atomic_set(&common->refcount, 2);
 		common->added_at = jiffies;
 
@@ -755,19 +835,31 @@
 			goto out_remove;
 		}
 	} else {
+		common = &tt_global_entry->common;
 		/* If there is already a global entry, we can use this one for
 		 * our processing.
-		 * But if we are trying to add a temporary client we can exit
-		 * directly because the temporary information should never
-		 * override any already known client state (whatever it is)
+		 * But if we are trying to add a temporary client then here are
+		 * two options at this point:
+		 * 1) the global client is not a temporary client: the global
+		 *    client has to be left as it is, temporary information
+		 *    should never override any already known client state
+		 * 2) the global client is a temporary client: purge the
+		 *    originator list and add the new orig_entry
 		 */
-		if (flags & BATADV_TT_CLIENT_TEMP)
-			goto out;
+		if (flags & BATADV_TT_CLIENT_TEMP) {
+			if (!(common->flags & BATADV_TT_CLIENT_TEMP))
+				goto out;
+			if (batadv_tt_global_entry_has_orig(tt_global_entry,
+							    orig_node))
+				goto out_remove;
+			batadv_tt_global_del_orig_list(tt_global_entry);
+			goto add_orig_entry;
+		}
 
 		/* if the client was temporarily added before receiving the first
 		 * OGM announcing it, we have to clear the TEMP flag
 		 */
-		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
+		common->flags &= ~BATADV_TT_CLIENT_TEMP;
 
 		/* the change can carry possible "attribute" flags like the
 		 * TT_CLIENT_WIFI, therefore they have to be copied in the
@@ -782,33 +874,81 @@
 		 * We should first delete the old originator before adding the
 		 * new one.
 		 */
-		if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
+		if (common->flags & BATADV_TT_CLIENT_ROAM) {
 			batadv_tt_global_del_orig_list(tt_global_entry);
-			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
+			common->flags &= ~BATADV_TT_CLIENT_ROAM;
 			tt_global_entry->roam_at = 0;
 		}
 	}
+add_orig_entry:
 	/* add the new orig_entry (if needed) or update it */
 	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new global tt entry: %pM (via %pM)\n",
-		   tt_global_entry->common.addr, orig_node->orig);
+		   common->addr, orig_node->orig);
+	ret = 1;
 
 out_remove:
+
 	/* remove address from local hash if present */
-	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
-			       "global tt received",
-			       flags & BATADV_TT_CLIENT_ROAM);
-	ret = 1;
+	local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
+					     "global tt received",
+					     !!(flags & BATADV_TT_CLIENT_ROAM));
+	tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
+
+	if (!(flags & BATADV_TT_CLIENT_ROAM))
+		/* this is a normal global add. Therefore the client is not in a
+		 * roaming state anymore.
+		 */
+		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
+
 out:
 	if (tt_global_entry)
 		batadv_tt_global_entry_free_ref(tt_global_entry);
+	if (tt_local_entry)
+		batadv_tt_local_entry_free_ref(tt_local_entry);
 	return ret;
 }
 
-/* print all orig nodes who announce the address for this global entry.
- * it is assumed that the caller holds rcu_read_lock();
+/* batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * @tt_global_entry: global translation table entry to be analyzed
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ * Returns best originator list entry or NULL on errors.
+ */
+static struct batadv_tt_orig_list_entry *
+batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
+{
+	struct batadv_neigh_node *router = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
+	int best_tq = 0;
+
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		router = batadv_orig_node_get_router(orig_entry->orig_node);
+		if (!router)
+			continue;
+
+		if (router->tq_avg > best_tq) {
+			best_entry = orig_entry;
+			best_tq = router->tq_avg;
+		}
+
+		batadv_neigh_node_free_ref(router);
+	}
+
+	return best_entry;
+}
+
+/* batadv_tt_global_print_entry - print all orig nodes who announce the address
+ * for this global entry
+ * @tt_global_entry: global translation table entry to be printed
+ * @seq: debugfs table seq_file struct
+ *
+ * This function assumes the caller holds rcu_read_lock().
  */
 static void
 batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
@@ -816,21 +956,37 @@
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
 	struct batadv_tt_common_entry *tt_common_entry;
 	uint16_t flags;
 	uint8_t last_ttvn;
 
 	tt_common_entry = &tt_global_entry->common;
+	flags = tt_common_entry->flags;
+
+	best_entry = batadv_transtable_best_orig(tt_global_entry);
+	if (best_entry) {
+		last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
+		seq_printf(seq,	" %c %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
+			   '*', tt_global_entry->common.addr,
+			   best_entry->ttvn, best_entry->orig_node->orig,
+			   last_ttvn,
+			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+	}
 
 	head = &tt_global_entry->orig_list;
 
 	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
-		flags = tt_common_entry->flags;
+		if (best_entry == orig_entry)
+			continue;
+
 		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-		seq_printf(seq,	" * %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
-			   tt_global_entry->common.addr, orig_entry->ttvn,
-			   orig_entry->orig_node->orig, last_ttvn,
+		seq_printf(seq,	" %c %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
+			   '+', tt_global_entry->common.addr,
+			   orig_entry->ttvn, orig_entry->orig_node->orig,
+			   last_ttvn,
 			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
 			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
 			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
@@ -848,22 +1004,10 @@
 	struct hlist_node *node;
 	struct hlist_head *head;
 	uint32_t i;
-	int ret = 0;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
 
 	seq_printf(seq,
 		   "Globally announced TT entries received via the mesh %s\n",
@@ -887,7 +1031,7 @@
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 /* deletes the orig list of a tt_global_entry */
@@ -933,21 +1077,6 @@
 	spin_unlock_bh(&tt_global_entry->list_lock);
 }
 
-static void
-batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
-			    struct batadv_tt_global_entry *tt_global_entry,
-			    const char *message)
-{
-	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Deleting global tt entry %pM: %s\n",
-		   tt_global_entry->common.addr, message);
-
-	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-			   batadv_choose_orig, tt_global_entry->common.addr);
-	batadv_tt_global_entry_free_ref(tt_global_entry);
-
-}
-
 /* If the client is to be deleted, we check if it is the last originator entry
  * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
  * timer, otherwise we simply remove the originator scheduled for deletion.
@@ -996,7 +1125,7 @@
 				 const unsigned char *addr,
 				 const char *message, bool roaming)
 {
-	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *local_entry = NULL;
 
 	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
@@ -1008,8 +1137,8 @@
 						orig_node, message);
 
 		if (hlist_empty(&tt_global_entry->orig_list))
-			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
-						    message);
+			batadv_tt_global_free(bat_priv, tt_global_entry,
+					      message);
 
 		goto out;
 	}
@@ -1032,7 +1161,7 @@
 	if (local_entry) {
 		/* local entry exists, case 2: client roamed to us. */
 		batadv_tt_global_del_orig_list(tt_global_entry);
-		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
+		batadv_tt_global_free(bat_priv, tt_global_entry, message);
 	} else
 		/* no local entry exists, case 1: check for roaming */
 		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
@@ -1203,15 +1332,12 @@
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *router = NULL;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct batadv_tt_orig_list_entry *orig_entry;
-	int best_tq;
+	struct batadv_tt_orig_list_entry *best_entry;
 
 	if (src && atomic_read(&bat_priv->ap_isolation)) {
 		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
-		if (!tt_local_entry)
+		if (!tt_local_entry ||
+		    (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
 			goto out;
 	}
 
@@ -1226,25 +1352,15 @@
 	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
 		goto out;
 
-	best_tq = 0;
-
 	rcu_read_lock();
-	head = &tt_global_entry->orig_list;
-	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
-		router = batadv_orig_node_get_router(orig_entry->orig_node);
-		if (!router)
-			continue;
-
-		if (router->tq_avg > best_tq) {
-			orig_node = orig_entry->orig_node;
-			best_tq = router->tq_avg;
-		}
-		batadv_neigh_node_free_ref(router);
-	}
+	best_entry = batadv_transtable_best_orig(tt_global_entry);
 	/* found anything? */
+	if (best_entry)
+		orig_node = best_entry->orig_node;
 	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
 		orig_node = NULL;
 	rcu_read_unlock();
+
 out:
 	if (tt_global_entry)
 		batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -1477,11 +1593,11 @@
 	tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
 	len = tt_query_size + tt_len;
-	skb = dev_alloc_skb(len + ETH_HLEN);
+	skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
 	if (!skb)
 		goto out;
 
-	skb_reserve(skb, ETH_HLEN);
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 	tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
 	tt_response->ttvn = ttvn;
 
@@ -1526,7 +1642,6 @@
 {
 	struct sk_buff *skb = NULL;
 	struct batadv_tt_query_packet *tt_request;
-	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_tt_req_node *tt_req_node = NULL;
 	int ret = 1;
@@ -1543,11 +1658,11 @@
 	if (!tt_req_node)
 		goto out;
 
-	skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
+	skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN + NET_IP_ALIGN);
 	if (!skb)
 		goto out;
 
-	skb_reserve(skb, ETH_HLEN);
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 
 	tt_req_len = sizeof(*tt_request);
 	tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
@@ -1564,23 +1679,15 @@
 	if (full_table)
 		tt_request->flags |= BATADV_TT_FULL_TABLE;
 
-	neigh_node = batadv_orig_node_get_router(dst_orig_node);
-	if (!neigh_node)
-		goto out;
-
-	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
-		   dst_orig_node->orig, neigh_node->addr,
-		   (full_table ? 'F' : '.'));
+	batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
+		   dst_orig_node->orig, (full_table ? 'F' : '.'));
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
 
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = 0;
+	if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL))
+		ret = 0;
 
 out:
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
 	if (ret)
@@ -1598,9 +1705,8 @@
 batadv_send_other_tt_response(struct batadv_priv *bat_priv,
 			      struct batadv_tt_query_packet *tt_request)
 {
-	struct batadv_orig_node *req_dst_orig_node = NULL;
+	struct batadv_orig_node *req_dst_orig_node;
 	struct batadv_orig_node *res_dst_orig_node = NULL;
-	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	uint8_t orig_ttvn, req_ttvn, ttvn;
 	int ret = false;
@@ -1626,10 +1732,6 @@
 	if (!res_dst_orig_node)
 		goto out;
 
-	neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
-	if (!neigh_node)
-		goto out;
-
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
@@ -1658,11 +1760,11 @@
 		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
 		len = sizeof(*tt_response) + tt_len;
-		skb = dev_alloc_skb(len + ETH_HLEN);
+		skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
 		if (!skb)
 			goto unlock;
 
-		skb_reserve(skb, ETH_HLEN);
+		skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 		packet_pos = skb_put(skb, len);
 		tt_response = (struct batadv_tt_query_packet *)packet_pos;
 		tt_response->ttvn = req_ttvn;
@@ -1701,14 +1803,13 @@
 		tt_response->flags |= BATADV_TT_FULL_TABLE;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
-		   res_dst_orig_node->orig, neigh_node->addr,
-		   req_dst_orig_node->orig, req_ttvn);
+		   "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
+		   res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = true;
+	if (batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL))
+		ret = true;
 	goto out;
 
 unlock:
@@ -1719,8 +1820,6 @@
 		batadv_orig_node_free_ref(res_dst_orig_node);
 	if (req_dst_orig_node)
 		batadv_orig_node_free_ref(req_dst_orig_node);
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
 	if (!ret)
@@ -1733,8 +1832,7 @@
 batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 			   struct batadv_tt_query_packet *tt_request)
 {
-	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_orig_node *orig_node;
 	struct batadv_hard_iface *primary_if = NULL;
 	uint8_t my_ttvn, req_ttvn, ttvn;
 	int ret = false;
@@ -1759,10 +1857,6 @@
 	if (!orig_node)
 		goto out;
 
-	neigh_node = batadv_orig_node_get_router(orig_node);
-	if (!neigh_node)
-		goto out;
-
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
@@ -1785,11 +1879,11 @@
 		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
 		len = sizeof(*tt_response) + tt_len;
-		skb = dev_alloc_skb(len + ETH_HLEN);
+		skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
 		if (!skb)
 			goto unlock;
 
-		skb_reserve(skb, ETH_HLEN);
+		skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 		packet_pos = skb_put(skb, len);
 		tt_response = (struct batadv_tt_query_packet *)packet_pos;
 		tt_response->ttvn = req_ttvn;
@@ -1826,14 +1920,14 @@
 		tt_response->flags |= BATADV_TT_FULL_TABLE;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
-		   orig_node->orig, neigh_node->addr,
+		   "Sending TT_RESPONSE to %pM [%c]\n",
+		   orig_node->orig,
 		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = true;
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+		ret = true;
 	goto out;
 
 unlock:
@@ -1841,8 +1935,6 @@
 out:
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
 	if (!ret)
@@ -1899,7 +1991,7 @@
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
 				  struct batadv_tt_query_packet *tt_response)
 {
-	struct batadv_orig_node *orig_node = NULL;
+	struct batadv_orig_node *orig_node;
 
 	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
 	if (!orig_node)
@@ -1941,7 +2033,7 @@
 
 bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
 {
-	struct batadv_tt_local_entry *tt_local_entry = NULL;
+	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
@@ -1950,7 +2042,8 @@
 	/* Check if the client has been logically deleted (but is kept for
 	 * consistency purpose)
 	 */
-	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
+	if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) ||
+	    (tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM))
 		goto out;
 	ret = true;
 out:
@@ -2001,10 +2094,6 @@
 
 	/* Recalculate the CRC for this orig_node and store it */
 	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
-	/* Roaming phase is over: tables are in sync again. I can
-	 * unset the flag
-	 */
-	orig_node->tt_poss_change = false;
 out:
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
@@ -2110,7 +2199,6 @@
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
 				 struct batadv_orig_node *orig_node)
 {
-	struct batadv_neigh_node *neigh_node = NULL;
 	struct sk_buff *skb = NULL;
 	struct batadv_roam_adv_packet *roam_adv_packet;
 	int ret = 1;
@@ -2123,11 +2211,11 @@
 	if (!batadv_tt_check_roam_count(bat_priv, client))
 		goto out;
 
-	skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
+	skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN + NET_IP_ALIGN);
 	if (!skb)
 		goto out;
 
-	skb_reserve(skb, ETH_HLEN);
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
 
 	roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
 
@@ -2143,23 +2231,17 @@
 	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
 	memcpy(roam_adv_packet->client, client, ETH_ALEN);
 
-	neigh_node = batadv_orig_node_get_router(orig_node);
-	if (!neigh_node)
-		goto out;
-
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
-		   orig_node->orig, client, neigh_node->addr);
+		   "Sending ROAMING_ADV to %pM (client %pM)\n",
+		   orig_node->orig, client);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = 0;
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+		ret = 0;
 
 out:
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
-	if (ret)
+	if (ret && skb)
 		kfree_skb(skb);
 	return;
 }
@@ -2295,7 +2377,6 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Local changes committed, updating to ttvn %u\n",
 		   (uint8_t)atomic_read(&bat_priv->tt.vn));
-	bat_priv->tt.poss_change = false;
 
 	/* reset the sending counter */
 	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
@@ -2407,11 +2488,6 @@
 		 */
 		if (orig_node->tt_crc != tt_crc)
 			goto request_table;
-
-		/* Roaming phase is over: tables are in sync again. I can
-		 * unset the flag
-		 */
-		orig_node->tt_poss_change = false;
 	} else {
 		/* if we missed more than one change or our tables are not
 		 * in sync anymore -> request fresh tt data
@@ -2444,12 +2520,38 @@
 	if (!tt_global_entry)
 		goto out;
 
-	ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+	ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM);
 	batadv_tt_global_entry_free_ref(tt_global_entry);
 out:
 	return ret;
 }
 
+/**
+ * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the MAC address of the local client to query
+ *
+ * Returns true if the local client is known to be roaming (it is not served by
+ * this node anymore), false otherwise. In that case the client is still kept
+ * in the table to keep the latter consistent with the node TTVN
+ */
+bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
+				       uint8_t *addr)
+{
+	struct batadv_tt_local_entry *tt_local_entry;
+	bool ret = false;
+
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+	if (!tt_local_entry)
+		goto out;
+
+	ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+	batadv_tt_local_entry_free_ref(tt_local_entry);
+out:
+	return ret;
+}
+
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr)
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 811fffd..46d4451 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -24,9 +24,9 @@
 int batadv_tt_init(struct batadv_priv *bat_priv);
 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 			 int ifindex);
-void batadv_tt_local_remove(struct batadv_priv *bat_priv,
-			    const uint8_t *addr, const char *message,
-			    bool roaming);
+uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
+				const uint8_t *addr, const char *message,
+				bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
@@ -59,6 +59,8 @@
 			  int packet_min_len);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 					uint8_t *addr);
+bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
+				       uint8_t *addr);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ac1e07a..ae9ac9a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -28,20 +28,41 @@
 	(ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
 			sizeof(struct batadv_bcast_packet)))
 
+#ifdef CONFIG_BATMAN_ADV_DAT
+
+/* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
+ * BATADV_DAT_ADDR_MAX is changed as well.
+ *
+ * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
+ */
+#define batadv_dat_addr_t uint16_t
+
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+/**
+ * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
+ * @ogm_buff: buffer holding the OGM packet
+ * @ogm_buff_len: length of the OGM packet buffer
+ * @ogm_seqno: OGM sequence number - used to identify each OGM
+ */
+struct batadv_hard_iface_bat_iv {
+	unsigned char *ogm_buff;
+	int ogm_buff_len;
+	atomic_t ogm_seqno;
+};
+
 struct batadv_hard_iface {
 	struct list_head list;
 	int16_t if_num;
 	char if_status;
 	struct net_device *net_dev;
-	atomic_t seqno;
 	atomic_t frag_seqno;
-	unsigned char *packet_buff;
-	int packet_len;
 	struct kobject *hardif_obj;
 	atomic_t refcount;
 	struct packet_type batman_adv_ptype;
 	struct net_device *soft_iface;
 	struct rcu_head rcu;
+	struct batadv_hard_iface_bat_iv bat_iv;
 };
 
 /**
@@ -63,6 +84,9 @@
 	uint8_t orig[ETH_ALEN];
 	uint8_t primary_addr[ETH_ALEN];
 	struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+#ifdef CONFIG_BATMAN_ADV_DAT
+	batadv_dat_addr_t dat_addr;
+#endif
 	unsigned long *bcast_own;
 	uint8_t *bcast_own_sum;
 	unsigned long last_seen;
@@ -77,13 +101,6 @@
 	spinlock_t tt_buff_lock; /* protects tt_buff */
 	atomic_t tt_size;
 	bool tt_initialised;
-	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
-	 * If true, then I sent a Roaming_adv to this orig_node and I have to
-	 * inspect every packet directed to it to check whether it is still
-	 * the true destination or not. This flag will be reset to false as
-	 * soon as I receive a new TTVN from this orig_node
-	 */
-	bool tt_poss_change;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
@@ -139,7 +156,7 @@
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bcast_duplist_entry {
 	uint8_t orig[ETH_ALEN];
-	uint16_t crc;
+	__be32 crc;
 	unsigned long entrytime;
 };
 #endif
@@ -162,6 +179,13 @@
 	BATADV_CNT_TT_RESPONSE_RX,
 	BATADV_CNT_TT_ROAM_ADV_TX,
 	BATADV_CNT_TT_ROAM_ADV_RX,
+#ifdef CONFIG_BATMAN_ADV_DAT
+	BATADV_CNT_DAT_GET_TX,
+	BATADV_CNT_DAT_GET_RX,
+	BATADV_CNT_DAT_PUT_TX,
+	BATADV_CNT_DAT_PUT_RX,
+	BATADV_CNT_DAT_CACHED_REPLY_TX,
+#endif
 	BATADV_CNT_NUM,
 };
 
@@ -181,7 +205,6 @@
 	atomic_t vn;
 	atomic_t ogm_append_cnt;
 	atomic_t local_changes;
-	bool poss_change;
 	struct list_head changes_list;
 	struct batadv_hashtable *local_hash;
 	struct batadv_hashtable *global_hash;
@@ -228,6 +251,20 @@
 	struct batadv_vis_info *my_info;
 };
 
+/**
+ * struct batadv_priv_dat - per mesh interface DAT private data
+ * @addr: node DAT address
+ * @hash: hashtable representing the local ARP cache
+ * @work: work queue callback item for cache purging
+ */
+#ifdef CONFIG_BATMAN_ADV_DAT
+struct batadv_priv_dat {
+	batadv_dat_addr_t addr;
+	struct batadv_hashtable *hash;
+	struct delayed_work work;
+};
+#endif
+
 struct batadv_priv {
 	atomic_t mesh_state;
 	struct net_device_stats stats;
@@ -237,6 +274,9 @@
 	atomic_t fragmentation;		/* boolean */
 	atomic_t ap_isolation;		/* boolean */
 	atomic_t bridge_loop_avoidance;	/* boolean */
+#ifdef CONFIG_BATMAN_ADV_DAT
+	atomic_t distributed_arp_table;	/* boolean */
+#endif
 	atomic_t vis_mode;		/* VIS_TYPE_* */
 	atomic_t gw_mode;		/* GW_MODE_* */
 	atomic_t gw_sel_class;		/* uint */
@@ -255,7 +295,7 @@
 	struct hlist_head forw_bcast_list;
 	struct batadv_hashtable *orig_hash;
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-	spinlock_t forw_bcast_list_lock; /* protects  */
+	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
 	struct delayed_work orig_work;
 	struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
 	struct batadv_algo_ops *bat_algo_ops;
@@ -265,6 +305,9 @@
 	struct batadv_priv_gw gw;
 	struct batadv_priv_tt tt;
 	struct batadv_priv_vis vis;
+#ifdef CONFIG_BATMAN_ADV_DAT
+	struct batadv_priv_dat dat;
+#endif
 };
 
 struct batadv_socket_client {
@@ -318,6 +361,7 @@
 	struct hlist_node hash_entry;
 	struct batadv_priv *bat_priv;
 	unsigned long lasttime;	/* last time we heard of this backbone gw */
+	atomic_t wait_periods;
 	atomic_t request_sent;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -437,4 +481,36 @@
 	void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
 };
 
+/**
+ * struct batadv_dat_entry - a single entry of the batman-adv ARP backend. It
+ * is used to store ARP entries needed for the global DAT cache
+ * @ip: the IPv4 corresponding to this DAT/ARP entry
+ * @mac_addr: the MAC address associated to the stored IPv4
+ * @last_update: time in jiffies when this entry was refreshed last time
+ * @hash_entry: hlist node for batadv_priv_dat::hash
+ * @refcount: number of contexts the object is used in
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_dat_entry {
+	__be32 ip;
+	uint8_t mac_addr[ETH_ALEN];
+	unsigned long last_update;
+	struct hlist_node hash_entry;
+	atomic_t refcount;
+	struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_dat_candidate - candidate destination for DAT operations
+ * @type: the type of the selected candidate. It can be one of the following:
+ *	  - BATADV_DAT_CANDIDATE_NOT_FOUND
+ *	  - BATADV_DAT_CANDIDATE_ORIG
+ * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
+ *	       corresponding originator node structure
+ */
+struct batadv_dat_candidate {
+	int type;
+	struct batadv_orig_node *orig_node;
+};
+
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index f397232..10aff49 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -291,14 +291,118 @@
 	return ret;
 }
 
-int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv)
+/**
+ * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
+ * common fields for unicast packets
+ * @skb: packet
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Returns false if the buffer extension was not possible or true otherwise
+ */
+static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
+					     struct batadv_orig_node *orig_node)
+{
+	struct batadv_unicast_packet *unicast_packet;
+	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+
+	if (batadv_skb_head_push(skb, hdr_size) < 0)
+		return false;
+
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	unicast_packet->header.version = BATADV_COMPAT_VERSION;
+	/* batman packet type: unicast */
+	unicast_packet->header.packet_type = BATADV_UNICAST;
+	/* set unicast ttl */
+	unicast_packet->header.ttl = BATADV_TTL;
+	/* copy the destination for faster routing */
+	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+	/* set the destination tt version number */
+	unicast_packet->ttvn = ttvn;
+
+	return true;
+}
+
+/**
+ * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise
+ */
+static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
+				       struct batadv_orig_node *orig_node)
+{
+	size_t uni_size = sizeof(struct batadv_unicast_packet);
+	return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
+ * header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ * @packet_subtype: the batman 4addr packet subtype to use
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise
+ */
+bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
+				      struct sk_buff *skb,
+				      struct batadv_orig_node *orig,
+				      int packet_subtype)
+{
+	struct batadv_hard_iface *primary_if;
+	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+	bool ret = false;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* pull the header space and fill the unicast_packet substructure.
+	 * We can do that because the first member of the unicast_4addr_packet
+	 * is of type struct unicast_packet
+	 */
+	if (!batadv_unicast_push_and_fill_skb(skb,
+					      sizeof(*unicast_4addr_packet),
+					      orig))
+		goto out;
+
+	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+	unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+	memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
+	       ETH_ALEN);
+	unicast_4addr_packet->subtype = packet_subtype;
+	unicast_4addr_packet->reserved = 0;
+
+	ret = true;
+out:
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	return ret;
+}
+
+/**
+ * batadv_unicast_generic_send_skb - send an skb as unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
+ *		    not BATADV_UNICAT_4ADDR
+ *		    not BATADV_UNICAST_4ADDR
+ * Returns 1 in case of error or 0 otherwise
+ */
+int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb, int packet_type,
+				    int packet_subtype)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_orig_node *orig_node;
 	struct batadv_neigh_node *neigh_node;
 	int data_len = skb->len;
-	int ret = 1;
+	int ret = NET_RX_DROP;
 	unsigned int dev_mtu;
 
 	/* get routing information */
@@ -324,21 +428,23 @@
 	if (!neigh_node)
 		goto out;
 
-	if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
+	switch (packet_type) {
+	case BATADV_UNICAST:
+		batadv_unicast_prepare_skb(skb, orig_node);
+		break;
+	case BATADV_UNICAST_4ADDR:
+		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+						 packet_subtype);
+		break;
+	default:
+		/* this function supports UNICAST and UNICAST_4ADDR only. It
+		 * should never be invoked with any other packet type
+		 */
 		goto out;
+	}
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
-	unicast_packet->header.version = BATADV_COMPAT_VERSION;
-	/* batman packet type: unicast */
-	unicast_packet->header.packet_type = BATADV_UNICAST;
-	/* set unicast ttl */
-	unicast_packet->header.ttl = BATADV_TTL;
-	/* copy the destination for faster routing */
-	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-	/* set the destination tt version number */
-	unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-
 	/* inform the destination node that we are still missing a correct route
 	 * for this client. The destination will receive this packet and will
 	 * try to reroute it because the ttvn contained in the header is less
@@ -348,7 +454,9 @@
 		unicast_packet->ttvn = unicast_packet->ttvn - 1;
 
 	dev_mtu = neigh_node->if_incoming->net_dev->mtu;
-	if (atomic_read(&bat_priv->fragmentation) &&
+	/* fragmentation mechanism only works for UNICAST (now) */
+	if (packet_type == BATADV_UNICAST &&
+	    atomic_read(&bat_priv->fragmentation) &&
 	    data_len + sizeof(*unicast_packet) > dev_mtu) {
 		/* send frag skb decreases ttl */
 		unicast_packet->header.ttl++;
@@ -358,16 +466,15 @@
 		goto out;
 	}
 
-	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
-	ret = 0;
-	goto out;
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+		ret = 0;
 
 out:
 	if (neigh_node)
 		batadv_neigh_node_free_ref(neigh_node);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
-	if (ret == 1)
+	if (ret == NET_RX_DROP)
 		kfree_skb(skb);
 	return ret;
 }
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 1c46e2e..61abba5 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -29,10 +29,44 @@
 			       struct batadv_priv *bat_priv,
 			       struct sk_buff **new_skb);
 void batadv_frag_list_free(struct list_head *head);
-int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
 int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
 			 struct batadv_hard_iface *hard_iface,
 			 const uint8_t dstaddr[]);
+bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
+				      struct sk_buff *skb,
+				      struct batadv_orig_node *orig_node,
+				      int packet_subtype);
+int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb, int packet_type,
+				    int packet_subtype);
+
+
+/**
+ * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ */
+static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
+					  struct sk_buff *skb)
+{
+	return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
+					       0);
+}
+
+/**
+ * batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast4addr packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the batman 4addr packet subtype to use
+ */
+static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
+						struct sk_buff *skb,
+						int packet_subtype)
+{
+	return batadv_unicast_generic_send_skb(bat_priv, skb,
+					       BATADV_UNICAST_4ADDR,
+					       packet_subtype);
+}
 
 static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
 {
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 5abd145..0f65a9d 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -396,12 +396,12 @@
 		return NULL;
 
 	len = sizeof(*packet) + vis_info_len;
-	info->skb_packet = dev_alloc_skb(len + ETH_HLEN);
+	info->skb_packet = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
 	if (!info->skb_packet) {
 		kfree(info);
 		return NULL;
 	}
-	skb_reserve(info->skb_packet, ETH_HLEN);
+	skb_reserve(info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
 	packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
 
 	kref_init(&info->refcount);
@@ -698,15 +698,12 @@
 static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
 					struct batadv_vis_info *info)
 {
-	struct batadv_neigh_node *router;
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	struct batadv_vis_packet *packet;
 	struct sk_buff *skb;
-	struct batadv_hard_iface *hard_iface;
-	uint8_t dstaddr[ETH_ALEN];
 	uint32_t i;
 
 
@@ -722,30 +719,20 @@
 			if (!(orig_node->flags & BATADV_VIS_SERVER))
 				continue;
 
-			router = batadv_orig_node_get_router(orig_node);
-			if (!router)
-				continue;
-
 			/* don't send it if we already received the packet from
 			 * this node.
 			 */
 			if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
-						   orig_node->orig)) {
-				batadv_neigh_node_free_ref(router);
+						   orig_node->orig))
 				continue;
-			}
 
 			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-			hard_iface = router->if_incoming;
-			memcpy(dstaddr, router->addr, ETH_ALEN);
-
-			batadv_neigh_node_free_ref(router);
-
 			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-			if (skb)
-				batadv_send_skb_packet(skb, hard_iface,
-						       dstaddr);
+			if (!skb)
+				continue;
 
+			if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
+				kfree_skb(skb);
 		}
 		rcu_read_unlock();
 	}
@@ -755,7 +742,6 @@
 				      struct batadv_vis_info *info)
 {
 	struct batadv_orig_node *orig_node;
-	struct batadv_neigh_node *router = NULL;
 	struct sk_buff *skb;
 	struct batadv_vis_packet *packet;
 
@@ -765,17 +751,14 @@
 	if (!orig_node)
 		goto out;
 
-	router = batadv_orig_node_get_router(orig_node);
-	if (!router)
+	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
+	if (!skb)
 		goto out;
 
-	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-	if (skb)
-		batadv_send_skb_packet(skb, router->if_incoming, router->addr);
+	if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
+		kfree_skb(skb);
 
 out:
-	if (router)
-		batadv_neigh_node_free_ref(router);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 }
@@ -873,12 +856,13 @@
 	if (!bat_priv->vis.my_info)
 		goto err;
 
-	len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
+	len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE;
+	len += ETH_HLEN + NET_IP_ALIGN;
 	bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
 	if (!bat_priv->vis.my_info->skb_packet)
 		goto free_info;
 
-	skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+	skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
 	tmp_skb = bat_priv->vis.my_info->skb_packet;
 	packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 3537d38..d3f3f7b 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -11,6 +11,7 @@
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES
 	select CRYPTO_ECB
+	select CRYPTO_SHA256
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
@@ -47,4 +48,3 @@
 source "net/bluetooth/hidp/Kconfig"
 
 source "drivers/bluetooth/Kconfig"
-
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index fa6d94a..dea6a28 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
-	a2mp.o
+	a2mp.o amp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 0760d1f..2f67d5e 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,11 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
+
+/* Global AMP Manager list */
+LIST_HEAD(amp_mgr_list);
+DEFINE_MUTEX(amp_mgr_list_lock);
 
 /* A2MP build & send command helper functions */
 static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
@@ -37,8 +42,7 @@
 	return cmd;
 }
 
-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
-		      void *data)
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
 {
 	struct l2cap_chan *chan = mgr->a2mp_chan;
 	struct a2mp_cmd *cmd;
@@ -63,6 +67,14 @@
 	kfree(cmd);
 }
 
+u8 __next_ident(struct amp_mgr *mgr)
+{
+	if (++mgr->ident == 0)
+		mgr->ident = 1;
+
+	return mgr->ident;
+}
+
 static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
 {
 	cl->id = 0;
@@ -161,6 +173,83 @@
 	return 0;
 }
 
+static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+			     struct a2mp_cmd *hdr)
+{
+	struct a2mp_discov_rsp *rsp = (void *) skb->data;
+	u16 len = le16_to_cpu(hdr->len);
+	struct a2mp_cl *cl;
+	u16 ext_feat;
+	bool found = false;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	len -= sizeof(*rsp);
+	skb_pull(skb, sizeof(*rsp));
+
+	ext_feat = le16_to_cpu(rsp->ext_feat);
+
+	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
+
+	/* check that packet is not broken for now */
+	while (ext_feat & A2MP_FEAT_EXT) {
+		if (len < sizeof(ext_feat))
+			return -EINVAL;
+
+		ext_feat = get_unaligned_le16(skb->data);
+		BT_DBG("efm 0x%4.4x", ext_feat);
+		len -= sizeof(ext_feat);
+		skb_pull(skb, sizeof(ext_feat));
+	}
+
+	cl = (void *) skb->data;
+	while (len >= sizeof(*cl)) {
+		BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
+		       cl->status);
+
+		if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
+			struct a2mp_info_req req;
+
+			found = true;
+			req.id = cl->id;
+			a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
+				  sizeof(req), &req);
+		}
+
+		len -= sizeof(*cl);
+		cl = (void *) skb_pull(skb, sizeof(*cl));
+	}
+
+	/* Fall back to L2CAP init sequence */
+	if (!found) {
+		struct l2cap_conn *conn = mgr->l2cap_conn;
+		struct l2cap_chan *chan;
+
+		mutex_lock(&conn->chan_lock);
+
+		list_for_each_entry(chan, &conn->chan_l, list) {
+
+			BT_DBG("chan %p state %s", chan,
+			       state_to_string(chan->state));
+
+			if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP)
+				continue;
+
+			l2cap_chan_lock(chan);
+
+			if (chan->state == BT_CONNECT)
+				l2cap_send_conn_req(chan);
+
+			l2cap_chan_unlock(chan);
+		}
+
+		mutex_unlock(&conn->chan_lock);
+	}
+
+	return 0;
+}
+
 static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
 			      struct a2mp_cmd *hdr)
 {
@@ -181,7 +270,6 @@
 			    struct a2mp_cmd *hdr)
 {
 	struct a2mp_info_req *req  = (void *) skb->data;
-	struct a2mp_info_rsp rsp;
 	struct hci_dev *hdev;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
@@ -189,53 +277,93 @@
 
 	BT_DBG("id %d", req->id);
 
-	rsp.id = req->id;
-	rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
 	hdev = hci_dev_get(req->id);
-	if (hdev && hdev->amp_type != HCI_BREDR) {
-		rsp.status = 0;
-		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
-		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
-		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
-		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
-		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+	if (!hdev || hdev->dev_type != HCI_AMP) {
+		struct a2mp_info_rsp rsp;
+
+		rsp.id = req->id;
+		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+		a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
+			  &rsp);
+
+		goto done;
 	}
 
+	mgr->state = READ_LOC_AMP_INFO;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+done:
 	if (hdev)
 		hci_dev_put(hdev);
 
-	a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
-
 	skb_pull(skb, sizeof(*req));
 	return 0;
 }
 
+static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+			    struct a2mp_cmd *hdr)
+{
+	struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
+	struct a2mp_amp_assoc_req req;
+	struct amp_ctrl *ctrl;
+
+	if (le16_to_cpu(hdr->len) < sizeof(*rsp))
+		return -EINVAL;
+
+	BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status);
+
+	if (rsp->status)
+		return -EINVAL;
+
+	ctrl = amp_ctrl_add(mgr, rsp->id);
+	if (!ctrl)
+		return -ENOMEM;
+
+	req.id = rsp->id;
+	a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
+		  &req);
+
+	skb_pull(skb, sizeof(*rsp));
+	return 0;
+}
+
 static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
 				struct a2mp_cmd *hdr)
 {
 	struct a2mp_amp_assoc_req *req = (void *) skb->data;
 	struct hci_dev *hdev;
+	struct amp_mgr *tmp;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
 		return -EINVAL;
 
 	BT_DBG("id %d", req->id);
 
+	/* Make sure that no other request is being processed */
+	tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+
 	hdev = hci_dev_get(req->id);
-	if (!hdev || hdev->amp_type == HCI_BREDR) {
+	if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
 		struct a2mp_amp_assoc_rsp rsp;
 		rsp.id = req->id;
-		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+		if (tmp) {
+			rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+			amp_mgr_put(tmp);
+		} else {
+			rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+		}
 
 		a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
 			  &rsp);
-		goto clean;
+
+		goto done;
 	}
 
-	/* Placeholder for HCI Read AMP Assoc */
+	amp_read_loc_assoc(hdev, mgr);
 
-clean:
+done:
 	if (hdev)
 		hci_dev_put(hdev);
 
@@ -243,6 +371,68 @@
 	return 0;
 }
 
+static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+				struct a2mp_cmd *hdr)
+{
+	struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
+	u16 len = le16_to_cpu(hdr->len);
+	struct hci_dev *hdev;
+	struct amp_ctrl *ctrl;
+	struct hci_conn *hcon;
+	size_t assoc_len;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	assoc_len = len - sizeof(*rsp);
+
+	BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
+	       assoc_len);
+
+	if (rsp->status)
+		return -EINVAL;
+
+	/* Save remote ASSOC data */
+	ctrl = amp_ctrl_lookup(mgr, rsp->id);
+	if (ctrl) {
+		u8 *assoc;
+
+		assoc = kzalloc(assoc_len, GFP_KERNEL);
+		if (!assoc) {
+			amp_ctrl_put(ctrl);
+			return -ENOMEM;
+		}
+
+		memcpy(assoc, rsp->amp_assoc, assoc_len);
+		ctrl->assoc = assoc;
+		ctrl->assoc_len = assoc_len;
+		ctrl->assoc_rem_len = assoc_len;
+		ctrl->assoc_len_so_far = 0;
+
+		amp_ctrl_put(ctrl);
+	}
+
+	/* Create Phys Link */
+	hdev = hci_dev_get(rsp->id);
+	if (!hdev)
+		return -EINVAL;
+
+	hcon = phylink_add(hdev, mgr, rsp->id, true);
+	if (!hcon)
+		goto done;
+
+	BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id);
+
+	mgr->bredr_chan->remote_amp_id = rsp->id;
+
+	amp_create_phylink(hdev, mgr, hcon);
+
+done:
+	hci_dev_put(hdev);
+	skb_pull(skb, len);
+	return 0;
+}
+
 static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
 				   struct a2mp_cmd *hdr)
 {
@@ -250,6 +440,8 @@
 
 	struct a2mp_physlink_rsp rsp;
 	struct hci_dev *hdev;
+	struct hci_conn *hcon;
+	struct amp_ctrl *ctrl;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
 		return -EINVAL;
@@ -265,9 +457,43 @@
 		goto send_rsp;
 	}
 
-	/* TODO process physlink create */
+	ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
+	if (!ctrl) {
+		ctrl = amp_ctrl_add(mgr, rsp.remote_id);
+		if (ctrl) {
+			amp_ctrl_get(ctrl);
+		} else {
+			rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+			goto send_rsp;
+		}
+	}
 
-	rsp.status = A2MP_STATUS_SUCCESS;
+	if (ctrl) {
+		size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
+		u8 *assoc;
+
+		assoc = kzalloc(assoc_len, GFP_KERNEL);
+		if (!assoc) {
+			amp_ctrl_put(ctrl);
+			return -ENOMEM;
+		}
+
+		memcpy(assoc, req->amp_assoc, assoc_len);
+		ctrl->assoc = assoc;
+		ctrl->assoc_len = assoc_len;
+		ctrl->assoc_rem_len = assoc_len;
+		ctrl->assoc_len_so_far = 0;
+
+		amp_ctrl_put(ctrl);
+	}
+
+	hcon = phylink_add(hdev, mgr, req->local_id, false);
+	if (hcon) {
+		amp_accept_phylink(hdev, mgr, hcon);
+		rsp.status = A2MP_STATUS_SUCCESS;
+	} else {
+		rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+	}
 
 send_rsp:
 	if (hdev)
@@ -286,6 +512,7 @@
 	struct a2mp_physlink_req *req = (void *) skb->data;
 	struct a2mp_physlink_rsp rsp;
 	struct hci_dev *hdev;
+	struct hci_conn *hcon;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
 		return -EINVAL;
@@ -296,14 +523,22 @@
 	rsp.remote_id = req->local_id;
 	rsp.status = A2MP_STATUS_SUCCESS;
 
-	hdev = hci_dev_get(req->local_id);
+	hdev = hci_dev_get(req->remote_id);
 	if (!hdev) {
 		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
 		goto send_rsp;
 	}
 
+	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
+	if (!hcon) {
+		BT_ERR("No phys link exist");
+		rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
+		goto clean;
+	}
+
 	/* TODO Disconnect Phys Link here */
 
+clean:
 	hci_dev_put(hdev);
 
 send_rsp:
@@ -377,10 +612,19 @@
 			err = a2mp_discphyslink_req(mgr, skb, hdr);
 			break;
 
-		case A2MP_CHANGE_RSP:
 		case A2MP_DISCOVER_RSP:
+			err = a2mp_discover_rsp(mgr, skb, hdr);
+			break;
+
 		case A2MP_GETINFO_RSP:
+			err = a2mp_getinfo_rsp(mgr, skb, hdr);
+			break;
+
 		case A2MP_GETAMPASSOC_RSP:
+			err = a2mp_getampassoc_rsp(mgr, skb, hdr);
+			break;
+
+		case A2MP_CHANGE_RSP:
 		case A2MP_CREATEPHYSLINK_RSP:
 		case A2MP_DISCONNPHYSLINK_RSP:
 			err = a2mp_cmd_rsp(mgr, skb, hdr);
@@ -455,9 +699,10 @@
 	.new_connection = l2cap_chan_no_new_connection,
 	.teardown = l2cap_chan_no_teardown,
 	.ready = l2cap_chan_no_ready,
+	.defer = l2cap_chan_no_defer,
 };
 
-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
 {
 	struct l2cap_chan *chan;
 	int err;
@@ -492,7 +737,10 @@
 
 	chan->conf_state = 0;
 
-	l2cap_chan_add(conn, chan);
+	if (locked)
+		__l2cap_chan_add(conn, chan);
+	else
+		l2cap_chan_add(conn, chan);
 
 	chan->remote_mps = chan->omtu;
 	chan->mps = chan->omtu;
@@ -503,11 +751,13 @@
 }
 
 /* AMP Manager functions */
-void amp_mgr_get(struct amp_mgr *mgr)
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
 {
 	BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
 
 	kref_get(&mgr->kref);
+
+	return mgr;
 }
 
 static void amp_mgr_destroy(struct kref *kref)
@@ -516,6 +766,11 @@
 
 	BT_DBG("mgr %p", mgr);
 
+	mutex_lock(&amp_mgr_list_lock);
+	list_del(&mgr->list);
+	mutex_unlock(&amp_mgr_list_lock);
+
+	amp_ctrl_list_flush(mgr);
 	kfree(mgr);
 }
 
@@ -526,7 +781,7 @@
 	return kref_put(&mgr->kref, &amp_mgr_destroy);
 }
 
-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
 {
 	struct amp_mgr *mgr;
 	struct l2cap_chan *chan;
@@ -539,7 +794,7 @@
 
 	mgr->l2cap_conn = conn;
 
-	chan = a2mp_chan_open(conn);
+	chan = a2mp_chan_open(conn, locked);
 	if (!chan) {
 		kfree(mgr);
 		return NULL;
@@ -552,6 +807,14 @@
 
 	kref_init(&mgr->kref);
 
+	/* Remote AMP ctrl list initialization */
+	INIT_LIST_HEAD(&mgr->amp_ctrls);
+	mutex_init(&mgr->amp_ctrls_lock);
+
+	mutex_lock(&amp_mgr_list_lock);
+	list_add(&mgr->list, &amp_mgr_list);
+	mutex_unlock(&amp_mgr_list_lock);
+
 	return mgr;
 }
 
@@ -560,7 +823,7 @@
 {
 	struct amp_mgr *mgr;
 
-	mgr = amp_mgr_create(conn);
+	mgr = amp_mgr_create(conn, false);
 	if (!mgr) {
 		BT_ERR("Could not create AMP manager");
 		return NULL;
@@ -570,3 +833,139 @@
 
 	return mgr->a2mp_chan;
 }
+
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+{
+	struct amp_mgr *mgr;
+
+	mutex_lock(&amp_mgr_list_lock);
+	list_for_each_entry(mgr, &amp_mgr_list, list) {
+		if (mgr->state == state) {
+			amp_mgr_get(mgr);
+			mutex_unlock(&amp_mgr_list_lock);
+			return mgr;
+		}
+	}
+	mutex_unlock(&amp_mgr_list_lock);
+
+	return NULL;
+}
+
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
+{
+	struct amp_mgr *mgr;
+	struct a2mp_info_rsp rsp;
+
+	mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
+	if (!mgr)
+		return;
+
+	BT_DBG("%s mgr %p", hdev->name, mgr);
+
+	rsp.id = hdev->id;
+	rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+	if (hdev->amp_type != HCI_BREDR) {
+		rsp.status = 0;
+		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+	}
+
+	a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
+	amp_mgr_put(mgr);
+}
+
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
+{
+	struct amp_mgr *mgr;
+	struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+	struct a2mp_amp_assoc_rsp *rsp;
+	size_t len;
+
+	mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+	if (!mgr)
+		return;
+
+	BT_DBG("%s mgr %p", hdev->name, mgr);
+
+	len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
+	rsp = kzalloc(len, GFP_KERNEL);
+	if (!rsp) {
+		amp_mgr_put(mgr);
+		return;
+	}
+
+	rsp->id = hdev->id;
+
+	if (status) {
+		rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
+	} else {
+		rsp->status = A2MP_STATUS_SUCCESS;
+		memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
+	}
+
+	a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
+	amp_mgr_put(mgr);
+	kfree(rsp);
+}
+
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
+{
+	struct amp_mgr *mgr;
+	struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+	struct a2mp_physlink_req *req;
+	struct l2cap_chan *bredr_chan;
+	size_t len;
+
+	mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
+	if (!mgr)
+		return;
+
+	len = sizeof(*req) + loc_assoc->len;
+
+	BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
+
+	req = kzalloc(len, GFP_KERNEL);
+	if (!req) {
+		amp_mgr_put(mgr);
+		return;
+	}
+
+	bredr_chan = mgr->bredr_chan;
+	if (!bredr_chan)
+		goto clean;
+
+	req->local_id = hdev->id;
+	req->remote_id = bredr_chan->remote_amp_id;
+	memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
+
+	a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
+
+clean:
+	amp_mgr_put(mgr);
+	kfree(req);
+}
+
+void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+	struct l2cap_conn *conn = chan->conn;
+	struct amp_mgr *mgr = conn->hcon->amp_mgr;
+	struct a2mp_discov_req req;
+
+	BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
+
+	if (!mgr) {
+		mgr = amp_mgr_create(conn, true);
+		if (!mgr)
+			return;
+	}
+
+	mgr->bredr_chan = chan;
+
+	req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+	req.ext_feat = 0;
+	a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
+}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ba033f0..5355df6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -569,7 +569,6 @@
 {
 	struct bt_seq_state *s = seq->private;
 	struct bt_sock_list *l = s->l;
-	bdaddr_t src_baswapped, dst_baswapped;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
@@ -583,18 +582,17 @@
 	} else {
 		struct sock *sk = sk_entry(v);
 		struct bt_sock *bt = bt_sk(sk);
-		baswap(&src_baswapped, &bt->src);
-		baswap(&dst_baswapped, &bt->dst);
 
-		seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
+		seq_printf(seq,
+			   "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
 			   sk,
 			   atomic_read(&sk->sk_refcnt),
 			   sk_rmem_alloc_get(sk),
 			   sk_wmem_alloc_get(sk),
 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
 			   sock_i_ino(sk),
-			   &src_baswapped,
-			   &dst_baswapped,
+			   &bt->src,
+			   &bt->dst,
 			   bt->parent? sock_i_ino(bt->parent): 0LU);
 
 		if (l->custom_seq_show) {
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
new file mode 100644
index 0000000..1b0d92c
--- /dev/null
+++ b/net/bluetooth/amp.c
@@ -0,0 +1,471 @@
+/*
+   Copyright (c) 2011,2012 Intel Corp.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 and
+   only version 2 as published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
+#include <crypto/hash.h>
+
+/* Remote AMP Controllers interface */
+void amp_ctrl_get(struct amp_ctrl *ctrl)
+{
+	BT_DBG("ctrl %p orig refcnt %d", ctrl,
+	       atomic_read(&ctrl->kref.refcount));
+
+	kref_get(&ctrl->kref);
+}
+
+static void amp_ctrl_destroy(struct kref *kref)
+{
+	struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
+
+	BT_DBG("ctrl %p", ctrl);
+
+	kfree(ctrl->assoc);
+	kfree(ctrl);
+}
+
+int amp_ctrl_put(struct amp_ctrl *ctrl)
+{
+	BT_DBG("ctrl %p orig refcnt %d", ctrl,
+	       atomic_read(&ctrl->kref.refcount));
+
+	return kref_put(&ctrl->kref, &amp_ctrl_destroy);
+}
+
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
+{
+	struct amp_ctrl *ctrl;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return NULL;
+
+	kref_init(&ctrl->kref);
+	ctrl->id = id;
+
+	mutex_lock(&mgr->amp_ctrls_lock);
+	list_add(&ctrl->list, &mgr->amp_ctrls);
+	mutex_unlock(&mgr->amp_ctrls_lock);
+
+	BT_DBG("mgr %p ctrl %p", mgr, ctrl);
+
+	return ctrl;
+}
+
+void amp_ctrl_list_flush(struct amp_mgr *mgr)
+{
+	struct amp_ctrl *ctrl, *n;
+
+	BT_DBG("mgr %p", mgr);
+
+	mutex_lock(&mgr->amp_ctrls_lock);
+	list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
+		list_del(&ctrl->list);
+		amp_ctrl_put(ctrl);
+	}
+	mutex_unlock(&mgr->amp_ctrls_lock);
+}
+
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
+{
+	struct amp_ctrl *ctrl;
+
+	BT_DBG("mgr %p id %d", mgr, id);
+
+	mutex_lock(&mgr->amp_ctrls_lock);
+	list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
+		if (ctrl->id == id) {
+			amp_ctrl_get(ctrl);
+			mutex_unlock(&mgr->amp_ctrls_lock);
+			return ctrl;
+		}
+	}
+	mutex_unlock(&mgr->amp_ctrls_lock);
+
+	return NULL;
+}
+
+/* Physical Link interface */
+static u8 __next_handle(struct amp_mgr *mgr)
+{
+	if (++mgr->handle == 0)
+		mgr->handle = 1;
+
+	return mgr->handle;
+}
+
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+			     u8 remote_id, bool out)
+{
+	bdaddr_t *dst = mgr->l2cap_conn->dst;
+	struct hci_conn *hcon;
+
+	hcon = hci_conn_add(hdev, AMP_LINK, dst);
+	if (!hcon)
+		return NULL;
+
+	BT_DBG("hcon %p dst %pMR", hcon, dst);
+
+	hcon->state = BT_CONNECT;
+	hcon->attempt++;
+	hcon->handle = __next_handle(mgr);
+	hcon->remote_id = remote_id;
+	hcon->amp_mgr = amp_mgr_get(mgr);
+	hcon->out = out;
+
+	return hcon;
+}
+
+/* AMP crypto key generation interface */
+static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
+{
+	int ret = 0;
+	struct crypto_shash *tfm;
+
+	if (!ksize)
+		return -EINVAL;
+
+	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+	if (IS_ERR(tfm)) {
+		BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+
+	ret = crypto_shash_setkey(tfm, key, ksize);
+	if (ret) {
+		BT_DBG("crypto_ahash_setkey failed: err %d", ret);
+	} else {
+		struct {
+			struct shash_desc shash;
+			char ctx[crypto_shash_descsize(tfm)];
+		} desc;
+
+		desc.shash.tfm = tfm;
+		desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		ret = crypto_shash_digest(&desc.shash, plaintext, psize,
+					  output);
+	}
+
+	crypto_free_shash(tfm);
+	return ret;
+}
+
+int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct link_key *key;
+	u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
+	u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
+	int err;
+
+	if (!hci_conn_check_link_mode(conn))
+		return -EACCES;
+
+	BT_DBG("conn %p key_type %d", conn, conn->key_type);
+
+	/* Legacy key */
+	if (conn->key_type < 3) {
+		BT_ERR("Legacy key type %d", conn->key_type);
+		return -EACCES;
+	}
+
+	*type = conn->key_type;
+	*len = HCI_AMP_LINK_KEY_SIZE;
+
+	key = hci_find_link_key(hdev, &conn->dst);
+	if (!key) {
+		BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
+		return -EACCES;
+	}
+
+	/* BR/EDR Link Key concatenated together with itself */
+	memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
+	memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
+
+	/* Derive Generic AMP Link Key (gamp) */
+	err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
+	if (err) {
+		BT_ERR("Could not derive Generic AMP Key: err %d", err);
+		return err;
+	}
+
+	if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
+		BT_DBG("Use Generic AMP Key (gamp)");
+		memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
+		return err;
+	}
+
+	/* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
+	return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
+}
+
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
+{
+	struct hci_cp_read_local_amp_assoc cp;
+	struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+
+	BT_DBG("%s handle %d", hdev->name, phy_handle);
+
+	cp.phy_handle = phy_handle;
+	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+	cp.len_so_far = cpu_to_le16(loc_assoc->offset);
+
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
+{
+	struct hci_cp_read_local_amp_assoc cp;
+
+	memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
+	memset(&cp, 0, sizeof(cp));
+
+	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+	mgr->state = READ_LOC_AMP_ASSOC;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+				   struct hci_conn *hcon)
+{
+	struct hci_cp_read_local_amp_assoc cp;
+	struct amp_mgr *mgr = hcon->amp_mgr;
+
+	cp.phy_handle = hcon->handle;
+	cp.len_so_far = cpu_to_le16(0);
+	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+	mgr->state = READ_LOC_AMP_ASSOC_FINAL;
+
+	/* Read Local AMP Assoc final link information data */
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+/* Write AMP Assoc data fragments, returns true with last fragment written */
+static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
+				     struct hci_conn *hcon)
+{
+	struct hci_cp_write_remote_amp_assoc *cp;
+	struct amp_mgr *mgr = hcon->amp_mgr;
+	struct amp_ctrl *ctrl;
+	u16 frag_len, len;
+
+	ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
+	if (!ctrl)
+		return false;
+
+	if (!ctrl->assoc_rem_len) {
+		BT_DBG("all fragments are written");
+		ctrl->assoc_rem_len = ctrl->assoc_len;
+		ctrl->assoc_len_so_far = 0;
+
+		amp_ctrl_put(ctrl);
+		return true;
+	}
+
+	frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
+	len = frag_len + sizeof(*cp);
+
+	cp = kzalloc(len, GFP_KERNEL);
+	if (!cp) {
+		amp_ctrl_put(ctrl);
+		return false;
+	}
+
+	BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
+	       hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
+
+	cp->phy_handle = hcon->handle;
+	cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
+	cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
+	memcpy(cp->frag, ctrl->assoc, frag_len);
+
+	ctrl->assoc_len_so_far += frag_len;
+	ctrl->assoc_rem_len -= frag_len;
+
+	amp_ctrl_put(ctrl);
+
+	hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+
+	kfree(cp);
+
+	return false;
+}
+
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
+{
+	struct hci_conn *hcon;
+
+	BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!hcon)
+		return;
+
+	amp_write_rem_assoc_frag(hdev, hcon);
+}
+
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
+{
+	struct hci_conn *hcon;
+
+	BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!hcon)
+		return;
+
+	BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
+
+	amp_write_rem_assoc_frag(hdev, hcon);
+}
+
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+			struct hci_conn *hcon)
+{
+	struct hci_cp_create_phy_link cp;
+
+	cp.phy_handle = hcon->handle;
+
+	BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+	       hcon->handle);
+
+	if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+			    &cp.key_type)) {
+		BT_DBG("Cannot create link key");
+		return;
+	}
+
+	hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+			struct hci_conn *hcon)
+{
+	struct hci_cp_accept_phy_link cp;
+
+	cp.phy_handle = hcon->handle;
+
+	BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+	       hcon->handle);
+
+	if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+			    &cp.key_type)) {
+		BT_DBG("Cannot create link key");
+		return;
+	}
+
+	hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
+{
+	struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
+	struct amp_mgr *mgr = hs_hcon->amp_mgr;
+	struct l2cap_chan *bredr_chan;
+
+	BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
+
+	if (!bredr_hdev || !mgr || !mgr->bredr_chan)
+		return;
+
+	bredr_chan = mgr->bredr_chan;
+
+	l2cap_chan_lock(bredr_chan);
+
+	set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
+	bredr_chan->remote_amp_id = hs_hcon->remote_id;
+	bredr_chan->local_amp_id = hs_hcon->hdev->id;
+	bredr_chan->hs_hcon = hs_hcon;
+	bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
+
+	__l2cap_physical_cfm(bredr_chan, 0);
+
+	l2cap_chan_unlock(bredr_chan);
+
+	hci_dev_put(bredr_hdev);
+}
+
+void amp_create_logical_link(struct l2cap_chan *chan)
+{
+	struct hci_cp_create_accept_logical_link cp;
+	struct hci_conn *hcon;
+	struct hci_dev *hdev;
+
+	BT_DBG("chan %p", chan);
+
+	if (!chan->hs_hcon)
+		return;
+
+	hdev = hci_dev_hold(chan->hs_hcon->hdev);
+	if (!hdev)
+		return;
+
+	BT_DBG("chan %p dst %pMR", chan, chan->conn->dst);
+
+	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst);
+	if (!hcon)
+		goto done;
+
+	cp.phy_handle = hcon->handle;
+
+	cp.tx_flow_spec.id = chan->local_id;
+	cp.tx_flow_spec.stype = chan->local_stype;
+	cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
+	cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+	cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
+	cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
+
+	cp.rx_flow_spec.id = chan->remote_id;
+	cp.rx_flow_spec.stype = chan->remote_stype;
+	cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
+	cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
+	cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
+	cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
+
+	if (hcon->out)
+		hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
+			     &cp);
+	else
+		hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
+			     &cp);
+
+done:
+	hci_dev_put(hdev);
+}
+
+void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+	struct hci_conn *hcon = hchan->conn;
+	struct hci_cp_disconn_logical_link cp;
+
+	if (hcon->state != BT_CONNECTED) {
+		BT_DBG("hchan %p not connected", hchan);
+		return;
+	}
+
+	cp.log_handle = cpu_to_le16(hchan->handle);
+	hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
+}
+
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
+{
+	BT_DBG("hchan %p", hchan);
+
+	hci_chan_del(hchan);
+}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 4a6620b..a5b6397 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -182,8 +182,7 @@
 			a2 = data;
 			data += ETH_ALEN;
 
-			BT_DBG("mc filter %s -> %s",
-				batostr((void *) a1), batostr((void *) a2));
+			BT_DBG("mc filter %pMR -> %pMR", a1, a2);
 
 			/* Iterate from a1 to a2 */
 			set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 98f86f9..e58c8b3 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,7 +25,6 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/export.h>
 #include <linux/etherdevice.h>
 
 #include <net/bluetooth/bluetooth.h>
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 50f0d13..a4a9d4b 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -20,7 +20,7 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/types.h>
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 6c9c1fd..e0a6ebf 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -353,7 +353,7 @@
 
 	BT_DBG("mtu %d", session->mtu);
 
-	sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst));
+	sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
 
 	session->sock  = sock;
 	session->state = BT_CONFIG;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index aacb802..1c57482 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -20,7 +20,7 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include <linux/types.h>
 #include <linux/capability.h>
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b9196a4..25bfce0 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -130,6 +130,20 @@
 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
 }
 
+static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
+{
+	struct hci_cp_disconn_phy_link cp;
+
+	BT_DBG("hcon %p", conn);
+
+	conn->state = BT_DISCONN;
+
+	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+	cp.reason = reason;
+	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+		     sizeof(cp), &cp);
+}
+
 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
 {
 	struct hci_dev *hdev = conn->hdev;
@@ -230,11 +244,24 @@
 	}
 }
 
+static void hci_conn_disconnect(struct hci_conn *conn)
+{
+	__u8 reason = hci_proto_disconn_ind(conn);
+
+	switch (conn->type) {
+	case ACL_LINK:
+		hci_acl_disconn(conn, reason);
+		break;
+	case AMP_LINK:
+		hci_amp_disconn(conn, reason);
+		break;
+	}
+}
+
 static void hci_conn_timeout(struct work_struct *work)
 {
 	struct hci_conn *conn = container_of(work, struct hci_conn,
 					     disc_work.work);
-	__u8 reason;
 
 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 
@@ -253,8 +280,7 @@
 		break;
 	case BT_CONFIG:
 	case BT_CONNECTED:
-		reason = hci_proto_disconn_ind(conn);
-		hci_acl_disconn(conn, reason);
+		hci_conn_disconnect(conn);
 		break;
 	default:
 		conn->state = BT_CLOSED;
@@ -320,7 +346,7 @@
 {
 	struct hci_conn *conn;
 
-	BT_DBG("%s dst %s", hdev->name, batostr(dst));
+	BT_DBG("%s dst %pMR", hdev->name, dst);
 
 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
 	if (!conn)
@@ -437,7 +463,7 @@
 	int use_src = bacmp(src, BDADDR_ANY);
 	struct hci_dev *hdev = NULL, *d;
 
-	BT_DBG("%s -> %s", batostr(src), batostr(dst));
+	BT_DBG("%pMR -> %pMR", src, dst);
 
 	read_lock(&hci_dev_list_lock);
 
@@ -476,6 +502,9 @@
 {
 	struct hci_conn *le;
 
+	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
+		return ERR_PTR(-ENOTSUPP);
+
 	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 	if (!le) {
 		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
@@ -567,7 +596,7 @@
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
 {
-	BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type);
+	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
 
 	switch (type) {
 	case LE_LINK:
@@ -933,6 +962,7 @@
 
 	chan->conn = conn;
 	skb_queue_head_init(&chan->data_q);
+	chan->state = BT_CONNECTED;
 
 	list_add_rcu(&chan->list, &conn->chan_list);
 
@@ -950,6 +980,8 @@
 
 	synchronize_rcu();
 
+	hci_conn_put(conn);
+
 	skb_queue_purge(&chan->data_q);
 	kfree(chan);
 }
@@ -963,3 +995,35 @@
 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
 		hci_chan_del(chan);
 }
+
+static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
+						 __u16 handle)
+{
+	struct hci_chan *hchan;
+
+	list_for_each_entry(hchan, &hcon->chan_list, list) {
+		if (hchan->handle == handle)
+			return hchan;
+	}
+
+	return NULL;
+}
+
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *hcon;
+	struct hci_chan *hchan = NULL;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(hcon, &h->list, list) {
+		hchan = __hci_chan_lookup_handle(hcon, handle);
+		if (hchan)
+			break;
+	}
+
+	rcu_read_unlock();
+
+	return hchan;
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index a0a2f97b..7140f83 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -178,48 +178,13 @@
 
 static void bredr_init(struct hci_dev *hdev)
 {
-	struct hci_cp_delete_stored_link_key cp;
-	__le16 param;
-	__u8 flt_type;
-
 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
 
-	/* Mandatory initialization */
-
 	/* Read Local Supported Features */
 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
 
 	/* Read Local Version */
 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
-	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
-	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-
-	/* Read BD Address */
-	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
-
-	/* Read Class of Device */
-	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-
-	/* Read Local Name */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
-	/* Read Voice Setting */
-	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-
-	/* Optional initialization */
-
-	/* Clear Event Filters */
-	flt_type = HCI_FLT_CLEAR_ALL;
-	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
-	/* Connection accept timeout ~20 secs */
-	param = __constant_cpu_to_le16(0x7d00);
-	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
-	bacpy(&cp.bdaddr, BDADDR_ANY);
-	cp.delete_all = 1;
-	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
 }
 
 static void amp_init(struct hci_dev *hdev)
@@ -273,14 +238,6 @@
 	}
 }
 
-static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
-{
-	BT_DBG("%s", hdev->name);
-
-	/* Read LE buffer size */
-	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
-}
-
 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
 {
 	__u8 scan = opt;
@@ -405,7 +362,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+	BT_DBG("cache %p, %pMR", cache, bdaddr);
 
 	list_for_each_entry(e, &cache->all, all) {
 		if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -421,7 +378,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+	BT_DBG("cache %p, %pMR", cache, bdaddr);
 
 	list_for_each_entry(e, &cache->unknown, list) {
 		if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -438,7 +395,7 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *e;
 
-	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
+	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
 
 	list_for_each_entry(e, &cache->resolve, list) {
 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
@@ -475,7 +432,9 @@
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *ie;
 
-	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
+	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+
+	hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
 	if (ssp)
 		*ssp = data->ssp_mode;
@@ -637,6 +596,99 @@
 	return err;
 }
 
+static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
+{
+	u8 ad_len = 0, flags = 0;
+	size_t name_len;
+
+	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+		flags |= LE_AD_GENERAL;
+
+	if (!lmp_bredr_capable(hdev))
+		flags |= LE_AD_NO_BREDR;
+
+	if (lmp_le_br_capable(hdev))
+		flags |= LE_AD_SIM_LE_BREDR_CTRL;
+
+	if (lmp_host_le_br_capable(hdev))
+		flags |= LE_AD_SIM_LE_BREDR_HOST;
+
+	if (flags) {
+		BT_DBG("adv flags 0x%02x", flags);
+
+		ptr[0] = 2;
+		ptr[1] = EIR_FLAGS;
+		ptr[2] = flags;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+		ptr[0] = 2;
+		ptr[1] = EIR_TX_POWER;
+		ptr[2] = (u8) hdev->adv_tx_power;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	name_len = strlen(hdev->dev_name);
+	if (name_len > 0) {
+		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+		if (name_len > max_len) {
+			name_len = max_len;
+			ptr[1] = EIR_NAME_SHORT;
+		} else
+			ptr[1] = EIR_NAME_COMPLETE;
+
+		ptr[0] = name_len + 1;
+
+		memcpy(ptr + 2, hdev->dev_name, name_len);
+
+		ad_len += (name_len + 2);
+		ptr += (name_len + 2);
+	}
+
+	return ad_len;
+}
+
+int hci_update_ad(struct hci_dev *hdev)
+{
+	struct hci_cp_le_set_adv_data cp;
+	u8 len;
+	int err;
+
+	hci_dev_lock(hdev);
+
+	if (!lmp_le_capable(hdev)) {
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = create_ad(hdev, cp.data);
+
+	if (hdev->adv_data_len == len &&
+	    memcmp(cp.data, hdev->adv_data, len) == 0) {
+		err = 0;
+		goto unlock;
+	}
+
+	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+	hdev->adv_data_len = len;
+
+	cp.length = len;
+	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+
+unlock:
+	hci_dev_unlock(hdev);
+
+	return err;
+}
+
 /* ---- HCI ioctl helpers ---- */
 
 int hci_dev_open(__u16 dev)
@@ -687,10 +739,6 @@
 
 		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
 
-		if (lmp_host_le_capable(hdev))
-			ret = __hci_request(hdev, hci_le_init_req, 0,
-					    HCI_INIT_TIMEOUT);
-
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
@@ -698,6 +746,7 @@
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
+		hci_update_ad(hdev);
 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
 		    mgmt_valid_hdev(hdev)) {
 			hci_dev_lock(hdev);
@@ -1039,10 +1088,17 @@
 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
 	di.flags    = hdev->flags;
 	di.pkt_type = hdev->pkt_type;
-	di.acl_mtu  = hdev->acl_mtu;
-	di.acl_pkts = hdev->acl_pkts;
-	di.sco_mtu  = hdev->sco_mtu;
-	di.sco_pkts = hdev->sco_pkts;
+	if (lmp_bredr_capable(hdev)) {
+		di.acl_mtu  = hdev->acl_mtu;
+		di.acl_pkts = hdev->acl_pkts;
+		di.sco_mtu  = hdev->sco_mtu;
+		di.sco_pkts = hdev->sco_pkts;
+	} else {
+		di.acl_mtu  = hdev->le_mtu;
+		di.acl_pkts = hdev->le_pkts;
+		di.sco_mtu  = 0;
+		di.sco_pkts = 0;
+	}
 	di.link_policy = hdev->link_policy;
 	di.link_mode   = hdev->link_mode;
 
@@ -1259,7 +1315,7 @@
 		list_add(&key->list, &hdev->link_keys);
 	}
 
-	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
 
 	/* Some buggy controller combinations generate a changed
 	 * combination key for legacy pairing even when there's no
@@ -1338,7 +1394,7 @@
 	if (!key)
 		return -ENOENT;
 
-	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
 
 	list_del(&key->list);
 	kfree(key);
@@ -1354,7 +1410,7 @@
 		if (bacmp(bdaddr, &k->bdaddr))
 			continue;
 
-		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
 
 		list_del(&k->list);
 		kfree(k);
@@ -1401,7 +1457,7 @@
 	if (!data)
 		return -ENOENT;
 
-	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
 
 	list_del(&data->list);
 	kfree(data);
@@ -1440,7 +1496,7 @@
 	memcpy(data->hash, hash, sizeof(data->hash));
 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
 
-	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
+	BT_DBG("%s for %pMR", hdev->name, bdaddr);
 
 	return 0;
 }
@@ -1617,6 +1673,9 @@
 
 	BT_DBG("%s", hdev->name);
 
+	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+		return -ENOTSUPP;
+
 	if (work_busy(&hdev->le_scan))
 		return -EINPROGRESS;
 
@@ -1643,6 +1702,8 @@
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
 	hdev->io_capability = 0x03; /* No Input No Output */
+	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
@@ -2153,9 +2214,10 @@
 	hdr->dlen   = cpu_to_le16(len);
 }
 
-static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
 			  struct sk_buff *skb, __u16 flags)
 {
+	struct hci_conn *conn = chan->conn;
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;
 
@@ -2163,7 +2225,18 @@
 	skb->data_len = 0;
 
 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
+
+	switch (hdev->dev_type) {
+	case HCI_BREDR:
+		hci_add_acl_hdr(skb, conn->handle, flags);
+		break;
+	case HCI_AMP:
+		hci_add_acl_hdr(skb, chan->handle, flags);
+		break;
+	default:
+		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+		return;
+	}
 
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
@@ -2202,14 +2275,13 @@
 
 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
 {
-	struct hci_conn *conn = chan->conn;
-	struct hci_dev *hdev = conn->hdev;
+	struct hci_dev *hdev = chan->conn->hdev;
 
 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
 
 	skb->dev = (void *) hdev;
 
-	hci_queue_acl(conn, &chan->data_q, skb, flags);
+	hci_queue_acl(chan, &chan->data_q, skb, flags);
 
 	queue_work(hdev->workqueue, &hdev->tx_work);
 }
@@ -2311,8 +2383,8 @@
 	/* Kill stalled connections */
 	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type == type && c->sent) {
-			BT_ERR("%s killing stalled connection %s",
-			       hdev->name, batostr(&c->dst));
+			BT_ERR("%s killing stalled connection %pMR",
+			       hdev->name, &c->dst);
 			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
 		}
 	}
@@ -2381,6 +2453,9 @@
 	case ACL_LINK:
 		cnt = hdev->acl_cnt;
 		break;
+	case AMP_LINK:
+		cnt = hdev->block_cnt;
+		break;
 	case SCO_LINK:
 	case ESCO_LINK:
 		cnt = hdev->sco_cnt;
@@ -2510,11 +2585,19 @@
 	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
+	u8 type;
 
 	__check_timeout(hdev, cnt);
 
+	BT_DBG("%s", hdev->name);
+
+	if (hdev->dev_type == HCI_AMP)
+		type = AMP_LINK;
+	else
+		type = ACL_LINK;
+
 	while (hdev->block_cnt > 0 &&
-	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+	       (chan = hci_chan_sent(hdev, type, &quote))) {
 		u32 priority = (skb_peek(&chan->data_q))->priority;
 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
 			int blocks;
@@ -2547,14 +2630,19 @@
 	}
 
 	if (cnt != hdev->block_cnt)
-		hci_prio_recalculate(hdev, ACL_LINK);
+		hci_prio_recalculate(hdev, type);
 }
 
 static void hci_sched_acl(struct hci_dev *hdev)
 {
 	BT_DBG("%s", hdev->name);
 
-	if (!hci_conn_num(hdev, ACL_LINK))
+	/* No ACL link over BR/EDR controller */
+	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
+		return;
+
+	/* No AMP link over AMP controller */
+	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
 		return;
 
 	switch (hdev->flow_ctl_mode) {
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2022b43..9f5c5f2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,12 +24,13 @@
 
 /* Bluetooth HCI event handling. */
 
-#include <linux/export.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
+#include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
 
 /* Handle HCI Event packets */
 
@@ -201,6 +202,11 @@
 			     BIT(HCI_PERIODIC_INQ));
 
 	hdev->discovery.state = DISCOVERY_STOPPED;
+	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
+
+	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
+	hdev->adv_data_len = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -223,6 +229,9 @@
 
 	hci_dev_unlock(hdev);
 
+	if (!status && !test_bit(HCI_INIT, &hdev->flags))
+		hci_update_ad(hdev);
+
 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
 }
 
@@ -438,7 +447,7 @@
 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
-	void *sent;
+	struct hci_cp_write_ssp_mode *sent;
 
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
@@ -446,10 +455,17 @@
 	if (!sent)
 		return;
 
+	if (!status) {
+		if (sent->mode)
+			hdev->host_features[0] |= LMP_HOST_SSP;
+		else
+			hdev->host_features[0] &= ~LMP_HOST_SSP;
+	}
+
 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
-		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
+		mgmt_ssp_enable_complete(hdev, sent->mode, status);
 	else if (!status) {
-		if (*((u8 *) sent))
+		if (sent->mode)
 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
 		else
 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
@@ -458,10 +474,10 @@
 
 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
 {
-	if (hdev->features[6] & LMP_EXT_INQ)
+	if (lmp_ext_inq_capable(hdev))
 		return 2;
 
-	if (hdev->features[3] & LMP_RSSI_INQ)
+	if (lmp_inq_rssi_capable(hdev))
 		return 1;
 
 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
@@ -505,28 +521,30 @@
 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
 
-	events[4] |= 0x01; /* Flow Specification Complete */
-	events[4] |= 0x02; /* Inquiry Result with RSSI */
-	events[4] |= 0x04; /* Read Remote Extended Features Complete */
-	events[5] |= 0x08; /* Synchronous Connection Complete */
-	events[5] |= 0x10; /* Synchronous Connection Changed */
+	if (lmp_bredr_capable(hdev)) {
+		events[4] |= 0x01; /* Flow Specification Complete */
+		events[4] |= 0x02; /* Inquiry Result with RSSI */
+		events[4] |= 0x04; /* Read Remote Extended Features Complete */
+		events[5] |= 0x08; /* Synchronous Connection Complete */
+		events[5] |= 0x10; /* Synchronous Connection Changed */
+	}
 
-	if (hdev->features[3] & LMP_RSSI_INQ)
+	if (lmp_inq_rssi_capable(hdev))
 		events[4] |= 0x02; /* Inquiry Result with RSSI */
 
 	if (lmp_sniffsubr_capable(hdev))
 		events[5] |= 0x20; /* Sniff Subrating */
 
-	if (hdev->features[5] & LMP_PAUSE_ENC)
+	if (lmp_pause_enc_capable(hdev))
 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
 
-	if (hdev->features[6] & LMP_EXT_INQ)
+	if (lmp_ext_inq_capable(hdev))
 		events[5] |= 0x40; /* Extended Inquiry Result */
 
 	if (lmp_no_flush_capable(hdev))
 		events[7] |= 0x01; /* Enhanced Flush Complete */
 
-	if (hdev->features[7] & LMP_LSTO)
+	if (lmp_lsto_capable(hdev))
 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
 
 	if (lmp_ssp_capable(hdev)) {
@@ -546,6 +564,53 @@
 		events[7] |= 0x20;	/* LE Meta-Event */
 
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+
+	if (lmp_le_capable(hdev)) {
+		memset(events, 0, sizeof(events));
+		events[0] = 0x1f;
+		hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
+			     sizeof(events), events);
+	}
+}
+
+static void bredr_setup(struct hci_dev *hdev)
+{
+	struct hci_cp_delete_stored_link_key cp;
+	__le16 param;
+	__u8 flt_type;
+
+	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
+	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+
+	/* Read Class of Device */
+	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+
+	/* Read Local Name */
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+	/* Read Voice Setting */
+	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+	/* Clear Event Filters */
+	flt_type = HCI_FLT_CLEAR_ALL;
+	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+
+	/* Connection accept timeout ~20 secs */
+	param = __constant_cpu_to_le16(0x7d00);
+	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+	bacpy(&cp.bdaddr, BDADDR_ANY);
+	cp.delete_all = 1;
+	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void le_setup(struct hci_dev *hdev)
+{
+	/* Read LE Buffer Size */
+	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
+
+	/* Read LE Advertising Channel TX Power */
+	hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
 }
 
 static void hci_setup(struct hci_dev *hdev)
@@ -553,6 +618,15 @@
 	if (hdev->dev_type != HCI_BREDR)
 		return;
 
+	/* Read BD Address */
+	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+
+	if (lmp_bredr_capable(hdev))
+		bredr_setup(hdev);
+
+	if (lmp_le_capable(hdev))
+		le_setup(hdev);
+
 	hci_setup_event_mask(hdev);
 
 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
@@ -573,13 +647,13 @@
 		}
 	}
 
-	if (hdev->features[3] & LMP_RSSI_INQ)
+	if (lmp_inq_rssi_capable(hdev))
 		hci_setup_inquiry_mode(hdev);
 
-	if (hdev->features[7] & LMP_INQ_TX_PWR)
+	if (lmp_inq_tx_pwr_capable(hdev))
 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
 
-	if (hdev->features[7] & LMP_EXTFEATURES) {
+	if (lmp_ext_feat_capable(hdev)) {
 		struct hci_cp_read_local_ext_features cp;
 
 		cp.page = 0x01;
@@ -626,11 +700,11 @@
 
 	if (lmp_rswitch_capable(hdev))
 		link_policy |= HCI_LP_RSWITCH;
-	if (hdev->features[0] & LMP_HOLD)
+	if (lmp_hold_capable(hdev))
 		link_policy |= HCI_LP_HOLD;
 	if (lmp_sniff_capable(hdev))
 		link_policy |= HCI_LP_SNIFF;
-	if (hdev->features[1] & LMP_PARK)
+	if (lmp_park_capable(hdev))
 		link_policy |= HCI_LP_PARK;
 
 	cp.policy = cpu_to_le16(link_policy);
@@ -720,10 +794,10 @@
 
 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
 		cp.le = 1;
-		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+		cp.simul = !!lmp_le_br_capable(hdev);
 	}
 
-	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
+	if (cp.le != !!lmp_host_le_capable(hdev))
 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
 			     &cp);
 }
@@ -846,7 +920,7 @@
 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
 	if (rp->status)
-		return;
+		goto a2mp_rsp;
 
 	hdev->amp_status = rp->amp_status;
 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -860,6 +934,46 @@
 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
 
 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+
+a2mp_rsp:
+	a2mp_send_getinfo_rsp(hdev);
+}
+
+static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
+	struct amp_assoc *assoc = &hdev->loc_assoc;
+	size_t rem_len, frag_len;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		goto a2mp_rsp;
+
+	frag_len = skb->len - sizeof(*rp);
+	rem_len = __le16_to_cpu(rp->rem_len);
+
+	if (rem_len > frag_len) {
+		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+		assoc->offset += frag_len;
+
+		/* Read other fragments */
+		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+		return;
+	}
+
+	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+	assoc->len = assoc->offset + rem_len;
+	assoc->offset = 0;
+
+a2mp_rsp:
+	/* Send A2MP Rsp when all fragments are received */
+	a2mp_send_getampassoc_rsp(hdev, rp->status);
+	a2mp_send_create_phy_link_req(hdev, rp->status);
 }
 
 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
@@ -976,6 +1090,31 @@
 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
 }
 
+static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (!rp->status) {
+		hdev->adv_tx_power = rp->tx_power;
+		if (!test_bit(HCI_INIT, &hdev->flags))
+			hci_update_ad(hdev);
+	}
+
+	hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
+}
+
+static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
+}
+
 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
@@ -1051,6 +1190,33 @@
 	hci_dev_unlock(hdev);
 }
 
+static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	__u8 *sent, status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
+	if (!sent)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (!status) {
+		if (*sent)
+			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+		else
+			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+	}
+
+	hci_dev_unlock(hdev);
+
+	if (!test_bit(HCI_INIT, &hdev->flags))
+		hci_update_ad(hdev);
+
+	hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
+}
+
 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1165,6 +1331,11 @@
 			hdev->host_features[0] |= LMP_HOST_LE;
 		else
 			hdev->host_features[0] &= ~LMP_HOST_LE;
+
+		if (sent->simul)
+			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
+		else
+			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
 	}
 
 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
@@ -1174,6 +1345,20 @@
 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
 }
 
+static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
+					  struct sk_buff *skb)
+{
+	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+	       hdev->name, rp->status, rp->phy_handle);
+
+	if (rp->status)
+		return;
+
+	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
+}
+
 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1210,7 +1395,7 @@
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 
-	BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
+	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
 
 	if (status) {
 		if (conn && conn->state == BT_CONNECT) {
@@ -1639,8 +1824,7 @@
 			return;
 		}
 
-		BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
-		       conn);
+		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
 
 		conn->state = BT_CLOSED;
 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
@@ -1657,6 +1841,52 @@
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 }
 
+static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
+{
+	struct hci_cp_create_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (status) {
+		struct hci_conn *hcon;
+
+		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+		if (hcon)
+			hci_conn_del(hcon);
+	} else {
+		amp_write_remote_assoc(hdev, cp->phy_handle);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
+{
+	struct hci_cp_accept_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+	if (!cp)
+		return;
+
+	amp_write_remote_assoc(hdev, cp->phy_handle);
+}
+
+static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+}
+
 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1822,7 +2052,7 @@
 	struct hci_ev_conn_request *ev = (void *) skb->data;
 	int mask = hdev->link_mode;
 
-	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
+	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
 	       ev->link_type);
 
 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
@@ -2314,6 +2544,10 @@
 		hci_cc_read_local_amp_info(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_AMP_ASSOC:
+		hci_cc_read_local_amp_assoc(hdev, skb);
+		break;
+
 	case HCI_OP_DELETE_STORED_LINK_KEY:
 		hci_cc_delete_stored_link_key(hdev, skb);
 		break;
@@ -2350,6 +2584,14 @@
 		hci_cc_le_read_buffer_size(hdev, skb);
 		break;
 
+	case HCI_OP_LE_READ_ADV_TX_POWER:
+		hci_cc_le_read_adv_tx_power(hdev, skb);
+		break;
+
+	case HCI_OP_LE_SET_EVENT_MASK:
+		hci_cc_le_set_event_mask(hdev, skb);
+		break;
+
 	case HCI_OP_USER_CONFIRM_REPLY:
 		hci_cc_user_confirm_reply(hdev, skb);
 		break;
@@ -2370,6 +2612,10 @@
 		hci_cc_le_set_scan_param(hdev, skb);
 		break;
 
+	case HCI_OP_LE_SET_ADV_ENABLE:
+		hci_cc_le_set_adv_enable(hdev, skb);
+		break;
+
 	case HCI_OP_LE_SET_SCAN_ENABLE:
 		hci_cc_le_set_scan_enable(hdev, skb);
 		break;
@@ -2386,6 +2632,10 @@
 		hci_cc_write_le_host_supported(hdev, skb);
 		break;
 
+	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
+		hci_cc_write_remote_amp_assoc(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
 		break;
@@ -2467,6 +2717,18 @@
 		hci_cs_le_start_enc(hdev, ev->status);
 		break;
 
+	case HCI_OP_CREATE_PHY_LINK:
+		hci_cs_create_phylink(hdev, ev->status);
+		break;
+
+	case HCI_OP_ACCEPT_PHY_LINK:
+		hci_cs_accept_phylink(hdev, ev->status);
+		break;
+
+	case HCI_OP_CREATE_LOGICAL_LINK:
+		hci_cs_create_logical_link(hdev, ev->status);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
 		break;
@@ -2574,6 +2836,27 @@
 	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
+static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
+						 __u16 handle)
+{
+	struct hci_chan *chan;
+
+	switch (hdev->dev_type) {
+	case HCI_BREDR:
+		return hci_conn_hash_lookup_handle(hdev, handle);
+	case HCI_AMP:
+		chan = hci_chan_lookup_handle(hdev, handle);
+		if (chan)
+			return chan->conn;
+		break;
+	default:
+		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+		break;
+	}
+
+	return NULL;
+}
+
 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
@@ -2595,13 +2878,13 @@
 
 	for (i = 0; i < ev->num_hndl; i++) {
 		struct hci_comp_blocks_info *info = &ev->handles[i];
-		struct hci_conn *conn;
+		struct hci_conn *conn = NULL;
 		__u16  handle, block_count;
 
 		handle = __le16_to_cpu(info->handle);
 		block_count = __le16_to_cpu(info->blocks);
 
-		conn = hci_conn_hash_lookup_handle(hdev, handle);
+		conn = __hci_conn_lookup_handle(hdev, handle);
 		if (!conn)
 			continue;
 
@@ -2609,6 +2892,7 @@
 
 		switch (conn->type) {
 		case ACL_LINK:
+		case AMP_LINK:
 			hdev->block_cnt += block_count;
 			if (hdev->block_cnt > hdev->num_blocks)
 				hdev->block_cnt = hdev->num_blocks;
@@ -2705,13 +2989,13 @@
 
 	key = hci_find_link_key(hdev, &ev->bdaddr);
 	if (!key) {
-		BT_DBG("%s link key not found for %s", hdev->name,
-		       batostr(&ev->bdaddr));
+		BT_DBG("%s link key not found for %pMR", hdev->name,
+		       &ev->bdaddr);
 		goto not_found;
 	}
 
-	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
-	       batostr(&ev->bdaddr));
+	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
+	       &ev->bdaddr);
 
 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
 	    key->type == HCI_LK_DEBUG_COMBINATION) {
@@ -3419,6 +3703,130 @@
 	hci_dev_unlock(hdev);
 }
 
+static void hci_phy_link_complete_evt(struct hci_dev *hdev,
+				      struct sk_buff *skb)
+{
+	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
+	struct hci_conn *hcon, *bredr_hcon;
+
+	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
+	       ev->status);
+
+	hci_dev_lock(hdev);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (!hcon) {
+		hci_dev_unlock(hdev);
+		return;
+	}
+
+	if (ev->status) {
+		hci_conn_del(hcon);
+		hci_dev_unlock(hdev);
+		return;
+	}
+
+	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
+
+	hcon->state = BT_CONNECTED;
+	bacpy(&hcon->dst, &bredr_hcon->dst);
+
+	hci_conn_hold(hcon);
+	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+	hci_conn_put(hcon);
+
+	hci_conn_hold_device(hcon);
+	hci_conn_add_sysfs(hcon);
+
+	amp_physical_cfm(bredr_hcon, hcon);
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
+	struct hci_conn *hcon;
+	struct hci_chan *hchan;
+	struct amp_mgr *mgr;
+
+	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
+	       ev->status);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (!hcon)
+		return;
+
+	/* Create AMP hchan */
+	hchan = hci_chan_create(hcon);
+	if (!hchan)
+		return;
+
+	hchan->handle = le16_to_cpu(ev->handle);
+
+	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+
+	mgr = hcon->amp_mgr;
+	if (mgr && mgr->bredr_chan) {
+		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
+
+		l2cap_chan_lock(bredr_chan);
+
+		bredr_chan->conn->mtu = hdev->block_mtu;
+		l2cap_logical_cfm(bredr_chan, hchan, 0);
+		hci_conn_hold(hcon);
+
+		l2cap_chan_unlock(bredr_chan);
+	}
+}
+
+static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+					     struct sk_buff *skb)
+{
+	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
+	struct hci_chan *hchan;
+
+	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
+	       le16_to_cpu(ev->handle), ev->status);
+
+	if (ev->status)
+		return;
+
+	hci_dev_lock(hdev);
+
+	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+	if (!hchan)
+		goto unlock;
+
+	amp_destroy_logical_link(hchan, ev->reason);
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
+static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
+					     struct sk_buff *skb)
+{
+	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
+	struct hci_conn *hcon;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+	if (ev->status)
+		return;
+
+	hci_dev_lock(hdev);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (hcon) {
+		hcon->state = BT_CLOSED;
+		hci_conn_del(hcon);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
@@ -3558,6 +3966,22 @@
 	}
 }
 
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_channel_selected *ev = (void *) skb->data;
+	struct hci_conn *hcon;
+
+	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+	skb_pull(skb, sizeof(*ev));
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (!hcon)
+		return;
+
+	amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_event_hdr *hdr = (void *) skb->data;
@@ -3722,10 +4146,30 @@
 		hci_le_meta_evt(hdev, skb);
 		break;
 
+	case HCI_EV_CHANNEL_SELECTED:
+		hci_chan_selected_evt(hdev, skb);
+		break;
+
 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
 		hci_remote_oob_data_request_evt(hdev, skb);
 		break;
 
+	case HCI_EV_PHY_LINK_COMPLETE:
+		hci_phy_link_complete_evt(hdev, skb);
+		break;
+
+	case HCI_EV_LOGICAL_LINK_COMPLETE:
+		hci_loglink_complete_evt(hdev, skb);
+		break;
+
+	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
+		hci_disconn_loglink_complete_evt(hdev, skb);
+		break;
+
+	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
+		hci_disconn_phylink_complete_evt(hdev, skb);
+		break;
+
 	case HCI_EV_NUM_COMP_BLOCKS:
 		hci_num_comp_blocks_evt(hdev, skb);
 		break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a20e61c..55cceee 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -38,7 +38,7 @@
 				 struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
-	return sprintf(buf, "%s\n", batostr(&conn->dst));
+	return sprintf(buf, "%pMR\n", &conn->dst);
 }
 
 static ssize_t show_link_features(struct device *dev,
@@ -224,7 +224,7 @@
 			    struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
+	return sprintf(buf, "%pMR\n", &hdev->bdaddr);
 }
 
 static ssize_t show_features(struct device *dev,
@@ -406,8 +406,8 @@
 
 	list_for_each_entry(e, &cache->all, all) {
 		struct inquiry_data *data = &e->data;
-		seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
-			   batostr(&data->bdaddr),
+		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+			   &data->bdaddr,
 			   data->pscan_rep_mode, data->pscan_period_mode,
 			   data->pscan_mode, data->dev_class[2],
 			   data->dev_class[1], data->dev_class[0],
@@ -440,7 +440,7 @@
 	hci_dev_lock(hdev);
 
 	list_for_each_entry(b, &hdev->blacklist, list)
-		seq_printf(f, "%s\n", batostr(&b->bdaddr));
+		seq_printf(f, "%pMR\n", &b->bdaddr);
 
 	hci_dev_unlock(hdev);
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index ccd985d..0c00284 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -932,8 +932,12 @@
 	hid->country = req->country;
 
 	strncpy(hid->name, req->name, 128);
-	strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
-	strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+
+	snprintf(hid->phys, sizeof(hid->phys), "%pMR",
+		 &bt_sk(session->ctrl_sock->sk)->src);
+
+	snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
+		 &bt_sk(session->ctrl_sock->sk)->dst);
 
 	hid->dev.parent = &session->conn->dev;
 	hid->ll_driver = &hidp_hid_driver;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a91239dc..b52f66d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -38,6 +38,7 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 #include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/amp.h>
 
 bool disable_ertm;
 
@@ -48,19 +49,20 @@
 static DEFINE_RWLOCK(chan_list_lock);
 
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
-				u8 code, u8 ident, u16 dlen, void *data);
+				       u8 code, u8 ident, u16 dlen, void *data);
 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
-								void *data);
+			   void *data);
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
 				   struct l2cap_chan *chan, int err);
 
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
-		    struct sk_buff_head *skbs, u8 event);
+		     struct sk_buff_head *skbs, u8 event);
 
 /* ---- L2CAP channels ---- */
 
-static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
+static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+						   u16 cid)
 {
 	struct l2cap_chan *c;
 
@@ -71,7 +73,8 @@
 	return NULL;
 }
 
-static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+						   u16 cid)
 {
 	struct l2cap_chan *c;
 
@@ -84,7 +87,8 @@
 
 /* Find channel with given SCID.
  * Returns locked channel. */
-static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+						 u16 cid)
 {
 	struct l2cap_chan *c;
 
@@ -97,7 +101,25 @@
 	return c;
 }
 
-static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+/* Find channel with given DCID.
+ * Returns locked channel.
+ */
+static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+						 u16 cid)
+{
+	struct l2cap_chan *c;
+
+	mutex_lock(&conn->chan_lock);
+	c = __l2cap_get_chan_by_dcid(conn, cid);
+	if (c)
+		l2cap_chan_lock(c);
+	mutex_unlock(&conn->chan_lock);
+
+	return c;
+}
+
+static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+						    u8 ident)
 {
 	struct l2cap_chan *c;
 
@@ -108,6 +130,20 @@
 	return NULL;
 }
 
+static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+						  u8 ident)
+{
+	struct l2cap_chan *c;
+
+	mutex_lock(&conn->chan_lock);
+	c = __l2cap_get_chan_by_ident(conn, ident);
+	if (c)
+		l2cap_chan_lock(c);
+	mutex_unlock(&conn->chan_lock);
+
+	return c;
+}
+
 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
 {
 	struct l2cap_chan *c;
@@ -178,7 +214,7 @@
 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
 {
 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
-						state_to_string(state));
+	       state_to_string(state));
 
 	chan->state = state;
 	chan->ops->state_change(chan, state);
@@ -361,7 +397,7 @@
 static void l2cap_chan_timeout(struct work_struct *work)
 {
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
-							chan_timer.work);
+					       chan_timer.work);
 	struct l2cap_conn *conn = chan->conn;
 	int reason;
 
@@ -373,7 +409,7 @@
 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
 		reason = ECONNREFUSED;
 	else if (chan->state == BT_CONNECT &&
-					chan->sec_level != BT_SECURITY_SDP)
+		 chan->sec_level != BT_SECURITY_SDP)
 		reason = ECONNREFUSED;
 	else
 		reason = ETIMEDOUT;
@@ -455,7 +491,7 @@
 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
 }
 
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 	       __le16_to_cpu(chan->psm), chan->dcid);
@@ -504,7 +540,7 @@
 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
-	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;
+	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
 
 	l2cap_chan_hold(chan);
 
@@ -527,6 +563,7 @@
 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
 
 	if (conn) {
+		struct amp_mgr *mgr = conn->hcon->amp_mgr;
 		/* Delete from channel list */
 		list_del(&chan->list);
 
@@ -536,10 +573,19 @@
 
 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
 			hci_conn_put(conn->hcon);
+
+		if (mgr && mgr->bredr_chan == chan)
+			mgr->bredr_chan = NULL;
 	}
 
-	if (chan->ops->teardown)
-		chan->ops->teardown(chan, err);
+	if (chan->hs_hchan) {
+		struct hci_chan *hs_hchan = chan->hs_hchan;
+
+		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+		amp_disconnect_logical_link(hs_hchan);
+	}
+
+	chan->ops->teardown(chan, err);
 
 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
 		return;
@@ -573,19 +619,18 @@
 	struct l2cap_conn *conn = chan->conn;
 	struct sock *sk = chan->sk;
 
-	BT_DBG("chan %p state %s sk %p", chan,
-					state_to_string(chan->state), sk);
+	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
+	       sk);
 
 	switch (chan->state) {
 	case BT_LISTEN:
-		if (chan->ops->teardown)
-			chan->ops->teardown(chan, 0);
+		chan->ops->teardown(chan, 0);
 		break;
 
 	case BT_CONNECTED:
 	case BT_CONFIG:
 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
-					conn->hcon->type == ACL_LINK) {
+		    conn->hcon->type == ACL_LINK) {
 			__set_chan_timer(chan, sk->sk_sndtimeo);
 			l2cap_send_disconn_req(conn, chan, reason);
 		} else
@@ -594,7 +639,7 @@
 
 	case BT_CONNECT2:
 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
-					conn->hcon->type == ACL_LINK) {
+		    conn->hcon->type == ACL_LINK) {
 			struct l2cap_conn_rsp rsp;
 			__u16 result;
 
@@ -609,7 +654,7 @@
 			rsp.result = cpu_to_le16(result);
 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
+				       sizeof(rsp), &rsp);
 		}
 
 		l2cap_chan_del(chan, reason);
@@ -621,8 +666,7 @@
 		break;
 
 	default:
-		if (chan->ops->teardown)
-			chan->ops->teardown(chan, 0);
+		chan->ops->teardown(chan, 0);
 		break;
 	}
 }
@@ -691,7 +735,8 @@
 	return id;
 }
 
-static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+			   void *data)
 {
 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
 	u8 flags;
@@ -712,16 +757,31 @@
 	hci_send_acl(conn->hchan, skb, flags);
 }
 
+static bool __chan_is_moving(struct l2cap_chan *chan)
+{
+	return chan->move_state != L2CAP_MOVE_STABLE &&
+	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
+}
+
 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
 {
 	struct hci_conn *hcon = chan->conn->hcon;
 	u16 flags;
 
 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
-							skb->priority);
+	       skb->priority);
+
+	if (chan->hs_hcon && !__chan_is_moving(chan)) {
+		if (chan->hs_hchan)
+			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
+		else
+			kfree_skb(skb);
+
+		return;
+	}
 
 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
-					lmp_no_flush_capable(hcon->hdev))
+	    lmp_no_flush_capable(hcon->hdev))
 		flags = ACL_START_NO_FLUSH;
 	else
 		flags = ACL_START;
@@ -895,6 +955,9 @@
 	if (!control->sframe)
 		return;
 
+	if (__chan_is_moving(chan))
+		return;
+
 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
 	    !control->poll)
 		control->final = 1;
@@ -946,7 +1009,25 @@
 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
 }
 
-static void l2cap_send_conn_req(struct l2cap_chan *chan)
+static bool __amp_capable(struct l2cap_chan *chan)
+{
+	struct l2cap_conn *conn = chan->conn;
+
+	if (enable_hs &&
+	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
+	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
+		return true;
+	else
+		return false;
+}
+
+static bool l2cap_check_efs(struct l2cap_chan *chan)
+{
+	/* Check EFS parameters */
+	return true;
+}
+
+void l2cap_send_conn_req(struct l2cap_chan *chan)
 {
 	struct l2cap_conn *conn = chan->conn;
 	struct l2cap_conn_req req;
@@ -961,6 +1042,76 @@
 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
 }
 
+static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
+{
+	struct l2cap_create_chan_req req;
+	req.scid = cpu_to_le16(chan->scid);
+	req.psm  = chan->psm;
+	req.amp_id = amp_id;
+
+	chan->ident = l2cap_get_ident(chan->conn);
+
+	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
+		       sizeof(req), &req);
+}
+
+static void l2cap_move_setup(struct l2cap_chan *chan)
+{
+	struct sk_buff *skb;
+
+	BT_DBG("chan %p", chan);
+
+	if (chan->mode != L2CAP_MODE_ERTM)
+		return;
+
+	__clear_retrans_timer(chan);
+	__clear_monitor_timer(chan);
+	__clear_ack_timer(chan);
+
+	chan->retry_count = 0;
+	skb_queue_walk(&chan->tx_q, skb) {
+		if (bt_cb(skb)->control.retries)
+			bt_cb(skb)->control.retries = 1;
+		else
+			break;
+	}
+
+	chan->expected_tx_seq = chan->buffer_seq;
+
+	clear_bit(CONN_REJ_ACT, &chan->conn_state);
+	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+	l2cap_seq_list_clear(&chan->retrans_list);
+	l2cap_seq_list_clear(&chan->srej_list);
+	skb_queue_purge(&chan->srej_q);
+
+	chan->tx_state = L2CAP_TX_STATE_XMIT;
+	chan->rx_state = L2CAP_RX_STATE_MOVE;
+
+	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+}
+
+static void l2cap_move_done(struct l2cap_chan *chan)
+{
+	u8 move_role = chan->move_role;
+	BT_DBG("chan %p", chan);
+
+	chan->move_state = L2CAP_MOVE_STABLE;
+	chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
+	if (chan->mode != L2CAP_MODE_ERTM)
+		return;
+
+	switch (move_role) {
+	case L2CAP_MOVE_ROLE_INITIATOR:
+		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
+		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
+		break;
+	case L2CAP_MOVE_ROLE_RESPONDER:
+		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
+		break;
+	}
+}
+
 static void l2cap_chan_ready(struct l2cap_chan *chan)
 {
 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
@@ -972,6 +1123,16 @@
 	chan->ops->ready(chan);
 }
 
+static void l2cap_start_connection(struct l2cap_chan *chan)
+{
+	if (__amp_capable(chan)) {
+		BT_DBG("chan %p AMP capable: discover AMPs", chan);
+		a2mp_discover_amp(chan);
+	} else {
+		l2cap_send_conn_req(chan);
+	}
+}
+
 static void l2cap_do_start(struct l2cap_chan *chan)
 {
 	struct l2cap_conn *conn = chan->conn;
@@ -986,8 +1147,9 @@
 			return;
 
 		if (l2cap_chan_check_security(chan) &&
-				__l2cap_no_conn_pending(chan))
-			l2cap_send_conn_req(chan);
+		    __l2cap_no_conn_pending(chan)) {
+			l2cap_start_connection(chan);
+		}
 	} else {
 		struct l2cap_info_req req;
 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -997,8 +1159,8 @@
 
 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
 
-		l2cap_send_cmd(conn, conn->info_ident,
-					L2CAP_INFO_REQ, sizeof(req), &req);
+		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+			       sizeof(req), &req);
 	}
 }
 
@@ -1018,7 +1180,8 @@
 	}
 }
 
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
+static void l2cap_send_disconn_req(struct l2cap_conn *conn,
+				   struct l2cap_chan *chan, int err)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_disconn_req req;
@@ -1033,14 +1196,14 @@
 	}
 
 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
-		__l2cap_state_change(chan, BT_DISCONN);
+		l2cap_state_change(chan, BT_DISCONN);
 		return;
 	}
 
 	req.dcid = cpu_to_le16(chan->dcid);
 	req.scid = cpu_to_le16(chan->scid);
-	l2cap_send_cmd(conn, l2cap_get_ident(conn),
-			L2CAP_DISCONN_REQ, sizeof(req), &req);
+	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
+		       sizeof(req), &req);
 
 	lock_sock(sk);
 	__l2cap_state_change(chan, BT_DISCONN);
@@ -1069,20 +1232,20 @@
 
 		if (chan->state == BT_CONNECT) {
 			if (!l2cap_chan_check_security(chan) ||
-					!__l2cap_no_conn_pending(chan)) {
+			    !__l2cap_no_conn_pending(chan)) {
 				l2cap_chan_unlock(chan);
 				continue;
 			}
 
 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
-					&& test_bit(CONF_STATE2_DEVICE,
+			    && test_bit(CONF_STATE2_DEVICE,
 					&chan->conf_state)) {
 				l2cap_chan_close(chan, ECONNRESET);
 				l2cap_chan_unlock(chan);
 				continue;
 			}
 
-			l2cap_send_conn_req(chan);
+			l2cap_start_connection(chan);
 
 		} else if (chan->state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
@@ -1094,11 +1257,9 @@
 				lock_sock(sk);
 				if (test_bit(BT_SK_DEFER_SETUP,
 					     &bt_sk(sk)->flags)) {
-					struct sock *parent = bt_sk(sk)->parent;
 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-					if (parent)
-						parent->sk_data_ready(parent, 0);
+					chan->ops->defer(chan);
 
 				} else {
 					__l2cap_state_change(chan, BT_CONFIG);
@@ -1112,17 +1273,17 @@
 			}
 
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
+				       sizeof(rsp), &rsp);
 
 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
-					rsp.result != L2CAP_CR_SUCCESS) {
+			    rsp.result != L2CAP_CR_SUCCESS) {
 				l2cap_chan_unlock(chan);
 				continue;
 			}
 
 			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-						l2cap_build_conf_req(chan, buf), buf);
+				       l2cap_build_conf_req(chan, buf), buf);
 			chan->num_conf_req++;
 		}
 
@@ -1204,8 +1365,6 @@
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 
-	bt_accept_enqueue(parent, sk);
-
 	l2cap_chan_add(conn, chan);
 
 	l2cap_chan_ready(chan);
@@ -1270,7 +1429,7 @@
 
 	list_for_each_entry(chan, &conn->chan_l, list) {
 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
-			__l2cap_chan_set_err(chan, err);
+			l2cap_chan_set_err(chan, err);
 	}
 
 	mutex_unlock(&conn->chan_lock);
@@ -1279,7 +1438,7 @@
 static void l2cap_info_timeout(struct work_struct *work)
 {
 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
-							info_timer.work);
+					       info_timer.work);
 
 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
 	conn->info_ident = 0;
@@ -1333,7 +1492,7 @@
 static void security_timeout(struct work_struct *work)
 {
 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
-						security_timer.work);
+					       security_timer.work);
 
 	BT_DBG("conn %p", conn);
 
@@ -1355,7 +1514,7 @@
 	if (!hchan)
 		return NULL;
 
-	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
+	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
 	if (!conn) {
 		hci_chan_del(hchan);
 		return NULL;
@@ -1367,10 +1526,22 @@
 
 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
 
-	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
-		conn->mtu = hcon->hdev->le_mtu;
-	else
+	switch (hcon->type) {
+	case AMP_LINK:
+		conn->mtu = hcon->hdev->block_mtu;
+		break;
+
+	case LE_LINK:
+		if (hcon->hdev->le_mtu) {
+			conn->mtu = hcon->hdev->le_mtu;
+			break;
+		}
+		/* fall through */
+
+	default:
 		conn->mtu = hcon->hdev->acl_mtu;
+		break;
+	}
 
 	conn->src = &hcon->hdev->bdaddr;
 	conn->dst = &hcon->dst;
@@ -1448,7 +1619,7 @@
 	__u8 auth_type;
 	int err;
 
-	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
+	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
 	       dst_type, __le16_to_cpu(psm));
 
 	hdev = hci_get_route(dst, src);
@@ -1461,7 +1632,7 @@
 
 	/* PSM must be odd and lsb of upper byte must be 0 */
 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
-					chan->chan_type != L2CAP_CHAN_RAW) {
+	    chan->chan_type != L2CAP_CHAN_RAW) {
 		err = -EINVAL;
 		goto done;
 	}
@@ -1657,6 +1828,9 @@
 
 	BT_DBG("chan %p, skbs %p", chan, skbs);
 
+	if (__chan_is_moving(chan))
+		return;
+
 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
 
 	while (!skb_queue_empty(&chan->tx_q)) {
@@ -1699,6 +1873,9 @@
 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
 		return 0;
 
+	if (__chan_is_moving(chan))
+		return 0;
+
 	while (chan->tx_send_head &&
 	       chan->unacked_frames < chan->remote_tx_win &&
 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -1764,13 +1941,16 @@
 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
 		return;
 
+	if (__chan_is_moving(chan))
+		return;
+
 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
 		seq = l2cap_seq_list_pop(&chan->retrans_list);
 
 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
 		if (!skb) {
 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
-				seq);
+			       seq);
 			continue;
 		}
 
@@ -1795,9 +1975,9 @@
 			/* Cloned sk_buffs are read-only, so we need a
 			 * writeable copy
 			 */
-			tx_skb = skb_copy(skb, GFP_ATOMIC);
+			tx_skb = skb_copy(skb, GFP_KERNEL);
 		} else {
-			tx_skb = skb_clone(skb, GFP_ATOMIC);
+			tx_skb = skb_clone(skb, GFP_KERNEL);
 		}
 
 		if (!tx_skb) {
@@ -1855,7 +2035,7 @@
 	if (chan->unacked_frames) {
 		skb_queue_walk(&chan->tx_q, skb) {
 			if (bt_cb(skb)->control.txseq == control->reqseq ||
-				skb == chan->tx_send_head)
+			    skb == chan->tx_send_head)
 				break;
 		}
 
@@ -2106,7 +2286,9 @@
 	/* PDU size is derived from the HCI MTU */
 	pdu_len = chan->conn->mtu;
 
-	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+	/* Constrain PDU size for BR/EDR connections */
+	if (!chan->hs_hcon)
+		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
 
 	/* Adjust for largest possible L2CAP overhead. */
 	if (chan->fcs)
@@ -2156,7 +2338,7 @@
 }
 
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
-								u32 priority)
+		    u32 priority)
 {
 	struct sk_buff *skb;
 	int err;
@@ -2543,7 +2725,7 @@
 		/* Don't send frame to the socket it came from */
 		if (skb->sk == sk)
 			continue;
-		nskb = skb_clone(skb, GFP_ATOMIC);
+		nskb = skb_clone(skb, GFP_KERNEL);
 		if (!nskb)
 			continue;
 
@@ -2569,7 +2751,7 @@
 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
 	count = min_t(unsigned int, conn->mtu, len);
 
-	skb = bt_skb_alloc(count, GFP_ATOMIC);
+	skb = bt_skb_alloc(count, GFP_KERNEL);
 	if (!skb)
 		return NULL;
 
@@ -2599,7 +2781,7 @@
 	while (len) {
 		count = min_t(unsigned int, conn->mtu, len);
 
-		*frag = bt_skb_alloc(count, GFP_ATOMIC);
+		*frag = bt_skb_alloc(count, GFP_KERNEL);
 		if (!*frag)
 			goto fail;
 
@@ -2618,7 +2800,8 @@
 	return NULL;
 }
 
-static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
+static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+				     unsigned long *val)
 {
 	struct l2cap_conf_opt *opt = *ptr;
 	int len;
@@ -2692,7 +2875,7 @@
 		efs.msdu	= cpu_to_le16(chan->local_msdu);
 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
-		efs.flush_to	= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
 		break;
 
 	case L2CAP_MODE_STREAMING:
@@ -2709,7 +2892,7 @@
 	}
 
 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
-							(unsigned long) &efs);
+			   (unsigned long) &efs);
 }
 
 static void l2cap_ack_timeout(struct work_struct *work)
@@ -2749,6 +2932,11 @@
 
 	skb_queue_head_init(&chan->tx_q);
 
+	chan->local_amp_id = 0;
+	chan->move_id = 0;
+	chan->move_state = L2CAP_MOVE_STABLE;
+	chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
 	if (chan->mode != L2CAP_MODE_ERTM)
 		return 0;
 
@@ -2795,16 +2983,54 @@
 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
 }
 
+static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
+				      struct l2cap_conf_rfc *rfc)
+{
+	if (chan->local_amp_id && chan->hs_hcon) {
+		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
+
+		/* Class 1 devices must have ERTM timeouts
+		 * exceeding the Link Supervision Timeout.  The
+		 * default Link Supervision Timeout for AMP
+		 * controllers is 10 seconds.
+		 *
+		 * Class 1 devices use 0xffffffff for their
+		 * best-effort flush timeout, so the clamping logic
+		 * will result in a timeout that meets the above
+		 * requirement.  ERTM timeouts are 16-bit values, so
+		 * the maximum timeout is 65.535 seconds.
+		 */
+
+		/* Convert timeout to milliseconds and round */
+		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
+
+		/* This is the recommended formula for class 2 devices
+		 * that start ERTM timers when packets are sent to the
+		 * controller.
+		 */
+		ertm_to = 3 * ertm_to + 500;
+
+		if (ertm_to > 0xffff)
+			ertm_to = 0xffff;
+
+		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
+		rfc->monitor_timeout = rfc->retrans_timeout;
+	} else {
+		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+	}
+}
+
 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
 {
 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
-						__l2cap_ews_supported(chan)) {
+	    __l2cap_ews_supported(chan)) {
 		/* use extended control field */
 		set_bit(FLAG_EXT_CTRL, &chan->flags);
 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
 	} else {
 		chan->tx_win = min_t(u16, chan->tx_win,
-						L2CAP_DEFAULT_TX_WINDOW);
+				     L2CAP_DEFAULT_TX_WINDOW);
 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
 	}
 	chan->ack_win = chan->tx_win;
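
The __l2cap_set_ertm_timeouts() helper added above derives the ERTM retransmission and monitor timeouts from the AMP controller's best-effort flush timeout: microseconds are rounded up to milliseconds, run through the 3 * flush + 500 ms formula, and clamped to the 16-bit timeout field. A standalone sketch of that arithmetic, with sample inputs chosen purely for illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Illustration of the timeout derivation used for AMP channels. */
static uint16_t ertm_timeout_ms(uint64_t be_flush_to_us)
{
	/* DIV_ROUND_UP_ULL(): round microseconds up to milliseconds. */
	uint64_t ms = (be_flush_to_us + 999) / 1000;

	/* Recommended formula: three flush periods plus 500 ms slack. */
	ms = 3 * ms + 500;

	/* ERTM timeouts are carried in 16-bit fields, so cap at 65535 ms. */
	return ms > 0xffff ? 0xffff : (uint16_t)ms;
}

int main(void)
{
	/* Class 1 controllers report 0xffffffff us, which clamps to 65535 ms. */
	printf("%u\n", (unsigned)ertm_timeout_ms(0xffffffffULL));
	/* A 100 ms best-effort flush timeout gives 3 * 100 + 500 = 800 ms. */
	printf("%u\n", (unsigned)ertm_timeout_ms(100000ULL));
	return 0;
}
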
@@ -2844,7 +3070,7 @@
 	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
-				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
+		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
 			break;
 
 		rfc.mode            = L2CAP_MODE_BASIC;
@@ -2855,28 +3081,27 @@
 		rfc.max_pdu_size    = 0;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-							(unsigned long) &rfc);
+				   (unsigned long) &rfc);
 		break;
 
 	case L2CAP_MODE_ERTM:
 		rfc.mode            = L2CAP_MODE_ERTM;
 		rfc.max_transmit    = chan->max_tx;
-		rfc.retrans_timeout = 0;
-		rfc.monitor_timeout = 0;
+
+		__l2cap_set_ertm_timeouts(chan, &rfc);
 
 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
-						L2CAP_EXT_HDR_SIZE -
-						L2CAP_SDULEN_SIZE -
-						L2CAP_FCS_SIZE);
+			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+			     L2CAP_FCS_SIZE);
 		rfc.max_pdu_size = cpu_to_le16(size);
 
 		l2cap_txwin_setup(chan);
 
 		rfc.txwin_size = min_t(u16, chan->tx_win,
-						L2CAP_DEFAULT_TX_WINDOW);
+				       L2CAP_DEFAULT_TX_WINDOW);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-							(unsigned long) &rfc);
+				   (unsigned long) &rfc);
 
 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
 			l2cap_add_opt_efs(&ptr, chan);
@@ -2885,14 +3110,14 @@
 			break;
 
 		if (chan->fcs == L2CAP_FCS_NONE ||
-				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
+		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
 			chan->fcs = L2CAP_FCS_NONE;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
 
 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
-								chan->tx_win);
+					   chan->tx_win);
 		break;
 
 	case L2CAP_MODE_STREAMING:
@@ -2904,13 +3129,12 @@
 		rfc.monitor_timeout = 0;
 
 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
-						L2CAP_EXT_HDR_SIZE -
-						L2CAP_SDULEN_SIZE -
-						L2CAP_FCS_SIZE);
+			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+			     L2CAP_FCS_SIZE);
 		rfc.max_pdu_size = cpu_to_le16(size);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-							(unsigned long) &rfc);
+				   (unsigned long) &rfc);
 
 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
 			l2cap_add_opt_efs(&ptr, chan);
@@ -2919,7 +3143,7 @@
 			break;
 
 		if (chan->fcs == L2CAP_FCS_NONE ||
-				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
+		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
 			chan->fcs = L2CAP_FCS_NONE;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
@@ -3011,7 +3235,7 @@
 	case L2CAP_MODE_ERTM:
 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
 			chan->mode = l2cap_select_mode(rfc.mode,
-					chan->conn->feat_mask);
+						       chan->conn->feat_mask);
 			break;
 		}
 
@@ -3036,8 +3260,8 @@
 		if (chan->num_conf_rsp == 1)
 			return -ECONNREFUSED;
 
-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					sizeof(rfc), (unsigned long) &rfc);
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+				   (unsigned long) &rfc);
 	}
 
 	if (result == L2CAP_CONF_SUCCESS) {
@@ -3054,8 +3278,8 @@
 
 		if (remote_efs) {
 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-					efs.stype != L2CAP_SERV_NOTRAFIC &&
-					efs.stype != chan->local_stype) {
+			    efs.stype != L2CAP_SERV_NOTRAFIC &&
+			    efs.stype != chan->local_stype) {
 
 				result = L2CAP_CONF_UNACCEPT;
 
@@ -3063,8 +3287,8 @@
 					return -ECONNREFUSED;
 
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
-							sizeof(efs),
-							(unsigned long) &efs);
+						   sizeof(efs),
+						   (unsigned long) &efs);
 			} else {
 				/* Send PENDING Conf Rsp */
 				result = L2CAP_CONF_PENDING;
@@ -3087,51 +3311,45 @@
 			chan->remote_max_tx = rfc.max_transmit;
 
 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
-						chan->conn->mtu -
-						L2CAP_EXT_HDR_SIZE -
-						L2CAP_SDULEN_SIZE -
-						L2CAP_FCS_SIZE);
+				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
 			rfc.max_pdu_size = cpu_to_le16(size);
 			chan->remote_mps = size;
 
-			rfc.retrans_timeout =
-				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
-			rfc.monitor_timeout =
-				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+			__l2cap_set_ertm_timeouts(chan, &rfc);
 
 			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					sizeof(rfc), (unsigned long) &rfc);
+					   sizeof(rfc), (unsigned long) &rfc);
 
 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
 				chan->remote_id = efs.id;
 				chan->remote_stype = efs.stype;
 				chan->remote_msdu = le16_to_cpu(efs.msdu);
 				chan->remote_flush_to =
-						le32_to_cpu(efs.flush_to);
+					le32_to_cpu(efs.flush_to);
 				chan->remote_acc_lat =
-						le32_to_cpu(efs.acc_lat);
+					le32_to_cpu(efs.acc_lat);
 				chan->remote_sdu_itime =
 					le32_to_cpu(efs.sdu_itime);
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
-					sizeof(efs), (unsigned long) &efs);
+						   sizeof(efs),
+						   (unsigned long) &efs);
 			}
 			break;
 
 		case L2CAP_MODE_STREAMING:
 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
-						chan->conn->mtu -
-						L2CAP_EXT_HDR_SIZE -
-						L2CAP_SDULEN_SIZE -
-						L2CAP_FCS_SIZE);
+				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
 			rfc.max_pdu_size = cpu_to_le16(size);
 			chan->remote_mps = size;
 
 			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					sizeof(rfc), (unsigned long) &rfc);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+					   (unsigned long) &rfc);
 
 			break;
 
@@ -3152,7 +3370,8 @@
 	return ptr - data;
 }
 
-static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
+static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+				void *data, u16 *result)
 {
 	struct l2cap_conf_req *req = data;
 	void *ptr = req->data;
@@ -3179,7 +3398,7 @@
 		case L2CAP_CONF_FLUSH_TO:
 			chan->flush_to = val;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-							2, chan->flush_to);
+					   2, chan->flush_to);
 			break;
 
 		case L2CAP_CONF_RFC:
@@ -3187,13 +3406,13 @@
 				memcpy(&rfc, (void *)val, olen);
 
 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
-							rfc.mode != chan->mode)
+			    rfc.mode != chan->mode)
 				return -ECONNREFUSED;
 
 			chan->fcs = 0;
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					sizeof(rfc), (unsigned long) &rfc);
+					   sizeof(rfc), (unsigned long) &rfc);
 			break;
 
 		case L2CAP_CONF_EWS:
@@ -3207,12 +3426,12 @@
 				memcpy(&efs, (void *)val, olen);
 
 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-					efs.stype != L2CAP_SERV_NOTRAFIC &&
-					efs.stype != chan->local_stype)
+			    efs.stype != L2CAP_SERV_NOTRAFIC &&
+			    efs.stype != chan->local_stype)
 				return -ECONNREFUSED;
 
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
-					sizeof(efs), (unsigned long) &efs);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+					   (unsigned long) &efs);
 			break;
 		}
 	}
@@ -3235,10 +3454,10 @@
 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
 				chan->local_msdu = le16_to_cpu(efs.msdu);
 				chan->local_sdu_itime =
-						le32_to_cpu(efs.sdu_itime);
+					le32_to_cpu(efs.sdu_itime);
 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
 				chan->local_flush_to =
-						le32_to_cpu(efs.flush_to);
+					le32_to_cpu(efs.flush_to);
 			}
 			break;
 
@@ -3253,7 +3472,8 @@
 	return ptr - data;
 }
 
-static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
+				u16 result, u16 flags)
 {
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
@@ -3272,19 +3492,27 @@
 	struct l2cap_conn_rsp rsp;
 	struct l2cap_conn *conn = chan->conn;
 	u8 buf[128];
+	u8 rsp_code;
 
 	rsp.scid   = cpu_to_le16(chan->dcid);
 	rsp.dcid   = cpu_to_le16(chan->scid);
 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
-	l2cap_send_cmd(conn, chan->ident,
-				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+	if (chan->hs_hcon)
+		rsp_code = L2CAP_CREATE_CHAN_RSP;
+	else
+		rsp_code = L2CAP_CONN_RSP;
+
+	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+
+	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
 
 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
 		return;
 
 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-			l2cap_build_conf_req(chan, buf), buf);
+		       l2cap_build_conf_req(chan, buf), buf);
 	chan->num_conf_req++;
 }
 
@@ -3339,7 +3567,8 @@
 	}
 }
 
-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_command_rej(struct l2cap_conn *conn,
+				    struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
 
@@ -3347,7 +3576,7 @@
 		return 0;
 
 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
-					cmd->ident == conn->info_ident) {
+	    cmd->ident == conn->info_ident) {
 		cancel_delayed_work(&conn->info_timer);
 
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -3359,7 +3588,9 @@
 	return 0;
 }
 
-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+					struct l2cap_cmd_hdr *cmd,
+					u8 *data, u8 rsp_code, u8 amp_id)
 {
 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
 	struct l2cap_conn_rsp rsp;
@@ -3386,7 +3617,7 @@
 
 	/* Check if the ACL is secure enough (if not SDP) */
 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
-				!hci_conn_check_link_mode(conn->hcon)) {
+	    !hci_conn_check_link_mode(conn->hcon)) {
 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
 		result = L2CAP_CR_SEC_BLOCK;
 		goto response;
@@ -3410,8 +3641,7 @@
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 	chan->psm  = psm;
 	chan->dcid = scid;
-
-	bt_accept_enqueue(parent, sk);
+	chan->local_amp_id = amp_id;
 
 	__l2cap_chan_add(conn, chan);
 
@@ -3427,10 +3657,19 @@
 				__l2cap_state_change(chan, BT_CONNECT2);
 				result = L2CAP_CR_PEND;
 				status = L2CAP_CS_AUTHOR_PEND;
-				parent->sk_data_ready(parent, 0);
+				chan->ops->defer(chan);
 			} else {
-				__l2cap_state_change(chan, BT_CONFIG);
-				result = L2CAP_CR_SUCCESS;
+				/* Force pending result for AMP controllers.
+				 * The connection will succeed after the
+				 * physical link is up.
+				 */
+				if (amp_id) {
+					__l2cap_state_change(chan, BT_CONNECT2);
+					result = L2CAP_CR_PEND;
+				} else {
+					__l2cap_state_change(chan, BT_CONFIG);
+					result = L2CAP_CR_SUCCESS;
+				}
 				status = L2CAP_CS_NO_INFO;
 			}
 		} else {
@@ -3453,7 +3692,7 @@
 	rsp.dcid   = cpu_to_le16(dcid);
 	rsp.result = cpu_to_le16(result);
 	rsp.status = cpu_to_le16(status);
-	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
 
 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
 		struct l2cap_info_req info;
@@ -3464,23 +3703,31 @@
 
 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
 
-		l2cap_send_cmd(conn, conn->info_ident,
-					L2CAP_INFO_REQ, sizeof(info), &info);
+		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+			       sizeof(info), &info);
 	}
 
 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
-				result == L2CAP_CR_SUCCESS) {
+	    result == L2CAP_CR_SUCCESS) {
 		u8 buf[128];
 		set_bit(CONF_REQ_SENT, &chan->conf_state);
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(chan, buf), buf);
+			       l2cap_build_conf_req(chan, buf), buf);
 		chan->num_conf_req++;
 	}
 
+	return chan;
+}
+
+static int l2cap_connect_req(struct l2cap_conn *conn,
+			     struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
 	return 0;
 }
 
-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+				    struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
 	u16 scid, dcid, result, status;
@@ -3494,7 +3741,7 @@
 	status = __le16_to_cpu(rsp->status);
 
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
-						dcid, scid, result, status);
+	       dcid, scid, result, status);
 
 	mutex_lock(&conn->chan_lock);
 
@@ -3527,7 +3774,7 @@
 			break;
 
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(chan, req), req);
+			       l2cap_build_conf_req(chan, req), req);
 		chan->num_conf_req++;
 		break;
 
@@ -3559,7 +3806,25 @@
 		chan->fcs = L2CAP_FCS_CRC16;
 }
 
-static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
+				    u8 ident, u16 flags)
+{
+	struct l2cap_conn *conn = chan->conn;
+
+	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
+	       flags);
+
+	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
+		       l2cap_build_conf_rsp(chan, data,
+					    L2CAP_CONF_SUCCESS, flags), data);
+}
+
+static inline int l2cap_config_req(struct l2cap_conn *conn,
+				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+				   u8 *data)
 {
 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
 	u16 dcid, flags;
@@ -3584,7 +3849,7 @@
 		rej.dcid = cpu_to_le16(chan->dcid);
 
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-				sizeof(rej), &rej);
+			       sizeof(rej), &rej);
 		goto unlock;
 	}
 
@@ -3592,8 +3857,8 @@
 	len = cmd_len - sizeof(*req);
 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(chan, rsp,
-					L2CAP_CONF_REJECT, flags), rsp);
+			       l2cap_build_conf_rsp(chan, rsp,
+			       L2CAP_CONF_REJECT, flags), rsp);
 		goto unlock;
 	}
 
@@ -3604,8 +3869,8 @@
 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
 		/* Incomplete config. Send empty response. */
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(chan, rsp,
-					L2CAP_CONF_SUCCESS, flags), rsp);
+			       l2cap_build_conf_rsp(chan, rsp,
+			       L2CAP_CONF_SUCCESS, flags), rsp);
 		goto unlock;
 	}
 
@@ -3616,6 +3881,7 @@
 		goto unlock;
 	}
 
+	chan->ident = cmd->ident;
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
 	chan->num_conf_rsp++;
 
@@ -3643,23 +3909,22 @@
 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
 		u8 buf[64];
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(chan, buf), buf);
+			       l2cap_build_conf_req(chan, buf), buf);
 		chan->num_conf_req++;
 	}
 
 	/* Got Conf Rsp PENDING from remote side and assume we sent
 	   Conf Rsp PENDING in the code above */
 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
-			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
 
 		/* check compatibility */
 
-		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
-		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
-
-		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-					l2cap_build_conf_rsp(chan, rsp,
-					L2CAP_CONF_SUCCESS, flags), rsp);
+		/* Send rsp for BR/EDR channel */
+		if (!chan->hs_hcon)
+			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
+		else
+			chan->ident = cmd->ident;
 	}
 
 unlock:
@@ -3667,7 +3932,8 @@
 	return err;
 }
 
-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+				   struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
 	u16 scid, flags, result;
@@ -3699,20 +3965,21 @@
 			char buf[64];
 
 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-								buf, &result);
+						   buf, &result);
 			if (len < 0) {
 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
-			/* check compatibility */
-
-			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
-			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
-
-			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-						l2cap_build_conf_rsp(chan, buf,
-						L2CAP_CONF_SUCCESS, 0x0000), buf);
+			if (!chan->hs_hcon) {
+				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
+							0);
+			} else {
+				if (l2cap_check_efs(chan)) {
+					amp_create_logical_link(chan);
+					chan->ident = cmd->ident;
+				}
+			}
 		}
 		goto done;
 
@@ -3728,14 +3995,14 @@
 			/* throw out any old stored conf requests */
 			result = L2CAP_CONF_SUCCESS;
 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-								req, &result);
+						   req, &result);
 			if (len < 0) {
 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
-						L2CAP_CONF_REQ, len, req);
+				       L2CAP_CONF_REQ, len, req);
 			chan->num_conf_req++;
 			if (result != L2CAP_CONF_SUCCESS)
 				goto done;
@@ -3773,7 +4040,8 @@
 	return err;
 }
 
-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+				       struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
 	struct l2cap_disconn_rsp rsp;
@@ -3819,7 +4087,8 @@
 	return 0;
 }
 
-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+				       struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
 	u16 dcid, scid;
@@ -3853,7 +4122,8 @@
 	return 0;
 }
 
-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_information_req(struct l2cap_conn *conn,
+					struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
 	u16 type;
@@ -3870,14 +4140,14 @@
 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
 		if (!disable_ertm)
 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
-							 | L2CAP_FEAT_FCS;
+				| L2CAP_FEAT_FCS;
 		if (enable_hs)
 			feat_mask |= L2CAP_FEAT_EXT_FLOW
-						| L2CAP_FEAT_EXT_WINDOW;
+				| L2CAP_FEAT_EXT_WINDOW;
 
 		put_unaligned_le32(feat_mask, rsp->data);
-		l2cap_send_cmd(conn, cmd->ident,
-					L2CAP_INFO_RSP, sizeof(buf), buf);
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+			       buf);
 	} else if (type == L2CAP_IT_FIXED_CHAN) {
 		u8 buf[12];
 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
@@ -3890,20 +4160,21 @@
 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
-		l2cap_send_cmd(conn, cmd->ident,
-					L2CAP_INFO_RSP, sizeof(buf), buf);
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+			       buf);
 	} else {
 		struct l2cap_info_rsp rsp;
 		rsp.type   = cpu_to_le16(type);
 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
-		l2cap_send_cmd(conn, cmd->ident,
-					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
+			       &rsp);
 	}
 
 	return 0;
 }
 
-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+					struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
 	u16 type, result;
@@ -3915,7 +4186,7 @@
 
 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
 	if (cmd->ident != conn->info_ident ||
-			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
 		return 0;
 
 	cancel_delayed_work(&conn->info_timer);
@@ -3940,7 +4211,7 @@
 			conn->info_ident = l2cap_get_ident(conn);
 
 			l2cap_send_cmd(conn, conn->info_ident,
-					L2CAP_INFO_REQ, sizeof(req), &req);
+				       L2CAP_INFO_REQ, sizeof(req), &req);
 		} else {
 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
 			conn->info_ident = 0;
@@ -3961,12 +4232,14 @@
 	return 0;
 }
 
-static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
-					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
-					void *data)
+static int l2cap_create_channel_req(struct l2cap_conn *conn,
+				    struct l2cap_cmd_hdr *cmd,
+				    u16 cmd_len, void *data)
 {
 	struct l2cap_create_chan_req *req = data;
 	struct l2cap_create_chan_rsp rsp;
+	struct l2cap_chan *chan;
+	struct hci_dev *hdev;
 	u16 psm, scid;
 
 	if (cmd_len != sizeof(*req))
@@ -3980,56 +4253,119 @@
 
 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
 
-	/* Placeholder: Always reject */
+	/* For controller id 0 make BR/EDR connection */
+	if (req->amp_id == HCI_BREDR_ID) {
+		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+			      req->amp_id);
+		return 0;
+	}
+
+	/* Validate AMP controller id */
+	hdev = hci_dev_get(req->amp_id);
+	if (!hdev)
+		goto error;
+
+	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
+		hci_dev_put(hdev);
+		goto error;
+	}
+
+	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+			     req->amp_id);
+	if (chan) {
+		struct amp_mgr *mgr = conn->hcon->amp_mgr;
+		struct hci_conn *hs_hcon;
+
+		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
+		if (!hs_hcon) {
+			hci_dev_put(hdev);
+			return -EFAULT;
+		}
+
+		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
+
+		mgr->bredr_chan = chan;
+		chan->hs_hcon = hs_hcon;
+		chan->fcs = L2CAP_FCS_NONE;
+		conn->mtu = hdev->block_mtu;
+	}
+
+	hci_dev_put(hdev);
+
+	return 0;
+
+error:
 	rsp.dcid = 0;
 	rsp.scid = cpu_to_le16(scid);
-	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
 		       sizeof(rsp), &rsp);
 
-	return 0;
+	return -EFAULT;
 }
 
-static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
-					struct l2cap_cmd_hdr *cmd, void *data)
+static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
 {
-	BT_DBG("conn %p", conn);
+	struct l2cap_move_chan_req req;
+	u8 ident;
 
-	return l2cap_connect_rsp(conn, cmd, data);
+	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
+
+	ident = l2cap_get_ident(chan->conn);
+	chan->ident = ident;
+
+	req.icid = cpu_to_le16(chan->scid);
+	req.dest_amp_id = dest_amp_id;
+
+	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
+		       &req);
+
+	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
 }
 
-static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
-				     u16 icid, u16 result)
+static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
 {
 	struct l2cap_move_chan_rsp rsp;
 
-	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+	BT_DBG("chan %p, result 0x%4.4x", chan, result);
 
-	rsp.icid = cpu_to_le16(icid);
+	rsp.icid = cpu_to_le16(chan->dcid);
 	rsp.result = cpu_to_le16(result);
 
-	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
+		       sizeof(rsp), &rsp);
 }
 
-static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
-				     struct l2cap_chan *chan,
-				     u16 icid, u16 result)
+static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
 {
 	struct l2cap_move_chan_cfm cfm;
-	u8 ident;
 
-	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+	BT_DBG("chan %p, result 0x%4.4x", chan, result);
 
-	ident = l2cap_get_ident(conn);
-	if (chan)
-		chan->ident = ident;
+	chan->ident = l2cap_get_ident(chan->conn);
 
-	cfm.icid = cpu_to_le16(icid);
+	cfm.icid = cpu_to_le16(chan->scid);
 	cfm.result = cpu_to_le16(result);
 
-	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
+		       sizeof(cfm), &cfm);
+
+	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+}
+
+static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
+{
+	struct l2cap_move_chan_cfm cfm;
+
+	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+
+	cfm.icid = cpu_to_le16(icid);
+	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
+
+	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
+		       sizeof(cfm), &cfm);
 }
 
 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
@@ -4043,11 +4379,289 @@
 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
 }
 
+static void __release_logical_link(struct l2cap_chan *chan)
+{
+	chan->hs_hchan = NULL;
+	chan->hs_hcon = NULL;
+
+	/* Placeholder - release the logical link */
+}
+
+static void l2cap_logical_fail(struct l2cap_chan *chan)
+{
+	/* Logical link setup failed */
+	if (chan->state != BT_CONNECTED) {
+		/* Create channel failure, disconnect */
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+		return;
+	}
+
+	switch (chan->move_role) {
+	case L2CAP_MOVE_ROLE_RESPONDER:
+		l2cap_move_done(chan);
+		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
+		break;
+	case L2CAP_MOVE_ROLE_INITIATOR:
+		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
+		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
+			/* Remote has only sent pending or
+			 * success responses, clean up
+			 */
+			l2cap_move_done(chan);
+		}
+
+		/* Other amp move states imply that the move
+	 * has already been aborted
+		 */
+		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+		break;
+	}
+}
+
+static void l2cap_logical_finish_create(struct l2cap_chan *chan,
+					struct hci_chan *hchan)
+{
+	struct l2cap_conf_rsp rsp;
+
+	chan->hs_hchan = hchan;
+	chan->hs_hcon->l2cap_data = chan->conn;
+
+	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
+
+	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+		int err;
+
+		set_default_fcs(chan);
+
+		err = l2cap_ertm_init(chan);
+		if (err < 0)
+			l2cap_send_disconn_req(chan->conn, chan, -err);
+		else
+			l2cap_chan_ready(chan);
+	}
+}
+
+static void l2cap_logical_finish_move(struct l2cap_chan *chan,
+				      struct hci_chan *hchan)
+{
+	chan->hs_hcon = hchan->conn;
+	chan->hs_hcon->l2cap_data = chan->conn;
+
+	BT_DBG("move_state %d", chan->move_state);
+
+	switch (chan->move_state) {
+	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+		/* Move confirm will be sent after a success
+		 * response is received
+		 */
+		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+		break;
+	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
+		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+		}
+		break;
+	default:
+		/* Move was not in expected state, free the channel */
+		__release_logical_link(chan);
+
+		chan->move_state = L2CAP_MOVE_STABLE;
+	}
+}
+
+/* Call with chan locked */
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+		       u8 status)
+{
+	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+
+	if (status) {
+		l2cap_logical_fail(chan);
+		__release_logical_link(chan);
+		return;
+	}
+
+	if (chan->state != BT_CONNECTED) {
+		/* Ignore logical link if channel is on BR/EDR */
+		if (chan->local_amp_id)
+			l2cap_logical_finish_create(chan, hchan);
+	} else {
+		l2cap_logical_finish_move(chan, hchan);
+	}
+}
+
+void l2cap_move_start(struct l2cap_chan *chan)
+{
+	BT_DBG("chan %p", chan);
+
+	if (chan->local_amp_id == HCI_BREDR_ID) {
+		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
+			return;
+		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+		/* Placeholder - start physical link setup */
+	} else {
+		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+		chan->move_id = 0;
+		l2cap_move_setup(chan);
+		l2cap_send_move_chan_req(chan, 0);
+	}
+}
+
+static void l2cap_do_create(struct l2cap_chan *chan, int result,
+			    u8 local_amp_id, u8 remote_amp_id)
+{
+	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+	       local_amp_id, remote_amp_id);
+
+	chan->fcs = L2CAP_FCS_NONE;
+
+	/* Outgoing channel on AMP */
+	if (chan->state == BT_CONNECT) {
+		if (result == L2CAP_CR_SUCCESS) {
+			chan->local_amp_id = local_amp_id;
+			l2cap_send_create_chan_req(chan, remote_amp_id);
+		} else {
+			/* Revert to BR/EDR connect */
+			l2cap_send_conn_req(chan);
+		}
+
+		return;
+	}
+
+	/* Incoming channel on AMP */
+	if (__l2cap_no_conn_pending(chan)) {
+		struct l2cap_conn_rsp rsp;
+		char buf[128];
+		rsp.scid = cpu_to_le16(chan->dcid);
+		rsp.dcid = cpu_to_le16(chan->scid);
+
+		if (result == L2CAP_CR_SUCCESS) {
+			/* Send successful response */
+			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+		} else {
+			/* Send negative response */
+			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+		}
+
+		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
+			       sizeof(rsp), &rsp);
+
+		if (result == L2CAP_CR_SUCCESS) {
+			__l2cap_state_change(chan, BT_CONFIG);
+			set_bit(CONF_REQ_SENT, &chan->conf_state);
+			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+				       L2CAP_CONF_REQ,
+				       l2cap_build_conf_req(chan, buf), buf);
+			chan->num_conf_req++;
+		}
+	}
+}
+
+static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
+				   u8 remote_amp_id)
+{
+	l2cap_move_setup(chan);
+	chan->move_id = local_amp_id;
+	chan->move_state = L2CAP_MOVE_WAIT_RSP;
+
+	l2cap_send_move_chan_req(chan, remote_amp_id);
+}
+
+static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
+{
+	struct hci_chan *hchan = NULL;
+
+	/* Placeholder - get hci_chan for logical link */
+
+	if (hchan) {
+		if (hchan->state == BT_CONNECTED) {
+			/* Logical link is ready to go */
+			chan->hs_hcon = hchan->conn;
+			chan->hs_hcon->l2cap_data = chan->conn;
+			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+
+			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+		} else {
+			/* Wait for logical link to be ready */
+			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+		}
+	} else {
+		/* Logical link not available */
+		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
+	}
+}
+
+static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
+{
+	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+		u8 rsp_result;
+		if (result == -EINVAL)
+			rsp_result = L2CAP_MR_BAD_ID;
+		else
+			rsp_result = L2CAP_MR_NOT_ALLOWED;
+
+		l2cap_send_move_chan_rsp(chan, rsp_result);
+	}
+
+	chan->move_role = L2CAP_MOVE_ROLE_NONE;
+	chan->move_state = L2CAP_MOVE_STABLE;
+
+	/* Restart data transmission */
+	l2cap_ertm_send(chan);
+}
+
+/* Invoke with locked chan */
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
+{
+	u8 local_amp_id = chan->local_amp_id;
+	u8 remote_amp_id = chan->remote_amp_id;
+
+	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+	       chan, result, local_amp_id, remote_amp_id);
+
+	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
+		l2cap_chan_unlock(chan);
+		return;
+	}
+
+	if (chan->state != BT_CONNECTED) {
+		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
+	} else if (result != L2CAP_MR_SUCCESS) {
+		l2cap_do_move_cancel(chan, result);
+	} else {
+		switch (chan->move_role) {
+		case L2CAP_MOVE_ROLE_INITIATOR:
+			l2cap_do_move_initiate(chan, local_amp_id,
+					       remote_amp_id);
+			break;
+		case L2CAP_MOVE_ROLE_RESPONDER:
+			l2cap_do_move_respond(chan, result);
+			break;
+		default:
+			l2cap_do_move_cancel(chan, result);
+			break;
+		}
+	}
+}
+
 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
 					 struct l2cap_cmd_hdr *cmd,
 					 u16 cmd_len, void *data)
 {
 	struct l2cap_move_chan_req *req = data;
+	struct l2cap_move_chan_rsp rsp;
+	struct l2cap_chan *chan;
 	u16 icid = 0;
 	u16 result = L2CAP_MR_NOT_ALLOWED;
 
@@ -4061,15 +4675,206 @@
 	if (!enable_hs)
 		return -EINVAL;
 
-	/* Placeholder: Always refuse */
-	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+	chan = l2cap_get_chan_by_dcid(conn, icid);
+	if (!chan) {
+		rsp.icid = cpu_to_le16(icid);
+		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
+			       sizeof(rsp), &rsp);
+		return 0;
+	}
+
+	chan->ident = cmd->ident;
+
+	if (chan->scid < L2CAP_CID_DYN_START ||
+	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
+	    (chan->mode != L2CAP_MODE_ERTM &&
+	     chan->mode != L2CAP_MODE_STREAMING)) {
+		result = L2CAP_MR_NOT_ALLOWED;
+		goto send_move_response;
+	}
+
+	if (chan->local_amp_id == req->dest_amp_id) {
+		result = L2CAP_MR_SAME_ID;
+		goto send_move_response;
+	}
+
+	if (req->dest_amp_id) {
+		struct hci_dev *hdev;
+		hdev = hci_dev_get(req->dest_amp_id);
+		if (!hdev || hdev->dev_type != HCI_AMP ||
+		    !test_bit(HCI_UP, &hdev->flags)) {
+			if (hdev)
+				hci_dev_put(hdev);
+
+			result = L2CAP_MR_BAD_ID;
+			goto send_move_response;
+		}
+		hci_dev_put(hdev);
+	}
+
+	/* Detect a move collision.  Only send a collision response
+	 * if this side has "lost", otherwise proceed with the move.
+	 * The winner has the larger bd_addr.
+	 */
+	if ((__chan_is_moving(chan) ||
+	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
+	    bacmp(conn->src, conn->dst) > 0) {
+		result = L2CAP_MR_COLLISION;
+		goto send_move_response;
+	}
+
+	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+	l2cap_move_setup(chan);
+	chan->move_id = req->dest_amp_id;
+	icid = chan->dcid;
+
+	if (!req->dest_amp_id) {
+		/* Moving to BR/EDR */
+		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+			result = L2CAP_MR_PEND;
+		} else {
+			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+			result = L2CAP_MR_SUCCESS;
+		}
+	} else {
+		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+		/* Placeholder - uncomment when amp functions are available */
+		/*amp_accept_physical(chan, req->dest_amp_id);*/
+		result = L2CAP_MR_PEND;
+	}
+
+send_move_response:
+	l2cap_send_move_chan_rsp(chan, result);
+
+	l2cap_chan_unlock(chan);
 
 	return 0;
 }
 
-static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
-					 struct l2cap_cmd_hdr *cmd,
-					 u16 cmd_len, void *data)
+static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
+{
+	struct l2cap_chan *chan;
+	struct hci_chan *hchan = NULL;
+
+	chan = l2cap_get_chan_by_scid(conn, icid);
+	if (!chan) {
+		l2cap_send_move_chan_cfm_icid(conn, icid);
+		return;
+	}
+
+	__clear_chan_timer(chan);
+	if (result == L2CAP_MR_PEND)
+		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
+
+	switch (chan->move_state) {
+	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+		/* Move confirm will be sent when logical link
+		 * is complete.
+		 */
+		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+		break;
+	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
+		if (result == L2CAP_MR_PEND) {
+			break;
+		} else if (test_bit(CONN_LOCAL_BUSY,
+				    &chan->conn_state)) {
+			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+		} else {
+			/* Logical link is up or moving to BR/EDR,
+			 * proceed with move
+			 */
+			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+		}
+		break;
+	case L2CAP_MOVE_WAIT_RSP:
+		/* Moving to AMP */
+		if (result == L2CAP_MR_SUCCESS) {
+			/* Remote is ready, send confirm immediately
+			 * after logical link is ready
+			 */
+			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+		} else {
+			/* Both logical link and move success
+			 * are required to confirm
+			 */
+			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
+		}
+
+		/* Placeholder - get hci_chan for logical link */
+		if (!hchan) {
+			/* Logical link not available */
+			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+			break;
+		}
+
+		/* If the logical link is not yet connected, do not
+		 * send confirmation.
+		 */
+		if (hchan->state != BT_CONNECTED)
+			break;
+
+		/* Logical link is already ready to go */
+
+		chan->hs_hcon = hchan->conn;
+		chan->hs_hcon->l2cap_data = chan->conn;
+
+		if (result == L2CAP_MR_SUCCESS) {
+			/* Can confirm now */
+			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+		} else {
+			/* Now only need move success
+			 * to confirm
+			 */
+			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+		}
+
+		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+		break;
+	default:
+		/* Any other amp move state means the move failed. */
+		chan->move_id = chan->local_amp_id;
+		l2cap_move_done(chan);
+		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+	}
+
+	l2cap_chan_unlock(chan);
+}
+
+static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+			    u16 result)
+{
+	struct l2cap_chan *chan;
+
+	chan = l2cap_get_chan_by_ident(conn, ident);
+	if (!chan) {
+		/* Could not locate channel, icid is best guess */
+		l2cap_send_move_chan_cfm_icid(conn, icid);
+		return;
+	}
+
+	__clear_chan_timer(chan);
+
+	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+		if (result == L2CAP_MR_COLLISION) {
+			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+		} else {
+			/* Cleanup - cancel move */
+			chan->move_id = chan->local_amp_id;
+			l2cap_move_done(chan);
+		}
+	}
+
+	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+
+	l2cap_chan_unlock(chan);
+}
+
+static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+				  struct l2cap_cmd_hdr *cmd,
+				  u16 cmd_len, void *data)
 {
 	struct l2cap_move_chan_rsp *rsp = data;
 	u16 icid, result;
@@ -4082,17 +4887,20 @@
 
 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
 
-	/* Placeholder: Always unconfirmed */
-	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
+		l2cap_move_continue(conn, icid, result);
+	else
+		l2cap_move_fail(conn, cmd->ident, icid, result);
 
 	return 0;
 }
 
-static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
-					     struct l2cap_cmd_hdr *cmd,
-					     u16 cmd_len, void *data)
+static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+				      struct l2cap_cmd_hdr *cmd,
+				      u16 cmd_len, void *data)
 {
 	struct l2cap_move_chan_cfm *cfm = data;
+	struct l2cap_chan *chan;
 	u16 icid, result;
 
 	if (cmd_len != sizeof(*cfm))
@@ -4103,8 +4911,29 @@
 
 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
 
+	chan = l2cap_get_chan_by_dcid(conn, icid);
+	if (!chan) {
+		/* Spec requires a response even if the icid was not found */
+		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+		return 0;
+	}
+
+	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
+		if (result == L2CAP_MC_CONFIRMED) {
+			chan->local_amp_id = chan->move_id;
+			if (!chan->local_amp_id)
+				__release_logical_link(chan);
+		} else {
+			chan->move_id = chan->local_amp_id;
+		}
+
+		l2cap_move_done(chan);
+	}
+
 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
 
+	l2cap_chan_unlock(chan);
+
 	return 0;
 }
 
@@ -4113,6 +4942,7 @@
 						 u16 cmd_len, void *data)
 {
 	struct l2cap_move_chan_cfm_rsp *rsp = data;
+	struct l2cap_chan *chan;
 	u16 icid;
 
 	if (cmd_len != sizeof(*rsp))
@@ -4122,11 +4952,28 @@
 
 	BT_DBG("icid 0x%4.4x", icid);
 
+	chan = l2cap_get_chan_by_scid(conn, icid);
+	if (!chan)
+		return 0;
+
+	__clear_chan_timer(chan);
+
+	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
+		chan->local_amp_id = chan->move_id;
+
+		if (!chan->local_amp_id && chan->hs_hchan)
+			__release_logical_link(chan);
+
+		l2cap_move_done(chan);
+	}
+
+	l2cap_chan_unlock(chan);
+
 	return 0;
 }
 
 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
-							u16 to_multiplier)
+					 u16 to_multiplier)
 {
 	u16 max_latency;
 
@@ -4147,7 +4994,8 @@
 }
 
 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
-					struct l2cap_cmd_hdr *cmd, u8 *data)
+					      struct l2cap_cmd_hdr *cmd,
+					      u8 *data)
 {
 	struct hci_conn *hcon = conn->hcon;
 	struct l2cap_conn_param_update_req *req;
@@ -4169,7 +5017,7 @@
 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
 
 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
-						min, max, latency, to_multiplier);
+	       min, max, latency, to_multiplier);
 
 	memset(&rsp, 0, sizeof(rsp));
 
@@ -4180,7 +5028,7 @@
 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
 
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
-							sizeof(rsp), &rsp);
+		       sizeof(rsp), &rsp);
 
 	if (!err)
 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
@@ -4189,7 +5037,8 @@
 }
 
 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
-			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+				      u8 *data)
 {
 	int err = 0;
 
@@ -4203,7 +5052,8 @@
 		break;
 
 	case L2CAP_CONN_RSP:
-		err = l2cap_connect_rsp(conn, cmd, data);
+	case L2CAP_CREATE_CHAN_RSP:
+		err = l2cap_connect_create_rsp(conn, cmd, data);
 		break;
 
 	case L2CAP_CONF_REQ:
@@ -4241,10 +5091,6 @@
 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
 		break;
 
-	case L2CAP_CREATE_CHAN_RSP:
-		err = l2cap_create_channel_rsp(conn, cmd, data);
-		break;
-
 	case L2CAP_MOVE_CHAN_REQ:
 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
 		break;
@@ -4271,7 +5117,7 @@
 }
 
 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
-					struct l2cap_cmd_hdr *cmd, u8 *data)
+				   struct l2cap_cmd_hdr *cmd, u8 *data)
 {
 	switch (cmd->code) {
 	case L2CAP_COMMAND_REJ:
@@ -4290,7 +5136,7 @@
 }
 
 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
-							struct sk_buff *skb)
+				     struct sk_buff *skb)
 {
 	u8 *data = skb->data;
 	int len = skb->len;
@@ -4307,7 +5153,8 @@
 
 		cmd_len = le16_to_cpu(cmd.len);
 
-		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
+		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
+		       cmd.ident);
 
 		if (cmd_len > len || !cmd.ident) {
 			BT_DBG("corrupted command");
@@ -4326,7 +5173,8 @@
 
 			/* FIXME: Map err to a valid reason */
 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
-			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
+				       sizeof(rej), &rej);
 		}
 
 		data += cmd_len;
@@ -4391,8 +5239,8 @@
 	}
 }
 
-static void append_skb_frag(struct sk_buff *skb,
-			struct sk_buff *new_frag, struct sk_buff **last_frag)
+static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
+			    struct sk_buff **last_frag)
 {
 	/* skb->len reflects data in skb as well as all fragments
 	 * skb->data_len reflects only data in fragments
@@ -4492,6 +5340,12 @@
 	return err;
 }
 
+static int l2cap_resegment(struct l2cap_chan *chan)
+{
+	/* Placeholder */
+	return 0;
+}
+
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
 {
 	u8 event;
@@ -4641,7 +5495,7 @@
 
 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
-								chan->tx_win) {
+		    chan->tx_win) {
 			/* See notes below regarding "double poll" and
 			 * invalid packets.
 			 */
@@ -4682,8 +5536,7 @@
 	}
 
 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
-		__seq_offset(chan, chan->expected_tx_seq,
-			     chan->last_acked_seq)){
+	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
 		return L2CAP_TXSEQ_DUPLICATE;
 	}
@@ -4808,8 +5661,8 @@
 		if (control->final) {
 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
-			if (!test_and_clear_bit(CONN_REJ_ACT,
-						&chan->conn_state)) {
+			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
+			    !__chan_is_moving(chan)) {
 				control->final = 0;
 				l2cap_retransmit_all(chan, control);
 			}
@@ -4998,6 +5851,96 @@
 	return err;
 }
 
+static int l2cap_finish_move(struct l2cap_chan *chan)
+{
+	BT_DBG("chan %p", chan);
+
+	chan->rx_state = L2CAP_RX_STATE_RECV;
+
+	if (chan->hs_hcon)
+		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+	else
+		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+	return l2cap_resegment(chan);
+}
+
+static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
+				 struct l2cap_ctrl *control,
+				 struct sk_buff *skb, u8 event)
+{
+	int err;
+
+	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+	       event);
+
+	if (!control->poll)
+		return -EPROTO;
+
+	l2cap_process_reqseq(chan, control->reqseq);
+
+	if (!skb_queue_empty(&chan->tx_q))
+		chan->tx_send_head = skb_peek(&chan->tx_q);
+	else
+		chan->tx_send_head = NULL;
+
+	/* Rewind next_tx_seq to the point expected
+	 * by the receiver.
+	 */
+	chan->next_tx_seq = control->reqseq;
+	chan->unacked_frames = 0;
+
+	err = l2cap_finish_move(chan);
+	if (err)
+		return err;
+
+	set_bit(CONN_SEND_FBIT, &chan->conn_state);
+	l2cap_send_i_or_rr_or_rnr(chan);
+
+	if (event == L2CAP_EV_RECV_IFRAME)
+		return -EPROTO;
+
+	return l2cap_rx_state_recv(chan, control, NULL, event);
+}
+
+static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
+				 struct l2cap_ctrl *control,
+				 struct sk_buff *skb, u8 event)
+{
+	int err;
+
+	if (!control->final)
+		return -EPROTO;
+
+	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+	chan->rx_state = L2CAP_RX_STATE_RECV;
+	l2cap_process_reqseq(chan, control->reqseq);
+
+	if (!skb_queue_empty(&chan->tx_q))
+		chan->tx_send_head = skb_peek(&chan->tx_q);
+	else
+		chan->tx_send_head = NULL;
+
+	/* Rewind next_tx_seq to the point expected
+	 * by the receiver.
+	 */
+	chan->next_tx_seq = control->reqseq;
+	chan->unacked_frames = 0;
+
+	if (chan->hs_hcon)
+		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+	else
+		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+	err = l2cap_resegment(chan);
+
+	if (!err)
+		err = l2cap_rx_state_recv(chan, control, skb, event);
+
+	return err;
+}
+
 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
 {
 	/* Make sure reqseq is for a packet that has been sent but not acked */
@@ -5024,6 +5967,12 @@
 			err = l2cap_rx_state_srej_sent(chan, control, skb,
 						       event);
 			break;
+		case L2CAP_RX_STATE_WAIT_P:
+			err = l2cap_rx_state_wait_p(chan, control, skb, event);
+			break;
+		case L2CAP_RX_STATE_WAIT_F:
+			err = l2cap_rx_state_wait_f(chan, control, skb, event);
+			break;
 		default:
 			/* shut it down */
 			break;
@@ -5143,7 +6092,7 @@
 		       control->super);
 
 		if (len != 0) {
-			BT_ERR("%d", len);
+			BT_ERR("Trailing bytes: %d in sframe", len);
 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 			goto drop;
 		}
@@ -5323,7 +6272,7 @@
 	int exact = 0, lm1 = 0, lm2 = 0;
 	struct l2cap_chan *c;
 
-	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
 
 	/* Find listening sockets and check their link_mode */
 	read_lock(&chan_list_lock);
@@ -5353,15 +6302,15 @@
 {
 	struct l2cap_conn *conn;
 
-	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
+	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
 
 	if (!status) {
 		conn = l2cap_conn_add(hcon, status);
 		if (conn)
 			l2cap_conn_ready(conn);
-	} else
+	} else {
 		l2cap_conn_del(hcon, bt_to_errno(status));
-
+	}
 }
 
 int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5437,13 +6386,13 @@
 			continue;
 		}
 
-		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
+		if (!__l2cap_no_conn_pending(chan)) {
 			l2cap_chan_unlock(chan);
 			continue;
 		}
 
 		if (!status && (chan->state == BT_CONNECTED ||
-						chan->state == BT_CONFIG)) {
+				chan->state == BT_CONFIG)) {
 			struct sock *sk = chan->sk;
 
 			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
@@ -5456,7 +6405,7 @@
 
 		if (chan->state == BT_CONNECT) {
 			if (!status) {
-				l2cap_send_conn_req(chan);
+				l2cap_start_connection(chan);
 			} else {
 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
 			}
@@ -5470,11 +6419,9 @@
 			if (!status) {
 				if (test_bit(BT_SK_DEFER_SETUP,
 					     &bt_sk(sk)->flags)) {
-					struct sock *parent = bt_sk(sk)->parent;
 					res = L2CAP_CR_PEND;
 					stat = L2CAP_CS_AUTHOR_PEND;
-					if (parent)
-						parent->sk_data_ready(parent, 0);
+					chan->ops->defer(chan);
 				} else {
 					__l2cap_state_change(chan, BT_CONFIG);
 					res = L2CAP_CR_SUCCESS;
@@ -5494,7 +6441,7 @@
 			rsp.result = cpu_to_le16(res);
 			rsp.status = cpu_to_le16(stat);
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
+				       sizeof(rsp), &rsp);
 
 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
 			    res == L2CAP_CR_SUCCESS) {
@@ -5519,6 +6466,12 @@
 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_hdr *hdr;
+	int len;
+
+	/* For an AMP controller, do not create an l2cap conn */
+	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
+		goto drop;
 
 	if (!conn)
 		conn = l2cap_conn_add(hcon, 0);
@@ -5528,10 +6481,10 @@
 
 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
 
-	if (!(flags & ACL_CONT)) {
-		struct l2cap_hdr *hdr;
-		int len;
-
+	switch (flags) {
+	case ACL_START:
+	case ACL_START_NO_FLUSH:
+	case ACL_COMPLETE:
 		if (conn->rx_len) {
 			BT_ERR("Unexpected start frame (len %d)", skb->len);
 			kfree_skb(conn->rx_skb);
@@ -5560,20 +6513,22 @@
 
 		if (skb->len > len) {
 			BT_ERR("Frame is too long (len %d, expected len %d)",
-				skb->len, len);
+			       skb->len, len);
 			l2cap_conn_unreliable(conn, ECOMM);
 			goto drop;
 		}
 
 		/* Allocate skb for the complete frame (with header) */
-		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
+		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
 		if (!conn->rx_skb)
 			goto drop;
 
 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
-								skb->len);
+					  skb->len);
 		conn->rx_len = len - skb->len;
-	} else {
+		break;
+
+	case ACL_CONT:
 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
 
 		if (!conn->rx_len) {
@@ -5584,7 +6539,7 @@
 
 		if (skb->len > conn->rx_len) {
 			BT_ERR("Fragment is too long (len %d, expected %d)",
-					skb->len, conn->rx_len);
+			       skb->len, conn->rx_len);
 			kfree_skb(conn->rx_skb);
 			conn->rx_skb = NULL;
 			conn->rx_len = 0;
@@ -5593,7 +6548,7 @@
 		}
 
 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
-								skb->len);
+					  skb->len);
 		conn->rx_len -= skb->len;
 
 		if (!conn->rx_len) {
@@ -5601,6 +6556,7 @@
 			l2cap_recv_frame(conn, conn->rx_skb);
 			conn->rx_skb = NULL;
 		}
+		break;
 	}
 
 drop:
@@ -5617,12 +6573,11 @@
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
-					batostr(&bt_sk(sk)->src),
-					batostr(&bt_sk(sk)->dst),
-					c->state, __le16_to_cpu(c->psm),
-					c->scid, c->dcid, c->imtu, c->omtu,
-					c->sec_level, c->mode);
+		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+			   c->state, __le16_to_cpu(c->psm),
+			   c->scid, c->dcid, c->imtu, c->omtu,
+			   c->sec_level, c->mode);
 	}
 
 	read_unlock(&chan_list_lock);
@@ -5653,8 +6608,8 @@
 		return err;
 
 	if (bt_debugfs) {
-		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
-					bt_debugfs, NULL, &l2cap_debugfs_fops);
+		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+						    NULL, &l2cap_debugfs_fops);
 		if (!l2cap_debugfs)
 			BT_ERR("Failed to create L2CAP debug file");
 	}
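
As an aside on the reassembly performed by l2cap_recv_acldata() above: a start fragment (ACL_START, ACL_START_NO_FLUSH or ACL_COMPLETE) carries the little-endian L2CAP basic header, whose length field sizes the complete frame up front, and ACL_CONT fragments are appended until that length is reached. A minimal user-space sketch of the same scheme follows; struct reasm and reasm_feed() are hypothetical, and locking is ignored. The in-kernel version additionally flags the connection unreliable on oversized frames, as in the hunk above.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct reasm {
	uint8_t *buf;   /* complete frame: 4-byte basic header + payload */
	size_t   have;  /* bytes accumulated so far */
	size_t   want;  /* total frame length, known once the header is seen */
};

/* Feed one fragment; returns 1 when the frame is complete, 0 if more
 * fragments are expected, -1 on a malformed sequence.
 */
static int reasm_feed(struct reasm *r, int is_start,
		      const uint8_t *frag, size_t len)
{
	if (is_start) {
		uint16_t plen;

		if (len < 4)
			return -1;                  /* header must be whole */
		plen = frag[0] | (frag[1] << 8);    /* le16 payload length */
		free(r->buf);
		r->buf = malloc(4 + plen);
		if (!r->buf)
			return -1;
		r->have = 0;
		r->want = 4 + plen;
	}
	if (!r->buf || r->have + len > r->want)
		return -1;                          /* unexpected fragment */
	memcpy(r->buf + r->have, frag, len);
	r->have += len;
	return r->have == r->want;
}
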
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 083f2bf..1bcfb84 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -40,7 +40,8 @@
 
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+				     int proto, gfp_t prio);
 
 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
@@ -106,7 +107,8 @@
 	return err;
 }
 
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+			      int alen, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -134,7 +136,7 @@
 	lock_sock(sk);
 
 	err = bt_sock_wait_state(sk, BT_CONNECTED,
-			sock_sndtimeo(sk, flags & O_NONBLOCK));
+				 sock_sndtimeo(sk, flags & O_NONBLOCK));
 
 	release_sock(sk);
 
@@ -185,7 +187,8 @@
 	return err;
 }
 
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
+			     int flags)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sock *sk = sock->sk, *nsk;
@@ -241,7 +244,8 @@
 	return err;
 }
 
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
+			      int *len, int peer)
 {
 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
 	struct sock *sk = sock->sk;
@@ -266,7 +270,8 @@
 	return 0;
 }
 
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+				     char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -309,7 +314,7 @@
 			break;
 		case BT_SECURITY_HIGH:
 			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
-							L2CAP_LM_SECURE;
+			      L2CAP_LM_SECURE;
 			break;
 		default:
 			opt = 0;
@@ -353,7 +358,8 @@
 	return err;
 }
 
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
+				 char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -377,19 +383,20 @@
 	switch (optname) {
 	case BT_SECURITY:
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
-					chan->chan_type != L2CAP_CHAN_RAW) {
+		    chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
 
 		memset(&sec, 0, sizeof(sec));
-		if (chan->conn)
+		if (chan->conn) {
 			sec.level = chan->conn->hcon->sec_level;
-		else
-			sec.level = chan->sec_level;
 
-		if (sk->sk_state == BT_CONNECTED)
-			sec.key_size = chan->conn->hcon->enc_key_size;
+			if (sk->sk_state == BT_CONNECTED)
+				sec.key_size = chan->conn->hcon->enc_key_size;
+		} else {
+			sec.level = chan->sec_level;
+		}
 
 		len = min_t(unsigned int, len, sizeof(sec));
 		if (copy_to_user(optval, (char *) &sec, len))
@@ -411,14 +418,14 @@
 
 	case BT_FLUSHABLE:
 		if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
-						(u32 __user *) optval))
+			     (u32 __user *) optval))
 			err = -EFAULT;
 
 		break;
 
 	case BT_POWER:
 		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-				&& sk->sk_type != SOCK_RAW) {
+		    && sk->sk_type != SOCK_RAW) {
 			err = -EINVAL;
 			break;
 		}
@@ -466,7 +473,8 @@
 	return true;
 }
 
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+				     char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -529,6 +537,7 @@
 		chan->fcs  = opts.fcs;
 		chan->max_tx = opts.max_tx;
 		chan->tx_win = opts.txwin_size;
+		chan->flush_to = opts.flush_to;
 		break;
 
 	case L2CAP_LM:
@@ -564,7 +573,8 @@
 	return err;
 }
 
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+				 char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -587,7 +597,7 @@
 	switch (optname) {
 	case BT_SECURITY:
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
-					chan->chan_type != L2CAP_CHAN_RAW) {
+		    chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
@@ -601,7 +611,7 @@
 		}
 
 		if (sec.level < BT_SECURITY_LOW ||
-					sec.level > BT_SECURITY_HIGH) {
+		    sec.level > BT_SECURITY_HIGH) {
 			err = -EINVAL;
 			break;
 		}
@@ -627,7 +637,7 @@
 
 		/* or for ACL link */
 		} else if ((sk->sk_state == BT_CONNECT2 &&
-			   test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
+			    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
 			   sk->sk_state == BT_CONNECTED) {
 			if (!l2cap_chan_check_security(chan))
 				set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
@@ -684,7 +694,7 @@
 
 	case BT_POWER:
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
-					chan->chan_type != L2CAP_CHAN_RAW) {
+		    chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
@@ -720,12 +730,17 @@
 		}
 
 		if (chan->mode != L2CAP_MODE_ERTM &&
-				chan->mode != L2CAP_MODE_STREAMING) {
+		    chan->mode != L2CAP_MODE_STREAMING) {
 			err = -EOPNOTSUPP;
 			break;
 		}
 
 		chan->chan_policy = (u8) opt;
+
+		if (sk->sk_state == BT_CONNECTED &&
+		    chan->move_role == L2CAP_MOVE_ROLE_NONE)
+			l2cap_move_start(chan);
+
 		break;
 
 	default:
@@ -737,7 +752,8 @@
 	return err;
 }
 
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+			      struct msghdr *msg, size_t len)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -762,7 +778,8 @@
 	return err;
 }
 
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+			      struct msghdr *msg, size_t len, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -866,7 +883,7 @@
 
 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 			err = bt_sock_wait_state(sk, BT_CLOSED,
-							sk->sk_lingertime);
+						 sk->sk_lingertime);
 	}
 
 	if (!err && sk->sk_err)
@@ -930,7 +947,7 @@
 	}
 
 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
-								GFP_ATOMIC);
+			      GFP_ATOMIC);
 	if (!sk)
 		return NULL;
 
@@ -938,6 +955,8 @@
 
 	l2cap_sock_init(sk, parent);
 
+	bt_accept_enqueue(parent, sk);
+
 	return l2cap_pi(sk)->chan;
 }
 
@@ -1068,6 +1087,15 @@
 	release_sock(sk);
 }
 
+static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk = chan->data;
+	struct sock *parent = bt_sk(sk)->parent;
+
+	if (parent)
+		parent->sk_data_ready(parent, 0);
+}
+
 static struct l2cap_ops l2cap_chan_ops = {
 	.name		= "L2CAP Socket Interface",
 	.new_connection	= l2cap_sock_new_connection_cb,
@@ -1076,6 +1104,7 @@
 	.teardown	= l2cap_sock_teardown_cb,
 	.state_change	= l2cap_sock_state_change_cb,
 	.ready		= l2cap_sock_ready_cb,
+	.defer		= l2cap_sock_defer_cb,
 	.alloc_skb	= l2cap_sock_alloc_skb_cb,
 };
 
@@ -1083,7 +1112,8 @@
 {
 	BT_DBG("sk %p", sk);
 
-	l2cap_chan_put(l2cap_pi(sk)->chan);
+	if (l2cap_pi(sk)->chan)
+		l2cap_chan_put(l2cap_pi(sk)->chan);
 	if (l2cap_pi(sk)->rx_busy_skb) {
 		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
 		l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -1159,7 +1189,8 @@
 	.obj_size	= sizeof(struct l2cap_pinfo)
 };
 
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+				     int proto, gfp_t prio)
 {
 	struct sock *sk;
 	struct l2cap_chan *chan;
@@ -1204,7 +1235,7 @@
 	sock->state = SS_UNCONNECTED;
 
 	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
-			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+	    sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
 	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
@@ -1261,7 +1292,8 @@
 		goto error;
 	}
 
-	err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
+	err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list,
+			     NULL);
 	if (err < 0) {
 		BT_ERR("Failed to create L2CAP proc file");
 		bt_sock_unregister(BTPROTO_L2CAP);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index e1c9752..b3fbc73 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -41,20 +41,6 @@
 }
 EXPORT_SYMBOL(baswap);
 
-char *batostr(bdaddr_t *ba)
-{
-	static char str[2][18];
-	static int i = 1;
-
-	i ^= 1;
-	sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
-		ba->b[5], ba->b[4], ba->b[3],
-		ba->b[2], ba->b[1], ba->b[0]);
-
-	return str[i];
-}
-EXPORT_SYMBOL(batostr);
-
 /* Bluetooth error codes to Unix errno mapping */
 int bt_to_errno(__u16 code)
 {
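
A note on the lib.c change above: batostr() and its static buffers go away because callers now use the %pMR printk extension, which formats a bdaddr_t directly from a pointer in Bluetooth (reversed) byte order. A minimal sketch of the resulting call-site pattern, using a hypothetical dump_peer() helper:

/* Illustrative only: %pMR consumes a pointer to the six address bytes,
 * so no temporary string buffer or batostr() call is needed.
 */
static void dump_peer(struct hci_conn *hcon)
{
	BT_DBG("peer %pMR handle %d", &hcon->dst, hcon->handle);
}
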
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 91de423..142764a 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -222,7 +222,7 @@
 
 	hdr = (void *) skb_put(skb, sizeof(*hdr));
 
-	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
 	hdr->index = cpu_to_le16(index);
 	hdr->len = cpu_to_le16(sizeof(*ev));
 
@@ -253,7 +253,7 @@
 
 	hdr = (void *) skb_put(skb, sizeof(*hdr));
 
-	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
 	hdr->index = cpu_to_le16(index);
 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
 
@@ -377,15 +377,15 @@
 	u32 settings = 0;
 
 	settings |= MGMT_SETTING_POWERED;
-	settings |= MGMT_SETTING_CONNECTABLE;
-	settings |= MGMT_SETTING_FAST_CONNECTABLE;
-	settings |= MGMT_SETTING_DISCOVERABLE;
 	settings |= MGMT_SETTING_PAIRABLE;
 
 	if (lmp_ssp_capable(hdev))
 		settings |= MGMT_SETTING_SSP;
 
 	if (lmp_bredr_capable(hdev)) {
+		settings |= MGMT_SETTING_CONNECTABLE;
+		settings |= MGMT_SETTING_FAST_CONNECTABLE;
+		settings |= MGMT_SETTING_DISCOVERABLE;
 		settings |= MGMT_SETTING_BREDR;
 		settings |= MGMT_SETTING_LINK_SECURITY;
 	}
@@ -485,7 +485,7 @@
 		ptr += (name_len + 2);
 	}
 
-	if (hdev->inq_tx_power) {
+	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
 		ptr[0] = 2;
 		ptr[1] = EIR_TX_POWER;
 		ptr[2] = (u8) hdev->inq_tx_power;
@@ -566,7 +566,7 @@
 	if (!hdev_is_powered(hdev))
 		return 0;
 
-	if (!(hdev->features[6] & LMP_EXT_INQ))
+	if (!lmp_ext_inq_capable(hdev))
 		return 0;
 
 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
@@ -833,7 +833,7 @@
 	if (hdev)
 		hdr->index = cpu_to_le16(hdev->id);
 	else
-		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
 	hdr->len = cpu_to_le16(data_len);
 
 	if (data)
@@ -868,6 +868,10 @@
 
 	BT_DBG("request for %s", hdev->name);
 
+	if (!lmp_bredr_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	timeout = __le16_to_cpu(cp->timeout);
 	if (!cp->val && timeout > 0)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
@@ -963,6 +967,10 @@
 
 	BT_DBG("request for %s", hdev->name);
 
+	if (!lmp_bredr_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	hci_dev_lock(hdev);
 
 	if (!hdev_is_powered(hdev)) {
@@ -1061,6 +1069,10 @@
 
 	BT_DBG("request for %s", hdev->name);
 
+	if (!lmp_bredr_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	hci_dev_lock(hdev);
 
 	if (!hdev_is_powered(hdev)) {
@@ -1214,7 +1226,7 @@
 	}
 
 	val = !!cp->val;
-	enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
+	enabled = !!lmp_host_le_capable(hdev);
 
 	if (!hdev_is_powered(hdev) || val == enabled) {
 		bool changed = false;
@@ -1250,7 +1262,7 @@
 
 	if (val) {
 		hci_cp.le = val;
-		hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+		hci_cp.simul = !!lmp_le_br_capable(hdev);
 	}
 
 	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
@@ -2596,6 +2608,10 @@
 
 	BT_DBG("%s", hdev->name);
 
+	if (!lmp_bredr_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	if (!hdev_is_powered(hdev))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
 				  MGMT_STATUS_NOT_POWERED);
@@ -2873,6 +2889,21 @@
 	mgmt_pending_free(cmd);
 }
 
+static int set_bredr_scan(struct hci_dev *hdev)
+{
+	u8 scan = 0;
+
+	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+		scan |= SCAN_PAGE;
+	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+		scan |= SCAN_INQUIRY;
+
+	if (!scan)
+		return 0;
+
+	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
 int mgmt_powered(struct hci_dev *hdev, u8 powered)
 {
 	struct cmd_lookup match = { NULL, hdev };
@@ -2884,17 +2915,8 @@
 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
 	if (powered) {
-		u8 scan = 0;
-
-		if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			scan |= SCAN_PAGE;
-		if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-			scan |= SCAN_INQUIRY;
-
-		if (scan)
-			hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-
-		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+		    !lmp_host_ssp_capable(hdev)) {
 			u8 ssp = 1;
 
 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
@@ -2904,15 +2926,24 @@
 			struct hci_cp_write_le_host_supported cp;
 
 			cp.le = 1;
-			cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+			cp.simul = !!lmp_le_br_capable(hdev);
 
-			hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
-				     sizeof(cp), &cp);
+			/* Check first if we already have the right
+			 * host state (host features set)
+			 */
+			if (cp.le != !!lmp_host_le_capable(hdev) ||
+			    cp.simul != !!lmp_host_le_br_capable(hdev))
+				hci_send_cmd(hdev,
+					     HCI_OP_WRITE_LE_HOST_SUPPORTED,
+					     sizeof(cp), &cp);
 		}
 
-		update_class(hdev);
-		update_name(hdev, hdev->dev_name);
-		update_eir(hdev);
+		if (lmp_bredr_capable(hdev)) {
+			set_bredr_scan(hdev);
+			update_class(hdev);
+			update_name(hdev, hdev->dev_name);
+			update_eir(hdev);
+		}
 	} else {
 		u8 status = MGMT_STATUS_NOT_POWERED;
 		mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
@@ -3127,6 +3158,9 @@
 	struct pending_cmd *cmd;
 	int err;
 
+	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+			     hdev);
+
 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
 	if (!cmd)
 		return -ENOENT;
@@ -3139,8 +3173,6 @@
 
 	mgmt_pending_remove(cmd);
 
-	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
-			     hdev);
 	return err;
 }
 
@@ -3360,7 +3392,7 @@
 {
 	struct hci_cp_write_eir cp;
 
-	if (!(hdev->features[6] & LMP_EXT_INQ))
+	if (!lmp_ext_inq_capable(hdev))
 		return 0;
 
 	memset(hdev->eir, 0, sizeof(hdev->eir));
@@ -3492,7 +3524,12 @@
 		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
 				 sizeof(ev), cmd ? cmd->sk : NULL);
 
-	update_eir(hdev);
+	/* EIR is taken care of separately when powering on the
+	 * adapter, so only update it here if this is a name change
+	 * unrelated to power on.
+	 */
+	if (!test_bit(HCI_INIT, &hdev->flags))
+		update_eir(hdev);
 
 failed:
 	if (cmd)
@@ -3587,9 +3624,9 @@
 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
 	ev->rssi = rssi;
 	if (cfm_name)
-		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
+		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
 	if (!ssp)
-		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
+		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
 
 	if (eir_len > 0)
 		memcpy(ev->eir, eir, eir_len);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c75107e..201fdf7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -377,8 +377,8 @@
 	int err = 0;
 	u8 dlci;
 
-	BT_DBG("dlc %p state %ld %s %s channel %d",
-			d, d->state, batostr(src), batostr(dst), channel);
+	BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
+	       d, d->state, src, dst, channel);
 
 	if (channel < 1 || channel > 30)
 		return -EINVAL;
@@ -676,7 +676,7 @@
 	struct socket *sock;
 	struct sock *sk;
 
-	BT_DBG("%s %s", batostr(src), batostr(dst));
+	BT_DBG("%pMR -> %pMR", src, dst);
 
 	*err = rfcomm_l2sock_create(&sock);
 	if (*err < 0)
@@ -709,7 +709,7 @@
 
 	bacpy(&addr.l2_bdaddr, dst);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
+	addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid    = 0;
 	*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
 	if (*err == 0 || *err == -EINPROGRESS)
@@ -1987,7 +1987,7 @@
 	/* Bind socket */
 	bacpy(&addr.l2_bdaddr, ba);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
+	addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid    = 0;
 	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
 	if (err < 0) {
@@ -2125,11 +2125,10 @@
 		list_for_each_entry(d, &s->dlcs, list) {
 			struct sock *sk = s->sock->sk;
 
-			seq_printf(f, "%s %s %ld %d %d %d %d\n",
-						batostr(&bt_sk(sk)->src),
-						batostr(&bt_sk(sk)->dst),
-						d->state, d->dlci, d->mtu,
-						d->rx_credits, d->tx_credits);
+			seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
+				   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+				   d->state, d->dlci, d->mtu,
+				   d->rx_credits, d->tx_credits);
 		}
 	}
 
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index b3226f3..4ddef57 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -334,7 +334,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr));
+	BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
@@ -975,10 +975,9 @@
 	read_lock(&rfcomm_sk_list.lock);
 
 	sk_for_each(sk, node, &rfcomm_sk_list.head) {
-		seq_printf(f, "%s %s %d %d\n",
-				batostr(&bt_sk(sk)->src),
-				batostr(&bt_sk(sk)->dst),
-				sk->sk_state, rfcomm_pi(sk)->channel);
+		seq_printf(f, "%pMR %pMR %d %d\n",
+			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+			   sk->sk_state, rfcomm_pi(sk)->channel);
 	}
 
 	read_unlock(&rfcomm_sk_list.lock);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index ccc2487..bd6fd0f 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -166,7 +166,7 @@
 static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
 {
 	struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
-	return sprintf(buf, "%s\n", batostr(&dev->dst));
+	return sprintf(buf, "%pMR\n", &dev->dst);
 }
 
 static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
@@ -663,8 +663,8 @@
 	if (!dev)
 		return -ENODEV;
 
-	BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
-				dev->channel, dev->port.count);
+	BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+	       dev->channel, dev->port.count);
 
 	spin_lock_irqsave(&dev->port.lock, flags);
 	if (++dev->port.count > 1) {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dc42b91..450cdcd 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -172,7 +172,7 @@
 	struct hci_dev  *hdev;
 	int err, type;
 
-	BT_DBG("%s -> %s", batostr(src), batostr(dst));
+	BT_DBG("%pMR -> %pMR", src, dst);
 
 	hdev = hci_get_route(dst, src);
 	if (!hdev)
@@ -460,7 +460,7 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
+	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
@@ -893,7 +893,7 @@
 	struct hlist_node *node;
 	int lm = 0;
 
-	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
 
 	/* Find listening sockets */
 	read_lock(&sco_sk_list.lock);
@@ -914,7 +914,7 @@
 
 void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
-	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
+	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
 	if (!status) {
 		struct sco_conn *conn;
 
@@ -959,8 +959,8 @@
 	read_lock(&sco_sk_list.lock);
 
 	sk_for_each(sk, node, &sco_sk_list.head) {
-		seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
-			   batostr(&bt_sk(sk)->dst), sk->sk_state);
+		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
+			   &bt_sk(sk)->dst, sk->sk_state);
 	}
 
 	read_unlock(&sco_sk_list.lock);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index a592337..68a9587 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -167,7 +167,7 @@
 
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->len = cpu_to_le16(sizeof(code) + dlen);
-	lh->cid = cpu_to_le16(L2CAP_CID_SMP);
+	lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP);
 
 	memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 070e8a6..7c78e26 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -313,6 +313,8 @@
 	.ndo_fdb_add		 = br_fdb_add,
 	.ndo_fdb_del		 = br_fdb_delete,
 	.ndo_fdb_dump		 = br_fdb_dump,
+	.ndo_bridge_getlink	 = br_getlink,
+	.ndo_bridge_setlink	 = br_setlink,
 };
 
 static void br_dev_free(struct net_device *dev)
@@ -356,7 +358,7 @@
 	br->bridge_id.prio[0] = 0x80;
 	br->bridge_id.prio[1] = 0x00;
 
-	memcpy(br->group_addr, br_group_address, ETH_ALEN);
+	memcpy(br->group_addr, eth_reserved_addr_base, ETH_ALEN);
 
 	br->stp_enabled = BR_NO_STP;
 	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 76f15fd..4b34207 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -19,9 +19,6 @@
 #include <linux/export.h>
 #include "br_private.h"
 
-/* Bridge group multicast address 802.1d (pg 51). */
-const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-
 /* Hook for brouter */
 br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
 EXPORT_SYMBOL(br_should_route_hook);
@@ -127,18 +124,6 @@
 	return 0;	 /* process further */
 }
 
-/* Does address match the link local multicast address.
- * 01:80:c2:00:00:0X
- */
-static inline int is_link_local(const unsigned char *dest)
-{
-	__be16 *a = (__be16 *)dest;
-	static const __be16 *b = (const __be16 *)br_group_address;
-	static const __be16 m = cpu_to_be16(0xfff0);
-
-	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
-}
-
 /*
  * Return NULL if skb is handled
  * note: already called with rcu_read_lock
@@ -162,7 +147,7 @@
 
 	p = br_port_get_rcu(skb->dev);
 
-	if (unlikely(is_link_local(dest))) {
+	if (unlikely(is_link_local_ether_addr(dest))) {
 		/*
 		 * See IEEE 802.1D Table 7-10 Reserved addresses
 		 *
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 7222fe1..cd8c3a4 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -85,13 +85,14 @@
 /* called with RTNL */
 static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 {
+	struct net *net = dev_net(br->dev);
 	struct net_device *dev;
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
+	dev = __dev_get_by_index(net, ifindex);
 	if (dev == NULL)
 		return -EINVAL;
 
@@ -178,25 +179,25 @@
 	}
 
 	case BRCTL_SET_BRIDGE_FORWARD_DELAY:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		return br_set_forward_delay(br, args[1]);
 
 	case BRCTL_SET_BRIDGE_HELLO_TIME:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		return br_set_hello_time(br, args[1]);
 
 	case BRCTL_SET_BRIDGE_MAX_AGE:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		return br_set_max_age(br, args[1]);
 
 	case BRCTL_SET_AGEING_TIME:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		br->ageing_time = clock_t_to_jiffies(args[1]);
@@ -236,14 +237,14 @@
 	}
 
 	case BRCTL_SET_BRIDGE_STP_STATE:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		br_stp_set_enabled(br, args[1]);
 		return 0;
 
 	case BRCTL_SET_BRIDGE_PRIORITY:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		spin_lock_bh(&br->lock);
@@ -256,7 +257,7 @@
 		struct net_bridge_port *p;
 		int ret;
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		spin_lock_bh(&br->lock);
@@ -273,7 +274,7 @@
 		struct net_bridge_port *p;
 		int ret;
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		spin_lock_bh(&br->lock);
@@ -330,7 +331,7 @@
 	{
 		char buf[IFNAMSIZ];
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ))
@@ -360,7 +361,7 @@
 	{
 		char buf[IFNAMSIZ];
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		if (copy_from_user(buf, uarg, IFNAMSIZ))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 093f527..65429b9 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -20,16 +20,43 @@
 #include "br_private.h"
 #include "br_private_stp.h"
 
+static inline size_t br_port_info_size(void)
+{
+	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
+		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
+		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
+		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
+		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
+		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
+		+ 0;
+}
+
 static inline size_t br_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
-	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
-	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
-	       + nla_total_size(4) /* IFLA_MASTER */
-	       + nla_total_size(4) /* IFLA_MTU */
-	       + nla_total_size(4) /* IFLA_LINK */
-	       + nla_total_size(1) /* IFLA_OPERSTATE */
-	       + nla_total_size(1); /* IFLA_PROTINFO */
+		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+		+ nla_total_size(4) /* IFLA_MASTER */
+		+ nla_total_size(4) /* IFLA_MTU */
+		+ nla_total_size(4) /* IFLA_LINK */
+		+ nla_total_size(1) /* IFLA_OPERSTATE */
+		+ nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
+}
+
+static int br_port_fill_attrs(struct sk_buff *skb,
+			      const struct net_bridge_port *p)
+{
+	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
+
+	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
+	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
+	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
+	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
+	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
+	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)))
+		return -EMSGSIZE;
+
+	return 0;
 }
 
 /*
@@ -67,10 +94,18 @@
 	    (dev->addr_len &&
 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
 	    (dev->ifindex != dev->iflink &&
-	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
-	    (event == RTM_NEWLINK &&
-	     nla_put_u8(skb, IFLA_PROTINFO, port->state)))
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
 		goto nla_put_failure;
+
+	if (event == RTM_NEWLINK) {
+		struct nlattr *nest
+			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
+
+		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
+			goto nla_put_failure;
+		nla_nest_end(skb, nest);
+	}
+
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -111,91 +146,135 @@
 /*
  * Dump information about all ports, in response to GETLINK
  */
-static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+	       struct net_device *dev)
 {
-	struct net *net = sock_net(skb->sk);
-	struct net_device *dev;
-	int idx;
+	int err = 0;
+	struct net_bridge_port *port = br_port_get_rcu(dev);
 
-	idx = 0;
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
-		struct net_bridge_port *port = br_port_get_rcu(dev);
+	/* not a bridge port */
+	if (!port)
+		goto out;
 
-		/* not a bridge port */
-		if (!port || idx < cb->args[0])
-			goto skip;
-
-		if (br_fill_ifinfo(skb, port,
-				   NETLINK_CB(cb->skb).portid,
-				   cb->nlh->nlmsg_seq, RTM_NEWLINK,
-				   NLM_F_MULTI) < 0)
-			break;
-skip:
-		++idx;
-	}
-	rcu_read_unlock();
-	cb->args[0] = idx;
-
-	return skb->len;
+	err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI);
+out:
+	return err;
 }
 
-/*
- * Change state of port (ie from forwarding to blocking etc)
- * Used by spanning tree in user space.
- */
-static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
+static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
+	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
+	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
+	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
+};
+
+/* Change the state of the port and notify spanning tree */
+static int br_set_port_state(struct net_bridge_port *p, u8 state)
 {
-	struct net *net = sock_net(skb->sk);
-	struct ifinfomsg *ifm;
-	struct nlattr *protinfo;
-	struct net_device *dev;
-	struct net_bridge_port *p;
-	u8 new_state;
-
-	if (nlmsg_len(nlh) < sizeof(*ifm))
-		return -EINVAL;
-
-	ifm = nlmsg_data(nlh);
-	if (ifm->ifi_family != AF_BRIDGE)
-		return -EPFNOSUPPORT;
-
-	protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
-	if (!protinfo || nla_len(protinfo) < sizeof(u8))
-		return -EINVAL;
-
-	new_state = nla_get_u8(protinfo);
-	if (new_state > BR_STATE_BLOCKING)
-		return -EINVAL;
-
-	dev = __dev_get_by_index(net, ifm->ifi_index);
-	if (!dev)
-		return -ENODEV;
-
-	p = br_port_get_rtnl(dev);
-	if (!p)
+	if (state > BR_STATE_BLOCKING)
 		return -EINVAL;
 
 	/* if kernel STP is running, don't allow changes */
 	if (p->br->stp_enabled == BR_KERNEL_STP)
 		return -EBUSY;
 
-	if (!netif_running(dev) ||
-	    (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED))
+	if (!netif_running(p->dev) ||
+	    (!netif_carrier_ok(p->dev) && state != BR_STATE_DISABLED))
 		return -ENETDOWN;
 
-	p->state = new_state;
+	p->state = state;
 	br_log_state(p);
-
-	spin_lock_bh(&p->br->lock);
 	br_port_state_selection(p->br);
-	spin_unlock_bh(&p->br->lock);
-
-	br_ifinfo_notify(RTM_NEWLINK, p);
-
 	return 0;
 }
 
+/* Set/clear port flags based on attribute */
+static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
+			   int attrtype, unsigned long mask)
+{
+	if (tb[attrtype]) {
+		u8 flag = nla_get_u8(tb[attrtype]);
+		if (flag)
+			p->flags |= mask;
+		else
+			p->flags &= ~mask;
+	}
+}
+
+/* Process bridge protocol info on port */
+static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
+{
+	int err;
+
+	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
+	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
+
+	if (tb[IFLA_BRPORT_COST]) {
+		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
+		if (err)
+			return err;
+	}
+
+	if (tb[IFLA_BRPORT_PRIORITY]) {
+		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
+		if (err)
+			return err;
+	}
+
+	if (tb[IFLA_BRPORT_STATE]) {
+		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/* Change state and parameters on port. */
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
+{
+	struct ifinfomsg *ifm;
+	struct nlattr *protinfo;
+	struct net_bridge_port *p;
+	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
+	int err;
+
+	ifm = nlmsg_data(nlh);
+
+	protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
+	if (!protinfo)
+		return 0;
+
+	p = br_port_get_rtnl(dev);
+	if (!p)
+		return -EINVAL;
+
+	if (protinfo->nla_type & NLA_F_NESTED) {
+		err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
+				       protinfo, ifla_brport_policy);
+		if (err)
+			return err;
+
+		spin_lock_bh(&p->br->lock);
+		err = br_setport(p, tb);
+		spin_unlock_bh(&p->br->lock);
+	} else {
+		/* Binary compatibility with old RSTP */
+		if (nla_len(protinfo) < sizeof(u8))
+			return -EINVAL;
+
+		spin_lock_bh(&p->br->lock);
+		err = br_set_port_state(p, nla_get_u8(protinfo));
+		spin_unlock_bh(&p->br->lock);
+	}
+
+	if (err == 0)
+		br_ifinfo_notify(RTM_NEWLINK, p);
+
+	return err;
+}
+
 static int br_validate(struct nlattr *tb[], struct nlattr *data[])
 {
 	if (tb[IFLA_ADDRESS]) {
@@ -218,29 +297,7 @@
 
 int __init br_netlink_init(void)
 {
-	int err;
-
-	err = rtnl_link_register(&br_link_ops);
-	if (err < 0)
-		goto err1;
-
-	err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL,
-			      br_dump_ifinfo, NULL);
-	if (err)
-		goto err2;
-	err = __rtnl_register(PF_BRIDGE, RTM_SETLINK,
-			      br_rtm_setlink, NULL, NULL);
-	if (err)
-		goto err3;
-
-	return 0;
-
-err3:
-	rtnl_unregister_all(PF_BRIDGE);
-err2:
-	rtnl_link_unregister(&br_link_ops);
-err1:
-	return err;
+	return rtnl_link_register(&br_link_ops);
 }
 
 void __exit br_netlink_fini(void)
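
br_setlink() above accepts either the legacy bare u8 port state in IFLA_PROTINFO or, when NLA_F_NESTED is set, a nested block of IFLA_BRPORT_* attributes parsed against ifla_brport_policy. For reference, a kernel-side sketch of how such a nested block is emitted, mirroring br_fill_ifinfo() above; fill_port_protinfo() is a hypothetical helper, not part of the patch:

static int fill_port_protinfo(struct sk_buff *skb, u8 state, u8 bpdu_guard)
{
	struct nlattr *nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, state) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, bpdu_guard)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}
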
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 9b278c4..eb9cd42 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -135,6 +135,8 @@
 
 	unsigned long 			flags;
 #define BR_HAIRPIN_MODE		0x00000001
+#define BR_BPDU_GUARD           0x00000002
+#define BR_ROOT_BLOCK		0x00000004
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	u32				multicast_startup_queries_sent;
@@ -158,7 +160,9 @@
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
 {
-	struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
+	struct net_bridge_port *port =
+			rcu_dereference_rtnl(dev->rx_handler_data);
+
 	return br_port_exists(dev) ? port : NULL;
 }
 
@@ -288,7 +292,6 @@
 	pr_debug("%s: " format,  (br)->dev->name, ##args)
 
 extern struct notifier_block br_device_notifier;
-extern const u8 br_group_address[ETH_ALEN];
 
 /* called under bridge lock */
 static inline int br_is_root_bridge(const struct net_bridge *br)
@@ -553,6 +556,9 @@
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
+extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+		      struct net_device *dev);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
@@ -566,10 +572,10 @@
 
 #else
 
-#define br_sysfs_addif(p)	(0)
-#define br_sysfs_renameif(p)	(0)
-#define br_sysfs_addbr(dev)	(0)
-#define br_sysfs_delbr(dev)	do { } while(0)
+static inline int br_sysfs_addif(struct net_bridge_port *p) { return 0; }
+static inline int br_sysfs_renameif(struct net_bridge_port *p) { return 0; }
+static inline int br_sysfs_addbr(struct net_device *dev) { return 0; }
+static inline void br_sysfs_delbr(struct net_device *dev) { return; }
 #endif /* CONFIG_SYSFS */
 
 #endif
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index af9a120..b01849a 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -100,6 +100,21 @@
 	return 0;
 }
 
+static void br_root_port_block(const struct net_bridge *br,
+			       struct net_bridge_port *p)
+{
+
+	br_notice(br, "port %u(%s) tried to become root port (blocked)",
+		  (unsigned int) p->port_no, p->dev->name);
+
+	p->state = BR_STATE_LISTENING;
+	br_log_state(p);
+	br_ifinfo_notify(RTM_NEWLINK, p);
+
+	if (br->forward_delay > 0)
+		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
+}
+
 /* called under bridge lock */
 static void br_root_selection(struct net_bridge *br)
 {
@@ -107,7 +122,12 @@
 	u16 root_port = 0;
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (br_should_become_root_port(p, root_port))
+		if (!br_should_become_root_port(p, root_port))
+			continue;
+
+		if (p->flags & BR_ROOT_BLOCK)
+			br_root_port_block(br, p);
+		else
 			root_port = p->port_no;
 	}
 
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index fd30a60..7f884e3 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -170,6 +170,13 @@
 	if (!ether_addr_equal(dest, br->group_addr))
 		goto out;
 
+	if (p->flags & BR_BPDU_GUARD) {
+		br_notice(br, "BPDU received on blocked port %u(%s)\n",
+			  (unsigned int) p->port_no, p->dev->name);
+		br_stp_disable_port(p);
+		goto out;
+	}
+
 	buf = skb_pull(skb, 3);
 
 	if (buf[0] == BPDU_TYPE_CONFIG) {
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c5c0593..5913a3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -14,6 +14,7 @@
 #include <linux/capability.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
 #include <linux/rtnetlink.h>
 #include <linux/spinlock.h>
@@ -36,7 +37,7 @@
 	unsigned long val;
 	int err;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	val = simple_strtoul(buf, &endp, 0);
@@ -132,7 +133,7 @@
 	char *endp;
 	unsigned long val;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	val = simple_strtoul(buf, &endp, 0);
@@ -165,7 +166,7 @@
 	char *endp;
 	unsigned long val;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	val = simple_strtoul(buf, &endp, 0);
@@ -297,23 +298,18 @@
 				const char *buf, size_t len)
 {
 	struct net_bridge *br = to_bridge(d);
-	unsigned int new_addr[6];
+	u8 new_addr[6];
 	int i;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (sscanf(buf, "%x:%x:%x:%x:%x:%x",
+	if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
 		   &new_addr[0], &new_addr[1], &new_addr[2],
 		   &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
 		return -EINVAL;
 
-	/* Must be 01:80:c2:00:00:0X */
-	for (i = 0; i < 5; i++)
-		if (new_addr[i] != br_group_address[i])
-			return -EINVAL;
-
-	if (new_addr[5] & ~0xf)
+	if (!is_link_local_ether_addr(new_addr))
 		return -EINVAL;
 
 	if (new_addr[5] == 1 ||		/* 802.3x Pause address */
@@ -337,7 +333,7 @@
 {
 	struct net_bridge *br = to_bridge(d);
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	br_fdb_flush(br);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 13b36bd..7ff95ba 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -34,6 +34,28 @@
 	.store	= _store,					\
 };
 
+#define BRPORT_ATTR_FLAG(_name, _mask)				\
+static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
+{								\
+	return sprintf(buf, "%d\n", !!(p->flags & _mask));	\
+}								\
+static int store_##_name(struct net_bridge_port *p, unsigned long v) \
+{								\
+	unsigned long flags = p->flags;				\
+	if (v)							\
+		flags |= _mask;					\
+	else							\
+		flags &= ~_mask;				\
+	if (flags != p->flags) {				\
+		p->flags = flags;				\
+		br_ifinfo_notify(RTM_NEWLINK, p);		\
+	}							\
+	return 0;						\
+}								\
+static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR,			\
+		   show_##_name, store_##_name)
+
+
 static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
 {
 	return sprintf(buf, "%d\n", p->path_cost);
@@ -133,21 +155,9 @@
 }
 static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
 
-static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
-{
-	int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
-	return sprintf(buf, "%d\n", hairpin_mode);
-}
-static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
-{
-	if (v)
-		p->flags |= BR_HAIRPIN_MODE;
-	else
-		p->flags &= ~BR_HAIRPIN_MODE;
-	return 0;
-}
-static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
-		   show_hairpin_mode, store_hairpin_mode);
+BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
+BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
+BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -181,6 +191,8 @@
 	&brport_attr_hold_timer,
 	&brport_attr_flush,
 	&brport_attr_hairpin_mode,
+	&brport_attr_bpdu_guard,
+	&brport_attr_root_block,
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	&brport_attr_multicast_router,
 #endif
@@ -209,7 +221,7 @@
 	char *endp;
 	unsigned long val;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	val = simple_strtoul(buf, &endp, 0);
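
For reference, BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD) above expands to
roughly the pair of handlers below (plus the BRPORT_ATTR() declaration), which
is why the open-coded hairpin_mode handlers can be removed:

static ssize_t show_bpdu_guard(struct net_bridge_port *p, char *buf)
{
	return sprintf(buf, "%d\n", !!(p->flags & BR_BPDU_GUARD));
}

static int store_bpdu_guard(struct net_bridge_port *p, unsigned long v)
{
	unsigned long flags = p->flags;

	if (v)
		flags |= BR_BPDU_GUARD;
	else
		flags &= ~BR_BPDU_GUARD;

	if (flags != p->flags) {
		p->flags = flags;
		br_ifinfo_notify(RTM_NEWLINK, p);	/* tell listeners the flag changed */
	}
	return 0;
}
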
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 44f270f..a376ec1 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -515,8 +515,7 @@
 							  client_layer : NULL);
 			}
 
-			if (req != NULL)
-				kfree(req);
+			kfree(req);
 
 			spin_unlock_bh(&cfctrl->info_list_lock);
 		}
diff --git a/net/can/gw.c b/net/can/gw.c
index 1f5c978..574dda78e 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -751,6 +751,9 @@
 	struct cgw_job *gwj;
 	int err = 0;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (nlmsg_len(nlh) < sizeof(*r))
 		return -EINVAL;
 
@@ -839,6 +842,9 @@
 	struct can_can_gw ccgw;
 	int err = 0;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (nlmsg_len(nlh) < sizeof(*r))
 		return -EINVAL;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index c0946cb..2a5f5586 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -176,8 +176,10 @@
 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
 
 static DEFINE_SPINLOCK(ptype_lock);
+static DEFINE_SPINLOCK(offload_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
+static struct list_head offload_base __read_mostly;
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -201,6 +203,8 @@
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
+DEFINE_SEQLOCK(devnet_rename_seq);
+
 static inline void dev_base_seq_inc(struct net *net)
 {
 	while (++net->dev_base_seq == 0);
@@ -470,6 +474,82 @@
 }
 EXPORT_SYMBOL(dev_remove_pack);
 
+
+/**
+ *	dev_add_offload - register offload handlers
+ *	@po: protocol offload declaration
+ *
+ *	Add protocol offload handlers to the networking stack. The passed
+ *	&packet_offload is linked into kernel lists and may not be freed
+ *	until it has been removed from the kernel lists.
+ *
+ *	This call does not sleep and therefore cannot guarantee that all
+ *	CPUs currently in the middle of receiving packets will see the new
+ *	offload handlers (until the next received packet).
+ */
+void dev_add_offload(struct packet_offload *po)
+{
+	struct list_head *head = &offload_base;
+
+	spin_lock(&offload_lock);
+	list_add_rcu(&po->list, head);
+	spin_unlock(&offload_lock);
+}
+EXPORT_SYMBOL(dev_add_offload);
+
+/**
+ *	__dev_remove_offload	 - remove offload handler
+ *	@po: packet offload declaration
+ *
+ *	Remove a protocol offload handler that was previously added to the
+ *	kernel offload handlers by dev_add_offload(). The passed
+ *	&packet_offload is removed from the kernel lists and can be freed
+ *	or reused once this function returns.
+ *
+ *	The packet offload might still be in use by receivers
+ *	and must not be freed until after all the CPUs have gone
+ *	through a quiescent state.
+ */
+void __dev_remove_offload(struct packet_offload *po)
+{
+	struct list_head *head = &offload_base;
+	struct packet_offload *po1;
+
+	spin_lock(&offload_lock);
+
+	list_for_each_entry(po1, head, list) {
+		if (po == po1) {
+			list_del_rcu(&po->list);
+			goto out;
+		}
+	}
+
+	pr_warn("dev_remove_offload: %p not found\n", po);
+out:
+	spin_unlock(&offload_lock);
+}
+EXPORT_SYMBOL(__dev_remove_offload);
+
+/**
+ *	dev_remove_offload	 - remove packet offload handler
+ *	@po: packet offload declaration
+ *
+ *	Remove a packet offload handler that was previously added to the kernel
+ *	offload handlers by dev_add_offload(). The passed &packet_offload is
+ *	removed from the kernel lists and can be freed or reused once this
+ *	function returns.
+ *
+ *	This call sleeps to guarantee that no CPU is looking at the packet
+ *	type after return.
+ */
+void dev_remove_offload(struct packet_offload *po)
+{
+	__dev_remove_offload(po);
+
+	synchronize_net();
+}
+EXPORT_SYMBOL(dev_remove_offload);
+
 /******************************************************************************
 
 		      Device Boot-time Settings Routines
@@ -1013,22 +1093,31 @@
 	if (dev->flags & IFF_UP)
 		return -EBUSY;
 
-	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+	write_seqlock(&devnet_rename_seq);
+
+	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+		write_sequnlock(&devnet_rename_seq);
 		return 0;
+	}
 
 	memcpy(oldname, dev->name, IFNAMSIZ);
 
 	err = dev_get_valid_name(net, dev, newname);
-	if (err < 0)
+	if (err < 0) {
+		write_sequnlock(&devnet_rename_seq);
 		return err;
+	}
 
 rollback:
 	ret = device_rename(&dev->dev, dev->name);
 	if (ret) {
 		memcpy(dev->name, oldname, IFNAMSIZ);
+		write_sequnlock(&devnet_rename_seq);
 		return ret;
 	}
 
+	write_sequnlock(&devnet_rename_seq);
+
 	write_lock_bh(&dev_base_lock);
 	hlist_del_rcu(&dev->name_hlist);
 	write_unlock_bh(&dev_base_lock);
@@ -1046,6 +1135,7 @@
 		/* err >= 0 after dev_alloc_name() or stores the first errno */
 		if (err >= 0) {
 			err = ret;
+			write_seqlock(&devnet_rename_seq);
 			memcpy(dev->name, oldname, IFNAMSIZ);
 			goto rollback;
 		} else {
@@ -1075,10 +1165,8 @@
 		return -EINVAL;
 
 	if (!len) {
-		if (dev->ifalias) {
-			kfree(dev->ifalias);
-			dev->ifalias = NULL;
-		}
+		kfree(dev->ifalias);
+		dev->ifalias = NULL;
 		return 0;
 	}
 
@@ -1994,7 +2082,7 @@
 	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-	struct packet_type *ptype;
+	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
 	int vlan_depth = ETH_HLEN;
 	int err;
@@ -2023,18 +2111,17 @@
 	}
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype,
-			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+	list_for_each_entry_rcu(ptype, &offload_base, list) {
+		if (ptype->type == type && ptype->callbacks.gso_segment) {
 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-				err = ptype->gso_send_check(skb);
+				err = ptype->callbacks.gso_send_check(skb);
 				segs = ERR_PTR(err);
 				if (err || skb_gso_ok(skb, features))
 					break;
 				__skb_push(skb, (skb->data -
 						 skb_network_header(skb)));
 			}
-			segs = ptype->gso_segment(skb, features);
+			segs = ptype->callbacks.gso_segment(skb, features);
 			break;
 		}
 	}
@@ -3446,9 +3533,9 @@
 
 static int napi_gro_complete(struct sk_buff *skb)
 {
-	struct packet_type *ptype;
+	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
-	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+	struct list_head *head = &offload_base;
 	int err = -ENOENT;
 
 	if (NAPI_GRO_CB(skb)->count == 1) {
@@ -3458,10 +3545,10 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
+		if (ptype->type != type || !ptype->callbacks.gro_complete)
 			continue;
 
-		err = ptype->gro_complete(skb);
+		err = ptype->callbacks.gro_complete(skb);
 		break;
 	}
 	rcu_read_unlock();
@@ -3508,9 +3595,9 @@
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
-	struct packet_type *ptype;
+	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
-	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+	struct list_head *head = &offload_base;
 	int same_flow;
 	int mac_len;
 	enum gro_result ret;
@@ -3523,7 +3610,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
+		if (ptype->type != type || !ptype->callbacks.gro_receive)
 			continue;
 
 		skb_set_network_header(skb, skb_gro_offset(skb));
@@ -3533,7 +3620,7 @@
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 
-		pp = ptype->gro_receive(&napi->gro_list, skb);
+		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
 		break;
 	}
 	rcu_read_unlock();
@@ -4073,6 +4160,7 @@
 {
 	struct net_device *dev;
 	struct ifreq ifr;
+	unsigned seq;
 
 	/*
 	 *	Fetch the caller's info block.
@@ -4081,6 +4169,8 @@
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
+retry:
+	seq = read_seqbegin(&devnet_rename_seq);
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
 	if (!dev) {
@@ -4090,6 +4180,8 @@
 
 	strcpy(ifr.ifr_name, dev->name);
 	rcu_read_unlock();
+	if (read_seqretry(&devnet_rename_seq, seq))
+		goto retry;
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 		return -EFAULT;
@@ -5202,7 +5294,7 @@
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSIFNAME:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 		dev_load(net, ifr.ifr_name);
 		rtnl_lock();
@@ -5223,16 +5315,25 @@
 	 *	- require strict serialization.
 	 *	- do not return a value
 	 */
+	case SIOCSIFMAP:
+	case SIOCSIFTXQLEN:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		/* fall through */
+	/*
+	 *	These ioctl calls:
+	 *	- require local superuser power.
+	 *	- require strict serialization.
+	 *	- do not return a value
+	 */
 	case SIOCSIFFLAGS:
 	case SIOCSIFMETRIC:
 	case SIOCSIFMTU:
-	case SIOCSIFMAP:
 	case SIOCSIFHWADDR:
 	case SIOCSIFSLAVE:
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
 	case SIOCSIFHWBROADCAST:
-	case SIOCSIFTXQLEN:
 	case SIOCSMIIREG:
 	case SIOCBONDENSLAVE:
 	case SIOCBONDRELEASE:
@@ -5241,7 +5342,7 @@
 	case SIOCBRADDIF:
 	case SIOCBRDELIF:
 	case SIOCSHWTSTAMP:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 		/* fall through */
 	case SIOCBONDSLAVEINFOQUERY:
@@ -6266,7 +6367,6 @@
 		goto out;
 
 	/* Ensure the device has been registered */
-	err = -EINVAL;
 	if (dev->reg_state != NETREG_REGISTERED)
 		goto out;
 
@@ -6664,6 +6764,8 @@
 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
 		INIT_LIST_HEAD(&ptype_base[i]);
 
+	INIT_LIST_HEAD(&offload_base);
+
 	if (register_pernet_subsys(&netdev_net_ops))
 		goto out;
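
A minimal sketch of registering handlers on the new offload list from a
module; the my_* names, the 0x88b5 ethertype and the stub callback are
placeholders, not part of this patch (the in-tree user is the
ip_packet_offload registration added to net/ipv4/af_inet.c below):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/err.h>

/* Stub segmentation callback: declines to segment anything. */
static struct sk_buff *my_gso_segment(struct sk_buff *skb,
				      netdev_features_t features)
{
	return ERR_PTR(-EPROTONOSUPPORT);
}

static struct packet_offload my_offload __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* placeholder ethertype */
	.callbacks = {
		.gso_segment = my_gso_segment,
	},
};

static int __init my_offload_init(void)
{
	dev_add_offload(&my_offload);
	return 0;
}
module_init(my_offload_init);

static void __exit my_offload_exit(void)
{
	/* dev_remove_offload() ends with synchronize_net(), so the
	 * structure (and this module) may go away safely afterwards. */
	dev_remove_offload(&my_offload);
}
module_exit(my_offload_exit);

MODULE_LICENSE("GPL");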
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4d64cc2..a870543 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1460,7 +1460,7 @@
 	case ETHTOOL_GEEE:
 		break;
 	default:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 	}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 3d92ebb..c23543c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,6 +39,7 @@
 #include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
+#include <linux/if_vlan.h>
 
 /* No hurry in this branch
  *
@@ -341,6 +342,12 @@
 		case BPF_S_ANC_CPU:
 			A = raw_smp_processor_id();
 			continue;
+		case BPF_S_ANC_VLAN_TAG:
+			A = vlan_tx_tag_get(skb);
+			continue;
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
+			A = !!vlan_tx_tag_present(skb);
+			continue;
 		case BPF_S_ANC_NLATTR: {
 			struct nlattr *nla;
 
@@ -600,6 +607,8 @@
 			ANCILLARY(RXHASH);
 			ANCILLARY(CPU);
 			ANCILLARY(ALU_XOR_X);
+			ANCILLARY(VLAN_TAG);
+			ANCILLARY(VLAN_TAG_PRESENT);
 			}
 		}
 		ftest->code = code;
@@ -751,3 +760,133 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
+
+static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+{
+	static const u16 decodes[] = {
+		[BPF_S_ALU_ADD_K]	= BPF_ALU|BPF_ADD|BPF_K,
+		[BPF_S_ALU_ADD_X]	= BPF_ALU|BPF_ADD|BPF_X,
+		[BPF_S_ALU_SUB_K]	= BPF_ALU|BPF_SUB|BPF_K,
+		[BPF_S_ALU_SUB_X]	= BPF_ALU|BPF_SUB|BPF_X,
+		[BPF_S_ALU_MUL_K]	= BPF_ALU|BPF_MUL|BPF_K,
+		[BPF_S_ALU_MUL_X]	= BPF_ALU|BPF_MUL|BPF_X,
+		[BPF_S_ALU_DIV_X]	= BPF_ALU|BPF_DIV|BPF_X,
+		[BPF_S_ALU_MOD_K]	= BPF_ALU|BPF_MOD|BPF_K,
+		[BPF_S_ALU_MOD_X]	= BPF_ALU|BPF_MOD|BPF_X,
+		[BPF_S_ALU_AND_K]	= BPF_ALU|BPF_AND|BPF_K,
+		[BPF_S_ALU_AND_X]	= BPF_ALU|BPF_AND|BPF_X,
+		[BPF_S_ALU_OR_K]	= BPF_ALU|BPF_OR|BPF_K,
+		[BPF_S_ALU_OR_X]	= BPF_ALU|BPF_OR|BPF_X,
+		[BPF_S_ALU_XOR_K]	= BPF_ALU|BPF_XOR|BPF_K,
+		[BPF_S_ALU_XOR_X]	= BPF_ALU|BPF_XOR|BPF_X,
+		[BPF_S_ALU_LSH_K]	= BPF_ALU|BPF_LSH|BPF_K,
+		[BPF_S_ALU_LSH_X]	= BPF_ALU|BPF_LSH|BPF_X,
+		[BPF_S_ALU_RSH_K]	= BPF_ALU|BPF_RSH|BPF_K,
+		[BPF_S_ALU_RSH_X]	= BPF_ALU|BPF_RSH|BPF_X,
+		[BPF_S_ALU_NEG]		= BPF_ALU|BPF_NEG,
+		[BPF_S_LD_W_ABS]	= BPF_LD|BPF_W|BPF_ABS,
+		[BPF_S_LD_H_ABS]	= BPF_LD|BPF_H|BPF_ABS,
+		[BPF_S_LD_B_ABS]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_PROTOCOL]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_PKTTYPE]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_IFINDEX]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_NLATTR]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_NLATTR_NEST]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_MARK]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_QUEUE]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_HATYPE]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_RXHASH]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_CPU]		= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_ALU_XOR_X]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_VLAN_TAG]	= BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
+		[BPF_S_LD_W_LEN]	= BPF_LD|BPF_W|BPF_LEN,
+		[BPF_S_LD_W_IND]	= BPF_LD|BPF_W|BPF_IND,
+		[BPF_S_LD_H_IND]	= BPF_LD|BPF_H|BPF_IND,
+		[BPF_S_LD_B_IND]	= BPF_LD|BPF_B|BPF_IND,
+		[BPF_S_LD_IMM]		= BPF_LD|BPF_IMM,
+		[BPF_S_LDX_W_LEN]	= BPF_LDX|BPF_W|BPF_LEN,
+		[BPF_S_LDX_B_MSH]	= BPF_LDX|BPF_B|BPF_MSH,
+		[BPF_S_LDX_IMM]		= BPF_LDX|BPF_IMM,
+		[BPF_S_MISC_TAX]	= BPF_MISC|BPF_TAX,
+		[BPF_S_MISC_TXA]	= BPF_MISC|BPF_TXA,
+		[BPF_S_RET_K]		= BPF_RET|BPF_K,
+		[BPF_S_RET_A]		= BPF_RET|BPF_A,
+		[BPF_S_ALU_DIV_K]	= BPF_ALU|BPF_DIV|BPF_K,
+		[BPF_S_LD_MEM]		= BPF_LD|BPF_MEM,
+		[BPF_S_LDX_MEM]		= BPF_LDX|BPF_MEM,
+		[BPF_S_ST]		= BPF_ST,
+		[BPF_S_STX]		= BPF_STX,
+		[BPF_S_JMP_JA]		= BPF_JMP|BPF_JA,
+		[BPF_S_JMP_JEQ_K]	= BPF_JMP|BPF_JEQ|BPF_K,
+		[BPF_S_JMP_JEQ_X]	= BPF_JMP|BPF_JEQ|BPF_X,
+		[BPF_S_JMP_JGE_K]	= BPF_JMP|BPF_JGE|BPF_K,
+		[BPF_S_JMP_JGE_X]	= BPF_JMP|BPF_JGE|BPF_X,
+		[BPF_S_JMP_JGT_K]	= BPF_JMP|BPF_JGT|BPF_K,
+		[BPF_S_JMP_JGT_X]	= BPF_JMP|BPF_JGT|BPF_X,
+		[BPF_S_JMP_JSET_K]	= BPF_JMP|BPF_JSET|BPF_K,
+		[BPF_S_JMP_JSET_X]	= BPF_JMP|BPF_JSET|BPF_X,
+	};
+	u16 code;
+
+	code = filt->code;
+
+	to->code = decodes[code];
+	to->jt = filt->jt;
+	to->jf = filt->jf;
+
+	if (code == BPF_S_ALU_DIV_K) {
+		/*
+		 * When loaded this rule user gave us X, which was
+		 * translated into R = r(X). Now we calculate the
+		 * RR = r(R) and report it back. If next time this
+		 * value is loaded and RRR = r(RR) is calculated
+		 * then the R == RRR will be true.
+		 *
+		 * One exception. X == 1 translates into R == 0 and
+		 * we can't calculate RR out of it with r().
+		 */
+
+		if (filt->k == 0)
+			to->k = 1;
+		else
+			to->k = reciprocal_value(filt->k);
+
+		BUG_ON(reciprocal_value(to->k) != filt->k);
+	} else
+		to->k = filt->k;
+}
+
+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
+{
+	struct sk_filter *filter;
+	int i, ret;
+
+	lock_sock(sk);
+	filter = rcu_dereference_protected(sk->sk_filter,
+			sock_owned_by_user(sk));
+	ret = 0;
+	if (!filter)
+		goto out;
+	ret = filter->len;
+	if (!len)
+		goto out;
+	ret = -EINVAL;
+	if (len < filter->len)
+		goto out;
+
+	ret = -EFAULT;
+	for (i = 0; i < filter->len; i++) {
+		struct sock_filter fb;
+
+		sk_decode_filter(&filter->insns[i], &fb);
+		if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
+			goto out;
+	}
+
+	ret = filter->len;
+out:
+	release_sock(sk);
+	return ret;
+}
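
sk_get_filter() above backs the new SO_GET_FILTER getsockopt() (wired up in
the net/core/sock.c hunk below). A userspace sketch of reading a filter back;
note that the length exchanged through optlen is a count of sock_filter
instructions, not bytes:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_GET_FILTER
#define SO_GET_FILTER SO_ATTACH_FILTER	/* same option value in the uapi */
#endif

static int dump_filter(int fd)
{
	struct sock_filter *insns;
	socklen_t count = 0;
	unsigned int i;

	/* A zero length returns the number of instructions in the filter. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &count) < 0)
		return -errno;
	if (count == 0)
		return 0;	/* no filter attached */

	insns = calloc(count, sizeof(*insns));
	if (!insns)
		return -ENOMEM;

	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &count) < 0) {
		free(insns);
		return -errno;
	}

	for (i = 0; i < count; i++)
		printf("{ 0x%02x, %u, %u, 0x%08x }\n", insns[i].code,
		       insns[i].jt, insns[i].jf, insns[i].k);
	free(insns);
	return 0;
}
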
diff --git a/net/core/flow.c b/net/core/flow.c
index e318c7e..b0901ee 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -327,11 +327,9 @@
 static void flow_cache_flush_per_cpu(void *data)
 {
 	struct flow_flush_info *info = data;
-	int cpu;
 	struct tasklet_struct *tasklet;
 
-	cpu = smp_processor_id();
-	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
+	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
 	tasklet->data = (unsigned long)info;
 	tasklet_schedule(tasklet);
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2257148..f1c0c2e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2987,6 +2987,10 @@
 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
 	}
 
+	/* Don't export sysctls to unprivileged users */
+	if (neigh_parms_net(p)->user_ns != &init_user_ns)
+		t->neigh_vars[0].procname = NULL;
+
 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
 		p_name, dev_name_source);
 	t->sysctl_header =
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 017a8ba..334efd5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -18,11 +18,9 @@
 #include <net/sock.h>
 #include <net/net_namespace.h>
 #include <linux/rtnetlink.h>
-#include <linux/wireless.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <linux/jiffies.h>
-#include <net/wext.h>
 
 #include "net-sysfs.h"
 
@@ -73,11 +71,12 @@
 			    const char *buf, size_t len,
 			    int (*set)(struct net_device *, unsigned long))
 {
-	struct net_device *net = to_net_dev(dev);
+	struct net_device *netdev = to_net_dev(dev);
+	struct net *net = dev_net(netdev);
 	unsigned long new;
 	int ret = -EINVAL;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	ret = kstrtoul(buf, 0, &new);
@@ -87,8 +86,8 @@
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (dev_isalive(net)) {
-		if ((ret = (*set)(net, new)) == 0)
+	if (dev_isalive(netdev)) {
+		if ((ret = (*set)(netdev, new)) == 0)
 			ret = len;
 	}
 	rtnl_unlock();
@@ -264,6 +263,9 @@
 				  struct device_attribute *attr,
 				  const char *buf, size_t len)
 {
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
 }
 
@@ -271,10 +273,11 @@
 			     const char *buf, size_t len)
 {
 	struct net_device *netdev = to_net_dev(dev);
+	struct net *net = dev_net(netdev);
 	size_t count = len;
 	ssize_t ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	/* ignore trailing newline */
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 42f1e1c..6456439 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -13,6 +13,7 @@
 #include <linux/proc_fs.h>
 #include <linux/file.h>
 #include <linux/export.h>
+#include <linux/user_namespace.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
@@ -145,7 +146,7 @@
 /*
  * setup_net runs the initializers for the network namespace object.
  */
-static __net_init int setup_net(struct net *net)
+static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 {
 	/* Must be called with net_mutex held */
 	const struct pernet_operations *ops, *saved_ops;
@@ -155,6 +156,7 @@
 	atomic_set(&net->count, 1);
 	atomic_set(&net->passive, 1);
 	net->dev_base_seq = 1;
+	net->user_ns = user_ns;
 
 #ifdef NETNS_REFCNT_DEBUG
 	atomic_set(&net->use_count, 0);
@@ -232,7 +234,8 @@
 		net_free(ns);
 }
 
-struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+struct net *copy_net_ns(unsigned long flags,
+			struct user_namespace *user_ns, struct net *old_net)
 {
 	struct net *net;
 	int rv;
@@ -243,8 +246,11 @@
 	net = net_alloc();
 	if (!net)
 		return ERR_PTR(-ENOMEM);
+
+	get_user_ns(user_ns);
+
 	mutex_lock(&net_mutex);
-	rv = setup_net(net);
+	rv = setup_net(net, user_ns);
 	if (rv == 0) {
 		rtnl_lock();
 		list_add_tail_rcu(&net->list, &net_namespace_list);
@@ -252,6 +258,7 @@
 	}
 	mutex_unlock(&net_mutex);
 	if (rv < 0) {
+		put_user_ns(user_ns);
 		net_drop_ns(net);
 		return ERR_PTR(rv);
 	}
@@ -308,6 +315,7 @@
 	/* Finally it is safe to free my network namespace structure */
 	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
 		list_del_init(&net->exit_list);
+		put_user_ns(net->user_ns);
 		net_drop_ns(net);
 	}
 }
@@ -347,13 +355,6 @@
 }
 
 #else
-struct net *copy_net_ns(unsigned long flags, struct net *old_net)
-{
-	if (flags & CLONE_NEWNET)
-		return ERR_PTR(-EINVAL);
-	return old_net;
-}
-
 struct net *get_net_ns_by_fd(int fd)
 {
 	return ERR_PTR(-EINVAL);
@@ -402,7 +403,7 @@
 	rcu_assign_pointer(init_net.gen, ng);
 
 	mutex_lock(&net_mutex);
-	if (setup_net(&init_net))
+	if (setup_net(&init_net, &init_user_ns))
 		panic("Could not setup the initial network namespace");
 
 	rtnl_lock();
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 79285a3..847c02b 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -248,7 +248,7 @@
 	return 0;
 }
 
-void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
 	void *v;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d1dc14c..b29dacf 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -419,20 +419,6 @@
 #define REMOVE 1
 #define FIND   0
 
-static inline ktime_t ktime_now(void)
-{
-	struct timespec ts;
-	ktime_get_ts(&ts);
-
-	return timespec_to_ktime(ts);
-}
-
-/* This works even if 32 bit because of careful byte order choice */
-static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
-	return cmp1.tv64 < cmp2.tv64;
-}
-
 static const char version[] =
 	"Packet Generator for packet performance testing. "
 	"Version: " VERSION "\n";
@@ -675,7 +661,7 @@
 	seq_puts(seq, "\n");
 
 	/* not really stopped, more like last-running-at */
-	stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
+	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
 	idle = pkt_dev->idle_acc;
 	do_div(idle, NSEC_PER_USEC);
 
@@ -2141,12 +2127,12 @@
 		return;
 	}
 
-	start_time = ktime_now();
+	start_time = ktime_get();
 	if (remaining < 100000) {
 		/* for small delays (<100us), just loop until limit is reached */
 		do {
-			end_time = ktime_now();
-		} while (ktime_lt(end_time, spin_until));
+			end_time = ktime_get();
+		} while (ktime_compare(end_time, spin_until) < 0);
 	} else {
 		/* see do_nanosleep */
 		hrtimer_init_sleeper(&t, current);
@@ -2162,7 +2148,7 @@
 			hrtimer_cancel(&t.timer);
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
-		end_time = ktime_now();
+		end_time = ktime_get();
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
@@ -2427,11 +2413,7 @@
 		}
 	} else {		/* IPV6 * */
 
-		if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 &&
-		    pkt_dev->min_in6_daddr.s6_addr32[1] == 0 &&
-		    pkt_dev->min_in6_daddr.s6_addr32[2] == 0 &&
-		    pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ;
-		else {
+		if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
 			int i;
 
 			/* Only random destinations yet */
@@ -2916,8 +2898,7 @@
 			pktgen_clear_counters(pkt_dev);
 			pkt_dev->running = 1;	/* Cranke yeself! */
 			pkt_dev->skb = NULL;
-			pkt_dev->started_at =
-				pkt_dev->next_tx = ktime_now();
+			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
 
 			set_pkt_overhead(pkt_dev);
 
@@ -3076,7 +3057,7 @@
 
 	kfree_skb(pkt_dev->skb);
 	pkt_dev->skb = NULL;
-	pkt_dev->stopped_at = ktime_now();
+	pkt_dev->stopped_at = ktime_get();
 	pkt_dev->running = 0;
 
 	show_results(pkt_dev, nr_frags);
@@ -3095,7 +3076,7 @@
 			continue;
 		if (best == NULL)
 			best = pkt_dev;
-		else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
+		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
 			best = pkt_dev;
 	}
 	if_unlock(t);
@@ -3180,14 +3161,14 @@
 
 static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 	schedule();
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
-	ktime_t idle_start = ktime_now();
+	ktime_t idle_start = ktime_get();
 
 	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
@@ -3198,7 +3179,7 @@
 		else
 			cpu_relax();
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
 }
 
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
@@ -3220,7 +3201,7 @@
 	 * "never transmit"
 	 */
 	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
-		pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
+		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
 		return;
 	}
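
The pktgen conversion above drops the driver-local ktime_now()/ktime_lt()
helpers in favour of the core API; the idiom it converges on is simply
(sketch):

static bool deadline_passed(ktime_t deadline)
{
	/* ktime_get() for the timestamp, ktime_compare() for ordering. */
	return ktime_compare(ktime_get(), deadline) >= 0;
}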
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fad649a..575a6ee 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -128,7 +128,7 @@
 	if (tab == NULL || tab[msgindex].doit == NULL)
 		tab = rtnl_msg_handlers[PF_UNSPEC];
 
-	return tab ? tab[msgindex].doit : NULL;
+	return tab[msgindex].doit;
 }
 
 static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
@@ -143,7 +143,7 @@
 	if (tab == NULL || tab[msgindex].dumpit == NULL)
 		tab = rtnl_msg_handlers[PF_UNSPEC];
 
-	return tab ? tab[msgindex].dumpit : NULL;
+	return tab[msgindex].dumpit;
 }
 
 static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
@@ -158,7 +158,7 @@
 	if (tab == NULL || tab[msgindex].calcit == NULL)
 		tab = rtnl_msg_handlers[PF_UNSPEC];
 
-	return tab ? tab[msgindex].calcit : NULL;
+	return tab[msgindex].calcit;
 }
 
 /**
@@ -1316,6 +1316,10 @@
 			err = PTR_ERR(net);
 			goto errout;
 		}
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+			err = -EPERM;
+			goto errout;
+		}
 		err = dev_change_net_namespace(dev, net, ifname);
 		put_net(net);
 		if (err)
@@ -2057,6 +2061,9 @@
 	u8 *addr;
 	int err;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
 	if (err < 0)
 		return err;
@@ -2123,6 +2130,9 @@
 	int err = -EINVAL;
 	__u8 *addr;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (nlmsg_len(nlh) < sizeof(*ndm))
 		return -EINVAL;
 
@@ -2253,6 +2263,211 @@
 	return skb->len;
 }
 
+int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+			    struct net_device *dev, u16 mode)
+{
+	struct nlmsghdr *nlh;
+	struct ifinfomsg *ifm;
+	struct nlattr *br_afspec;
+	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+
+	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ifm = nlmsg_data(nlh);
+	ifm->ifi_family = AF_BRIDGE;
+	ifm->__ifi_pad = 0;
+	ifm->ifi_type = dev->type;
+	ifm->ifi_index = dev->ifindex;
+	ifm->ifi_flags = dev_get_flags(dev);
+	ifm->ifi_change = 0;
+
+
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+	    (dev->master &&
+	     nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+	    (dev->addr_len &&
+	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+		goto nla_put_failure;
+
+	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
+	if (!br_afspec)
+		goto nla_put_failure;
+
+	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
+	    nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
+		nla_nest_cancel(skb, br_afspec);
+		goto nla_put_failure;
+	}
+	nla_nest_end(skb, br_afspec);
+
+	return nlmsg_end(skb, nlh);
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
+
+static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	int idx = 0;
+	u32 portid = NETLINK_CB(cb->skb).portid;
+	u32 seq = cb->nlh->nlmsg_seq;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		struct net_device *master = dev->master;
+
+		if (master && master->netdev_ops->ndo_bridge_getlink) {
+			if (idx >= cb->args[0] &&
+			    master->netdev_ops->ndo_bridge_getlink(
+				    skb, portid, seq, dev) < 0)
+				break;
+			idx++;
+		}
+
+		if (ops->ndo_bridge_getlink) {
+			if (idx >= cb->args[0] &&
+			    ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0)
+				break;
+			idx++;
+		}
+	}
+	rcu_read_unlock();
+	cb->args[0] = idx;
+
+	return skb->len;
+}
+
+static inline size_t bridge_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
+		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
+		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
+		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
+		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
+		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
+		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
+		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
+		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
+}
+
+static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
+{
+	struct net *net = dev_net(dev);
+	struct net_device *master = dev->master;
+	struct sk_buff *skb;
+	int err = -EOPNOTSUPP;
+
+	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
+	if (!skb) {
+		err = -ENOMEM;
+		goto errout;
+	}
+
+	if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
+	    master && master->netdev_ops->ndo_bridge_getlink) {
+		err = master->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+		if (err < 0)
+			goto errout;
+	}
+
+	if ((flags & BRIDGE_FLAGS_SELF) &&
+	    dev->netdev_ops->ndo_bridge_getlink) {
+		err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+		if (err < 0)
+			goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+	return 0;
+errout:
+	WARN_ON(err == -EMSGSIZE);
+	kfree_skb(skb);
+	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+	return err;
+}
+
+static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+			       void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ifinfomsg *ifm;
+	struct net_device *dev;
+	struct nlattr *br_spec, *attr = NULL;
+	int rem, err = -EOPNOTSUPP;
+	u16 oflags, flags = 0;
+	bool have_flags = false;
+
+	if (nlmsg_len(nlh) < sizeof(*ifm))
+		return -EINVAL;
+
+	ifm = nlmsg_data(nlh);
+	if (ifm->ifi_family != AF_BRIDGE)
+		return -EPFNOSUPPORT;
+
+	dev = __dev_get_by_index(net, ifm->ifi_index);
+	if (!dev) {
+		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (br_spec) {
+		nla_for_each_nested(attr, br_spec, rem) {
+			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+				have_flags = true;
+				flags = nla_get_u16(attr);
+				break;
+			}
+		}
+	}
+
+	oflags = flags;
+
+	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
+		if (!dev->master ||
+		    !dev->master->netdev_ops->ndo_bridge_setlink) {
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+
+		err = dev->master->netdev_ops->ndo_bridge_setlink(dev, nlh);
+		if (err)
+			goto out;
+
+		flags &= ~BRIDGE_FLAGS_MASTER;
+	}
+
+	if ((flags & BRIDGE_FLAGS_SELF)) {
+		if (!dev->netdev_ops->ndo_bridge_setlink)
+			err = -EOPNOTSUPP;
+		else
+			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
+
+		if (!err)
+			flags &= ~BRIDGE_FLAGS_SELF;
+	}
+
+	if (have_flags)
+		memcpy(nla_data(attr), &flags, sizeof(flags));
+	/* Generate event to notify upper layer of bridge change */
+	if (!err)
+		err = rtnl_bridge_notify(dev, oflags);
+out:
+	return err;
+}
+
 /* Protected by RTNL semaphore.  */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -2283,7 +2498,7 @@
 	sz_idx = type>>2;
 	kind = type&3;
 
-	if (kind != 2 && !capable(CAP_NET_ADMIN))
+	if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
@@ -2434,5 +2649,8 @@
 	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
 	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
 	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
+
+	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
+	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
 }
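
The new PF_BRIDGE RTM_GETLINK/RTM_SETLINK handlers dispatch through
net_device_ops. A sketch of how an embedded-switch driver might wire the
getlink side with the ndo_dflt_bridge_getlink() helper added above; the foo_*
names are placeholders and BRIDGE_MODE_VEB is assumed to be the mode the
hardware reports:

static int foo_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev)
{
	/* Report the port as an internal VEB: VM-to-VM traffic is
	 * switched in hardware rather than by the software bridge. */
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, BRIDGE_MODE_VEB);
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... ndo_open, ndo_stop, ndo_start_xmit, ... */
	.ndo_bridge_getlink	= foo_ndo_bridge_getlink,
};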
 
diff --git a/net/core/scm.c b/net/core/scm.c
index ab57084..57fb1ee 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -51,11 +51,11 @@
 	if (!uid_valid(uid) || !gid_valid(gid))
 		return -EINVAL;
 
-	if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
+	if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
-	      uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
+	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
-	      gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
+	      gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) {
 	       return 0;
 	}
 	return -EPERM;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4007c14..880722e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -519,7 +519,7 @@
 
 			uarg = skb_shinfo(skb)->destructor_arg;
 			if (uarg->callback)
-				uarg->callback(uarg);
+				uarg->callback(uarg, true);
 		}
 
 		if (skb_has_frag_list(skb))
@@ -635,6 +635,26 @@
 EXPORT_SYMBOL(kfree_skb);
 
 /**
+ *	skb_tx_error - report an sk_buff xmit error
+ *	@skb: buffer that triggered an error
+ *
+ *	Report xmit error if a device callback is tracking this skb.
+ *	skb must be freed afterwards.
+ */
+void skb_tx_error(struct sk_buff *skb)
+{
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		struct ubuf_info *uarg;
+
+		uarg = skb_shinfo(skb)->destructor_arg;
+		if (uarg->callback)
+			uarg->callback(uarg, false);
+		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+	}
+}
+EXPORT_SYMBOL(skb_tx_error);
+
+/**
  *	consume_skb - free an skbuff
  *	@skb: buffer to free
  *
@@ -797,7 +817,7 @@
 	for (i = 0; i < num_frags; i++)
 		skb_frag_unref(skb, i);
 
-	uarg->callback(uarg);
+	uarg->callback(uarg, false);
 
 	/* skb frags point to kernel buffers */
 	for (i = num_frags - 1; i >= 0; i--) {
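
skb_tx_error() gives drivers a way to signal zerocopy completion with
success == false before dropping a packet. A sketch of the intended call site;
foo_map_skb() is a placeholder for the driver's mapping step, not an existing
helper:

static int foo_map_skb(struct net_device *dev, struct sk_buff *skb)
{
	return -ENOMEM;		/* placeholder: pretend DMA mapping failed */
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (foo_map_skb(dev, skb) < 0) {
		skb_tx_error(skb);	/* fire the ubuf_info callback, success=false */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* ... hand the mapped skb to the hardware queue ... */
	return NETDEV_TX_OK;
}
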
diff --git a/net/core/sock.c b/net/core/sock.c
index 8a146cf..a692ef4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -505,7 +505,8 @@
 }
 EXPORT_SYMBOL(sk_dst_check);
 
-static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+static int sock_setbindtodevice(struct sock *sk, char __user *optval,
+				int optlen)
 {
 	int ret = -ENOPROTOOPT;
 #ifdef CONFIG_NETDEVICES
@@ -515,7 +516,7 @@
 
 	/* Sorry... */
 	ret = -EPERM;
-	if (!capable(CAP_NET_RAW))
+	if (!ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out;
 
 	ret = -EINVAL;
@@ -562,6 +563,59 @@
 	return ret;
 }
 
+static int sock_getbindtodevice(struct sock *sk, char __user *optval,
+				int __user *optlen, int len)
+{
+	int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+	struct net *net = sock_net(sk);
+	struct net_device *dev;
+	char devname[IFNAMSIZ];
+	unsigned seq;
+
+	if (sk->sk_bound_dev_if == 0) {
+		len = 0;
+		goto zero;
+	}
+
+	ret = -EINVAL;
+	if (len < IFNAMSIZ)
+		goto out;
+
+retry:
+	seq = read_seqbegin(&devnet_rename_seq);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+	ret = -ENODEV;
+	if (!dev) {
+		rcu_read_unlock();
+		goto out;
+	}
+
+	strcpy(devname, dev->name);
+	rcu_read_unlock();
+	if (read_seqretry(&devnet_rename_seq, seq))
+		goto retry;
+
+	len = strlen(devname) + 1;
+
+	ret = -EFAULT;
+	if (copy_to_user(optval, devname, len))
+		goto out;
+
+zero:
+	ret = -EFAULT;
+	if (put_user(len, optlen))
+		goto out;
+
+	ret = 0;
+
+out:
+#endif
+
+	return ret;
+}
+
 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 {
 	if (valbool)
@@ -589,7 +643,7 @@
 	 */
 
 	if (optname == SO_BINDTODEVICE)
-		return sock_bindtodevice(sk, optval, optlen);
+		return sock_setbindtodevice(sk, optval, optlen);
 
 	if (optlen < sizeof(int))
 		return -EINVAL;
@@ -696,7 +750,8 @@
 		break;
 
 	case SO_PRIORITY:
-		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
+		if ((val >= 0 && val <= 6) ||
+		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 			sk->sk_priority = val;
 		else
 			ret = -EPERM;
@@ -813,7 +868,7 @@
 			clear_bit(SOCK_PASSSEC, &sock->flags);
 		break;
 	case SO_MARK:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 			ret = -EPERM;
 		else
 			sk->sk_mark = val;
@@ -1074,6 +1129,17 @@
 	case SO_NOFCS:
 		v.val = sock_flag(sk, SOCK_NOFCS);
 		break;
+
+	case SO_BINDTODEVICE:
+		return sock_getbindtodevice(sk, optval, optlen, len);
+
+	case SO_GET_FILTER:
+		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
+		if (len < 0)
+			return len;
+
+		goto lenout;
+
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -1214,13 +1280,11 @@
 
 #ifdef CONFIG_CGROUPS
 #if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
-void sock_update_classid(struct sock *sk)
+void sock_update_classid(struct sock *sk, struct task_struct *task)
 {
 	u32 classid;
 
-	rcu_read_lock();  /* doing current task, which cannot vanish. */
-	classid = task_cls_classid(current);
-	rcu_read_unlock();
+	classid = task_cls_classid(task);
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
@@ -1263,7 +1327,7 @@
 		sock_net_set(sk, get_net(net));
 		atomic_set(&sk->sk_wmem_alloc, 1);
 
-		sock_update_classid(sk);
+		sock_update_classid(sk, current);
 		sock_update_netprioidx(sk, current);
 	}
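
With sock_getbindtodevice() in place, SO_BINDTODEVICE can now be read back as
well as set. A userspace sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_bound_dev(int fd)
{
	char ifname[IFNAMSIZ];
	socklen_t len = sizeof(ifname);

	if (getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, &len) < 0)
		return -1;

	if (len == 0)
		printf("not bound to a device\n");
	else
		printf("bound to %s\n", ifname);	/* len includes the NUL */
	return 0;
}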
 
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a7c3684..d1b0804 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -216,6 +216,11 @@
 			goto err_dup;
 
 		tbl[0].data = &net->core.sysctl_somaxconn;
+
+		/* Don't export any sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns) {
+			tbl[0].procname = NULL;
+		}
 	}
 
 	net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 70989e6..b07c75d 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1662,6 +1662,9 @@
 	struct nlmsghdr *reply_nlh = NULL;
 	const struct reply_func *fn;
 
+	if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index ea850ce..662071b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -174,8 +174,7 @@
 			 * To protect against Request floods, increment retrans
 			 * counter (backoff, monitored by dccp_response_timer).
 			 */
-			req->retrans++;
-			req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+			inet_rtx_syn_ack(sk, req);
 		}
 		/* Network Duplicate, discard packet */
 		return NULL;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 7b7e561..e47ba9f 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -573,6 +573,9 @@
 	struct dn_ifaddr __rcu **ifap;
 	int err = -EINVAL;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!net_eq(net, &init_net))
 		goto errout;
 
@@ -614,6 +617,9 @@
 	struct dn_ifaddr *ifa;
 	int err;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
 
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 102d610..e36614e 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -520,6 +520,9 @@
 	struct rtattr **rta = arg;
 	struct rtmsg *r = NLMSG_DATA(nlh);
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
 
@@ -540,6 +543,9 @@
 	struct rtattr **rta = arg;
 	struct rtmsg *r = NLMSG_DATA(nlh);
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!net_eq(net, &init_net))
 		return -EINVAL;
 
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 274791c..f5eede1 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,26 +1,24 @@
-config NET_DSA
-	tristate "Distributed Switch Architecture support"
-	default n
-	depends on EXPERIMENTAL && NETDEVICES && !S390
-	select PHYLIB
-	---help---
-	  This allows you to use hardware switch chips that use
-	  the Distributed Switch Architecture.
+config HAVE_NET_DSA
+	def_bool y
+	depends on NETDEVICES && !S390
 
+# Drivers must select NET_DSA and the appropriate tagging format
+
+config NET_DSA
+	tristate
+	depends on HAVE_NET_DSA
+	select PHYLIB
 
 if NET_DSA
 
 # tagging formats
 config NET_DSA_TAG_DSA
 	bool
-	default n
 
 config NET_DSA_TAG_EDSA
 	bool
-	default n
 
 config NET_DSA_TAG_TRAILER
 	bool
-	default n
 
 endif
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 766c596..24b384b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -346,7 +346,8 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern &&
+	    !ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	err = -EAFNOSUPPORT;
@@ -473,6 +474,7 @@
 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	unsigned short snum;
 	int chk_addr_ret;
 	int err;
@@ -496,7 +498,7 @@
 			goto out;
 	}
 
-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+	chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
 
 	/* Not specified by any standard per-se, however it breaks too
 	 * many applications when removed.  It is unfortunate since
@@ -516,7 +518,8 @@
 
 	snum = ntohs(addr->sin_port);
 	err = -EACCES;
-	if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+	if (snum && snum < PROT_SOCK &&
+	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		goto out;
 
 	/*      We keep a pair of addresses. rcv_saddr is the one
@@ -1251,7 +1254,7 @@
 
 static int inet_gso_send_check(struct sk_buff *skb)
 {
-	const struct net_protocol *ops;
+	const struct net_offload *ops;
 	const struct iphdr *iph;
 	int proto;
 	int ihl;
@@ -1275,9 +1278,9 @@
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = rcu_dereference(inet_protos[proto]);
-	if (likely(ops && ops->gso_send_check))
-		err = ops->gso_send_check(skb);
+	ops = rcu_dereference(inet_offloads[proto]);
+	if (likely(ops && ops->callbacks.gso_send_check))
+		err = ops->callbacks.gso_send_check(skb);
 	rcu_read_unlock();
 
 out:
@@ -1288,7 +1291,7 @@
 	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
-	const struct net_protocol *ops;
+	const struct net_offload *ops;
 	struct iphdr *iph;
 	int proto;
 	int ihl;
@@ -1325,9 +1328,9 @@
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = rcu_dereference(inet_protos[proto]);
-	if (likely(ops && ops->gso_segment))
-		segs = ops->gso_segment(skb, features);
+	ops = rcu_dereference(inet_offloads[proto]);
+	if (likely(ops && ops->callbacks.gso_segment))
+		segs = ops->callbacks.gso_segment(skb, features);
 	rcu_read_unlock();
 
 	if (!segs || IS_ERR(segs))
@@ -1356,7 +1359,7 @@
 static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
-	const struct net_protocol *ops;
+	const struct net_offload *ops;
 	struct sk_buff **pp = NULL;
 	struct sk_buff *p;
 	const struct iphdr *iph;
@@ -1378,8 +1381,8 @@
 	proto = iph->protocol;
 
 	rcu_read_lock();
-	ops = rcu_dereference(inet_protos[proto]);
-	if (!ops || !ops->gro_receive)
+	ops = rcu_dereference(inet_offloads[proto]);
+	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
 	if (*(u8 *)iph != 0x45)
@@ -1420,7 +1423,7 @@
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->gro_receive(head, skb);
+	pp = ops->callbacks.gro_receive(head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1435,7 +1438,7 @@
 {
 	__be16 newlen = htons(skb->len - skb_network_offset(skb));
 	struct iphdr *iph = ip_hdr(skb);
-	const struct net_protocol *ops;
+	const struct net_offload *ops;
 	int proto = iph->protocol;
 	int err = -ENOSYS;
 
@@ -1443,11 +1446,11 @@
 	iph->tot_len = newlen;
 
 	rcu_read_lock();
-	ops = rcu_dereference(inet_protos[proto]);
-	if (WARN_ON(!ops || !ops->gro_complete))
+	ops = rcu_dereference(inet_offloads[proto]);
+	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out_unlock;
 
-	err = ops->gro_complete(skb);
+	err = ops->callbacks.gro_complete(skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1558,23 +1561,33 @@
 	.early_demux	=	tcp_v4_early_demux,
 	.handler	=	tcp_v4_rcv,
 	.err_handler	=	tcp_v4_err,
-	.gso_send_check	=	tcp_v4_gso_send_check,
-	.gso_segment	=	tcp_tso_segment,
-	.gro_receive	=	tcp4_gro_receive,
-	.gro_complete	=	tcp4_gro_complete,
 	.no_policy	=	1,
 	.netns_ok	=	1,
 };
 
+static const struct net_offload tcp_offload = {
+	.callbacks = {
+		.gso_send_check	=	tcp_v4_gso_send_check,
+		.gso_segment	=	tcp_tso_segment,
+		.gro_receive	=	tcp4_gro_receive,
+		.gro_complete	=	tcp4_gro_complete,
+	},
+};
+
 static const struct net_protocol udp_protocol = {
 	.handler =	udp_rcv,
 	.err_handler =	udp_err,
-	.gso_send_check = udp4_ufo_send_check,
-	.gso_segment = udp4_ufo_fragment,
 	.no_policy =	1,
 	.netns_ok =	1,
 };
 
+static const struct net_offload udp_offload = {
+	.callbacks = {
+		.gso_send_check = udp4_ufo_send_check,
+		.gso_segment = udp4_ufo_fragment,
+	},
+};
+
 static const struct net_protocol icmp_protocol = {
 	.handler =	icmp_rcv,
 	.err_handler =	ping_err,
@@ -1659,13 +1672,35 @@
  *	IP protocol layer initialiser
  */
 
+static struct packet_offload ip_packet_offload __read_mostly = {
+	.type = cpu_to_be16(ETH_P_IP),
+	.callbacks = {
+		.gso_send_check = inet_gso_send_check,
+		.gso_segment = inet_gso_segment,
+		.gro_receive = inet_gro_receive,
+		.gro_complete = inet_gro_complete,
+	},
+};
+
+static int __init ipv4_offload_init(void)
+{
+	/*
+	 * Add offloads
+	 */
+	if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0)
+		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
+	if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0)
+		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
+
+	dev_add_offload(&ip_packet_offload);
+	return 0;
+}
+
+fs_initcall(ipv4_offload_init);
+
 static struct packet_type ip_packet_type __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IP),
 	.func = ip_rcv,
-	.gso_send_check = inet_gso_send_check,
-	.gso_segment = inet_gso_segment,
-	.gro_receive = inet_gro_receive,
-	.gro_complete = inet_gro_complete,
 };
 
 static int __init inet_init(void)
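
With the handler/offload split above, per-protocol GSO/GRO callbacks are
registered through inet_add_offload() rather than inet_add_protocol(). A
minimal sketch of such a registration; the protocol number and stub callbacks
are illustrative only, not part of this patch:

#include <linux/init.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

static int example_gso_send_check(struct sk_buff *skb)
{
	return 0;				/* stub: nothing to validate */
}

static struct sk_buff *example_gso_segment(struct sk_buff *skb,
					   netdev_features_t features)
{
	return ERR_PTR(-EPROTONOSUPPORT);	/* stub: no segmentation */
}

static const struct net_offload example_offload = {
	.callbacks = {
		.gso_send_check	= example_gso_send_check,
		.gso_segment	= example_gso_segment,
	},
};

static int __init example_offload_init(void)
{
	return inet_add_offload(&example_offload, IPPROTO_GRE);
}
fs_initcall(example_offload_init);
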
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 4780045..ce6fbdf 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1161,7 +1161,7 @@
 	switch (cmd) {
 	case SIOCDARP:
 	case SIOCSARP:
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 	case SIOCGARP:
 		err = copy_from_user(&r, arg, sizeof(struct arpreq));
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2a6abc1..e13183a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -55,6 +55,7 @@
 #include <linux/sysctl.h>
 #endif
 #include <linux/kmod.h>
+#include <linux/netconf.h>
 
 #include <net/arp.h>
 #include <net/ip.h>
@@ -723,7 +724,7 @@
 
 	case SIOCSIFFLAGS:
 		ret = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto out;
 		break;
 	case SIOCSIFADDR:	/* Set interface address (and family) */
@@ -731,7 +732,7 @@
 	case SIOCSIFDSTADDR:	/* Set the destination address */
 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
 		ret = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto out;
 		ret = -EINVAL;
 		if (sin->sin_family != AF_INET)
@@ -1442,6 +1443,149 @@
 	return 0;
 }
 
+static int inet_netconf_msgsize_devconf(int type)
+{
+	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
+		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
+
+	/* type -1 is used for ALL */
+	if (type == -1 || type == NETCONFA_FORWARDING)
+		size += nla_total_size(4);
+	if (type == -1 || type == NETCONFA_RP_FILTER)
+		size += nla_total_size(4);
+
+	return size;
+}
+
+static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
+				     struct ipv4_devconf *devconf, u32 portid,
+				     u32 seq, int event, unsigned int flags,
+				     int type)
+{
+	struct nlmsghdr  *nlh;
+	struct netconfmsg *ncm;
+
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
+			flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ncm = nlmsg_data(nlh);
+	ncm->ncm_family = AF_INET;
+
+	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
+		goto nla_put_failure;
+
+	/* type -1 is used for ALL */
+	if ((type == -1 || type == NETCONFA_FORWARDING) &&
+	    nla_put_s32(skb, NETCONFA_FORWARDING,
+			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
+		goto nla_put_failure;
+	if ((type == -1 || type == NETCONFA_RP_FILTER) &&
+	    nla_put_s32(skb, NETCONFA_RP_FILTER,
+			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+					struct ipv4_devconf *devconf)
+{
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
+					RTM_NEWNETCONF, 0, type);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
+	return;
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
+}
+
+static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
+	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
+	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
+	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
+};
+
+static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+				    struct nlmsghdr *nlh,
+				    void *arg)
+{
+	struct net *net = sock_net(in_skb->sk);
+	struct nlattr *tb[NETCONFA_MAX+1];
+	struct netconfmsg *ncm;
+	struct sk_buff *skb;
+	struct ipv4_devconf *devconf;
+	struct in_device *in_dev;
+	struct net_device *dev;
+	int ifindex;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
+			  devconf_ipv4_policy);
+	if (err < 0)
+		goto errout;
+
+	err = -EINVAL;
+	if (!tb[NETCONFA_IFINDEX])
+		goto errout;
+
+	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
+	switch (ifindex) {
+	case NETCONFA_IFINDEX_ALL:
+		devconf = net->ipv4.devconf_all;
+		break;
+	case NETCONFA_IFINDEX_DEFAULT:
+		devconf = net->ipv4.devconf_dflt;
+		break;
+	default:
+		dev = __dev_get_by_index(net, ifindex);
+		if (dev == NULL)
+			goto errout;
+		in_dev = __in_dev_get_rtnl(dev);
+		if (in_dev == NULL)
+			goto errout;
+		devconf = &in_dev->cnf;
+		break;
+	}
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
+					NETLINK_CB(in_skb).portid,
+					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
+					-1);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+errout:
+	return err;
+}
+
 #ifdef CONFIG_SYSCTL
 
 static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1467,6 +1611,12 @@
 
 	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
 	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
+	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+				    NETCONFA_IFINDEX_ALL,
+				    net->ipv4.devconf_all);
+	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+				    NETCONFA_IFINDEX_DEFAULT,
+				    net->ipv4.devconf_dflt);
 
 	for_each_netdev(net, dev) {
 		struct in_device *in_dev;
@@ -1474,8 +1624,11 @@
 			dev_disable_lro(dev);
 		rcu_read_lock();
 		in_dev = __in_dev_get_rcu(dev);
-		if (in_dev)
+		if (in_dev) {
 			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
+			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+						    dev->ifindex, &in_dev->cnf);
+		}
 		rcu_read_unlock();
 	}
 }
@@ -1501,6 +1654,23 @@
 		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
 			if ((new_value == 0) && (old_value != 0))
 				rt_cache_flush(net);
+		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
+		    new_value != old_value) {
+			int ifindex;
+
+			if (cnf == net->ipv4.devconf_dflt)
+				ifindex = NETCONFA_IFINDEX_DEFAULT;
+			else if (cnf == net->ipv4.devconf_all)
+				ifindex = NETCONFA_IFINDEX_ALL;
+			else {
+				struct in_device *idev =
+					container_of(cnf, struct in_device,
+						     cnf);
+				ifindex = idev->dev->ifindex;
+			}
+			inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
+						    ifindex, cnf);
+		}
 	}
 
 	return ret;
@@ -1527,15 +1697,23 @@
 			}
 			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
 				inet_forward_change(net);
-			} else if (*valp) {
+			} else {
 				struct ipv4_devconf *cnf = ctl->extra1;
 				struct in_device *idev =
 					container_of(cnf, struct in_device, cnf);
-				dev_disable_lro(idev->dev);
+				if (*valp)
+					dev_disable_lro(idev->dev);
+				inet_netconf_notify_devconf(net,
+							    NETCONFA_FORWARDING,
+							    idev->dev->ifindex,
+							    cnf);
 			}
 			rtnl_unlock();
 			rt_cache_flush(net);
-		}
+		} else
+			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+						    NETCONFA_IFINDEX_DEFAULT,
+						    net->ipv4.devconf_dflt);
 	}
 
 	return ret;
@@ -1809,5 +1987,7 @@
 	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
 	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
 	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
+	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
+		      NULL, NULL);
 }
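
The RTM_GETNETCONF handler above answers per-interface (or "all"/"default")
IPv4 devconf queries. A userspace sketch of such a query over a raw rtnetlink
socket; reply parsing is omitted and the names assume the uapi header added by
this series (linux/netconf.h):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>

static int query_netconf_all(void)
{
	struct {
		struct nlmsghdr nlh;
		struct netconfmsg ncm;
		char attrbuf[64];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	int ifindex = NETCONFA_IFINDEX_ALL;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ncm));
	req.nlh.nlmsg_type = RTM_GETNETCONF;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ncm.ncm_family = AF_INET;

	/* Single NETCONFA_IFINDEX attribute selecting the "all" devconf. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = NETCONFA_IFINDEX;
	rta->rta_len = RTA_LENGTH(sizeof(ifindex));
	memcpy(RTA_DATA(rta), &ifindex, sizeof(ifindex));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		close(fd);
		return -1;
	}

	/* The RTM_NEWNETCONF reply carries NETCONFA_FORWARDING and
	 * NETCONFA_RP_FILTER; recv() and attribute walking are omitted. */
	close(fd);
	return 0;
}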
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 825c608..5cd75e2 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -488,7 +488,7 @@
 	switch (cmd) {
 	case SIOCADDRT:		/* Add a route */
 	case SIOCDELRT:		/* Delete a route */
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		if (copy_from_user(&rt, arg, sizeof(rt)))
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 71b125c..4797a80 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -803,7 +803,7 @@
 		unsigned int bytes;
 
 		if (!new_size)
-			new_size = 1;
+			new_size = 16;
 		bytes = new_size * sizeof(struct hlist_head *);
 		new_info_hash = fib_info_hash_alloc(bytes);
 		new_laddrhash = fib_info_hash_alloc(bytes);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d34ce29..2026542 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -521,21 +521,31 @@
 				  int *expire, int *resend)
 {
 	if (!rskq_defer_accept) {
-		*expire = req->retrans >= thresh;
+		*expire = req->num_timeout >= thresh;
 		*resend = 1;
 		return;
 	}
-	*expire = req->retrans >= thresh &&
-		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+	*expire = req->num_timeout >= thresh &&
+		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
 	/*
 	 * Do not resend while waiting for data after ACK,
 	 * start to resend on end of deferring period to give
 	 * last chance for data or ACK to create established socket.
 	 */
 	*resend = !inet_rsk(req)->acked ||
-		  req->retrans >= rskq_defer_accept - 1;
+		  req->num_timeout >= rskq_defer_accept - 1;
 }
 
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
+{
+	int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);
+
+	if (!err)
+		req->num_retrans++;
+	return err;
+}
+EXPORT_SYMBOL(inet_rtx_syn_ack);
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
 				const unsigned long interval,
 				const unsigned long timeout,
@@ -599,13 +609,14 @@
 				req->rsk_ops->syn_ack_timeout(parent, req);
 				if (!expire &&
 				    (!resend ||
-				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
+				     !inet_rtx_syn_ack(parent, req) ||
 				     inet_rsk(req)->acked)) {
 					unsigned long timeo;
 
-					if (req->retrans++ == 0)
+					if (req->num_timeout++ == 0)
 						lopt->qlen_young--;
-					timeo = min((timeout << req->retrans), max_rto);
+					timeo = min(timeout << req->num_timeout,
+						    max_rto);
 					req->expires = now + timeo;
 					reqp = &req->dl_next;
 					continue;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 0c34bfa..cb98cbe 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -105,6 +105,9 @@
 	r->id.idiag_src[0] = inet->inet_rcv_saddr;
 	r->id.idiag_dst[0] = inet->inet_daddr;
 
+	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
+		goto errout;
+
 	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
 	 * hence this needs to be included regardless of socket family.
 	 */
@@ -617,7 +620,7 @@
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = TCP_SYN_RECV;
 	r->idiag_timer = 1;
-	r->idiag_retrans = req->retrans;
+	r->idiag_retrans = req->num_retrans;
 
 	r->id.idiag_if = sk->sk_bound_dev_if;
 	sock_diag_save_cookie(req, r->id.idiag_cookie);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 448e685..1cf6a76 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -802,6 +802,10 @@
 		table[0].data = &net->ipv4.frags.high_thresh;
 		table[1].data = &net->ipv4.frags.low_thresh;
 		table[2].data = &net->ipv4.frags.timeout;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			table[0].procname = NULL;
 	}
 
 	hdr = register_net_sysctl(net, "net/ipv4", table);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7240f8e..a85ae2f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -164,21 +164,6 @@
 #define tunnels_r	tunnels[2]
 #define tunnels_l	tunnels[1]
 #define tunnels_wc	tunnels[0]
-/*
- * Locking : hash tables are protected by RCU and RTNL
- */
-
-#define for_each_ip_tunnel_rcu(start) \
-	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 
 static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
 						   struct rtnl_link_stats64 *tot)
@@ -250,7 +235,7 @@
 		       ARPHRD_ETHER : ARPHRD_IPGRE;
 	int score, cand_score = 4;
 
-	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
 		if (local != t->parms.iph.saddr ||
 		    remote != t->parms.iph.daddr ||
 		    !(t->dev->flags & IFF_UP))
@@ -277,7 +262,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
 		if (remote != t->parms.iph.daddr ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
@@ -303,7 +288,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
 		if ((local != t->parms.iph.saddr &&
 		     (local != t->parms.iph.daddr ||
 		      !ipv4_is_multicast(local))) ||
@@ -331,7 +316,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
 		if (t->parms.i_key != key ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
@@ -753,7 +738,6 @@
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr  *old_iph = ip_hdr(skb);
 	const struct iphdr  *tiph;
 	struct flowi4 fl4;
@@ -977,9 +961,7 @@
 		}
 	}
 
-	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1082,7 +1064,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -1157,7 +1139,7 @@
 
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		if (dev == ign->fb_tunnel_dev) {
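
The per-file copies of for_each_ip_tunnel_rcu() removed above hard-coded the iterator name t; the call sites now pass the iterator explicitly, which suggests a shared definition that takes it as a parameter. A user-space sketch of such a macro over a plain singly linked list (the kernel version would use rcu_dereference() for each step):

	#include <stdio.h>

	struct tunnel {
		int id;
		struct tunnel *next;
	};

	/* iterator passed in explicitly instead of being captured by name */
	#define for_each_tunnel(pos, start) \
		for ((pos) = (start); (pos); (pos) = (pos)->next)

	int main(void)
	{
		struct tunnel c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
		struct tunnel *t;

		for_each_tunnel(t, &a)
			printf("tunnel %d\n", t->id);
		return 0;
	}
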
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1dc01f9..f6289bf 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -409,7 +409,7 @@
 					optptr[2] += 8;
 					break;
 				      default:
-					if (!skb && !capable(CAP_NET_RAW)) {
+					if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
 						pp_ptr = optptr + 3;
 						goto error;
 					}
@@ -445,7 +445,7 @@
 				opt->router_alert = optptr - iph;
 			break;
 		      case IPOPT_CIPSO:
-			if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
+			if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) {
 				pp_ptr = optptr;
 				goto error;
 			}
@@ -458,7 +458,7 @@
 		      case IPOPT_SEC:
 		      case IPOPT_SID:
 		      default:
-			if (!skb && !capable(CAP_NET_RAW)) {
+			if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
 				pp_ptr = optptr;
 				goto error;
 			}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 14bbfcf..3c9d208 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -989,13 +989,14 @@
 	case IP_IPSEC_POLICY:
 	case IP_XFRM_POLICY:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 			break;
 		err = xfrm_user_policy(sk, optname, optval, optlen);
 		break;
 
 	case IP_TRANSPARENT:
-		if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) {
+		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
+		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
 			err = -EPERM;
 			break;
 		}
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 858fddf..c3a4233 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -66,20 +66,6 @@
 static void vti_dev_free(struct net_device *dev);
 static int vti_tunnel_bind_dev(struct net_device *dev);
 
-/* Locking : hash tables are protected by RCU and RTNL */
-
-#define for_each_ip_tunnel_rcu(start) \
-	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct	u64_stats_sync	syncp;
-};
-
 #define VTI_XMIT(stats1, stats2) do {				\
 	int err;						\
 	int pkt_len = skb->len;					\
@@ -142,19 +128,19 @@
 	struct ip_tunnel *t;
 	struct vti_net *ipn = net_generic(net, vti_net_id);
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
-	for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
 		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
 		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
 			return t;
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_wc[0])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
 		if (t && (t->dev->flags&IFF_UP))
 			return t;
 	return NULL;
@@ -502,7 +488,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -567,7 +553,7 @@
 
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		if (dev == ipn->fb_tunnel_dev) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 798358b..d763701 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1500,8 +1500,10 @@
 	 * Clue in the operator.
 	 */
 	pr_info("IP-Config: Complete:\n");
-	pr_info("     device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n",
-		ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway);
+
+	pr_info("     device=%s, hwaddr=%*phC, ipaddr=%pI4, mask=%pI4, gw=%pI4\n",
+		ic_dev->name, ic_dev->addr_len, ic_dev->dev_addr,
+		&ic_myaddr, &ic_netmask, &ic_gateway);
 	pr_info("     host=%s, domain=%s, nis-domain=%s\n",
 		utsname()->nodename, ic_domain, utsname()->domainname);
 	pr_info("     bootserver=%pI4, rootserver=%pI4, rootpath=%s",
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e15b452..191fc24 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -138,22 +138,7 @@
 static int ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
 static void ipip_dev_free(struct net_device *dev);
-
-/*
- * Locking : hash tables are protected by RCU and RTNL
- */
-
-#define for_each_ip_tunnel_rcu(start) \
-	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
+static struct rtnl_link_ops ipip_link_ops __read_mostly;
 
 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
 						  struct rtnl_link_stats64 *tot)
@@ -197,16 +182,16 @@
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
 		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
 
-	for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
+	for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
 		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
 			return t;
 
@@ -264,6 +249,32 @@
 	rcu_assign_pointer(*tp, t);
 }
 
+static int ipip_tunnel_create(struct net_device *dev)
+{
+	struct ip_tunnel *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+	int err;
+
+	err = ipip_tunnel_init(dev);
+	if (err < 0)
+		goto out;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out;
+
+	strcpy(t->parms.name, dev->name);
+	dev->rtnl_link_ops = &ipip_link_ops;
+
+	dev_hold(dev);
+	ipip_tunnel_link(ipn, t);
+	return 0;
+
+out:
+	return err;
+}
+
 static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
 		struct ip_tunnel_parm *parms, int create)
 {
@@ -298,16 +309,9 @@
 	nt = netdev_priv(dev);
 	nt->parms = *parms;
 
-	if (ipip_tunnel_init(dev) < 0)
+	if (ipip_tunnel_create(dev) < 0)
 		goto failed_free;
 
-	if (register_netdevice(dev) < 0)
-		goto failed_free;
-
-	strcpy(nt->parms.name, dev->name);
-
-	dev_hold(dev);
-	ipip_tunnel_link(ipn, nt);
 	return nt;
 
 failed_free:
@@ -463,7 +467,6 @@
 static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr  *tiph = &tunnel->parms.iph;
 	u8     tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
@@ -479,6 +482,10 @@
 	if (skb->protocol != htons(ETH_P_IP))
 		goto tx_error;
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    skb_checksum_help(skb))
+		goto tx_error;
+
 	if (tos & 1)
 		tos = old_iph->tos;
 
@@ -586,9 +593,7 @@
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl	=	old_iph->ttl;
 
-	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -635,6 +640,28 @@
 	dev->iflink = tunnel->parms.link;
 }
 
+static void ipip_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
+{
+	struct net *net = dev_net(t->dev);
+	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+
+	ipip_tunnel_unlink(ipn, t);
+	synchronize_net();
+	t->parms.iph.saddr = p->iph.saddr;
+	t->parms.iph.daddr = p->iph.daddr;
+	memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
+	memcpy(t->dev->broadcast, &p->iph.daddr, 4);
+	ipip_tunnel_link(ipn, t);
+	t->parms.iph.ttl = p->iph.ttl;
+	t->parms.iph.tos = p->iph.tos;
+	t->parms.iph.frag_off = p->iph.frag_off;
+	if (t->parms.link != p->link) {
+		t->parms.link = p->link;
+		ipip_tunnel_bind_dev(t->dev);
+	}
+	netdev_state_change(t->dev);
+}
+
 static int
 ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -664,7 +691,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -693,29 +720,13 @@
 					break;
 				}
 				t = netdev_priv(dev);
-				ipip_tunnel_unlink(ipn, t);
-				synchronize_net();
-				t->parms.iph.saddr = p.iph.saddr;
-				t->parms.iph.daddr = p.iph.daddr;
-				memcpy(dev->dev_addr, &p.iph.saddr, 4);
-				memcpy(dev->broadcast, &p.iph.daddr, 4);
-				ipip_tunnel_link(ipn, t);
-				netdev_state_change(dev);
 			}
+
+			ipip_tunnel_update(t, &p);
 		}
 
 		if (t) {
 			err = 0;
-			if (cmd == SIOCCHGTUNNEL) {
-				t->parms.iph.ttl = p.iph.ttl;
-				t->parms.iph.tos = p.iph.tos;
-				t->parms.iph.frag_off = p.iph.frag_off;
-				if (t->parms.link != p.link) {
-					t->parms.link = p.link;
-					ipip_tunnel_bind_dev(dev);
-					netdev_state_change(dev);
-				}
-			}
 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
 				err = -EFAULT;
 		} else
@@ -724,7 +735,7 @@
 
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		if (dev == ipn->fb_tunnel_dev) {
@@ -773,6 +784,11 @@
 	free_netdev(dev);
 }
 
+#define IPIP_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_HW_CSUM)
+
 static void ipip_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &ipip_netdev_ops;
@@ -787,6 +803,9 @@
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 	dev->features		|= NETIF_F_LLTX;
 	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
+
+	dev->features		|= IPIP_FEATURES;
+	dev->hw_features	|= IPIP_FEATURES;
 }
 
 static int ipip_tunnel_init(struct net_device *dev)
@@ -829,6 +848,142 @@
 	return 0;
 }
 
+static void ipip_netlink_parms(struct nlattr *data[],
+			       struct ip_tunnel_parm *parms)
+{
+	memset(parms, 0, sizeof(*parms));
+
+	parms->iph.version = 4;
+	parms->iph.protocol = IPPROTO_IPIP;
+	parms->iph.ihl = 5;
+
+	if (!data)
+		return;
+
+	if (data[IFLA_IPTUN_LINK])
+		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
+
+	if (data[IFLA_IPTUN_LOCAL])
+		parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
+
+	if (data[IFLA_IPTUN_REMOTE])
+		parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
+
+	if (data[IFLA_IPTUN_TTL]) {
+		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
+		if (parms->iph.ttl)
+			parms->iph.frag_off = htons(IP_DF);
+	}
+
+	if (data[IFLA_IPTUN_TOS])
+		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
+
+	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
+		parms->iph.frag_off = htons(IP_DF);
+}
+
+static int ipip_newlink(struct net *src_net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net *net = dev_net(dev);
+	struct ip_tunnel *nt;
+
+	nt = netdev_priv(dev);
+	ipip_netlink_parms(data, &nt->parms);
+
+	if (ipip_tunnel_locate(net, &nt->parms, 0))
+		return -EEXIST;
+
+	return ipip_tunnel_create(dev);
+}
+
+static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
+			   struct nlattr *data[])
+{
+	struct ip_tunnel *t;
+	struct ip_tunnel_parm p;
+	struct net *net = dev_net(dev);
+	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+
+	if (dev == ipn->fb_tunnel_dev)
+		return -EINVAL;
+
+	ipip_netlink_parms(data, &p);
+
+	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
+	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
+		return -EINVAL;
+
+	t = ipip_tunnel_locate(net, &p, 0);
+
+	if (t) {
+		if (t->dev != dev)
+			return -EEXIST;
+	} else
+		t = netdev_priv(dev);
+
+	ipip_tunnel_update(t, &p);
+	return 0;
+}
+
+static size_t ipip_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_IPTUN_LINK */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_LOCAL */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_REMOTE */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_TTL */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_TOS */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_PMTUDISC */
+		nla_total_size(1) +
+		0;
+}
+
+static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+	struct ip_tunnel_parm *parm = &tunnel->parms;
+
+	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+	    nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+	    nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
+	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
+	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
+		       !!(parm->iph.frag_off & htons(IP_DF))))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
+	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
+};
+
+static struct rtnl_link_ops ipip_link_ops __read_mostly = {
+	.kind		= "ipip",
+	.maxtype	= IFLA_IPTUN_MAX,
+	.policy		= ipip_policy,
+	.priv_size	= sizeof(struct ip_tunnel),
+	.setup		= ipip_tunnel_setup,
+	.newlink	= ipip_newlink,
+	.changelink	= ipip_changelink,
+	.get_size	= ipip_get_size,
+	.fill_info	= ipip_fill_info,
+};
+
 static struct xfrm_tunnel ipip_handler __read_mostly = {
 	.handler	=	ipip_rcv,
 	.err_handler	=	ipip_err,
@@ -925,14 +1080,26 @@
 		return err;
 	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
 	if (err < 0) {
-		unregister_pernet_device(&ipip_net_ops);
 		pr_info("%s: can't register tunnel\n", __func__);
+		goto xfrm_tunnel_failed;
 	}
+	err = rtnl_link_register(&ipip_link_ops);
+	if (err < 0)
+		goto rtnl_link_failed;
+
+out:
 	return err;
+
+rtnl_link_failed:
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+	unregister_pernet_device(&ipip_net_ops);
+	goto out;
 }
 
 static void __exit ipip_fini(void)
 {
+	rtnl_link_unregister(&ipip_link_ops);
 	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
 		pr_info("%s: can't deregister tunnel\n", __func__);
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3eab2b2..58e4160 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -83,8 +83,8 @@
 	struct vif_device	vif_table[MAXVIFS];
 	int			maxvif;
 	atomic_t		cache_resolve_queue_len;
-	int			mroute_do_assert;
-	int			mroute_do_pim;
+	bool			mroute_do_assert;
+	bool			mroute_do_pim;
 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
 	int			mroute_reg_vif_num;
 #endif
@@ -1207,23 +1207,24 @@
 	struct net *net = sock_net(sk);
 	struct mr_table *mrt;
 
+	if (sk->sk_type != SOCK_RAW ||
+	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
+		return -EOPNOTSUPP;
+
 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
 	if (mrt == NULL)
 		return -ENOENT;
 
 	if (optname != MRT_INIT) {
 		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
-		    !capable(CAP_NET_ADMIN))
+		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
 	switch (optname) {
 	case MRT_INIT:
-		if (sk->sk_type != SOCK_RAW ||
-		    inet_sk(sk)->inet_num != IPPROTO_IGMP)
-			return -EOPNOTSUPP;
 		if (optlen != sizeof(int))
-			return -ENOPROTOOPT;
+			return -EINVAL;
 
 		rtnl_lock();
 		if (rtnl_dereference(mrt->mroute_sk)) {
@@ -1284,9 +1285,11 @@
 	case MRT_ASSERT:
 	{
 		int v;
+		if (optlen != sizeof(v))
+			return -EINVAL;
 		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
-		mrt->mroute_do_assert = (v) ? 1 : 0;
+		mrt->mroute_do_assert = v;
 		return 0;
 	}
 #ifdef CONFIG_IP_PIMSM
@@ -1294,9 +1297,11 @@
 	{
 		int v;
 
+		if (optlen != sizeof(v))
+			return -EINVAL;
 		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
-		v = (v) ? 1 : 0;
+		v = !!v;
 
 		rtnl_lock();
 		ret = 0;
@@ -1329,7 +1334,8 @@
 		} else {
 			if (!ipmr_new_table(net, v))
 				ret = -ENOMEM;
-			raw_sk(sk)->ipmr_table = v;
+			else
+				raw_sk(sk)->ipmr_table = v;
 		}
 		rtnl_unlock();
 		return ret;
@@ -1355,6 +1361,10 @@
 	struct net *net = sock_net(sk);
 	struct mr_table *mrt;
 
+	if (sk->sk_type != SOCK_RAW ||
+	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
+		return -EOPNOTSUPP;
+
 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
 	if (mrt == NULL)
 		return -ENOENT;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 97e61ea..3ea4127 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1533,7 +1533,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1677,7 +1677,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1698,7 +1698,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1722,7 +1722,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 170b1fd..17c5e06 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1846,7 +1846,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1961,7 +1961,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1983,7 +1983,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -2008,7 +2008,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a820472..ac635a7 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -276,9 +276,7 @@
 		return -ENOMEM;
 	net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv4.nat_table))
-		return PTR_ERR(net->ipv4.nat_table);
-	return 0;
+	return PTR_RET(net->ipv4.nat_table);
 }
 
 static void __net_exit iptable_nat_net_exit(struct net *net)
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 8918eff..0f9d09f 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -29,6 +29,7 @@
 #include <net/protocol.h>
 
 const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
 /*
  *	Add a protocol handler to the hash tables
@@ -41,6 +42,13 @@
 }
 EXPORT_SYMBOL(inet_add_protocol);
 
+int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
+{
+	return !cmpxchg((const struct net_offload **)&inet_offloads[protocol],
+			NULL, prot) ? 0 : -1;
+}
+EXPORT_SYMBOL(inet_add_offload);
+
 /*
  *	Remove a protocol from the hash tables.
  */
@@ -57,3 +65,16 @@
 	return ret;
 }
 EXPORT_SYMBOL(inet_del_protocol);
+
+int inet_del_offload(const struct net_offload *prot, unsigned char protocol)
+{
+	int ret;
+
+	ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol],
+		       prot, NULL) == prot) ? 0 : -1;
+
+	synchronize_net();
+
+	return ret;
+}
+EXPORT_SYMBOL(inet_del_offload);
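
inet_add_offload()/inet_del_offload() above claim and release a per-protocol slot with compare-and-swap: registration succeeds only if the slot is still empty, and removal only if the slot still holds the caller's handler, after which synchronize_net() lets in-flight readers drain. A user-space sketch of the same claim/release discipline using C11 atomics in place of the kernel's cmpxchg (the RCU grace period is omitted):

	#include <stdatomic.h>
	#include <stdio.h>

	struct net_offload { const char *name; };

	static _Atomic(const struct net_offload *) offloads[256];

	static int add_offload(const struct net_offload *p, unsigned char proto)
	{
		const struct net_offload *expected = NULL;

		/* claim the slot only if nobody registered before us */
		return atomic_compare_exchange_strong(&offloads[proto],
						      &expected, p) ? 0 : -1;
	}

	static int del_offload(const struct net_offload *p, unsigned char proto)
	{
		const struct net_offload *expected = p;

		/* release the slot only if we still own it */
		return atomic_compare_exchange_strong(&offloads[proto],
						      &expected, NULL) ? 0 : -1;
	}

	int main(void)
	{
		struct net_offload tcp = { "tcp" }, other = { "other" };

		printf("add tcp:   %d\n", add_offload(&tcp, 6));	/* 0: slot free   */
		printf("add other: %d\n", add_offload(&other, 6));	/* -1: slot taken */
		printf("del other: %d\n", del_offload(&other, 6));	/* -1: not owner  */
		printf("del tcp:   %d\n", del_offload(&tcp, 6));	/* 0              */
		return 0;
	}
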
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index df25142..baa9b28 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2496,6 +2496,10 @@
 		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
 		if (tbl == NULL)
 			goto err_dup;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			tbl[0].procname = NULL;
 	}
 	tbl[0].extra1 = net;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ba48e79..b236ef0 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -340,7 +340,7 @@
 	}
 
 	req->expires	= 0UL;
-	req->retrans	= 0;
+	req->num_retrans = 0;
 
 	/*
 	 * We need to lookup the route here to get at the correct
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 63d4ecc..d84400b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -883,6 +883,9 @@
 		table[6].data =
 			&net->ipv4.sysctl_ping_group_range;
 
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			table[0].procname = NULL;
 	}
 
 	/*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 083092e..e6eace1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,13 +536,14 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int answ;
+	bool slow;
 
 	switch (cmd) {
 	case SIOCINQ:
 		if (sk->sk_state == TCP_LISTEN)
 			return -EINVAL;
 
-		lock_sock(sk);
+		slow = lock_sock_fast(sk);
 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 			answ = 0;
 		else if (sock_flag(sk, SOCK_URGINLINE) ||
@@ -557,7 +558,7 @@
 				answ--;
 		} else
 			answ = tp->urg_seq - tp->copied_seq;
-		release_sock(sk);
+		unlock_sock_fast(sk, slow);
 		break;
 	case SIOCATMARK:
 		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
@@ -2303,7 +2304,7 @@
 
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
-	return capable(CAP_NET_ADMIN) &&
+	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
 }
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1432cdb..baf2861 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -259,7 +259,8 @@
 	if (!ca)
 		err = -ENOENT;
 
-	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
+	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
 		err = -EPERM;
 
 	else if (!try_module_get(ca->owner))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 609ff98..fc67831 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3552,6 +3552,24 @@
 	return false;
 }
 
+/* RFC 5961 7 [ACK Throttling] */
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+	/* unprotected vars, we dont care of overwrites */
+	static u32 challenge_timestamp;
+	static unsigned int challenge_count;
+	u32 now = jiffies / HZ;
+
+	if (now != challenge_timestamp) {
+		challenge_timestamp = now;
+		challenge_count = 0;
+	}
+	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		tcp_send_ack(sk);
+	}
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3571,8 +3589,14 @@
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
 	 */
-	if (before(ack, prior_snd_una))
+	if (before(ack, prior_snd_una)) {
+		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+		if (before(ack, prior_snd_una - tp->max_window)) {
+			tcp_send_challenge_ack(sk);
+			return -1;
+		}
 		goto old_ack;
+	}
 
 	/* If the ack includes data we haven't sent yet, discard
 	 * this segment (RFC793 Section 3.9).
@@ -5244,23 +5268,6 @@
 }
 #endif /* CONFIG_NET_DMA */
 
-static void tcp_send_challenge_ack(struct sock *sk)
-{
-	/* unprotected vars, we dont care of overwrites */
-	static u32 challenge_timestamp;
-	static unsigned int challenge_count;
-	u32 now = jiffies / HZ;
-
-	if (now != challenge_timestamp) {
-		challenge_timestamp = now;
-		challenge_count = 0;
-	}
-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
-		tcp_send_ack(sk);
-	}
-}
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5988,7 +5995,7 @@
 				 */
 				if (req) {
 					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->retrans;
+					tp->total_retrans = req->num_retrans;
 
 					reqsk_fastopen_remove(sk, req, false);
 				} else {
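
tcp_send_challenge_ack(), moved above so it is visible to tcp_ack(), rate-limits challenge ACKs per RFC 5961: the counter resets whenever the second-granularity timestamp changes, and an ACK is sent only while the counter stays at or below sysctl_tcp_challenge_ack_limit. A stand-alone sketch of that window logic; the limit below is an arbitrary example value:

	#include <stdio.h>
	#include <time.h>

	static unsigned int challenge_ack_limit = 100;	/* example limit */
	static unsigned int challenge_count;
	static time_t challenge_timestamp;

	/* returns 1 if a challenge ACK may be sent at second "now" */
	static int may_send_challenge_ack(time_t now)
	{
		if (now != challenge_timestamp) {
			challenge_timestamp = now;
			challenge_count = 0;
		}
		return ++challenge_count <= challenge_ack_limit;
	}

	int main(void)
	{
		time_t now = time(NULL);
		unsigned int sent = 0;

		for (int i = 0; i < 1000; i++)
			sent += may_send_challenge_ack(now);
		printf("sent %u of 1000 challenge ACKs in one second\n", sent);
		return 0;
	}
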
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c4a643..1ed2307 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,14 +138,6 @@
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 
-static int tcp_repair_connect(struct sock *sk)
-{
-	tcp_connect_init(sk);
-	tcp_finish_connect(sk, NULL);
-
-	return 0;
-}
-
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -250,10 +242,7 @@
 
 	inet->inet_id = tp->write_seq ^ jiffies;
 
-	if (likely(!tp->repair))
-		err = tcp_connect(sk);
-	else
-		err = tcp_repair_connect(sk);
+	err = tcp_connect(sk);
 
 	rt = NULL;
 	if (err)
@@ -877,10 +866,13 @@
 }
 
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			     struct request_values *rvp)
 {
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
+	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
+
+	if (!res)
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+	return res;
 }
 
 /*
@@ -1070,7 +1062,7 @@
 }
 EXPORT_SYMBOL(tcp_md5_do_del);
 
-void tcp_clear_md5_list(struct sock *sk)
+static void tcp_clear_md5_list(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *key;
@@ -1386,7 +1378,8 @@
 	struct sock *child;
 	int err;
 
-	req->retrans = 0;
+	req->num_retrans = 0;
+	req->num_timeout = 0;
 	req->sk = NULL;
 
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
@@ -1741,7 +1734,7 @@
 
 	tcp_initialize_rcv_mss(newsk);
 	tcp_synack_rtt_meas(newsk, req);
-	newtp->total_retrans = req->retrans;
+	newtp->total_retrans = req->num_retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
@@ -1919,7 +1912,6 @@
 
 void tcp_v4_early_demux(struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb->dev);
 	const struct iphdr *iph;
 	const struct tcphdr *th;
 	struct sock *sk;
@@ -1927,16 +1919,16 @@
 	if (skb->pkt_type != PACKET_HOST)
 		return;
 
-	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
+	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
 		return;
 
 	iph = ip_hdr(skb);
-	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
+	th = tcp_hdr(skb);
 
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		return;
 
-	sk = __inet_lookup_established(net, &tcp_hashinfo,
+	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
 				       iph->saddr, th->source,
 				       iph->daddr, ntohs(th->dest),
 				       skb->skb_iif);
@@ -2640,7 +2632,7 @@
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
 		jiffies_delta_to_clock_t(delta),
-		req->retrans,
+		req->num_timeout,
 		from_kuid_munged(seq_user_ns(f), uid),
 		0,  /* non standard timer */
 		0, /* open_requests have no inode */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a7302d9..f35f2df 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -553,7 +553,7 @@
 			 * it can be estimated (approximately)
 			 * from another data.
 			 */
-			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
+			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
 			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
 		}
 	}
@@ -582,7 +582,7 @@
 		 * Note that even if there is new data in the SYN packet
 		 * they will be thrown away too.
 		 */
-		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+		inet_rtx_syn_ack(sk, req);
 		return NULL;
 	}
 
@@ -696,7 +696,7 @@
 	/* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
 	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
 		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
-	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+	else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
 		tcp_rsk(req)->snt_synack = 0;
 
 	/* For Fast Open no more processing is needed (sk is the
@@ -706,7 +706,7 @@
 		return sk;
 
 	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
-	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
 		inet_rsk(req)->acked = 1;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2798706..8ac0855 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2987,6 +2987,11 @@
 
 	tcp_connect_init(sk);
 
+	if (unlikely(tp->repair)) {
+		tcp_finish_connect(sk, NULL);
+		return 0;
+	}
+
 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
 	if (unlikely(buff == NULL))
 		return -ENOBUFS;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d47c1b4..b78aac3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -318,7 +318,7 @@
 	req = tcp_sk(sk)->fastopen_rsk;
 	req->rsk_ops->syn_ack_timeout(sk, req);
 
-	if (req->retrans >= max_retries) {
+	if (req->num_timeout >= max_retries) {
 		tcp_write_err(sk);
 		return;
 	}
@@ -327,10 +327,10 @@
 	 * regular retransmit because if the child socket has been accepted
 	 * it's not good to give up too easily.
 	 */
-	req->rsk_ops->rtx_syn_ack(sk, req, NULL);
-	req->retrans++;
+	inet_rtx_syn_ack(sk, req);
+	req->num_timeout++;
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-			  TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
+			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 }
 
 /*
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index b6d3f79..2068ac4 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -7,9 +7,11 @@
 ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
 		addrlabel.o \
 		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
-		raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+		raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
 		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
+ipv6-offload :=	ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
+
 ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
 
@@ -39,5 +41,6 @@
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
 
 obj-y += addrconf_core.o exthdrs_core.o
+obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
 
 obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0424e4e..fc0e13a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -81,6 +81,7 @@
 #include <net/pkt_sched.h>
 #include <linux/if_tunnel.h>
 #include <linux/rtnetlink.h>
+#include <linux/netconf.h>
 
 #ifdef CONFIG_IPV6_PRIVACY
 #include <linux/random.h>
@@ -401,7 +402,7 @@
 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
 		ndev->cnf.accept_dad = -1;
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
 		pr_info("%s: Disabled Multicast RS\n", dev->name);
 		ndev->cnf.rtr_solicits = 0;
@@ -460,6 +461,141 @@
 	return idev;
 }
 
+static int inet6_netconf_msgsize_devconf(int type)
+{
+	int size =  NLMSG_ALIGN(sizeof(struct netconfmsg))
+		    + nla_total_size(4);	/* NETCONFA_IFINDEX */
+
+	/* type -1 is used for ALL */
+	if (type == -1 || type == NETCONFA_FORWARDING)
+		size += nla_total_size(4);
+
+	return size;
+}
+
+static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
+				      struct ipv6_devconf *devconf, u32 portid,
+				      u32 seq, int event, unsigned int flags,
+				      int type)
+{
+	struct nlmsghdr  *nlh;
+	struct netconfmsg *ncm;
+
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
+			flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ncm = nlmsg_data(nlh);
+	ncm->ncm_family = AF_INET6;
+
+	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
+		goto nla_put_failure;
+
+	/* type -1 is used for ALL */
+	if ((type == -1 || type == NETCONFA_FORWARDING) &&
+	    nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
+					 struct ipv6_devconf *devconf)
+{
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
+					 RTM_NEWNETCONF, 0, type);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC);
+	return;
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
+}
+
+static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
+	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
+	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
+};
+
+static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+				     struct nlmsghdr *nlh,
+				     void *arg)
+{
+	struct net *net = sock_net(in_skb->sk);
+	struct nlattr *tb[NETCONFA_MAX+1];
+	struct netconfmsg *ncm;
+	struct sk_buff *skb;
+	struct ipv6_devconf *devconf;
+	struct inet6_dev *in6_dev;
+	struct net_device *dev;
+	int ifindex;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
+			  devconf_ipv6_policy);
+	if (err < 0)
+		goto errout;
+
+	err = -EINVAL;
+	if (!tb[NETCONFA_IFINDEX])
+		goto errout;
+
+	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
+	switch (ifindex) {
+	case NETCONFA_IFINDEX_ALL:
+		devconf = net->ipv6.devconf_all;
+		break;
+	case NETCONFA_IFINDEX_DEFAULT:
+		devconf = net->ipv6.devconf_dflt;
+		break;
+	default:
+		dev = __dev_get_by_index(net, ifindex);
+		if (dev == NULL)
+			goto errout;
+		in6_dev = __in6_dev_get(dev);
+		if (in6_dev == NULL)
+			goto errout;
+		devconf = &in6_dev->cnf;
+		break;
+	}
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(inet6_netconf_msgsize_devconf(-1), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
+					 NETLINK_CB(in_skb).portid,
+					 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
+					 -1);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+errout:
+	return err;
+}
+
 #ifdef CONFIG_SYSCTL
 static void dev_forward_change(struct inet6_dev *idev)
 {
@@ -471,7 +607,7 @@
 	dev = idev->dev;
 	if (idev->cnf.forwarding)
 		dev_disable_lro(dev);
-	if (dev && (dev->flags & IFF_MULTICAST)) {
+	if (dev->flags & IFF_MULTICAST) {
 		if (idev->cnf.forwarding)
 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
 		else
@@ -486,6 +622,8 @@
 		else
 			addrconf_leave_anycast(ifa);
 	}
+	inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING,
+				     dev->ifindex, &idev->cnf);
 }
 
 
@@ -518,6 +656,10 @@
 	*p = newf;
 
 	if (p == &net->ipv6.devconf_dflt->forwarding) {
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+						     NETCONFA_IFINDEX_DEFAULT,
+						     net->ipv6.devconf_dflt);
 		rtnl_unlock();
 		return 0;
 	}
@@ -525,6 +667,10 @@
 	if (p == &net->ipv6.devconf_all->forwarding) {
 		net->ipv6.devconf_dflt->forwarding = newf;
 		addrconf_forward_change(net, newf);
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+						     NETCONFA_IFINDEX_ALL,
+						     net->ipv6.devconf_all);
 	} else if ((!newf) ^ (!old))
 		dev_forward_change((struct inet6_dev *)table->extra1);
 	rtnl_unlock();
@@ -553,7 +699,7 @@
 		pr_warn("Freeing alive inet6 address %p\n", ifp);
 		return;
 	}
-	dst_release(&ifp->rt->dst);
+	ip6_rt_put(ifp->rt);
 
 	kfree_rcu(ifp, rcu);
 }
@@ -805,7 +951,7 @@
 				rt6_set_expires(rt, expires);
 			}
 		}
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 	}
 
 	/* clean up prefsrc entries */
@@ -1692,7 +1838,7 @@
 	   This thing is done here expecting that the whole
 	   class of non-broadcast devices need not cloning.
 	 */
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 	if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
 		cfg.fc_flags |= RTF_NONEXTHOP;
 #endif
@@ -1752,7 +1898,7 @@
 	ip6_route_add(&cfg);
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void sit_route_add(struct net_device *dev)
 {
 	struct fib6_config cfg = {
@@ -1881,8 +2027,7 @@
 			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
 					      dev, expires, flags);
 		}
-		if (rt)
-			dst_release(&rt->dst);
+		ip6_rt_put(rt);
 	}
 
 	/* Try to figure out our local address for this prefix */
@@ -2104,7 +2249,7 @@
 	if (dev == NULL)
 		goto err_exit;
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 	if (dev->type == ARPHRD_SIT) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		struct ifreq ifr;
@@ -2268,7 +2413,7 @@
 	struct in6_ifreq ireq;
 	int err;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
@@ -2287,7 +2432,7 @@
 	struct in6_ifreq ireq;
 	int err;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
@@ -2315,7 +2460,7 @@
 	}
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void sit_add_v4_addrs(struct inet6_dev *idev)
 {
 	struct in6_addr addr;
@@ -2434,7 +2579,7 @@
 		addrconf_add_linklocal(idev, &addr);
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void addrconf_sit_config(struct net_device *dev)
 {
 	struct inet6_dev *idev;
@@ -2471,7 +2616,7 @@
 }
 #endif
 
-#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
 static void addrconf_gre_config(struct net_device *dev)
 {
 	struct inet6_dev *idev;
@@ -2601,12 +2746,12 @@
 		}
 
 		switch (dev->type) {
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 		case ARPHRD_SIT:
 			addrconf_sit_config(dev);
 			break;
 #endif
-#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
 		case ARPHRD_IPGRE:
 			addrconf_gre_config(dev);
 			break;
@@ -3194,7 +3339,7 @@
 }
 #endif	/* CONFIG_PROC_FS */
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 /* Check if address is a home address configured on any interface. */
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
@@ -3892,6 +4037,7 @@
 	array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
+	array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4560,6 +4706,13 @@
 			.proc_handler   = proc_dointvec
 		},
 		{
+			.procname       = "ndisc_notify",
+			.data           = &ipv6_devconf.ndisc_notify,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec
+		},
+		{
 			/* sentinel */
 		}
 	},
@@ -4784,6 +4937,8 @@
 			inet6_dump_ifmcaddr, NULL);
 	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
 			inet6_dump_ifacaddr, NULL);
+	__rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
+			NULL, NULL);
 
 	ipv6_addr_label_rtnl_register();
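
For the RTM_GETNETCONF support added above, the reply passed to nlmsg_new() is sized by inet6_netconf_msgsize_devconf(): an aligned struct netconfmsg (a single family byte) plus one aligned attribute per field to be dumped. A sketch of that arithmetic under the usual netlink rules (4-byte alignment, 4-byte attribute header):

	#include <stdio.h>

	#define ALIGN4(x)        (((x) + 3u) & ~3u)
	#define NLA_TOTAL(plen)  ALIGN4(4u + (plen))	/* attribute header + payload */

	int main(void)
	{
		unsigned int netconfmsg = 1;	/* struct netconfmsg: one byte */
		unsigned int size;

		size = ALIGN4(netconfmsg)	/* family header            */
		     + NLA_TOTAL(4)		/* NETCONFA_IFINDEX (s32)   */
		     + NLA_TOTAL(4);		/* NETCONFA_FORWARDING (s32)*/

		printf("attribute payload for a full RTM_NEWNETCONF reply: %u bytes\n",
		       size);
		return 0;
	}
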
 
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a974247..b043c60 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -160,7 +160,8 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern &&
+	    !ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -282,7 +283,7 @@
 		return -EINVAL;
 
 	snum = ntohs(addr->sin6_port);
-	if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+	if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
 	lock_sock(sk);
@@ -699,249 +700,9 @@
 }
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
-{
-	const struct inet6_protocol *ops = NULL;
-
-	for (;;) {
-		struct ipv6_opt_hdr *opth;
-		int len;
-
-		if (proto != NEXTHDR_HOP) {
-			ops = rcu_dereference(inet6_protos[proto]);
-
-			if (unlikely(!ops))
-				break;
-
-			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
-				break;
-		}
-
-		if (unlikely(!pskb_may_pull(skb, 8)))
-			break;
-
-		opth = (void *)skb->data;
-		len = ipv6_optlen(opth);
-
-		if (unlikely(!pskb_may_pull(skb, len)))
-			break;
-
-		proto = opth->nexthdr;
-		__skb_pull(skb, len);
-	}
-
-	return proto;
-}
-
-static int ipv6_gso_send_check(struct sk_buff *skb)
-{
-	const struct ipv6hdr *ipv6h;
-	const struct inet6_protocol *ops;
-	int err = -EINVAL;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
-		goto out;
-
-	ipv6h = ipv6_hdr(skb);
-	__skb_pull(skb, sizeof(*ipv6h));
-	err = -EPROTONOSUPPORT;
-
-	rcu_read_lock();
-	ops = rcu_dereference(inet6_protos[
-		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
-
-	if (likely(ops && ops->gso_send_check)) {
-		skb_reset_transport_header(skb);
-		err = ops->gso_send_check(skb);
-	}
-	rcu_read_unlock();
-
-out:
-	return err;
-}
-
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
-	netdev_features_t features)
-{
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
-	struct ipv6hdr *ipv6h;
-	const struct inet6_protocol *ops;
-	int proto;
-	struct frag_hdr *fptr;
-	unsigned int unfrag_ip6hlen;
-	u8 *prevhdr;
-	int offset = 0;
-
-	if (!(features & NETIF_F_V6_CSUM))
-		features &= ~NETIF_F_SG;
-
-	if (unlikely(skb_shinfo(skb)->gso_type &
-		     ~(SKB_GSO_UDP |
-		       SKB_GSO_DODGY |
-		       SKB_GSO_TCP_ECN |
-		       SKB_GSO_TCPV6 |
-		       0)))
-		goto out;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
-		goto out;
-
-	ipv6h = ipv6_hdr(skb);
-	__skb_pull(skb, sizeof(*ipv6h));
-	segs = ERR_PTR(-EPROTONOSUPPORT);
-
-	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
-	rcu_read_lock();
-	ops = rcu_dereference(inet6_protos[proto]);
-	if (likely(ops && ops->gso_segment)) {
-		skb_reset_transport_header(skb);
-		segs = ops->gso_segment(skb, features);
-	}
-	rcu_read_unlock();
-
-	if (IS_ERR(segs))
-		goto out;
-
-	for (skb = segs; skb; skb = skb->next) {
-		ipv6h = ipv6_hdr(skb);
-		ipv6h->payload_len = htons(skb->len - skb->mac_len -
-					   sizeof(*ipv6h));
-		if (proto == IPPROTO_UDP) {
-			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-			fptr = (struct frag_hdr *)(skb_network_header(skb) +
-				unfrag_ip6hlen);
-			fptr->frag_off = htons(offset);
-			if (skb->next != NULL)
-				fptr->frag_off |= htons(IP6_MF);
-			offset += (ntohs(ipv6h->payload_len) -
-				   sizeof(struct frag_hdr));
-		}
-	}
-
-out:
-	return segs;
-}
-
-static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
-					 struct sk_buff *skb)
-{
-	const struct inet6_protocol *ops;
-	struct sk_buff **pp = NULL;
-	struct sk_buff *p;
-	struct ipv6hdr *iph;
-	unsigned int nlen;
-	unsigned int hlen;
-	unsigned int off;
-	int flush = 1;
-	int proto;
-	__wsum csum;
-
-	off = skb_gro_offset(skb);
-	hlen = off + sizeof(*iph);
-	iph = skb_gro_header_fast(skb, off);
-	if (skb_gro_header_hard(skb, hlen)) {
-		iph = skb_gro_header_slow(skb, hlen, off);
-		if (unlikely(!iph))
-			goto out;
-	}
-
-	skb_gro_pull(skb, sizeof(*iph));
-	skb_set_transport_header(skb, skb_gro_offset(skb));
-
-	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
-
-	rcu_read_lock();
-	proto = iph->nexthdr;
-	ops = rcu_dereference(inet6_protos[proto]);
-	if (!ops || !ops->gro_receive) {
-		__pskb_pull(skb, skb_gro_offset(skb));
-		proto = ipv6_gso_pull_exthdrs(skb, proto);
-		skb_gro_pull(skb, -skb_transport_offset(skb));
-		skb_reset_transport_header(skb);
-		__skb_push(skb, skb_gro_offset(skb));
-
-		ops = rcu_dereference(inet6_protos[proto]);
-		if (!ops || !ops->gro_receive)
-			goto out_unlock;
-
-		iph = ipv6_hdr(skb);
-	}
-
-	NAPI_GRO_CB(skb)->proto = proto;
-
-	flush--;
-	nlen = skb_network_header_len(skb);
-
-	for (p = *head; p; p = p->next) {
-		const struct ipv6hdr *iph2;
-		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
-
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
-
-		iph2 = ipv6_hdr(p);
-		first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
-
-		/* All fields must match except length and Traffic Class. */
-		if (nlen != skb_network_header_len(p) ||
-		    (first_word & htonl(0xF00FFFFF)) ||
-		    memcmp(&iph->nexthdr, &iph2->nexthdr,
-			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
-			NAPI_GRO_CB(p)->same_flow = 0;
-			continue;
-		}
-		/* flush if Traffic Class fields are different */
-		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
-		NAPI_GRO_CB(p)->flush |= flush;
-	}
-
-	NAPI_GRO_CB(skb)->flush |= flush;
-
-	csum = skb->csum;
-	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
-
-	pp = ops->gro_receive(head, skb);
-
-	skb->csum = csum;
-
-out_unlock:
-	rcu_read_unlock();
-
-out:
-	NAPI_GRO_CB(skb)->flush |= flush;
-
-	return pp;
-}
-
-static int ipv6_gro_complete(struct sk_buff *skb)
-{
-	const struct inet6_protocol *ops;
-	struct ipv6hdr *iph = ipv6_hdr(skb);
-	int err = -ENOSYS;
-
-	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
-				 sizeof(*iph));
-
-	rcu_read_lock();
-	ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]);
-	if (WARN_ON(!ops || !ops->gro_complete))
-		goto out_unlock;
-
-	err = ops->gro_complete(skb);
-
-out_unlock:
-	rcu_read_unlock();
-
-	return err;
-}
-
 static struct packet_type ipv6_packet_type __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IPV6),
 	.func = ipv6_rcv,
-	.gso_send_check = ipv6_gso_send_check,
-	.gso_segment = ipv6_gso_segment,
-	.gro_receive = ipv6_gro_receive,
-	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 7e61395..ecc35b9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -44,7 +44,7 @@
 #define IPV6HDR_BASELEN 8
 
 struct tmp_ext {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		struct in6_addr saddr;
 #endif
 		struct in6_addr daddr;
@@ -152,7 +152,7 @@
 	return false;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 /**
  *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
  *	@iph: IPv6 header
@@ -320,7 +320,7 @@
 	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
 
 	if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		memcpy(&top_iph->saddr, iph_ext, extlen);
 #else
 		memcpy(&top_iph->daddr, iph_ext, extlen);
@@ -385,7 +385,7 @@
 	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
 
 	if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		memcpy(iph_ext, &top_iph->saddr, extlen);
 #else
 		memcpy(iph_ext, &top_iph->daddr, extlen);
@@ -434,7 +434,7 @@
 	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
 
 	if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		memcpy(&top_iph->saddr, iph_ext, extlen);
 #else
 		memcpy(&top_iph->daddr, iph_ext, extlen);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index cdf02be..2f4f584 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -64,7 +64,7 @@
 	int	ishost = !net->ipv6.devconf_all->forwarding;
 	int	err = 0;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 	if (ipv6_addr_is_multicast(addr))
 		return -EINVAL;
@@ -84,7 +84,7 @@
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
 			dev = rt->dst.dev;
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 		} else if (ishost) {
 			err = -EADDRNOTAVAIL;
 			goto error;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index be2b67d6..8edf260 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -701,7 +701,7 @@
 				err = -EINVAL;
 				goto exit_f;
 			}
-			if (!capable(CAP_NET_RAW)) {
+			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
 				err = -EPERM;
 				goto exit_f;
 			}
@@ -721,7 +721,7 @@
 				err = -EINVAL;
 				goto exit_f;
 			}
-			if (!capable(CAP_NET_RAW)) {
+			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
 				err = -EPERM;
 				goto exit_f;
 			}
@@ -746,7 +746,7 @@
 				err = -EINVAL;
 				goto exit_f;
 			}
-			if (!capable(CAP_NET_RAW)) {
+			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
 				err = -EPERM;
 				goto exit_f;
 			}
@@ -769,7 +769,7 @@
 			rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);
 
 			switch (rthdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 			case IPV6_SRCRT_TYPE_2:
 				if (rthdr->hdrlen != 2 ||
 				    rthdr->segments_left != 1) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index fa3d9c3..473f628 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -43,56 +43,12 @@
 #include <net/ndisc.h>
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/xfrm.h>
 #endif
 
 #include <asm/uaccess.h>
 
-int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
-{
-	const unsigned char *nh = skb_network_header(skb);
-	int packet_len = skb->tail - skb->network_header;
-	struct ipv6_opt_hdr *hdr;
-	int len;
-
-	if (offset + 2 > packet_len)
-		goto bad;
-	hdr = (struct ipv6_opt_hdr *)(nh + offset);
-	len = ((hdr->hdrlen + 1) << 3);
-
-	if (offset + len > packet_len)
-		goto bad;
-
-	offset += 2;
-	len -= 2;
-
-	while (len > 0) {
-		int opttype = nh[offset];
-		int optlen;
-
-		if (opttype == type)
-			return offset;
-
-		switch (opttype) {
-		case IPV6_TLV_PAD1:
-			optlen = 1;
-			break;
-		default:
-			optlen = nh[offset + 1] + 2;
-			if (optlen > len)
-				goto bad;
-			break;
-		}
-		offset += optlen;
-		len -= optlen;
-	}
-	/* not_found */
- bad:
-	return -1;
-}
-EXPORT_SYMBOL_GPL(ipv6_find_tlv);
-
 /*
  *	Parsing tlv encoded headers.
  *
@@ -224,7 +180,7 @@
   Destination options header.
  *****************************/
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
 {
 	struct ipv6_destopt_hao *hao;
@@ -288,7 +244,7 @@
 #endif
 
 static const struct tlvtype_proc tlvprocdestopt_lst[] = {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	{
 		.type	= IPV6_TLV_HAO,
 		.func	= ipv6_dest_hao,
@@ -300,7 +256,7 @@
 static int ipv6_destopt_rcv(struct sk_buff *skb)
 {
 	struct inet6_skb_parm *opt = IP6CB(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	__u16 dstbuf;
 #endif
 	struct dst_entry *dst = skb_dst(skb);
@@ -315,14 +271,14 @@
 	}
 
 	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	dstbuf = opt->dst1;
 #endif
 
 	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
 		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
 		opt = IP6CB(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		opt->nhoff = dstbuf;
 #else
 		opt->nhoff = opt->dst1;
@@ -378,7 +334,7 @@
 looped_back:
 	if (hdr->segments_left == 0) {
 		switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPV6_SRCRT_TYPE_2:
 			/* Silently discard type 2 header unless it was
 			 * processed by own
@@ -404,7 +360,7 @@
 	}
 
 	switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	case IPV6_SRCRT_TYPE_2:
 		if (accept_source_route < 0)
 			goto unknown_rh;
@@ -461,7 +417,7 @@
 	addr += i - 1;
 
 	switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	case IPV6_SRCRT_TYPE_2:
 		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
 				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
@@ -528,12 +484,12 @@
 
 static const struct inet6_protocol rthdr_protocol = {
 	.handler	=	ipv6_rthdr_rcv,
-	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
+	.flags		=	INET6_PROTO_NOPOLICY,
 };
 
 static const struct inet6_protocol destopt_protocol = {
 	.handler	=	ipv6_destopt_rcv,
-	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
+	.flags		=	INET6_PROTO_NOPOLICY,
 };
 
 static const struct inet6_protocol nodata_protocol = {
@@ -559,10 +515,10 @@
 
 out:
 	return ret;
-out_rthdr:
-	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
 out_destopt:
 	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
+out_rthdr:
+	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
 	goto out;
 };
 
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index f73d59a..e7d756e 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -111,3 +111,47 @@
 	return start;
 }
 EXPORT_SYMBOL(ipv6_skip_exthdr);
+
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
+{
+	const unsigned char *nh = skb_network_header(skb);
+	int packet_len = skb->tail - skb->network_header;
+	struct ipv6_opt_hdr *hdr;
+	int len;
+
+	if (offset + 2 > packet_len)
+		goto bad;
+	hdr = (struct ipv6_opt_hdr *)(nh + offset);
+	len = ((hdr->hdrlen + 1) << 3);
+
+	if (offset + len > packet_len)
+		goto bad;
+
+	offset += 2;
+	len -= 2;
+
+	while (len > 0) {
+		int opttype = nh[offset];
+		int optlen;
+
+		if (opttype == type)
+			return offset;
+
+		switch (opttype) {
+		case IPV6_TLV_PAD1:
+			optlen = 1;
+			break;
+		default:
+			optlen = nh[offset + 1] + 2;
+			if (optlen > len)
+				goto bad;
+			break;
+		}
+		offset += optlen;
+		len -= optlen;
+	}
+	/* not_found */
+ bad:
+	return -1;
+}
+EXPORT_SYMBOL_GPL(ipv6_find_tlv);
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
new file mode 100644
index 0000000..cf77f3a
--- /dev/null
+++ b/net/ipv6/exthdrs_offload.c
@@ -0,0 +1,41 @@
+/*
+ *	IPV6 GSO/GRO offload support
+ *	Linux INET6 implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      IPV6 Extension Header GSO/GRO support
+ */
+#include <net/protocol.h>
+#include "ip6_offload.h"
+
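+/* Note: these offload entries carry no callbacks; they only mark the routing
+ * and destination-options headers as traversable extension headers
+ * (INET6_PROTO_GSO_EXTHDR) so the GSO/GRO code may pull past them.
+ */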
+static const struct net_offload rthdr_offload = {
+	.flags		=	INET6_PROTO_GSO_EXTHDR,
+};
+
+static const struct net_offload dstopt_offload = {
+	.flags		=	INET6_PROTO_GSO_EXTHDR,
+};
+
+int __init ipv6_exthdrs_offload_init(void)
+{
+	int ret;
+
+	ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
+	if (ret)
+		goto out;
+
+	ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
+	if (ret)
+		goto out_rt;
+
+out:
+	return ret;
+
+out_rt:
+	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+	goto out;
+}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index d9fb911..2e1a432 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -100,7 +100,7 @@
 		goto out;
 	}
 again:
-	dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	rt = NULL;
 	goto out;
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 24d69db..b4a9fd5 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -280,7 +280,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 static void mip6_addr_swap(struct sk_buff *skb)
 {
 	struct ipv6hdr *iph = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 24995a9..710cafd 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -672,6 +672,8 @@
 			    iter->rt6i_idev == rt->rt6i_idev &&
 			    ipv6_addr_equal(&iter->rt6i_gateway,
 					    &rt->rt6i_gateway)) {
+				if (rt->rt6i_nsiblings)
+					rt->rt6i_nsiblings = 0;
 				if (!(iter->rt6i_flags & RTF_EXPIRES))
 					return -EEXIST;
 				if (!(rt->rt6i_flags & RTF_EXPIRES))
@@ -680,6 +682,21 @@
 					rt6_set_expires(iter, rt->dst.expires);
 				return -EEXIST;
 			}
+			/* If we have the same destination and the same metric,
+			 * but not the same gateway, then the route we are
+			 * trying to add is a sibling of this route: increment
+			 * our counter of siblings, and later we will add our
+			 * route to the list.
+			 * Only static routes (which don't have the RTF_EXPIRES
+			 * flag) are used for ECMPv6.
+			 *
+			 * To avoid long lists, we only add siblings if the
+			 * route has a gateway.
+			 */
+			if (rt->rt6i_flags & RTF_GATEWAY &&
+			    !(rt->rt6i_flags & RTF_EXPIRES) &&
+			    !(iter->rt6i_flags & RTF_EXPIRES))
+				rt->rt6i_nsiblings++;
 		}
 
 		if (iter->rt6i_metric > rt->rt6i_metric)
@@ -692,6 +709,35 @@
 	if (ins == &fn->leaf)
 		fn->rr_ptr = NULL;
 
+	/* Link this route to its ECMP siblings (same destination and metric). */
+	if (rt->rt6i_nsiblings) {
+		unsigned int rt6i_nsiblings;
+		struct rt6_info *sibling, *temp_sibling;
+
+		/* Find the first route that has the same metric */
+		sibling = fn->leaf;
+		while (sibling) {
+			if (sibling->rt6i_metric == rt->rt6i_metric) {
+				list_add_tail(&rt->rt6i_siblings,
+					      &sibling->rt6i_siblings);
+				break;
+			}
+			sibling = sibling->dst.rt6_next;
+		}
+		/* For each sibling in the list, increment the counter of
+		 * siblings. BUG() if the counters do not match: the list of
+		 * siblings is broken!
+		 */
+		rt6i_nsiblings = 0;
+		list_for_each_entry_safe(sibling, temp_sibling,
+					 &rt->rt6i_siblings, rt6i_siblings) {
+			sibling->rt6i_nsiblings++;
+			BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
+			rt6i_nsiblings++;
+		}
+		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
+	}
+
 	/*
 	 *	insert node
 	 */
@@ -1193,6 +1239,17 @@
 	if (fn->rr_ptr == rt)
 		fn->rr_ptr = NULL;
 
+	/* Remove this entry from other siblings */
+	if (rt->rt6i_nsiblings) {
+		struct rt6_info *sibling, *next_sibling;
+
+		list_for_each_entry_safe(sibling, next_sibling,
+					 &rt->rt6i_siblings, rt6i_siblings)
+			sibling->rt6i_nsiblings--;
+		rt->rt6i_nsiblings = 0;
+		list_del_init(&rt->rt6i_siblings);
+	}
+
 	/* Adjust walkers */
 	read_lock(&fib6_walker_lock);
 	FOR_WALKERS(w) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 90bbefb..29124b7 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -519,7 +519,8 @@
 		}
 		read_unlock_bh(&ip6_sk_fl_lock);
 
-		if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
+		if (freq.flr_share == IPV6_FL_S_NONE &&
+		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
 			fl = fl_lookup(net, freq.flr_label);
 			if (fl) {
 				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d5cb3c4..867466c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -109,21 +109,6 @@
 #define tunnels_r	tunnels[2]
 #define tunnels_l	tunnels[1]
 #define tunnels_wc	tunnels[0]
-/*
- * Locking : hash tables are protected by RCU and RTNL
- */
-
-#define for_each_ip_tunnel_rcu(start) \
-	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 
 static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
 		struct rtnl_link_stats64 *tot)
@@ -181,7 +166,7 @@
 		       ARPHRD_ETHER : ARPHRD_IP6GRE;
 	int score, cand_score = 4;
 
-	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
 		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    key != t->parms.i_key ||
@@ -206,7 +191,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    key != t->parms.i_key ||
 		    !(t->dev->flags & IFF_UP))
@@ -230,7 +215,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
 		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
 			  (!ipv6_addr_equal(local, &t->parms.raddr) ||
 				 !ipv6_addr_is_multicast(local))) ||
@@ -256,7 +241,7 @@
 		}
 	}
 
-	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
 		if (t->parms.i_key != key ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
@@ -1069,7 +1054,7 @@
 					dev->mtu = IPV6_MIN_MTU;
 			}
 		}
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 	}
 
 	t->hlen = addend;
@@ -1161,7 +1146,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -1209,7 +1194,7 @@
 
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		if (dev == ign->fb_tunnel_dev) {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
new file mode 100644
index 0000000..f26f0da
--- /dev/null
+++ b/net/ipv6/ip6_offload.c
@@ -0,0 +1,282 @@
+/*
+ *	IPV6 GSO/GRO offload support
+ *	Linux INET6 implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/printk.h>
+
+#include <net/protocol.h>
+#include <net/ipv6.h>
+
+#include "ip6_offload.h"
+
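+/* Walk the extension header chain starting at skb->data: hop-by-hop headers
+ * are always pulled, other headers only if their offload entry is marked
+ * INET6_PROTO_GSO_EXTHDR.  Returns the protocol of the first header that
+ * cannot be pulled.
+ */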
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
+{
+	const struct net_offload *ops = NULL;
+
+	for (;;) {
+		struct ipv6_opt_hdr *opth;
+		int len;
+
+		if (proto != NEXTHDR_HOP) {
+			ops = rcu_dereference(inet6_offloads[proto]);
+
+			if (unlikely(!ops))
+				break;
+
+			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
+				break;
+		}
+
+		if (unlikely(!pskb_may_pull(skb, 8)))
+			break;
+
+		opth = (void *)skb->data;
+		len = ipv6_optlen(opth);
+
+		if (unlikely(!pskb_may_pull(skb, len)))
+			break;
+
+		proto = opth->nexthdr;
+		__skb_pull(skb, len);
+	}
+
+	return proto;
+}
+
+static int ipv6_gso_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	const struct net_offload *ops;
+	int err = -EINVAL;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+		goto out;
+
+	ipv6h = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*ipv6h));
+	err = -EPROTONOSUPPORT;
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_offloads[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
+	if (likely(ops && ops->callbacks.gso_send_check)) {
+		skb_reset_transport_header(skb);
+		err = ops->callbacks.gso_send_check(skb);
+	}
+	rcu_read_unlock();
+
+out:
+	return err;
+}
+
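+/* Segment an IPv6 GSO super-packet: pull the extension headers, hand the
+ * payload to the upper-layer gso_segment callback, then fix up the payload
+ * length (and, for UDP fragmentation, the fragment header offsets) of each
+ * resulting segment.
+ */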
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+	netdev_features_t features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct ipv6hdr *ipv6h;
+	const struct net_offload *ops;
+	int proto;
+	struct frag_hdr *fptr;
+	unsigned int unfrag_ip6hlen;
+	u8 *prevhdr;
+	int offset = 0;
+
+	if (!(features & NETIF_F_V6_CSUM))
+		features &= ~NETIF_F_SG;
+
+	if (unlikely(skb_shinfo(skb)->gso_type &
+		     ~(SKB_GSO_UDP |
+		       SKB_GSO_DODGY |
+		       SKB_GSO_TCP_ECN |
+		       SKB_GSO_TCPV6 |
+		       0)))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+		goto out;
+
+	ipv6h = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*ipv6h));
+	segs = ERR_PTR(-EPROTONOSUPPORT);
+
+	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_offloads[proto]);
+	if (likely(ops && ops->callbacks.gso_segment)) {
+		skb_reset_transport_header(skb);
+		segs = ops->callbacks.gso_segment(skb, features);
+	}
+	rcu_read_unlock();
+
+	if (IS_ERR(segs))
+		goto out;
+
+	for (skb = segs; skb; skb = skb->next) {
+		ipv6h = ipv6_hdr(skb);
+		ipv6h->payload_len = htons(skb->len - skb->mac_len -
+					   sizeof(*ipv6h));
+		if (proto == IPPROTO_UDP) {
+			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+			fptr = (struct frag_hdr *)(skb_network_header(skb) +
+				unfrag_ip6hlen);
+			fptr->frag_off = htons(offset);
+			if (skb->next != NULL)
+				fptr->frag_off |= htons(IP6_MF);
+			offset += (ntohs(ipv6h->payload_len) -
+				   sizeof(struct frag_hdr));
+		}
+	}
+
+out:
+	return segs;
+}
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	const struct net_offload *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	unsigned int hlen;
+	unsigned int off;
+	int flush = 1;
+	int proto;
+	__wsum csum;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*iph);
+	iph = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		iph = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!iph))
+			goto out;
+	}
+
+	skb_gro_pull(skb, sizeof(*iph));
+	skb_set_transport_header(skb, skb_gro_offset(skb));
+
+	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
+
+	rcu_read_lock();
+	proto = iph->nexthdr;
+	ops = rcu_dereference(inet6_offloads[proto]);
+	if (!ops || !ops->callbacks.gro_receive) {
+		__pskb_pull(skb, skb_gro_offset(skb));
+		proto = ipv6_gso_pull_exthdrs(skb, proto);
+		skb_gro_pull(skb, -skb_transport_offset(skb));
+		skb_reset_transport_header(skb);
+		__skb_push(skb, skb_gro_offset(skb));
+
+		ops = rcu_dereference(inet6_offloads[proto]);
+		if (!ops || !ops->callbacks.gro_receive)
+			goto out_unlock;
+
+		iph = ipv6_hdr(skb);
+	}
+
+	NAPI_GRO_CB(skb)->proto = proto;
+
+	flush--;
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		const struct ipv6hdr *iph2;
+		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;
+
+		/* All fields must match except length and Traffic Class. */
+		if (nlen != skb_network_header_len(p) ||
+		    (first_word & htonl(0xF00FFFFF)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+		/* flush if Traffic Class fields are different */
+		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	csum = skb->csum;
+	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+
+	pp = ops->callbacks.gro_receive(head, skb);
+
+	skb->csum = csum;
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	const struct net_offload *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+		goto out_unlock;
+
+	err = ops->callbacks.gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
+static struct packet_offload ipv6_packet_offload __read_mostly = {
+	.type = cpu_to_be16(ETH_P_IPV6),
+	.callbacks = {
+		.gso_send_check = ipv6_gso_send_check,
+		.gso_segment = ipv6_gso_segment,
+		.gro_receive = ipv6_gro_receive,
+		.gro_complete = ipv6_gro_complete,
+	},
+};
+
+static int __init ipv6_offload_init(void)
+{
+
+	if (tcpv6_offload_init() < 0)
+		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
+	if (udp_offload_init() < 0)
+		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
+	if (ipv6_exthdrs_offload_init() < 0)
+		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
+
+	dev_add_offload(&ipv6_packet_offload);
+	return 0;
+}
+
+fs_initcall(ipv6_offload_init);
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h
new file mode 100644
index 0000000..2e155c6
--- /dev/null
+++ b/net/ipv6/ip6_offload.h
@@ -0,0 +1,18 @@
+/*
+ *	IPV6 GSO/GRO offload support
+ *	Linux INET6 implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ip6_offload_h
+#define __ip6_offload_h
+
+int ipv6_exthdrs_offload_init(void);
+int udp_offload_init(void);
+int tcpv6_offload_init(void);
+
+#endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index aece3e7..5552d13 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -538,78 +538,12 @@
 	to->tc_index = from->tc_index;
 #endif
 	nf_copy(to, from);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 	to->nf_trace = from->nf_trace;
 #endif
 	skb_copy_secmark(to, from);
 }
 
-int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
-{
-	u16 offset = sizeof(struct ipv6hdr);
-	struct ipv6_opt_hdr *exthdr =
-				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
-	unsigned int packet_len = skb->tail - skb->network_header;
-	int found_rhdr = 0;
-	*nexthdr = &ipv6_hdr(skb)->nexthdr;
-
-	while (offset + 1 <= packet_len) {
-
-		switch (**nexthdr) {
-
-		case NEXTHDR_HOP:
-			break;
-		case NEXTHDR_ROUTING:
-			found_rhdr = 1;
-			break;
-		case NEXTHDR_DEST:
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
-				break;
-#endif
-			if (found_rhdr)
-				return offset;
-			break;
-		default :
-			return offset;
-		}
-
-		offset += ipv6_optlen(exthdr);
-		*nexthdr = &exthdr->nexthdr;
-		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
-						 offset);
-	}
-
-	return offset;
-}
-
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-	static atomic_t ipv6_fragmentation_id;
-	int old, new;
-
-	if (rt && !(rt->dst.flags & DST_NOPEER)) {
-		struct inet_peer *peer;
-		struct net *net;
-
-		net = dev_net(rt->dst.dev);
-		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
-		if (peer) {
-			fhdr->identification = htonl(inet_getid(peer, 0));
-			inet_putpeer(peer);
-			return;
-		}
-	}
-	do {
-		old = atomic_read(&ipv6_fragmentation_id);
-		new = old + 1;
-		if (!new)
-			new = 1;
-	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
-	fhdr->identification = htonl(new);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
@@ -756,7 +690,7 @@
 		if (err == 0) {
 			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 				      IPSTATS_MIB_FRAGOKS);
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 			return 0;
 		}
 
@@ -768,7 +702,7 @@
 
 		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 			      IPSTATS_MIB_FRAGFAILS);
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 		return err;
 
 slow_path_clean:
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index cb7e2de..a14f28b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,6 +74,10 @@
 #define HASH_SIZE_SHIFT  5
 #define HASH_SIZE (1 << HASH_SIZE_SHIFT)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
 {
 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
@@ -83,6 +87,7 @@
 
 static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops ip6_link_ops __read_mostly;
 
 static int ip6_tnl_net_id __read_mostly;
 struct ip6_tnl_net {
@@ -94,14 +99,6 @@
 	struct ip6_tnl __rcu **tnls[2];
 };
 
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
-
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
 	struct pcpu_tstats sum = { 0 };
@@ -258,6 +255,33 @@
 	free_netdev(dev);
 }
 
+static int ip6_tnl_create2(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+	int err;
+
+	t = netdev_priv(dev);
+	err = ip6_tnl_dev_init(dev);
+	if (err < 0)
+		goto out;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out;
+
+	strcpy(t->parms.name, dev->name);
+	dev->rtnl_link_ops = &ip6_link_ops;
+
+	dev_hold(dev);
+	ip6_tnl_link(ip6n, t);
+	return 0;
+
+out:
+	return err;
+}
+
 /**
  * ip6_tnl_create - create a new tunnel
  *   @p: tunnel parameters
@@ -276,7 +300,6 @@
 	struct ip6_tnl *t;
 	char name[IFNAMSIZ];
 	int err;
-	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (p->name[0])
 		strlcpy(name, p->name, IFNAMSIZ);
@@ -291,17 +314,10 @@
 
 	t = netdev_priv(dev);
 	t->parms = *p;
-	err = ip6_tnl_dev_init(dev);
+	err = ip6_tnl_create2(dev);
 	if (err < 0)
 		goto failed_free;
 
-	if ((err = register_netdevice(dev)) < 0)
-		goto failed_free;
-
-	strcpy(t->parms.name, dev->name);
-
-	dev_hold(dev);
-	ip6_tnl_link(ip6n, t);
 	return t;
 
 failed_free:
@@ -663,8 +679,7 @@
 
 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
 
-		if (rt)
-			dst_release(&rt->dst);
+		ip6_rt_put(rt);
 
 		kfree_skb(skb2);
 	}
@@ -672,28 +687,26 @@
 	return 0;
 }
 
-static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
-					const struct ipv6hdr *ipv6h,
-					struct sk_buff *skb)
+static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+				       const struct ipv6hdr *ipv6h,
+				       struct sk_buff *skb)
 {
 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
 
 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
 
-	if (INET_ECN_is_ce(dsfield))
-		IP_ECN_set_ce(ip_hdr(skb));
+	return IP6_ECN_decapsulate(ipv6h, skb);
 }
 
-static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
-					const struct ipv6hdr *ipv6h,
-					struct sk_buff *skb)
+static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+				       const struct ipv6hdr *ipv6h,
+				       struct sk_buff *skb)
 {
 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
 
-	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
-		IP6_ECN_set_ce(ipv6_hdr(skb));
+	return IP6_ECN_decapsulate(ipv6h, skb);
 }
 
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
@@ -757,12 +770,13 @@
 
 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		       __u8 ipproto,
-		       void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
-						    const struct ipv6hdr *ipv6h,
-						    struct sk_buff *skb))
+		       int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+						   const struct ipv6hdr *ipv6h,
+						   struct sk_buff *skb))
 {
 	struct ip6_tnl *t;
 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	int err;
 
 	rcu_read_lock();
 
@@ -792,14 +806,26 @@
 		skb->pkt_type = PACKET_HOST;
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
+		__skb_tunnel_rx(skb, t->dev);
+
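+		/* A non-zero return from the ECN decapsulation helper means
+		 * the outer/inner ECN bits were inconsistent: 1 is logged and
+		 * accepted, anything greater is counted as a frame error and
+		 * the packet is discarded.
+		 */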
+		err = dscp_ecn_decapsulate(t, ipv6h, skb);
+		if (unlikely(err)) {
+			if (log_ecn_error)
+				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
+						     &ipv6h->saddr,
+						     ipv6_get_dsfield(ipv6h));
+			if (err > 1) {
+				++t->dev->stats.rx_frame_errors;
+				++t->dev->stats.rx_errors;
+				rcu_read_unlock();
+				goto discard;
+			}
+		}
+
 		tstats = this_cpu_ptr(t->dev->tstats);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
 
-		__skb_tunnel_rx(skb, t->dev);
-
-		dscp_ecn_decapsulate(t, ipv6h, skb);
-
 		netif_rx(skb);
 
 		rcu_read_unlock();
@@ -1208,7 +1234,7 @@
 			if (dev->mtu < IPV6_MIN_MTU)
 				dev->mtu = IPV6_MIN_MTU;
 		}
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 	}
 }
 
@@ -1237,6 +1263,20 @@
 	return 0;
 }
 
+static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+	struct net *net = dev_net(t->dev);
+	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+	int err;
+
+	ip6_tnl_unlink(ip6n, t);
+	synchronize_net();
+	err = ip6_tnl_change(t, p);
+	ip6_tnl_link(ip6n, t);
+	netdev_state_change(t->dev);
+	return err;
+}
+
 static void
 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
 {
@@ -1325,7 +1365,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = -EFAULT;
 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
@@ -1345,11 +1385,7 @@
 			} else
 				t = netdev_priv(dev);
 
-			ip6_tnl_unlink(ip6n, t);
-			synchronize_net();
-			err = ip6_tnl_change(t, &p1);
-			ip6_tnl_link(ip6n, t);
-			netdev_state_change(dev);
+			err = ip6_tnl_update(t, &p1);
 		}
 		if (t) {
 			err = 0;
@@ -1362,7 +1398,7 @@
 		break;
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 
 		if (dev == ip6n->fb_tnl_dev) {
@@ -1505,6 +1541,164 @@
 	return 0;
 }
 
+static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	u8 proto;
+
+	if (!data)
+		return 0;
+
+	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+	if (proto != IPPROTO_IPV6 &&
+	    proto != IPPROTO_IPIP &&
+	    proto != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ip6_tnl_netlink_parms(struct nlattr *data[],
+				  struct __ip6_tnl_parm *parms)
+{
+	memset(parms, 0, sizeof(*parms));
+
+	if (!data)
+		return;
+
+	if (data[IFLA_IPTUN_LINK])
+		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
+
+	if (data[IFLA_IPTUN_LOCAL])
+		nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL],
+			   sizeof(struct in6_addr));
+
+	if (data[IFLA_IPTUN_REMOTE])
+		nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE],
+			   sizeof(struct in6_addr));
+
+	if (data[IFLA_IPTUN_TTL])
+		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
+
+	if (data[IFLA_IPTUN_ENCAP_LIMIT])
+		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
+
+	if (data[IFLA_IPTUN_FLOWINFO])
+		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
+
+	if (data[IFLA_IPTUN_FLAGS])
+		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
+
+	if (data[IFLA_IPTUN_PROTO])
+		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+}
+
+static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
+			   struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net *net = dev_net(dev);
+	struct ip6_tnl *nt;
+
+	nt = netdev_priv(dev);
+	ip6_tnl_netlink_parms(data, &nt->parms);
+
+	if (ip6_tnl_locate(net, &nt->parms, 0))
+		return -EEXIST;
+
+	return ip6_tnl_create2(dev);
+}
+
+static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
+			      struct nlattr *data[])
+{
+	struct ip6_tnl *t;
+	struct __ip6_tnl_parm p;
+	struct net *net = dev_net(dev);
+	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+
+	if (dev == ip6n->fb_tnl_dev)
+		return -EINVAL;
+
+	ip6_tnl_netlink_parms(data, &p);
+
+	t = ip6_tnl_locate(net, &p, 0);
+
+	if (t) {
+		if (t->dev != dev)
+			return -EEXIST;
+	} else
+		t = netdev_priv(dev);
+
+	return ip6_tnl_update(t, &p);
+}
+
+static size_t ip6_tnl_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_IPTUN_LINK */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_LOCAL */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_IPTUN_REMOTE */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_IPTUN_TTL */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_ENCAP_LIMIT */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_FLOWINFO */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_FLAGS */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_PROTO */
+		nla_total_size(1) +
+		0;
+}
+
+static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct ip6_tnl *tunnel = netdev_priv(dev);
+	struct __ip6_tnl_parm *parm = &tunnel->parms;
+
+	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+	    nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
+		    &parm->raddr) ||
+	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
+		    &parm->laddr) ||
+	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
+	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
+	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
+	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
+	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
+	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
+	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
+};
+
+static struct rtnl_link_ops ip6_link_ops __read_mostly = {
+	.kind		= "ip6tnl",
+	.maxtype	= IFLA_IPTUN_MAX,
+	.policy		= ip6_tnl_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= ip6_tnl_dev_setup,
+	.validate	= ip6_tnl_validate,
+	.newlink	= ip6_tnl_newlink,
+	.changelink	= ip6_tnl_changelink,
+	.get_size	= ip6_tnl_get_size,
+	.fill_info	= ip6_tnl_fill_info,
+};
+
 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
 	.handler	= ip4ip6_rcv,
 	.err_handler	= ip4ip6_err,
@@ -1613,9 +1807,14 @@
 		pr_err("%s: can't register ip6ip6\n", __func__);
 		goto out_ip6ip6;
 	}
+	err = rtnl_link_register(&ip6_link_ops);
+	if (err < 0)
+		goto rtnl_link_failed;
 
 	return 0;
 
+rtnl_link_failed:
+	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
 out_ip6ip6:
 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
 out_ip4ip6:
@@ -1630,6 +1829,7 @@
 
 static void __exit ip6_tunnel_cleanup(void)
 {
+	rtnl_link_unregister(&ip6_link_ops);
 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
 		pr_info("%s: can't deregister ip4ip6\n", __func__);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f7c7c63..926ea54 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -66,8 +66,8 @@
 	struct mif_device	vif6_table[MAXMIFS];
 	int			maxvif;
 	atomic_t		cache_resolve_queue_len;
-	int			mroute_do_assert;
-	int			mroute_do_pim;
+	bool			mroute_do_assert;
+	bool			mroute_do_pim;
 #ifdef CONFIG_IPV6_PIMSM_V2
 	int			mroute_reg_vif_num;
 #endif
@@ -1583,7 +1583,7 @@
 		return -ENOENT;
 
 	if (optname != MRT6_INIT) {
-		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
+		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
@@ -1646,9 +1646,12 @@
 	case MRT6_ASSERT:
 	{
 		int v;
+
+		if (optlen != sizeof(v))
+			return -EINVAL;
 		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
-		mrt->mroute_do_assert = !!v;
+		mrt->mroute_do_assert = v;
 		return 0;
 	}
 
@@ -1656,6 +1659,9 @@
 	case MRT6_PIM:
 	{
 		int v;
+
+		if (optlen != sizeof(v))
+			return -EINVAL;
 		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
 		v = !!v;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index e02faed..ee94d31 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,8 @@
 		break;
 
 	case IPV6_TRANSPARENT:
-		if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
+		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
 			retv = -EPERM;
 			break;
 		}
@@ -381,7 +382,7 @@
 
 		/* hop-by-hop / destination options are privileged option */
 		retv = -EPERM;
-		if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW))
+		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
 			break;
 
 		opt = ipv6_renew_options(sk, np->opt, optname,
@@ -397,7 +398,7 @@
 		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
 			struct ipv6_rt_hdr *rthdr = opt->srcrt;
 			switch (rthdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 			case IPV6_SRCRT_TYPE_2:
 				if (rthdr->hdrlen != 2 ||
 				    rthdr->segments_left != 1)
@@ -754,7 +755,7 @@
 	case IPV6_IPSEC_POLICY:
 	case IPV6_XFRM_POLICY:
 		retv = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		retv = xfrm_user_policy(sk, optname, optval, optlen);
 		break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 92f8e48..b19ed51 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -163,7 +163,7 @@
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
 			dev = rt->dst.dev;
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 		}
 	} else
 		dev = dev_get_by_index_rcu(net, ifindex);
@@ -260,7 +260,7 @@
 
 		if (rt) {
 			dev = rt->dst.dev;
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 		}
 	} else
 		dev = dev_get_by_index_rcu(net, ifindex);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2edce30..f41853b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -535,7 +535,6 @@
 {
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa;
-	struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 
 	idev = in6_dev_get(dev);
 	if (!idev)
@@ -543,7 +542,7 @@
 
 	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
-		ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
+		ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &ifa->addr,
 			      /*router=*/ !!idev->cnf.forwarding,
 			      /*solicited=*/ false, /*override=*/ true,
 			      /*inc_opt=*/ true);
@@ -905,7 +904,7 @@
 		if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
 		    net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
 		    pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
-			/* XXX: idev->cnf.prixy_ndp */
+			/* XXX: idev->cnf.proxy_ndp */
 			goto out;
 		}
 
@@ -1144,7 +1143,7 @@
 			ND_PRINTK(0, err,
 				  "RA: %s got default router without neighbour\n",
 				  __func__);
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 			return;
 		}
 	}
@@ -1169,7 +1168,7 @@
 			ND_PRINTK(0, err,
 				  "RA: %s got default router without neighbour\n",
 				  __func__);
-			dst_release(&rt->dst);
+			ip6_rt_put(rt);
 			return;
 		}
 		neigh->flags |= NTF_ROUTER;
@@ -1325,8 +1324,7 @@
 		ND_PRINTK(2, warn, "RA: invalid RA options\n");
 	}
 out:
-	if (rt)
-		dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	if (neigh)
 		neigh_release(neigh);
 }
@@ -1574,11 +1572,18 @@
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct inet6_dev *idev;
 
 	switch (event) {
 	case NETDEV_CHANGEADDR:
 		neigh_changeaddr(&nd_tbl, dev);
 		fib6_run_gc(~0UL, net);
+		idev = in6_dev_get(dev);
+		if (!idev)
+			break;
+		if (idev->cnf.ndisc_notify)
+			ndisc_send_unsol_na(dev);
+		in6_dev_put(idev);
 		break;
 	case NETDEV_DOWN:
 		neigh_ifdown(&nd_tbl, dev);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index d7cb045..74cadd0 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -207,8 +207,7 @@
 	return ip6t_get_target((struct ip6t_entry *)e);
 }
 
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 /* This cries for unification! */
 static const char *const hooknames[] = {
 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
@@ -381,8 +380,7 @@
 		t = ip6t_get_target_c(e);
 		IP_NF_ASSERT(t->u.kernel.target);
 
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 		/* The packet is traced: log it */
 		if (unlikely(skb->nf_trace))
 			trace_packet(skb, hook, in, out,
@@ -1856,7 +1854,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1971,7 +1969,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -1993,7 +1991,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
@@ -2018,7 +2016,7 @@
 {
 	int ret;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 5d1d8b0..5060d54 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -67,7 +67,7 @@
 	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
 		ret = true;
  out:
-	dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	return ret;
 }
 
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index d57dab1..fa84cf8 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -277,9 +277,7 @@
 		return -ENOMEM;
 	net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv6.ip6table_nat))
-		return PTR_ERR(net->ipv6.ip6table_nat);
-	return 0;
+	return PTR_RET(net->ipv6.ip6table_nat);
 }
 
 static void __net_exit ip6table_nat_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 8860d23..00ee17c 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -21,6 +21,7 @@
 
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
@@ -295,7 +296,56 @@
 	},
 };
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
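+/* getsockopt(IP6T_SO_ORIGINAL_DST): look up the conntrack entry that matches
+ * this socket's tuple and return the original (pre-NAT) destination address
+ * and port as a sockaddr_in6.
+ */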
+static int
+ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+{
+	const struct inet_sock *inet = inet_sk(sk);
+	const struct ipv6_pinfo *inet6 = inet6_sk(sk);
+	const struct nf_conntrack_tuple_hash *h;
+	struct sockaddr_in6 sin6;
+	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+	struct nf_conn *ct;
+
+	tuple.src.u3.in6 = inet6->rcv_saddr;
+	tuple.src.u.tcp.port = inet->inet_sport;
+	tuple.dst.u3.in6 = inet6->daddr;
+	tuple.dst.u.tcp.port = inet->inet_dport;
+	tuple.dst.protonum = sk->sk_protocol;
+
+	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
+		return -ENOPROTOOPT;
+
+	if (*len < 0 || (unsigned int) *len < sizeof(sin6))
+		return -EINVAL;
+
+	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+	if (!h) {
+		pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
+			 &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
+			 &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
+		return -ENOENT;
+	}
+
+	ct = nf_ct_tuplehash_to_ctrack(h);
+
+	sin6.sin6_family = AF_INET6;
+	sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+	sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
+	memcpy(&sin6.sin6_addr,
+		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
+					sizeof(sin6.sin6_addr));
+
+	nf_ct_put(ct);
+
+	if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+		sin6.sin6_scope_id = sk->sk_bound_dev_if;
+	else
+		sin6.sin6_scope_id = 0;
+
+	return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
+}
+
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -346,7 +396,7 @@
 	.invert_tuple		= ipv6_invert_tuple,
 	.print_tuple		= ipv6_print_tuple,
 	.get_l4proto		= ipv6_get_l4proto,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= ipv6_tuple_to_nlattr,
 	.nlattr_tuple_size	= ipv6_nlattr_tuple_size,
 	.nlattr_to_tuple	= ipv6_nlattr_to_tuple,
@@ -359,6 +409,14 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
 
+static struct nf_sockopt_ops so_getorigdst6 = {
+	.pf		= NFPROTO_IPV6,
+	.get_optmin	= IP6T_SO_ORIGINAL_DST,
+	.get_optmax	= IP6T_SO_ORIGINAL_DST + 1,
+	.get		= ipv6_getorigdst,
+	.owner		= THIS_MODULE,
+};
+
 static int ipv6_net_init(struct net *net)
 {
 	int ret = 0;
@@ -425,6 +483,12 @@
 	need_conntrack();
 	nf_defrag_ipv6_enable();
 
+	ret = nf_register_sockopt(&so_getorigdst6);
+	if (ret < 0) {
+		pr_err("Unable to register netfilter socket option\n");
+		return ret;
+	}
+
 	ret = register_pernet_subsys(&ipv6_net_ops);
 	if (ret < 0)
 		goto cleanup_pernet;
@@ -440,6 +504,7 @@
  cleanup_ipv6:
 	unregister_pernet_subsys(&ipv6_net_ops);
  cleanup_pernet:
+	nf_unregister_sockopt(&so_getorigdst6);
 	return ret;
 }
 
@@ -448,6 +513,7 @@
 	synchronize_net();
 	nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
 	unregister_pernet_subsys(&ipv6_net_ops);
+	nf_unregister_sockopt(&so_getorigdst6);
 }
 
 module_init(nf_conntrack_l3proto_ipv6_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 2d54b20..24df3dd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -232,7 +232,7 @@
 	return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -375,7 +375,7 @@
 	.get_timeouts		= icmpv6_get_timeouts,
 	.new			= icmpv6_new,
 	.error			= icmpv6_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= icmpv6_tuple_to_nlattr,
 	.nlattr_tuple_size	= icmpv6_nlattr_tuple_size,
 	.nlattr_to_tuple	= icmpv6_nlattr_to_tuple,
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index cdd6d04..aacd121 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,7 +19,7 @@
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
@@ -35,7 +35,7 @@
 {
 	u16 zone = NF_CT_DEFAULT_ZONE;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (skb->nfct)
 		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
@@ -60,7 +60,7 @@
 {
 	struct sk_buff *reasm;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	/* Previously seen (loopback)?	*/
 	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
 		return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 5d6da78..61aaf70 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -84,7 +84,7 @@
 	.manip_pkt		= icmpv6_manip_pkt,
 	.in_range		= icmpv6_in_range,
 	.unique_tuple		= icmpv6_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.nlattr_to_range	= nf_nat_l4proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
new file mode 100644
index 0000000..c2e73e6
--- /dev/null
+++ b/net/ipv6/output_core.c
@@ -0,0 +1,76 @@
+/*
+ * IPv6 library code, needed by static components when full IPv6 support is
+ * not configured or static.  These functions are needed by the GSO/GRO implementation.
+ */
+#include <linux/export.h>
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+
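+/* Pick the fragment identification: prefer the per-destination inet_peer
+ * counter when a route with a peer is available, otherwise fall back to a
+ * global counter that never yields zero.
+ */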
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+	static atomic_t ipv6_fragmentation_id;
+	int old, new;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
+		struct inet_peer *peer;
+		struct net *net;
+
+		net = dev_net(rt->dst.dev);
+		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+		if (peer) {
+			fhdr->identification = htonl(inet_getid(peer, 0));
+			inet_putpeer(peer);
+			return;
+		}
+	}
+#endif
+	do {
+		old = atomic_read(&ipv6_fragmentation_id);
+		new = old + 1;
+		if (!new)
+			new = 1;
+	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+	fhdr->identification = htonl(new);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+{
+	u16 offset = sizeof(struct ipv6hdr);
+	struct ipv6_opt_hdr *exthdr =
+				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
+	unsigned int packet_len = skb->tail - skb->network_header;
+	int found_rhdr = 0;
+	*nexthdr = &ipv6_hdr(skb)->nexthdr;
+
+	while (offset + 1 <= packet_len) {
+
+		switch (**nexthdr) {
+
+		case NEXTHDR_HOP:
+			break;
+		case NEXTHDR_ROUTING:
+			found_rhdr = 1;
+			break;
+		case NEXTHDR_DEST:
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
+				break;
+#endif
+			if (found_rhdr)
+				return offset;
+			break;
+		default:
+			return offset;
+		}
+
+		offset += ipv6_optlen(exthdr);
+		*nexthdr = &exthdr->nexthdr;
+		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+						 offset);
+	}
+
+	return offset;
+}
+EXPORT_SYMBOL(ip6_find_1stfragopt);
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 053082d..22d1bd4 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,7 +25,9 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
+#if IS_ENABLED(CONFIG_IPV6)
 const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+EXPORT_SYMBOL(inet6_protos);
 
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
@@ -50,3 +52,26 @@
 	return ret;
 }
 EXPORT_SYMBOL(inet6_del_protocol);
+#endif
+
+const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly;
+
+int inet6_add_offload(const struct net_offload *prot, unsigned char protocol)
+{
+	return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol],
+			NULL, prot) ? 0 : -1;
+}
+EXPORT_SYMBOL(inet6_add_offload);
+
+int inet6_del_offload(const struct net_offload *prot, unsigned char protocol)
+{
+	int ret;
+
+	ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol],
+		       prot, NULL) == prot) ? 0 : -1;
+
+	synchronize_net();
+
+	return ret;
+}
+EXPORT_SYMBOL(inet6_del_offload);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d8e95c7..6cd29b1 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -50,7 +50,7 @@
 #include <net/udp.h>
 #include <net/inet_common.h>
 #include <net/tcp_states.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 #include <linux/mroute6.h>
@@ -123,7 +123,7 @@
 	return 1;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
 
 static mh_filter_t __rcu *mh_filter __read_mostly;
@@ -184,7 +184,7 @@
 			filtered = icmpv6_filter(sk, skb);
 			break;
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPPROTO_MH:
 		{
 			/* XXX: To validate MH only once for each packet,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index da8a4e3..e5253ec 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -616,6 +616,10 @@
 		table[0].data = &net->ipv6.frags.high_thresh;
 		table[1].data = &net->ipv6.frags.low_thresh;
 		table[2].data = &net->ipv6.frags.timeout;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			table[0].procname = NULL;
 	}
 
 	hdr = register_net_sysctl(net, "net/ipv6", table);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b1e6cf0..8f124f5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -57,6 +57,7 @@
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
+#include <net/nexthop.h>
 
 #include <asm/uaccess.h>
 
@@ -289,6 +290,8 @@
 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
 		rt->rt6i_genid = rt_genid(net);
+		INIT_LIST_HEAD(&rt->rt6i_siblings);
+		rt->rt6i_nsiblings = 0;
 	}
 	return rt;
 }
@@ -318,13 +321,6 @@
 	}
 }
 
-static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt6_peer_genid(void)
-{
-	return atomic_read(&__rt6_peer_genid);
-}
-
 void rt6_bind_peer(struct rt6_info *rt, int create)
 {
 	struct inet_peer_base *base;
@@ -338,8 +334,6 @@
 	if (peer) {
 		if (!rt6_set_peer(rt, peer))
 			inet_putpeer(peer);
-		else
-			rt->rt6i_peer_genid = rt6_peer_genid();
 	}
 }
 
@@ -385,6 +379,69 @@
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+/* Multipath route selection:
+ *   Hash-based function using the packet header and flow label.
+ * Adapted from fib_info_hashfn()
+ */
+static int rt6_info_hash_nhsfn(unsigned int candidate_count,
+			       const struct flowi6 *fl6)
+{
+	unsigned int val = fl6->flowi6_proto;
+
+	val ^= (__force u32)fl6->daddr.s6_addr32[0];
+	val ^= (__force u32)fl6->daddr.s6_addr32[1];
+	val ^= (__force u32)fl6->daddr.s6_addr32[2];
+	val ^= (__force u32)fl6->daddr.s6_addr32[3];
+
+	val ^= (__force u32)fl6->saddr.s6_addr32[0];
+	val ^= (__force u32)fl6->saddr.s6_addr32[1];
+	val ^= (__force u32)fl6->saddr.s6_addr32[2];
+	val ^= (__force u32)fl6->saddr.s6_addr32[3];
+
+	/* Works only if this is not encapsulated */
+	switch (fl6->flowi6_proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_SCTP:
+		val ^= (__force u16)fl6->fl6_sport;
+		val ^= (__force u16)fl6->fl6_dport;
+		break;
+
+	case IPPROTO_ICMPV6:
+		val ^= (__force u16)fl6->fl6_icmp_type;
+		val ^= (__force u16)fl6->fl6_icmp_code;
+		break;
+	}
+	/* RFC 6438 recommends using the flow label */
+	val ^= (__force u32)fl6->flowlabel;
+
+	/* Perhaps we need to tune this function? */
+	val = val ^ (val >> 7) ^ (val >> 12);
+	return val % candidate_count;
+}
+
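+/* Select one route among the ECMP siblings of the matched route: hash the
+ * flow over rt6i_nsiblings + 1 candidates; index 0 keeps the matched route
+ * itself, otherwise walk the sibling list to the chosen entry.
+ */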
+static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
+					     struct flowi6 *fl6)
+{
+	struct rt6_info *sibling, *next_sibling;
+	int route_choosen;
+
+	route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
+	/* Don't change the route if route_choosen == 0
+	 * (the sibling list does not include ourselves)
+	 */
+	if (route_choosen)
+		list_for_each_entry_safe(sibling, next_sibling,
+				&match->rt6i_siblings, rt6i_siblings) {
+			route_choosen--;
+			if (route_choosen == 0) {
+				match = sibling;
+				break;
+			}
+		}
+	return match;
+}
+
 /*
  *	Route lookup. Any table->tb6_lock is implied.
  */
@@ -666,7 +723,7 @@
 		else
 			rt6_set_expires(rt, jiffies + HZ * lifetime);
 
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 	}
 	return 0;
 }
@@ -702,6 +759,8 @@
 restart:
 	rt = fn->leaf;
 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
+	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
+		rt = rt6_multipath_select(rt, fl6);
 	BACKTRACK(net, &fl6->saddr);
 out:
 	dst_use(&rt->dst, jiffies);
@@ -863,7 +922,8 @@
 
 restart:
 	rt = rt6_select(fn, oif, strict | reachable);
-
+	if (rt->rt6i_nsiblings && oif == 0)
+		rt = rt6_multipath_select(rt, fl6);
 	BACKTRACK(net, &fl6->saddr);
 	if (rt == net->ipv6.ip6_null_entry ||
 	    rt->rt6i_flags & RTF_CACHE)
@@ -879,7 +939,7 @@
 	else
 		goto out2;
 
-	dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	rt = nrt ? : net->ipv6.ip6_null_entry;
 
 	dst_hold(&rt->dst);
@@ -896,7 +956,7 @@
 	 * Race condition! In the gap, when table->tb6_lock was
 	 * released someone could insert this route.  Relookup.
 	 */
-	dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	goto relookup;
 
 out:
@@ -1030,14 +1090,9 @@
 	if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
 		return NULL;
 
-	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
-		if (rt->rt6i_peer_genid != rt6_peer_genid()) {
-			if (!rt6_has_peer(rt))
-				rt6_bind_peer(rt, 0);
-			rt->rt6i_peer_genid = rt6_peer_genid();
-		}
+	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
 		return dst;
-	}
+
 	return NULL;
 }
 
@@ -1316,12 +1371,6 @@
 	return entries > rt_max_size;
 }
 
-/* Clean host part of a prefix. Not necessary in radix tree,
-   but results in cleaner routing tables.
-
-   Remove it only when all the things will work!
- */
-
 int ip6_dst_hoplimit(struct dst_entry *dst)
 {
 	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
@@ -1507,7 +1556,7 @@
 				goto out;
 			if (dev) {
 				if (dev != grt->dst.dev) {
-					dst_release(&grt->dst);
+					ip6_rt_put(grt);
 					goto out;
 				}
 			} else {
@@ -1518,7 +1567,7 @@
 			}
 			if (!(grt->rt6i_flags & RTF_GATEWAY))
 				err = 0;
-			dst_release(&grt->dst);
+			ip6_rt_put(grt);
 
 			if (err)
 				goto out;
@@ -1604,7 +1653,7 @@
 	write_unlock_bh(&table->tb6_lock);
 
 out:
-	dst_release(&rt->dst);
+	ip6_rt_put(rt);
 	return err;
 }
 
@@ -1987,7 +2036,7 @@
 	switch(cmd) {
 	case SIOCADDRT:		/* Add a route */
 	case SIOCDELRT:		/* Delete a route */
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 		err = copy_from_user(&rtmsg, arg,
 				     sizeof(struct in6_rtmsg));
@@ -2249,6 +2298,7 @@
 	[RTA_IIF]		= { .type = NLA_U32 },
 	[RTA_PRIORITY]          = { .type = NLA_U32 },
 	[RTA_METRICS]           = { .type = NLA_NESTED },
+	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2326,11 +2376,71 @@
 	if (tb[RTA_TABLE])
 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
 
+	if (tb[RTA_MULTIPATH]) {
+		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
+		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+	}
+
 	err = 0;
 errout:
 	return err;
 }
 
+static int ip6_route_multipath(struct fib6_config *cfg, int add)
+{
+	struct fib6_config r_cfg;
+	struct rtnexthop *rtnh;
+	int remaining;
+	int attrlen;
+	int err = 0, last_err = 0;
+
+beginning:
+	rtnh = (struct rtnexthop *)cfg->fc_mp;
+	remaining = cfg->fc_mp_len;
+
+	/* Parse a Multipath Entry */
+	while (rtnh_ok(rtnh, remaining)) {
+		memcpy(&r_cfg, cfg, sizeof(*cfg));
+		if (rtnh->rtnh_ifindex)
+			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
+
+		attrlen = rtnh_attrlen(rtnh);
+		if (attrlen > 0) {
+			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+			if (nla) {
+				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+				r_cfg.fc_flags |= RTF_GATEWAY;
+			}
+		}
+		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
+		if (err) {
+			last_err = err;
+			/* If we are trying to remove a route, do not stop the
+			 * loop when ip6_route_del() fails (because the nexthop
+			 * is already gone); we should try to remove all nexthops.
+			 */
+			if (add) {
+				/* If add fails, we should try to delete all
+				 * next hops that have been already added.
+				 */
+				add = 0;
+				goto beginning;
+			}
+		}
+		/* Because each route is added as a single route, we remove
+		 * this flag after the first nexthop (if there is a collision,
+		 * we have already failed to add the first nexthop:
+		 * fib6_add_rt2node() has rejected it).
+		 */
+		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+		rtnh = rtnh_next(rtnh, &remaining);
+	}
+
+	return last_err;
+}
+
 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
 	struct fib6_config cfg;
@@ -2340,7 +2450,10 @@
 	if (err < 0)
 		return err;
 
-	return ip6_route_del(&cfg);
+	if (cfg.fc_mp)
+		return ip6_route_multipath(&cfg, 0);
+	else
+		return ip6_route_del(&cfg);
 }
 
 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
@@ -2352,7 +2465,10 @@
 	if (err < 0)
 		return err;
 
-	return ip6_route_add(&cfg);
+	if (cfg.fc_mp)
+		return ip6_route_multipath(&cfg, 1);
+	else
+		return ip6_route_add(&cfg);
 }
 
 static inline size_t rt6_nlmsg_size(void)
@@ -2596,7 +2712,7 @@
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb) {
-		dst_release(&rt->dst);
+		ip6_rt_put(rt);
 		err = -ENOBUFS;
 		goto errout;
 	}
@@ -2873,6 +2989,10 @@
 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			table[0].procname = NULL;
 	}
 
 	return table;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3ed54ff..cfba99b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -65,9 +65,14 @@
 #define HASH_SIZE  16
 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static int ipip6_tunnel_init(struct net_device *dev);
 static void ipip6_tunnel_setup(struct net_device *dev);
 static void ipip6_dev_free(struct net_device *dev);
+static struct rtnl_link_ops sit_link_ops __read_mostly;
 
 static int sit_net_id __read_mostly;
 struct sit_net {
@@ -80,22 +85,6 @@
 	struct net_device *fb_tunnel_dev;
 };
 
-/*
- * Locking : hash tables are protected by RCU and RTNL
- */
-
-#define for_each_ip_tunnel_rcu(start) \
-	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
-
 static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
 						   struct rtnl_link_stats64 *tot)
 {
@@ -121,6 +110,7 @@
 	}
 
 	tot->rx_errors = dev->stats.rx_errors;
+	tot->rx_frame_errors = dev->stats.rx_frame_errors;
 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
 	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
 	tot->tx_dropped = dev->stats.tx_dropped;
@@ -141,20 +131,20 @@
 	struct ip_tunnel *t;
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
-	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
+	for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
+	for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
 		if (remote == t->parms.iph.daddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
+	for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
 		if (local == t->parms.iph.saddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
@@ -231,6 +221,37 @@
 #endif
 }
 
+static int ipip6_tunnel_create(struct net_device *dev)
+{
+	struct ip_tunnel *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct sit_net *sitn = net_generic(net, sit_net_id);
+	int err;
+
+	err = ipip6_tunnel_init(dev);
+	if (err < 0)
+		goto out;
+	ipip6_tunnel_clone_6rd(dev, sitn);
+
+	if ((__force u16)t->parms.i_flags & SIT_ISATAP)
+		dev->priv_flags |= IFF_ISATAP;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out;
+
+	strcpy(t->parms.name, dev->name);
+	dev->rtnl_link_ops = &sit_link_ops;
+
+	dev_hold(dev);
+
+	ipip6_tunnel_link(sitn, t);
+	return 0;
+
+out:
+	return err;
+}
+
 static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 		struct ip_tunnel_parm *parms, int create)
 {
@@ -271,21 +292,9 @@
 	nt = netdev_priv(dev);
 
 	nt->parms = *parms;
-	if (ipip6_tunnel_init(dev) < 0)
-		goto failed_free;
-	ipip6_tunnel_clone_6rd(dev, sitn);
-
-	if (parms->i_flags & SIT_ISATAP)
-		dev->priv_flags |= IFF_ISATAP;
-
-	if (register_netdevice(dev) < 0)
+	if (ipip6_tunnel_create(dev) < 0)
 		goto failed_free;
 
-	strcpy(nt->parms.name, dev->name);
-
-	dev_hold(dev);
-
-	ipip6_tunnel_link(sitn, nt);
 	return nt;
 
 failed_free:
@@ -581,16 +590,11 @@
 	return err;
 }
 
-static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
-{
-	if (INET_ECN_is_ce(iph->tos))
-		IP6_ECN_set_ce(ipv6_hdr(skb));
-}
-
 static int ipip6_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
+	int err;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		goto out;
@@ -612,18 +616,27 @@
 		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
 		    !isatap_chksrc(skb, iph, tunnel)) {
 			tunnel->dev->stats.rx_errors++;
-			kfree_skb(skb);
-			return 0;
+			goto out;
+		}
+
+		__skb_tunnel_rx(skb, tunnel->dev);
+
+		err = IP_ECN_decapsulate(iph, skb);
+		if (unlikely(err)) {
+			if (log_ecn_error)
+				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+						     &iph->saddr, iph->tos);
+			if (err > 1) {
+				++tunnel->dev->stats.rx_frame_errors;
+				++tunnel->dev->stats.rx_errors;
+				goto out;
+			}
 		}
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
 
-		__skb_tunnel_rx(skb, tunnel->dev);
-
-		ipip6_ecn_decapsulate(iph, skb);
-
 		netif_rx(skb);
 
 		return 0;
@@ -683,7 +696,6 @@
 				     struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr  *tiph = &tunnel->parms.iph;
 	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 	u8     tos = tunnel->parms.iph.tos;
@@ -864,9 +876,7 @@
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl	=	iph6->hop_limit;
 
-	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -914,6 +924,59 @@
 	dev->iflink = tunnel->parms.link;
 }
 
+static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
+{
+	struct net *net = dev_net(t->dev);
+	struct sit_net *sitn = net_generic(net, sit_net_id);
+
+	ipip6_tunnel_unlink(sitn, t);
+	synchronize_net();
+	t->parms.iph.saddr = p->iph.saddr;
+	t->parms.iph.daddr = p->iph.daddr;
+	memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
+	memcpy(t->dev->broadcast, &p->iph.daddr, 4);
+	ipip6_tunnel_link(sitn, t);
+	t->parms.iph.ttl = p->iph.ttl;
+	t->parms.iph.tos = p->iph.tos;
+	if (t->parms.link != p->link) {
+		t->parms.link = p->link;
+		ipip6_tunnel_bind_dev(t->dev);
+	}
+	netdev_state_change(t->dev);
+}
+
+#ifdef CONFIG_IPV6_SIT_6RD
+static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
+				   struct ip_tunnel_6rd *ip6rd)
+{
+	struct in6_addr prefix;
+	__be32 relay_prefix;
+
+	if (ip6rd->relay_prefixlen > 32 ||
+	    ip6rd->prefixlen + (32 - ip6rd->relay_prefixlen) > 64)
+		return -EINVAL;
+
+	ipv6_addr_prefix(&prefix, &ip6rd->prefix, ip6rd->prefixlen);
+	if (!ipv6_addr_equal(&prefix, &ip6rd->prefix))
+		return -EINVAL;
+	if (ip6rd->relay_prefixlen)
+		relay_prefix = ip6rd->relay_prefix &
+			       htonl(0xffffffffUL <<
+				     (32 - ip6rd->relay_prefixlen));
+	else
+		relay_prefix = 0;
+	if (relay_prefix != ip6rd->relay_prefix)
+		return -EINVAL;
+
+	t->ip6rd.prefix = prefix;
+	t->ip6rd.relay_prefix = relay_prefix;
+	t->ip6rd.prefixlen = ip6rd->prefixlen;
+	t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
+	netdev_state_change(t->dev);
+	return 0;
+}
+#endif
+
 static int
 ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -966,7 +1029,7 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -995,28 +1058,13 @@
 					break;
 				}
 				t = netdev_priv(dev);
-				ipip6_tunnel_unlink(sitn, t);
-				synchronize_net();
-				t->parms.iph.saddr = p.iph.saddr;
-				t->parms.iph.daddr = p.iph.daddr;
-				memcpy(dev->dev_addr, &p.iph.saddr, 4);
-				memcpy(dev->broadcast, &p.iph.daddr, 4);
-				ipip6_tunnel_link(sitn, t);
-				netdev_state_change(dev);
 			}
+
+			ipip6_tunnel_update(t, &p);
 		}
 
 		if (t) {
 			err = 0;
-			if (cmd == SIOCCHGTUNNEL) {
-				t->parms.iph.ttl = p.iph.ttl;
-				t->parms.iph.tos = p.iph.tos;
-				if (t->parms.link != p.link) {
-					t->parms.link = p.link;
-					ipip6_tunnel_bind_dev(dev);
-					netdev_state_change(dev);
-				}
-			}
 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
 				err = -EFAULT;
 		} else
@@ -1025,7 +1073,7 @@
 
 	case SIOCDELTUNNEL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		if (dev == sitn->fb_tunnel_dev) {
@@ -1058,7 +1106,7 @@
 	case SIOCDELPRL:
 	case SIOCCHGPRL:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 		err = -EINVAL;
 		if (dev == sitn->fb_tunnel_dev)
@@ -1087,7 +1135,7 @@
 	case SIOCCHG6RD:
 	case SIOCDEL6RD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto done;
 
 		err = -EFAULT;
@@ -1098,31 +1146,9 @@
 		t = netdev_priv(dev);
 
 		if (cmd != SIOCDEL6RD) {
-			struct in6_addr prefix;
-			__be32 relay_prefix;
-
-			err = -EINVAL;
-			if (ip6rd.relay_prefixlen > 32 ||
-			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
+			err = ipip6_tunnel_update_6rd(t, &ip6rd);
+			if (err < 0)
 				goto done;
-
-			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
-					 ip6rd.prefixlen);
-			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
-				goto done;
-			if (ip6rd.relay_prefixlen)
-				relay_prefix = ip6rd.relay_prefix &
-					       htonl(0xffffffffUL <<
-						     (32 - ip6rd.relay_prefixlen));
-			else
-				relay_prefix = 0;
-			if (relay_prefix != ip6rd.relay_prefix)
-				goto done;
-
-			t->ip6rd.prefix = prefix;
-			t->ip6rd.relay_prefix = relay_prefix;
-			t->ip6rd.prefixlen = ip6rd.prefixlen;
-			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
 		} else
 			ipip6_tunnel_clone_6rd(dev, sitn);
 
@@ -1216,6 +1242,239 @@
 	return 0;
 }
 
+static void ipip6_netlink_parms(struct nlattr *data[],
+				struct ip_tunnel_parm *parms)
+{
+	memset(parms, 0, sizeof(*parms));
+
+	parms->iph.version = 4;
+	parms->iph.protocol = IPPROTO_IPV6;
+	parms->iph.ihl = 5;
+	parms->iph.ttl = 64;
+
+	if (!data)
+		return;
+
+	if (data[IFLA_IPTUN_LINK])
+		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
+
+	if (data[IFLA_IPTUN_LOCAL])
+		parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
+
+	if (data[IFLA_IPTUN_REMOTE])
+		parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
+
+	if (data[IFLA_IPTUN_TTL]) {
+		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
+		if (parms->iph.ttl)
+			parms->iph.frag_off = htons(IP_DF);
+	}
+
+	if (data[IFLA_IPTUN_TOS])
+		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
+
+	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
+		parms->iph.frag_off = htons(IP_DF);
+
+	if (data[IFLA_IPTUN_FLAGS])
+		parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
+}
+
+#ifdef CONFIG_IPV6_SIT_6RD
+/* This function returns true when 6RD attributes are present in the nl msg */
+static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
+				    struct ip_tunnel_6rd *ip6rd)
+{
+	bool ret = false;
+	memset(ip6rd, 0, sizeof(*ip6rd));
+
+	if (!data)
+		return ret;
+
+	if (data[IFLA_IPTUN_6RD_PREFIX]) {
+		ret = true;
+		nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX],
+			   sizeof(struct in6_addr));
+	}
+
+	if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) {
+		ret = true;
+		ip6rd->relay_prefix =
+			nla_get_be32(data[IFLA_IPTUN_6RD_RELAY_PREFIX]);
+	}
+
+	if (data[IFLA_IPTUN_6RD_PREFIXLEN]) {
+		ret = true;
+		ip6rd->prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_PREFIXLEN]);
+	}
+
+	if (data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]) {
+		ret = true;
+		ip6rd->relay_prefixlen =
+			nla_get_u16(data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]);
+	}
+
+	return ret;
+}
+#endif
+
+static int ipip6_newlink(struct net *src_net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net *net = dev_net(dev);
+	struct ip_tunnel *nt;
+#ifdef CONFIG_IPV6_SIT_6RD
+	struct ip_tunnel_6rd ip6rd;
+#endif
+	int err;
+
+	nt = netdev_priv(dev);
+	ipip6_netlink_parms(data, &nt->parms);
+
+	if (ipip6_tunnel_locate(net, &nt->parms, 0))
+		return -EEXIST;
+
+	err = ipip6_tunnel_create(dev);
+	if (err < 0)
+		return err;
+
+#ifdef CONFIG_IPV6_SIT_6RD
+	if (ipip6_netlink_6rd_parms(data, &ip6rd))
+		err = ipip6_tunnel_update_6rd(nt, &ip6rd);
+#endif
+
+	return err;
+}
+
+static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
+			  struct nlattr *data[])
+{
+	struct ip_tunnel *t;
+	struct ip_tunnel_parm p;
+	struct net *net = dev_net(dev);
+	struct sit_net *sitn = net_generic(net, sit_net_id);
+#ifdef CONFIG_IPV6_SIT_6RD
+	struct ip_tunnel_6rd ip6rd;
+#endif
+
+	if (dev == sitn->fb_tunnel_dev)
+		return -EINVAL;
+
+	ipip6_netlink_parms(data, &p);
+
+	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
+	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
+		return -EINVAL;
+
+	t = ipip6_tunnel_locate(net, &p, 0);
+
+	if (t) {
+		if (t->dev != dev)
+			return -EEXIST;
+	} else
+		t = netdev_priv(dev);
+
+	ipip6_tunnel_update(t, &p);
+
+#ifdef CONFIG_IPV6_SIT_6RD
+	if (ipip6_netlink_6rd_parms(data, &ip6rd))
+		return ipip6_tunnel_update_6rd(t, &ip6rd);
+#endif
+
+	return 0;
+}
+
+static size_t ipip6_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_IPTUN_LINK */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_LOCAL */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_REMOTE */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_TTL */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_TOS */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_PMTUDISC */
+		nla_total_size(1) +
+		/* IFLA_IPTUN_FLAGS */
+		nla_total_size(2) +
+#ifdef CONFIG_IPV6_SIT_6RD
+		/* IFLA_IPTUN_6RD_PREFIX */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_IPTUN_6RD_RELAY_PREFIX */
+		nla_total_size(4) +
+		/* IFLA_IPTUN_6RD_PREFIXLEN */
+		nla_total_size(2) +
+		/* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */
+		nla_total_size(2) +
+#endif
+		0;
+}
+
+static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+	struct ip_tunnel_parm *parm = &tunnel->parms;
+
+	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+	    nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+	    nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
+	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
+	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
+		       !!(parm->iph.frag_off & htons(IP_DF))) ||
+	    nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags))
+		goto nla_put_failure;
+
+#ifdef CONFIG_IPV6_SIT_6RD
+	if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr),
+		    &tunnel->ip6rd.prefix) ||
+	    nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
+			 tunnel->ip6rd.relay_prefix) ||
+	    nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
+			tunnel->ip6rd.prefixlen) ||
+	    nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
+			tunnel->ip6rd.relay_prefixlen))
+		goto nla_put_failure;
+#endif
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
+	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
+	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U16 },
+#ifdef CONFIG_IPV6_SIT_6RD
+	[IFLA_IPTUN_6RD_PREFIX]		= { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_6RD_RELAY_PREFIX]	= { .type = NLA_U32 },
+	[IFLA_IPTUN_6RD_PREFIXLEN]	= { .type = NLA_U16 },
+	[IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 },
+#endif
+};
+
+static struct rtnl_link_ops sit_link_ops __read_mostly = {
+	.kind		= "sit",
+	.maxtype	= IFLA_IPTUN_MAX,
+	.policy		= ipip6_policy,
+	.priv_size	= sizeof(struct ip_tunnel),
+	.setup		= ipip6_tunnel_setup,
+	.newlink	= ipip6_newlink,
+	.changelink	= ipip6_changelink,
+	.get_size	= ipip6_get_size,
+	.fill_info	= ipip6_fill_info,
+};
+
 static struct xfrm_tunnel sit_handler __read_mostly = {
 	.handler	=	ipip6_rcv,
 	.err_handler	=	ipip6_err,
@@ -1302,6 +1561,7 @@
 
 static void __exit sit_cleanup(void)
 {
+	rtnl_link_unregister(&sit_link_ops);
 	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
 
 	unregister_pernet_device(&sit_net_ops);
@@ -1319,10 +1579,21 @@
 		return err;
 	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
 	if (err < 0) {
-		unregister_pernet_device(&sit_net_ops);
 		pr_info("%s: can't add protocol\n", __func__);
+		goto xfrm_tunnel_failed;
 	}
+	err = rtnl_link_register(&sit_link_ops);
+	if (err < 0)
+		goto rtnl_link_failed;
+
+out:
 	return err;
+
+rtnl_link_failed:
+	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
+xfrm_tunnel_failed:
+	unregister_pernet_device(&sit_net_ops);
+	goto out;
 }
 
 module_init(sit_init);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 182ab9a..4016197 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -214,7 +214,7 @@
 		ireq6->iif = inet6_iif(skb);
 
 	req->expires = 0UL;
-	req->retrans = 0;
+	req->num_retrans = 0;
 	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 26175bf..6565cf5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -77,9 +77,6 @@
 				      struct request_sock *req);
 
 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
-static void	__tcp_v6_send_check(struct sk_buff *skb,
-				    const struct in6_addr *saddr,
-				    const struct in6_addr *daddr);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
 static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -119,14 +116,6 @@
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(int len,
-				   const struct in6_addr *saddr,
-				   const struct in6_addr *daddr,
-				   __wsum base)
-{
-	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
-}
-
 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -306,7 +295,7 @@
 	if (err)
 		goto late_failure;
 
-	if (!tp->write_seq)
+	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 							     np->daddr.s6_addr32,
 							     inet->inet_sport,
@@ -495,9 +484,12 @@
 			     struct request_values *rvp)
 {
 	struct flowi6 fl6;
+	int res;
 
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
+	res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
+	if (!res)
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+	return res;
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -719,94 +711,6 @@
 };
 #endif
 
-static void __tcp_v6_send_check(struct sk_buff *skb,
-				const struct in6_addr *saddr, const struct in6_addr *daddr)
-{
-	struct tcphdr *th = tcp_hdr(skb);
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
-		skb->csum_start = skb_transport_header(skb) - skb->head;
-		skb->csum_offset = offsetof(struct tcphdr, check);
-	} else {
-		th->check = tcp_v6_check(skb->len, saddr, daddr,
-					 csum_partial(th, th->doff << 2,
-						      skb->csum));
-	}
-}
-
-static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
-{
-	struct ipv6_pinfo *np = inet6_sk(sk);
-
-	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
-}
-
-static int tcp_v6_gso_send_check(struct sk_buff *skb)
-{
-	const struct ipv6hdr *ipv6h;
-	struct tcphdr *th;
-
-	if (!pskb_may_pull(skb, sizeof(*th)))
-		return -EINVAL;
-
-	ipv6h = ipv6_hdr(skb);
-	th = tcp_hdr(skb);
-
-	th->check = 0;
-	skb->ip_summed = CHECKSUM_PARTIAL;
-	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
-	return 0;
-}
-
-static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
-					 struct sk_buff *skb)
-{
-	const struct ipv6hdr *iph = skb_gro_network_header(skb);
-	__wsum wsum;
-	__sum16 sum;
-
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
-				  skb->csum)) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			break;
-		}
-flush:
-		NAPI_GRO_CB(skb)->flush = 1;
-		return NULL;
-
-	case CHECKSUM_NONE:
-		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
-						    skb_gro_len(skb),
-						    IPPROTO_TCP, 0));
-		sum = csum_fold(skb_checksum(skb,
-					     skb_gro_offset(skb),
-					     skb_gro_len(skb),
-					     wsum));
-		if (sum)
-			goto flush;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
-	}
-
-	return tcp_gro_receive(head, skb);
-}
-
-static int tcp6_gro_complete(struct sk_buff *skb)
-{
-	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
-
-	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
-				  &iph->saddr, &iph->daddr, 0);
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-
-	return tcp_gro_complete(skb);
-}
-
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 {
@@ -1364,7 +1268,7 @@
 
 	tcp_initialize_rcv_mss(newsk);
 	tcp_synack_rtt_meas(newsk, req);
-	newtp->total_retrans = req->retrans;
+	newtp->total_retrans = req->num_retrans;
 
 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1741,11 +1645,11 @@
 		skb->destructor = sock_edemux;
 		if (sk->sk_state != TCP_TIME_WAIT) {
 			struct dst_entry *dst = sk->sk_rx_dst;
-			struct inet_sock *icsk = inet_sk(sk);
+
 			if (dst)
 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 			if (dst &&
-			    icsk->rx_dst_ifindex == skb->skb_iif)
+			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
 				skb_dst_set_noref(skb, dst);
 		}
 	}
@@ -1866,7 +1770,7 @@
 		   0,0, /* could print option size, but that is af dependent. */
 		   1,   /* timers active (only the expire timer) */
 		   jiffies_to_clock_t(ttd),
-		   req->retrans,
+		   req->num_timeout,
 		   from_kuid_munged(seq_user_ns(seq), uid),
 		   0,  /* non standard timer */
 		   0, /* open_requests have no inode */
@@ -2063,10 +1967,6 @@
 	.early_demux	=	tcp_v6_early_demux,
 	.handler	=	tcp_v6_rcv,
 	.err_handler	=	tcp_v6_err,
-	.gso_send_check	=	tcp_v6_gso_send_check,
-	.gso_segment	=	tcp_tso_segment,
-	.gro_receive	=	tcp6_gro_receive,
-	.gro_complete	=	tcp6_gro_complete,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
@@ -2121,10 +2021,10 @@
 out:
 	return ret;
 
-out_tcpv6_protocol:
-	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 out_tcpv6_protosw:
 	inet6_unregister_protosw(&tcpv6_protosw);
+out_tcpv6_protocol:
+	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 	goto out;
 }
 
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
new file mode 100644
index 0000000..2ec6bf6
--- /dev/null
+++ b/net/ipv6/tcpv6_offload.c
@@ -0,0 +1,95 @@
+/*
+ *	IPV6 GSO/GRO offload support
+ *	Linux INET6 implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      TCPv6 GSO/GRO support
+ */
+#include <linux/skbuff.h>
+#include <net/protocol.h>
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+#include "ip6_offload.h"
+
+static int tcp_v6_gso_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	struct tcphdr *th;
+
+	if (!pskb_may_pull(skb, sizeof(*th)))
+		return -EINVAL;
+
+	ipv6h = ipv6_hdr(skb);
+	th = tcp_hdr(skb);
+
+	th->check = 0;
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
+	return 0;
+}
+
+static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	const struct ipv6hdr *iph = skb_gro_network_header(skb);
+	__wsum wsum;
+	__sum16 sum;
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+flush:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+
+	case CHECKSUM_NONE:
+		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+						    skb_gro_len(skb),
+						    IPPROTO_TCP, 0));
+		sum = csum_fold(skb_checksum(skb,
+					     skb_gro_offset(skb),
+					     skb_gro_len(skb),
+					     wsum));
+		if (sum)
+			goto flush;
+
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+
+static int tcp6_gro_complete(struct sk_buff *skb)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+
+static const struct net_offload tcpv6_offload = {
+	.callbacks = {
+		.gso_send_check	=	tcp_v6_gso_send_check,
+		.gso_segment	=	tcp_tso_segment,
+		.gro_receive	=	tcp6_gro_receive,
+		.gro_complete	=	tcp6_gro_complete,
+	},
+};
+
+int __init tcpv6_offload_init(void)
+{
+	return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
+}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc99972..dfaa29b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1343,103 +1343,9 @@
 }
 #endif
 
-static int udp6_ufo_send_check(struct sk_buff *skb)
-{
-	const struct ipv6hdr *ipv6h;
-	struct udphdr *uh;
-
-	if (!pskb_may_pull(skb, sizeof(*uh)))
-		return -EINVAL;
-
-	ipv6h = ipv6_hdr(skb);
-	uh = udp_hdr(skb);
-
-	uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
-				     IPPROTO_UDP, 0);
-	skb->csum_start = skb_transport_header(skb) - skb->head;
-	skb->csum_offset = offsetof(struct udphdr, check);
-	skb->ip_summed = CHECKSUM_PARTIAL;
-	return 0;
-}
-
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
-	netdev_features_t features)
-{
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
-	unsigned int mss;
-	unsigned int unfrag_ip6hlen, unfrag_len;
-	struct frag_hdr *fptr;
-	u8 *mac_start, *prevhdr;
-	u8 nexthdr;
-	u8 frag_hdr_sz = sizeof(struct frag_hdr);
-	int offset;
-	__wsum csum;
-
-	mss = skb_shinfo(skb)->gso_size;
-	if (unlikely(skb->len <= mss))
-		goto out;
-
-	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
-		/* Packet is from an untrusted source, reset gso_segs. */
-		int type = skb_shinfo(skb)->gso_type;
-
-		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
-			     !(type & (SKB_GSO_UDP))))
-			goto out;
-
-		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
-
-		segs = NULL;
-		goto out;
-	}
-
-	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
-	 * do checksum of UDP packets sent as multiple IP fragments.
-	 */
-	offset = skb_checksum_start_offset(skb);
-	csum = skb_checksum(skb, offset, skb->len - offset, 0);
-	offset += skb->csum_offset;
-	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
-	skb->ip_summed = CHECKSUM_NONE;
-
-	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
-	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
-		goto out;
-
-	/* Find the unfragmentable header and shift it left by frag_hdr_sz
-	 * bytes to insert fragment header.
-	 */
-	unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-	nexthdr = *prevhdr;
-	*prevhdr = NEXTHDR_FRAGMENT;
-	unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
-		     unfrag_ip6hlen;
-	mac_start = skb_mac_header(skb);
-	memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
-
-	skb->mac_header -= frag_hdr_sz;
-	skb->network_header -= frag_hdr_sz;
-
-	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
-	fptr->nexthdr = nexthdr;
-	fptr->reserved = 0;
-	ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
-
-	/* Fragment the skb. ipv6 header and the remaining fields of the
-	 * fragment header are updated in ipv6_gso_segment()
-	 */
-	segs = skb_segment(skb, features);
-
-out:
-	return segs;
-}
-
 static const struct inet6_protocol udpv6_protocol = {
 	.handler	=	udpv6_rcv,
 	.err_handler	=	udpv6_err,
-	.gso_send_check =	udp6_ufo_send_check,
-	.gso_segment	=	udp6_ufo_fragment,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
new file mode 100644
index 0000000..0c8934a
--- /dev/null
+++ b/net/ipv6/udp_offload.c
@@ -0,0 +1,120 @@
+/*
+ *	IPV6 GSO/GRO offload support
+ *	Linux INET6 implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      UDPv6 GSO support
+ */
+#include <linux/skbuff.h>
+#include <net/protocol.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include "ip6_offload.h"
+
+static int udp6_ufo_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	struct udphdr *uh;
+
+	if (!pskb_may_pull(skb, sizeof(*uh)))
+		return -EINVAL;
+
+	ipv6h = ipv6_hdr(skb);
+	uh = udp_hdr(skb);
+
+	uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+				     IPPROTO_UDP, 0);
+	skb->csum_start = skb_transport_header(skb) - skb->head;
+	skb->csum_offset = offsetof(struct udphdr, check);
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	return 0;
+}
+
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+	netdev_features_t features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	unsigned int mss;
+	unsigned int unfrag_ip6hlen, unfrag_len;
+	struct frag_hdr *fptr;
+	u8 *mac_start, *prevhdr;
+	u8 nexthdr;
+	u8 frag_hdr_sz = sizeof(struct frag_hdr);
+	int offset;
+	__wsum csum;
+
+	mss = skb_shinfo(skb)->gso_size;
+	if (unlikely(skb->len <= mss))
+		goto out;
+
+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+		/* Packet is from an untrusted source, reset gso_segs. */
+		int type = skb_shinfo(skb)->gso_type;
+
+		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+			     !(type & (SKB_GSO_UDP))))
+			goto out;
+
+		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+
+		segs = NULL;
+		goto out;
+	}
+
+	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+	 * do checksum of UDP packets sent as multiple IP fragments.
+	 */
+	offset = skb_checksum_start_offset(skb);
+	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+	offset += skb->csum_offset;
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Check if there is enough headroom to insert fragment header. */
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
+	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
+		goto out;
+
+	/* Find the unfragmentable header and shift it left by frag_hdr_sz
+	 * bytes to insert fragment header.
+	 */
+	unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+	nexthdr = *prevhdr;
+	*prevhdr = NEXTHDR_FRAGMENT;
+	unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
+		     unfrag_ip6hlen;
+	mac_start = skb_mac_header(skb);
+	memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+
+	skb->mac_header -= frag_hdr_sz;
+	skb->network_header -= frag_hdr_sz;
+
+	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+	fptr->nexthdr = nexthdr;
+	fptr->reserved = 0;
+	ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+
+	/* Fragment the skb. ipv6 header and the remaining fields of the
+	 * fragment header are updated in ipv6_gso_segment()
+	 */
+	segs = skb_segment(skb, features);
+
+out:
+	return segs;
+}
+static const struct net_offload udpv6_offload = {
+	.callbacks = {
+		.gso_send_check =	udp6_ufo_send_check,
+		.gso_segment	=	udp6_ufo_fragment,
+	},
+};
+
+int __init udp_offload_init(void)
+{
+	return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
+}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index f8c4c08..c984413 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,7 +20,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 
@@ -182,7 +182,7 @@
 			fl6->flowi6_proto = nexthdr;
 			return;
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPPROTO_MH:
 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
 				struct ip6_mh *mh;
@@ -327,21 +327,7 @@
 int __init xfrm6_init(void)
 {
 	int ret;
-	unsigned int gc_thresh;
 
-	/*
-	 * We need a good default value for the xfrm6 gc threshold.
-	 * In ipv4 we set it to the route hash table size * 8, which
-	 * is half the size of the maximaum route cache for ipv4.  It
-	 * would be good to do the same thing for v6, except the table is
-	 * constructed differently here.  Here each table for a net namespace
-	 * can have FIB_TABLE_HASHSZ entries, so lets go with the same
-	 * computation that we used for ipv4 here.  Also, lets keep the initial
-	 * gc_thresh to a minimum of 1024, since, the ipv6 route cache defaults
-	 * to that as a minimum as well
-	 */
-	gc_thresh = FIB6_TABLE_HASHSZ * 8;
-	xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
 	dst_entries_init(&xfrm6_dst_ops);
 
 	ret = xfrm6_policy_init();
@@ -370,7 +356,6 @@
 	if (sysctl_hdr)
 		unregister_net_sysctl_table(sysctl_hdr);
 #endif
-	//xfrm6_input_fini();
 	xfrm6_policy_fini();
 	xfrm6_state_fini();
 	dst_entries_destroy(&xfrm6_dst_ops);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 3f2f7c4..d8c70b8 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -101,7 +101,7 @@
 			return 1;
 		else
 			return 3;
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	case XFRM_MODE_ROUTEOPTIMIZATION:
 	case XFRM_MODE_IN_TRIGGER:
 		return 2;
@@ -134,7 +134,7 @@
 	switch (v->mode) {
 	case XFRM_MODE_TRANSPORT:
 		return 1;
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 	case XFRM_MODE_ROUTEOPTIMIZATION:
 	case XFRM_MODE_IN_TRIGGER:
 		return 2;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 08897a3..5b426a6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -141,7 +141,7 @@
 	struct sock *sk;
 	int err;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 6c4cc12..bbba3a1 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -632,7 +632,7 @@
 	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
 		goto nla_put_failure;
 
-	if ((session->ifname && session->ifname[0] &&
+	if ((session->ifname[0] &&
 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
 	    (session->cookie_len &&
 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c219000..8870988 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -160,7 +160,7 @@
 	struct sock *sk;
 	int rc = -ESOCKTNOSUPPORT;
 
-	if (!capable(CAP_NET_RAW))
+	if (!ns_capable(net->user_ns, CAP_NET_RAW))
 		return -EPERM;
 
 	if (!net_eq(net, &init_net))
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 63af254..b4ecf26 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -248,7 +248,7 @@
 	  Do not select this option.
 
 config MAC80211_MESH_SYNC_DEBUG
-	bool "Verbose mesh mesh synchronization debugging"
+	bool "Verbose mesh synchronization debugging"
 	depends on MAC80211_DEBUG_MENU
 	depends on MAC80211_MESH
 	---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index a7dd110..4911202 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -8,6 +8,7 @@
 	wpa.o \
 	scan.o offchannel.o \
 	ht.o agg-tx.o agg-rx.o \
+	vht.o \
 	ibss.o \
 	iface.o \
 	rate.o \
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index a04752e..537488c 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
+#include <linux/export.h>
 #include <linux/err.h>
 #include <crypto/aes.h>
 
@@ -126,3 +127,20 @@
 {
 	crypto_free_cipher(tfm);
 }
+
+void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
+					u8 *k1, u8 *k2)
+{
+	u8 l[AES_BLOCK_SIZE] = {};
+	struct ieee80211_key *key =
+		container_of(keyconf, struct ieee80211_key, conf);
+
+	crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
+
+	memcpy(k1, l, AES_BLOCK_SIZE);
+	gf_mulx(k1);
+
+	memcpy(k2, k1, AES_BLOCK_SIZE);
+	gf_mulx(k2);
+}
+EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 186d991..808338a 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -118,7 +118,7 @@
 		return;
 	}
 
-	for (i = 0; i < STA_TID_NUM; i++)
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
 		if (ba_rx_bitmap & BIT(i))
 			set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
 
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 3195a63..4152ed1 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -448,7 +448,7 @@
 	if (WARN_ON(!local->ops->ampdu_action))
 		return -EINVAL;
 
-	if ((tid >= STA_TID_NUM) ||
+	if ((tid >= IEEE80211_NUM_TIDS) ||
 	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
 	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
 		return -EINVAL;
@@ -605,9 +605,9 @@
 
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-	if (tid >= STA_TID_NUM) {
+	if (tid >= IEEE80211_NUM_TIDS) {
 		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, STA_TID_NUM);
+		       tid, IEEE80211_NUM_TIDS);
 		return;
 	}
 
@@ -687,7 +687,7 @@
 	if (!local->ops->ampdu_action)
 		return -EINVAL;
 
-	if (tid >= STA_TID_NUM)
+	if (tid >= IEEE80211_NUM_TIDS)
 		return -EINVAL;
 
 	spin_lock_bh(&sta->lock);
@@ -722,9 +722,9 @@
 
 	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-	if (tid >= STA_TID_NUM) {
+	if (tid >= IEEE80211_NUM_TIDS) {
 		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, STA_TID_NUM);
+		       tid, IEEE80211_NUM_TIDS);
 		return;
 	}
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7371f67..4965aa6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -370,29 +370,32 @@
 	return 0;
 }
 
-static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
-{
-	if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
-		struct ieee80211_supported_band *sband;
-		sband = sta->local->hw.wiphy->bands[
-				sta->local->oper_channel->band];
-		rate->legacy = sband->bitrates[idx].bitrate;
-	} else
-		rate->mcs = idx;
-}
-
 void sta_set_rate_info_tx(struct sta_info *sta,
 			  const struct ieee80211_tx_rate *rate,
 			  struct rate_info *rinfo)
 {
 	rinfo->flags = 0;
-	if (rate->flags & IEEE80211_TX_RC_MCS)
+	if (rate->flags & IEEE80211_TX_RC_MCS) {
 		rinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = rate->idx;
+	} else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->mcs = ieee80211_rate_get_vht_mcs(rate);
+		rinfo->nss = ieee80211_rate_get_vht_nss(rate);
+	} else {
+		struct ieee80211_supported_band *sband;
+		sband = sta->local->hw.wiphy->bands[
+				ieee80211_get_sdata_band(sta->sdata)];
+		rinfo->legacy = sband->bitrates[rate->idx].bitrate;
+	}
 	if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 		rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+	if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+		rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
+	if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+		rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
 	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
 		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-	rate_idx_to_bitrate(rinfo, sta, rate->idx);
 }
 
 static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
@@ -443,13 +446,32 @@
 	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
 
 	sinfo->rxrate.flags = 0;
-	if (sta->last_rx_rate_flag & RX_FLAG_HT)
+	if (sta->last_rx_rate_flag & RX_FLAG_HT) {
 		sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
+		sinfo->rxrate.mcs = sta->last_rx_rate_idx;
+	} else if (sta->last_rx_rate_flag & RX_FLAG_VHT) {
+		sinfo->rxrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+		sinfo->rxrate.nss = sta->last_rx_rate_vht_nss;
+		sinfo->rxrate.mcs = sta->last_rx_rate_idx;
+	} else {
+		struct ieee80211_supported_band *sband;
+
+		sband = sta->local->hw.wiphy->bands[
+				ieee80211_get_sdata_band(sta->sdata)];
+		sinfo->rxrate.legacy =
+			sband->bitrates[sta->last_rx_rate_idx].bitrate;
+	}
+
 	if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
 		sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
 	if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
 		sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
-	rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
+	if (sta->last_rx_rate_flag & RX_FLAG_80MHZ)
+		sinfo->rxrate.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
+	if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ)
+		sinfo->rxrate.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
+	if (sta->last_rx_rate_flag & RX_FLAG_160MHZ)
+		sinfo->rxrate.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
@@ -532,6 +554,8 @@
 				   u64 *data)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *channel;
 	struct sta_info *sta;
 	struct ieee80211_local *local = sdata->local;
 	struct station_info sinfo;
@@ -607,19 +631,26 @@
 do_survey:
 	i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
 	/* Get survey stats for current channel */
-	q = 0;
-	while (true) {
-		survey.filled = 0;
-		if (drv_get_survey(local, q, &survey) != 0) {
-			survey.filled = 0;
-			break;
-		}
+	survey.filled = 0;
 
-		if (survey.channel &&
-		    (local->oper_channel->center_freq ==
-		     survey.channel->center_freq))
-			break;
-		q++;
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (chanctx_conf)
+		channel = chanctx_conf->def.chan;
+	else
+		channel = NULL;
+	rcu_read_unlock();
+
+	if (channel) {
+		q = 0;
+		do {
+			survey.filled = 0;
+			if (drv_get_survey(local, q, &survey) != 0) {
+				survey.filled = 0;
+				break;
+			}
+			q++;
+		} while (channel != survey.channel);
 	}
 
 	if (survey.filled)
@@ -724,47 +755,37 @@
 	return ret;
 }
 
-static int ieee80211_set_channel(struct wiphy *wiphy,
-				 struct net_device *netdev,
-				 struct ieee80211_channel *chan,
-				 enum nl80211_channel_type channel_type)
+static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+					 struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_sub_if_data *sdata = NULL;
+	struct ieee80211_sub_if_data *sdata;
+	int ret = 0;
 
-	if (netdev)
-		sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+	if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
+		return 0;
 
-	switch (ieee80211_get_channel_mode(local, NULL)) {
-	case CHAN_MODE_HOPPING:
-		return -EBUSY;
-	case CHAN_MODE_FIXED:
-		if (local->oper_channel != chan ||
-		    (!sdata && local->_oper_channel_type != channel_type))
-			return -EBUSY;
-		if (!sdata && local->_oper_channel_type == channel_type)
-			return 0;
-		break;
-	case CHAN_MODE_UNDEFINED:
-		break;
+	mutex_lock(&local->iflist_mtx);
+	if (local->use_chanctx) {
+		sdata = rcu_dereference_protected(
+				local->monitor_sdata,
+				lockdep_is_held(&local->iflist_mtx));
+		if (sdata) {
+			ieee80211_vif_release_channel(sdata);
+			ret = ieee80211_vif_use_channel(sdata, chandef,
+					IEEE80211_CHANCTX_EXCLUSIVE);
+		}
+	} else if (local->open_count == local->monitors) {
+		local->_oper_channel = chandef->chan;
+		local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
+		ieee80211_hw_config(local, 0);
 	}
 
-	if (!ieee80211_set_channel_type(local, sdata, channel_type))
-		return -EBUSY;
+	if (ret == 0)
+		local->monitor_chandef = *chandef;
+	mutex_unlock(&local->iflist_mtx);
 
-	local->oper_channel = chan;
-
-	/* auto-detects changes */
-	ieee80211_hw_config(local, 0);
-
-	return 0;
-}
-
-static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
-					 struct ieee80211_channel *chan,
-					 enum nl80211_channel_type channel_type)
-{
-	return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
+	return ret;
 }
 
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
@@ -879,8 +900,12 @@
 	if (old)
 		return -EALREADY;
 
-	err = ieee80211_set_channel(wiphy, dev, params->channel,
-				    params->channel_type);
+	/* TODO: make hostapd tell us what it wants */
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
+	sdata->needed_rx_chains = sdata->local->rx_chains;
+
+	err = ieee80211_vif_use_channel(sdata, &params->chandef,
+					IEEE80211_CHANCTX_SHARED);
 	if (err)
 		return err;
 
@@ -912,6 +937,15 @@
 		return err;
 	changed |= err;
 
+	err = drv_start_ap(sdata->local, sdata);
+	if (err) {
+		old = rtnl_dereference(sdata->u.ap.beacon);
+		if (old)
+			kfree_rcu(old, rcu_head);
+		RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+		return err;
+	}
+
 	ieee80211_bss_info_change_notify(sdata, changed);
 
 	netif_carrier_on(dev);
@@ -943,26 +977,40 @@
 
 static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 {
-	struct ieee80211_sub_if_data *sdata, *vlan;
-	struct beacon_data *old;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_local *local = sdata->local;
+	struct beacon_data *old_beacon;
+	struct probe_resp *old_probe_resp;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	old = rtnl_dereference(sdata->u.ap.beacon);
-	if (!old)
+	old_beacon = rtnl_dereference(sdata->u.ap.beacon);
+	if (!old_beacon)
 		return -ENOENT;
+	old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
 
+	/* turn off carrier for this interface and dependent VLANs */
 	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
 		netif_carrier_off(vlan->dev);
 	netif_carrier_off(dev);
 
+	/* remove beacon and probe response */
 	RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+	RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
+	kfree_rcu(old_beacon, rcu_head);
+	if (old_probe_resp)
+		kfree_rcu(old_probe_resp, rcu_head);
 
-	kfree_rcu(old, rcu_head);
-
-	sta_info_flush(sdata->local, sdata);
+	sta_info_flush(local, sdata);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
+	drv_stop_ap(sdata->local, sdata);
+
+	/* free all potentially still buffered bcast frames */
+	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
+	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+
+	ieee80211_vif_release_channel(sdata);
+
 	return 0;
 }
 
@@ -1019,9 +1067,10 @@
 	int i, j;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	u32 mask, set;
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	sband = local->hw.wiphy->bands[band];
 
 	mask = params->sta_flags_mask;
 	set = params->sta_flags_set;
@@ -1136,7 +1185,7 @@
 					rates |= BIT(j);
 			}
 		}
-		sta->sta.supp_rates[local->oper_channel->band] = rates;
+		sta->sta.supp_rates[band] = rates;
 	}
 
 	if (params->ht_capa)
@@ -1144,6 +1193,11 @@
 						  params->ht_capa,
 						  &sta->sta.ht_cap);
 
+	if (params->vht_capa)
+		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+						    params->vht_capa,
+						    &sta->sta.vht_cap);
+
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
 		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED)
@@ -1664,8 +1718,12 @@
 	if (err)
 		return err;
 
-	err = ieee80211_set_channel(wiphy, dev, setup->channel,
-				    setup->channel_type);
+	/* can mesh use other SMPS modes? */
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
+	sdata->needed_rx_chains = sdata->local->rx_chains;
+
+	err = ieee80211_vif_use_channel(sdata, &setup->chandef,
+					IEEE80211_CHANCTX_SHARED);
 	if (err)
 		return err;
 
@@ -1679,6 +1737,7 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	ieee80211_stop_mesh(sdata);
+	ieee80211_vif_release_channel(sdata);
 
 	return 0;
 }
@@ -1688,10 +1747,14 @@
 				struct net_device *dev,
 				struct bss_parameters *params)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	enum ieee80211_band band;
 	u32 changed = 0;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	if (!rtnl_dereference(sdata->u.ap.beacon))
+		return -ENOENT;
+
+	band = ieee80211_get_sdata_band(sdata);
 
 	if (params->use_cts_prot >= 0) {
 		sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
@@ -1704,7 +1767,7 @@
 	}
 
 	if (!sdata->vif.bss_conf.use_short_slot &&
-	    sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
+	    band == IEEE80211_BAND_5GHZ) {
 		sdata->vif.bss_conf.use_short_slot = true;
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}
@@ -1718,9 +1781,7 @@
 	if (params->basic_rates) {
 		int i, j;
 		u32 rates = 0;
-		struct ieee80211_local *local = wiphy_priv(wiphy);
-		struct ieee80211_supported_band *sband =
-			wiphy->bands[local->oper_channel->band];
+		struct ieee80211_supported_band *sband = wiphy->bands[band];
 
 		for (i = 0; i < params->basic_rates_len; i++) {
 			int rate = (params->basic_rates[i] & 0x7f) * 5;
@@ -1829,7 +1890,16 @@
 		 * beaconing hasn't been configured yet
 		 */
 	case NL80211_IFTYPE_AP:
-		if (sdata->u.ap.beacon)
+		/*
+		 * If the scan has been forced (and the driver supports
+		 * forcing), don't care about already beaconing.
+		 * This will create problems for the attached stations (e.g.
+		 * all the frames sent while scanning on another channel will
+		 * be lost).
+		 */
+		if (sdata->u.ap.beacon &&
+		    (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
+		     !(req->flags & NL80211_SCAN_FLAG_AP)))
 			return -EOPNOTSUPP;
 		break;
 	default:
@@ -1872,20 +1942,6 @@
 static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
 			   struct cfg80211_assoc_request *req)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	switch (ieee80211_get_channel_mode(local, sdata)) {
-	case CHAN_MODE_HOPPING:
-		return -EBUSY;
-	case CHAN_MODE_FIXED:
-		if (local->oper_channel == req->bss->channel)
-			break;
-		return -EBUSY;
-	case CHAN_MODE_UNDEFINED:
-		break;
-	}
-
 	return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
 }
 
@@ -1904,30 +1960,22 @@
 static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
 			       struct cfg80211_ibss_params *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	switch (ieee80211_get_channel_mode(local, sdata)) {
-	case CHAN_MODE_HOPPING:
-		return -EBUSY;
-	case CHAN_MODE_FIXED:
-		if (!params->channel_fixed)
-			return -EBUSY;
-		if (local->oper_channel == params->channel)
-			break;
-		return -EBUSY;
-	case CHAN_MODE_UNDEFINED:
-		break;
-	}
-
-	return ieee80211_ibss_join(sdata, params);
+	return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params);
 }
 
 static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 {
+	return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
+static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
+				    int rate[IEEE80211_NUM_BANDS])
+{
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	return ieee80211_ibss_leave(sdata);
+	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+	       sizeof(int) * IEEE80211_NUM_BANDS);
+
+	return 0;
 }
 
 static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
@@ -1956,10 +2004,16 @@
 			return err;
 	}
 
-	if (changed & WIPHY_PARAM_RETRY_SHORT)
+	if (changed & WIPHY_PARAM_RETRY_SHORT) {
+		if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY)
+			return -EINVAL;
 		local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
-	if (changed & WIPHY_PARAM_RETRY_LONG)
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG) {
+		if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY)
+			return -EINVAL;
 		local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
+	}
 	if (changed &
 	    (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
@@ -1968,41 +2022,65 @@
 }
 
 static int ieee80211_set_tx_power(struct wiphy *wiphy,
+				  struct wireless_dev *wdev,
 				  enum nl80211_tx_power_setting type, int mbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_channel *chan = local->oper_channel;
-	u32 changes = 0;
+	struct ieee80211_sub_if_data *sdata;
+
+	if (wdev) {
+		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+		switch (type) {
+		case NL80211_TX_POWER_AUTOMATIC:
+			sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+			break;
+		case NL80211_TX_POWER_LIMITED:
+		case NL80211_TX_POWER_FIXED:
+			if (mbm < 0 || (mbm % 100))
+				return -EOPNOTSUPP;
+			sdata->user_power_level = MBM_TO_DBM(mbm);
+			break;
+		}
+
+		ieee80211_recalc_txpower(sdata);
+
+		return 0;
+	}
 
 	switch (type) {
 	case NL80211_TX_POWER_AUTOMATIC:
-		local->user_power_level = -1;
+		local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
 		break;
 	case NL80211_TX_POWER_LIMITED:
-		if (mbm < 0 || (mbm % 100))
-			return -EOPNOTSUPP;
-		local->user_power_level = MBM_TO_DBM(mbm);
-		break;
 	case NL80211_TX_POWER_FIXED:
 		if (mbm < 0 || (mbm % 100))
 			return -EOPNOTSUPP;
-		/* TODO: move to cfg80211 when it knows the channel */
-		if (MBM_TO_DBM(mbm) > chan->max_power)
-			return -EINVAL;
 		local->user_power_level = MBM_TO_DBM(mbm);
 		break;
 	}
 
-	ieee80211_hw_config(local, changes);
+	mutex_lock(&local->iflist_mtx);
+	list_for_each_entry(sdata, &local->interfaces, list)
+		sdata->user_power_level = local->user_power_level;
+	list_for_each_entry(sdata, &local->interfaces, list)
+		ieee80211_recalc_txpower(sdata);
+	mutex_unlock(&local->iflist_mtx);
 
 	return 0;
 }
 
-static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm)
+static int ieee80211_get_tx_power(struct wiphy *wiphy,
+				  struct wireless_dev *wdev,
+				  int *dbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
-	*dbm = local->hw.conf.power_level;
+	if (!local->use_chanctx)
+		*dbm = local->hw.conf.power_level;
+	else
+		*dbm = sdata->vif.bss_conf.txpower;
 
 	return 0;
 }
@@ -2067,13 +2145,12 @@
 
 	/*
 	 * If not associated, or current association is not an HT
-	 * association, there's no need to send an action frame.
+	 * association, there's no need to do anything, just store
+	 * the new value until we associate.
 	 */
 	if (!sdata->u.mgd.associated ||
-	    sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
-		ieee80211_recalc_smps(sdata->local);
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
 		return 0;
-	}
 
 	ap = sdata->u.mgd.associated->bssid;
 
@@ -2179,7 +2256,6 @@
 static int ieee80211_start_roc_work(struct ieee80211_local *local,
 				    struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_channel *channel,
-				    enum nl80211_channel_type channel_type,
 				    unsigned int duration, u64 *cookie,
 				    struct sk_buff *txskb)
 {
@@ -2189,12 +2265,14 @@
 
 	lockdep_assert_held(&local->mtx);
 
+	if (local->use_chanctx && !local->ops->remain_on_channel)
+		return -EOPNOTSUPP;
+
 	roc = kzalloc(sizeof(*roc), GFP_KERNEL);
 	if (!roc)
 		return -ENOMEM;
 
 	roc->chan = channel;
-	roc->chan_type = channel_type;
 	roc->duration = duration;
 	roc->req_duration = duration;
 	roc->frame = txskb;
@@ -2227,7 +2305,7 @@
 	if (!duration)
 		duration = 10;
 
-	ret = drv_remain_on_channel(local, channel, channel_type, duration);
+	ret = drv_remain_on_channel(local, sdata, channel, duration);
 	if (ret) {
 		kfree(roc);
 		return ret;
@@ -2238,7 +2316,7 @@
 
  out_check_combine:
 	list_for_each_entry(tmp, &local->roc_list, list) {
-		if (tmp->chan != channel || tmp->chan_type != channel_type)
+		if (tmp->chan != channel || tmp->sdata != sdata)
 			continue;
 
 		/*
@@ -2332,13 +2410,22 @@
 		list_add_tail(&roc->list, &local->roc_list);
 
 	/*
-	 * cookie is either the roc (for normal roc)
+	 * cookie is either the roc cookie (for normal roc)
 	 * or the SKB (for mgmt TX)
 	 */
-	if (txskb)
+	if (!txskb) {
+		/* local->mtx protects this */
+		local->roc_cookie_counter++;
+		roc->cookie = local->roc_cookie_counter;
+		/* wow, you wrapped 64 bits ... more likely a bug */
+		if (WARN_ON(roc->cookie == 0)) {
+			roc->cookie = 1;
+			local->roc_cookie_counter++;
+		}
+		*cookie = roc->cookie;
+	} else {
 		*cookie = (unsigned long)txskb;
-	else
-		*cookie = (unsigned long)roc;
+	}
 
 	return 0;
 }
@@ -2346,7 +2433,6 @@
 static int ieee80211_remain_on_channel(struct wiphy *wiphy,
 				       struct wireless_dev *wdev,
 				       struct ieee80211_channel *chan,
-				       enum nl80211_channel_type channel_type,
 				       unsigned int duration,
 				       u64 *cookie)
 {
@@ -2355,7 +2441,7 @@
 	int ret;
 
 	mutex_lock(&local->mtx);
-	ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+	ret = ieee80211_start_roc_work(local, sdata, chan,
 				       duration, cookie, NULL);
 	mutex_unlock(&local->mtx);
 
@@ -2373,7 +2459,7 @@
 		struct ieee80211_roc_work *dep, *tmp2;
 
 		list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
-			if (!mgmt_tx && (unsigned long)dep != cookie)
+			if (!mgmt_tx && dep->cookie != cookie)
 				continue;
 			else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
 				continue;
@@ -2385,7 +2471,7 @@
 			return 0;
 		}
 
-		if (!mgmt_tx && (unsigned long)roc != cookie)
+		if (!mgmt_tx && roc->cookie != cookie)
 			continue;
 		else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
 			continue;
@@ -2448,10 +2534,8 @@
 
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			     struct ieee80211_channel *chan, bool offchan,
-			     enum nl80211_channel_type channel_type,
-			     bool channel_type_valid, unsigned int wait,
-			     const u8 *buf, size_t len, bool no_cck,
-			     bool dont_wait_for_ack, u64 *cookie)
+			     unsigned int wait, const u8 *buf, size_t len,
+			     bool no_cck, bool dont_wait_for_ack, u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct ieee80211_local *local = sdata->local;
@@ -2515,10 +2599,16 @@
 
 	/* Check if the operating channel is the requested channel */
 	if (!need_offchan) {
-		need_offchan = chan != local->oper_channel;
-		if (channel_type_valid &&
-		    channel_type != local->_oper_channel_type)
+		struct ieee80211_chanctx_conf *chanctx_conf;
+
+		rcu_read_lock();
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+		if (chanctx_conf)
+			need_offchan = chan != chanctx_conf->def.chan;
+		else
 			need_offchan = true;
+		rcu_read_unlock();
 	}
 
 	if (need_offchan && !offchan) {
@@ -2552,7 +2642,7 @@
 			local->hw.offchannel_tx_hw_queue;
 
 	/* This will handle all kinds of coalescing and immediate TX */
-	ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+	ret = ieee80211_start_roc_work(local, sdata, chan,
 				       wait, cookie, skb);
 	if (ret)
 		kfree_skb(skb);
@@ -2670,7 +2760,7 @@
 	u16 capab;
 
 	capab = 0;
-	if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
+	if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
 		return capab;
 
 	if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
@@ -2702,7 +2792,7 @@
 			       u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_tdls_data *tf;
 
 	tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2722,10 +2812,8 @@
 		tf->u.setup_req.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false,
-					local->oper_channel->band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false,
-					    local->oper_channel->band);
+		ieee80211_add_srates_ie(sdata, skb, false, band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -2738,10 +2826,8 @@
 		tf->u.setup_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false,
-					local->oper_channel->band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false,
-					    local->oper_channel->band);
+		ieee80211_add_srates_ie(sdata, skb, false, band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -2779,7 +2865,7 @@
 			   u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_mgmt *mgmt;
 
 	mgmt = (void *)skb_put(skb, 24);
@@ -2802,10 +2888,8 @@
 		mgmt->u.action.u.tdls_discover_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false,
-					local->oper_channel->band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false,
-					    local->oper_channel->band);
+		ieee80211_add_srates_ie(sdata, skb, false, band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	default:
@@ -2822,7 +2906,6 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_tx_info *info;
 	struct sk_buff *skb = NULL;
 	bool send_direct;
 	int ret;
@@ -2848,7 +2931,6 @@
 	if (!skb)
 		return -ENOMEM;
 
-	info = IEEE80211_SKB_CB(skb);
 	skb_reserve(skb, local->hw.extra_tx_headroom);
 
 	switch (action_code) {
@@ -2985,12 +3067,19 @@
 	bool qos;
 	struct ieee80211_tx_info *info;
 	struct sta_info *sta;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum ieee80211_band band;
 
 	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	band = chanctx_conf->def.chan->band;
 	sta = sta_info_get(sdata, peer);
 	if (sta) {
 		qos = test_sta_flag(sta, WLAN_STA_WME);
-		rcu_read_unlock();
 	} else {
 		rcu_read_unlock();
 		return -ENOLINK;
@@ -3008,8 +3097,10 @@
 	}
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
-	if (!skb)
+	if (!skb) {
+		rcu_read_unlock();
 		return -ENOMEM;
+	}
 
 	skb->dev = dev;
 
@@ -3034,21 +3125,31 @@
 		nullfunc->qos_ctrl = cpu_to_le16(7);
 
 	local_bh_disable();
-	ieee80211_xmit(sdata, skb);
+	ieee80211_xmit(sdata, skb, band);
 	local_bh_enable();
+	rcu_read_unlock();
 
 	*cookie = (unsigned long) skb;
 	return 0;
 }
 
-static struct ieee80211_channel *
-ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
-			  enum nl80211_channel_type *type)
+static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
+				     struct wireless_dev *wdev,
+				     struct cfg80211_chan_def *chandef)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	int ret = -ENODATA;
 
-	*type = local->_oper_channel_type;
-	return local->oper_channel;
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (chanctx_conf) {
+		*chandef = chanctx_conf->def;
+		ret = 0;
+	}
+	rcu_read_unlock();
+
+	return ret;
 }
 
 #ifdef CONFIG_PM
@@ -3103,6 +3204,7 @@
 	.disassoc = ieee80211_disassoc,
 	.join_ibss = ieee80211_join_ibss,
 	.leave_ibss = ieee80211_leave_ibss,
+	.set_mcast_rate = ieee80211_set_mcast_rate,
 	.set_wiphy_params = ieee80211_set_wiphy_params,
 	.set_tx_power = ieee80211_set_tx_power,
 	.get_tx_power = ieee80211_get_tx_power,
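
For reference (editor's illustration, not part of the patch): nl80211/cfg80211 pass TX power in mBm, i.e. hundredths of a dBm, and MBM_TO_DBM() is a plain integer division by 100. That is why the set_tx_power hunk above rejects requests that are negative or not a whole dBm. A standalone sketch of the arithmetic, assuming only the MBM_TO_DBM() definition from include/net/cfg80211.h:

#include <assert.h>

#define MBM_TO_DBM(mbm)	((mbm) / 100)	/* as defined by cfg80211 */

int main(void)
{
	assert(MBM_TO_DBM(2000) == 20);	/* 20.00 dBm: accepted */
	assert(2050 % 100 != 0);	/* 20.50 dBm: rejected with -EOPNOTSUPP above */
	return 0;
}
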
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 0bfc914..53f0312 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -3,168 +3,347 @@
  */
 
 #include <linux/nl80211.h>
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
+#include "driver-ops.h"
 
-static enum ieee80211_chan_mode
-__ieee80211_get_channel_mode(struct ieee80211_local *local,
-			     struct ieee80211_sub_if_data *ignore)
+static void ieee80211_change_chandef(struct ieee80211_local *local,
+				     struct ieee80211_chanctx *ctx,
+				     const struct cfg80211_chan_def *chandef)
+{
+	if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+		return;
+
+	WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+
+	ctx->conf.def = *chandef;
+	drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
+
+	if (!local->use_chanctx) {
+		local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
+		ieee80211_hw_config(local, 0);
+	}
+}
+
+static struct ieee80211_chanctx *
+ieee80211_find_chanctx(struct ieee80211_local *local,
+		       const struct cfg80211_chan_def *chandef,
+		       enum ieee80211_chanctx_mode mode)
+{
+	struct ieee80211_chanctx *ctx;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
+		return NULL;
+
+	list_for_each_entry(ctx, &local->chanctx_list, list) {
+		const struct cfg80211_chan_def *compat;
+
+		if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
+			continue;
+
+		compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
+		if (!compat)
+			continue;
+
+		ieee80211_change_chandef(local, ctx, compat);
+
+		return ctx;
+	}
+
+	return NULL;
+}
+
+static struct ieee80211_chanctx *
+ieee80211_new_chanctx(struct ieee80211_local *local,
+		      const struct cfg80211_chan_def *chandef,
+		      enum ieee80211_chanctx_mode mode)
+{
+	struct ieee80211_chanctx *ctx;
+	int err;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->conf.def = *chandef;
+	ctx->conf.rx_chains_static = 1;
+	ctx->conf.rx_chains_dynamic = 1;
+	ctx->mode = mode;
+
+	if (!local->use_chanctx) {
+		local->_oper_channel_type =
+			cfg80211_get_chandef_type(chandef);
+		local->_oper_channel = chandef->chan;
+		ieee80211_hw_config(local, 0);
+	} else {
+		err = drv_add_chanctx(local, ctx);
+		if (err) {
+			kfree(ctx);
+			return ERR_PTR(err);
+		}
+	}
+
+	list_add_rcu(&ctx->list, &local->chanctx_list);
+
+	return ctx;
+}
+
+static void ieee80211_free_chanctx(struct ieee80211_local *local,
+				   struct ieee80211_chanctx *ctx)
+{
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	WARN_ON_ONCE(ctx->refcount != 0);
+
+	if (!local->use_chanctx) {
+		local->_oper_channel_type = NL80211_CHAN_NO_HT;
+		ieee80211_hw_config(local, 0);
+	} else {
+		drv_remove_chanctx(local, ctx);
+	}
+
+	list_del_rcu(&ctx->list);
+	kfree_rcu(ctx, rcu_head);
+}
+
+static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
+					struct ieee80211_chanctx *ctx)
+{
+	struct ieee80211_local *local = sdata->local;
+	int ret;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	ret = drv_assign_vif_chanctx(local, sdata, ctx);
+	if (ret)
+		return ret;
+
+	rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
+	ctx->refcount++;
+
+	ieee80211_recalc_txpower(sdata);
+
+	return 0;
+}
+
+static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+					      struct ieee80211_chanctx *ctx)
+{
+	struct ieee80211_chanctx_conf *conf = &ctx->conf;
+	struct ieee80211_sub_if_data *sdata;
+	const struct cfg80211_chan_def *compat = NULL;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+
+		if (!ieee80211_sdata_running(sdata))
+			continue;
+		if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
+			continue;
+
+		if (!compat)
+			compat = &sdata->vif.bss_conf.chandef;
+
+		compat = cfg80211_chandef_compatible(
+				&sdata->vif.bss_conf.chandef, compat);
+		if (!compat)
+			break;
+	}
+	rcu_read_unlock();
+
+	if (WARN_ON_ONCE(!compat))
+		return;
+
+	ieee80211_change_chandef(local, ctx, compat);
+}
+
+static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
+					   struct ieee80211_chanctx *ctx)
+{
+	struct ieee80211_local *local = sdata->local;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	ctx->refcount--;
+	rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+
+	drv_unassign_vif_chanctx(local, sdata, ctx);
+
+	if (ctx->refcount > 0) {
+		ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
+		ieee80211_recalc_smps_chanctx(local, ctx);
+	}
+}
+
+static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *ctx;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	if (!conf)
+		return;
+
+	ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+	ieee80211_unassign_vif_chanctx(sdata, ctx);
+	if (ctx->refcount == 0)
+		ieee80211_free_chanctx(local, ctx);
+}
+
+void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
+				   struct ieee80211_chanctx *chanctx)
 {
 	struct ieee80211_sub_if_data *sdata;
+	u8 rx_chains_static, rx_chains_dynamic;
 
-	lockdep_assert_held(&local->iflist_mtx);
+	lockdep_assert_held(&local->chanctx_mtx);
 
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata == ignore)
-			continue;
+	rx_chains_static = 1;
+	rx_chains_dynamic = 1;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		u8 needed_static, needed_dynamic;
 
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
+		if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
+						&chanctx->conf)
+			continue;
+
 		switch (sdata->vif.type) {
-		case NL80211_IFTYPE_MONITOR:
+		case NL80211_IFTYPE_P2P_DEVICE:
 			continue;
 		case NL80211_IFTYPE_STATION:
 			if (!sdata->u.mgd.associated)
 				continue;
 			break;
-		case NL80211_IFTYPE_ADHOC:
-			if (!sdata->u.ibss.ssid_len)
-				continue;
-			if (!sdata->u.ibss.fixed_channel)
-				return CHAN_MODE_HOPPING;
-			break;
 		case NL80211_IFTYPE_AP_VLAN:
-			/* will also have _AP interface */
 			continue;
 		case NL80211_IFTYPE_AP:
-			if (!sdata->u.ap.beacon)
-				continue;
-			break;
+		case NL80211_IFTYPE_ADHOC:
+		case NL80211_IFTYPE_WDS:
 		case NL80211_IFTYPE_MESH_POINT:
-			if (!sdata->wdev.mesh_id_len)
-				continue;
 			break;
 		default:
+			WARN_ON_ONCE(1);
+		}
+
+		switch (sdata->smps_mode) {
+		default:
+			WARN_ONCE(1, "Invalid SMPS mode %d\n",
+				  sdata->smps_mode);
+			/* fall through */
+		case IEEE80211_SMPS_OFF:
+			needed_static = sdata->needed_rx_chains;
+			needed_dynamic = sdata->needed_rx_chains;
+			break;
+		case IEEE80211_SMPS_DYNAMIC:
+			needed_static = 1;
+			needed_dynamic = sdata->needed_rx_chains;
+			break;
+		case IEEE80211_SMPS_STATIC:
+			needed_static = 1;
+			needed_dynamic = 1;
 			break;
 		}
 
-		return CHAN_MODE_FIXED;
+		rx_chains_static = max(rx_chains_static, needed_static);
+		rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+	}
+	rcu_read_unlock();
+
+	if (!local->use_chanctx) {
+		if (rx_chains_static > 1)
+			local->smps_mode = IEEE80211_SMPS_OFF;
+		else if (rx_chains_dynamic > 1)
+			local->smps_mode = IEEE80211_SMPS_DYNAMIC;
+		else
+			local->smps_mode = IEEE80211_SMPS_STATIC;
+		ieee80211_hw_config(local, 0);
 	}
 
-	return CHAN_MODE_UNDEFINED;
+	if (rx_chains_static == chanctx->conf.rx_chains_static &&
+	    rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
+		return;
+
+	chanctx->conf.rx_chains_static = rx_chains_static;
+	chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
+	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
 }
 
-enum ieee80211_chan_mode
-ieee80211_get_channel_mode(struct ieee80211_local *local,
-			   struct ieee80211_sub_if_data *ignore)
+int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
+			      const struct cfg80211_chan_def *chandef,
+			      enum ieee80211_chanctx_mode mode)
 {
-	enum ieee80211_chan_mode mode;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx *ctx;
+	int ret;
 
-	mutex_lock(&local->iflist_mtx);
-	mode = __ieee80211_get_channel_mode(local, ignore);
-	mutex_unlock(&local->iflist_mtx);
+	WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
 
-	return mode;
-}
+	mutex_lock(&local->chanctx_mtx);
+	__ieee80211_vif_release_channel(sdata);
 
-static enum nl80211_channel_type
-ieee80211_get_superchan(struct ieee80211_local *local,
-			struct ieee80211_sub_if_data *sdata)
-{
-	enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
-	struct ieee80211_sub_if_data *tmp;
-
-	mutex_lock(&local->iflist_mtx);
-	list_for_each_entry(tmp, &local->interfaces, list) {
-		if (tmp == sdata)
-			continue;
-
-		if (!ieee80211_sdata_running(tmp))
-			continue;
-
-		switch (tmp->vif.bss_conf.channel_type) {
-		case NL80211_CHAN_NO_HT:
-		case NL80211_CHAN_HT20:
-			if (superchan > tmp->vif.bss_conf.channel_type)
-				break;
-
-			superchan = tmp->vif.bss_conf.channel_type;
-			break;
-		case NL80211_CHAN_HT40PLUS:
-			WARN_ON(superchan == NL80211_CHAN_HT40MINUS);
-			superchan = NL80211_CHAN_HT40PLUS;
-			break;
-		case NL80211_CHAN_HT40MINUS:
-			WARN_ON(superchan == NL80211_CHAN_HT40PLUS);
-			superchan = NL80211_CHAN_HT40MINUS;
-			break;
-		}
-	}
-	mutex_unlock(&local->iflist_mtx);
-
-	return superchan;
-}
-
-static bool
-ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1,
-				       enum nl80211_channel_type chantype2,
-				       enum nl80211_channel_type *compat)
-{
-	/*
-	 * start out with chantype1 being the result,
-	 * overwriting later if needed
-	 */
-	if (compat)
-		*compat = chantype1;
-
-	switch (chantype1) {
-	case NL80211_CHAN_NO_HT:
-		if (compat)
-			*compat = chantype2;
-		break;
-	case NL80211_CHAN_HT20:
-		/*
-		 * allow any change that doesn't go to no-HT
-		 * (if it already is no-HT no change is needed)
-		 */
-		if (chantype2 == NL80211_CHAN_NO_HT)
-			break;
-		if (compat)
-			*compat = chantype2;
-		break;
-	case NL80211_CHAN_HT40PLUS:
-	case NL80211_CHAN_HT40MINUS:
-		/* allow smaller bandwidth and same */
-		if (chantype2 == NL80211_CHAN_NO_HT)
-			break;
-		if (chantype2 == NL80211_CHAN_HT20)
-			break;
-		if (chantype2 == chantype1)
-			break;
-		return false;
+	ctx = ieee80211_find_chanctx(local, chandef, mode);
+	if (!ctx)
+		ctx = ieee80211_new_chanctx(local, chandef, mode);
+	if (IS_ERR(ctx)) {
+		ret = PTR_ERR(ctx);
+		goto out;
 	}
 
-	return true;
+	sdata->vif.bss_conf.chandef = *chandef;
+
+	ret = ieee80211_assign_vif_chanctx(sdata, ctx);
+	if (ret) {
+		/* if assign fails refcount stays the same */
+		if (ctx->refcount == 0)
+			ieee80211_free_chanctx(local, ctx);
+		goto out;
+	}
+
+	ieee80211_recalc_smps_chanctx(local, ctx);
+ out:
+	mutex_unlock(&local->chanctx_mtx);
+	return ret;
 }
 
-bool ieee80211_set_channel_type(struct ieee80211_local *local,
-				struct ieee80211_sub_if_data *sdata,
-				enum nl80211_channel_type chantype)
+void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 {
-	enum nl80211_channel_type superchan;
-	enum nl80211_channel_type compatchan;
+	WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
 
-	superchan = ieee80211_get_superchan(local, sdata);
-	if (!ieee80211_channel_types_are_compatible(superchan, chantype,
-						    &compatchan))
-		return false;
-
-	local->_oper_channel_type = compatchan;
-
-	if (sdata)
-		sdata->vif.bss_conf.channel_type = chantype;
-
-	return true;
-
+	mutex_lock(&sdata->local->chanctx_mtx);
+	__ieee80211_vif_release_channel(sdata);
+	mutex_unlock(&sdata->local->chanctx_mtx);
 }
+
+void ieee80211_iter_chan_contexts_atomic(
+	struct ieee80211_hw *hw,
+	void (*iter)(struct ieee80211_hw *hw,
+		     struct ieee80211_chanctx_conf *chanctx_conf,
+		     void *data),
+	void *iter_data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_chanctx *ctx;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
+		iter(hw, &ctx->conf, iter_data);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);
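
ieee80211_iter_chan_contexts_atomic() above iterates the context list under rcu_read_lock(), so the callback must not sleep. A minimal, hypothetical driver-side use of the exported iterator (the mydrv_* names are illustrative only and not part of mac80211):

#include <net/mac80211.h>

static void mydrv_chanctx_count_iter(struct ieee80211_hw *hw,
				     struct ieee80211_chanctx_conf *conf,
				     void *data)
{
	int *count = data;

	/* conf->def holds the context's cfg80211_chan_def */
	(*count)++;
}

static int mydrv_num_active_chanctx(struct ieee80211_hw *hw)
{
	int count = 0;

	ieee80211_iter_chan_contexts_atomic(hw, mydrv_chanctx_count_iter,
					    &count);
	return count;
}
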
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 9be4e6d..214ed4e 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -2,9 +2,9 @@
 #define __MAC80211_DEBUGFS_H
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-extern void debugfs_hw_add(struct ieee80211_local *local);
-extern int mac80211_format_buffer(char __user *userbuf, size_t count,
-				  loff_t *ppos, char *fmt, ...);
+void debugfs_hw_add(struct ieee80211_local *local);
+int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
+					  loff_t *ppos, char *fmt, ...);
 #else
 static inline void debugfs_hw_add(struct ieee80211_local *local)
 {
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 090d08f..2d42354 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -116,7 +116,7 @@
 				size_t count, loff_t *ppos)
 {
 	struct ieee80211_key *key = file->private_data;
-	char buf[14*NUM_RX_DATA_QUEUES+1], *p = buf;
+	char buf[14*IEEE80211_NUM_TIDS+1], *p = buf;
 	int i, len;
 	const u8 *rpn;
 
@@ -126,7 +126,7 @@
 		len = scnprintf(buf, sizeof(buf), "\n");
 		break;
 	case WLAN_CIPHER_SUITE_TKIP:
-		for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+		for (i = 0; i < IEEE80211_NUM_TIDS; i++)
 			p += scnprintf(p, sizeof(buf)+buf-p,
 				       "%08x %04x\n",
 				       key->u.tkip.rx[i].iv32,
@@ -134,7 +134,7 @@
 		len = p - buf;
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
-		for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
+		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
 			rpn = key->u.ccmp.rx_pn[i];
 			p += scnprintf(p, sizeof(buf)+buf-p,
 				       "%02x%02x%02x%02x%02x%02x\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 6d5aec9..cbde5cc 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/if.h>
+#include <linux/if_ether.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
@@ -167,7 +168,29 @@
 
 IEEE80211_IF_FILE(flags, flags, HEX);
 IEEE80211_IF_FILE(state, state, LHEX);
-IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
+IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
+IEEE80211_IF_FILE(ap_power_level, ap_power_level, DEC);
+IEEE80211_IF_FILE(user_power_level, user_power_level, DEC);
+
+static ssize_t
+ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata,
+			   char *buf, int buflen)
+{
+	int len;
+
+	len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n",
+			sdata->vif.hw_queue[IEEE80211_AC_VO],
+			sdata->vif.hw_queue[IEEE80211_AC_VI],
+			sdata->vif.hw_queue[IEEE80211_AC_BE],
+			sdata->vif.hw_queue[IEEE80211_AC_BK]);
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		len += scnprintf(buf + len, buflen - len, "cab queue: %d\n",
+				 sdata->vif.cab_queue);
+
+	return len;
+}
+__IEEE80211_IF_FILE(hw_queues, NULL);
 
 /* STA attributes */
 IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -217,7 +240,7 @@
 
 	return snprintf(buf, buflen, "request: %s\nused: %s\n",
 			smps_modes[sdata->u.mgd.req_smps],
-			smps_modes[sdata->u.mgd.ap_smps]);
+			smps_modes[sdata->smps_mode]);
 }
 
 static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
@@ -245,27 +268,6 @@
 	return -EOPNOTSUPP;
 }
 
-static int hwaddr_aton(const char *txt, u8 *addr)
-{
-	int i;
-
-	for (i = 0; i < ETH_ALEN; i++) {
-		int a, b;
-
-		a = hex_to_bin(*txt++);
-		if (a < 0)
-			return -1;
-		b = hex_to_bin(*txt++);
-		if (b < 0)
-			return -1;
-		*addr++ = (a << 4) | b;
-		if (i < 5 && *txt++ != ':')
-			return -1;
-	}
-
-	return 0;
-}
-
 static ssize_t ieee80211_if_parse_tkip_mic_test(
 	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
 {
@@ -275,13 +277,7 @@
 	struct ieee80211_hdr *hdr;
 	__le16 fc;
 
-	/*
-	 * Assume colon-delimited MAC address with possible white space
-	 * following.
-	 */
-	if (buflen < 3 * ETH_ALEN - 1)
-		return -EINVAL;
-	if (hwaddr_aton(buf, addr) < 0)
+	if (!mac_pton(buf, addr))
 		return -EINVAL;
 
 	if (!ieee80211_sdata_running(sdata))
@@ -307,13 +303,16 @@
 	case NL80211_IFTYPE_STATION:
 		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
 		/* BSSID SA DA */
-		if (sdata->vif.bss_conf.bssid == NULL) {
+		mutex_lock(&sdata->u.mgd.mtx);
+		if (!sdata->u.mgd.associated) {
+			mutex_unlock(&sdata->u.mgd.mtx);
 			dev_kfree_skb(skb);
 			return -ENOTCONN;
 		}
-		memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
+		memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
 		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
 		memcpy(hdr->addr3, addr, ETH_ALEN);
+		mutex_unlock(&sdata->u.mgd.mtx);
 		break;
 	default:
 		dev_kfree_skb(skb);
@@ -395,14 +394,14 @@
 
 /* AP attributes */
 IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
-IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
-IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
+IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC);
+IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC);
 
 static ssize_t ieee80211_if_fmt_num_buffered_multicast(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
 	return scnprintf(buf, buflen, "%u\n",
-			 skb_queue_len(&sdata->u.ap.ps_bc_buf));
+			 skb_queue_len(&sdata->u.ap.ps.bc_buf));
 }
 __IEEE80211_IF_FILE(num_buffered_multicast, NULL);
 
@@ -443,7 +442,7 @@
 		}
 		ret = kstrtoull(buf, 10, &tsf);
 		if (ret < 0)
-			return -EINVAL;
+			return ret;
 		if (tsf_is_delta)
 			tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
 		if (local->ops->set_tsf) {
@@ -471,7 +470,7 @@
 		  u.mesh.mshstats.dropped_frames_congestion, DEC);
 IEEE80211_IF_FILE(dropped_frames_no_route,
 		  u.mesh.mshstats.dropped_frames_no_route, DEC);
-IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
+IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
 
 /* Mesh parameters */
 IEEE80211_IF_FILE(dot11MeshMaxRetries,
@@ -531,6 +530,7 @@
 	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+	DEBUGFS_ADD(hw_queues);
 }
 
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
@@ -631,7 +631,9 @@
 
 	DEBUGFS_ADD(flags);
 	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
+	DEBUGFS_ADD(txpower);
+	DEBUGFS_ADD(user_power_level);
+	DEBUGFS_ADD(ap_power_level);
 
 	if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
 		add_common_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 5ccec2c..89281d2 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -14,6 +14,7 @@
 #include "debugfs.h"
 #include "debugfs_sta.h"
 #include "sta_info.h"
+#include "driver-ops.h"
 
 /* sta attributes */
 
@@ -131,10 +132,10 @@
 static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
 				      size_t count, loff_t *ppos)
 {
-	char buf[15*NUM_RX_DATA_QUEUES], *p = buf;
+	char buf[15*IEEE80211_NUM_TIDS], *p = buf;
 	int i;
 	struct sta_info *sta = file->private_data;
-	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
 			       le16_to_cpu(sta->last_seq_ctrl[i]));
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n");
@@ -145,7 +146,7 @@
 static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
 					size_t count, loff_t *ppos)
 {
-	char buf[71 + STA_TID_NUM * 40], *p = buf;
+	char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf;
 	int i;
 	struct sta_info *sta = file->private_data;
 	struct tid_ampdu_rx *tid_rx;
@@ -158,7 +159,7 @@
 	p += scnprintf(p, sizeof(buf) + buf - p,
 		       "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
 
-	for (i = 0; i < STA_TID_NUM; i++) {
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
 		tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
 
@@ -220,7 +221,7 @@
 
 	tid = simple_strtoul(buf, NULL, 0);
 
-	if (tid >= STA_TID_NUM)
+	if (tid >= IEEE80211_NUM_TIDS)
 		return -EINVAL;
 
 	if (tx) {
@@ -334,6 +335,8 @@
 
 void ieee80211_sta_debugfs_add(struct sta_info *sta)
 {
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
 	u8 mac[3*ETH_ALEN];
 
@@ -379,10 +382,16 @@
 	DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
 	DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
 	DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
+
+	drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
 }
 
 void ieee80211_sta_debugfs_remove(struct sta_info *sta)
 {
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+	drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
 	debugfs_remove_recursive(sta->debugfs.dir);
 	sta->debugfs.dir = NULL;
 }
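
The drv_sta_add_debugfs()/drv_sta_remove_debugfs() calls added above hand the per-station debugfs directory to the driver. A hypothetical driver hook, assuming the driver keeps per-station state in sta->drv_priv (sized via hw->sta_data_size); the mydrv_* names are illustrative only:

#include <linux/debugfs.h>
#include <net/mac80211.h>

struct mydrv_sta {
	u32 tx_retries;		/* hypothetical per-station counter */
};

static void mydrv_sta_add_debugfs(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta,
				  struct dentry *dir)
{
	struct mydrv_sta *msta = (void *)sta->drv_priv;

	/* files created here end up in the station's debugfs directory */
	debugfs_create_u32("tx_retries", 0400, dir, &msta->tx_retries);
}

Since mac80211 removes the whole directory recursively right after the remove callback, a matching sta_remove_debugfs hook typically only needs to forget any dentries the driver cached.
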
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index da9003b..c6560cc 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -490,6 +490,38 @@
 	trace_drv_return_void(local);
 }
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_sta *sta,
+				       struct dentry *dir)
+{
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
+	if (local->ops->sta_add_debugfs)
+		local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
+					    sta, dir);
+}
+
+static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
+					  struct ieee80211_sub_if_data *sdata,
+					  struct ieee80211_sta *sta,
+					  struct dentry *dir)
+{
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
+	if (local->ops->sta_remove_debugfs)
+		local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
+					       sta, dir);
+}
+#endif
+
 static inline __must_check
 int drv_sta_state(struct ieee80211_local *local,
 		  struct ieee80211_sub_if_data *sdata,
@@ -704,17 +736,17 @@
 }
 
 static inline int drv_remain_on_channel(struct ieee80211_local *local,
+					struct ieee80211_sub_if_data *sdata,
 					struct ieee80211_channel *chan,
-					enum nl80211_channel_type chantype,
 					unsigned int duration)
 {
 	int ret;
 
 	might_sleep();
 
-	trace_drv_remain_on_channel(local, chan, chantype, duration);
-	ret = local->ops->remain_on_channel(&local->hw, chan, chantype,
-					    duration);
+	trace_drv_remain_on_channel(local, sdata, chan, duration);
+	ret = local->ops->remain_on_channel(&local->hw, &sdata->vif,
+					    chan, duration);
 	trace_drv_return_int(local, ret);
 
 	return ret;
@@ -871,4 +903,104 @@
 		local->ops->mgd_prepare_tx(&local->hw, &sdata->vif);
 	trace_drv_return_void(local);
 }
+
+static inline int drv_add_chanctx(struct ieee80211_local *local,
+				  struct ieee80211_chanctx *ctx)
+{
+	int ret = -EOPNOTSUPP;
+
+	trace_drv_add_chanctx(local, ctx);
+	if (local->ops->add_chanctx)
+		ret = local->ops->add_chanctx(&local->hw, &ctx->conf);
+	trace_drv_return_int(local, ret);
+
+	return ret;
+}
+
+static inline void drv_remove_chanctx(struct ieee80211_local *local,
+				      struct ieee80211_chanctx *ctx)
+{
+	trace_drv_remove_chanctx(local, ctx);
+	if (local->ops->remove_chanctx)
+		local->ops->remove_chanctx(&local->hw, &ctx->conf);
+	trace_drv_return_void(local);
+}
+
+static inline void drv_change_chanctx(struct ieee80211_local *local,
+				      struct ieee80211_chanctx *ctx,
+				      u32 changed)
+{
+	trace_drv_change_chanctx(local, ctx, changed);
+	if (local->ops->change_chanctx)
+		local->ops->change_chanctx(&local->hw, &ctx->conf, changed);
+	trace_drv_return_void(local);
+}
+
+static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
+					 struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_chanctx *ctx)
+{
+	int ret = 0;
+
+	check_sdata_in_driver(sdata);
+
+	trace_drv_assign_vif_chanctx(local, sdata, ctx);
+	if (local->ops->assign_vif_chanctx)
+		ret = local->ops->assign_vif_chanctx(&local->hw,
+						     &sdata->vif,
+						     &ctx->conf);
+	trace_drv_return_int(local, ret);
+
+	return ret;
+}
+
+static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
+					    struct ieee80211_sub_if_data *sdata,
+					    struct ieee80211_chanctx *ctx)
+{
+	check_sdata_in_driver(sdata);
+
+	trace_drv_unassign_vif_chanctx(local, sdata, ctx);
+	if (local->ops->unassign_vif_chanctx)
+		local->ops->unassign_vif_chanctx(&local->hw,
+						 &sdata->vif,
+						 &ctx->conf);
+	trace_drv_return_void(local);
+}
+
+static inline int drv_start_ap(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata)
+{
+	int ret = 0;
+
+	check_sdata_in_driver(sdata);
+
+	trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
+	if (local->ops->start_ap)
+		ret = local->ops->start_ap(&local->hw, &sdata->vif);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+static inline void drv_stop_ap(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata)
+{
+	check_sdata_in_driver(sdata);
+
+	trace_drv_stop_ap(local, sdata);
+	if (local->ops->stop_ap)
+		local->ops->stop_ap(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
+
+static inline void drv_restart_complete(struct ieee80211_local *local)
+{
+	might_sleep();
+
+	trace_drv_restart_complete(local);
+	if (local->ops->restart_complete)
+		local->ops->restart_complete(&local->hw);
+	trace_drv_return_void(local);
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
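
The drv_*_chanctx() wrappers above call into new ieee80211_ops entries. A hypothetical sketch of the driver side, matching the signatures the wrappers use (mydrv_* names are illustrative; a real driver would program its hardware in these hooks and advertise them in its struct ieee80211_ops):

#include <net/mac80211.h>

static int mydrv_add_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *ctx)
{
	/* tune the radio to ctx->def.chan with width ctx->def.width */
	return 0;
}

static void mydrv_remove_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *ctx)
{
	/* release whatever mydrv_add_chanctx() set up */
}

static void mydrv_change_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *ctx,
				 u32 changed)
{
	/* changed carries IEEE80211_CHANCTX_CHANGE_* bits, e.g. the WIDTH
	 * and RX_CHAINS changes issued by the mac80211 code above */
}

static int mydrv_assign_vif_chanctx(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_chanctx_conf *ctx)
{
	/* bind this interface to the context's channel */
	return 0;
}

static void mydrv_unassign_vif_chanctx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_chanctx_conf *ctx)
{
	/* undo the binding done in mydrv_assign_vif_chanctx() */
}
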
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 4b4538d..a71d891 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -185,7 +185,7 @@
 
 	cancel_work_sync(&sta->ampdu_mlme.work);
 
-	for (i = 0; i <  STA_TID_NUM; i++) {
+	for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
 					       WLAN_REASON_QSTA_LEAVE_QBSS, tx);
@@ -209,7 +209,7 @@
 		return;
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
-	for (tid = 0; tid < STA_TID_NUM; tid++) {
+	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
 		if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
 			___ieee80211_stop_rx_ba_session(
 				sta, tid, WLAN_BACK_RECIPIENT,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c21e33d..fa862b2 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -26,7 +26,6 @@
 #include "rate.h"
 
 #define IEEE80211_SCAN_INTERVAL (2 * HZ)
-#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
 #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
 
 #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
@@ -39,7 +38,8 @@
 				      const u8 *bssid, const int beacon_int,
 				      struct ieee80211_channel *chan,
 				      const u32 basic_rates,
-				      const u16 capability, u64 tsf)
+				      const u16 capability, u64 tsf,
+				      bool creator)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
@@ -51,7 +51,7 @@
 	struct cfg80211_bss *bss;
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
 
 	lockdep_assert_held(&ifibss->mtx);
 
@@ -72,25 +72,29 @@
 	/* if merging, indicate to driver that we leave the old IBSS */
 	if (sdata->vif.bss_conf.ibss_joined) {
 		sdata->vif.bss_conf.ibss_joined = false;
+		sdata->vif.bss_conf.ibss_creator = false;
 		netif_carrier_off(sdata->dev);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
 	}
 
-	memcpy(ifibss->bssid, bssid, ETH_ALEN);
-
 	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-	local->oper_channel = chan;
-	channel_type = ifibss->channel_type;
-	if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
-		channel_type = NL80211_CHAN_HT20;
-	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
-		/* can only fail due to HT40+/- mismatch */
-		channel_type = NL80211_CHAN_HT20;
-		WARN_ON(!ieee80211_set_channel_type(local, sdata,
-						    NL80211_CHAN_HT20));
+	cfg80211_chandef_create(&chandef, chan, ifibss->channel_type);
+	if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+		chandef.width = NL80211_CHAN_WIDTH_20;
+		chandef.center_freq1 = chan->center_freq;
 	}
-	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+	ieee80211_vif_release_channel(sdata);
+	if (ieee80211_vif_use_channel(sdata, &chandef,
+				      ifibss->fixed_channel ?
+					IEEE80211_CHANCTX_SHARED :
+					IEEE80211_CHANCTX_EXCLUSIVE)) {
+		sdata_info(sdata, "Failed to join IBSS, no channel context\n");
+		return;
+	}
+
+	memcpy(ifibss->bssid, bssid, ETH_ALEN);
 
 	sband = local->hw.wiphy->bands[chan->band];
 
@@ -156,7 +160,8 @@
 		       ifibss->ie, ifibss->ie_len);
 
 	/* add HT capability and information IEs */
-	if (channel_type && sband->ht_cap.ht_supported) {
+	if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+	    sband->ht_cap.ht_supported) {
 		pos = skb_put(skb, 4 +
 				   sizeof(struct ieee80211_ht_cap) +
 				   sizeof(struct ieee80211_ht_operation));
@@ -168,7 +173,7 @@
 		 * keep them at 0
 		 */
 		pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
-						 chan, channel_type, 0);
+						 &chandef, 0);
 	}
 
 	if (local->hw.queues >= IEEE80211_NUM_ACS) {
@@ -197,6 +202,7 @@
 	bss_change |= BSS_CHANGED_HT;
 	bss_change |= BSS_CHANGED_IBSS;
 	sdata->vif.bss_conf.ibss_joined = true;
+	sdata->vif.bss_conf.ibss_creator = creator;
 	ieee80211_bss_info_change_notify(sdata, bss_change);
 
 	ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
@@ -249,7 +255,8 @@
 				  cbss->channel,
 				  basic_rates,
 				  cbss->capability,
-				  cbss->tsf);
+				  cbss->tsf,
+				  false);
 }
 
 static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
@@ -279,7 +286,7 @@
 		ibss_dbg(sdata,
 			 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
 			 sdata->vif.addr, addr, sdata->u.ibss.bssid);
-		ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
+		ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
 				    addr, sdata->u.ibss.bssid, NULL, 0, 0);
 	}
 	return sta;
@@ -294,7 +301,8 @@
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->oper_channel->band;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	int band;
 
 	/*
 	 * XXX: Consider removing the least recently used entry and
@@ -317,6 +325,13 @@
 		return NULL;
 	}
 
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON_ONCE(!chanctx_conf))
+		return NULL;
+	band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
 	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 	if (!sta) {
 		rcu_read_lock();
@@ -362,11 +377,13 @@
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
-		return;
 	ibss_dbg(sdata,
 		 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
 		 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+
+	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
+		return;
+
 	sta_info_destroy_addr(sdata, mgmt->sa);
 	sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
 	rcu_read_unlock();
@@ -389,7 +406,7 @@
 	 * However, try to reply to authentication attempts if someone
 	 * has actually implemented this.
 	 */
-	ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
+	ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0,
 			    mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
 }
 
@@ -461,9 +478,11 @@
 		    sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
 			/* we both use HT */
 			struct ieee80211_sta_ht_cap sta_ht_cap_new;
-			enum nl80211_channel_type channel_type =
-				ieee80211_ht_oper_to_channel_type(
-							elems->ht_operation);
+			struct cfg80211_chan_def chandef;
+
+			ieee80211_ht_oper_to_chandef(channel,
+						     elems->ht_operation,
+						     &chandef);
 
 			ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 							  elems->ht_cap_elem,
@@ -473,9 +492,9 @@
 			 * fall back to HT20 if we don't use or use
 			 * the other extension channel
 			 */
-			if (!(channel_type == NL80211_CHAN_HT40MINUS ||
-			      channel_type == NL80211_CHAN_HT40PLUS) ||
-			    channel_type != sdata->u.ibss.channel_type)
+			if (chandef.width != NL80211_CHAN_WIDTH_40 ||
+			    cfg80211_get_chandef_type(&chandef) !=
+						sdata->u.ibss.channel_type)
 				sta_ht_cap_new.cap &=
 					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 
@@ -517,7 +536,8 @@
 		goto put_bss;
 
 	/* different channel */
-	if (cbss->channel != local->oper_channel)
+	if (sdata->u.ibss.fixed_channel &&
+	    sdata->u.ibss.channel != cbss->channel)
 		goto put_bss;
 
 	/* different SSID */
@@ -530,30 +550,11 @@
 	if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
 		goto put_bss;
 
-	if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
-		/*
-		 * For correct IBSS merging we need mactime; since mactime is
-		 * defined as the time the first data symbol of the frame hits
-		 * the PHY, and the timestamp of the beacon is defined as "the
-		 * time that the data symbol containing the first bit of the
-		 * timestamp is transmitted to the PHY plus the transmitting
-		 * STA's delays through its local PHY from the MAC-PHY
-		 * interface to its interface with the WM" (802.11 11.1.2)
-		 * - equals the time this bit arrives at the receiver - we have
-		 * to take into account the offset between the two.
-		 *
-		 * E.g. at 1 MBit that means mactime is 192 usec earlier
-		 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
-		 */
-		int rate;
-
-		if (rx_status->flag & RX_FLAG_HT)
-			rate = 65; /* TODO: HT rates */
-		else
-			rate = local->hw.wiphy->bands[band]->
-				bitrates[rx_status->rate_idx].bitrate;
-
-		rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
+	if (ieee80211_have_rx_timestamp(rx_status)) {
+		/* time when timestamp field was received */
+		rx_timestamp =
+			ieee80211_calculate_rx_timestamp(local, rx_status,
+							 len + FCS_LEN, 24);
 	} else {
 		/*
 		 * second best option: get current TSF
@@ -592,7 +593,8 @@
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->oper_channel->band;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	int band;
 
 	/*
 	 * XXX: Consider removing the least recently used entry and
@@ -610,6 +612,15 @@
 	if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
 		return;
 
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON_ONCE(!chanctx_conf)) {
+		rcu_read_unlock();
+		return;
+	}
+	band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
 	if (!sta)
 		return;
@@ -715,7 +726,7 @@
 
 	__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
 				  ifibss->channel, ifibss->basic_rates,
-				  capability, 0);
+				  capability, 0, true);
 }
 
 /*
@@ -784,18 +795,8 @@
 		int interval = IEEE80211_SCAN_INTERVAL;
 
 		if (time_after(jiffies, ifibss->ibss_join_req +
-			       IEEE80211_IBSS_JOIN_TIMEOUT)) {
-			if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) {
-				ieee80211_sta_create_ibss(sdata);
-				return;
-			}
-			sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-				   local->oper_channel->center_freq);
-
-			/* No IBSS found - decrease scan interval and continue
-			 * scanning. */
-			interval = IEEE80211_SCAN_INTERVAL_SLOW;
-		}
+			       IEEE80211_IBSS_JOIN_TIMEOUT))
+			ieee80211_sta_create_ibss(sdata);
 
 		mod_timer(&ifibss->timer,
 			  round_jiffies(jiffies + interval));
@@ -1082,21 +1083,11 @@
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
-	sdata->u.ibss.channel = params->channel;
-	sdata->u.ibss.channel_type = params->channel_type;
+	sdata->u.ibss.channel = params->chandef.chan;
+	sdata->u.ibss.channel_type =
+		cfg80211_get_chandef_type(&params->chandef);
 	sdata->u.ibss.fixed_channel = params->channel_fixed;
 
-	/* fix ourselves to that channel now already */
-	if (params->channel_fixed) {
-		sdata->local->oper_channel = params->channel;
-		if (!ieee80211_set_channel_type(sdata->local, sdata,
-					       params->channel_type)) {
-			mutex_unlock(&sdata->u.ibss.mtx);
-			kfree_skb(skb);
-			return -EINVAL;
-		}
-	}
-
 	if (params->ie) {
 		sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
 					   GFP_KERNEL);
@@ -1134,6 +1125,9 @@
 	changed |= BSS_CHANGED_HT;
 	ieee80211_bss_info_change_notify(sdata, changed);
 
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
+	sdata->needed_rx_chains = sdata->local->rx_chains;
+
 	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
 	return 0;
@@ -1197,6 +1191,7 @@
 					lockdep_is_held(&sdata->u.ibss.mtx));
 	RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
 	sdata->vif.bss_conf.ibss_joined = false;
+	sdata->vif.bss_conf.ibss_creator = false;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
 						BSS_CHANGED_IBSS);
 	synchronize_rcu();
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 156e583..5c0d5a6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -56,6 +56,9 @@
 #define TU_TO_JIFFIES(x)	(usecs_to_jiffies((x) * 1024))
 #define TU_TO_EXP_TIME(x)	(jiffies + TU_TO_JIFFIES(x))
 
+/* power level hasn't been configured (or set to automatic) */
+#define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
+
 /*
  * Some APs experience problems when working with U-APSD. Decrease the
  * probability of that happening by using legacy mode for all ACs but VO.
@@ -280,21 +283,25 @@
 	u8 data[0];
 };
 
+struct ps_data {
+	/* yes, this looks ugly, but guarantees that we can later use
+	 * bitmap_empty :)
+	 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
+	u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
+	struct sk_buff_head bc_buf;
+	atomic_t num_sta_ps; /* number of stations in PS mode */
+	int dtim_count;
+	bool dtim_bc_mc;
+};
+
 struct ieee80211_if_ap {
 	struct beacon_data __rcu *beacon;
 	struct probe_resp __rcu *probe_resp;
 
 	struct list_head vlans;
 
-	/* yes, this looks ugly, but guarantees that we can later use
-	 * bitmap_empty :)
-	 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
-	u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
-	struct sk_buff_head ps_bc_buf;
-	atomic_t num_sta_ps; /* number of stations in PS mode */
+	struct ps_data ps;
 	atomic_t num_mcast_sta; /* number of stations receiving multicast */
-	int dtim_count;
-	bool dtim_bc_mc;
 };
 
 struct ieee80211_if_wds {
@@ -316,7 +323,6 @@
 	__u32 dropped_frames_ttl;	/* Not transmitted since mesh_ttl == 0*/
 	__u32 dropped_frames_no_route;	/* Not transmitted, no route found */
 	__u32 dropped_frames_congestion;/* Not forwarded due to congestion */
-	atomic_t estab_plinks;
 };
 
 #define PREQ_Q_F_START		0x1
@@ -342,7 +348,6 @@
 	struct ieee80211_sub_if_data *sdata;
 
 	struct ieee80211_channel *chan;
-	enum nl80211_channel_type chan_type;
 
 	bool started, abort, hw_begun, notified;
 
@@ -350,7 +355,7 @@
 
 	u32 duration, req_duration;
 	struct sk_buff *frame;
-	u64 mgmt_tx_cookie;
+	u64 cookie, mgmt_tx_cookie;
 };
 
 /* flags used in struct ieee80211_if_managed.flags */
@@ -358,7 +363,7 @@
 	IEEE80211_STA_BEACON_POLL	= BIT(0),
 	IEEE80211_STA_CONNECTION_POLL	= BIT(1),
 	IEEE80211_STA_CONTROL_PORT	= BIT(2),
-	IEEE80211_STA_DISABLE_11N	= BIT(4),
+	IEEE80211_STA_DISABLE_HT	= BIT(4),
 	IEEE80211_STA_CSA_RECEIVED	= BIT(5),
 	IEEE80211_STA_MFP_ENABLED	= BIT(6),
 	IEEE80211_STA_UAPSD_ENABLED	= BIT(7),
@@ -378,8 +383,9 @@
 	u8 key_len, key_idx;
 	bool done;
 
-	size_t ie_len;
-	u8 ie[];
+	u16 sae_trans, sae_status;
+	size_t data_len;
+	u8 data[];
 };
 
 struct ieee80211_mgd_assoc_data {
@@ -433,7 +439,6 @@
 	bool powersave; /* powersave requested for this iface */
 	bool broken_ap; /* AP is broken -- turn off powersave */
 	enum ieee80211_smps_mode req_smps, /* requested smps mode */
-				 ap_smps, /* smps mode AP thinks we're in */
 				 driver_smps_mode; /* smps mode request */
 
 	struct work_struct request_smps_work;
@@ -467,6 +472,8 @@
 
 	u8 use_4addr;
 
+	u8 p2p_noa_index;
+
 	/* Signal strength from the last Beacon frame in the current BSS. */
 	int last_beacon_signal;
 
@@ -599,6 +606,7 @@
 	int preq_queue_len;
 	struct mesh_stats mshstats;
 	struct mesh_config mshcfg;
+	atomic_t estab_plinks;
 	u32 mesh_seqnum;
 	bool accepting_plinks;
 	int num_gates;
@@ -610,7 +618,7 @@
 		IEEE80211_MESH_SEC_SECURED = 0x2,
 	} security;
 	/* Extensible Synchronization Framework */
-	struct ieee80211_mesh_sync_ops *sync_ops;
+	const struct ieee80211_mesh_sync_ops *sync_ops;
 	s64 sync_offset_clockdrift_max;
 	spinlock_t sync_offset_lock;
 	bool adjusting_tbtt;
@@ -658,6 +666,30 @@
 	SDATA_STATE_OFFCHANNEL,
 };
 
+/**
+ * enum ieee80211_chanctx_mode - channel context configuration mode
+ *
+ * @IEEE80211_CHANCTX_SHARED: channel context may be used by
+ *	multiple interfaces
+ * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used
+ *	only by a single interface. This can be used for example for
+ *	non-fixed channel IBSS.
+ */
+enum ieee80211_chanctx_mode {
+	IEEE80211_CHANCTX_SHARED,
+	IEEE80211_CHANCTX_EXCLUSIVE
+};
+
+struct ieee80211_chanctx {
+	struct list_head list;
+	struct rcu_head rcu_head;
+
+	enum ieee80211_chanctx_mode mode;
+	int refcount;
+
+	struct ieee80211_chanctx_conf conf;
+};
+
 struct ieee80211_sub_if_data {
 	struct list_head list;
 
@@ -704,11 +736,20 @@
 
 	struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
 
+	/* used to reconfigure hardware SM PS */
+	struct work_struct recalc_smps;
+
 	struct work_struct work;
 	struct sk_buff_head skb_queue;
 
 	bool arp_filter_state;
 
+	u8 needed_rx_chains;
+	enum ieee80211_smps_mode smps_mode;
+
+	int user_power_level; /* in dBm */
+	int ap_power_level; /* in dBm */
+
 	/*
 	 * AP this belongs to: self in AP mode and
 	 * corresponding AP in VLAN mode, NULL for
@@ -749,6 +790,21 @@
 	return container_of(p, struct ieee80211_sub_if_data, vif);
 }
 
+static inline enum ieee80211_band
+ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
+{
+	enum ieee80211_band band = IEEE80211_BAND_2GHZ;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!WARN_ON(!chanctx_conf))
+		band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
+	return band;
+}
+
 enum sdata_queue_type {
 	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
 	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
@@ -821,6 +877,7 @@
  * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
  *	send out data
  * @SCAN_RESUME: Resume the scan and scan the next channel
+ * @SCAN_ABORT: Abort the scan and go back to operating channel
  */
 enum mac80211_scan_state {
 	SCAN_DECISION,
@@ -828,6 +885,7 @@
 	SCAN_SEND_PROBE,
 	SCAN_SUSPEND,
 	SCAN_RESUME,
+	SCAN_ABORT,
 };
 
 struct ieee80211_local {
@@ -858,15 +916,14 @@
 
 	bool wiphy_ciphers_allocated;
 
+	bool use_chanctx;
+
 	/* protects the aggregated multicast list and filter calls */
 	spinlock_t filter_lock;
 
 	/* used for uploading changed mc list */
 	struct work_struct reconfig_filter;
 
-	/* used to reconfigure hardware SM PS */
-	struct work_struct recalc_smps;
-
 	/* aggregated multicast list */
 	struct netdev_hw_addr_list mc_list;
 
@@ -903,6 +960,9 @@
 	/* wowlan is enabled -- don't reconfig on resume */
 	bool wowlan;
 
+	/* number of RX chains the hardware has */
+	u8 rx_chains;
+
 	int tx_headroom; /* required headroom for hardware/radiotap */
 
 	/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -980,12 +1040,17 @@
 	enum mac80211_scan_state next_scan_state;
 	struct delayed_work scan_work;
 	struct ieee80211_sub_if_data __rcu *scan_sdata;
+	struct ieee80211_channel *csa_channel;
+	/* For backward compatibility only -- do not use */
+	struct ieee80211_channel *_oper_channel;
 	enum nl80211_channel_type _oper_channel_type;
-	struct ieee80211_channel *oper_channel, *csa_channel;
 
 	/* Temporary remain-on-channel for off-channel operations */
 	struct ieee80211_channel *tmp_channel;
-	enum nl80211_channel_type tmp_channel_type;
+
+	/* channel contexts */
+	struct list_head chanctx_list;
+	struct mutex chanctx_mtx;
 
 	/* SNMP counters */
 	/* dot11CountersTable */
@@ -1058,8 +1123,7 @@
 	int dynamic_ps_user_timeout;
 	bool disable_dynamic_ps;
 
-	int user_power_level; /* in dBm */
-	int ap_power_level; /* in dBm */
+	int user_power_level; /* in dBm, for all interfaces */
 
 	enum ieee80211_smps_mode smps_mode;
 
@@ -1078,6 +1142,7 @@
 	struct list_head roc_list;
 	struct work_struct hw_roc_start, hw_roc_done;
 	unsigned long hw_roc_start_time;
+	u64 roc_cookie_counter;
 
 	struct idr ack_status_frames;
 	spinlock_t ack_status_lock;
@@ -1091,6 +1156,7 @@
 
 	/* virtual monitor interface */
 	struct ieee80211_sub_if_data __rcu *monitor_sdata;
+	struct cfg80211_chan_def monitor_chandef;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1133,6 +1199,8 @@
 	u8 *wmm_param;
 	struct ieee80211_ht_cap *ht_cap_elem;
 	struct ieee80211_ht_operation *ht_operation;
+	struct ieee80211_vht_cap *vht_cap_elem;
+	struct ieee80211_vht_operation *vht_operation;
 	struct ieee80211_meshconf_ie *mesh_config;
 	u8 *mesh_id;
 	u8 *peering;
@@ -1188,7 +1256,18 @@
 	       is_broadcast_ether_addr(raddr);
 }
 
+static inline bool
+ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
+{
+	WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
+		     status->flag & RX_FLAG_MACTIME_END);
+	return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
+}
 
+u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
+				     struct ieee80211_rx_status *status,
+				     unsigned int mpdu_len,
+				     unsigned int mpdu_offset);
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
@@ -1302,6 +1381,9 @@
 int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
 void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
+bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
 	return test_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1361,6 +1443,13 @@
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
 
+u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
+
+/* VHT */
+void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_supported_band *sband,
+					 struct ieee80211_vht_cap *vht_cap_ie,
+					 struct ieee80211_sta_vht_cap *vht_cap);
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
 				       struct ieee80211_mgmt *mgmt,
@@ -1395,11 +1484,42 @@
 				     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
 			       bool bss_notify);
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		    enum ieee80211_band band);
 
-void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
-			  struct sk_buff *skb, int tid);
-static void inline ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
+void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
+				 struct sk_buff *skb, int tid,
+				 enum ieee80211_band band);
+
+static inline void
+ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
+			  struct sk_buff *skb, int tid,
+			  enum ieee80211_band band)
+{
+	rcu_read_lock();
+	__ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
+	rcu_read_unlock();
+}
+
+static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+					struct sk_buff *skb, int tid)
+{
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		kfree_skb(skb);
+		return;
+	}
+
+	__ieee80211_tx_skb_tid_band(sdata, skb, tid,
+				    chanctx_conf->def.chan->band);
+	rcu_read_unlock();
+}
+
+static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
 				    struct sk_buff *skb)
 {
 	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
@@ -1446,7 +1566,7 @@
 }
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
-			 u16 transaction, u16 auth_alg,
+			 u16 transaction, u16 auth_alg, u16 status,
 			 u8 *extra, size_t extra_len, const u8 *bssid,
 			 const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
 void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
@@ -1466,7 +1586,7 @@
 			      const u8 *ssid, size_t ssid_len,
 			      const u8 *ie, size_t ie_len,
 			      u32 ratemask, bool directed, bool no_cck,
-			      struct ieee80211_channel *channel);
+			      struct ieee80211_channel *channel, bool scan);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
 				  const size_t supp_rates_len,
@@ -1476,7 +1596,7 @@
 			    enum ieee80211_band band, u32 *basic_rates);
 int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
 			     enum ieee80211_smps_mode smps_mode);
-void ieee80211_recalc_smps(struct ieee80211_local *local);
+void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
 
 size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
 			  const u8 *ids, int n_ids, size_t offset);
@@ -1484,8 +1604,7 @@
 u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 			      u16 cap);
 u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
-			       struct ieee80211_channel *channel,
-			       enum nl80211_channel_type channel_type,
+			       const struct cfg80211_chan_def *chandef,
 			       u16 prot_mode);
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
 			       u32 cap);
@@ -1497,20 +1616,18 @@
 				enum ieee80211_band band);
 
 /* channel management */
-enum ieee80211_chan_mode {
-	CHAN_MODE_UNDEFINED,
-	CHAN_MODE_HOPPING,
-	CHAN_MODE_FIXED,
-};
+void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
+				  struct ieee80211_ht_operation *ht_oper,
+				  struct cfg80211_chan_def *chandef);
 
-enum ieee80211_chan_mode
-ieee80211_get_channel_mode(struct ieee80211_local *local,
-			   struct ieee80211_sub_if_data *ignore);
-bool ieee80211_set_channel_type(struct ieee80211_local *local,
-				struct ieee80211_sub_if_data *sdata,
-				enum nl80211_channel_type chantype);
-enum nl80211_channel_type
-ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
+int __must_check
+ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
+			  const struct cfg80211_chan_def *chandef,
+			  enum ieee80211_chanctx_mode mode);
+void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
+
+void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
+				   struct ieee80211_chanctx *chanctx);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
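
The ieee80211_tx_skb_tid() helper added above resolves the transmit band from the vif's channel-context pointer under rcu_read_lock() and frees the frame if no context is assigned yet. A minimal userspace sketch of that look-up-or-drop pattern follows (a plain mutex stands in for RCU; every name in it is illustrative, not part of mac80211):

#include <pthread.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

struct chanctx_conf { enum band band; };

static struct chanctx_conf *current_ctx;	/* may be NULL */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Transmit helper: look up the band under the lock, drop if unset. */
static int tx_frame(const char *frame)
{
	enum band band;

	pthread_mutex_lock(&ctx_lock);
	if (!current_ctx) {
		pthread_mutex_unlock(&ctx_lock);
		fprintf(stderr, "no channel context, dropping \"%s\"\n", frame);
		return -1;			/* analogous to kfree_skb() */
	}
	band = current_ctx->band;
	pthread_mutex_unlock(&ctx_lock);

	printf("tx \"%s\" on band %d\n", frame, band);
	return 0;
}

int main(void)
{
	static struct chanctx_conf ctx = { .band = BAND_5GHZ };

	tx_frame("mgmt-1");		/* dropped: no context yet */
	current_ctx = &ctx;
	tx_frame("mgmt-2");		/* sent, band taken from the context */
	return 0;
}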
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7de7717..5331662 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -42,6 +42,41 @@
  * by either the RTNL, the iflist_mtx or RCU.
  */
 
+bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	int power;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf) {
+		rcu_read_unlock();
+		return false;
+	}
+
+	power = chanctx_conf->def.chan->max_power;
+	rcu_read_unlock();
+
+	if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
+		power = min(power, sdata->user_power_level);
+
+	if (sdata->ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
+		power = min(power, sdata->ap_power_level);
+
+	if (power != sdata->vif.bss_conf.txpower) {
+		sdata->vif.bss_conf.txpower = power;
+		ieee80211_hw_config(sdata->local, 0);
+		return true;
+	}
+
+	return false;
+}
+
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+{
+	if (__ieee80211_recalc_txpower(sdata))
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
+}
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
 			      const char *reason)
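
__ieee80211_recalc_txpower() above starts from the channel's max_power and clamps it by the user and AP limits when those are set, reporting a change only when the effective value actually moves. A minimal sketch of that clamping logic under the same sentinel convention (names and the sentinel value here are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define UNSET_POWER_LEVEL  (-100)	/* stand-in for IEEE80211_UNSET_POWER_LEVEL */

static int min_int(int a, int b) { return a < b ? a : b; }

struct vif_power {
	int user_power_level;	/* dBm, or UNSET_POWER_LEVEL */
	int ap_power_level;	/* dBm, or UNSET_POWER_LEVEL */
	int txpower;		/* currently configured dBm */
};

/* Returns true when the effective tx power changed. */
static bool recalc_txpower(struct vif_power *v, int chan_max_power)
{
	int power = chan_max_power;

	if (v->user_power_level != UNSET_POWER_LEVEL)
		power = min_int(power, v->user_power_level);
	if (v->ap_power_level != UNSET_POWER_LEVEL)
		power = min_int(power, v->ap_power_level);

	if (power != v->txpower) {
		v->txpower = power;
		return true;
	}
	return false;
}

int main(void)
{
	struct vif_power v = {
		.user_power_level = 15,
		.ap_power_level = UNSET_POWER_LEVEL,
		.txpower = 20,
	};
	/* channel allows 20 dBm, user caps at 15 dBm -> change to 15 */
	bool changed = recalc_txpower(&v, 20);

	printf("changed: %d, txpower: %d dBm\n", changed, v.txpower);
	return 0;
}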
@@ -380,6 +415,14 @@
 		goto out_unlock;
 	}
 
+	ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
+					IEEE80211_CHANCTX_EXCLUSIVE);
+	if (ret) {
+		drv_remove_interface(local, sdata);
+		kfree(sdata);
+		goto out_unlock;
+	}
+
 	rcu_assign_pointer(local->monitor_sdata, sdata);
  out_unlock:
 	mutex_unlock(&local->iflist_mtx);
@@ -403,6 +446,8 @@
 	rcu_assign_pointer(local->monitor_sdata, NULL);
 	synchronize_net();
 
+	ieee80211_vif_release_channel(sdata);
+
 	drv_remove_interface(local, sdata);
 
 	kfree(sdata);
@@ -665,7 +710,6 @@
 	struct sk_buff *skb, *tmp;
 	u32 hw_reconf_flags = 0;
 	int i;
-	enum nl80211_channel_type orig_ct;
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
@@ -729,34 +773,17 @@
 	del_timer_sync(&local->dynamic_ps_timer);
 	cancel_work_sync(&local->dynamic_ps_enable_work);
 
+	cancel_work_sync(&sdata->recalc_smps);
+
 	/* APs need special treatment */
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
-		struct beacon_data *old_beacon =
-			rtnl_dereference(sdata->u.ap.beacon);
-		struct probe_resp *old_probe_resp =
-			rtnl_dereference(sdata->u.ap.probe_resp);
-
-		/* sdata_running will return false, so this will disable */
-		ieee80211_bss_info_change_notify(sdata,
-						 BSS_CHANGED_BEACON_ENABLED);
-
-		/* remove beacon and probe response */
-		RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
-		RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
-		synchronize_rcu();
-		kfree(old_beacon);
-		kfree(old_probe_resp);
 
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
 					 u.vlan.list)
 			dev_close(vlan->dev);
 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
-
-		/* free all potentially still buffered bcast frames */
-		local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
-		skb_queue_purge(&sdata->u.ap.ps_bc_buf);
 	} else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		ieee80211_mgd_stop(sdata);
 	}
@@ -837,14 +864,8 @@
 		hw_reconf_flags = 0;
 	}
 
-	/* Re-calculate channel-type, in case there are multiple vifs
-	 * on different channel types.
-	 */
-	orig_ct = local->_oper_channel_type;
-	ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
-
 	/* do after stop to avoid reconfiguring when we stop anyway */
-	if (hw_reconf_flags || (orig_ct != local->_oper_channel_type))
+	if (hw_reconf_flags)
 		ieee80211_hw_config(local, hw_reconf_flags);
 
 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
@@ -1121,6 +1142,13 @@
 	}
 }
 
+static void ieee80211_recalc_smps_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data, recalc_smps);
+
+	ieee80211_recalc_smps(sdata);
+}
 
 /*
  * Helper function to initialise an interface to a specific type.
@@ -1149,6 +1177,7 @@
 
 	skb_queue_head_init(&sdata->skb_queue);
 	INIT_WORK(&sdata->work, ieee80211_iface_work);
+	INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
 
 	switch (type) {
 	case NL80211_IFTYPE_P2P_GO:
@@ -1157,7 +1186,7 @@
 		sdata->vif.p2p = true;
 		/* fall through */
 	case NL80211_IFTYPE_AP:
-		skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
+		skb_queue_head_init(&sdata->u.ap.ps.bc_buf);
 		INIT_LIST_HEAD(&sdata->u.ap.vlans);
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
@@ -1282,11 +1311,6 @@
 	if (type == ieee80211_vif_type_p2p(&sdata->vif))
 		return 0;
 
-	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
-	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
-	    type == NL80211_IFTYPE_ADHOC)
-		return -EOPNOTSUPP;
-
 	if (ieee80211_sdata_running(sdata)) {
 		ret = ieee80211_runtime_change_iftype(sdata, type);
 		if (ret)
@@ -1298,9 +1322,6 @@
 	}
 
 	/* reset some values that shouldn't be kept across type changes */
-	sdata->vif.bss_conf.basic_rates =
-		ieee80211_mandatory_rates(sdata->local,
-			sdata->local->oper_channel->band);
 	sdata->drop_unencrypted = 0;
 	if (type == NL80211_IFTYPE_STATION)
 		sdata->u.mgd.use_4addr = false;
@@ -1523,6 +1544,9 @@
 
 	ieee80211_set_default_queues(sdata);
 
+	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
+	sdata->user_power_level = local->user_power_level;
+
 	/* setup type-dependent data */
 	ieee80211_setup_sdata(sdata, type);
 
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d27e61a..619c5d6 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -339,7 +339,7 @@
 		key->conf.iv_len = TKIP_IV_LEN;
 		key->conf.icv_len = TKIP_ICV_LEN;
 		if (seq) {
-			for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
+			for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 				key->u.tkip.rx[i].iv32 =
 					get_unaligned_le32(&seq[2]);
 				key->u.tkip.rx[i].iv16 =
@@ -352,7 +352,7 @@
 		key->conf.iv_len = CCMP_HDR_LEN;
 		key->conf.icv_len = CCMP_MIC_LEN;
 		if (seq) {
-			for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++)
+			for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
 				for (j = 0; j < CCMP_PN_LEN; j++)
 					key->u.ccmp.rx_pn[i][j] =
 						seq[CCMP_PN_LEN - j - 1];
@@ -372,8 +372,9 @@
 		key->conf.iv_len = 0;
 		key->conf.icv_len = sizeof(struct ieee80211_mmie);
 		if (seq)
-			for (j = 0; j < 6; j++)
-				key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
+			for (j = 0; j < CMAC_PN_LEN; j++)
+				key->u.aes_cmac.rx_pn[j] =
+					seq[CMAC_PN_LEN - j - 1];
 		/*
 		 * Initialize AES key state here as an optimization so that
 		 * it does not need to be initialized for every packet.
@@ -654,16 +655,16 @@
 
 	switch (key->conf.cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
-		if (WARN_ON(tid < 0 || tid >= NUM_RX_DATA_QUEUES))
+		if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
 			return;
 		seq->tkip.iv32 = key->u.tkip.rx[tid].iv32;
 		seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
-		if (WARN_ON(tid < -1 || tid >= NUM_RX_DATA_QUEUES))
+		if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
 			return;
 		if (tid < 0)
-			pn = key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES];
+			pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
 		else
 			pn = key->u.ccmp.rx_pn[tid];
 		memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 7d4e31f..7cff0d3 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -30,8 +30,6 @@
 #define TKIP_ICV_LEN		4
 #define CMAC_PN_LEN		6
 
-#define NUM_RX_DATA_QUEUES	16
-
 struct ieee80211_local;
 struct ieee80211_sub_if_data;
 struct sta_info;
@@ -82,17 +80,17 @@
 			struct tkip_ctx tx;
 
 			/* last received RSC */
-			struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
+			struct tkip_ctx rx[IEEE80211_NUM_TIDS];
 		} tkip;
 		struct {
 			atomic64_t tx_pn;
 			/*
 			 * Last received packet number. The first
-			 * NUM_RX_DATA_QUEUES counters are used with Data
+			 * IEEE80211_NUM_TIDS counters are used with Data
 			 * frames and the last counter is used with Robust
 			 * Management frames.
 			 */
-			u8 rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN];
+			u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN];
 			struct crypto_cipher *tfm;
 			u32 replays; /* dot11RSNAStatsCCMPReplays */
 		} ccmp;
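
The key code above keeps one CCMP replay counter per TID plus a final slot reserved for robust management frames; a caller passing tid < 0 reads that last slot. A compact illustration of the indexing convention (NUM_TIDS = 16 and the 6-byte PN length follow the kernel definitions; the helper itself is made up for the example):

#include <stdio.h>
#include <string.h>

#define NUM_TIDS	16	/* IEEE80211_NUM_TIDS */
#define PN_LEN		6	/* CCMP_PN_LEN */

/* One 6-byte packet number per TID, plus one for management frames. */
static unsigned char rx_pn[NUM_TIDS + 1][PN_LEN];

static const unsigned char *get_rx_pn(int tid)
{
	if (tid < 0)
		return rx_pn[NUM_TIDS];	/* robust management frames */
	if (tid >= NUM_TIDS)
		return NULL;		/* invalid TID */
	return rx_pn[tid];
}

int main(void)
{
	memset(rx_pn[NUM_TIDS], 0xff, PN_LEN);	/* mark the mgmt slot */
	printf("mgmt PN first byte: 0x%02x\n", get_rx_pn(-1)[0]);
	return 0;
}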
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f57f597..f5e4c1f 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -93,15 +93,15 @@
 	ieee80211_configure_filter(local);
 }
 
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
 {
+	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_channel *chan;
-	int ret = 0;
+	u32 changed = 0;
 	int power;
 	enum nl80211_channel_type channel_type;
 	u32 offchannel_flag;
-
-	might_sleep();
+	bool scanning = false;
 
 	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 	if (local->scan_channel) {
@@ -109,19 +109,19 @@
 		/* If scanning on oper channel, use whatever channel-type
 		 * is currently in use.
 		 */
-		if (chan == local->oper_channel)
+		if (chan == local->_oper_channel)
 			channel_type = local->_oper_channel_type;
 		else
 			channel_type = NL80211_CHAN_NO_HT;
 	} else if (local->tmp_channel) {
 		chan = local->tmp_channel;
-		channel_type = local->tmp_channel_type;
+		channel_type = NL80211_CHAN_NO_HT;
 	} else {
-		chan = local->oper_channel;
+		chan = local->_oper_channel;
 		channel_type = local->_oper_channel_type;
 	}
 
-	if (chan != local->oper_channel ||
+	if (chan != local->_oper_channel ||
 	    channel_type != local->_oper_channel_type)
 		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
 	else
@@ -148,22 +148,39 @@
 		changed |= IEEE80211_CONF_CHANGE_SMPS;
 	}
 
-	if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
-	    test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
-	    test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-	    !local->ap_power_level)
-		power = chan->max_power;
-	else
-		power = min(chan->max_power, local->ap_power_level);
+	scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+		   test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
+		   test_bit(SCAN_HW_SCANNING, &local->scanning);
+	power = chan->max_power;
 
-	if (local->user_power_level >= 0)
-		power = min(power, local->user_power_level);
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (!rcu_access_pointer(sdata->vif.chanctx_conf))
+			continue;
+		power = min(power, sdata->vif.bss_conf.txpower);
+	}
+	rcu_read_unlock();
 
 	if (local->hw.conf.power_level != power) {
 		changed |= IEEE80211_CONF_CHANGE_POWER;
 		local->hw.conf.power_level = power;
 	}
 
+	return changed;
+}
+
+int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+{
+	int ret = 0;
+
+	might_sleep();
+
+	if (!local->use_chanctx)
+		changed |= ieee80211_hw_conf_chan(local);
+	else
+		changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
+			     IEEE80211_CONF_CHANGE_POWER);
+
 	if (changed && local->open_count) {
 		ret = drv_config(local, changed);
 		/*
@@ -359,14 +376,6 @@
 }
 EXPORT_SYMBOL(ieee80211_restart_hw);
 
-static void ieee80211_recalc_smps_work(struct work_struct *work)
-{
-	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local, recalc_smps);
-
-	ieee80211_recalc_smps(local);
-}
-
 #ifdef CONFIG_INET
 static int ieee80211_ifa_changed(struct notifier_block *nb,
 				 unsigned long data, void *arg)
@@ -540,6 +549,7 @@
 	struct ieee80211_local *local;
 	int priv_size, i;
 	struct wiphy *wiphy;
+	bool use_chanctx;
 
 	if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
 		    !ops->add_interface || !ops->remove_interface ||
@@ -549,6 +559,14 @@
 	if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
 		return NULL;
 
+	/* check all or no channel context operations exist */
+	i = !!ops->add_chanctx + !!ops->remove_chanctx +
+	    !!ops->change_chanctx + !!ops->assign_vif_chanctx +
+	    !!ops->unassign_vif_chanctx;
+	if (WARN_ON(i != 0 && i != 5))
+		return NULL;
+	use_chanctx = i == 5;
+
 	/* Ensure 32-byte alignment of our private data and hw private data.
 	 * We use the wiphy priv data for both our ieee80211_local and for
 	 * the driver's private data
@@ -584,8 +602,15 @@
 	if (ops->remain_on_channel)
 		wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
-	wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
-			  NL80211_FEATURE_HT_IBSS;
+	wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
+			   NL80211_FEATURE_SAE |
+			   NL80211_FEATURE_HT_IBSS |
+			   NL80211_FEATURE_VIF_TXPOWER;
+
+	if (!ops->hw_scan)
+		wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
+				   NL80211_FEATURE_AP_SCAN;
+
 
 	if (!ops->set_key)
 		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -599,6 +624,7 @@
 	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
 	local->ops = ops;
+	local->use_chanctx = use_chanctx;
 
 	/* set up some defaults */
 	local->hw.queues = 1;
@@ -612,7 +638,7 @@
 	local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
 					 IEEE80211_RADIOTAP_MCS_HAVE_GI |
 					 IEEE80211_RADIOTAP_MCS_HAVE_BW;
-	local->user_power_level = -1;
+	local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
 	wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
 
 	INIT_LIST_HEAD(&local->interfaces);
@@ -626,6 +652,9 @@
 	spin_lock_init(&local->filter_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
 
+	INIT_LIST_HEAD(&local->chanctx_list);
+	mutex_init(&local->chanctx_mtx);
+
 	/*
 	 * The rx_skb_queue is only accessed from tasklets,
 	 * but other SKB queues are used from within IRQ
@@ -641,7 +670,6 @@
 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
 
 	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
-	INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
 	local->smps_mode = IEEE80211_SMPS_OFF;
 
 	INIT_WORK(&local->dynamic_ps_enable_work,
@@ -719,6 +747,25 @@
 	if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
 		return -EINVAL;
 
+	if (!local->use_chanctx) {
+		for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
+			const struct ieee80211_iface_combination *comb;
+
+			comb = &local->hw.wiphy->iface_combinations[i];
+
+			if (comb->num_different_channels > 1)
+				return -EINVAL;
+		}
+	} else {
+		/*
+		 * WDS is currently prohibited when channel contexts are used
+		 * because there's no clear definition of which channel WDS
+		 * type interfaces use
+		 */
+		if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
+			return -EINVAL;
+	}
+
 	/* Only HW csum features are currently compatible with mac80211 */
 	feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			    NETIF_F_HW_CSUM;
@@ -728,6 +775,8 @@
 	if (hw->max_report_rates == 0)
 		hw->max_report_rates = hw->max_rates;
 
+	local->rx_chains = 1;
+
 	/*
 	 * generic code guarantees at least one band,
 	 * set this very early because much code assumes
@@ -743,18 +792,28 @@
 		sband = local->hw.wiphy->bands[band];
 		if (!sband)
 			continue;
-		if (!local->oper_channel) {
+		if (!local->use_chanctx && !local->_oper_channel) {
 			/* init channel we're on */
 			local->hw.conf.channel =
-			local->oper_channel = &sband->channels[0];
+			local->_oper_channel = &sband->channels[0];
 			local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
 		}
+		cfg80211_chandef_create(&local->monitor_chandef,
+					&sband->channels[0],
+					NL80211_CHAN_NO_HT);
 		channels += sband->n_channels;
 
 		if (max_bitrates < sband->n_bitrates)
 			max_bitrates = sband->n_bitrates;
 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
 		supp_vht = supp_vht || sband->vht_cap.vht_supported;
+
+		if (sband->ht_cap.ht_supported)
+			local->rx_chains =
+				max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+				    local->rx_chains);
+
+		/* TODO: consider VHT for RX chains, hopefully it's the same */
 	}
 
 	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -778,19 +837,13 @@
 	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
 	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
 
-	/*
-	 * mac80211 doesn't support more than 1 channel, and also not more
-	 * than one IBSS interface
-	 */
+	/* mac80211 doesn't support more than one IBSS interface right now */
 	for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
 		const struct ieee80211_iface_combination *c;
 		int j;
 
 		c = &hw->wiphy->iface_combinations[i];
 
-		if (c->num_different_channels > 1)
-			return -EINVAL;
-
 		for (j = 0; j < c->n_limits; j++)
 			if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
 			    c->limits[j].max > 1)
@@ -830,9 +883,21 @@
 	if (supp_ht)
 		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
 
-	if (supp_vht)
+	if (supp_vht) {
 		local->scan_ies_len +=
-			2 + sizeof(struct ieee80211_vht_capabilities);
+			2 + sizeof(struct ieee80211_vht_cap);
+
+		/*
+		 * (for now at least), drivers wanting to use VHT must
+		 * support channel contexts, as they contain all the
+		 * necessary VHT information and the global hw config
+		 * doesn't (yet)
+		 */
+		if (WARN_ON(!local->use_chanctx)) {
+			result = -EINVAL;
+			goto fail_wiphy_register;
+		}
+	}
 
 	if (!local->ops->hw_scan) {
 		/* For hw_scan, driver needs to set these up. */
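
ieee80211_alloc_hw() above decides whether the driver uses channel contexts by counting the five chanctx callbacks and requiring them to be implemented all-or-nothing. A stripped-down illustration of that check (the ops structure below is invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct driver_ops {
	int (*add_chanctx)(void);
	int (*remove_chanctx)(void);
	int (*change_chanctx)(void);
	int (*assign_vif_chanctx)(void);
	int (*unassign_vif_chanctx)(void);
};

/* Returns true if all five are set, false if none; flags a bug otherwise. */
static bool ops_use_chanctx(const struct driver_ops *ops, bool *invalid)
{
	int n = !!ops->add_chanctx + !!ops->remove_chanctx +
		!!ops->change_chanctx + !!ops->assign_vif_chanctx +
		!!ops->unassign_vif_chanctx;

	*invalid = (n != 0 && n != 5);	/* partial implementations are rejected */
	return n == 5;
}

static int dummy(void) { return 0; }

int main(void)
{
	struct driver_ops partial = { .add_chanctx = dummy };
	bool invalid;

	ops_use_chanctx(&partial, &invalid);
	printf("partial ops invalid: %s\n", invalid ? "yes" : "no");
	return 0;
}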
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ff0296c..1bf03f9 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -76,7 +76,7 @@
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
 	u32 basic_rates = 0;
-	enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT;
+	struct cfg80211_chan_def sta_chan_def;
 
 	/*
 	 * As support for each feature is added, check for matching
@@ -97,23 +97,17 @@
 	     (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
 		goto mismatch;
 
-	ieee80211_sta_get_rates(local, ie, local->oper_channel->band,
+	ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
 				&basic_rates);
 
 	if (sdata->vif.bss_conf.basic_rates != basic_rates)
 		goto mismatch;
 
-	if (ie->ht_operation)
-		sta_channel_type =
-			ieee80211_ht_oper_to_channel_type(ie->ht_operation);
+	ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
+				     ie->ht_operation, &sta_chan_def);
 
-	/* Disallow HT40+/- mismatch */
-	if (ie->ht_operation &&
-	    (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
-	     sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
-	    (sta_channel_type == NL80211_CHAN_HT40MINUS ||
-	     sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-	    sdata->vif.bss_conf.channel_type != sta_channel_type)
+	if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
+					 &sta_chan_def))
 		goto mismatch;
 
 	return true;
@@ -129,7 +123,7 @@
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
 {
 	return (ie->mesh_config->meshconf_cap &
-	    MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
+	    IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
 }
 
 /**
@@ -264,16 +258,16 @@
 	/* Authentication Protocol identifier */
 	*pos++ = ifmsh->mesh_auth_id;
 	/* Mesh Formation Info - number of neighbors */
-	neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
+	neighbors = atomic_read(&ifmsh->estab_plinks);
 	/* Number of neighbor mesh STAs or 15 whichever is smaller */
 	neighbors = (neighbors > 15) ? 15 : neighbors;
 	*pos++ = neighbors << 1;
 	/* Mesh capability */
-	*pos = MESHCONF_CAPAB_FORWARDING;
+	*pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
 	*pos |= ifmsh->accepting_plinks ?
-	    MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+	    IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
 	*pos++ |= ifmsh->adjusting_tbtt ?
-	    MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
+	    IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
 	*pos++ = 0x00;
 
 	return 0;
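
The meshconf IE tail built above packs the established-peering count into the Mesh Formation Info byte (capped at 15 and shifted left by one) and ORs the capability flags into the next byte. A small sketch of that packing; the bit positions match the enum this patch drops from mesh.h (ACCEPT_PLINKS bit 0, FORWARDING bit 3, TBTT_ADJUSTING bit 5):

#include <stdio.h>

#define CAPAB_ACCEPT_PLINKS	(1 << 0)
#define CAPAB_FORWARDING	(1 << 3)
#define CAPAB_TBTT_ADJUSTING	(1 << 5)

static void build_meshconf_tail(unsigned char *pos, int neighbors,
				int accepting_plinks, int adjusting_tbtt)
{
	/* Mesh Formation Info: number of neighbours (max 15), shifted by one */
	if (neighbors > 15)
		neighbors = 15;
	*pos++ = neighbors << 1;

	/* Mesh Capability */
	*pos = CAPAB_FORWARDING;
	*pos |= accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00;
	*pos++ |= adjusting_tbtt ? CAPAB_TBTT_ADJUSTING : 0x00;
	*pos++ = 0x00;
}

int main(void)
{
	unsigned char buf[3] = { 0 };

	build_meshconf_tail(buf, 20, 1, 0);
	printf("formation info 0x%02x, capability 0x%02x\n", buf[0], buf[1]);
	return 0;
}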
@@ -355,12 +349,22 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *chan = local->oper_channel;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
 	u8 *pos;
 
 	if (skb_tailroom(skb) < 3)
 		return -ENOMEM;
 
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	chan = chanctx_conf->def.chan;
+	rcu_read_unlock();
+
 	sband = local->hw.wiphy->bands[chan->band];
 	if (sband->band == IEEE80211_BAND_2GHZ) {
 		pos = skb_put(skb, 2 + 1);
@@ -376,12 +380,13 @@
 		       struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	sband = local->hw.wiphy->bands[band];
 	if (!sband->ht_cap.ht_supported ||
-	    sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
 		return 0;
 
 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -397,14 +402,26 @@
 			struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_channel *channel = local->oper_channel;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *channel;
 	enum nl80211_channel_type channel_type =
-				sdata->vif.bss_conf.channel_type;
-	struct ieee80211_supported_band *sband =
-				local->hw.wiphy->bands[channel->band];
-	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+		cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef);
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_sta_ht_cap *ht_cap;
 	u8 *pos;
 
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	channel = chanctx_conf->def.chan;
+	rcu_read_unlock();
+
+	sband = local->hw.wiphy->bands[channel->band];
+	ht_cap = &sband->ht_cap;
+
 	if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
 		return 0;
 
@@ -412,7 +429,7 @@
 		return -ENOMEM;
 
 	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
-	ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type,
+	ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
 				   sdata->vif.bss_conf.ht_operation_mode);
 
 	return 0;
@@ -610,7 +627,7 @@
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	sdata->vif.bss_conf.basic_rates =
 		ieee80211_mandatory_rates(sdata->local,
-					  sdata->local->oper_channel->band);
+					  ieee80211_get_sdata_band(sdata));
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
 						BSS_CHANGED_BEACON_ENABLED |
 						BSS_CHANGED_HT |
@@ -680,8 +697,10 @@
 	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
 			       &elems);
 
-	/* ignore beacons from secure mesh peers if our security is off */
-	if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE)
+	/* ignore non-mesh or secure / insecure mismatch */

+	if ((!elems.mesh_id || !elems.mesh_config) ||
+	    (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
+	    (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
 		return;
 
 	if (elems.ds_params && elems.ds_params_len == 1)
@@ -694,8 +713,7 @@
 	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
 		return;
 
-	if (elems.mesh_id && elems.mesh_config &&
-	    mesh_matches_local(sdata, &elems))
+	if (mesh_matches_local(sdata, &elems))
 		mesh_neighbour_update(sdata, mgmt->sa, &elems);
 
 	if (ifmsh->sync_ops)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 25d0f17..7c9215f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -19,20 +19,6 @@
 /* Data structures */
 
 /**
- * enum mesh_config_capab_flags - mesh config IE capability flags
- *
- * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
- * additional mesh peerings with other mesh STAs
- * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
- * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
- */
-enum mesh_config_capab_flags {
-	MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
-	MESHCONF_CAPAB_FORWARDING = BIT(3),
-	MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
-};
-
-/**
  * enum mesh_path_flags - mac80211 mesh path flags
  *
  *
@@ -256,7 +242,7 @@
 void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
-struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
+const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
 
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -324,7 +310,7 @@
 static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
 {
 	return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
-	       atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
+	       atomic_read(&sdata->u.mesh.estab_plinks);
 }
 
 static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3ab34d8..ca52dfd 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,12 +19,6 @@
 #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
 				jiffies + HZ * t / 1000))
 
-#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
-#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
-#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
-#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
-#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
-
 /* We only need a valid sta if user configured a minimum rssi_threshold. */
 #define rssi_threshold_check(sta, sdata) \
 		(sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\
@@ -50,14 +44,14 @@
 static inline
 u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
-	atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
+	atomic_inc(&sdata->u.mesh.estab_plinks);
 	return mesh_accept_plinks_update(sdata);
 }
 
 static inline
 u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
-	atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
+	atomic_dec(&sdata->u.mesh.estab_plinks);
 	return mesh_accept_plinks_update(sdata);
 }
 
@@ -117,7 +111,7 @@
 	u16 ht_opmode;
 	bool non_ht_sta = false, ht20_sta = false;
 
-	if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
+	if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
 		return 0;
 
 	rcu_read_lock();
@@ -126,14 +120,14 @@
 		    sta->plink_state != NL80211_PLINK_ESTAB)
 			continue;
 
-		switch (sta->ch_type) {
-		case NL80211_CHAN_NO_HT:
+		switch (sta->ch_width) {
+		case NL80211_CHAN_WIDTH_20_NOHT:
 			mpl_dbg(sdata,
 				"mesh_plink %pM: nonHT sta (%pM) is present\n",
 				sdata->vif.addr, sta->sta.addr);
 			non_ht_sta = true;
 			goto out;
-		case NL80211_CHAN_HT20:
+		case NL80211_CHAN_WIDTH_20:
 			mpl_dbg(sdata,
 				"mesh_plink %pM: HT20 sta (%pM) is present\n",
 				sdata->vif.addr, sta->sta.addr);
@@ -148,7 +142,7 @@
 	if (non_ht_sta)
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
 	else if (ht20_sta &&
-		 sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
+		 sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
 	else
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -252,6 +246,8 @@
 	mgmt->u.action.u.self_prot.action_code = action;
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+		enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+
 		/* capability info */
 		pos = skb_put(skb, 2);
 		memset(pos, 0, 2);
@@ -260,10 +256,8 @@
 			pos = skb_put(skb, 2);
 			memcpy(pos + 2, &plid, 2);
 		}
-		if (ieee80211_add_srates_ie(sdata, skb, true,
-					    local->oper_channel->band) ||
-		    ieee80211_add_ext_srates_ie(sdata, skb, true,
-						local->oper_channel->band) ||
+		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
 		    mesh_add_meshid_ie(skb, sdata) ||
 		    mesh_add_meshconf_ie(skb, sdata))
@@ -343,7 +337,7 @@
 				       struct ieee802_11_elems *elems)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum ieee80211_band band = local->oper_channel->band;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u32 rates, basic_rates = 0;
 	struct sta_info *sta;
@@ -378,7 +372,7 @@
 
 	sta->sta.supp_rates[band] = rates;
 	if (elems->ht_cap_elem &&
-	    sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
+	    sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 						  elems->ht_cap_elem,
 						  &sta->sta.ht_cap);
@@ -386,12 +380,15 @@
 		memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
 
 	if (elems->ht_operation) {
+		struct cfg80211_chan_def chandef;
+
 		if (!(elems->ht_operation->ht_param &
 		      IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
 			sta->sta.ht_cap.cap &=
 					    ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-		sta->ch_type =
-			ieee80211_ht_oper_to_channel_type(elems->ht_operation);
+		ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
+					     elems->ht_operation, &chandef);
+		sta->ch_width = chandef.width;
 	}
 
 	rate_control_rate_init(sta);
@@ -430,6 +427,7 @@
 	struct sta_info *sta;
 	__le16 llid, plid, reason;
 	struct ieee80211_sub_if_data *sdata;
+	struct mesh_config *mshcfg;
 
 	/*
 	 * This STA is valid because sta_info_destroy() will
@@ -456,12 +454,13 @@
 	llid = sta->llid;
 	plid = sta->plid;
 	sdata = sta->sdata;
+	mshcfg = &sdata->u.mesh.mshcfg;
 
 	switch (sta->plink_state) {
 	case NL80211_PLINK_OPN_RCVD:
 	case NL80211_PLINK_OPN_SNT:
 		/* retry timer */
-		if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
+		if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
 			u32 rand;
 			mpl_dbg(sta->sdata,
 				"Mesh plink for %pM (retry, timeout): %d %d\n",
@@ -484,7 +483,7 @@
 		if (!reason)
 			reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
 		sta->plink_state = NL80211_PLINK_HOLDING;
-		mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
+		mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 		spin_unlock_bh(&sta->lock);
 		mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 				    sta->sta.addr, llid, plid, reason);
@@ -543,7 +542,7 @@
 		return -EBUSY;
 	}
 	sta->plink_state = NL80211_PLINK_OPN_SNT;
-	mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
+	mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
 	spin_unlock_bh(&sta->lock);
 	mpl_dbg(sdata,
 		"Mesh plink: starting establishment with %pM\n",
@@ -570,6 +569,7 @@
 void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
 			 size_t len, struct ieee80211_rx_status *rx_status)
 {
+	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	struct ieee802_11_elems elems;
 	struct sta_info *sta;
 	enum plink_event event;
@@ -777,7 +777,8 @@
 			sta->plid = plid;
 			get_random_bytes(&llid, 2);
 			sta->llid = llid;
-			mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
+			mesh_plink_timer_set(sta,
+					     mshcfg->dot11MeshRetryTimeout);
 			spin_unlock_bh(&sta->lock);
 			mesh_plink_frame_tx(sdata,
 					    WLAN_SP_MESH_PEERING_OPEN,
@@ -803,7 +804,7 @@
 			sta->reason = reason;
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			if (!mod_plink_timer(sta,
-					     dot11MeshHoldingTimeout(sdata)))
+					     mshcfg->dot11MeshHoldingTimeout))
 				sta->ignore_plink_timer = true;
 
 			llid = sta->llid;
@@ -825,7 +826,7 @@
 		case CNF_ACPT:
 			sta->plink_state = NL80211_PLINK_CNF_RCVD;
 			if (!mod_plink_timer(sta,
-					     dot11MeshConfirmTimeout(sdata)))
+					     mshcfg->dot11MeshConfirmTimeout))
 				sta->ignore_plink_timer = true;
 
 			spin_unlock_bh(&sta->lock);
@@ -847,7 +848,7 @@
 			sta->reason = reason;
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			if (!mod_plink_timer(sta,
-					     dot11MeshHoldingTimeout(sdata)))
+					     mshcfg->dot11MeshHoldingTimeout))
 				sta->ignore_plink_timer = true;
 
 			llid = sta->llid;
@@ -888,7 +889,7 @@
 			sta->reason = reason;
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			if (!mod_plink_timer(sta,
-					     dot11MeshHoldingTimeout(sdata)))
+					     mshcfg->dot11MeshHoldingTimeout))
 				sta->ignore_plink_timer = true;
 
 			llid = sta->llid;
@@ -923,7 +924,7 @@
 			changed |= __mesh_plink_deactivate(sta);
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			llid = sta->llid;
-			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
+			mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 			spin_unlock_bh(&sta->lock);
 			changed |= mesh_set_ht_prot_mode(sdata);
 			mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index a16b7b4..0f40086 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -43,7 +43,7 @@
 static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
 {
 	return (ie->mesh_config->meshconf_cap &
-	    MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
+	    IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
 }
 
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
@@ -116,43 +116,13 @@
 		goto no_sync;
 	}
 
-	if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
-		/*
-		 * The mactime is defined as the time the first data symbol
-		 * of the frame hits the PHY, and the timestamp of the beacon
-		 * is defined as "the time that the data symbol containing the
-		 * first bit of the timestamp is transmitted to the PHY plus
-		 * the transmitting STA's delays through its local PHY from the
-		 * MAC-PHY interface to its interface with the WM" (802.11
-		 * 11.1.2)
-		 *
-		 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
-		 * time" but we unless we interpret that time to be the same
-		 * time of the beacon timestamp, the offset calculation will be
-		 * off.  Below we adjust t_r to be "the time at which the first
-		 * symbol of the timestamp element in the beacon is received".
-		 * This correction depends on the rate.
-		 *
-		 * Based on similar code in ibss.c
-		 */
-		int rate;
-
-		if (rx_status->flag & RX_FLAG_HT) {
-			/* TODO:
-			 * In principle there could be HT-beacons (Dual Beacon
-			 * HT Operation options), but for now ignore them and
-			 * just use the primary (i.e. non-HT) beacons for
-			 * synchronization.
-			 * */
-			goto no_sync;
-		} else
-			rate = local->hw.wiphy->bands[rx_status->band]->
-				bitrates[rx_status->rate_idx].bitrate;
-
-		/* 24 bytes of header * 8 bits/byte *
-		 * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/
-		t_r = rx_status->mactime + (24 * 8 * 10 / rate);
-	}
+	if (ieee80211_have_rx_timestamp(rx_status))
+		/* time when timestamp field was received */
+		t_r = ieee80211_calculate_rx_timestamp(local, rx_status,
+						       24 + 12 +
+						       elems->total_len +
+						       FCS_LEN,
+						       24);
 
 	/* Timing offset calculation (see 13.13.2.2.2) */
 	t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
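
For context on the arithmetic being replaced: the old code advanced the reported mactime by the airtime of the 24-byte MAC header, 24 * 8 * 10 / rate microseconds with the bitrate in 100 kbps units, while the new ieee80211_calculate_rx_timestamp() call is handed the full MPDU length (24-byte header + 12 fixed beacon fields + elements + FCS) and an offset of 24 so it can return the reception time of the timestamp field. A quick check of the old conversion (rates chosen only for illustration):

#include <stdio.h>

/* Airtime of the 24-byte MAC header in microseconds, for a bitrate
 * given in units of 100 kbps. */
static unsigned int hdr_airtime_us(unsigned int bitrate_100kbps)
{
	return 24 * 8 * 10 / bitrate_100kbps;
}

int main(void)
{
	printf("1 Mbps beacon:  %u us\n", hdr_airtime_us(10));	/* 192 us */
	printf("6 Mbps beacon:  %u us\n", hdr_airtime_us(60));	/* 32 us  */
	return 0;
}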
@@ -234,49 +204,7 @@
 	spin_unlock_bh(&ifmsh->sync_offset_lock);
 }
 
-static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	u8 offset;
-
-	if (!ifmsh->ie || !ifmsh->ie_len)
-		return NULL;
-
-	offset = ieee80211_ie_split_vendor(ifmsh->ie,
-					ifmsh->ie_len, 0);
-
-	if (!offset)
-		return NULL;
-
-	return ifmsh->ie + offset + 2;
-}
-
-static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
-				   u16 stype,
-				   struct ieee80211_mgmt *mgmt,
-				   struct ieee802_11_elems *elems,
-				   struct ieee80211_rx_status *rx_status)
-{
-	const u8 *oui;
-
-	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
-	msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
-	oui = mesh_get_vendor_oui(sdata);
-	/*  here you would implement the vendor offset tracking for this oui */
-}
-
-static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
-{
-	const u8 *oui;
-
-	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
-	msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
-	oui = mesh_get_vendor_oui(sdata);
-	/*  here you would implement the vendor tsf adjustment for this oui */
-}
-
-/* global variable */
-static struct sync_method sync_methods[] = {
+static const struct sync_method sync_methods[] = {
 	{
 		.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
 		.ops = {
@@ -284,18 +212,11 @@
 			.adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
 		}
 	},
-	{
-		.method = IEEE80211_SYNC_METHOD_VENDOR,
-		.ops = {
-			.rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
-			.adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
-		}
-	},
 };
 
-struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
+const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
 {
-	struct ieee80211_mesh_sync_ops *ops = NULL;
+	const struct ieee80211_mesh_sync_ops *ops = NULL;
 	u8 i;
 
 	for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1b7eed2..d2a4f78 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -178,20 +178,32 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
 	struct sta_info *sta;
 	u32 changed = 0;
 	u16 ht_opmode;
 	bool disable_40 = false;
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	chan = chanctx_conf->def.chan;
+	rcu_read_unlock();
+	sband = local->hw.wiphy->bands[chan->band];
 
-	switch (sdata->vif.bss_conf.channel_type) {
-	case NL80211_CHAN_HT40PLUS:
-		if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+	switch (sdata->vif.bss_conf.chandef.width) {
+	case NL80211_CHAN_WIDTH_40:
+		if (sdata->vif.bss_conf.chandef.chan->center_freq >
+				sdata->vif.bss_conf.chandef.center_freq1 &&
+		    chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
 			disable_40 = true;
-		break;
-	case NL80211_CHAN_HT40MINUS:
-		if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+		if (sdata->vif.bss_conf.chandef.chan->center_freq <
+				sdata->vif.bss_conf.chandef.center_freq1 &&
+		    chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
 			disable_40 = true;
 		break;
 	default:
@@ -343,7 +355,7 @@
 	cap = vht_cap.cap;
 
 	/* reserve and fill IE */
-	pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+	pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
 	ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
 }
 
@@ -359,11 +371,21 @@
 	int i, count, rates_len, supp_rates_len;
 	u16 capab;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
 	u32 rates = 0;
 
 	lockdep_assert_held(&ifmgd->mtx);
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return;
+	}
+	chan = chanctx_conf->def.chan;
+	rcu_read_unlock();
+	sband = local->hw.wiphy->bands[chan->band];
 
 	if (assoc_data->supp_rates_len) {
 		/*
@@ -392,7 +414,7 @@
 			4 + /* power capability */
 			2 + 2 * sband->n_channels + /* supported channels */
 			2 + sizeof(struct ieee80211_ht_cap) + /* HT */
-			2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
+			2 + sizeof(struct ieee80211_vht_cap) + /* VHT */
 			assoc_data->ie_len + /* extra IEs */
 			9, /* WMM */
 			GFP_KERNEL);
@@ -485,7 +507,7 @@
 		*pos++ = WLAN_EID_PWR_CAPABILITY;
 		*pos++ = 2;
 		*pos++ = 0; /* min tx power */
-		*pos++ = local->oper_channel->max_power; /* max tx power */
+		*pos++ = chan->max_power; /* max tx power */
 
 		/* 2. supported channels */
 		/* TODO: get this in reg domain format */
@@ -521,9 +543,9 @@
 		offset = noffset;
 	}
 
-	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
+	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
 		ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
-				    sband, local->oper_channel, ifmgd->ap_smps);
+				    sband, chan, sdata->smps_mode);
 
 	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 		ieee80211_add_vht_ie(sdata, skb, sband);
@@ -657,18 +679,18 @@
 	if (!ifmgd->associated)
 		goto out;
 
-	sdata->local->oper_channel = sdata->local->csa_channel;
+	sdata->local->_oper_channel = sdata->local->csa_channel;
 	if (!sdata->local->ops->channel_switch) {
 		/* call "hw_config" only if doing sw channel switch */
 		ieee80211_hw_config(sdata->local,
 			IEEE80211_CONF_CHANGE_CHANNEL);
 	} else {
 		/* update the device channel directly */
-		sdata->local->hw.conf.channel = sdata->local->oper_channel;
+		sdata->local->hw.conf.channel = sdata->local->_oper_channel;
 	}
 
 	/* XXX: shouldn't really modify cfg80211-owned data! */
-	ifmgd->associated->channel = sdata->local->oper_channel;
+	ifmgd->associated->channel = sdata->local->_oper_channel;
 
 	/* XXX: wait for a beacon first? */
 	ieee80211_wake_queues_by_reason(&sdata->local->hw,
@@ -680,11 +702,8 @@
 
 void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
 {
-	struct ieee80211_sub_if_data *sdata;
-	struct ieee80211_if_managed *ifmgd;
-
-	sdata = vif_to_sdata(vif);
-	ifmgd = &sdata->u.mgd;
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
 	trace_api_chswitch_done(sdata, success);
 	if (!success) {
@@ -723,6 +742,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
 						      cbss->channel->band);
+	struct ieee80211_chanctx *chanctx;
 
 	ASSERT_MGD_MTX(ifmgd);
 
@@ -748,10 +768,34 @@
 		return;
 	}
 
-	sdata->local->csa_channel = new_ch;
-
 	ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
 
+	if (sdata->local->use_chanctx) {
+		sdata_info(sdata,
+			   "not handling channel switch with channel contexts\n");
+		ieee80211_queue_work(&sdata->local->hw,
+				     &ifmgd->csa_connection_drop_work);
+	}
+
+	mutex_lock(&sdata->local->chanctx_mtx);
+	if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
+		mutex_unlock(&sdata->local->chanctx_mtx);
+		return;
+	}
+	chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
+			       struct ieee80211_chanctx, conf);
+	if (chanctx->refcount > 1) {
+		sdata_info(sdata,
+			   "channel switch with multiple interfaces on the same channel, disconnecting\n");
+		ieee80211_queue_work(&sdata->local->hw,
+				     &ifmgd->csa_connection_drop_work);
+		mutex_unlock(&sdata->local->chanctx_mtx);
+		return;
+	}
+	mutex_unlock(&sdata->local->chanctx_mtx);
+
+	sdata->local->csa_channel = new_ch;
+
 	if (sw_elem->mode)
 		ieee80211_stop_queues_by_reason(&sdata->local->hw,
 				IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -778,10 +822,10 @@
 					 cbss->beacon_interval));
 }
 
-static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
-					struct ieee80211_channel *channel,
-					const u8 *country_ie, u8 country_ie_len,
-					const u8 *pwr_constr_elem)
+static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_channel *channel,
+				       const u8 *country_ie, u8 country_ie_len,
+				       const u8 *pwr_constr_elem)
 {
 	struct ieee80211_country_ie_triplet *triplet;
 	int chan = ieee80211_frequency_to_channel(channel->center_freq);
@@ -790,7 +834,7 @@
 
 	/* Invalid IE */
 	if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
-		return;
+		return 0;
 
 	triplet = (void *)(country_ie + 3);
 	country_ie_len -= 3;
@@ -831,19 +875,21 @@
 	}
 
 	if (!have_chan_pwr)
-		return;
+		return 0;
 
 	new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
 
-	if (sdata->local->ap_power_level == new_ap_level)
-		return;
+	if (sdata->ap_power_level == new_ap_level)
+		return 0;
 
 	sdata_info(sdata,
 		   "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
 		   new_ap_level, chan_pwr, *pwr_constr_elem,
 		   sdata->u.mgd.bssid);
-	sdata->local->ap_power_level = new_ap_level;
-	ieee80211_hw_config(sdata->local, 0);
+	sdata->ap_power_level = new_ap_level;
+	if (__ieee80211_recalc_txpower(sdata))
+		return BSS_CHANGED_TXPOWER;
+	return 0;
 }
 
 void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
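
The reworked ieee80211_handle_pwr_constr() derives the AP power limit as max(0, channel power from the country IE minus the power constraint), stores it per interface, and returns BSS_CHANGED_TXPOWER only when the recalculation changed the configured value. A tiny worked example of the subtraction (numbers invented for illustration):

#include <stdio.h>

static int ap_power_level(int chan_pwr_dbm, int pwr_constr_db)
{
	int level = chan_pwr_dbm - pwr_constr_db;

	return level > 0 ? level : 0;	/* clamp at 0, as max_t(int, 0, ...) does */
}

int main(void)
{
	/* country IE allows 20 dBm, power constraint element says 3 dB */
	printf("AP power level: %d dBm\n", ap_power_level(20, 3));	/* 17 dBm */
	return 0;
}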
@@ -1280,7 +1326,7 @@
 	}
 
 	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-	if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
+	if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ)
 		use_short_slot = true;
 
 	if (use_protection != bss_conf->use_cts_prot) {
@@ -1321,6 +1367,22 @@
 
 	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
+	if (sdata->vif.p2p) {
+		u8 noa[2];
+		int ret;
+
+		ret = cfg80211_get_p2p_attr(cbss->information_elements,
+					    cbss->len_information_elements,
+					    IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+					    noa, sizeof(noa));
+		if (ret >= 2) {
+			bss_conf->p2p_oppps = noa[1] & 0x80;
+			bss_conf->p2p_ctwindow = noa[1] & 0x7f;
+			bss_info_changed |= BSS_CHANGED_P2P_PS;
+			sdata->u.mgd.p2p_noa_index = noa[0];
+		}
+	}
+
 	/* just to be sure */
 	ieee80211_stop_poll(sdata);
 
@@ -1350,7 +1412,7 @@
 	ieee80211_recalc_ps(local, -1);
 	mutex_unlock(&local->iflist_mtx);
 
-	ieee80211_recalc_smps(local);
+	ieee80211_recalc_smps(sdata);
 	ieee80211_recalc_ps_vif(sdata);
 
 	netif_tx_start_all_queues(sdata->dev);
@@ -1443,11 +1505,14 @@
 	changed |= BSS_CHANGED_ASSOC;
 	sdata->vif.bss_conf.assoc = false;
 
+	sdata->vif.bss_conf.p2p_ctwindow = 0;
+	sdata->vif.bss_conf.p2p_oppps = false;
+
 	/* on the next assoc, re-program HT parameters */
 	memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
 	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
-	local->ap_power_level = 0;
+	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
 	del_timer_sync(&local->dynamic_ps_timer);
 	cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -1465,10 +1530,6 @@
 	changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
 	ieee80211_bss_info_change_notify(sdata, changed);
 
-	/* channel(_type) changes are handled by ieee80211_hw_config */
-	WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
-	ieee80211_hw_config(local, 0);
-
 	/* disassociated - set to defaults now */
 	ieee80211_set_wmm_default(sdata, false);
 
@@ -1478,6 +1539,9 @@
 	del_timer_sync(&sdata->u.mgd.chswitch_timer);
 
 	sdata->u.mgd.timers_running = 0;
+
+	ifmgd->flags = 0;
+	ieee80211_vif_release_channel(sdata);
 }
 
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1589,7 +1653,7 @@
 
 		ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
 					 0, (u32) -1, true, false,
-					 ifmgd->associated->channel);
+					 ifmgd->associated->channel, false);
 	}
 
 	ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1692,8 +1756,7 @@
 		ssid_len = ssid[1];
 
 	skb = ieee80211_build_probe_req(sdata, cbss->bssid,
-					(u32) -1,
-					sdata->local->oper_channel,
+					(u32) -1, cbss->channel,
 					ssid + 2, ssid_len,
 					NULL, 0, true);
 
@@ -1804,6 +1867,8 @@
 
 		memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+		sdata->u.mgd.flags = 0;
+		ieee80211_vif_release_channel(sdata);
 	}
 
 	cfg80211_put_bss(auth_data->bss);
@@ -1824,7 +1889,7 @@
 		return;
 	auth_data->expected_transaction = 4;
 	drv_mgd_prepare_tx(sdata->local, sdata);
-	ieee80211_send_auth(sdata, 3, auth_data->algorithm,
+	ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
 			    elems.challenge - 2, elems.challenge_len + 2,
 			    auth_data->bss->bssid, auth_data->bss->bssid,
 			    auth_data->key, auth_data->key_len,
@@ -1858,8 +1923,13 @@
 	status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
 	if (auth_alg != ifmgd->auth_data->algorithm ||
-	    auth_transaction != ifmgd->auth_data->expected_transaction)
+	    auth_transaction != ifmgd->auth_data->expected_transaction) {
+		sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n",
+			   mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
+			   auth_transaction,
+			   ifmgd->auth_data->expected_transaction);
 		return RX_MGMT_NONE;
+	}
 
 	if (status_code != WLAN_STATUS_SUCCESS) {
 		sdata_info(sdata, "%pM denied authentication (status %d)\n",
@@ -1872,6 +1942,7 @@
 	case WLAN_AUTH_OPEN:
 	case WLAN_AUTH_LEAP:
 	case WLAN_AUTH_FT:
+	case WLAN_AUTH_SAE:
 		break;
 	case WLAN_AUTH_SHARED_KEY:
 		if (ifmgd->auth_data->expected_transaction != 4) {
@@ -1891,6 +1962,15 @@
 	ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
 	run_again(ifmgd, ifmgd->auth_data->timeout);
 
+	if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
+	    ifmgd->auth_data->expected_transaction != 2) {
+		/*
+		 * Report auth frame to user space for processing since another
+		 * round of Authentication frames is still needed.
+		 */
+		return RX_MGMT_CFG80211_RX_AUTH;
+	}
+
 	/* move station state to auth */
 	mutex_lock(&sdata->local->sta_mtx);
 	sta = sta_info_get(sdata, bssid);
@@ -2030,6 +2110,8 @@
 
 		memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+		sdata->u.mgd.flags = 0;
+		ieee80211_vif_release_channel(sdata);
 	}
 
 	kfree(assoc_data);
@@ -2091,15 +2173,20 @@
 		return false;
 	}
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
 
-	if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
+	if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 				elems.ht_cap_elem, &sta->sta.ht_cap);
 
 	sta->supports_40mhz =
 		sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 
+	if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+						    elems.vht_cap_elem,
+						    &sta->sta.vht_cap);
+
 	rate_control_rate_init(sta);
 
 	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
@@ -2140,7 +2227,7 @@
 	changed |= BSS_CHANGED_QOS;
 
 	if (elems.ht_operation && elems.wmm_param &&
-	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
+	    !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
 		changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
 						  cbss->bssid, false);
 
@@ -2369,8 +2456,10 @@
 	size_t baselen;
 	struct ieee802_11_elems elems;
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
 	u32 changed = 0;
-	bool erp_valid, directed_tim = false;
+	bool erp_valid;
 	u8 erp_value = 0;
 	u32 ncrc;
 	u8 *bssid;
@@ -2382,8 +2471,19 @@
 	if (baselen > len)
 		return;
 
-	if (rx_status->freq != local->oper_channel->center_freq)
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf) {
+		rcu_read_unlock();
 		return;
+	}
+
+	if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
+		rcu_read_unlock();
+		return;
+	}
+	chan = chanctx_conf->def.chan;
+	rcu_read_unlock();
 
 	if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
 	    ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
@@ -2490,11 +2590,10 @@
 					  len - baselen, &elems,
 					  care_about_ies, ncrc);
 
-	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
-		directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
-						   ifmgd->aid);
-
 	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
+		bool directed_tim = ieee80211_check_tim(elems.tim,
+							elems.tim_len,
+							ifmgd->aid);
 		if (directed_tim) {
 			if (local->hw.conf.dynamic_ps_timeout > 0) {
 				if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -2519,6 +2618,27 @@
 		}
 	}
 
+	if (sdata->vif.p2p) {
+		u8 noa[2];
+		int ret;
+
+		ret = cfg80211_get_p2p_attr(mgmt->u.beacon.variable,
+					    len - baselen,
+					    IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+					    noa, sizeof(noa));
+		if (ret >= 2 && sdata->u.mgd.p2p_noa_index != noa[0]) {
+			bss_conf->p2p_oppps = noa[1] & 0x80;
+			bss_conf->p2p_ctwindow = noa[1] & 0x7f;
+			changed |= BSS_CHANGED_P2P_PS;
+			sdata->u.mgd.p2p_noa_index = noa[0];
+			/*
+			 * make sure we update all information, the CRC
+			 * mechanism doesn't look at P2P attributes.
+			 */
+			ifmgd->beacon_crc_valid = false;
+		}
+	}
+
 	if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
 		return;
 	ifmgd->beacon_crc = ncrc;
@@ -2543,22 +2663,17 @@
 
 
 	if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
-	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
-		struct ieee80211_supported_band *sband;
-
-		sband = local->hw.wiphy->bands[local->oper_channel->band];
-
+	    !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
 		changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
 						  bssid, true);
-	}
 
 	if (elems.country_elem && elems.pwr_constr_elem &&
 	    mgmt->u.probe_resp.capab_info &
 				cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
-		ieee80211_handle_pwr_constr(sdata, local->oper_channel,
-					    elems.country_elem,
-					    elems.country_elem_len,
-					    elems.pwr_constr_elem);
+		changed |= ieee80211_handle_pwr_constr(sdata, chan,
+						       elems.country_elem,
+						       elems.country_elem_len,
+						       elems.pwr_constr_elem);
 
 	ieee80211_bss_info_change_notify(sdata, changed);
 }
@@ -2703,13 +2818,23 @@
 	drv_mgd_prepare_tx(local, sdata);
 
 	if (auth_data->bss->proberesp_ies) {
+		u16 trans = 1;
+		u16 status = 0;
+
 		sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
 			   auth_data->bss->bssid, auth_data->tries,
 			   IEEE80211_AUTH_MAX_TRIES);
 
 		auth_data->expected_transaction = 2;
-		ieee80211_send_auth(sdata, 1, auth_data->algorithm,
-				    auth_data->ie, auth_data->ie_len,
+
+		if (auth_data->algorithm == WLAN_AUTH_SAE) {
+			trans = auth_data->sae_trans;
+			status = auth_data->sae_status;
+			auth_data->expected_transaction = trans;
+		}
+
+		ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
+				    auth_data->data, auth_data->data_len,
 				    auth_data->bss->bssid,
 				    auth_data->bss->bssid, NULL, 0, 0);
 	} else {
@@ -2728,7 +2853,7 @@
 		 */
 		ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
 					 NULL, 0, (u32) -1, true, false,
-					 auth_data->bss->channel);
+					 auth_data->bss->channel, false);
 	}
 
 	auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -3068,6 +3193,7 @@
 	const u8 *ht_oper_ie;
 	const struct ieee80211_ht_operation *ht_oper = NULL;
 	struct ieee80211_supported_band *sband;
+	struct cfg80211_chan_def chandef;
 
 	sband = local->hw.wiphy->bands[cbss->channel->band];
 
@@ -3099,49 +3225,64 @@
 				   ht_cfreq, ht_oper->primary_chan,
 				   cbss->channel->band);
 			ht_oper = NULL;
-		} else {
-			channel_type = NL80211_CHAN_HT20;
 		}
 	}
 
-	if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+	if (ht_oper) {
 		/*
 		 * cfg80211 already verified that the channel itself can
 		 * be used, but it didn't check that we can do the right
 		 * HT type, so do that here as well. If HT40 isn't allowed
 		 * on this channel, disable 40 MHz operation.
 		 */
+		const u8 *ht_cap_ie;
+		const struct ieee80211_ht_cap *ht_cap;
+		u8 chains = 1;
 
-		switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-			if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
-				ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
-			else
-				channel_type = NL80211_CHAN_HT40PLUS;
-			break;
-		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-			if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
-				ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
-			else
-				channel_type = NL80211_CHAN_HT40MINUS;
-			break;
-		}
-	}
-
-	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
-		/* can only fail due to HT40+/- mismatch */
 		channel_type = NL80211_CHAN_HT20;
-		sdata_info(sdata,
-			   "disabling 40 MHz due to multi-vif mismatch\n");
-		ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
-		WARN_ON(!ieee80211_set_channel_type(local, sdata,
-						    channel_type));
+
+		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+			switch (ht_oper->ht_param &
+					IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				if (cbss->channel->flags &
+						IEEE80211_CHAN_NO_HT40PLUS)
+					ifmgd->flags |=
+						IEEE80211_STA_DISABLE_40MHZ;
+				else
+					channel_type = NL80211_CHAN_HT40PLUS;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				if (cbss->channel->flags &
+						IEEE80211_CHAN_NO_HT40MINUS)
+					ifmgd->flags |=
+						IEEE80211_STA_DISABLE_40MHZ;
+				else
+					channel_type = NL80211_CHAN_HT40MINUS;
+				break;
+			}
+		}
+
+		ht_cap_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY,
+					     cbss->information_elements,
+					     cbss->len_information_elements);
+		if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) {
+			ht_cap = (void *)(ht_cap_ie + 2);
+			chains = ieee80211_mcs_to_chains(&ht_cap->mcs);
+		}
+		sdata->needed_rx_chains = min(chains, local->rx_chains);
+	} else {
+		sdata->needed_rx_chains = 1;
+		sdata->u.mgd.flags |= IEEE80211_STA_DISABLE_HT;
 	}
 
-	local->oper_channel = cbss->channel;
-	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+	/* will change later if needed */
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
 
-	return 0;
+	ieee80211_vif_release_channel(sdata);
+	cfg80211_chandef_create(&chandef, cbss->channel, channel_type);
+	return ieee80211_vif_use_channel(sdata, &chandef,
+					 IEEE80211_CHANCTX_SHARED);
 }
 
 static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
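The reworked block above defaults to HT20 and only upgrades to HT40+ or HT40- when the secondary-channel offset in the HT operation element points at an extension channel the regulatory flags allow; otherwise it records the disable-40MHz flag. A compact sketch of that decision as a pure function; the constants and enum below are local to the sketch and mirror the structure of the logic, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

enum ht_width { HT20, HT40_PLUS, HT40_MINUS };

/* illustrative values/flags, not the kernel's definitions */
#define SEC_OFFSET_ABOVE  1
#define SEC_OFFSET_BELOW  3
#define CHAN_NO_HT40PLUS  (1 << 0)
#define CHAN_NO_HT40MINUS (1 << 1)

static enum ht_width pick_ht_width(int sec_offset, unsigned int chan_flags,
				   bool *disable_40mhz)
{
	*disable_40mhz = false;

	switch (sec_offset) {
	case SEC_OFFSET_ABOVE:
		if (chan_flags & CHAN_NO_HT40PLUS)
			*disable_40mhz = true;
		else
			return HT40_PLUS;
		break;
	case SEC_OFFSET_BELOW:
		if (chan_flags & CHAN_NO_HT40MINUS)
			*disable_40mhz = true;
		else
			return HT40_MINUS;
		break;
	}
	return HT20;	/* default when no usable extension channel */
}

int main(void)
{
	bool disabled;
	enum ht_width w = pick_ht_width(SEC_OFFSET_ABOVE, CHAN_NO_HT40PLUS, &disabled);

	printf("width=%d disable_40mhz=%d\n", w, disabled);
	return 0;
}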
@@ -3211,7 +3352,7 @@
 		sdata->vif.bss_conf.basic_rates = basic_rates;
 
 		/* cf. IEEE 802.11 9.2.12 */
-		if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
+		if (cbss->channel->band == IEEE80211_BAND_2GHZ &&
 		    have_higher_than_11mbit)
 			sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
 		else
@@ -3273,19 +3414,33 @@
 	case NL80211_AUTHTYPE_NETWORK_EAP:
 		auth_alg = WLAN_AUTH_LEAP;
 		break;
+	case NL80211_AUTHTYPE_SAE:
+		auth_alg = WLAN_AUTH_SAE;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL);
+	auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
+			    req->ie_len, GFP_KERNEL);
 	if (!auth_data)
 		return -ENOMEM;
 
 	auth_data->bss = req->bss;
 
+	if (req->sae_data_len >= 4) {
+		__le16 *pos = (__le16 *) req->sae_data;
+		auth_data->sae_trans = le16_to_cpu(pos[0]);
+		auth_data->sae_status = le16_to_cpu(pos[1]);
+		memcpy(auth_data->data, req->sae_data + 4,
+		       req->sae_data_len - 4);
+		auth_data->data_len += req->sae_data_len - 4;
+	}
+
 	if (req->ie && req->ie_len) {
-		memcpy(auth_data->ie, req->ie, req->ie_len);
-		auth_data->ie_len = req->ie_len;
+		memcpy(&auth_data->data[auth_data->data_len],
+		       req->ie, req->ie_len);
+		auth_data->data_len += req->ie_len;
 	}
 
 	if (req->key && req->key_len) {
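The SAE handling above treats the first four octets of the user-supplied SAE data as two little-endian 16-bit fields, transaction number followed by status code, and appends only the remaining payload to auth_data->data ahead of any extra IEs. A small standalone sketch of that split, with illustrative names and an explicit little-endian decode so it does not depend on host byte order.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* returns number of payload bytes copied, or -1 if the input is too short */
static int split_sae_data(const uint8_t *sae, size_t sae_len,
			  uint16_t *trans, uint16_t *status,
			  uint8_t *payload, size_t payload_cap)
{
	if (sae_len < 4)
		return -1;

	*trans = get_le16(sae);		/* SAE transaction sequence number */
	*status = get_le16(sae + 2);	/* SAE status code */

	if (sae_len - 4 > payload_cap)
		return -1;
	memcpy(payload, sae + 4, sae_len - 4);
	return (int)(sae_len - 4);
}

int main(void)
{
	const uint8_t sae[] = { 0x01, 0x00, 0x00, 0x00, 0xaa, 0xbb };
	uint8_t payload[16];
	uint16_t trans, status;
	int n = split_sae_data(sae, sizeof(sae), &trans, &status,
			       payload, sizeof(payload));

	printf("trans=%u status=%u payload_len=%d\n", trans, status, n);
	return 0;
}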
@@ -3388,13 +3543,6 @@
 
 	/* prepare assoc data */
 	
-	/*
-	 * keep only the 40 MHz disable bit set as it might have
-	 * been set during authentication already, all other bits
-	 * should be reset for a new connection
-	 */
-	ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
-
 	ifmgd->beacon_crc_valid = false;
 
 	/*
@@ -3408,7 +3556,7 @@
 		if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
-			ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+			ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
 			ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
 			netdev_info(sdata->dev,
 				    "disabling HT/VHT due to WEP/TKIP use\n");
@@ -3416,7 +3564,7 @@
 	}
 
 	if (req->flags & ASSOC_REQ_DISABLE_HT) {
-		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
 		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
 	}
 
@@ -3424,7 +3572,7 @@
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
 	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
-		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
 		if (!bss->wmm_used)
 			netdev_info(sdata->dev,
 				    "disabling HT as WMM/QoS is not supported by the AP\n");
@@ -3452,11 +3600,11 @@
 
 	if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
 		if (ifmgd->powersave)
-			ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
+			sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
 		else
-			ifmgd->ap_smps = IEEE80211_SMPS_OFF;
+			sdata->smps_mode = IEEE80211_SMPS_OFF;
 	} else
-		ifmgd->ap_smps = ifmgd->req_smps;
+		sdata->smps_mode = ifmgd->req_smps;
 
 	assoc_data->capability = req->bss->capability;
 	assoc_data->wmm = bss->wmm_used &&
@@ -3469,7 +3617,7 @@
 		assoc_data->ap_ht_param =
 			((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
 	else
-		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
 
 	if (bss->wmm_used && bss->uapsd_supported &&
 	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3560,40 +3708,44 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 	bool tx = !req->local_state_change;
+	bool sent_frame = false;
 
 	mutex_lock(&ifmgd->mtx);
 
-	if (ifmgd->auth_data) {
-		ieee80211_destroy_auth_data(sdata, false);
-		mutex_unlock(&ifmgd->mtx);
-		return 0;
-	}
-
 	sdata_info(sdata,
 		   "deauthenticating from %pM by local choice (reason=%d)\n",
 		   req->bssid, req->reason_code);
 
-	if (ifmgd->associated &&
-	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
-		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
-				       req->reason_code, tx, frame_buf);
-	} else {
+	if (ifmgd->auth_data) {
 		drv_mgd_prepare_tx(sdata->local, sdata);
 		ieee80211_send_deauth_disassoc(sdata, req->bssid,
 					       IEEE80211_STYPE_DEAUTH,
 					       req->reason_code, tx,
 					       frame_buf);
+		ieee80211_destroy_auth_data(sdata, false);
+		mutex_unlock(&ifmgd->mtx);
+
+		sent_frame = tx;
+		goto out;
 	}
 
+	if (ifmgd->associated &&
+	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
+		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+				       req->reason_code, tx, frame_buf);
+		sent_frame = tx;
+	}
 	mutex_unlock(&ifmgd->mtx);
 
-	__cfg80211_send_deauth(sdata->dev, frame_buf,
-			       IEEE80211_DEAUTH_FRAME_LEN);
-
+ out:
 	mutex_lock(&sdata->local->mtx);
 	ieee80211_recalc_idle(sdata->local);
 	mutex_unlock(&sdata->local->mtx);
 
+	if (sent_frame)
+		__cfg80211_send_deauth(sdata->dev, frame_buf,
+				       IEEE80211_DEAUTH_FRAME_LEN);
+
 	return 0;
 }
 
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 83608ac..5abddfe 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -107,6 +107,9 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
+	if (WARN_ON(local->use_chanctx))
+		return;
+
 	/*
 	 * notify the AP about us leaving the channel and stop all
 	 * STA interfaces.
@@ -145,6 +148,9 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
+	if (WARN_ON(local->use_chanctx))
+		return;
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
@@ -193,13 +199,14 @@
 
 	if (roc->mgmt_tx_cookie) {
 		if (!WARN_ON(!roc->frame)) {
-			ieee80211_tx_skb(roc->sdata, roc->frame);
+			ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
+						  roc->chan->band);
 			roc->frame = NULL;
 		}
 	} else {
-		cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc,
-					  roc->chan, roc->chan_type,
-					  roc->req_duration, GFP_KERNEL);
+		cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
+					  roc->chan, roc->req_duration,
+					  GFP_KERNEL);
 	}
 
 	roc->notified = true;
@@ -276,8 +283,7 @@
 		if (!duration)
 			duration = 10;
 
-		ret = drv_remain_on_channel(local, roc->chan,
-					    roc->chan_type,
+		ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
 					    duration);
 
 		roc->started = true;
@@ -313,8 +319,7 @@
 
 	if (!roc->mgmt_tx_cookie)
 		cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
-						   (unsigned long)roc,
-						   roc->chan, roc->chan_type,
+						   roc->cookie, roc->chan,
 						   GFP_KERNEL);
 
 	list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
@@ -353,7 +358,6 @@
 		ieee80211_recalc_idle(local);
 
 		local->tmp_channel = roc->chan;
-		local->tmp_channel_type = roc->chan_type;
 		ieee80211_hw_config(local, 0);
 
 		/* tell userspace or send frame */
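The offchannel hunks above report roc->cookie to cfg80211 instead of re-deriving an identifier from the object address at every call site, so all ready/expired events for one request carry the same value. A toy sketch of one way such cookies can be minted, using a plain counter; this is an assumption for illustration only, the hunks do not show how mac80211 actually assigns roc->cookie.

#include <stdint.h>
#include <stdio.h>

struct roc_request {              /* illustrative only */
	uint64_t cookie;          /* identifier reported back to userspace */
	unsigned int duration_ms;
};

static uint64_t next_cookie;      /* simple monotonic source for the sketch */

static void roc_request_init(struct roc_request *roc, unsigned int duration_ms)
{
	roc->cookie = ++next_cookie;      /* assigned once, reused for all events */
	roc->duration_ms = duration_ms;
}

int main(void)
{
	struct roc_request a, b;

	roc_request_init(&a, 100);
	roc_request_init(&b, 50);
	printf("a.cookie=%llu b.cookie=%llu\n",
	       (unsigned long long)a.cookie, (unsigned long long)b.cookie);
	return 0;
}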
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 5c572e7..79a48f3 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -33,6 +33,7 @@
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
+	struct ieee80211_chanctx *ctx;
 
 	if (!local->open_count)
 		goto suspend;
@@ -135,12 +136,55 @@
 		ieee80211_bss_info_change_notify(sdata,
 			BSS_CHANGED_BEACON_ENABLED);
 
+		if (sdata->vif.type == NL80211_IFTYPE_AP &&
+		    rcu_access_pointer(sdata->u.ap.beacon))
+			drv_stop_ap(local, sdata);
+
+		if (local->use_chanctx) {
+			struct ieee80211_chanctx_conf *conf;
+
+			mutex_lock(&local->chanctx_mtx);
+			conf = rcu_dereference_protected(
+					sdata->vif.chanctx_conf,
+					lockdep_is_held(&local->chanctx_mtx));
+			if (conf) {
+				ctx = container_of(conf,
+						   struct ieee80211_chanctx,
+						   conf);
+				drv_unassign_vif_chanctx(local, sdata, ctx);
+			}
+
+			mutex_unlock(&local->chanctx_mtx);
+		}
 		drv_remove_interface(local, sdata);
 	}
 
 	sdata = rtnl_dereference(local->monitor_sdata);
-	if (sdata)
+	if (sdata) {
+		if (local->use_chanctx) {
+			struct ieee80211_chanctx_conf *conf;
+
+			mutex_lock(&local->chanctx_mtx);
+			conf = rcu_dereference_protected(
+					sdata->vif.chanctx_conf,
+					lockdep_is_held(&local->chanctx_mtx));
+			if (conf) {
+				ctx = container_of(conf,
+						   struct ieee80211_chanctx,
+						   conf);
+				drv_unassign_vif_chanctx(local, sdata, ctx);
+			}
+
+			mutex_unlock(&local->chanctx_mtx);
+		}
+
 		drv_remove_interface(local, sdata);
+	}
+
+	mutex_lock(&local->chanctx_mtx);
+	list_for_each_entry(ctx, &local->chanctx_list, list)
+		drv_remove_chanctx(local, ctx);
+	mutex_unlock(&local->chanctx_mtx);
 
 	/* stop hardware - this must stop RX */
 	if (local->open_count)
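The suspend path above recovers the enclosing ieee80211_chanctx from its embedded conf member with container_of(). The idiom is plain pointer arithmetic over offsetof(); a self-contained illustration with generic struct names standing in for the mac80211 structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct conf {
	int width;
};

struct ctx {
	int refcount;
	struct conf conf;	/* embedded member, as in the chanctx case */
};

int main(void)
{
	struct ctx c = { .refcount = 1, .conf = { .width = 40 } };
	struct conf *member = &c.conf;

	/* walk back from the member pointer to the enclosing structure */
	struct ctx *outer = container_of(member, struct ctx, conf);

	printf("same object: %d, refcount=%d\n", outer == &c, outer->refcount);
	return 0;
}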
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3313c11..dd88381 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -391,7 +391,7 @@
 			return;
 
 		/* if HT BSS, and we handle a data frame, also try HT rates */
-		if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT)
+		if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
 			return;
 
 		fc = hdr->frame_control;
@@ -408,8 +408,7 @@
 
 		alt_rate.flags |= IEEE80211_TX_RC_MCS;
 
-		if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) ||
-		    (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
+		if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_40)
 			alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
 		if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 10de668..301386d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -52,11 +52,21 @@
 	struct ieee80211_sta *ista = &sta->sta;
 	void *priv_sta = sta->rate_ctrl_priv;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	if (!ref)
 		return;
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+	rcu_read_unlock();
 
 	ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
 	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 00ade7f..825f33c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -40,6 +40,8 @@
 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
 					   struct sk_buff *skb)
 {
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
 		if (likely(skb->len > FCS_LEN))
 			__pskb_trim(skb, skb->len - FCS_LEN);
@@ -51,20 +53,25 @@
 		}
 	}
 
+	if (status->vendor_radiotap_len)
+		__pskb_pull(skb, status->vendor_radiotap_len);
+
 	return skb;
 }
 
-static inline int should_drop_frame(struct sk_buff *skb,
-				    int present_fcs_len)
+static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
 {
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr;
+
+	hdr = (void *)(skb->data + status->vendor_radiotap_len);
 
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
 			    RX_FLAG_FAILED_PLCP_CRC |
 			    RX_FLAG_AMPDU_IS_ZEROLEN))
 		return 1;
-	if (unlikely(skb->len < 16 + present_fcs_len))
+	if (unlikely(skb->len < 16 + present_fcs_len +
+				status->vendor_radiotap_len))
 		return 1;
 	if (ieee80211_is_ctl(hdr->frame_control) &&
 	    !ieee80211_is_pspoll(hdr->frame_control) &&
@@ -74,32 +81,48 @@
 }
 
 static int
-ieee80211_rx_radiotap_len(struct ieee80211_local *local,
-			  struct ieee80211_rx_status *status)
+ieee80211_rx_radiotap_space(struct ieee80211_local *local,
+			    struct ieee80211_rx_status *status)
 {
 	int len;
 
 	/* always present fields */
 	len = sizeof(struct ieee80211_radiotap_header) + 9;
 
-	if (status->flag & RX_FLAG_MACTIME_MPDU)
+	/* allocate extra bitmap */
+	if (status->vendor_radiotap_len)
+		len += 4;
+
+	if (ieee80211_have_rx_timestamp(status)) {
+		len = ALIGN(len, 8);
 		len += 8;
+	}
 	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
 		len += 1;
 
-	if (len & 1) /* padding for RX_FLAGS if necessary */
-		len++;
+	/* padding for RX_FLAGS if necessary */
+	len = ALIGN(len, 2);
 
 	if (status->flag & RX_FLAG_HT) /* HT info */
 		len += 3;
 
 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
-		/* padding */
-		while (len & 3)
-			len++;
+		len = ALIGN(len, 4);
 		len += 8;
 	}
 
+	if (status->vendor_radiotap_len) {
+		if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
+			status->vendor_radiotap_align = 1;
+		/* align standard part of vendor namespace */
+		len = ALIGN(len, 2);
+		/* allocate standard part of vendor namespace */
+		len += 6;
+		/* align vendor-defined part */
+		len = ALIGN(len, status->vendor_radiotap_align);
+		/* vendor-defined part is already in skb */
+	}
+
 	return len;
 }
 
@@ -118,6 +141,11 @@
 	struct ieee80211_radiotap_header *rthdr;
 	unsigned char *pos;
 	u16 rx_flags = 0;
+	int mpdulen;
+
+	mpdulen = skb->len;
+	if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
+		mpdulen += FCS_LEN;
 
 	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
 	memset(rthdr, 0, rtap_len);
@@ -128,17 +156,30 @@
 			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
 			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
 			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
-	rthdr->it_len = cpu_to_le16(rtap_len);
+	rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
 
-	pos = (unsigned char *)(rthdr+1);
+	pos = (unsigned char *)(rthdr + 1);
+
+	if (status->vendor_radiotap_len) {
+		rthdr->it_present |=
+			cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
+			cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
+		put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
+		pos += 4;
+	}
 
 	/* the order of the following fields is important */
 
 	/* IEEE80211_RADIOTAP_TSFT */
-	if (status->flag & RX_FLAG_MACTIME_MPDU) {
-		put_unaligned_le64(status->mactime, pos);
-		rthdr->it_present |=
-			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
+	if (ieee80211_have_rx_timestamp(status)) {
+		/* padding */
+		while ((pos - (u8 *)rthdr) & 7)
+			*pos++ = 0;
+		put_unaligned_le64(
+			ieee80211_calculate_rx_timestamp(local, status,
+							 mpdulen, 0),
+			pos);
+		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
 		pos += 8;
 	}
 
@@ -152,7 +193,7 @@
 	pos++;
 
 	/* IEEE80211_RADIOTAP_RATE */
-	if (!rate || status->flag & RX_FLAG_HT) {
+	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
 		/*
 		 * Without rate information don't add it. If we have,
 		 * MCS information is a separate field in radiotap,
@@ -172,7 +213,7 @@
 	if (status->band == IEEE80211_BAND_5GHZ)
 		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
 				   pos);
-	else if (status->flag & RX_FLAG_HT)
+	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
 		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
 				   pos);
 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
@@ -205,7 +246,7 @@
 	/* IEEE80211_RADIOTAP_RX_FLAGS */
 	/* ensure 2 byte alignment for the 2 byte field as required */
 	if ((pos - (u8 *)rthdr) & 1)
-		pos++;
+		*pos++ = 0;
 	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
 		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
 	put_unaligned_le16(rx_flags, pos);
@@ -255,6 +296,21 @@
 			*pos++ = 0;
 		*pos++ = 0;
 	}
+
+	if (status->vendor_radiotap_len) {
+		/* ensure 2 byte alignment for the vendor field as required */
+		if ((pos - (u8 *)rthdr) & 1)
+			*pos++ = 0;
+		*pos++ = status->vendor_radiotap_oui[0];
+		*pos++ = status->vendor_radiotap_oui[1];
+		*pos++ = status->vendor_radiotap_oui[2];
+		*pos++ = status->vendor_radiotap_subns;
+		put_unaligned_le16(status->vendor_radiotap_len, pos);
+		pos += 2;
+		/* align the actual payload as requested */
+		while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1))
+			*pos++ = 0;
+	}
 }
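Both sides of the radiotap changes follow the same rule: a radiotap field must start at an offset that is a multiple of its own alignment (8 for TSFT, 4 for A-MPDU status, 2 for RX flags and the vendor header), so the space estimate rounds up with ALIGN() while the writer pads with zero bytes until the position is aligned. A standalone sketch of the two operations, assuming power-of-two alignments; the byte counts are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* round x up to a multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static size_t reserve_field(size_t len, size_t align, size_t size)
{
	len = ALIGN_UP(len, align);	/* padding counted in the estimate */
	return len + size;
}

static uint8_t *pad_to(uint8_t *start, uint8_t *pos, size_t align)
{
	while ((size_t)(pos - start) & (align - 1))
		*pos++ = 0;		/* radiotap padding bytes are zero */
	return pos;
}

int main(void)
{
	uint8_t buf[32];
	uint8_t *pos = buf;
	size_t len = 13;		/* pretend header+bitmaps used 13 bytes */

	len = reserve_field(len, 8, 8);	/* e.g. a 64-bit timestamp field */
	printf("estimated length: %zu\n", len);

	memset(buf, 0xff, sizeof(buf));
	pos += 13;
	pos = pad_to(buf, pos, 8);	/* writer pads the same 3 bytes */
	printf("write offset: %td\n", pos - buf);
	return 0;
}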
 
 /*
@@ -283,13 +339,13 @@
 	 */
 
 	/* room for the radiotap header based on driver features */
-	needed_headroom = ieee80211_rx_radiotap_len(local, status);
+	needed_headroom = ieee80211_rx_radiotap_space(local, status);
 
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
 		present_fcs_len = FCS_LEN;
 
-	/* make sure hdr->frame_control is on the linear part */
-	if (!pskb_may_pull(origskb, 2)) {
+	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
+	if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) {
 		dev_kfree_skb(origskb);
 		return NULL;
 	}
@@ -374,7 +430,6 @@
 	return origskb;
 }
 
-
 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
@@ -403,10 +458,10 @@
 		 *
 		 * We also use that counter for non-QoS STAs.
 		 */
-		seqno_idx = NUM_RX_DATA_QUEUES;
+		seqno_idx = IEEE80211_NUM_TIDS;
 		security_idx = 0;
 		if (ieee80211_is_mgmt(hdr->frame_control))
-			security_idx = NUM_RX_DATA_QUEUES;
+			security_idx = IEEE80211_NUM_TIDS;
 		tid = 0;
 	}
 
@@ -481,8 +536,7 @@
 	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
 	struct ieee80211_mmie *mmie;
 
-	if (skb->len < 24 + sizeof(*mmie) ||
-	    !is_multicast_ether_addr(hdr->da))
+	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
 		return -1;
 
 	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
@@ -497,9 +551,7 @@
 	return le16_to_cpu(mmie->key_id);
 }
 
-
-static ieee80211_rx_result
-ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
+static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	char *dev_addr = rx->sdata->vif.addr;
@@ -507,7 +559,7 @@
 	if (ieee80211_is_data(hdr->frame_control)) {
 		if (is_multicast_ether_addr(hdr->addr1)) {
 			if (ieee80211_has_tods(hdr->frame_control) ||
-				!ieee80211_has_fromds(hdr->frame_control))
+			    !ieee80211_has_fromds(hdr->frame_control))
 				return RX_DROP_MONITOR;
 			if (ether_addr_equal(hdr->addr3, dev_addr))
 				return RX_DROP_MONITOR;
@@ -539,7 +591,7 @@
 			mgmt = (struct ieee80211_mgmt *)hdr;
 			category = mgmt->u.action.category;
 			if (category != WLAN_CATEGORY_MESH_ACTION &&
-				category != WLAN_CATEGORY_SELF_PROTECTED)
+			    category != WLAN_CATEGORY_SELF_PROTECTED)
 				return RX_DROP_MONITOR;
 			return RX_CONTINUE;
 		}
@@ -551,7 +603,6 @@
 			return RX_CONTINUE;
 
 		return RX_DROP_MONITOR;
-
 	}
 
 	return RX_CONTINUE;
@@ -575,7 +626,6 @@
 	return (sq1 - sq2) & SEQ_MASK;
 }
 
-
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 					    struct tid_ampdu_rx *tid_agg_rx,
 					    int index)
@@ -1148,12 +1198,19 @@
 	return RX_CONTINUE;
 }
 
-static void ap_sta_ps_start(struct sta_info *sta)
+static void sta_ps_start(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
+	struct ps_data *ps;
 
-	atomic_inc(&sdata->bss->num_sta_ps);
+	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		ps = &sdata->bss->ps;
+	else
+		return;
+
+	atomic_inc(&ps->num_sta_ps);
 	set_sta_flag(sta, WLAN_STA_PS_STA);
 	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
 		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
@@ -1161,7 +1218,7 @@
 	       sta->sta.addr, sta->sta.aid);
 }
 
-static void ap_sta_ps_end(struct sta_info *sta)
+static void sta_ps_end(struct sta_info *sta)
 {
 	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
 	       sta->sta.addr, sta->sta.aid);
@@ -1188,9 +1245,9 @@
 		return -EINVAL;
 
 	if (start)
-		ap_sta_ps_start(sta_inf);
+		sta_ps_start(sta_inf);
 	else
-		ap_sta_ps_end(sta_inf);
+		sta_ps_end(sta_inf);
 
 	return 0;
 }
@@ -1284,17 +1341,22 @@
 
 	/*
 	 * Update last_rx only for IBSS packets which are for the current
-	 * BSSID to avoid keeping the current IBSS network alive in cases
-	 * where other STAs start using different BSSID.
+	 * BSSID and for station already AUTHORIZED to avoid keeping the
+	 * current IBSS network alive in cases where other STAs start
+	 * using different BSSID. This will also give the station another
+	 * chance to restart the authentication/authorization in case
+	 * something went wrong the first time.
 	 */
 	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
 						NL80211_IFTYPE_ADHOC);
-		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
+		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
+		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
 			sta->last_rx = jiffies;
 			if (ieee80211_is_data(hdr->frame_control)) {
 				sta->last_rx_rate_idx = status->rate_idx;
 				sta->last_rx_rate_flag = status->flag;
+				sta->last_rx_rate_vht_nss = status->vht_nss;
 			}
 		}
 	} else if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -1306,6 +1368,7 @@
 		if (ieee80211_is_data(hdr->frame_control)) {
 			sta->last_rx_rate_idx = status->rate_idx;
 			sta->last_rx_rate_flag = status->flag;
+			sta->last_rx_rate_vht_nss = status->vht_nss;
 		}
 	}
 
@@ -1342,10 +1405,10 @@
 			 */
 			if (ieee80211_is_data(hdr->frame_control) &&
 			    !ieee80211_has_pm(hdr->frame_control))
-				ap_sta_ps_end(sta);
+				sta_ps_end(sta);
 		} else {
 			if (ieee80211_has_pm(hdr->frame_control))
-				ap_sta_ps_start(sta);
+				sta_ps_start(sta);
 		}
 	}
 
@@ -1391,9 +1454,7 @@
 			 struct sk_buff **skb)
 {
 	struct ieee80211_fragment_entry *entry;
-	int idx;
 
-	idx = sdata->fragment_next;
 	entry = &sdata->fragments[sdata->fragment_next++];
 	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
 		sdata->fragment_next = 0;
@@ -1580,18 +1641,15 @@
 	return RX_CONTINUE;
 }
 
-static int
-ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
+static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 {
-	if (unlikely(!rx->sta ||
-	    !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
+	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
 		return -EACCES;
 
 	return 0;
 }
 
-static int
-ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
+static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -1613,8 +1671,7 @@
 	return 0;
 }
 
-static int
-ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
+static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
@@ -1998,7 +2055,7 @@
 	} else {
 		/* unable to resolve next hop */
 		mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
-				    0, reason, fwd_hdr->addr2, sdata);
+				   0, reason, fwd_hdr->addr2, sdata);
 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
 		kfree_skb(fwd_skb);
 		return RX_DROP_MONITOR;
@@ -2207,7 +2264,7 @@
 
 		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
 					    rx->skb->data, rx->skb->len,
-					    status->freq, sig, GFP_ATOMIC);
+					    status->freq, sig);
 		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
 	}
 
@@ -2407,7 +2464,7 @@
 		if (!ieee80211_vif_is_mesh(&sdata->vif))
 			break;
 		if (mesh_action_is_path_sel(mgmt) &&
-		  (!mesh_path_sel_is_hwmp(sdata)))
+		    !mesh_path_sel_is_hwmp(sdata))
 			break;
 		goto queue;
 	}
@@ -2463,7 +2520,6 @@
 		return RX_QUEUED;
 	}
 
-
 	return RX_CONTINUE;
 }
 
@@ -2593,7 +2649,7 @@
 		goto out_free_skb;
 
 	/* room for the radiotap header based on driver features */
-	needed_headroom = ieee80211_rx_radiotap_len(local, status);
+	needed_headroom = ieee80211_rx_radiotap_space(local, status);
 
 	if (skb_headroom(skb) < needed_headroom &&
 	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
@@ -2656,7 +2712,8 @@
 		status = IEEE80211_SKB_RXCB((rx->skb));
 
 		sband = rx->local->hw.wiphy->bands[status->band];
-		if (!(status->flag & RX_FLAG_HT))
+		if (!(status->flag & RX_FLAG_HT) &&
+		    !(status->flag & RX_FLAG_VHT))
 			rate = &sband->bitrates[status->rate_idx];
 
 		ieee80211_rx_cooked_monitor(rx, rate);
@@ -2823,8 +2880,8 @@
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
 		} else if (!rx->sta) {
 			int rate_idx;
-			if (status->flag & RX_FLAG_HT)
-				rate_idx = 0; /* TODO: HT rates */
+			if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
+				rate_idx = 0; /* TODO: HT/VHT rates */
 			else
 				rate_idx = status->rate_idx;
 			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
@@ -3048,8 +3105,7 @@
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
-	if (WARN_ON(status->band < 0 ||
-		    status->band >= IEEE80211_NUM_BANDS))
+	if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
 		goto drop;
 
 	sband = local->hw.wiphy->bands[status->band];
@@ -3094,17 +3150,22 @@
 			 * hardware error. The driver should catch hardware
 			 * errors.
 			 */
-			if (WARN((status->rate_idx < 0 ||
-				 status->rate_idx > 76),
+			if (WARN(status->rate_idx > 76,
 				 "Rate marked as an HT rate but passed "
 				 "status->rate_idx is not "
 				 "an MCS index [0-76]: %d (0x%02x)\n",
 				 status->rate_idx,
 				 status->rate_idx))
 				goto drop;
+		} else if (status->flag & RX_FLAG_VHT) {
+			if (WARN_ONCE(status->rate_idx > 9 ||
+				      !status->vht_nss ||
+				      status->vht_nss > 8,
+				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
+				      status->rate_idx, status->vht_nss))
+				goto drop;
 		} else {
-			if (WARN_ON(status->rate_idx < 0 ||
-				    status->rate_idx >= sband->n_bitrates))
+			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
 				goto drop;
 			rate = &sband->bitrates[status->rate_idx];
 		}
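The VHT branch added above sanity-checks what a driver reports before indexing any rate table: a VHT MCS must be in 0..9 and the number of spatial streams in 1..8, otherwise the frame is dropped with a one-time warning. A trivial sketch of that predicate in isolation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* valid VHT report: MCS 0..9 and 1..8 spatial streams */
static bool vht_rate_report_valid(uint8_t mcs, uint8_t nss)
{
	return mcs <= 9 && nss >= 1 && nss <= 8;
}

int main(void)
{
	printf("%d %d %d\n",
	       vht_rate_report_valid(7, 2),	/* ok */
	       vht_rate_report_valid(10, 1),	/* MCS out of range */
	       vht_rate_report_valid(3, 0));	/* missing NSS */
	return 0;
}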
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 43e60b5..f334027 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -174,7 +174,6 @@
 	u8 *elements;
 	struct ieee80211_channel *channel;
 	size_t baselen;
-	int freq;
 	bool beacon;
 	struct ieee802_11_elems elems;
 
@@ -209,13 +208,7 @@
 
 	ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
 
-	if (elems.ds_params && elems.ds_params_len == 1)
-		freq = ieee80211_channel_to_frequency(elems.ds_params[0],
-						      rx_status->band);
-	else
-		freq = rx_status->freq;
-
-	channel = ieee80211_get_channel(local->hw.wiphy, freq);
+	channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
 
 	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
 		return;
@@ -336,6 +329,10 @@
 
 static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 {
+	/* Software scan is not supported in multi-channel cases */
+	if (local->use_chanctx)
+		return -EOPNOTSUPP;
+
 	/*
 	 * Hardware/driver doesn't support hw_scan, so use software
 	 * scanning instead. First send a nullfunc frame with power save
@@ -417,7 +414,7 @@
 			local->scan_req->ie, local->scan_req->ie_len,
 			local->scan_req->rates[band], false,
 			local->scan_req->no_cck,
-			local->hw.conf.channel);
+			local->hw.conf.channel, true);
 
 	/*
 	 * After sending probe requests, wait for probe responses
@@ -462,6 +459,7 @@
 			sizeof(*local->hw_scan_req) +
 			req->n_channels * sizeof(req->channels[0]);
 		local->hw_scan_req->ie = ies;
+		local->hw_scan_req->flags = req->flags;
 
 		local->hw_scan_band = 0;
 
@@ -480,7 +478,7 @@
 	if (local->ops->hw_scan) {
 		__set_bit(SCAN_HW_SCANNING, &local->scanning);
 	} else if ((req->n_channels == 1) &&
-		   (req->channels[0] == local->oper_channel)) {
+		   (req->channels[0] == local->_oper_channel)) {
 		/*
 		 * If we are scanning only on the operating channel
 		 * then we do not need to stop normal activities
@@ -562,6 +560,7 @@
 	unsigned long min_beacon_int = 0;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_channel *next_chan;
+	enum mac80211_scan_state next_scan_state;
 
 	/*
 	 * check if at least one STA interface is associated,
@@ -620,10 +619,18 @@
 			usecs_to_jiffies(min_beacon_int * 1024) *
 			local->hw.conf.listen_interval);
 
-	if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
-		local->next_scan_state = SCAN_SUSPEND;
-	else
-		local->next_scan_state = SCAN_SET_CHANNEL;
+	if (associated && !tx_empty) {
+		if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+			next_scan_state = SCAN_ABORT;
+		else
+			next_scan_state = SCAN_SUSPEND;
+	} else if (associated && (bad_latency || listen_int_exceeded)) {
+		next_scan_state = SCAN_SUSPEND;
+	} else {
+		next_scan_state = SCAN_SET_CHANNEL;
+	}
+
+	local->next_scan_state = next_scan_state;
 
 	*next_delay = 0;
 }
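The rewritten decision above computes the scanner's next state in one place: when associated with non-empty TX queues, a low-priority scan is aborted outright while a normal one is suspended; latency or listen-interval pressure also suspends; otherwise the scan moves on to the next channel. A table-like sketch of that policy as a pure function, with state names local to the sketch.

#include <stdbool.h>
#include <stdio.h>

enum next_state { SET_CHANNEL, SUSPEND, ABORT };

static enum next_state pick_next_state(bool associated, bool tx_empty,
				       bool bad_latency, bool listen_exceeded,
				       bool low_priority_scan)
{
	if (associated && !tx_empty)
		return low_priority_scan ? ABORT : SUSPEND;
	if (associated && (bad_latency || listen_exceeded))
		return SUSPEND;
	return SET_CHANNEL;
}

int main(void)
{
	printf("%d\n", pick_next_state(true, false, false, false, true));  /* ABORT */
	printf("%d\n", pick_next_state(true, true, true, false, false));   /* SUSPEND */
	printf("%d\n", pick_next_state(false, true, false, false, false)); /* SET_CHANNEL */
	return 0;
}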
@@ -794,6 +801,9 @@
 		case SCAN_RESUME:
 			ieee80211_scan_state_resume(local, &next_delay);
 			break;
+		case SCAN_ABORT:
+			aborted = true;
+			goto out_complete;
 		}
 	} while (next_delay == 0);
 
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d2eb64e..f3e5025 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -98,6 +98,7 @@
 	struct tid_ampdu_tx *tid_tx;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
+	struct ps_data *ps;
 
 	/*
 	 * At this point, when being called as call_rcu callback,
@@ -107,11 +108,15 @@
 	 */
 
 	if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
-		BUG_ON(!sdata->bss);
+		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+			ps = &sdata->bss->ps;
+		else
+			return;
 
 		clear_sta_flag(sta, WLAN_STA_PS_STA);
 
-		atomic_dec(&sdata->bss->num_sta_ps);
+		atomic_dec(&ps->num_sta_ps);
 		sta_info_recalc_tim(sta);
 	}
 
@@ -137,7 +142,7 @@
 	 * drivers have to handle aggregation stop being requested, followed
 	 * directly by station destruction.
 	 */
-	for (i = 0; i < STA_TID_NUM; i++) {
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
 		if (!tid_tx)
 			continue;
@@ -325,7 +330,7 @@
 		return NULL;
 	}
 
-	for (i = 0; i < STA_TID_NUM; i++) {
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		/*
 		 * timer_to_tid must be initialized with identity mapping
 		 * to enable session_timer's data differentiation. See
@@ -338,7 +343,7 @@
 		skb_queue_head_init(&sta->tx_filtered[i]);
 	}
 
-	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
 		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
 	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
@@ -502,22 +507,22 @@
 	return err;
 }
 
-static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
+static inline void __bss_tim_set(u8 *tim, u16 id)
 {
 	/*
 	 * This format has been mandated by the IEEE specifications,
 	 * so this line may not be changed to use the __set_bit() format.
 	 */
-	bss->tim[aid / 8] |= (1 << (aid % 8));
+	tim[id / 8] |= (1 << (id % 8));
 }
 
-static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
+static inline void __bss_tim_clear(u8 *tim, u16 id)
 {
 	/*
 	 * This format has been mandated by the IEEE specifications,
 	 * so this line may not be changed to use the __clear_bit() format.
 	 */
-	bss->tim[aid / 8] &= ~(1 << (aid % 8));
+	tim[id / 8] &= ~(1 << (id % 8));
 }
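The helpers above now operate on a bare u8 tim[] instead of the AP interface structure, so callers other than the AP code can share them; the bit for a given ID lives at byte id/8, bit id%8, and the spec-mandated formulation is kept rather than switching to __set_bit()/__clear_bit(). A self-contained sketch of the same bitmap math.

#include <stdint.h>
#include <stdio.h>

static void tim_set(uint8_t *tim, unsigned int id)
{
	tim[id / 8] |= (uint8_t)(1 << (id % 8));
}

static void tim_clear(uint8_t *tim, unsigned int id)
{
	tim[id / 8] &= (uint8_t)~(1 << (id % 8));
}

int main(void)
{
	uint8_t tim[32] = { 0 };	/* room for 256 association IDs */

	tim_set(tim, 5);		/* AID 5 -> byte 0, bit 5 */
	tim_set(tim, 17);		/* AID 17 -> byte 2, bit 1 */
	printf("%02x %02x\n", tim[0], tim[2]);

	tim_clear(tim, 5);
	printf("%02x\n", tim[0]);
	return 0;
}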
 
 static unsigned long ieee80211_tids_for_ac(int ac)
@@ -541,14 +546,23 @@
 void sta_info_recalc_tim(struct sta_info *sta)
 {
 	struct ieee80211_local *local = sta->local;
-	struct ieee80211_if_ap *bss = sta->sdata->bss;
+	struct ps_data *ps;
 	unsigned long flags;
 	bool indicate_tim = false;
 	u8 ignore_for_tim = sta->sta.uapsd_queues;
 	int ac;
+	u16 id;
 
-	if (WARN_ON_ONCE(!sta->sdata->bss))
+	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		if (WARN_ON_ONCE(!sta->sdata->bss))
+			return;
+
+		ps = &sta->sdata->bss->ps;
+		id = sta->sta.aid;
+	} else {
 		return;
+	}
 
 	/* No need to do anything if the driver does all */
 	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
@@ -587,9 +601,9 @@
 	spin_lock_irqsave(&local->tim_lock, flags);
 
 	if (indicate_tim)
-		__bss_tim_set(bss, sta->sta.aid);
+		__bss_tim_set(ps->tim, id);
 	else
-		__bss_tim_clear(bss, sta->sta.aid);
+		__bss_tim_clear(ps->tim, id);
 
 	if (local->ops->set_tim) {
 		local->tim_in_locked_section = true;
@@ -893,8 +907,8 @@
 			continue;
 
 		if (time_after(jiffies, sta->last_rx + exp_time)) {
-			ibss_dbg(sdata, "expiring inactive STA %pM\n",
-				 sta->sta.addr);
+			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
+				sta->sta.addr);
 			WARN_ON(__sta_info_destroy(sta));
 		}
 	}
@@ -948,10 +962,17 @@
 {
 	struct sta_info *sta = _sta;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ps_data *ps;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		ps = &sdata->bss->ps;
+	else
+		return;
 
 	clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
 	if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
-		atomic_dec(&sdata->bss->num_sta_ps);
+		atomic_dec(&ps->num_sta_ps);
 }
 
 /* powersave support code */
@@ -965,7 +986,7 @@
 
 	clear_sta_flag(sta, WLAN_STA_SP);
 
-	BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
+	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
 	sta->driver_buffered_tids = 0;
 
 	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
@@ -1013,6 +1034,7 @@
 	__le16 fc;
 	bool qos = test_sta_flag(sta, WLAN_STA_WME);
 	struct ieee80211_tx_info *info;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	if (qos) {
 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
@@ -1062,7 +1084,16 @@
 
 	drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
 
-	ieee80211_xmit(sdata, skb);
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		kfree_skb(skb);
+		return;
+	}
+
+	ieee80211_xmit(sdata, skb, chanctx_conf->def.chan->band);
+	rcu_read_unlock();
 }
 
 static void
@@ -1343,7 +1374,7 @@
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 
-	if (WARN_ON(tid >= STA_TID_NUM))
+	if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
 		return;
 
 	if (buffered)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c88f161f..6835cea 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -80,7 +80,6 @@
 	WLAN_STA_TOFFSET_KNOWN,
 };
 
-#define STA_TID_NUM 16
 #define ADDBA_RESP_INTERVAL HZ
 #define HT_AGG_MAX_RETRIES		15
 #define HT_AGG_BURST_RETRIES		3
@@ -197,15 +196,15 @@
 struct sta_ampdu_mlme {
 	struct mutex mtx;
 	/* rx */
-	struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
-	unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
-	unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)];
+	struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
+	unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+	unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
 	/* tx */
 	struct work_struct work;
-	struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
-	struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM];
-	unsigned long last_addba_req_time[STA_TID_NUM];
-	u8 addba_req_num[STA_TID_NUM];
+	struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
+	struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS];
+	unsigned long last_addba_req_time[IEEE80211_NUM_TIDS];
+	u8 addba_req_num[IEEE80211_NUM_TIDS];
 	u8 dialog_token_allocator;
 };
 
@@ -228,6 +227,7 @@
  *	"the" transmit rate
  * @last_rx_rate_idx: rx status rate index of the last data packet
  * @last_rx_rate_flag: rx status flag of the last data packet
+ * @last_rx_rate_vht_nss: rx status nss of last data packet
  * @lock: used for locking all fields that require locking, see comments
  *	in the header file.
  * @drv_unblock_wk: used for driver PS unblocking
@@ -273,7 +273,7 @@
  * @t_offset: timing offset relative to this host
  * @t_offset_setpoint: reference timing offset of this sta to be used when
  * 	calculating clockdrift
- * @ch_type: peer's channel type
+ * @ch_width: peer's channel width
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -330,7 +330,7 @@
 	int last_signal;
 	struct ewma avg_signal;
 	/* Plus 1 for non-QoS frames */
-	__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES + 1];
+	__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
 
 	/* Updated from TX status path only, no locking requirements */
 	unsigned long tx_filtered_count;
@@ -344,14 +344,15 @@
 	unsigned long tx_fragments;
 	struct ieee80211_tx_rate last_tx_rate;
 	int last_rx_rate_idx;
-	int last_rx_rate_flag;
+	u32 last_rx_rate_flag;
+	u8 last_rx_rate_vht_nss;
 	u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
 
 	/*
 	 * Aggregation information, locked with lock.
 	 */
 	struct sta_ampdu_mlme ampdu_mlme;
-	u8 timer_to_tid[STA_TID_NUM];
+	u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
 #ifdef CONFIG_MAC80211_MESH
 	/*
@@ -369,7 +370,7 @@
 	struct timer_list plink_timer;
 	s64 t_offset;
 	s64 t_offset_setpoint;
-	enum nl80211_channel_type ch_type;
+	enum nl80211_chan_width ch_width;
 #endif
 
 #ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 101eb88..ab63237 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -189,30 +189,31 @@
 	}
 
 	if (ieee80211_is_action(mgmt->frame_control) &&
-	    sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
-	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
+	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    ieee80211_sdata_running(sdata)) {
 		/*
 		 * This update looks racy, but isn't -- if we come
 		 * here we've definitely got a station that we're
 		 * talking to, and on a managed interface that can
 		 * only be the AP. And the only other place updating
-		 * this variable is before we're associated.
+		 * this variable in managed mode is before association.
 		 */
 		switch (mgmt->u.action.u.ht_smps.smps_control) {
 		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
-			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
+			sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
 			break;
 		case WLAN_HT_SMPS_CONTROL_STATIC:
-			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
+			sdata->smps_mode = IEEE80211_SMPS_STATIC;
 			break;
 		case WLAN_HT_SMPS_CONTROL_DISABLED:
 		default: /* shouldn't happen since we don't send that */
-			sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
+			sdata->smps_mode = IEEE80211_SMPS_OFF;
 			break;
 		}
 
-		ieee80211_queue_work(&local->hw, &local->recalc_smps);
+		ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
 	}
 }
 
@@ -324,6 +325,75 @@
 
 }
 
+static void ieee80211_report_used_skb(struct ieee80211_local *local,
+				      struct sk_buff *skb, bool dropped)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+
+	if (dropped)
+		acked = false;
+
+	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+		struct ieee80211_sub_if_data *sdata = NULL;
+		struct ieee80211_sub_if_data *iter_sdata;
+		u64 cookie = (unsigned long)skb;
+
+		rcu_read_lock();
+
+		if (skb->dev) {
+			list_for_each_entry_rcu(iter_sdata, &local->interfaces,
+						list) {
+				if (!iter_sdata->dev)
+					continue;
+
+				if (skb->dev == iter_sdata->dev) {
+					sdata = iter_sdata;
+					break;
+				}
+			}
+		} else {
+			sdata = rcu_dereference(local->p2p_sdata);
+		}
+
+		if (!sdata)
+			skb->dev = NULL;
+		else if (ieee80211_is_nullfunc(hdr->frame_control) ||
+			 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+			cfg80211_probe_status(sdata->dev, hdr->addr1,
+					      cookie, acked, GFP_ATOMIC);
+		} else {
+			cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
+						skb->len, acked, GFP_ATOMIC);
+		}
+
+		rcu_read_unlock();
+	}
+
+	if (unlikely(info->ack_frame_id)) {
+		struct sk_buff *ack_skb;
+		unsigned long flags;
+
+		spin_lock_irqsave(&local->ack_status_lock, flags);
+		ack_skb = idr_find(&local->ack_status_frames,
+				   info->ack_frame_id);
+		if (ack_skb)
+			idr_remove(&local->ack_status_frames,
+				   info->ack_frame_id);
+		spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+		if (ack_skb) {
+			if (!dropped) {
+				/* consumes ack_skb */
+				skb_complete_wifi_ack(ack_skb, acked);
+			} else {
+				dev_kfree_skb_any(ack_skb);
+			}
+		}
+	}
+}
+
 /*
  * Use a static threshold for now, best value to be determined
  * by testing ...
@@ -515,62 +585,7 @@
 					msecs_to_jiffies(10));
 	}
 
-	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
-		u64 cookie = (unsigned long)skb;
-		bool found = false;
-
-		acked = info->flags & IEEE80211_TX_STAT_ACK;
-
-		rcu_read_lock();
-
-		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-			if (!sdata->dev)
-				continue;
-
-			if (skb->dev != sdata->dev)
-				continue;
-
-			found = true;
-			break;
-		}
-
-		if (!skb->dev) {
-			sdata = rcu_dereference(local->p2p_sdata);
-			if (sdata)
-				found = true;
-		}
-
-		if (!found)
-			skb->dev = NULL;
-		else if (ieee80211_is_nullfunc(hdr->frame_control) ||
-			 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-			cfg80211_probe_status(sdata->dev, hdr->addr1,
-					      cookie, acked, GFP_ATOMIC);
-		} else {
-			cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
-						skb->len, acked, GFP_ATOMIC);
-		}
-
-		rcu_read_unlock();
-	}
-
-	if (unlikely(info->ack_frame_id)) {
-		struct sk_buff *ack_skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&local->ack_status_lock, flags);
-		ack_skb = idr_find(&local->ack_status_frames,
-				   info->ack_frame_id);
-		if (ack_skb)
-			idr_remove(&local->ack_status_frames,
-				   info->ack_frame_id);
-		spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
-		/* consumes ack_skb */
-		if (ack_skb)
-			skb_complete_wifi_ack(ack_skb,
-				info->flags & IEEE80211_TX_STAT_ACK);
-	}
+	ieee80211_report_used_skb(local, skb, false);
 
 	/* this was a transmitted frame, but now we want to reuse it */
 	skb_orphan(skb);
@@ -646,25 +661,8 @@
 void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-	if (unlikely(info->ack_frame_id)) {
-		struct sk_buff *ack_skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&local->ack_status_lock, flags);
-		ack_skb = idr_find(&local->ack_status_frames,
-				   info->ack_frame_id);
-		if (ack_skb)
-			idr_remove(&local->ack_status_frames,
-				   info->ack_frame_id);
-		spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
-		/* consumes ack_skb */
-		if (ack_skb)
-			dev_kfree_skb_any(ack_skb);
-	}
-
+	ieee80211_report_used_skb(local, skb, true);
 	dev_kfree_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee80211_free_txskb);
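ieee80211_report_used_skb() above folds the two status paths together: the nl80211 TX-status cookie is still derived from the skb pointer via (unsigned long)skb, and the ack_status_frames lookup-and-remove happens in one place whether the frame was acked, failed or dropped. A toy sketch of pointer-derived cookie matching, which is why the very same skb must be handed back for status reporting; the struct and helper names are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct frame {
	int id;
};

/* derive an opaque cookie from the object identity */
static uint64_t frame_cookie(const struct frame *f)
{
	return (uint64_t)(uintptr_t)f;
}

int main(void)
{
	struct frame f = { .id = 42 };
	uint64_t pending = frame_cookie(&f);	/* remembered at TX time */

	/* at status time the same object yields the same cookie */
	printf("match: %d\n", frame_cookie(&f) == pending);
	return 0;
}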
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 18d9c8a..a8270b4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -28,6 +28,25 @@
 #define VIF_PR_FMT	" vif:%s(%d%s)"
 #define VIF_PR_ARG	__get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
+#define CHANCTX_ENTRY	__field(u32, control_freq)				\
+			__field(u32, chan_width)				\
+			__field(u32, center_freq1)				\
+			__field(u32, center_freq2)				\
+			__field(u8, rx_chains_static)				\
+			__field(u8, rx_chains_dynamic)
+#define CHANCTX_ASSIGN	__entry->control_freq = ctx->conf.def.chan->center_freq;\
+			__entry->chan_width = ctx->conf.def.width;		\
+			__entry->center_freq1 = ctx->conf.def.center_freq1;	\
+			__entry->center_freq2 = ctx->conf.def.center_freq2;	\
+			__entry->rx_chains_static = ctx->conf.rx_chains_static;	\
+			__entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
+#define CHANCTX_PR_FMT	" control:%d MHz width:%d center: %d/%d MHz chains:%d/%d"
+#define CHANCTX_PR_ARG	__entry->control_freq, __entry->chan_width,		\
+			__entry->center_freq1, __entry->center_freq2,		\
+			__entry->rx_chains_static, __entry->rx_chains_dynamic
+
+
+
 /*
  * Tracing for driver callbacks.
  */
@@ -301,20 +320,37 @@
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
+		__field(u32, changed)
 		__field(bool, assoc)
+		__field(bool, ibss_joined)
+		__field(bool, ibss_creator)
 		__field(u16, aid)
 		__field(bool, cts)
 		__field(bool, shortpre)
 		__field(bool, shortslot)
+		__field(bool, enable_beacon)
 		__field(u8, dtimper)
 		__field(u16, bcnint)
 		__field(u16, assoc_cap)
 		__field(u64, sync_tsf)
 		__field(u32, sync_device_ts)
 		__field(u32, basic_rates)
-		__field(u32, changed)
-		__field(bool, enable_beacon)
+		__array(int, mcast_rate, IEEE80211_NUM_BANDS)
 		__field(u16, ht_operation_mode)
+		__field(s32, cqm_rssi_thold);
+		__field(s32, cqm_rssi_hyst);
+		__field(u32, channel_width);
+		__field(u32, channel_cfreq1);
+		__dynamic_array(u32, arp_addr_list, info->arp_addr_cnt);
+		__field(bool, arp_filter_enabled);
+		__field(bool, qos);
+		__field(bool, idle);
+		__field(bool, ps);
+		__dynamic_array(u8, ssid, info->ssid_len);
+		__field(bool, hidden_ssid);
+		__field(int, txpower)
+		__field(u8, p2p_ctwindow)
+		__field(bool, p2p_oppps)
 	),
 
 	TP_fast_assign(
@@ -323,17 +359,36 @@
 		__entry->changed = changed;
 		__entry->aid = info->aid;
 		__entry->assoc = info->assoc;
+		__entry->ibss_joined = info->ibss_joined;
+		__entry->ibss_creator = info->ibss_creator;
 		__entry->shortpre = info->use_short_preamble;
 		__entry->cts = info->use_cts_prot;
 		__entry->shortslot = info->use_short_slot;
+		__entry->enable_beacon = info->enable_beacon;
 		__entry->dtimper = info->dtim_period;
 		__entry->bcnint = info->beacon_int;
 		__entry->assoc_cap = info->assoc_capability;
 		__entry->sync_tsf = info->sync_tsf;
 		__entry->sync_device_ts = info->sync_device_ts;
 		__entry->basic_rates = info->basic_rates;
-		__entry->enable_beacon = info->enable_beacon;
+		memcpy(__entry->mcast_rate, info->mcast_rate,
+		       sizeof(__entry->mcast_rate));
 		__entry->ht_operation_mode = info->ht_operation_mode;
+		__entry->cqm_rssi_thold = info->cqm_rssi_thold;
+		__entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
+		__entry->channel_width = info->chandef.width;
+		__entry->channel_cfreq1 = info->chandef.center_freq1;
+		memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
+		       sizeof(u32) * info->arp_addr_cnt);
+		__entry->arp_filter_enabled = info->arp_filter_enabled;
+		__entry->qos = info->qos;
+		__entry->idle = info->idle;
+		__entry->ps = info->ps;
+		memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+		__entry->hidden_ssid = info->hidden_ssid;
+		__entry->txpower = info->txpower;
+		__entry->p2p_ctwindow = info->p2p_ctwindow;
+		__entry->p2p_oppps = info->p2p_oppps;
 	),
 
 	TP_printk(
@@ -971,28 +1026,31 @@
 );
 
 TRACE_EVENT(drv_remain_on_channel,
-	TP_PROTO(struct ieee80211_local *local, struct ieee80211_channel *chan,
-		 enum nl80211_channel_type chantype, unsigned int duration),
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_channel *chan,
+		 unsigned int duration),
 
-	TP_ARGS(local, chan, chantype, duration),
+	TP_ARGS(local, sdata, chan, duration),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
+		VIF_ENTRY
 		__field(int, center_freq)
-		__field(int, channel_type)
 		__field(unsigned int, duration)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
+		VIF_ASSIGN;
 		__entry->center_freq = chan->center_freq;
-		__entry->channel_type = chantype;
 		__entry->duration = duration;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " freq:%dMHz duration:%dms",
-		LOCAL_PR_ARG, __entry->center_freq, __entry->duration
+		LOCAL_PR_FMT  VIF_PR_FMT " freq:%dMHz duration:%dms",
+		LOCAL_PR_ARG, VIF_PR_ARG,
+		__entry->center_freq, __entry->duration
 	)
 );
 
@@ -1001,34 +1059,6 @@
 	TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_offchannel_tx,
-	TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
-		 struct ieee80211_channel *chan,
-		 enum nl80211_channel_type channel_type,
-		 unsigned int wait),
-
-	TP_ARGS(local, skb, chan, channel_type, wait),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		__field(int, center_freq)
-		__field(int, channel_type)
-		__field(unsigned int, wait)
-	),
-
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		__entry->center_freq = chan->center_freq;
-		__entry->channel_type = channel_type;
-		__entry->wait = wait;
-	),
-
-	TP_printk(
-		LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
-		LOCAL_PR_ARG, __entry->center_freq, __entry->wait
-	)
-);
-
 TRACE_EVENT(drv_set_ringparam,
 	TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx),
 
@@ -1256,6 +1286,146 @@
 	TP_ARGS(local, sdata)
 );
 
+DECLARE_EVENT_CLASS(local_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_chanctx *ctx),
+
+	TP_ARGS(local, ctx),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		CHANCTX_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		CHANCTX_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT CHANCTX_PR_FMT,
+		LOCAL_PR_ARG, CHANCTX_PR_ARG
+	)
+);
+
+DEFINE_EVENT(local_chanctx, drv_add_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_chanctx *ctx),
+	TP_ARGS(local, ctx)
+);
+
+DEFINE_EVENT(local_chanctx, drv_remove_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_chanctx *ctx),
+	TP_ARGS(local, ctx)
+);
+
+TRACE_EVENT(drv_change_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_chanctx *ctx,
+		 u32 changed),
+
+	TP_ARGS(local, ctx, changed),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		CHANCTX_ENTRY
+		__field(u32, changed)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		CHANCTX_ASSIGN;
+		__entry->changed = changed;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT CHANCTX_PR_FMT " changed:%#x",
+		LOCAL_PR_ARG, CHANCTX_PR_ARG, __entry->changed
+	)
+);
+
+DECLARE_EVENT_CLASS(local_sdata_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_chanctx *ctx),
+
+	TP_ARGS(local, sdata, ctx),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		CHANCTX_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		CHANCTX_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT CHANCTX_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, CHANCTX_PR_ARG
+	)
+);
+
+DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_chanctx *ctx),
+	TP_ARGS(local, sdata, ctx)
+);
+
+DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_chanctx *ctx),
+	TP_ARGS(local, sdata, ctx)
+);
+
+TRACE_EVENT(drv_start_ap,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_bss_conf *info),
+
+	TP_ARGS(local, sdata, info),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(u8, dtimper)
+		__field(u16, bcnint)
+		__dynamic_array(u8, ssid, info->ssid_len);
+		__field(bool, hidden_ssid);
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->dtimper = info->dtim_period;
+		__entry->bcnint = info->beacon_int;
+		memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+		__entry->hidden_ssid = info->hidden_ssid;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT  VIF_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG
+	)
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_stop_ap,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
+);
+
+DEFINE_EVENT(local_only_evt, drv_restart_complete,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b858ebe..d287a4f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -324,22 +324,20 @@
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 
-	/*
-	 * virtual interfaces are protected by RCU
-	 */
-	rcu_read_lock();
-
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		struct ieee80211_if_ap *ap;
-		if (sdata->vif.type != NL80211_IFTYPE_AP)
+		struct ps_data *ps;
+
+		if (sdata->vif.type == NL80211_IFTYPE_AP)
+			ps = &sdata->u.ap.ps;
+		else
 			continue;
-		ap = &sdata->u.ap;
-		skb = skb_dequeue(&ap->ps_bc_buf);
+
+		skb = skb_dequeue(&ps->bc_buf);
 		if (skb) {
 			purged++;
 			dev_kfree_skb(skb);
 		}
-		total += skb_queue_len(&ap->ps_bc_buf);
+		total += skb_queue_len(&ps->bc_buf);
 	}
 
 	/*
@@ -360,8 +358,6 @@
 		}
 	}
 
-	rcu_read_unlock();
-
 	local->total_ps_buffered = total;
 	ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
 }
@@ -371,6 +367,7 @@
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+	struct ps_data *ps;
 
 	/*
 	 * broadcast/multicast frame
@@ -380,16 +377,24 @@
 	 * This is done either by the hardware or us.
 	 */
 
-	/* powersaving STAs only in AP/VLAN mode */
-	if (!tx->sdata->bss)
+	/* powersaving STAs currently only in AP/VLAN mode */
+	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		if (!tx->sdata->bss)
+			return TX_CONTINUE;
+
+		ps = &tx->sdata->bss->ps;
+	} else {
 		return TX_CONTINUE;
+	}
 
 	/* no buffering for ordered frames */
 	if (ieee80211_has_order(hdr->frame_control))
 		return TX_CONTINUE;
 
 	/* no stations in PS mode */
-	if (!atomic_read(&tx->sdata->bss->num_sta_ps))
+	if (!atomic_read(&ps->num_sta_ps))
 		return TX_CONTINUE;
 
 	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
@@ -404,14 +409,14 @@
 	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
 		purge_old_ps_buffers(tx->local);
 
-	if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
+	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
 		ps_dbg(tx->sdata,
 		       "BC TX buffer full - dropping the oldest frame\n");
-		dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
+		dev_kfree_skb(skb_dequeue(&ps->bc_buf));
 	} else
 		tx->local->total_ps_buffered++;
 
-	skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
+	skb_queue_tail(&ps->bc_buf, tx->skb);
 
 	return TX_QUEUED;
 }
@@ -951,7 +956,6 @@
 	fragnum = 0;
 
 	skb_queue_walk(&tx->skbs, skb) {
-		int next_len;
 		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
 		hdr = (void *)skb->data;
@@ -970,7 +974,6 @@
 			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 		} else {
 			hdr->frame_control &= ~morefrags;
-			next_len = 0;
 		}
 		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
 		fragnum++;
@@ -1372,7 +1375,8 @@
  * Returns false if the frame couldn't be transmitted but was queued instead.
  */
 static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
-			 struct sk_buff *skb, bool txpending)
+			 struct sk_buff *skb, bool txpending,
+			 enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_data tx;
@@ -1386,20 +1390,18 @@
 		return true;
 	}
 
-	rcu_read_lock();
-
 	/* initialises tx */
 	led_len = skb->len;
 	res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
 
 	if (unlikely(res_prepare == TX_DROP)) {
 		ieee80211_free_txskb(&local->hw, skb);
-		goto out;
+		return true;
 	} else if (unlikely(res_prepare == TX_QUEUED)) {
-		goto out;
+		return true;
 	}
 
-	info->band = local->hw.conf.channel->band;
+	info->band = band;
 
 	/* set up hw_queue value early */
 	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1410,8 +1412,7 @@
 	if (!invoke_tx_handlers(&tx))
 		result = __ieee80211_tx(local, &tx.skbs, led_len,
 					tx.sta, txpending);
- out:
-	rcu_read_unlock();
+
 	return result;
 }
 
@@ -1446,7 +1447,8 @@
 	return 0;
 }
 
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		    enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1454,8 +1456,6 @@
 	int headroom;
 	bool may_encrypt;
 
-	rcu_read_lock();
-
 	may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
 
 	headroom = local->tx_headroom;
@@ -1466,7 +1466,6 @@
 
 	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
 		ieee80211_free_txskb(&local->hw, skb);
-		rcu_read_unlock();
 		return;
 	}
 
@@ -1478,13 +1477,11 @@
 	    !is_multicast_ether_addr(hdr->addr1) &&
 	    mesh_nexthop_resolve(skb, sdata)) {
 		/* skb queued: don't free */
-		rcu_read_unlock();
 		return;
 	}
 
 	ieee80211_set_qos_hdr(sdata, skb);
-	ieee80211_tx(sdata, skb, false);
-	rcu_read_unlock();
+	ieee80211_tx(sdata, skb, false, band);
 }
 
 static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
@@ -1574,7 +1571,8 @@
 					 struct net_device *dev)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_channel *chan = local->hw.conf.channel;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
 	struct ieee80211_radiotap_header *prthdr =
 		(struct ieee80211_radiotap_header *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1583,26 +1581,6 @@
 	u16 len_rthdr;
 	int hdrlen;
 
-	/*
-	 * Frame injection is not allowed if beaconing is not allowed
-	 * or if we need radar detection. Beaconing is usually not allowed when
-	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
-	 * Passive scan is also used in world regulatory domains where
-	 * your country is not known and as such it should be treated as
-	 * NO TX unless the channel is explicitly allowed in which case
-	 * your current regulatory domain would not have the passive scan
-	 * flag.
-	 *
-	 * Since AP mode uses monitor interfaces to inject/TX management
-	 * frames we can make AP mode the exception to this rule once it
-	 * supports radar detection as its implementation can deal with
-	 * radar detection by itself. We can do that later by adding a
-	 * monitor flag interfaces used for AP support.
-	 */
-	if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
-	     IEEE80211_CHAN_PASSIVE_SCAN)))
-		goto fail;
-
 	/* check for not even having the fixed radiotap header part */
 	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
 		goto fail; /* too short to be possibly valid */
@@ -1688,11 +1666,45 @@
 		}
 	}
 
-	ieee80211_xmit(sdata, skb);
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf) {
+		tmp_sdata = rcu_dereference(local->monitor_sdata);
+		if (tmp_sdata)
+			chanctx_conf =
+				rcu_dereference(tmp_sdata->vif.chanctx_conf);
+	}
+	if (!chanctx_conf)
+		goto fail_rcu;
+
+	chan = chanctx_conf->def.chan;
+
+	/*
+	 * Frame injection is not allowed if beaconing is not allowed
+	 * or if we need radar detection. Beaconing is usually not allowed when
+	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
+	 * Passive scan is also used in world regulatory domains where
+	 * your country is not known and as such it should be treated as
+	 * NO TX unless the channel is explicitly allowed in which case
+	 * your current regulatory domain would not have the passive scan
+	 * flag.
+	 *
+	 * Since AP mode uses monitor interfaces to inject/TX management
+	 * frames we can make AP mode the exception to this rule once it
+	 * supports radar detection as its implementation can deal with
+	 * radar detection by itself. We can do that later by adding a
+	 * monitor flag to interfaces used for AP support.
+	 */
+	if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
+			    IEEE80211_CHAN_PASSIVE_SCAN)))
+		goto fail_rcu;
+
+	ieee80211_xmit(sdata, skb, chan->band);
 	rcu_read_unlock();
 
 	return NETDEV_TX_OK;
 
+fail_rcu:
+	rcu_read_unlock();
 fail:
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
@@ -1734,6 +1746,9 @@
 	bool multicast;
 	u32 info_flags = 0;
 	u16 info_id = 0;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_sub_if_data *ap_sdata;
+	enum ieee80211_band band;
 
 	if (unlikely(skb->len < ETH_HLEN))
 		goto fail;
@@ -1743,9 +1758,10 @@
 	ethertype = (skb->data[12] << 8) | skb->data[13];
 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
 
+	rcu_read_lock();
+
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
-		rcu_read_lock();
 		sta = rcu_dereference(sdata->u.vlan.sta);
 		if (sta) {
 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
@@ -1758,7 +1774,12 @@
 			authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
 			wme_sta = test_sta_flag(sta, WLAN_STA_WME);
 		}
-		rcu_read_unlock();
+		ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+					u.ap);
+		chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
+		band = chanctx_conf->def.chan->band;
 		if (sta)
 			break;
 		/* fall through */
@@ -1769,6 +1790,11 @@
 		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
 		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
 		hdrlen = 24;
+		if (sdata->vif.type == NL80211_IFTYPE_AP)
+			chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
+		band = chanctx_conf->def.chan->band;
 		break;
 	case NL80211_IFTYPE_WDS:
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
@@ -1778,15 +1804,20 @@
 		memcpy(hdr.addr3, skb->data, ETH_ALEN);
 		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
 		hdrlen = 30;
+		/*
+		 * This is the exception! WDS style interfaces are prohibited
+		 * when channel contexts are in use, so this must be valid.
+		 */
+		band = local->hw.conf.channel->band;
 		break;
 #ifdef CONFIG_MAC80211_MESH
 	case NL80211_IFTYPE_MESH_POINT:
 		if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
 			/* Do not send frames with mesh_ttl == 0 */
 			sdata->u.mesh.mshstats.dropped_frames_ttl++;
-			goto fail;
+			goto fail_rcu;
 		}
-		rcu_read_lock();
+
 		if (!is_multicast_ether_addr(skb->data)) {
 			mpath = mesh_path_lookup(skb->data, sdata);
 			if (!mpath)
@@ -1803,7 +1834,6 @@
 		    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					skb->data, skb->data + ETH_ALEN);
-			rcu_read_unlock();
 			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
 					sdata, NULL, NULL);
 		} else {
@@ -1819,7 +1849,6 @@
 				mesh_da = mppath->mpp;
 			else if (mpath)
 				mesh_da = mpath->dst;
-			rcu_read_unlock();
 
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					mesh_da, sdata->vif.addr);
@@ -1839,13 +1868,16 @@
 							skb->data + ETH_ALEN);
 
 		}
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
+		band = chanctx_conf->def.chan->band;
 		break;
 #endif
 	case NL80211_IFTYPE_STATION:
 		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
 			bool tdls_peer = false;
 
-			rcu_read_lock();
 			sta = sta_info_get(sdata, skb->data);
 			if (sta) {
 				authorized = test_sta_flag(sta,
@@ -1856,7 +1888,6 @@
 				tdls_auth = test_sta_flag(sta,
 						WLAN_STA_TDLS_PEER_AUTH);
 			}
-			rcu_read_unlock();
 
 			/*
 			 * If the TDLS link is enabled, send everything
@@ -1871,7 +1902,7 @@
 		if (tdls_direct) {
 			/* link during setup - throw out frames to peer */
 			if (!tdls_auth)
-				goto fail;
+				goto fail_rcu;
 
 			/* DA SA BSSID */
 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1896,6 +1927,10 @@
 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
 			hdrlen = 24;
 		}
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
+		band = chanctx_conf->def.chan->band;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		/* DA SA BSSID */
@@ -1903,9 +1938,13 @@
 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
 		memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
 		hdrlen = 24;
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
+		band = chanctx_conf->def.chan->band;
 		break;
 	default:
-		goto fail;
+		goto fail_rcu;
 	}
 
 	/*
@@ -1915,13 +1954,11 @@
 	 */
 	multicast = is_multicast_ether_addr(hdr.addr1);
 	if (!multicast) {
-		rcu_read_lock();
 		sta = sta_info_get(sdata, hdr.addr1);
 		if (sta) {
 			authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
 			wme_sta = test_sta_flag(sta, WLAN_STA_WME);
 		}
-		rcu_read_unlock();
 	}
 
 	/* For mesh, the use of the QoS header is mandatory */
@@ -1949,7 +1986,7 @@
 
 		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 
-		goto fail;
+		goto fail_rcu;
 	}
 
 	if (unlikely(!multicast && skb->sk &&
@@ -2004,7 +2041,7 @@
 		kfree_skb(tmp_skb);
 
 		if (!skb)
-			goto fail;
+			goto fail_rcu;
 	}
 
 	hdr.frame_control = fc;
@@ -2052,7 +2089,8 @@
 		head_need = max_t(int, 0, head_need);
 		if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
 			ieee80211_free_txskb(&local->hw, skb);
-			return NETDEV_TX_OK;
+			skb = NULL;
+			goto fail_rcu;
 		}
 	}
 
@@ -2104,10 +2142,13 @@
 	info->flags = info_flags;
 	info->ack_frame_id = info_id;
 
-	ieee80211_xmit(sdata, skb);
+	ieee80211_xmit(sdata, skb, band);
+	rcu_read_unlock();
 
 	return NETDEV_TX_OK;
 
+ fail_rcu:
+	rcu_read_unlock();
  fail:
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -2142,11 +2183,18 @@
 	struct sta_info *sta;
 	struct ieee80211_hdr *hdr;
 	bool result;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	sdata = vif_to_sdata(info->control.vif);
 
 	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
-		result = ieee80211_tx(sdata, skb, true);
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (unlikely(!chanctx_conf)) {
+			dev_kfree_skb(skb);
+			return true;
+		}
+		result = ieee80211_tx(sdata, skb, true,
+				      chanctx_conf->def.chan->band);
 	} else {
 		struct sk_buff_head skbs;
 
@@ -2214,9 +2262,8 @@
 /* functions for drivers to get certain frames */
 
 static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
-				     struct ieee80211_if_ap *bss,
-				     struct sk_buff *skb,
-				     struct beacon_data *beacon)
+				     struct ps_data *ps,
+				     struct sk_buff *skb)
 {
 	u8 *pos, *tim;
 	int aid0 = 0;
@@ -2224,27 +2271,27 @@
 
 	/* Generate bitmap for TIM only if there are any STAs in power save
 	 * mode. */
-	if (atomic_read(&bss->num_sta_ps) > 0)
+	if (atomic_read(&ps->num_sta_ps) > 0)
 		/* in the hope that this is faster than
 		 * checking byte-for-byte */
-		have_bits = !bitmap_empty((unsigned long*)bss->tim,
+		have_bits = !bitmap_empty((unsigned long*)ps->tim,
 					  IEEE80211_MAX_AID+1);
 
-	if (bss->dtim_count == 0)
-		bss->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
+	if (ps->dtim_count == 0)
+		ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
 	else
-		bss->dtim_count--;
+		ps->dtim_count--;
 
 	tim = pos = (u8 *) skb_put(skb, 6);
 	*pos++ = WLAN_EID_TIM;
 	*pos++ = 4;
-	*pos++ = bss->dtim_count;
+	*pos++ = ps->dtim_count;
 	*pos++ = sdata->vif.bss_conf.dtim_period;
 
-	if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
+	if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
 		aid0 = 1;
 
-	bss->dtim_bc_mc = aid0 == 1;
+	ps->dtim_bc_mc = aid0 == 1;
 
 	if (have_bits) {
 		/* Find largest even number N1 so that bits numbered 1 through
@@ -2252,14 +2299,14 @@
 		 * (N2 + 1) x 8 through 2007 are 0. */
 		n1 = 0;
 		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
-			if (bss->tim[i]) {
+			if (ps->tim[i]) {
 				n1 = i & 0xfe;
 				break;
 			}
 		}
 		n2 = n1;
 		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
-			if (bss->tim[i]) {
+			if (ps->tim[i]) {
 				n2 = i;
 				break;
 			}
@@ -2269,7 +2316,7 @@
 		*pos++ = n1 | aid0;
 		/* Part Virt Bitmap */
 		skb_put(skb, n2 - n1);
-		memcpy(pos, bss->tim + n1, n2 - n1 + 1);
+		memcpy(pos, ps->tim + n1, n2 - n1 + 1);
 
 		tim[1] = n2 - n1 + 4;
 	} else {
@@ -2286,16 +2333,16 @@
 	struct sk_buff *skb = NULL;
 	struct ieee80211_tx_info *info;
 	struct ieee80211_sub_if_data *sdata = NULL;
-	struct ieee80211_if_ap *ap = NULL;
-	struct beacon_data *beacon;
-	enum ieee80211_band band = local->oper_channel->band;
+	enum ieee80211_band band;
 	struct ieee80211_tx_rate_control txrc;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	rcu_read_lock();
 
 	sdata = vif_to_sdata(vif);
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-	if (!ieee80211_sdata_running(sdata))
+	if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
 		goto out;
 
 	if (tim_offset)
@@ -2304,8 +2351,9 @@
 		*tim_length = 0;
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
-		ap = &sdata->u.ap;
-		beacon = rcu_dereference(ap->beacon);
+		struct ieee80211_if_ap *ap = &sdata->u.ap;
+		struct beacon_data *beacon = rcu_dereference(ap->beacon);
+
 		if (beacon) {
 			/*
 			 * headroom, head length,
@@ -2329,14 +2377,12 @@
 			 * of the tim bitmap in mac80211 and the driver.
 			 */
 			if (local->tim_in_locked_section) {
-				ieee80211_beacon_add_tim(sdata, ap, skb,
-							 beacon);
+				ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
 			} else {
 				unsigned long flags;
 
 				spin_lock_irqsave(&local->tim_lock, flags);
-				ieee80211_beacon_add_tim(sdata, ap, skb,
-							 beacon);
+				ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
 				spin_unlock_irqrestore(&local->tim_lock, flags);
 			}
 
@@ -2412,6 +2458,8 @@
 		*pos++ = WLAN_EID_SSID;
 		*pos++ = 0x0;
 
+		band = chanctx_conf->def.chan->band;
+
 		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 		    mesh_add_ds_params_ie(skb, sdata) ||
 		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -2429,6 +2477,8 @@
 		goto out;
 	}
 
+	band = chanctx_conf->def.chan->band;
+
 	info = IEEE80211_SKB_CB(skb);
 
 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
@@ -2656,29 +2706,40 @@
 	struct sk_buff *skb = NULL;
 	struct ieee80211_tx_data tx;
 	struct ieee80211_sub_if_data *sdata;
-	struct ieee80211_if_ap *bss = NULL;
-	struct beacon_data *beacon;
+	struct ps_data *ps;
 	struct ieee80211_tx_info *info;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	sdata = vif_to_sdata(vif);
-	bss = &sdata->u.ap;
 
 	rcu_read_lock();
-	beacon = rcu_dereference(bss->beacon);
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-	if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
+	if (!chanctx_conf)
 		goto out;
 
-	if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		struct beacon_data *beacon =
+				rcu_dereference(sdata->u.ap.beacon);
+
+		if (!beacon || !beacon->head)
+			goto out;
+
+		ps = &sdata->u.ap.ps;
+	} else {
+		goto out;
+	}
+
+	if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
 		goto out; /* send buffered bc/mc only after DTIM beacon */
 
 	while (1) {
-		skb = skb_dequeue(&bss->ps_bc_buf);
+		skb = skb_dequeue(&ps->bc_buf);
 		if (!skb)
 			goto out;
 		local->total_ps_buffered--;
 
-		if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
+		if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
 			struct ieee80211_hdr *hdr =
 				(struct ieee80211_hdr *) skb->data;
 			/* more buffered multicast/broadcast frames ==> set
@@ -2696,7 +2757,7 @@
 	info = IEEE80211_SKB_CB(skb);
 
 	tx.flags |= IEEE80211_TX_PS_BUFFERED;
-	info->band = local->oper_channel->band;
+	info->band = chanctx_conf->def.chan->band;
 
 	if (invoke_tx_handlers(&tx))
 		skb = NULL;
@@ -2707,8 +2768,9 @@
 }
 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
 
-void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
-			  struct sk_buff *skb, int tid)
+void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
+				 struct sk_buff *skb, int tid,
+				 enum ieee80211_band band)
 {
 	int ac = ieee802_1d_to_ac[tid & 7];
 
@@ -2725,6 +2787,6 @@
 	 * requirements are that we do not come into tx with bhs on.
 	 */
 	local_bh_disable();
-	ieee80211_xmit(sdata, skb);
+	ieee80211_xmit(sdata, skb, band);
 	local_bh_enable();
 }
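
Throughout the tx.c changes above, the band handed down to ieee80211_xmit() is resolved from the interface's channel context rather than from local->hw.conf.channel, with the RCU read lock now held by the callers. A minimal sketch of that pattern (illustrative only, assuming the mac80211 structures shown in the hunks above):

	struct ieee80211_chanctx_conf *chanctx_conf;
	enum ieee80211_band band;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		/* no channel context assigned: bail out, as the hunks above do */
		rcu_read_unlock();
		return;
	}
	band = chanctx_conf->def.chan->band;
	ieee80211_xmit(sdata, skb, band);	/* new signature takes the band */
	rcu_read_unlock();
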
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0151ae3..08132ff 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -512,7 +512,7 @@
 EXPORT_SYMBOL(ieee80211_wake_queues);
 
 void ieee80211_iterate_active_interfaces(
-	struct ieee80211_hw *hw,
+	struct ieee80211_hw *hw, u32 iter_flags,
 	void (*iterator)(void *data, u8 *mac,
 			 struct ieee80211_vif *vif),
 	void *data)
@@ -530,6 +530,9 @@
 		default:
 			break;
 		}
+		if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
+		    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+			continue;
 		if (ieee80211_sdata_running(sdata))
 			iterator(data, sdata->vif.addr,
 				 &sdata->vif);
@@ -537,7 +540,9 @@
 
 	sdata = rcu_dereference_protected(local->monitor_sdata,
 					  lockdep_is_held(&local->iflist_mtx));
-	if (sdata)
+	if (sdata &&
+	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
+	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
 		iterator(data, sdata->vif.addr, &sdata->vif);
 
 	mutex_unlock(&local->iflist_mtx);
@@ -545,7 +550,7 @@
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
 
 void ieee80211_iterate_active_interfaces_atomic(
-	struct ieee80211_hw *hw,
+	struct ieee80211_hw *hw, u32 iter_flags,
 	void (*iterator)(void *data, u8 *mac,
 			 struct ieee80211_vif *vif),
 	void *data)
@@ -563,13 +568,18 @@
 		default:
 			break;
 		}
+		if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
+		    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+			continue;
 		if (ieee80211_sdata_running(sdata))
 			iterator(data, sdata->vif.addr,
 				 &sdata->vif);
 	}
 
 	sdata = rcu_dereference(local->monitor_sdata);
-	if (sdata)
+	if (sdata &&
+	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
+	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
 		iterator(data, sdata->vif.addr, &sdata->vif);
 
 	rcu_read_unlock();
@@ -769,6 +779,18 @@
 			else
 				elem_parse_failed = true;
 			break;
+		case WLAN_EID_VHT_CAPABILITY:
+			if (elen >= sizeof(struct ieee80211_vht_cap))
+				elems->vht_cap_elem = (void *)pos;
+			else
+				elem_parse_failed = true;
+			break;
+		case WLAN_EID_VHT_OPERATION:
+			if (elen >= sizeof(struct ieee80211_vht_operation))
+				elems->vht_operation = (void *)pos;
+			else
+				elem_parse_failed = true;
+			break;
 		case WLAN_EID_MESH_ID:
 			elems->mesh_id = pos;
 			elems->mesh_id_len = elen;
@@ -837,7 +859,7 @@
 		if (elem_parse_failed)
 			elems->parse_error = true;
 		else
-			set_bit(id, seen_elems);
+			__set_bit(id, seen_elems);
 
 		left -= elen;
 		pos += elen;
@@ -860,6 +882,7 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_queue_params qparam;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 	int ac;
 	bool use_11b, enable_qos;
 	int aCWmin, aCWmax;
@@ -872,8 +895,12 @@
 
 	memset(&qparam, 0, sizeof(qparam));
 
-	use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	use_11b = (chanctx_conf &&
+		   chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) &&
 		 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
+	rcu_read_unlock();
 
 	/*
 	 * By default disable QoS in STA mode for old access points, which do
@@ -952,7 +979,7 @@
 				  const size_t supp_rates_len,
 				  const u8 *supp_rates)
 {
-	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
 	int i, have_higher_than_11mbit = 0;
 
 	/* cf. IEEE 802.11 9.2.12 */
@@ -960,11 +987,16 @@
 		if ((supp_rates[i] & 0x7f) * 5 > 110)
 			have_higher_than_11mbit = 1;
 
-	if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+	if (chanctx_conf &&
+	    chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
 	    have_higher_than_11mbit)
 		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
 	else
 		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+	rcu_read_unlock();
 
 	ieee80211_set_wmm_default(sdata, true);
 }
@@ -996,7 +1028,7 @@
 }
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
-			 u16 transaction, u16 auth_alg,
+			 u16 transaction, u16 auth_alg, u16 status,
 			 u8 *extra, size_t extra_len, const u8 *da,
 			 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
 {
@@ -1021,7 +1053,7 @@
 	memcpy(mgmt->bssid, bssid, ETH_ALEN);
 	mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
 	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
-	mgmt->u.auth.status_code = cpu_to_le16(0);
+	mgmt->u.auth.status_code = cpu_to_le16(status);
 	if (extra)
 		memcpy(skb_put(skb, extra_len), extra, extra_len);
 
@@ -1234,7 +1266,7 @@
 			      const u8 *ssid, size_t ssid_len,
 			      const u8 *ie, size_t ie_len,
 			      u32 ratemask, bool directed, bool no_cck,
-			      struct ieee80211_channel *channel)
+			      struct ieee80211_channel *channel, bool scan)
 {
 	struct sk_buff *skb;
 
@@ -1245,7 +1277,10 @@
 		if (no_cck)
 			IEEE80211_SKB_CB(skb)->flags |=
 				IEEE80211_TX_CTL_NO_CCK_RATE;
-		ieee80211_tx_skb(sdata, skb);
+		if (scan)
+			ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
+		else
+			ieee80211_tx_skb(sdata, skb);
 	}
 }
 
@@ -1308,6 +1343,7 @@
 {
 	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_chanctx *ctx;
 	struct sta_info *sta;
 	int res, i;
 
@@ -1380,6 +1416,46 @@
 			res = drv_add_interface(local, sdata);
 	}
 
+	/* add channel contexts */
+	if (local->use_chanctx) {
+		mutex_lock(&local->chanctx_mtx);
+		list_for_each_entry(ctx, &local->chanctx_list, list)
+			WARN_ON(drv_add_chanctx(local, ctx));
+		mutex_unlock(&local->chanctx_mtx);
+	}
+
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		struct ieee80211_chanctx_conf *ctx_conf;
+
+		if (!ieee80211_sdata_running(sdata))
+			continue;
+
+		mutex_lock(&local->chanctx_mtx);
+		ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+				lockdep_is_held(&local->chanctx_mtx));
+		if (ctx_conf) {
+			ctx = container_of(ctx_conf, struct ieee80211_chanctx,
+					   conf);
+			drv_assign_vif_chanctx(local, sdata, ctx);
+		}
+		mutex_unlock(&local->chanctx_mtx);
+	}
+
+	sdata = rtnl_dereference(local->monitor_sdata);
+	if (sdata && local->use_chanctx && ieee80211_sdata_running(sdata)) {
+		struct ieee80211_chanctx_conf *ctx_conf;
+
+		mutex_lock(&local->chanctx_mtx);
+		ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+				lockdep_is_held(&local->chanctx_mtx));
+		if (ctx_conf) {
+			ctx = container_of(ctx_conf, struct ieee80211_chanctx,
+					   conf);
+			drv_assign_vif_chanctx(local, sdata, ctx);
+		}
+		mutex_unlock(&local->chanctx_mtx);
+	}
+
 	/* add STAs back */
 	mutex_lock(&local->sta_mtx);
 	list_for_each_entry(sta, &local->sta_list, list) {
@@ -1435,7 +1511,8 @@
 			  BSS_CHANGED_BSSID |
 			  BSS_CHANGED_CQM |
 			  BSS_CHANGED_QOS |
-			  BSS_CHANGED_IDLE;
+			  BSS_CHANGED_IDLE |
+			  BSS_CHANGED_TXPOWER;
 
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
@@ -1452,9 +1529,13 @@
 		case NL80211_IFTYPE_AP:
 			changed |= BSS_CHANGED_SSID;
 
-			if (sdata->vif.type == NL80211_IFTYPE_AP)
+			if (sdata->vif.type == NL80211_IFTYPE_AP) {
 				changed |= BSS_CHANGED_AP_PROBE_RESP;
 
+				if (rcu_access_pointer(sdata->u.ap.beacon))
+					drv_start_ap(local, sdata);
+			}
+
 			/* fall through */
 		case NL80211_IFTYPE_MESH_POINT:
 			changed |= BSS_CHANGED_BEACON |
@@ -1553,8 +1634,10 @@
 	 * If this is for hw restart things are still running.
 	 * We may want to change that later, however.
 	 */
-	if (!local->suspended)
+	if (!local->suspended) {
+		drv_restart_complete(local);
 		return 0;
+	}
 
 #ifdef CONFIG_PM
 	/* first set suspended false, then resuming */
@@ -1617,68 +1700,24 @@
 }
 EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
 
-static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
-			  enum ieee80211_smps_mode *smps_mode)
+void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata)
 {
-	if (ifmgd->associated) {
-		*smps_mode = ifmgd->ap_smps;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_chanctx *chanctx;
 
-		if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
-			if (ifmgd->powersave)
-				*smps_mode = IEEE80211_SMPS_DYNAMIC;
-			else
-				*smps_mode = IEEE80211_SMPS_OFF;
-		}
+	mutex_lock(&local->chanctx_mtx);
 
-		return 1;
-	}
+	chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					lockdep_is_held(&local->chanctx_mtx));
 
-	return 0;
-}
-
-void ieee80211_recalc_smps(struct ieee80211_local *local)
-{
-	struct ieee80211_sub_if_data *sdata;
-	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
-	int count = 0;
-
-	mutex_lock(&local->iflist_mtx);
-
-	/*
-	 * This function could be improved to handle multiple
-	 * interfaces better, but right now it makes any
-	 * non-station interfaces force SM PS to be turned
-	 * off. If there are multiple station interfaces it
-	 * could also use the best possible mode, e.g. if
-	 * one is in static and the other in dynamic then
-	 * dynamic is ok.
-	 */
-
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata))
-			continue;
-		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
-			continue;
-		if (sdata->vif.type != NL80211_IFTYPE_STATION)
-			goto set;
-
-		count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
-
-		if (count > 1) {
-			smps_mode = IEEE80211_SMPS_OFF;
-			break;
-		}
-	}
-
-	if (smps_mode == local->smps_mode)
+	if (WARN_ON_ONCE(!chanctx_conf))
 		goto unlock;
 
- set:
-	local->smps_mode = smps_mode;
-	/* changed flag is auto-detected for this */
-	ieee80211_hw_config(local, 0);
+	chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+	ieee80211_recalc_smps_chanctx(local, chanctx);
  unlock:
-	mutex_unlock(&local->iflist_mtx);
+	mutex_unlock(&local->chanctx_mtx);
 }
 
 static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
@@ -1818,8 +1857,8 @@
 	__le32 tmp;
 
 	*pos++ = WLAN_EID_VHT_CAPABILITY;
-	*pos++ = sizeof(struct ieee80211_vht_capabilities);
-	memset(pos, 0, sizeof(struct ieee80211_vht_capabilities));
+	*pos++ = sizeof(struct ieee80211_vht_cap);
+	memset(pos, 0, sizeof(struct ieee80211_vht_cap));
 
 	/* capability flags */
 	tmp = cpu_to_le32(cap);
@@ -1834,8 +1873,7 @@
 }
 
 u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
-			       struct ieee80211_channel *channel,
-			       enum nl80211_channel_type channel_type,
+			       const struct cfg80211_chan_def *chandef,
 			       u16 prot_mode)
 {
 	struct ieee80211_ht_operation *ht_oper;
@@ -1843,23 +1881,25 @@
 	*pos++ = WLAN_EID_HT_OPERATION;
 	*pos++ = sizeof(struct ieee80211_ht_operation);
 	ht_oper = (struct ieee80211_ht_operation *)pos;
-	ht_oper->primary_chan =
-			ieee80211_frequency_to_channel(channel->center_freq);
-	switch (channel_type) {
-	case NL80211_CHAN_HT40MINUS:
-		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+	ht_oper->primary_chan = ieee80211_frequency_to_channel(
+					chandef->chan->center_freq);
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_160:
+	case NL80211_CHAN_WIDTH_80P80:
+	case NL80211_CHAN_WIDTH_80:
+	case NL80211_CHAN_WIDTH_40:
+		if (chandef->center_freq1 > chandef->chan->center_freq)
+			ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		else
+			ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 		break;
-	case NL80211_CHAN_HT40PLUS:
-		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-		break;
-	case NL80211_CHAN_HT20:
 	default:
 		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
 		break;
 	}
 	if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
-	    channel_type != NL80211_CHAN_NO_HT &&
-	    channel_type != NL80211_CHAN_HT20)
+	    chandef->width != NL80211_CHAN_WIDTH_20_NOHT &&
+	    chandef->width != NL80211_CHAN_WIDTH_20)
 		ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
 
 	ht_oper->operation_mode = cpu_to_le16(prot_mode);
@@ -1873,13 +1913,17 @@
 	return pos + sizeof(struct ieee80211_ht_operation);
 }
 
-enum nl80211_channel_type
-ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
+void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
+				  struct ieee80211_ht_operation *ht_oper,
+				  struct cfg80211_chan_def *chandef)
 {
 	enum nl80211_channel_type channel_type;
 
-	if (!ht_oper)
-		return NL80211_CHAN_NO_HT;
+	if (!ht_oper) {
+		cfg80211_chandef_create(chandef, control_chan,
+					NL80211_CHAN_NO_HT);
+		return;
+	}
 
 	switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
 	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
@@ -1895,7 +1939,7 @@
 		channel_type = NL80211_CHAN_NO_HT;
 	}
 
-	return channel_type;
+	cfg80211_chandef_create(chandef, control_chan, channel_type);
 }
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
@@ -1977,3 +2021,84 @@
 	return ifmgd->ave_beacon_signal;
 }
 EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
+
+u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs)
+{
+	if (!mcs)
+		return 1;
+
+	/* TODO: consider rx_highest */
+
+	if (mcs->rx_mask[3])
+		return 4;
+	if (mcs->rx_mask[2])
+		return 3;
+	if (mcs->rx_mask[1])
+		return 2;
+	return 1;
+}
+
+/**
+ * ieee80211_calculate_rx_timestamp - calculate timestamp in frame
+ * @local: mac80211 hw info struct
+ * @status: RX status
+ * @mpdu_len: total MPDU length (including FCS)
+ * @mpdu_offset: offset into MPDU to calculate timestamp at
+ *
+ * This function calculates the RX timestamp at the given MPDU offset, taking
+ * into account where the device reported the RX timestamp (start or end of
+ * the MPDU). An offset of 0 normalizes the timestamp to the TSF at the
+ * beginning of MPDU reception.
+ */
+u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
+				     struct ieee80211_rx_status *status,
+				     unsigned int mpdu_len,
+				     unsigned int mpdu_offset)
+{
+	u64 ts = status->mactime;
+	struct rate_info ri;
+	u16 rate;
+
+	if (WARN_ON(!ieee80211_have_rx_timestamp(status)))
+		return 0;
+
+	memset(&ri, 0, sizeof(ri));
+
+	/* Fill cfg80211 rate info */
+	if (status->flag & RX_FLAG_HT) {
+		ri.mcs = status->rate_idx;
+		ri.flags |= RATE_INFO_FLAGS_MCS;
+		if (status->flag & RX_FLAG_40MHZ)
+			ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+		if (status->flag & RX_FLAG_SHORT_GI)
+			ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (status->flag & RX_FLAG_VHT) {
+		ri.flags |= RATE_INFO_FLAGS_VHT_MCS;
+		ri.mcs = status->rate_idx;
+		ri.nss = status->vht_nss;
+		if (status->flag & RX_FLAG_40MHZ)
+			ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+		if (status->flag & RX_FLAG_80MHZ)
+			ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
+		if (status->flag & RX_FLAG_80P80MHZ)
+			ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
+		if (status->flag & RX_FLAG_160MHZ)
+			ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+		if (status->flag & RX_FLAG_SHORT_GI)
+			ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else {
+		struct ieee80211_supported_band *sband;
+
+		sband = local->hw.wiphy->bands[status->band];
+		ri.legacy = sband->bitrates[status->rate_idx].bitrate;
+	}
+
+	rate = cfg80211_calculate_bitrate(&ri);
+
+	/* rewind from end of MPDU */
+	if (status->flag & RX_FLAG_MACTIME_END)
+		ts -= mpdu_len * 8 * 10 / rate;
+
+	ts += mpdu_offset * 8 * 10 / rate;
+
+	return ts;
+}
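
The arithmetic in ieee80211_calculate_rx_timestamp() relies on cfg80211_calculate_bitrate() returning the rate in units of 100 kbit/s, so len * 8 * 10 / rate yields microseconds: a 1500-byte MPDU at 54 Mbit/s (rate == 540) spans 1500 * 80 / 540, roughly 222 usec, which is what gets subtracted when the device reports the mactime at the end of the MPDU (RX_FLAG_MACTIME_END). A small sketch of that conversion (the helper name is made up for illustration):

/* duration of 'len' bytes on air at 'rate' (100 kbit/s units), in microseconds */
static inline u64 bytes_to_usecs(unsigned int len, u16 rate)
{
	return (u64)len * 8 * 10 / rate;
}
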
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
new file mode 100644
index 0000000..f311388
--- /dev/null
+++ b/net/mac80211/vht.c
@@ -0,0 +1,35 @@
+/*
+ * VHT handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+
+void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_supported_band *sband,
+					 struct ieee80211_vht_cap *vht_cap_ie,
+					 struct ieee80211_sta_vht_cap *vht_cap)
+{
+	if (WARN_ON_ONCE(!vht_cap))
+		return;
+
+	memset(vht_cap, 0, sizeof(*vht_cap));
+
+	if (!vht_cap_ie || !sband->vht_cap.vht_supported)
+		return;
+
+	vht_cap->vht_supported = true;
+
+	vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info);
+
+	/* Copy peer MCS info, the driver might need them. */
+	memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
+	       sizeof(struct ieee80211_vht_mcs_info));
+}
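
The new vht.c helper pairs with the VHT element parsing added to util.c above: once ieee802_11_parse_elems() has exposed elems->vht_cap_elem, it can be handed to the helper to fill a struct ieee80211_sta_vht_cap. A hedged sketch of such a call site (the surrounding code is illustrative, not part of this merge):

	struct ieee80211_sta_vht_cap vht_cap;

	/* elems filled by ieee802_11_parse_elems(); sband is the band's data */
	if (elems.vht_cap_elem)
		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
						    elems.vht_cap_elem,
						    &vht_cap);
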
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index cea06e9..906f00c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -160,31 +160,37 @@
 	return ieee80211_downgrade_queue(sdata, skb);
 }
 
+/**
+ * ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
+ *
+ * @sdata: local subif
+ * @skb: packet to be updated
+ */
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
 			   struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 *p;
+	u8 ack_policy, tid;
 
-	/* Fill in the QoS header if there is one. */
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
-		u8 *p = ieee80211_get_qos_ctl(hdr);
-		u8 ack_policy, tid;
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return;
 
-		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+	p = ieee80211_get_qos_ctl(hdr);
+	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 
-		/* preserve EOSP bit */
-		ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
+	/* preserve EOSP bit */
+	ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
 
-		if (is_multicast_ether_addr(hdr->addr1) ||
-		    sdata->noack_map & BIT(tid)) {
-			ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
-			info->flags |= IEEE80211_TX_CTL_NO_ACK;
-		}
-
-		/* qos header is 2 bytes */
-		*p++ = ack_policy | tid;
-		*p = ieee80211_vif_is_mesh(&sdata->vif) ?
-			(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
+	if (is_multicast_ether_addr(hdr->addr1) ||
+	    sdata->noack_map & BIT(tid)) {
+		ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
 	}
+
+	/* qos header is 2 bytes */
+	*p++ = ack_policy | tid;
+	*p = ieee80211_vif_is_mesh(&sdata->vif) ?
+		(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
 }
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 778465f..fed899f 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1643,7 +1643,7 @@
 	void *data;
 	int copylen = *len, ret = 0;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 	if (optval != SO_IP_SET)
 		return -EBADF;
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 8b2cffd..0c3b167 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -28,12 +28,11 @@
 config	IP_VS_IPV6
 	bool "IPv6 support for IPVS"
 	depends on IPV6 = y || IP_VS = IPV6
+	select IP6_NF_IPTABLES
 	---help---
-	  Add IPv6 support to IPVS. This is incomplete and might be dangerous.
+	  Add IPv6 support to IPVS.
 
-	  See http://www.mindbasket.com/ipvs for more information.
-
-	  Say N if unsure.
+	  Say Y if unsure.
 
 config	IP_VS_DEBUG
 	bool "IP virtual server debugging"
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 1548df9..30e764a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -308,13 +308,12 @@
 static int
 ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
 			    const struct ip_vs_iphdr *iph,
-			    unsigned int proto_off, int inverse,
-			    struct ip_vs_conn_param *p)
+			    int inverse, struct ip_vs_conn_param *p)
 {
 	__be16 _ports[2], *pptr;
 	struct net *net = skb_net(skb);
 
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (pptr == NULL)
 		return 1;
 
@@ -329,12 +328,11 @@
 
 struct ip_vs_conn *
 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-			const struct ip_vs_iphdr *iph,
-			unsigned int proto_off, int inverse)
+			const struct ip_vs_iphdr *iph, int inverse)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
+	if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
 		return NULL;
 
 	return ip_vs_conn_in_get(&p);
@@ -432,12 +430,11 @@
 
 struct ip_vs_conn *
 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-			 const struct ip_vs_iphdr *iph,
-			 unsigned int proto_off, int inverse)
+			 const struct ip_vs_iphdr *iph, int inverse)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
+	if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
 		return NULL;
 
 	return ip_vs_conn_out_get(&p);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 58918e2..fb45640 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -222,11 +222,10 @@
  */
 static struct ip_vs_conn *
 ip_vs_sched_persist(struct ip_vs_service *svc,
-		    struct sk_buff *skb,
-		    __be16 src_port, __be16 dst_port, int *ignored)
+		    struct sk_buff *skb, __be16 src_port, __be16 dst_port,
+		    int *ignored, struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn *cp = NULL;
-	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
 	struct ip_vs_conn *ct;
 	__be16 dport = 0;		/* destination port to forward */
@@ -236,20 +235,18 @@
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
-
 	/* Mask saddr with the netmask to adjust template granularity */
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6)
-		ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
+		ipv6_addr_prefix(&snet.in6, &iph->saddr.in6, svc->netmask);
 	else
 #endif
-		snet.ip = iph.saddr.ip & svc->netmask;
+		snet.ip = iph->saddr.ip & svc->netmask;
 
 	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
 		      "mnet %s\n",
-		      IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
-		      IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
+		      IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
+		      IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
 		      IP_VS_DBG_ADDR(svc->af, &snet));
 
 	/*
@@ -266,8 +263,8 @@
 	 * is created for other persistent services.
 	 */
 	{
-		int protocol = iph.protocol;
-		const union nf_inet_addr *vaddr = &iph.daddr;
+		int protocol = iph->protocol;
+		const union nf_inet_addr *vaddr = &iph->daddr;
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
@@ -342,14 +339,14 @@
 		dport = dest->port;
 
 	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
-		 && iph.protocol == IPPROTO_UDP)?
+		 && iph->protocol == IPPROTO_UDP) ?
 		IP_VS_CONN_F_ONE_PACKET : 0;
 
 	/*
 	 *    Create a new connection according to the template
 	 */
-	ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
-			      src_port, &iph.daddr, dst_port, &param);
+	ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
+			      src_port, &iph->daddr, dst_port, &param);
 
 	cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
 	if (cp == NULL) {
@@ -392,18 +389,20 @@
  */
 struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-	       struct ip_vs_proto_data *pd, int *ignored)
+	       struct ip_vs_proto_data *pd, int *ignored,
+	       struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_protocol *pp = pd->pp;
 	struct ip_vs_conn *cp = NULL;
-	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
 	__be16 _ports[2], *pptr;
 	unsigned int flags;
 
 	*ignored = 1;
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
-	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
+	/*
+	 * For IPv6 fragments, only the first fragment reaches this point.
+	 */
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (pptr == NULL)
 		return NULL;
 
@@ -423,7 +422,7 @@
 	 *    Do not schedule replies from local real server.
 	 */
 	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
-	    (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
+	    (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
 		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
 			      "Not scheduling reply for existing connection");
 		__ip_vs_conn_put(cp);
@@ -434,7 +433,8 @@
 	 *    Persistent service
 	 */
 	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
-		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
+					   iph);
 
 	*ignored = 0;
 
@@ -456,7 +456,7 @@
 	}
 
 	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
-		 && iph.protocol == IPPROTO_UDP)?
+		 && iph->protocol == IPPROTO_UDP) ?
 		IP_VS_CONN_F_ONE_PACKET : 0;
 
 	/*
@@ -465,9 +465,9 @@
 	{
 		struct ip_vs_conn_param p;
 
-		ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
-				      &iph.saddr, pptr[0], &iph.daddr, pptr[1],
-				      &p);
+		ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
+				      &iph->saddr, pptr[0], &iph->daddr,
+				      pptr[1], &p);
 		cp = ip_vs_conn_new(&p, &dest->addr,
 				    dest->port ? dest->port : pptr[1],
 				    flags, dest, skb->mark);
@@ -496,19 +496,16 @@
  *  no destination is available for a new connection.
  */
 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-		struct ip_vs_proto_data *pd)
+		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
 {
 	__be16 _ports[2], *pptr;
-	struct ip_vs_iphdr iph;
 #ifdef CONFIG_SYSCTL
 	struct net *net;
 	struct netns_ipvs *ipvs;
 	int unicast;
 #endif
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
-
-	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (pptr == NULL) {
 		ip_vs_service_put(svc);
 		return NF_DROP;
@@ -519,10 +516,10 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6)
-		unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
+		unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
 	else
 #endif
-		unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
+		unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
 
 	/* if it is fwmark-based service, the cache_bypass sysctl is up
 	   and the destination is a non-local unicast, then create
@@ -532,7 +529,7 @@
 		int ret;
 		struct ip_vs_conn *cp;
 		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
-				      iph.protocol == IPPROTO_UDP)?
+				      iph->protocol == IPPROTO_UDP) ?
 				      IP_VS_CONN_F_ONE_PACKET : 0;
 		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };
 
@@ -542,9 +539,9 @@
 		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
-					      &iph.saddr, pptr[0],
-					      &iph.daddr, pptr[1], &p);
+			ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
+					      &iph->saddr, pptr[0],
+					      &iph->daddr, pptr[1], &p);
 			cp = ip_vs_conn_new(&p, &daddr, 0,
 					    IP_VS_CONN_F_BYPASS | flags,
 					    NULL, skb->mark);
@@ -559,7 +556,7 @@
 		ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 
 		/* transmit the first SYN packet */
-		ret = cp->packet_xmit(skb, cp, pd->pp);
+		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
 		/* do not touch skb anymore */
 
 		atomic_inc(&cp->in_pkts);
@@ -654,14 +651,6 @@
 	return err;
 }
 
-#ifdef CONFIG_IP_VS_IPV6
-static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
-{
-	/* TODO IPv6: Find out what to do here for IPv6 */
-	return 0;
-}
-#endif
-
 static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
 {
 #ifdef CONFIG_IP_VS_IPV6
@@ -732,10 +721,19 @@
 		    struct ip_vs_conn *cp, int inout)
 {
 	struct ipv6hdr *iph	 = ipv6_hdr(skb);
-	unsigned int icmp_offset = sizeof(struct ipv6hdr);
-	struct icmp6hdr *icmph	 = (struct icmp6hdr *)(skb_network_header(skb) +
-						      icmp_offset);
-	struct ipv6hdr *ciph	 = (struct ipv6hdr *)(icmph + 1);
+	unsigned int icmp_offset = 0;
+	unsigned int offs	 = 0; /* header offset*/
+	int protocol;
+	struct icmp6hdr *icmph;
+	struct ipv6hdr *ciph;
+	unsigned short fragoffs;
+
+	ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
+	icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
+	offs = icmp_offset + sizeof(struct icmp6hdr);
+	ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
+
+	protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
 
 	if (inout) {
 		iph->saddr = cp->vaddr.in6;
@@ -746,10 +744,13 @@
 	}
 
 	/* the TCP/UDP/SCTP port */
-	if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
-	    IPPROTO_SCTP == ciph->nexthdr) {
-		__be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
+	if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
+			  IPPROTO_SCTP == protocol)) {
+		__be16 *ports = (void *)(skb_network_header(skb) + offs);
 
+		IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
+			      ntohs(inout ? ports[1] : ports[0]),
+			      ntohs(inout ? cp->vport : cp->dport));
 		if (inout)
 			ports[1] = cp->vport;
 		else
@@ -898,51 +899,35 @@
 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
 		      "Checking outgoing ICMP for");
 
-	offset += cih->ihl * 4;
-
-	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
+	ip_vs_fill_ip4hdr(cih, &ciph);
+	ciph.len += offset;
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
+	cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
 	if (!cp)
 		return NF_ACCEPT;
 
 	snet.ip = iph->saddr;
 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-				    pp, offset, ihl);
+				    pp, ciph.len, ihl);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
-			     unsigned int hooknum)
+			     unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
 {
-	struct ipv6hdr *iph;
 	struct icmp6hdr	_icmph, *ic;
-	struct ipv6hdr	_ciph, *cih;	/* The ip header contained
-					   within the ICMP */
-	struct ip_vs_iphdr ciph;
+	struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
+	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
-	unsigned int offset;
 	union nf_inet_addr snet;
+	unsigned int writable;
 
 	*related = 1;
-
-	/* reassemble IP fragments */
-	if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
-		if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
-			return NF_STOLEN;
-	}
-
-	iph = ipv6_hdr(skb);
-	offset = sizeof(struct ipv6hdr);
-	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
+	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
 	if (ic == NULL)
 		return NF_DROP;
 
-	IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n",
-		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
-		  &iph->saddr, &iph->daddr);
-
 	/*
 	 * Work through seeing if this is for us.
 	 * These checks are supposed to be in an order that means easy
@@ -950,42 +935,45 @@
 	 * this means that some packets will manage to get a long way
 	 * down this stack and then be rejected, but that's life.
 	 */
-	if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
-	    (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
-	    (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
+	if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
 		*related = 0;
 		return NF_ACCEPT;
 	}
+	/* A fragment header before the ICMP header tells us that this is
+	 * not an error message, since error messages cannot be fragmented.
+	 */
+	if (ipvsh->flags & IP6T_FH_F_FRAG)
+		return NF_DROP;
+
+	IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
+		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
+		  &ipvsh->saddr, &ipvsh->daddr);
 
 	/* Now find the contained IP header */
-	offset += sizeof(_icmph);
-	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
-	if (cih == NULL)
+	ciph.len = ipvsh->len + sizeof(_icmph);
+	ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
+	if (ip6h == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
+	ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
+	ciph.daddr.in6 = ip6h->daddr;
+	/* skip possible IPv6 exthdrs of contained IPv6 packet */
+	ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
+	if (ciph.protocol < 0)
+		return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
 
-	pp = ip_vs_proto_get(cih->nexthdr);
+	pp = ip_vs_proto_get(ciph.protocol);
 	if (!pp)
 		return NF_ACCEPT;
 
-	/* Is the embedded protocol header present? */
-	/* TODO: we don't support fragmentation at the moment anyways */
-	if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
-		return NF_ACCEPT;
-
-	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
-		      "Checking outgoing ICMPv6 for");
-
-	offset += sizeof(struct ipv6hdr);
-
-	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
+	cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
 	if (!cp)
 		return NF_ACCEPT;
 
-	snet.in6 = iph->saddr;
-	return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
-				    pp, offset, sizeof(struct ipv6hdr));
+	snet.in6 = ciph.saddr.in6;
+	writable = ciph.len;
+	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
+				    pp, writable, sizeof(struct ipv6hdr));
 }
 #endif
 
@@ -1018,17 +1006,17 @@
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		struct ip_vs_conn *cp, int ihl)
+		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_protocol *pp = pd->pp;
 
 	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
 
-	if (!skb_make_writable(skb, ihl))
+	if (!skb_make_writable(skb, iph->len))
 		goto drop;
 
 	/* mangle the packet */
-	if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
+	if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
 		goto drop;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1115,17 +1103,22 @@
 	if (!net_ipvs(net)->enable)
 		return NF_ACCEPT;
 
-	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_skb(af, skb, &iph);
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
+		if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+			struct sk_buff *reasm = skb_nfct_reasm(skb);
+			/* Save fw mark for coming frags */
+			reasm->ipvs_property = 1;
+			reasm->mark = skb->mark;
+		}
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
 			int verdict = ip_vs_out_icmp_v6(skb, &related,
-							hooknum);
+							hooknum, &iph);
 
 			if (related)
 				return verdict;
-			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 	} else
 #endif
@@ -1135,7 +1128,6 @@
 
 			if (related)
 				return verdict;
-			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 
 	pd = ip_vs_proto_data_get(net, iph.protocol);
@@ -1145,39 +1137,31 @@
 
 	/* reassemble IP fragments */
 #ifdef CONFIG_IP_VS_IPV6
-	if (af == AF_INET6) {
-		if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
-			if (ip_vs_gather_frags_v6(skb,
-						  ip_vs_defrag_user(hooknum)))
-				return NF_STOLEN;
-		}
-
-		ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
-	} else
+	if (af == AF_INET)
 #endif
 		if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
 			if (ip_vs_gather_frags(skb,
 					       ip_vs_defrag_user(hooknum)))
 				return NF_STOLEN;
 
-			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+			ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
 		}
 
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+	cp = pp->conn_out_get(af, skb, &iph, 0);
 
 	if (likely(cp))
-		return handle_response(af, skb, pd, cp, iph.len);
+		return handle_response(af, skb, pd, cp, &iph);
 	if (sysctl_nat_icmp_send(net) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
 	     pp->protocol == IPPROTO_SCTP)) {
 		__be16 _ports[2], *pptr;
 
-		pptr = skb_header_pointer(skb, iph.len,
-					  sizeof(_ports), _ports);
+		pptr = frag_safe_skb_hp(skb, iph.len,
+					 sizeof(_ports), _ports, &iph);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
 		if (ip_vs_lookup_real_service(net, af, iph.protocol,
@@ -1375,13 +1359,13 @@
 		      "Checking incoming ICMP for");
 
 	offset2 = offset;
-	offset += cih->ihl * 4;
-
-	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
+	ip_vs_fill_ip4hdr(cih, &ciph);
+	ciph.len += offset;
+	offset = ciph.len;
 	/* The embedded headers contain source and dest in reverse order.
 	 * For IPIP this is error for request, not for reply.
 	 */
-	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
+	cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -1450,7 +1434,7 @@
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
 		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
+	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
 
 out:
 	__ip_vs_conn_put(cp);
@@ -1459,38 +1443,24 @@
 }
 
 #ifdef CONFIG_IP_VS_IPV6
-static int
-ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
+static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
+			    unsigned int hooknum, struct ip_vs_iphdr *iph)
 {
 	struct net *net = NULL;
-	struct ipv6hdr *iph;
+	struct ipv6hdr _ip6h, *ip6h;
 	struct icmp6hdr	_icmph, *ic;
-	struct ipv6hdr	_ciph, *cih;	/* The ip header contained
-					   within the ICMP */
-	struct ip_vs_iphdr ciph;
+	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
-	unsigned int offset, verdict;
+	unsigned int offs_ciph, writable, verdict;
 
 	*related = 1;
 
-	/* reassemble IP fragments */
-	if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
-		if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
-			return NF_STOLEN;
-	}
-
-	iph = ipv6_hdr(skb);
-	offset = sizeof(struct ipv6hdr);
-	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
+	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
 	if (ic == NULL)
 		return NF_DROP;
 
-	IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n",
-		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
-		  &iph->saddr, &iph->daddr);
-
 	/*
 	 * Work through seeing if this is for us.
 	 * These checks are supposed to be in an order that means easy
@@ -1498,47 +1468,71 @@
 	 * this means that some packets will manage to get a long way
 	 * down this stack and then be rejected, but that's life.
 	 */
-	if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
-	    (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
-	    (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
+	if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
 		*related = 0;
 		return NF_ACCEPT;
 	}
+	/* A fragment header before the ICMP header tells us this is not
+	 * an error message, since error messages cannot be fragmented.
+	 */
+	if (iph->flags & IP6T_FH_F_FRAG)
+		return NF_DROP;
+
+	IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
+		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
+		  &iph->saddr, &iph->daddr);
 
 	/* Now find the contained IP header */
-	offset += sizeof(_icmph);
-	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
-	if (cih == NULL)
+	ciph.len = iph->len + sizeof(_icmph);
+	offs_ciph = ciph.len; /* Save ip header offset */
+	ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
+	if (ip6h == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
+	ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
+	ciph.daddr.in6 = ip6h->daddr;
+	/* skip possible IPv6 exthdrs of contained IPv6 packet */
+	ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
+	if (ciph.protocol < 0)
+		return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
 
 	net = skb_net(skb);
-	pd = ip_vs_proto_data_get(net, cih->nexthdr);
+	pd = ip_vs_proto_data_get(net, ciph.protocol);
 	if (!pd)
 		return NF_ACCEPT;
 	pp = pd->pp;
 
-	/* Is the embedded protocol header present? */
-	/* TODO: we don't support fragmentation at the moment anyways */
-	if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
+	/* Cannot handle fragmented embedded protocol */
+	if (ciph.fragoffs)
 		return NF_ACCEPT;
 
-	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
+	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
 		      "Checking incoming ICMPv6 for");
 
-	offset += sizeof(struct ipv6hdr);
+	/* The embedded headers contain source and dest in reverse order
+	 * if not from localhost
+	 */
+	cp = pp->conn_in_get(AF_INET6, skb, &ciph,
+			     (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
 
-	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
-	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
 	if (!cp)
 		return NF_ACCEPT;
+	/* VS/TUN, VS/DR and LOCALNODE just let it go */
+	if ((hooknum == NF_INET_LOCAL_OUT) &&
+	    (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
+		__ip_vs_conn_put(cp);
+		return NF_ACCEPT;
+	}
 
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
-	if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
-	    IPPROTO_SCTP == cih->nexthdr)
-		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
+
+	/* Need to mangle contained IPv6 header in ICMPv6 packet */
+	writable = ciph.len;
+	if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
+	    IPPROTO_SCTP == ciph.protocol)
+		writable += 2 * sizeof(__u16); /* Also mangle ports */
+
+	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
 
 	__ip_vs_conn_put(cp);
 
@@ -1574,7 +1568,7 @@
 	if (unlikely((skb->pkt_type != PACKET_HOST &&
 		      hooknum != NF_INET_LOCAL_OUT) ||
 		     !skb_dst(skb))) {
-		ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+		ip_vs_fill_iph_skb(af, skb, &iph);
 		IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
 			      " ignored in hook %u\n",
 			      skb->pkt_type, iph.protocol,
@@ -1586,7 +1580,7 @@
 	if (!net_ipvs(net)->enable)
 		return NF_ACCEPT;
 
-	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_skb(af, skb, &iph);
 
 	/* Bad... Do not break raw sockets */
 	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
@@ -1600,13 +1594,19 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
+		if (!iph.fragoffs && skb_nfct_reasm(skb)) {
+			struct sk_buff *reasm = skb_nfct_reasm(skb);
+			/* Save fw mark for coming frags. */
+			reasm->ipvs_property = 1;
+			reasm->mark = skb->mark;
+		}
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
-			int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum);
+			int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
+						       &iph);
 
 			if (related)
 				return verdict;
-			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 	} else
 #endif
@@ -1616,7 +1616,6 @@
 
 			if (related)
 				return verdict;
-			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 
 	/* Protocol supported? */
@@ -1627,12 +1626,15 @@
 	/*
 	 * Check if the packet belongs to an existing connection entry
 	 */
-	cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
-
-	if (unlikely(!cp)) {
+	cp = pp->conn_in_get(af, skb, &iph, 0);
+	if (unlikely(!cp) && !iph.fragoffs) {
+		/* Non-first fragments need not enter here: fragment zero,
+		 * replayed by nf_defrag_ipv6, will already have created the cp.
+		 */
 		int v;
 
-		if (!pp->conn_schedule(af, skb, pd, &v, &cp))
+		/* Schedule and create new connection entry into &cp */
+		if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
 			return v;
 	}
 
@@ -1640,6 +1642,14 @@
 		/* sorry, all this trouble for a no-hit :) */
 		IP_VS_DBG_PKT(12, af, pp, skb, 0,
 			      "ip_vs_in: packet continues traversal as normal");
+		if (iph.fragoffs && !skb_nfct_reasm(skb)) {
+			/* A fragment that couldn't be mapped to a conn entry
+			 * and has no pointer to a reasm skb means the
+			 * nf_defrag_ipv6 module is not loaded.
+			 */
+			IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+			IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
+		}
 		return NF_ACCEPT;
 	}
 
@@ -1662,7 +1672,7 @@
 	ip_vs_in_stats(cp, skb);
 	ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 	if (cp->packet_xmit)
-		ret = cp->packet_xmit(skb, cp, pp);
+		ret = cp->packet_xmit(skb, cp, pp, &iph);
 		/* do not touch skb anymore */
 	else {
 		IP_VS_DBG_RL("warning: packet_xmit is null");
@@ -1724,6 +1734,38 @@
 #ifdef CONFIG_IP_VS_IPV6
 
 /*
+ * AF_INET6 fragment handling
+ * Copy info from first fragment, to the rest of them.
+ */
+static unsigned int
+ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+		     const struct net_device *in,
+		     const struct net_device *out,
+		     int (*okfn)(struct sk_buff *))
+{
+	struct sk_buff *reasm = skb_nfct_reasm(skb);
+	struct net *net;
+
+	/* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
+	 * ipvs_property is set when checking first fragment
+	 * in ip_vs_in() and ip_vs_out().
+	 */
+	if (reasm)
+		IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
+	if (!reasm || !reasm->ipvs_property)
+		return NF_ACCEPT;
+
+	net = skb_net(skb);
+	if (!net_ipvs(net)->enable)
+		return NF_ACCEPT;
+
+	/* Copy stored fw mark, saved in ip_vs_{in,out} */
+	skb->mark = reasm->mark;
+
+	return NF_ACCEPT;
+}
+
+/*
  *	AF_INET6 handler in NF_INET_LOCAL_IN chain
  *	Schedule and forward packets from remote clients
  */
@@ -1793,8 +1835,10 @@
 {
 	int r;
 	struct net *net;
+	struct ip_vs_iphdr iphdr;
 
-	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+	ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
+	if (iphdr.protocol != IPPROTO_ICMPV6)
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
@@ -1802,7 +1846,7 @@
 	if (!net_ipvs(net)->enable)
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp_v6(skb, &r, hooknum);
+	return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
 }
 #endif
 
@@ -1860,6 +1904,14 @@
 		.priority	= 100,
 	},
 #ifdef CONFIG_IP_VS_IPV6
+	/* After mangle & nat fetch 2:nd fragment and following */
+	{
+		.hook		= ip_vs_preroute_frag6,
+		.owner		= THIS_MODULE,
+		.pf		= NFPROTO_IPV6,
+		.hooknum	= NF_INET_PRE_ROUTING,
+		.priority	= NF_IP6_PRI_NAT_DST + 1,
+	},
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply6,
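
Note on the ip_vs_core.c changes above: the IP header is now parsed once, via
ip_vs_fill_iph_skb(), into a struct ip_vs_iphdr that is passed down to
conn_in_get()/conn_out_get(), conn_schedule() and the packet_xmit handlers instead
of being re-derived at every step. Transport headers behind an IPv6 fragment
boundary are fetched through frag_safe_skb_hp(); the sketch below is an assumption
based on the call sites (skb, offset, len, buffer, &iph), not the in-tree helper,
and illustrates the expected fallback to the nf_defrag_ipv6 reassembled skb:

	/* Sketch only: frag-aware variant of skb_header_pointer() */
	static inline void *
	frag_safe_skb_hp_sketch(const struct sk_buff *skb, int offset, int len,
				void *buffer, const struct ip_vs_iphdr *ipvsh)
	{
		struct sk_buff *reasm = skb_nfct_reasm(skb);

		/* Non-first fragment: read from the reassembled skb, using
		 * the transport offset saved there (thoff_reasm).
		 */
		if (ipvsh->fragoffs && reasm)
			return skb_header_pointer(reasm, ipvsh->thoff_reasm,
						  len, buffer);

		return skb_header_pointer(skb, offset, len, buffer);
	}
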
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c4ee437..ec664cb 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2339,7 +2339,7 @@
 	struct ip_vs_dest_user_kern udest;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
@@ -2632,7 +2632,7 @@
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	BUG_ON(!net);
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
@@ -3699,6 +3699,10 @@
 		tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
 		if (tbl == NULL)
 			return -ENOMEM;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			tbl[0].procname = NULL;
 	} else
 		tbl = vs_vars;
 	/* Initialize sysctl defaults */
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 8b7dca9..7f3b0cc 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -215,7 +215,7 @@
 	struct ip_vs_dh_bucket *tbl;
 	struct ip_vs_iphdr iph;
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index df646cc..fdd89b9 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -479,7 +479,7 @@
 	struct ip_vs_dest *dest = NULL;
 	struct ip_vs_lblc_entry *en;
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -560,6 +560,11 @@
 						GFP_KERNEL);
 		if (ipvs->lblc_ctl_table == NULL)
 			return -ENOMEM;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			ipvs->lblc_ctl_table[0].procname = NULL;
+
 	} else
 		ipvs->lblc_ctl_table = vs_vars_table;
 	ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 570e31e..c03b6a3 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -649,7 +649,7 @@
 	struct ip_vs_dest *dest = NULL;
 	struct ip_vs_lblcr_entry *en;
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -754,6 +754,10 @@
 						GFP_KERNEL);
 		if (ipvs->lblcr_ctl_table == NULL)
 			return -ENOMEM;
+
+		/* Don't export sysctls to unprivileged users */
+		if (net->user_ns != &init_user_ns)
+			ipvs->lblcr_ctl_table[0].procname = NULL;
 	} else
 		ipvs->lblcr_ctl_table = vs_vars_table;
 	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 022e77e..c8beafd 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -82,7 +82,7 @@
 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
 {
 	enum ip_conntrack_info ctinfo;
-	struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 	struct nf_conntrack_tuple new_tuple;
 
 	if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) ||
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 1aa5cac..12475ef 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -68,23 +68,31 @@
 static int
 ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
 {
+	struct sk_buff *reasm = skb_nfct_reasm(skb);
 	struct ip_vs_iphdr iph;
 	unsigned int dataoff, datalen, matchoff, matchlen;
 	const char *dptr;
 	int retc;
 
-	ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_skb(p->af, skb, &iph);
 
 	/* Only useful with UDP */
 	if (iph.protocol != IPPROTO_UDP)
 		return -EINVAL;
+	/* todo: IPv6 fragments:
+	 *       I think this should only be done for the first fragment. /HS
+	 */
+	if (reasm) {
+		skb = reasm;
+		dataoff = iph.thoff_reasm + sizeof(struct udphdr);
+	} else
+		dataoff = iph.len + sizeof(struct udphdr);
 
-	/* No Data ? */
-	dataoff = iph.len + sizeof(struct udphdr);
 	if (dataoff >= skb->len)
 		return -EINVAL;
-
-	if ((retc=skb_linearize(skb)) < 0)
+	/* todo: Check if this will mess up the reasm skb !!! /HS */
+	retc = skb_linearize(skb);
+	if (retc < 0)
 		return retc;
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 50d82186..939f7fb 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -280,17 +280,17 @@
 	if (ih == NULL)
 		sprintf(buf, "TRUNCATED");
 	else if (ih->nexthdr == IPPROTO_FRAGMENT)
-		sprintf(buf, "%pI6->%pI6 frag",	&ih->saddr, &ih->daddr);
+		sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr);
 	else {
 		__be16 _ports[2], *pptr;
 
 		pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
 					  sizeof(_ports), _ports);
 		if (pptr == NULL)
-			sprintf(buf, "TRUNCATED %pI6->%pI6",
+			sprintf(buf, "TRUNCATED %pI6c->%pI6c",
 				&ih->saddr, &ih->daddr);
 		else
-			sprintf(buf, "%pI6:%u->%pI6:%u",
+			sprintf(buf, "%pI6c:%u->%pI6c:%u",
 				&ih->saddr, ntohs(pptr[0]),
 				&ih->daddr, ntohs(pptr[1]));
 	}
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 5b8eb8b..5de3dd3 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -57,7 +57,7 @@
 
 static struct ip_vs_conn *
 ah_esp_conn_in_get(int af, const struct sk_buff *skb,
-		   const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		   const struct ip_vs_iphdr *iph,
 		   int inverse)
 {
 	struct ip_vs_conn *cp;
@@ -85,9 +85,7 @@
 
 static struct ip_vs_conn *
 ah_esp_conn_out_get(int af, const struct sk_buff *skb,
-		    const struct ip_vs_iphdr *iph,
-		    unsigned int proto_off,
-		    int inverse)
+		    const struct ip_vs_iphdr *iph, int inverse)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
@@ -110,7 +108,8 @@
 
 static int
 ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		     int *verdict, struct ip_vs_conn **cpp)
+		     int *verdict, struct ip_vs_conn **cpp,
+		     struct ip_vs_iphdr *iph)
 {
 	/*
 	 * AH/ESP is only related traffic. Pass the packet to IP stack.
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 9f3fb75..746048b 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -10,28 +10,26 @@
 
 static int
 sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		   int *verdict, struct ip_vs_conn **cpp)
+		   int *verdict, struct ip_vs_conn **cpp,
+		   struct ip_vs_iphdr *iph)
 {
 	struct net *net;
 	struct ip_vs_service *svc;
 	sctp_chunkhdr_t _schunkh, *sch;
 	sctp_sctphdr_t *sh, _sctph;
-	struct ip_vs_iphdr iph;
 
-	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
-
-	sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
+	sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
 	if (sh == NULL)
 		return 0;
 
-	sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t),
+	sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
 				 sizeof(_schunkh), &_schunkh);
 	if (sch == NULL)
 		return 0;
 	net = skb_net(skb);
 	if ((sch->type == SCTP_CID_INIT) &&
-	    (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
-				     &iph.daddr, sh->dest))) {
+	    (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
+				     &iph->daddr, sh->dest))) {
 		int ignored;
 
 		if (ip_vs_todrop(net_ipvs(net))) {
@@ -47,10 +45,10 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 		if (!*cpp && ignored <= 0) {
 			if (!ignored)
-				*verdict = ip_vs_leave(svc, skb, pd);
+				*verdict = ip_vs_leave(svc, skb, pd, iph);
 			else {
 				ip_vs_service_put(svc);
 				*verdict = NF_DROP;
@@ -64,20 +62,18 @@
 }
 
 static int
-sctp_snat_handler(struct sk_buff *skb,
-		  struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		  struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	sctp_sctphdr_t *sctph;
-	unsigned int sctphoff;
+	unsigned int sctphoff = iph->len;
 	struct sk_buff *iter;
 	__be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		sctphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		sctphoff = ip_hdrlen(skb);
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
@@ -108,20 +104,18 @@
 }
 
 static int
-sctp_dnat_handler(struct sk_buff *skb,
-		  struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		  struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	sctp_sctphdr_t *sctph;
-	unsigned int sctphoff;
+	unsigned int sctphoff = iph->len;
 	struct sk_buff *iter;
 	__be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		sctphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		sctphoff = ip_hdrlen(skb);
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index cd609cc..9af653a 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -33,16 +33,14 @@
 
 static int
 tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		  int *verdict, struct ip_vs_conn **cpp)
+		  int *verdict, struct ip_vs_conn **cpp,
+		  struct ip_vs_iphdr *iph)
 {
 	struct net *net;
 	struct ip_vs_service *svc;
 	struct tcphdr _tcph, *th;
-	struct ip_vs_iphdr iph;
 
-	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
-
-	th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
+	th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
 	if (th == NULL) {
 		*verdict = NF_DROP;
 		return 0;
@@ -50,8 +48,8 @@
 	net = skb_net(skb);
 	/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
 	if (th->syn &&
-	    (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
-				     &iph.daddr, th->dest))) {
+	    (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
+				     &iph->daddr, th->dest))) {
 		int ignored;
 
 		if (ip_vs_todrop(net_ipvs(net))) {
@@ -68,10 +66,10 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 		if (!*cpp && ignored <= 0) {
 			if (!ignored)
-				*verdict = ip_vs_leave(svc, skb, pd);
+				*verdict = ip_vs_leave(svc, skb, pd, iph);
 			else {
 				ip_vs_service_put(svc);
 				*verdict = NF_DROP;
@@ -128,20 +126,18 @@
 
 
 static int
-tcp_snat_handler(struct sk_buff *skb,
-		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	struct tcphdr *tcph;
-	unsigned int tcphoff;
+	unsigned int tcphoff = iph->len;
 	int oldlen;
 	int payload_csum = 0;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		tcphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		tcphoff = ip_hdrlen(skb);
 	oldlen = skb->len - tcphoff;
 
 	/* csum_check requires unshared skb */
@@ -208,20 +204,18 @@
 
 
 static int
-tcp_dnat_handler(struct sk_buff *skb,
-		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	struct tcphdr *tcph;
-	unsigned int tcphoff;
+	unsigned int tcphoff = iph->len;
 	int oldlen;
 	int payload_csum = 0;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		tcphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		tcphoff = ip_hdrlen(skb);
 	oldlen = skb->len - tcphoff;
 
 	/* csum_check requires unshared skb */
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 2fedb2d..503a842 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -30,23 +30,22 @@
 
 static int
 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		  int *verdict, struct ip_vs_conn **cpp)
+		  int *verdict, struct ip_vs_conn **cpp,
+		  struct ip_vs_iphdr *iph)
 {
 	struct net *net;
 	struct ip_vs_service *svc;
 	struct udphdr _udph, *uh;
-	struct ip_vs_iphdr iph;
 
-	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
-
-	uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
+	/* For IPv6 fragments, only the first fragment will hit this */
+	uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
 	if (uh == NULL) {
 		*verdict = NF_DROP;
 		return 0;
 	}
 	net = skb_net(skb);
-	svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
-				&iph.daddr, uh->dest);
+	svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
+				&iph->daddr, uh->dest);
 	if (svc) {
 		int ignored;
 
@@ -64,10 +63,10 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 		if (!*cpp && ignored <= 0) {
 			if (!ignored)
-				*verdict = ip_vs_leave(svc, skb, pd);
+				*verdict = ip_vs_leave(svc, skb, pd, iph);
 			else {
 				ip_vs_service_put(svc);
 				*verdict = NF_DROP;
@@ -125,20 +124,18 @@
 
 
 static int
-udp_snat_handler(struct sk_buff *skb,
-		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	struct udphdr *udph;
-	unsigned int udphoff;
+	unsigned int udphoff = iph->len;
 	int oldlen;
 	int payload_csum = 0;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		udphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		udphoff = ip_hdrlen(skb);
 	oldlen = skb->len - udphoff;
 
 	/* csum_check requires unshared skb */
@@ -210,20 +207,18 @@
 
 
 static int
-udp_dnat_handler(struct sk_buff *skb,
-		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
 	struct udphdr *udph;
-	unsigned int udphoff;
+	unsigned int udphoff = iph->len;
 	int oldlen;
 	int payload_csum = 0;
 
 #ifdef CONFIG_IP_VS_IPV6
-	if (cp->af == AF_INET6)
-		udphoff = sizeof(struct ipv6hdr);
-	else
+	if (cp->af == AF_INET6 && iph->fragoffs)
+		return 1;
 #endif
-		udphoff = ip_hdrlen(skb);
 	oldlen = skb->len - udphoff;
 
 	/* csum_check requires unshared skb */
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 08dbdd5..d6bf20d 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -159,7 +159,7 @@
 			     svc->fwmark, msg);
 #ifdef CONFIG_IP_VS_IPV6
 	} else if (svc->af == AF_INET6) {
-		IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
 			     svc->scheduler->name,
 			     ip_vs_proto_name(svc->protocol),
 			     &svc->addr.in6, ntohs(svc->port), msg);
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0512652..e331269 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -228,7 +228,7 @@
 	struct ip_vs_sh_bucket *tbl;
 	struct ip_vs_iphdr iph;
 
-	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
 
 	IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index cc4c809..ee6b7a9 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -338,7 +338,7 @@
 	local = __ip_vs_is_local_route6(rt);
 	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
 	      rt_mode)) {
-		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
+		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
 			     local ? "local":"non-local", daddr);
 		dst_release(&rt->dst);
 		return NULL;
@@ -346,8 +346,8 @@
 	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
 	    !((ort = (struct rt6_info *) skb_dst(skb)) &&
 	      __ip_vs_is_local_route6(ort))) {
-		IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
-			     "requires NAT method, dest: %pI6\n",
+		IP_VS_DBG_RL("Redirect from non-local address %pI6c to local "
+			     "requires NAT method, dest: %pI6c\n",
 			     &ipv6_hdr(skb)->daddr, daddr);
 		dst_release(&rt->dst);
 		return NULL;
@@ -355,8 +355,8 @@
 	if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
 		     ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
 				    IPV6_ADDR_LOOPBACK)) {
-		IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
-			     "to non-local address, dest: %pI6\n",
+		IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c "
+			     "to non-local address, dest: %pI6c\n",
 			     &ipv6_hdr(skb)->saddr, daddr);
 		dst_release(&rt->dst);
 		return NULL;
@@ -427,7 +427,7 @@
  */
 int
 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-		struct ip_vs_protocol *pp)
+		struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	/* we do not touch skb and do not need pskb ptr */
 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
@@ -441,7 +441,7 @@
  */
 int
 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-		  struct ip_vs_protocol *pp)
+		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	struct rtable *rt;			/* Route to the other host */
 	struct iphdr  *iph = ip_hdr(skb);
@@ -496,16 +496,16 @@
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		     struct ip_vs_protocol *pp)
+		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
 {
 	struct rt6_info *rt;			/* Route to the other host */
-	struct ipv6hdr  *iph = ipv6_hdr(skb);
 	int    mtu;
 
 	EnterFunction(10);
 
-	if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0,
-					 IP_VS_RT_MODE_NON_LOCAL)))
+	rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0,
+				   IP_VS_RT_MODE_NON_LOCAL);
+	if (!rt)
 		goto tx_error_icmp;
 
 	/* MTU checking */
@@ -516,7 +516,9 @@
 
 			skb->dev = net->loopback_dev;
 		}
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		/* only send ICMP too big on first fragment */
+		if (!iph->fragoffs)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -559,7 +561,7 @@
  */
 int
 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-	       struct ip_vs_protocol *pp)
+	       struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	struct rtable *rt;		/* Route to the other host */
 	int mtu;
@@ -592,7 +594,7 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
-		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
 			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
@@ -629,7 +631,7 @@
 		goto tx_error_put;
 
 	/* mangle the packet */
-	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
+	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
 		goto tx_error_put;
 	ip_hdr(skb)->daddr = cp->daddr.ip;
 	ip_send_check(ip_hdr(skb));
@@ -677,7 +679,7 @@
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		  struct ip_vs_protocol *pp)
+		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
 {
 	struct rt6_info *rt;		/* Route to the other host */
 	int mtu;
@@ -686,10 +688,9 @@
 	EnterFunction(10);
 
 	/* check if it is a connection of no-client-port */
-	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
+	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) {
 		__be16 _pt, *p;
-		p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
-				       sizeof(_pt), &_pt);
+		p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt);
 		if (p == NULL)
 			goto tx_error;
 		ip_vs_conn_fill_cport(cp, *p);
@@ -709,7 +710,7 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
-		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
 			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
@@ -737,7 +738,9 @@
 
 			skb->dev = net->loopback_dev;
 		}
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		/* only send ICMP too big on first fragment */
+		if (!iph->fragoffs)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
 				 "ip_vs_nat_xmit_v6(): frag needed for");
 		goto tx_error_put;
@@ -751,7 +754,7 @@
 		goto tx_error_put;
 
 	/* mangle the packet */
-	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
+	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph))
 		goto tx_error;
 	ipv6_hdr(skb)->daddr = cp->daddr.in6;
 
@@ -812,7 +815,7 @@
  */
 int
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-		  struct ip_vs_protocol *pp)
+		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
 	struct rtable *rt;			/* Route to the other host */
@@ -932,7 +935,7 @@
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		     struct ip_vs_protocol *pp)
+		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	struct rt6_info *rt;		/* Route to the other host */
 	struct in6_addr saddr;		/* Source for tunnel */
@@ -972,7 +975,9 @@
 
 			skb->dev = net->loopback_dev;
 		}
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		/* only send ICMP too big on first fragment */
+		if (!ipvsh->fragoffs)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
 	}
@@ -1053,7 +1058,7 @@
  */
 int
 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-	      struct ip_vs_protocol *pp)
+	      struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
 	struct rtable *rt;			/* Route to the other host */
 	struct iphdr  *iph = ip_hdr(skb);
@@ -1115,7 +1120,7 @@
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		 struct ip_vs_protocol *pp)
+		 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
 {
 	struct rt6_info *rt;			/* Route to the other host */
 	int    mtu;
@@ -1139,7 +1144,9 @@
 
 			skb->dev = net->loopback_dev;
 		}
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		/* only send ICMP too big on first fragment */
+		if (!iph->fragoffs)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -1183,7 +1190,8 @@
  */
 int
 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-		struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
+		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
+		struct ip_vs_iphdr *iph)
 {
 	struct rtable	*rt;	/* Route to the other host */
 	int mtu;
@@ -1198,7 +1206,7 @@
 	   translate address/port back */
 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
 		if (cp->packet_xmit)
-			rc = cp->packet_xmit(skb, cp, pp);
+			rc = cp->packet_xmit(skb, cp, pp, iph);
 		else
 			rc = NF_ACCEPT;
 		/* do not touch skb anymore */
@@ -1227,7 +1235,7 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
-		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
 			IP_VS_DBG(10, "%s(): "
@@ -1304,7 +1312,8 @@
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
+		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
+		struct ip_vs_iphdr *iph)
 {
 	struct rt6_info	*rt;	/* Route to the other host */
 	int mtu;
@@ -1319,7 +1328,7 @@
 	   translate address/port back */
 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
 		if (cp->packet_xmit)
-			rc = cp->packet_xmit(skb, cp, pp);
+			rc = cp->packet_xmit(skb, cp, pp, iph);
 		else
 			rc = NF_ACCEPT;
 		/* do not touch skb anymore */
@@ -1347,7 +1356,7 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
-		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
 			IP_VS_DBG(10, "%s(): "
@@ -1375,7 +1384,9 @@
 
 			skb->dev = net->loopback_dev;
 		}
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		/* only send ICMP too big on first fragment */
+		if (!iph->fragoffs)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
 	}
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index d61e078..7df424e 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,6 +69,10 @@
 
 	table[0].data = &net->ct.sysctl_acct;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
 							 table);
 	if (!net->ct.acct_sysctl_header) {
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index de9781b..faa978f 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -196,6 +196,10 @@
 	table[0].data = &net->ct.sysctl_events;
 	table[1].data = &net->ct.sysctl_events_retry_timeout;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->ct.event_sysctl_header =
 		register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.event_sysctl_header) {
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index c4bc637..884f2b3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -64,6 +64,10 @@
 
 	table[0].data = &net->ct.sysctl_auto_assign_helper;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->ct.helper_sysctl_header =
 		register_net_sysctl(net, "net/netfilter", table);
 
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 6535326..a8ae287 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -815,7 +815,7 @@
 };
 #endif /* CONFIG_SYSCTL */
 
-static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
 				     struct dccp_net *dn)
 {
 #ifdef CONFIG_SYSCTL
@@ -836,6 +836,10 @@
 	pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
 	pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
 	pn->ctl_table[7].data = &dn->dccp_loose;
+
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		pn->ctl_table[0].procname = NULL;
 #endif
 	return 0;
 }
@@ -857,7 +861,7 @@
 		dn->dccp_timeout[CT_DCCP_TIMEWAIT]	= 2 * DCCP_MSL;
 	}
 
-	return dccp_kmemdup_sysctl_table(pn, dn);
+	return dccp_kmemdup_sysctl_table(net, pn, dn);
 }
 
 static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 9b39432..363285d 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -489,6 +489,10 @@
 	table[3].data = &net->ct.sysctl_checksum;
 	table[4].data = &net->ct.sysctl_log_invalid;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.sysctl_header)
 		goto out_unregister_netfilter;
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index dbb364f..7ea8026 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,6 +51,10 @@
 
 	table[0].data = &net->ct.sysctl_tstamp;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->ct.tstamp_sysctl_header = register_net_sysctl(net,	"net/netfilter",
 							   table);
 	if (!net->ct.tstamp_sysctl_header) {
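
Each of the conntrack sysctl registrations above repeats the same two-line guard:
when the netns belongs to a user namespace other than init_user_ns, procname of the
first table entry is cleared, which terminates the ctl_table before any entry and
so hides the whole block from unprivileged containers. A hedged sketch of the
pattern follows; the frob_* names and the net fields are hypothetical, used only to
illustrate the idiom:

	#include <linux/slab.h>
	#include <linux/sysctl.h>
	#include <net/net_namespace.h>

	static struct ctl_table frob_table[] = {
		{
			.procname	= "frob_enable",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* table is terminated by an empty entry */
	};

	static int frob_net_init(struct net *net)
	{
		struct ctl_table *table;

		table = kmemdup(frob_table, sizeof(frob_table), GFP_KERNEL);
		if (table == NULL)
			return -ENOMEM;

		table[0].data = &net->frob.sysctl_enable;	/* hypothetical field */

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;

		net->frob.sysctl_header = register_net_sysctl(net, "net/frob", table);
		if (!net->frob.sysctl_header) {
			kfree(table);
			return -ENOMEM;
		}

		return 0;
	}
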
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index ffb92c0..58a09b7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -138,7 +138,7 @@
 	const struct nfnetlink_subsystem *ss;
 	int type, err;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	/* All the messages must at least contain nfgenmsg */
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index bb10b07..8d47c37 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -67,7 +67,7 @@
 		goto out;
 	}
 
-	ip_vs_fill_iphdr(family, skb_network_header(skb), &iph);
+	ip_vs_fill_iph_skb(family, skb, &iph);
 
 	if (data->bitmask & XT_IPVS_PROTO)
 		if ((iph.protocol == data->l4proto) ^
@@ -85,7 +85,7 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
+	cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */);
 	if (unlikely(cp == NULL)) {
 		match = false;
 		goto out;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4da797f..c8a1eb6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -612,7 +612,7 @@
 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
-	       capable(CAP_NET_ADMIN);
+		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
 }
 
 static void
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 8d8d9bc..60c3bbb 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -3,8 +3,8 @@
 #
 
 menuconfig NFC
-	depends on NET && EXPERIMENTAL
-	tristate "NFC subsystem support (EXPERIMENTAL)"
+	depends on NET
+	tristate "NFC subsystem support"
 	default n
 	help
 	  Say Y here if you want to build support for NFC (Near field
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 479bee3..aa64ea4 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -40,6 +40,9 @@
 int nfc_devlist_generation;
 DEFINE_MUTEX(nfc_devlist_mutex);
 
+/* NFC device ID bitmap */
+static DEFINE_IDA(nfc_index_ida);
+
 /**
  * nfc_dev_up - turn on the NFC device
  *
@@ -181,6 +184,7 @@
 
 	dev->ops->stop_poll(dev);
 	dev->polling = false;
+	dev->rf_mode = NFC_RF_NONE;
 
 error:
 	device_unlock(&dev->dev);
@@ -194,7 +198,7 @@
 	if (dev->n_targets == 0)
 		return NULL;
 
-	for (i = 0; i < dev->n_targets ; i++) {
+	for (i = 0; i < dev->n_targets; i++) {
 		if (dev->targets[i].idx == target_idx)
 			return &dev->targets[i];
 	}
@@ -274,12 +278,14 @@
 	if (!rc) {
 		dev->dep_link_up = false;
 		dev->active_target = NULL;
+		dev->rf_mode = NFC_RF_NONE;
 		nfc_llcp_mac_is_down(dev);
 		nfc_genl_dep_link_down_event(dev);
 	}
 
 error:
 	device_unlock(&dev->dev);
+
 	return rc;
 }
 
@@ -503,6 +509,7 @@
 int nfc_tm_deactivated(struct nfc_dev *dev)
 {
 	dev->dep_link_up = false;
+	dev->rf_mode = NFC_RF_NONE;
 
 	return nfc_genl_tm_deactivated(dev);
 }
@@ -697,6 +704,8 @@
 
 	if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
 		rc = dev->ops->check_presence(dev, dev->active_target);
+		if (rc == -EOPNOTSUPP)
+			goto exit;
 		if (!rc) {
 			mod_timer(&dev->check_pres_timer, jiffies +
 				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
@@ -708,6 +717,7 @@
 		}
 	}
 
+exit:
 	device_unlock(&dev->dev);
 }
 
@@ -753,7 +763,6 @@
 				    u32 supported_protocols,
 				    int tx_headroom, int tx_tailroom)
 {
-	static atomic_t dev_no = ATOMIC_INIT(0);
 	struct nfc_dev *dev;
 
 	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
@@ -767,11 +776,6 @@
 	if (!dev)
 		return NULL;
 
-	dev->dev.class = &nfc_class;
-	dev->idx = atomic_inc_return(&dev_no) - 1;
-	dev_set_name(&dev->dev, "nfc%d", dev->idx);
-	device_initialize(&dev->dev);
-
 	dev->ops = ops;
 	dev->supported_protocols = supported_protocols;
 	dev->tx_headroom = tx_headroom;
@@ -779,6 +783,7 @@
 
 	nfc_genl_data_init(&dev->genl_data);
 
+	dev->rf_mode = NFC_RF_NONE;
 
 	/* first generation must not be 0 */
 	dev->targets_generation = 1;
@@ -806,6 +811,14 @@
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
+	dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+	if (dev->idx < 0)
+		return dev->idx;
+
+	dev->dev.class = &nfc_class;
+	dev_set_name(&dev->dev, "nfc%d", dev->idx);
+	device_initialize(&dev->dev);
+
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
 	rc = device_add(&dev->dev);
@@ -834,10 +847,12 @@
  */
 void nfc_unregister_device(struct nfc_dev *dev)
 {
-	int rc;
+	int rc, id;
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
+	id = dev->idx;
+
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
 
@@ -856,6 +871,8 @@
 		pr_debug("The userspace won't be notified that the device %s was removed\n",
 			 dev_name(&dev->dev));
 
+	ida_simple_remove(&nfc_index_ida, id);
+
 }
 EXPORT_SYMBOL(nfc_unregister_device);
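
The nfc/core.c hunks above replace the static atomic counter with an IDA for the
nfc%d index: the id is now allocated in nfc_register_device() and returned to the
pool in nfc_unregister_device(), so indices of removed devices can be reused. The
allocate/release pairing, reduced to a sketch with illustrative names (not the NFC
core itself):

	#include <linux/device.h>
	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	struct example_dev {
		struct device dev;
		int idx;
	};

	static int example_register(struct example_dev *ed)
	{
		int rc;

		ed->idx = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
		if (ed->idx < 0)
			return ed->idx;		/* -ENOMEM or -ENOSPC */

		dev_set_name(&ed->dev, "example%d", ed->idx);
		device_initialize(&ed->dev);

		rc = device_add(&ed->dev);
		if (rc < 0)
			ida_simple_remove(&example_ida, ed->idx);

		return rc;
	}

	static void example_unregister(struct example_dev *ed)
	{
		int id = ed->idx;

		device_del(&ed->dev);
		ida_simple_remove(&example_ida, id);
	}
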
 
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 71c6a70..7d99410 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -257,16 +257,16 @@
 	*result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
 				      NFC_HCI_ADM_CREATE_PIPE,
 				      (u8 *) &params, sizeof(params), &skb);
-	if (*result == 0) {
-		resp = (struct hci_create_pipe_resp *)skb->data;
-		pipe = resp->pipe;
-		kfree_skb(skb);
-
-		pr_debug("pipe created=%d\n", pipe);
-
-		return pipe;
-	} else
+	if (*result < 0)
 		return NFC_HCI_INVALID_PIPE;
+
+	resp = (struct hci_create_pipe_resp *)skb->data;
+	pipe = resp->pipe;
+	kfree_skb(skb);
+
+	pr_debug("pipe created=%d\n", pipe);
+
+	return pipe;
 }
 
 static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
@@ -279,8 +279,6 @@
 
 static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
 {
-	int r;
-
 	u8 param[2];
 
 	/* TODO: Find out what the identity reference data is
@@ -288,10 +286,8 @@
 
 	pr_debug("\n");
 
-	r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
-				NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
-
-	return 0;
+	return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+				   NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
 }
 
 int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
@@ -348,7 +344,7 @@
 		return -EADDRINUSE;
 
 	if (pipe != NFC_HCI_INVALID_PIPE)
-		goto pipe_is_open;
+		goto open_pipe;
 
 	switch (dest_gate) {
 	case NFC_HCI_LINK_MGMT_GATE:
@@ -365,6 +361,7 @@
 		break;
 	}
 
+open_pipe:
 	r = nfc_hci_open_pipe(hdev, pipe);
 	if (r < 0) {
 		if (pipe_created)
@@ -375,7 +372,6 @@
 		return r;
 	}
 
-pipe_is_open:
 	hdev->gate2pipe[dest_gate] = pipe;
 
 	return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 5fbb6e4..7bea574 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -33,17 +33,20 @@
 /* Largest headroom needed for outgoing HCI commands */
 #define HCI_CMDS_HEADROOM 1
 
-static int nfc_hci_result_to_errno(u8 result)
+int nfc_hci_result_to_errno(u8 result)
 {
 	switch (result) {
 	case NFC_HCI_ANY_OK:
 		return 0;
+	case NFC_HCI_ANY_E_REG_PAR_UNKNOWN:
+		return -EOPNOTSUPP;
 	case NFC_HCI_ANY_E_TIMEOUT:
 		return -ETIME;
 	default:
 		return -1;
 	}
 }
+EXPORT_SYMBOL(nfc_hci_result_to_errno);
 
 static void nfc_hci_msg_tx_work(struct work_struct *work)
 {
@@ -65,8 +68,9 @@
 							  -ETIME);
 			kfree(hdev->cmd_pending_msg);
 			hdev->cmd_pending_msg = NULL;
-		} else
+		} else {
 			goto exit;
+		}
 	}
 
 next_msg:
@@ -166,7 +170,7 @@
 	kfree_skb(skb);
 }
 
-static u32 nfc_hci_sak_to_protocol(u8 sak)
+u32 nfc_hci_sak_to_protocol(u8 sak)
 {
 	switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
 	case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
@@ -181,8 +185,9 @@
 		return 0xffffffff;
 	}
 }
+EXPORT_SYMBOL(nfc_hci_sak_to_protocol);
 
-static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
+int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
 {
 	struct nfc_target *targets;
 	struct sk_buff *atqa_skb = NULL;
@@ -263,7 +268,9 @@
 		break;
 	}
 
-	targets->hci_reader_gate = gate;
+	/* If the driver set a new gate, skip the old one */
+	if (targets->hci_reader_gate == 0x00)
+		targets->hci_reader_gate = gate;
 
 	r = nfc_targets_found(hdev->ndev, targets, 1);
 
@@ -275,11 +282,18 @@
 
 	return r;
 }
+EXPORT_SYMBOL(nfc_hci_target_discovered);
 
 void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
 			    struct sk_buff *skb)
 {
 	int r = 0;
+	u8 gate = nfc_hci_pipe2gate(hdev, pipe);
+
+	if (gate == 0xff) {
+		pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
+		goto exit;
+	}
 
 	switch (event) {
 	case NFC_HCI_EVT_TARGET_DISCOVERED:
@@ -303,12 +317,14 @@
 			goto exit;
 		}
 
-		r = nfc_hci_target_discovered(hdev,
-					      nfc_hci_pipe2gate(hdev, pipe));
+		r = nfc_hci_target_discovered(hdev, gate);
 		break;
 	default:
-		/* TODO: Unknown events are hardware specific
-		 * pass them to the driver (needs a new hci_ops) */
+		if (hdev->ops->event_received) {
+			hdev->ops->event_received(hdev, gate, event, skb);
+			return;
+		}
+
 		break;
 	}
 
@@ -410,6 +426,10 @@
 
 	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
 			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+	if (r == -EOPNOTSUPP) {
+		pr_info("Software/Hardware info not available\n");
+		return 0;
+	}
 	if (r < 0)
 		return r;
 
@@ -527,7 +547,8 @@
 		return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
 	else
 		return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
-				       NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+					  NFC_HCI_EVT_READER_REQUESTED,
+					  NULL, 0);
 }
 
 static void hci_stop_poll(struct nfc_dev *nfc_dev)
@@ -538,6 +559,28 @@
 			   NFC_HCI_EVT_END_OPERATION, NULL, 0);
 }
 
+static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
+				__u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hdev->ops->dep_link_up)
+		return hdev->ops->dep_link_up(hdev, target, comm_mode,
+						gb, gb_len);
+
+	return 0;
+}
+
+static int hci_dep_link_down(struct nfc_dev *nfc_dev)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hdev->ops->dep_link_down)
+		return hdev->ops->dep_link_down(hdev);
+
+	return 0;
+}
+
 static int hci_activate_target(struct nfc_dev *nfc_dev,
 			       struct nfc_target *target, u32 protocol)
 {
@@ -586,8 +629,8 @@
 	switch (target->hci_reader_gate) {
 	case NFC_HCI_RF_READER_A_GATE:
 	case NFC_HCI_RF_READER_B_GATE:
-		if (hdev->ops->data_exchange) {
-			r = hdev->ops->data_exchange(hdev, target, skb, cb,
+		if (hdev->ops->im_transceive) {
+			r = hdev->ops->im_transceive(hdev, target, skb, cb,
 						     cb_context);
 			if (r <= 0)	/* handled */
 				break;
@@ -604,14 +647,14 @@
 					   skb->len, hci_transceive_cb, hdev);
 		break;
 	default:
-		if (hdev->ops->data_exchange) {
-			r = hdev->ops->data_exchange(hdev, target, skb, cb,
+		if (hdev->ops->im_transceive) {
+			r = hdev->ops->im_transceive(hdev, target, skb, cb,
 						     cb_context);
 			if (r == 1)
 				r = -ENOTSUPP;
-		}
-		else
+		} else {
 			r = -ENOTSUPP;
+		}
 		break;
 	}
 
@@ -620,6 +663,16 @@
 	return r;
 }
 
+static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hdev->ops->tm_send)
+		return hdev->ops->tm_send(hdev, skb);
+	else
+		return -ENOTSUPP;
+}
+
 static int hci_check_presence(struct nfc_dev *nfc_dev,
 			      struct nfc_target *target)
 {
@@ -723,9 +776,12 @@
 	.dev_down = hci_dev_down,
 	.start_poll = hci_start_poll,
 	.stop_poll = hci_stop_poll,
+	.dep_link_up = hci_dep_link_up,
+	.dep_link_down = hci_dep_link_down,
 	.activate_target = hci_activate_target,
 	.deactivate_target = hci_deactivate_target,
 	.im_transceive = hci_transceive,
+	.tm_send = hci_tm_send,
 	.check_presence = hci_check_presence,
 };
 
@@ -848,7 +904,7 @@
 }
 EXPORT_SYMBOL(nfc_hci_driver_failure);
 
-void inline nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
 {
 	nfc_llc_rcv_from_drv(hdev->llc, skb);
 }
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index ae1205d..fe5e966 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -72,7 +72,7 @@
 	llc_engine->ops = ops;
 
 	INIT_LIST_HEAD(&llc_engine->entry);
-	list_add_tail (&llc_engine->entry, &llc_engines);
+	list_add_tail(&llc_engine->entry, &llc_engines);
 
 	return 0;
 }
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index 01cbc72..27b313b 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -634,9 +634,9 @@
 			r = llc_shdlc_connect_initiate(shdlc);
 		else
 			r = -ETIME;
-		if (r < 0)
+		if (r < 0) {
 			llc_shdlc_connect_complete(shdlc, r);
-		else {
+		} else {
 			mod_timer(&shdlc->connect_timer, jiffies +
 				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
 
@@ -682,9 +682,8 @@
 			llc_shdlc_handle_send_queue(shdlc);
 		}
 
-		if (shdlc->hard_fault) {
+		if (shdlc->hard_fault)
 			shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
-		}
 		break;
 	default:
 		break;
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig
index fbf5e81..a1a41cd 100644
--- a/net/nfc/llcp/Kconfig
+++ b/net/nfc/llcp/Kconfig
@@ -1,6 +1,6 @@
 config NFC_LLCP
-       depends on NFC && EXPERIMENTAL
-       bool "NFC LLCP support (EXPERIMENTAL)"
+       depends on NFC
+       bool "NFC LLCP support"
        default n
        help
 	 Say Y here if you want to build support for a kernel NFC LLCP
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index c45ccd6..df24be4 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -261,7 +261,6 @@
 	struct sk_buff *skb;
 	struct nfc_dev *dev;
 	struct nfc_llcp_local *local;
-	u16 size = 0;
 
 	pr_debug("Sending DISC\n");
 
@@ -273,17 +272,10 @@
 	if (dev == NULL)
 		return -ENODEV;
 
-	size += LLCP_HEADER_SIZE;
-	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
-
-	skb = alloc_skb(size, GFP_ATOMIC);
+	skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
-
-	skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
-
 	skb_queue_tail(&local->tx_queue, skb);
 
 	return 0;
@@ -324,8 +316,7 @@
 	struct sk_buff *skb;
 	u8 *service_name_tlv = NULL, service_name_tlv_length;
 	u8 *miux_tlv = NULL, miux_tlv_length;
-	u8 *rw_tlv = NULL, rw_tlv_length, rw;
-	__be16 miux;
+	u8 *rw_tlv = NULL, rw_tlv_length;
 	int err;
 	u16 size = 0;
 
@@ -343,13 +334,11 @@
 		size += service_name_tlv_length;
 	}
 
-	miux = cpu_to_be16(LLCP_MAX_MIUX);
-	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_tlv_length);
 	size += miux_tlv_length;
 
-	rw = LLCP_MAX_RW;
-	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
 	size += rw_tlv_length;
 
 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -386,8 +375,7 @@
 	struct nfc_llcp_local *local;
 	struct sk_buff *skb;
 	u8 *miux_tlv = NULL, miux_tlv_length;
-	u8 *rw_tlv = NULL, rw_tlv_length, rw;
-	__be16 miux;
+	u8 *rw_tlv = NULL, rw_tlv_length;
 	int err;
 	u16 size = 0;
 
@@ -397,13 +385,11 @@
 	if (local == NULL)
 		return -ENODEV;
 
-	miux = cpu_to_be16(LLCP_MAX_MIUX);
-	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_tlv_length);
 	size += miux_tlv_length;
 
-	rw = LLCP_MAX_RW;
-	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
 	size += rw_tlv_length;
 
 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -428,6 +414,52 @@
 	return err;
 }
 
+int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap)
+{
+	struct sk_buff *skb;
+	struct nfc_dev *dev;
+	u8 *sdres_tlv = NULL, sdres_tlv_length, sdres[2];
+	u16 size = 0;
+
+	pr_debug("Sending SNL tid 0x%x sap 0x%x\n", tid, sap);
+
+	if (local == NULL)
+		return -ENODEV;
+
+	dev = local->dev;
+	if (dev == NULL)
+		return -ENODEV;
+
+	sdres[0] = tid;
+	sdres[1] = sap;
+	sdres_tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, sdres, 0,
+				       &sdres_tlv_length);
+	if (sdres_tlv == NULL)
+		return -ENOMEM;
+
+	size += LLCP_HEADER_SIZE;
+	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+	size += sdres_tlv_length;
+
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (skb == NULL) {
+		kfree(sdres_tlv);
+		return -ENOMEM;
+	}
+
+	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+	skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);
+
+	memcpy(skb_put(skb, sdres_tlv_length), sdres_tlv, sdres_tlv_length);
+
+	skb_queue_tail(&local->tx_queue, skb);
+
+	kfree(sdres_tlv);
+
+	return 0;
+}
+
 int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
 {
 	struct sk_buff *skb;
@@ -496,6 +528,23 @@
 	if (local == NULL)
 		return -ENODEV;
 
+	/* Remote is ready but has not acknowledged our frames */
+	if ((sock->remote_ready &&
+	    skb_queue_len(&sock->tx_pending_queue) >= sock->rw &&
+	    skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
+		pr_err("Pending queue is full %d frames\n",
+		       skb_queue_len(&sock->tx_pending_queue));
+		return -ENOBUFS;
+	}
+
+	/* Remote is not ready and we've been queueing enough frames */
+	if ((!sock->remote_ready &&
+	     skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
+		pr_err("Tx queue is full %d frames\n",
+		       skb_queue_len(&sock->tx_queue));
+		return -ENOBUFS;
+	}
+
 	msg_data = kzalloc(len, GFP_KERNEL);
 	if (msg_data == NULL)
 		return -ENOMEM;
@@ -541,6 +590,63 @@
 	return len;
 }
 
+int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
+			   struct msghdr *msg, size_t len)
+{
+	struct sk_buff *pdu;
+	struct nfc_llcp_local *local;
+	size_t frag_len = 0, remaining_len;
+	u8 *msg_ptr, *msg_data;
+	int err;
+
+	pr_debug("Send UI frame len %zd\n", len);
+
+	local = sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	msg_data = kzalloc(len, GFP_KERNEL);
+	if (msg_data == NULL)
+		return -ENOMEM;
+
+	if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+		kfree(msg_data);
+		return -EFAULT;
+	}
+
+	remaining_len = len;
+	msg_ptr = msg_data;
+
+	while (remaining_len > 0) {
+
+		frag_len = min_t(size_t, sock->miu, remaining_len);
+
+		pr_debug("Fragment %zd bytes remaining %zd",
+			 frag_len, remaining_len);
+
+		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+					 frag_len + LLCP_HEADER_SIZE, &err);
+		if (pdu == NULL) {
+			pr_err("Could not allocate PDU\n");
+			continue;
+		}
+
+		pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
+
+		memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
+
+		/* No need to check for the peer RW for UI frames */
+		skb_queue_tail(&local->tx_queue, pdu);
+
+		remaining_len -= frag_len;
+		msg_ptr += frag_len;
+	}
+
+	kfree(msg_data);
+
+	return len;
+}
+
 int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
 {
 	struct sk_buff *skb;
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 9e8f4b2..2df8705 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -45,12 +45,38 @@
 	write_unlock(&l->lock);
 }
 
+static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
+{
+	struct nfc_llcp_local *local = sock->local;
+	struct sk_buff *s, *tmp;
+
+	pr_debug("%p\n", &sock->sk);
+
+	skb_queue_purge(&sock->tx_queue);
+	skb_queue_purge(&sock->tx_pending_queue);
+	skb_queue_purge(&sock->tx_backlog_queue);
+
+	if (local == NULL)
+		return;
+
+	/* Search for local pending SKBs that are related to this socket */
+	skb_queue_walk_safe(&local->tx_queue, s, tmp) {
+		if (s->sk != &sock->sk)
+			continue;
+
+		skb_unlink(s, &local->tx_queue);
+		kfree_skb(s);
+	}
+}
+
 static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 {
 	struct sock *sk;
 	struct hlist_node *node, *tmp;
 	struct nfc_llcp_sock *llcp_sock;
 
+	skb_queue_purge(&local->tx_queue);
+
 	write_lock(&local->sockets.lock);
 
 	sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
@@ -58,6 +84,8 @@
 
 		bh_lock_sock(sk);
 
+		nfc_llcp_socket_purge(llcp_sock);
+
 		if (sk->sk_state == LLCP_CONNECTED)
 			nfc_put_device(llcp_sock->dev);
 
@@ -65,7 +93,8 @@
 			struct nfc_llcp_sock *lsk, *n;
 			struct sock *accept_sk;
 
-			list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
+			list_for_each_entry_safe(lsk, n,
+						 &llcp_sock->accept_queue,
 						 accept_queue) {
 				accept_sk = &lsk->sk;
 				bh_lock_sock(accept_sk);
@@ -85,6 +114,16 @@
 			}
 		}
 
+		/*
+		 * If we have a connectionless socket bound, we keep it alive
+		 * if the device is still present.
+		 */
+		if (sk->sk_state == LLCP_BOUND && sk->sk_type == SOCK_DGRAM &&
+		    listen) {
+			bh_unlock_sock(sk);
+			continue;
+		}
+
 		sk->sk_state = LLCP_CLOSED;
 
 		bh_unlock_sock(sk);
@@ -134,7 +173,7 @@
 {
 	struct sock *sk;
 	struct hlist_node *node;
-	struct nfc_llcp_sock *llcp_sock;
+	struct nfc_llcp_sock *llcp_sock, *tmp_sock;
 
 	pr_debug("ssap dsap %d %d\n", ssap, dsap);
 
@@ -146,10 +185,12 @@
 	llcp_sock = NULL;
 
 	sk_for_each(sk, node, &local->sockets.head) {
-		llcp_sock = nfc_llcp_sock(sk);
+		tmp_sock = nfc_llcp_sock(sk);
 
-		if (llcp_sock->ssap == ssap && llcp_sock->dsap == dsap)
+		if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
+			llcp_sock = tmp_sock;
 			break;
+		}
 	}
 
 	read_unlock(&local->sockets.lock);
@@ -249,7 +290,12 @@
 
 		pr_debug("llcp sock %p\n", tmp_sock);
 
-		if (tmp_sock->sk.sk_state != LLCP_LISTEN)
+		if (tmp_sock->sk.sk_type == SOCK_STREAM &&
+		    tmp_sock->sk.sk_state != LLCP_LISTEN)
+			continue;
+
+		if (tmp_sock->sk.sk_type == SOCK_DGRAM &&
+		    tmp_sock->sk.sk_state != LLCP_BOUND)
 			continue;
 
 		if (tmp_sock->service_name == NULL ||
@@ -421,10 +467,9 @@
 static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 {
 	u8 *gb_cur, *version_tlv, version, version_length;
-	u8 *lto_tlv, lto, lto_length;
+	u8 *lto_tlv, lto_length;
 	u8 *wks_tlv, wks_length;
 	u8 *miux_tlv, miux_length;
-	__be16 miux;
 	u8 gb_len = 0;
 	int ret = 0;
 
@@ -433,9 +478,7 @@
 					 1, &version_length);
 	gb_len += version_length;
 
-	/* 1500 ms */
-	lto = 150;
-	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
+	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
 	gb_len += lto_length;
 
 	pr_debug("Local wks 0x%lx\n", local->local_wks);
@@ -443,8 +486,7 @@
 				     &wks_length);
 	gb_len += wks_length;
 
-	miux = cpu_to_be16(LLCP_MAX_MIUX);
-	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_length);
 	gb_len += miux_length;
 
@@ -610,7 +652,12 @@
 	if (skb != NULL) {
 		sk = skb->sk;
 		llcp_sock = nfc_llcp_sock(sk);
-		if (llcp_sock != NULL) {
+
+		if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
+			nfc_llcp_send_symm(local->dev);
+		} else {
+			struct sk_buff *copy_skb = NULL;
+			u8 ptype = nfc_llcp_ptype(skb);
 			int ret;
 
 			pr_debug("Sending pending skb\n");
@@ -618,24 +665,29 @@
 				       DUMP_PREFIX_OFFSET, 16, 1,
 				       skb->data, skb->len, true);
 
+			if (ptype == LLCP_PDU_I)
+				copy_skb = skb_copy(skb, GFP_ATOMIC);
+
 			nfc_llcp_send_to_raw_sock(local, skb,
 						  NFC_LLCP_DIRECTION_TX);
 
 			ret = nfc_data_exchange(local->dev, local->target_idx,
 						skb, nfc_llcp_recv, local);
 
-			if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
-				skb = skb_get(skb);
-				skb_queue_tail(&llcp_sock->tx_pending_queue,
-					       skb);
+			if (ret) {
+				kfree_skb(copy_skb);
+				goto out;
 			}
-		} else {
-			nfc_llcp_send_symm(local->dev);
+
+			if (ptype == LLCP_PDU_I && copy_skb)
+				skb_queue_tail(&llcp_sock->tx_pending_queue,
+					       copy_skb);
 		}
 	} else {
 		nfc_llcp_send_symm(local->dev);
 	}
 
+out:
 	mod_timer(&local->link_timer,
 		  jiffies + msecs_to_jiffies(2 * local->remote_lto));
 }
@@ -704,6 +756,39 @@
 	return NULL;
 }
 
+static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
+			     struct sk_buff *skb)
+{
+	struct nfc_llcp_sock *llcp_sock;
+	struct nfc_llcp_ui_cb *ui_cb;
+	u8 dsap, ssap;
+
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	ui_cb = nfc_llcp_ui_skb_cb(skb);
+	ui_cb->dsap = dsap;
+	ui_cb->ssap = ssap;
+
+	printk("%s %d %d\n", __func__, dsap, ssap);
+
+	pr_debug("%d %d\n", dsap, ssap);
+
+	/* We're looking for a bound socket, not a client one */
+	llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+	if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM)
+		return;
+
+	/* There is no sequence with UI frames */
+	skb_pull(skb, LLCP_HEADER_SIZE);
+	if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
+		pr_err("receive queue is full\n");
+		skb_queue_head(&llcp_sock->tx_backlog_queue, skb);
+	}
+
+	nfc_llcp_sock_put(llcp_sock);
+}
+
 static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
 				  struct sk_buff *skb)
 {
@@ -823,9 +908,6 @@
 fail:
 	/* Send DM */
 	nfc_llcp_send_dm(local, dsap, ssap, reason);
-
-	return;
-
 }
 
 int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
@@ -953,6 +1035,9 @@
 
 	sk = &llcp_sock->sk;
 	lock_sock(sk);
+
+	nfc_llcp_socket_purge(llcp_sock);
+
 	if (sk->sk_state == LLCP_CLOSED) {
 		release_sock(sk);
 		nfc_llcp_sock_put(llcp_sock);
@@ -1027,7 +1112,7 @@
 	}
 
 	if (llcp_sock == NULL) {
-		pr_err("Invalid DM\n");
+		pr_debug("Already closed\n");
 		return;
 	}
 
@@ -1038,8 +1123,100 @@
 	sk->sk_state_change(sk);
 
 	nfc_llcp_sock_put(llcp_sock);
+}
 
-	return;
+static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+			      struct sk_buff *skb)
+{
+	struct nfc_llcp_sock *llcp_sock;
+	u8 dsap, ssap, *tlv, type, length, tid, sap;
+	u16 tlv_len, offset;
+	char *service_name;
+	size_t service_name_len;
+
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	pr_debug("%d %d\n", dsap, ssap);
+
+	if (dsap != LLCP_SAP_SDP || ssap != LLCP_SAP_SDP) {
+		pr_err("Wrong SNL SAP\n");
+		return;
+	}
+
+	tlv = &skb->data[LLCP_HEADER_SIZE];
+	tlv_len = skb->len - LLCP_HEADER_SIZE;
+	offset = 0;
+
+	while (offset < tlv_len) {
+		type = tlv[0];
+		length = tlv[1];
+
+		switch (type) {
+		case LLCP_TLV_SDREQ:
+			tid = tlv[2];
+			service_name = (char *) &tlv[3];
+			service_name_len = length - 1;
+
+			pr_debug("Looking for %.16s\n", service_name);
+
+			if (service_name_len == strlen("urn:nfc:sn:sdp") &&
+			    !strncmp(service_name, "urn:nfc:sn:sdp",
+				     service_name_len)) {
+				sap = 1;
+				goto send_snl;
+			}
+
+			llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
+							  service_name_len);
+			if (!llcp_sock) {
+				sap = 0;
+				goto send_snl;
+			}
+
+			/*
+			 * We found a socket but its ssap has not been reserved
+			 * yet. We need to assign it for good and send a reply.
+			 * The ssap will be freed when the socket is closed.
+			 */
+			if (llcp_sock->ssap == LLCP_SDP_UNBOUND) {
+				atomic_t *client_count;
+
+				sap = nfc_llcp_reserve_sdp_ssap(local);
+
+				pr_debug("Reserving %d\n", sap);
+
+				if (sap == LLCP_SAP_MAX) {
+					sap = 0;
+					goto send_snl;
+				}
+
+				client_count =
+					&local->local_sdp_cnt[sap -
+							      LLCP_WKS_NUM_SAP];
+
+				atomic_inc(client_count);
+
+				llcp_sock->ssap = sap;
+				llcp_sock->reserved_ssap = sap;
+			} else {
+				sap = llcp_sock->ssap;
+			}
+
+			pr_debug("%p %d\n", llcp_sock, sap);
+
+send_snl:
+			nfc_llcp_send_snl(local, tid, sap);
+			break;
+
+		default:
+			pr_err("Invalid SNL tlv value 0x%x\n", type);
+			break;
+		}
+
+		offset += length + 2;
+		tlv += length + 2;
+	}
 }
 
 static void nfc_llcp_rx_work(struct work_struct *work)
@@ -1072,6 +1249,11 @@
 		pr_debug("SYMM\n");
 		break;
 
+	case LLCP_PDU_UI:
+		pr_debug("UI\n");
+		nfc_llcp_recv_ui(local, skb);
+		break;
+
 	case LLCP_PDU_CONNECT:
 		pr_debug("CONNECT\n");
 		nfc_llcp_recv_connect(local, skb);
@@ -1092,6 +1274,11 @@
 		nfc_llcp_recv_dm(local, skb);
 		break;
 
+	case LLCP_PDU_SNL:
+		pr_debug("SNL\n");
+		nfc_llcp_recv_snl(local, skb);
+		break;
+
 	case LLCP_PDU_I:
 	case LLCP_PDU_RR:
 	case LLCP_PDU_RNR:
@@ -1104,8 +1291,6 @@
 	schedule_work(&local->tx_work);
 	kfree_skb(local->rx_pending);
 	local->rx_pending = NULL;
-
-	return;
 }
 
 void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
@@ -1121,8 +1306,6 @@
 	local->rx_pending = skb_get(skb);
 	del_timer(&local->link_timer);
 	schedule_work(&local->rx_work);
-
-	return;
 }
 
 int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
@@ -1205,6 +1388,10 @@
 	rwlock_init(&local->connecting_sockets.lock);
 	rwlock_init(&local->raw_sockets.lock);
 
+	local->lto = 150; /* 1500 ms */
+	local->rw = LLCP_MAX_RW;
+	local->miux = cpu_to_be16(LLCP_MAX_MIUX);
+
 	nfc_llcp_build_gb(local);
 
 	local->remote_miu = LLCP_DEFAULT_MIU;
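
The SNL receive path added above walks a flat list of <type, length, value> TLVs, advancing the cursor by length + 2 on each iteration. A standalone sketch of that walk, with an extra truncation check added for safety and dummy TLV type values:

#include <stdint.h>
#include <stdio.h>

static void walk_tlvs(const uint8_t *tlv, uint16_t tlv_len)
{
	uint16_t offset = 0;

	while (offset + 2 <= tlv_len) {
		uint8_t type = tlv[0];
		uint8_t length = tlv[1];

		if (offset + 2 + length > tlv_len)
			break;		/* truncated TLV, stop parsing */

		printf("TLV type 0x%02x, %u value bytes\n", type, length);

		offset += length + 2;
		tlv += length + 2;
	}
}

int main(void)
{
	/* Two dummy TLVs: a 3-byte and a 2-byte value. */
	const uint8_t buf[] = { 0x08, 0x03, 0x01, 'a', 'b',
				0x09, 0x02, 0x01, 0x20 };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}
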
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index fdb2d24..0d62366 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -64,6 +64,9 @@
 	u32 target_idx;
 	u8 rf_mode;
 	u8 comm_mode;
+	u8 lto;
+	u8 rw;
+	__be16 miux;
 	unsigned long local_wks;      /* Well known services */
 	unsigned long local_sdp;      /* Local services  */
 	unsigned long local_sap; /* Local SAPs, not available for discovery */
@@ -124,6 +127,13 @@
 	struct sock *parent;
 };
 
+struct nfc_llcp_ui_cb {
+	__u8 dsap;
+	__u8 ssap;
+};
+
+#define nfc_llcp_ui_skb_cb(__skb) ((struct nfc_llcp_ui_cb *)&((__skb)->cb[0]))
+
 #define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
 #define nfc_llcp_dev(sk)  (nfc_llcp_sock((sk))->dev)
 
@@ -209,10 +219,13 @@
 int nfc_llcp_send_symm(struct nfc_dev *dev);
 int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap);
 int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
 int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 			  struct msghdr *msg, size_t len);
+int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
+			   struct msghdr *msg, size_t len);
 int nfc_llcp_send_rr(struct nfc_llcp_sock *sock);
 
 /* Socket API */
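
nfc_llcp_ui_skb_cb() above follows the usual pattern of overlaying a small metadata struct on the skb control buffer so the datagram's SAPs survive until recvmsg() fills msg_name. A userspace analogue of the pattern, with a made-up packet type standing in for struct sk_buff:

#include <stdio.h>
#include <string.h>

struct fake_skb {
	char cb[48];		/* same idea as sk_buff::cb */
};

struct ui_cb {
	unsigned char dsap;
	unsigned char ssap;
};

#define ui_skb_cb(skb) ((struct ui_cb *)&((skb)->cb[0]))

int main(void)
{
	struct fake_skb skb;

	memset(&skb, 0, sizeof(skb));
	ui_skb_cb(&skb)->dsap = 0x20;	/* stash addressing info on receive */
	ui_skb_cb(&skb)->ssap = 0x21;

	/* ...and read it back later, e.g. when filling msg_name. */
	printf("dsap %u ssap %u\n",
	       ui_skb_cb(&skb)->dsap, ui_skb_cb(&skb)->ssap);
	return 0;
}
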
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 63e4cdc..0fa1e92 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -205,8 +205,8 @@
 
 	lock_sock(sk);
 
-	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
-	    || sk->sk_state != LLCP_BOUND) {
+	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) ||
+	    sk->sk_state != LLCP_BOUND) {
 		ret = -EBADFD;
 		goto error;
 	}
@@ -608,6 +608,25 @@
 
 	lock_sock(sk);
 
+	if (sk->sk_type == SOCK_DGRAM) {
+		struct sockaddr_nfc_llcp *addr =
+			(struct sockaddr_nfc_llcp *)msg->msg_name;
+
+		if (msg->msg_namelen < sizeof(*addr)) {
+			release_sock(sk);
+
+			pr_err("Invalid socket address length %d\n",
+			       msg->msg_namelen);
+
+			return -EINVAL;
+		}
+
+		release_sock(sk);
+
+		return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap,
+					      msg, len);
+	}
+
 	if (sk->sk_state != LLCP_CONNECTED) {
 		release_sock(sk);
 		return -ENOTCONN;
@@ -663,11 +682,28 @@
 		return -EFAULT;
 	}
 
+	if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
+		struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
+		struct sockaddr_nfc_llcp sockaddr;
+
+		pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
+
+		sockaddr.sa_family = AF_NFC;
+		sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP;
+		sockaddr.dsap = ui_cb->dsap;
+		sockaddr.ssap = ui_cb->ssap;
+
+		memcpy(msg->msg_name, &sockaddr, sizeof(sockaddr));
+		msg->msg_namelen = sizeof(sockaddr);
+	}
+
 	/* Mark read part of skb as used */
 	if (!(flags & MSG_PEEK)) {
 
 		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
-		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) {
+		if (sk->sk_type == SOCK_STREAM ||
+		    sk->sk_type == SOCK_DGRAM ||
+		    sk->sk_type == SOCK_RAW) {
 			skb_pull(skb, copied);
 			if (skb->len) {
 				skb_queue_head(&sk->sk_receive_queue, skb);
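
With the SOCK_DGRAM branches added to sendmsg()/recvmsg() above, a connectionless LLCP datagram can be sent with an explicit destination SAP. A rough userspace sketch, assuming the struct sockaddr_nfc_llcp layout exported by <linux/nfc.h>; the adapter index and SAP numbers are placeholders and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

int main(void)
{
	struct sockaddr_nfc_llcp addr;
	const char payload[] = "hello";
	int fd;

	fd = socket(AF_NFC, SOCK_DGRAM, NFC_SOCKPROTO_LLCP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.nfc_protocol = NFC_PROTO_NFC_DEP;
	addr.dev_idx = 0;	/* placeholder adapter index */
	addr.dsap = 0x20;	/* placeholder destination SAP */
	addr.ssap = 0x21;	/* placeholder source SAP */

	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}
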
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index decdc49..6d69b5f 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -1,6 +1,6 @@
 config NFC_NCI
-	depends on NFC && EXPERIMENTAL
-	tristate "NCI protocol support (EXPERIMENTAL)"
+	depends on NFC
+	tristate "NCI protocol support"
 	default n
 	help
 	  NCI (NFC Controller Interface) is a communication protocol between
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index acf9abb..5f98dc1 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -205,10 +205,10 @@
 	cmd.num_disc_configs = 0;
 
 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
-	    (protocols & NFC_PROTO_JEWEL_MASK
-	     || protocols & NFC_PROTO_MIFARE_MASK
-	     || protocols & NFC_PROTO_ISO14443_MASK
-	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+	    (protocols & NFC_PROTO_JEWEL_MASK ||
+	     protocols & NFC_PROTO_MIFARE_MASK ||
+	     protocols & NFC_PROTO_ISO14443_MASK ||
+	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
 		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
 			NCI_NFC_A_PASSIVE_POLL_MODE;
 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -224,8 +224,8 @@
 	}
 
 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
-	    (protocols & NFC_PROTO_FELICA_MASK
-	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+	    (protocols & NFC_PROTO_FELICA_MASK ||
+	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
 		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
 			NCI_NFC_F_PASSIVE_POLL_MODE;
 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -414,13 +414,13 @@
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 	struct nci_set_config_param param;
 	__u8 local_gb[NFC_MAX_GT_LEN];
-	int i, rc = 0;
+	int i;
 
 	param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
 	if ((param.val == NULL) || (param.len == 0))
-		return rc;
+		return 0;
 
-	if (param.len > NCI_MAX_PARAM_LEN)
+	if (param.len > NFC_MAX_GT_LEN)
 		return -EINVAL;
 
 	for (i = 0; i < param.len; i++)
@@ -429,10 +429,8 @@
 	param.id = NCI_PN_ATR_REQ_GEN_BYTES;
 	param.val = local_gb;
 
-	rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
-			 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
-
-	return rc;
+	return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
+			   msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
 }
 
 static int nci_start_poll(struct nfc_dev *nfc_dev,
@@ -579,7 +577,6 @@
 	}
 }
 
-
 static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
 			   __u8 comm_mode, __u8 *gb, size_t gb_len)
 {
@@ -806,8 +803,8 @@
 
 	pr_debug("len %d\n", skb->len);
 
-	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
-		      && !test_bit(NCI_INIT, &ndev->flags))) {
+	if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
+	    !test_bit(NCI_INIT, &ndev->flags))) {
 		kfree_skb(skb);
 		return -ENXIO;
 	}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index c1b5285..3568ae1 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -29,6 +29,8 @@
 
 #include "nfc.h"
 
+#include "llcp/llcp.h"
+
 static struct genl_multicast_group nfc_genl_event_mcgrp = {
 	.name = NFC_GENL_MCAST_EVENT_NAME,
 };
@@ -364,7 +366,8 @@
 	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
 	    nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
 	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
-	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) ||
+	    nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
 		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
@@ -590,7 +593,7 @@
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
 	    ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
 	      !info->attrs[NFC_ATTR_PROTOCOLS]) &&
-	     !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
+	      !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
 		return -EINVAL;
 
 	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -715,6 +718,146 @@
 	return rc;
 }
 
+static int nfc_genl_send_params(struct sk_buff *msg,
+				struct nfc_llcp_local *local,
+				u32 portid, u32 seq)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0,
+			  NFC_CMD_LLC_GET_PARAMS);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) ||
+	    nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) ||
+	    nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) ||
+	    nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux)))
+		goto nla_put_failure;
+
+	return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	int rc = 0;
+	struct sk_buff *msg = NULL;
+	u32 idx;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+		return -EINVAL;
+
+	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+	dev = nfc_get_device(idx);
+	if (!dev)
+		return -ENODEV;
+
+	device_lock(&dev->dev);
+
+	local = nfc_llcp_find_local(dev);
+	if (!local) {
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
+
+exit:
+	device_unlock(&dev->dev);
+
+	nfc_put_device(dev);
+
+	if (rc < 0) {
+		if (msg)
+			nlmsg_free(msg);
+
+		return rc;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	u8 rw = 0;
+	u16 miux = 0;
+	u32 idx;
+	int rc = 0;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    (!info->attrs[NFC_ATTR_LLC_PARAM_LTO] &&
+	     !info->attrs[NFC_ATTR_LLC_PARAM_RW] &&
+	     !info->attrs[NFC_ATTR_LLC_PARAM_MIUX]))
+		return -EINVAL;
+
+	if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) {
+		rw = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_RW]);
+
+		if (rw > LLCP_MAX_RW)
+			return -EINVAL;
+	}
+
+	if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) {
+		miux = nla_get_u16(info->attrs[NFC_ATTR_LLC_PARAM_MIUX]);
+
+		if (miux > LLCP_MAX_MIUX)
+			return -EINVAL;
+	}
+
+	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+	dev = nfc_get_device(idx);
+	if (!dev)
+		return -ENODEV;
+
+	device_lock(&dev->dev);
+
+	local = nfc_llcp_find_local(dev);
+	if (!local) {
+		nfc_put_device(dev);
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
+		if (dev->dep_link_up) {
+			rc = -EINPROGRESS;
+			goto exit;
+		}
+
+		local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
+	}
+
+	if (info->attrs[NFC_ATTR_LLC_PARAM_RW])
+		local->rw = rw;
+
+	if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
+		local->miux = cpu_to_be16(miux);
+
+exit:
+	device_unlock(&dev->dev);
+
+	nfc_put_device(dev);
+
+	return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
@@ -759,6 +902,16 @@
 		.done = nfc_genl_dump_targets_done,
 		.policy = nfc_genl_policy,
 	},
+	{
+		.cmd = NFC_CMD_LLC_GET_PARAMS,
+		.doit = nfc_genl_llc_get_params,
+		.policy = nfc_genl_policy,
+	},
+	{
+		.cmd = NFC_CMD_LLC_SET_PARAMS,
+		.doit = nfc_genl_llc_set_params,
+		.policy = nfc_genl_policy,
+	},
 };
 
 
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index c5e42b79..87d914d 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -56,6 +56,7 @@
 int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
 u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
 int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
 int __init nfc_llcp_init(void);
 void nfc_llcp_exit(void);
 
@@ -97,6 +98,11 @@
 	return 0;
 }
 
+static inline struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+{
+	return NULL;
+}
+
 static inline int nfc_llcp_init(void)
 {
 	return 0;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 8b8a6a2..313bf1b 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -256,7 +256,6 @@
 	return rc ? : copied;
 }
 
-
 static const struct proto_ops rawsock_ops = {
 	.family         = PF_NFC,
 	.owner          = THIS_MODULE,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 94060ed..e639645 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1881,7 +1881,35 @@
 	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 
-	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
+	if (po->tp_tx_has_off) {
+		int off_min, off_max, off;
+		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+		off_max = po->tx_ring.frame_size - tp_len;
+		if (sock->type == SOCK_DGRAM) {
+			switch (po->tp_version) {
+			case TPACKET_V2:
+				off = ph.h2->tp_net;
+				break;
+			default:
+				off = ph.h1->tp_net;
+				break;
+			}
+		} else {
+			switch (po->tp_version) {
+			case TPACKET_V2:
+				off = ph.h2->tp_mac;
+				break;
+			default:
+				off = ph.h1->tp_mac;
+				break;
+			}
+		}
+		if (unlikely((off < off_min) || (off_max < off)))
+			return -EINVAL;
+		data = ph.raw + off;
+	} else {
+		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
+	}
 	to_write = tp_len;
 
 	if (sock->type == SOCK_DGRAM) {
@@ -1907,7 +1935,6 @@
 		to_write -= dev->hard_header_len;
 	}
 
-	err = -EFAULT;
 	offset = offset_in_page(data);
 	len_max = PAGE_SIZE - offset;
 	len = ((to_write > len_max) ? len_max : to_write);
@@ -1957,7 +1984,6 @@
 
 	mutex_lock(&po->pg_vec_lock);
 
-	err = -EBUSY;
 	if (saddr == NULL) {
 		dev = po->prot_hook.dev;
 		proto	= po->num;
@@ -2478,7 +2504,7 @@
 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
 	int err;
 
-	if (!capable(CAP_NET_RAW))
+	if (!ns_capable(net->user_ns, CAP_NET_RAW))
 		return -EPERM;
 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
 	    sock->type != SOCK_PACKET)
@@ -3111,6 +3137,19 @@
 
 		return fanout_add(sk, val & 0xffff, val >> 16);
 	}
+	case PACKET_TX_HAS_OFF:
+	{
+		unsigned int val;
+
+		if (optlen != sizeof(val))
+			return -EINVAL;
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+			return -EBUSY;
+		if (copy_from_user(&val, optval, sizeof(val)))
+			return -EFAULT;
+		po->tp_tx_has_off = !!val;
+		return 0;
+	}
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -3202,6 +3241,9 @@
 			((u32)po->fanout->type << 16)) :
 		       0);
 		break;
+	case PACKET_TX_HAS_OFF:
+		val = po->tp_tx_has_off;
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
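
PACKET_TX_HAS_OFF has to be set before the TX ring exists (the code above returns -EBUSY once pg_vec is allocated); with it enabled, the frame data offset is taken from tp_mac (SOCK_RAW) or tp_net (SOCK_DGRAM) in each TX frame header and checked against off_min/off_max. A userspace sketch of turning it on, with placeholder ring geometry and headers recent enough to carry the new option:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int ver = TPACKET_V2, on = 1;
	struct tpacket_req req;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));

	/* Enable per-frame TX offsets before the ring is created. */
	if (setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF, &on, sizeof(on)))
		perror("PACKET_TX_HAS_OFF");

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 4096;	/* placeholder ring geometry */
	req.tp_frame_size = 2048;
	req.tp_block_nr = 4;
	req.tp_frame_nr = 8;
	if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req)))
		perror("PACKET_TX_RING");

	return 0;
}
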
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 44945f6..e84cab8 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,6 +109,7 @@
 	unsigned int		tp_hdrlen;
 	unsigned int		tp_reserve;
 	unsigned int		tp_loss:1;
+	unsigned int		tp_tx_has_off:1;
 	unsigned int		tp_tstamp;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 83a8389..0193630 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -70,6 +70,9 @@
 	int err;
 	u8 pnaddr;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
@@ -230,6 +233,9 @@
 	int err;
 	u8 dst;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 8d2b3d5..7280ab8 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -50,7 +50,7 @@
 };
 
 struct rds_ib_refill_cache {
-	struct rds_ib_cache_head *percpu;
+	struct rds_ib_cache_head __percpu *percpu;
 	struct list_head	 *xfer;
 	struct list_head	 *ready;
 };
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8d19491..8c5bc85 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -418,20 +418,21 @@
 				 struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct rds_ib_cache_head *chp;
 	struct list_head *old;
+	struct list_head __percpu *chpfirst;
 
 	local_irq_save(flags);
 
-	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
-	if (!chp->first)
+	chpfirst = __this_cpu_read(cache->percpu->first);
+	if (!chpfirst)
 		INIT_LIST_HEAD(new_item);
 	else /* put on front */
-		list_add_tail(new_item, chp->first);
-	chp->first = new_item;
-	chp->count++;
+		list_add_tail(new_item, chpfirst);
 
-	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_inc(cache->percpu->count);
+
+	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
 		goto end;
 
 	/*
@@ -443,12 +444,13 @@
 	do {
 		old = xchg(&cache->xfer, NULL);
 		if (old)
-			list_splice_entire_tail(old, chp->first);
-		old = cmpxchg(&cache->xfer, NULL, chp->first);
+			list_splice_entire_tail(old, chpfirst);
+		old = cmpxchg(&cache->xfer, NULL, chpfirst);
 	} while (old);
 
-	chp->first = NULL;
-	chp->count = 0;
+
+	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
 }
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 62fb51f..235e01a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -509,7 +509,7 @@
 
 config NET_EMATCH_CANID
 	tristate "CAN Identifier"
-	depends on NET_EMATCH && CAN
+	depends on NET_EMATCH && (CAN=y || CAN=m)
 	---help---
 	  Say Y here if you want to be able to classify CAN frames based
 	  on CAN Identifier.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 102761d..65d240c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -987,6 +987,9 @@
 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
 	int ret = 0, ovr = 0;
 
+	if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
 	if (ret < 0)
 		return ret;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7ae0289..ff55ed6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -139,6 +139,8 @@
 	int err;
 	int tp_created = 0;
 
+	if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
+		return -EPERM;
 replay:
 	t = nlmsg_data(n);
 	protocol = TC_H_MIN(t->tcm_info);
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 2ecde22..709b0fb 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -17,6 +17,7 @@
 #include <linux/skbuff.h>
 #include <linux/cgroup.h>
 #include <linux/rcupdate.h>
+#include <linux/fdtable.h>
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
 #include <net/sock.h>
@@ -53,6 +54,28 @@
 	kfree(cgrp_cls_state(cgrp));
 }
 
+static int update_classid(const void *v, struct file *file, unsigned n)
+{
+	int err;
+	struct socket *sock = sock_from_file(file, &err);
+	if (sock)
+		sock->sk->sk_classid = (u32)(unsigned long)v;
+	return 0;
+}
+
+static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+	struct task_struct *p;
+	void *v;
+
+	cgroup_taskset_for_each(p, cgrp, tset) {
+		task_lock(p);
+		v = (void *)(unsigned long)task_cls_classid(p);
+		iterate_fd(p->files, 0, update_classid, v);
+		task_unlock(p);
+	}
+}
+
 static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
 {
 	return cgrp_cls_state(cgrp)->classid;
@@ -77,6 +100,7 @@
 	.name		= "net_cls",
 	.create		= cgrp_create,
 	.destroy	= cgrp_destroy,
+	.attach		= cgrp_attach,
 	.subsys_id	= net_cls_subsys_id,
 	.base_cftypes	= ss_files,
 	.module		= THIS_MODULE,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a18d975..4799c48 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -495,16 +495,15 @@
 
 void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
-	ktime_t time;
-
 	if (test_bit(__QDISC_STATE_DEACTIVATED,
 		     &qdisc_root_sleeping(wd->qdisc)->state))
 		return;
 
 	qdisc_throttled(wd->qdisc);
-	time = ktime_set(0, 0);
-	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
-	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+
+	hrtimer_start(&wd->timer,
+		      ns_to_ktime(PSCHED_TICKS2NS(expires)),
+		      HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule);
 
@@ -981,6 +980,9 @@
 	struct Qdisc *p = NULL;
 	int err;
 
+	if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
 	if (!dev)
 		return -ENODEV;
@@ -1044,6 +1046,9 @@
 	struct Qdisc *q, *p;
 	int err;
 
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 replay:
 	/* Reinit, just in case something touches this. */
 	tcm = nlmsg_data(n);
@@ -1380,6 +1385,9 @@
 	u32 qid = TC_H_MAJ(clid);
 	int err;
 
+	if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+
 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
 	if (!dev)
 		return -ENODEV;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 564b9fc..0e19948 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -509,8 +509,7 @@
 			cl->cpriority = TC_CBQ_MAXPRIO;
 			q->pmask |= (1<<TC_CBQ_MAXPRIO);
 
-			expires = ktime_set(0, 0);
-			expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
+			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
 			if (hrtimer_try_to_cancel(&q->delay_timer) &&
 			    ktime_to_ns(ktime_sub(
 					hrtimer_get_expires(&q->delay_timer),
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9d75b77..d2922c0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -71,6 +71,12 @@
 	HTB_CAN_SEND		/* class can send */
 };
 
+struct htb_rate_cfg {
+	u64 rate_bps;
+	u32 mult;
+	u32 shift;
+};
+
 /* interior & leaf nodes; props specific to leaves are marked L: */
 struct htb_class {
 	struct Qdisc_class_common common;
@@ -118,11 +124,11 @@
 	int filter_cnt;
 
 	/* token bucket parameters */
-	struct qdisc_rate_table *rate;	/* rate table of the class itself */
-	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
-	long buffer, cbuffer;	/* token bucket depth/rate */
+	struct htb_rate_cfg rate;
+	struct htb_rate_cfg ceil;
+	s64 buffer, cbuffer;	/* token bucket depth/rate */
 	psched_tdiff_t mbuffer;	/* max wait time */
-	long tokens, ctokens;	/* current number of tokens */
+	s64 tokens, ctokens;	/* current number of tokens */
 	psched_time_t t_c;	/* checkpoint time */
 };
 
@@ -162,6 +168,45 @@
 	struct work_struct work;
 };
 
+static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
+{
+	return ((u64)len * r->mult) >> r->shift;
+}
+
+static void htb_precompute_ratedata(struct htb_rate_cfg *r)
+{
+	u64 factor;
+	u64 mult;
+	int shift;
+
+	r->shift = 0;
+	r->mult = 1;
+	/*
+	 * Calibrate mult, shift so that token counting is accurate
+	 * for smallest packet size (64 bytes).  Token (time in ns) is
+	 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
+	 * work as long as the smallest packet transfer time can be
+	 * accurately represented in nanosec.
+	 */
+	if (r->rate_bps > 0) {
+		/*
+		 * Higher shift gives better accuracy.  Find the largest
+		 * shift such that mult fits in 32 bits.
+		 */
+		for (shift = 0; shift < 16; shift++) {
+			r->shift = shift;
+			factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+			mult = div64_u64(factor, r->rate_bps);
+			if (mult > UINT_MAX)
+				break;
+		}
+
+		r->shift = shift - 1;
+		factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+		r->mult = div64_u64(factor, r->rate_bps);
+	}
+}
+
 /* find class in global hash table using given handle */
 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
@@ -273,7 +318,7 @@
  * already in the queue.
  */
 static void htb_add_to_wait_tree(struct htb_sched *q,
-				 struct htb_class *cl, long delay)
+				 struct htb_class *cl, s64 delay)
 {
 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 
@@ -441,14 +486,14 @@
 		htb_remove_class_from_row(q, cl, mask);
 }
 
-static inline long htb_lowater(const struct htb_class *cl)
+static inline s64 htb_lowater(const struct htb_class *cl)
 {
 	if (htb_hysteresis)
 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 	else
 		return 0;
 }
-static inline long htb_hiwater(const struct htb_class *cl)
+static inline s64 htb_hiwater(const struct htb_class *cl)
 {
 	if (htb_hysteresis)
 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
@@ -469,9 +514,9 @@
  * mode transitions per time unit. The speed gain is about 1/6.
  */
 static inline enum htb_cmode
-htb_class_mode(struct htb_class *cl, long *diff)
+htb_class_mode(struct htb_class *cl, s64 *diff)
 {
-	long toks;
+	s64 toks;
 
 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 		*diff = -toks;
@@ -495,7 +540,7 @@
  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
  */
 static void
-htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
+htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
 {
 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 
@@ -581,26 +626,26 @@
 	return NET_XMIT_SUCCESS;
 }
 
-static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
+static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 {
-	long toks = diff + cl->tokens;
+	s64 toks = diff + cl->tokens;
 
 	if (toks > cl->buffer)
 		toks = cl->buffer;
-	toks -= (long) qdisc_l2t(cl->rate, bytes);
+	toks -= (s64) l2t_ns(&cl->rate, bytes);
 	if (toks <= -cl->mbuffer)
 		toks = 1 - cl->mbuffer;
 
 	cl->tokens = toks;
 }
 
-static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
+static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 {
-	long toks = diff + cl->ctokens;
+	s64 toks = diff + cl->ctokens;
 
 	if (toks > cl->cbuffer)
 		toks = cl->cbuffer;
-	toks -= (long) qdisc_l2t(cl->ceil, bytes);
+	toks -= (s64) l2t_ns(&cl->ceil, bytes);
 	if (toks <= -cl->mbuffer)
 		toks = 1 - cl->mbuffer;
 
@@ -623,10 +668,10 @@
 {
 	int bytes = qdisc_pkt_len(skb);
 	enum htb_cmode old_mode;
-	long diff;
+	s64 diff;
 
 	while (cl) {
-		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
+		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 		if (cl->level >= level) {
 			if (cl->level == level)
 				cl->xstats.lends++;
@@ -673,7 +718,7 @@
 	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
-		long diff;
+		s64 diff;
 		struct rb_node *p = rb_first(&q->wait_pq[level]);
 
 		if (!p)
@@ -684,7 +729,7 @@
 			return cl->pq_key;
 
 		htb_safe_rb_erase(p, q->wait_pq + level);
-		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
+		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 		htb_change_class_mode(q, cl, &diff);
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
@@ -871,10 +916,10 @@
 
 	if (!sch->q.qlen)
 		goto fin;
-	q->now = psched_get_time();
+	q->now = ktime_to_ns(ktime_get());
 	start_at = jiffies;
 
-	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
+	next_event = q->now + 5 * NSEC_PER_SEC;
 
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
@@ -884,7 +929,7 @@
 		if (q->now >= q->near_ev_cache[level]) {
 			event = htb_do_events(q, level, start_at);
 			if (!event)
-				event = q->now + PSCHED_TICKS_PER_SEC;
+				event = q->now + NSEC_PER_SEC;
 			q->near_ev_cache[level] = event;
 		} else
 			event = q->near_ev_cache[level];
@@ -903,10 +948,17 @@
 		}
 	}
 	sch->qstats.overlimits++;
-	if (likely(next_event > q->now))
-		qdisc_watchdog_schedule(&q->watchdog, next_event);
-	else
+	if (likely(next_event > q->now)) {
+		if (!test_bit(__QDISC_STATE_DEACTIVATED,
+			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
+			ktime_t time = ns_to_ktime(next_event);
+			qdisc_throttled(q->watchdog.qdisc);
+			hrtimer_start(&q->watchdog.timer, time,
+				      HRTIMER_MODE_ABS);
+		}
+	} else {
 		schedule_work(&q->work);
+	}
 fin:
 	return skb;
 }
@@ -1082,9 +1134,9 @@
 
 	memset(&opt, 0, sizeof(opt));
 
-	opt.rate = cl->rate->rate;
+	opt.rate.rate = cl->rate.rate_bps >> 3;
 	opt.buffer = cl->buffer;
-	opt.ceil = cl->ceil->rate;
+	opt.ceil.rate = cl->ceil.rate_bps >> 3;
 	opt.cbuffer = cl->cbuffer;
 	opt.quantum = cl->quantum;
 	opt.prio = cl->prio;
@@ -1203,9 +1255,6 @@
 		qdisc_destroy(cl->un.leaf.q);
 	}
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
-	qdisc_put_rtab(cl->rate);
-	qdisc_put_rtab(cl->ceil);
-
 	tcf_destroy_chain(&cl->filter_list);
 	kfree(cl);
 }
@@ -1307,7 +1356,6 @@
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
-	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
 	struct nlattr *tb[__TCA_HTB_MAX];
 	struct tc_htb_opt *hopt;
 
@@ -1326,10 +1374,7 @@
 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
 
 	hopt = nla_data(tb[TCA_HTB_PARMS]);
-
-	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
-	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
-	if (!rtab || !ctab)
+	if (!hopt->rate.rate || !hopt->ceil.rate)
 		goto failure;
 
 	if (!cl) {		/* new class */
@@ -1439,7 +1484,7 @@
 	 * is really leaf before changing cl->un.leaf !
 	 */
 	if (!cl->level) {
-		cl->quantum = rtab->rate.rate / q->rate2quantum;
+		cl->quantum = hopt->rate.rate / q->rate2quantum;
 		if (!hopt->quantum && cl->quantum < 1000) {
 			pr_warning(
 			       "HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1460,12 +1505,16 @@
 
 	cl->buffer = hopt->buffer;
 	cl->cbuffer = hopt->cbuffer;
-	if (cl->rate)
-		qdisc_put_rtab(cl->rate);
-	cl->rate = rtab;
-	if (cl->ceil)
-		qdisc_put_rtab(cl->ceil);
-	cl->ceil = ctab;
+
+	cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
+	cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
+
+	htb_precompute_ratedata(&cl->rate);
+	htb_precompute_ratedata(&cl->ceil);
+
+	cl->buffer = hopt->buffer << PSCHED_SHIFT;
+	cl->cbuffer = hopt->cbuffer << PSCHED_SHIFT;
+
 	sch_tree_unlock(sch);
 
 	qdisc_class_hash_grow(sch, &q->clhash);
@@ -1474,10 +1523,6 @@
 	return 0;
 
 failure:
-	if (rtab)
-		qdisc_put_rtab(rtab);
-	if (ctab)
-		qdisc_put_rtab(ctab);
 	return err;
 }
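
The mult/shift pair precomputed above lets l2t_ns() convert a packet length into a transmission time in nanoseconds with a single multiply and shift, replacing the qdisc_l2t() rate-table lookup. A standalone userspace check of the same arithmetic (a sketch, not the kernel code; the 100 Mbit/s rate and 1500-byte length are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct rate_cfg {
	uint64_t rate_bps;
	uint32_t mult;
	uint32_t shift;
};

static uint64_t l2t_ns(const struct rate_cfg *r, unsigned int len)
{
	return ((uint64_t)len * r->mult) >> r->shift;
}

static void precompute(struct rate_cfg *r)
{
	uint64_t factor, mult;
	int shift;

	r->shift = 0;
	r->mult = 1;
	if (r->rate_bps > 0) {
		/* Largest shift such that mult still fits in 32 bits. */
		for (shift = 0; shift < 16; shift++) {
			r->shift = shift;
			factor = 8ULL * NSEC_PER_SEC * (1 << r->shift);
			mult = factor / r->rate_bps;
			if (mult > UINT32_MAX)
				break;
		}
		r->shift = shift - 1;
		factor = 8ULL * NSEC_PER_SEC * (1 << r->shift);
		r->mult = (uint32_t)(factor / r->rate_bps);
	}
}

int main(void)
{
	struct rate_cfg r = { .rate_bps = 100000000ULL };	/* 100 Mbit/s */
	unsigned int len = 1500;

	precompute(&r);
	printf("approx %llu ns, exact %llu ns\n",
	       (unsigned long long)l2t_ns(&r, len),
	       (unsigned long long)(len * 8ULL * NSEC_PER_SEC / r.rate_bps));
	return 0;
}
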
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9687fa1..6ed3765 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1,7 +1,8 @@
 /*
- * net/sched/sch_qfq.c         Quick Fair Queueing Scheduler.
+ * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
  *
  * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
+ * Copyright (c) 2012 Paolo Valente.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -19,12 +20,18 @@
 #include <net/pkt_cls.h>
 
 
-/*  Quick Fair Queueing
-    ===================
+/*  Quick Fair Queueing Plus
+    ========================
 
     Sources:
 
-    Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
+    [1] Paolo Valente,
+    "Reducing the Execution Time of Fair-Queueing Schedulers."
+    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
+
+    Sources for QFQ:
+
+    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
     Packet Scheduling with Tight Bandwidth Distribution Guarantees."
 
     See also:
@@ -33,6 +40,20 @@
 
 /*
 
+  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
+  classes. Each aggregate is timestamped with a virtual start time S
+  and a virtual finish time F, and scheduled according to its
+  timestamps. S and F are computed as a function of a system virtual
+  time function V. The classes within each aggregate are instead
+  scheduled with DRR.
+
+  To speed up operations, QFQ+ also divides aggregates into a limited
+  number of groups. Which group a class belongs to depends on the
+  ratio between the maximum packet length for the class and the weight
+  of the class. Groups have their own S and F. In the end, QFQ+
+  schedules groups, then aggregates within groups, then classes within
+  aggregates. See [1] and [2] for a full description.
+
   Virtual time computations.
 
   S, F and V are all computed in fixed point arithmetic with
@@ -76,27 +97,28 @@
 #define QFQ_MAX_SLOTS	32
 
 /*
- * Shifts used for class<->group mapping.  We allow class weights that are
- * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
+ * Shifts used for aggregate<->group mapping.  We allow class weights that are
+ * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
  * group with the smallest index that can support the L_i / r_i configured
- * for the class.
+ * for the classes in the aggregate.
  *
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
 #define QFQ_MAX_INDEX		24
-#define QFQ_MAX_WSHIFT		12
+#define QFQ_MAX_WSHIFT		10
 
-#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM		(16*QFQ_MAX_WEIGHT)
+#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
+#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)
 
 #define FRAC_BITS		30	/* fixed point arithmetic */
 #define ONE_FP			(1UL << FRAC_BITS)
 #define IWSUM			(ONE_FP/QFQ_MAX_WSUM)
 
 #define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
-#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
-#define QFQ_MIN_LMAX		256	/* min possible lmax for a class */
+#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
+
+#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
 
 /*
  * Possible group states.  These values are used as indexes for the bitmaps
@@ -106,6 +128,8 @@
 
 struct qfq_group;
 
+struct qfq_aggregate;
+
 struct qfq_class {
 	struct Qdisc_class_common common;
 
@@ -116,7 +140,12 @@
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	struct Qdisc *qdisc;
+	struct list_head alist;		/* Link for active-classes list. */
+	struct qfq_aggregate *agg;	/* Parent aggregate. */
+	int deficit;			/* DRR deficit counter. */
+};
 
+struct qfq_aggregate {
 	struct hlist_node next;	/* Link for the slot list. */
 	u64 S, F;		/* flow timestamps (exact) */
 
@@ -127,8 +156,18 @@
 	struct qfq_group *grp;
 
 	/* these are copied from the flowset. */
-	u32	inv_w;		/* ONE_FP/weight */
-	u32	lmax;		/* Max packet size for this flow. */
+	u32	class_weight; /* Weight of each class in this aggregate. */
+	/* Max pkt size for the classes in this aggregate, DRR quantum. */
+	int	lmax;
+
+	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
+	u32	budgetmax;  /* Max budget for this aggregate. */
+	u32	initial_budget, budget;     /* Initial and current budget. */
+
+	int		  num_classes;	/* Number of classes in this aggr. */
+	struct list_head  active;	/* DRR queue of active classes. */
+
+	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
 };
 
 struct qfq_group {
@@ -138,7 +177,7 @@
 	unsigned int front;		/* Index of the front slot. */
 	unsigned long full_slots;	/* non-empty slots */
 
-	/* Array of RR lists of active classes. */
+	/* Array of RR lists of active aggregates. */
 	struct hlist_head slots[QFQ_MAX_SLOTS];
 };
 
@@ -146,13 +185,28 @@
 	struct tcf_proto *filter_list;
 	struct Qdisc_class_hash clhash;
 
-	u64		V;		/* Precise virtual time. */
-	u32		wsum;		/* weight sum */
+	u64			oldV, V;	/* Precise virtual times. */
+	struct qfq_aggregate	*in_serv_agg;   /* Aggregate being served. */
+	u32			num_active_agg; /* Num. of active aggregates */
+	u32			wsum;		/* weight sum */
 
 	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
 	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
+	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */
+
+	u32 max_agg_classes;		/* Max number of classes per aggr. */
+	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
 };
 
+/*
+ * Possible reasons why the timestamps of an aggregate are updated:
+ * enqueue: the aggregate switches from idle to active and must be scheduled
+ *	    for service
+ * requeue: the aggregate finishes its budget, so it stops being served and
+ *	    must be rescheduled for service
+ */
+enum update_reason {enqueue, requeue};
+
 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
@@ -182,18 +236,18 @@
  * index = log_2(maxlen/weight) but we need to apply the scaling.
  * This is used only once at flow creation.
  */
-static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
+static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
 {
 	u64 slot_size = (u64)maxlen * inv_w;
 	unsigned long size_map;
 	int index = 0;
 
-	size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
+	size_map = slot_size >> min_slot_shift;
 	if (!size_map)
 		goto out;
 
 	index = __fls(size_map) + 1;	/* basically a log_2 */
-	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
+	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
 
 	if (index < 0)
 		index = 0;
@@ -204,66 +258,150 @@
 	return index;
 }
 
-/* Length of the next packet (0 if the queue is empty). */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
+static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
+static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
+			     enum update_reason);
 
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
+static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+			 u32 lmax, u32 weight)
+{
+	INIT_LIST_HEAD(&agg->active);
+	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
+
+	agg->lmax = lmax;
+	agg->class_weight = weight;
 }
 
-static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
-static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
-			       unsigned int len);
-
-static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
-				    u32 lmax, u32 inv_w, int delta_w)
+static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
+					  u32 lmax, u32 weight)
 {
-	int i;
+	struct qfq_aggregate *agg;
+	struct hlist_node *n;
 
-	/* update qfq-specific data */
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
+	hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
+		if (agg->lmax == lmax && agg->class_weight == weight)
+			return agg;
 
-	cl->grp = &q->groups[i];
-
-	q->wsum += delta_w;
+	return NULL;
 }
 
-static void qfq_update_reactivate_class(struct qfq_sched *q,
-					struct qfq_class *cl,
-					u32 inv_w, u32 lmax, int delta_w)
-{
-	bool need_reactivation = false;
-	int i = qfq_calc_index(inv_w, lmax);
 
-	if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-		/*
-		 * shift cl->F back, to not charge the
-		 * class for the not-yet-served head
-		 * packet
-		 */
-		cl->F = cl->S;
-		/* remove class from its slot in the old group */
-		qfq_deactivate_class(q, cl);
-		need_reactivation = true;
+/* Update aggregate as a function of the new number of classes. */
+static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+			   int new_num_classes)
+{
+	u32 new_agg_weight;
+
+	if (new_num_classes == q->max_agg_classes)
+		hlist_del_init(&agg->nonfull_next);
+
+	if (agg->num_classes > new_num_classes &&
+	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
+		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
+
+	agg->budgetmax = new_num_classes * agg->lmax;
+	new_agg_weight = agg->class_weight * new_num_classes;
+	agg->inv_w = ONE_FP/new_agg_weight;
+
+	if (agg->grp == NULL) {
+		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
+				       q->min_slot_shift);
+		agg->grp = &q->groups[i];
 	}
 
-	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+	q->wsum +=
+		(int) agg->class_weight * (new_num_classes - agg->num_classes);
 
-	if (need_reactivation) /* activate in new group */
-		qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+	agg->num_classes = new_num_classes;
 }
 
+/* Add class to aggregate. */
+static void qfq_add_to_agg(struct qfq_sched *q,
+			   struct qfq_aggregate *agg,
+			   struct qfq_class *cl)
+{
+	cl->agg = agg;
+
+	qfq_update_agg(q, agg, agg->num_classes+1);
+	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
+		list_add_tail(&cl->alist, &agg->active);
+		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
+		    cl && q->in_serv_agg != agg) /* agg was inactive */
+			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
+	}
+}
+
+static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
+
+static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
+{
+	if (!hlist_unhashed(&agg->nonfull_next))
+		hlist_del_init(&agg->nonfull_next);
+	if (q->in_serv_agg == agg)
+		q->in_serv_agg = qfq_choose_next_agg(q);
+	kfree(agg);
+}
+
+/* Deschedule class from within its parent aggregate. */
+static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
+{
+	struct qfq_aggregate *agg = cl->agg;
+
+
+	list_del(&cl->alist); /* remove from RR queue of the aggregate */
+	if (list_empty(&agg->active)) /* agg is now inactive */
+		qfq_deactivate_agg(q, agg);
+}
+
+/* Remove class from its parent aggregate. */
+static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
+{
+	struct qfq_aggregate *agg = cl->agg;
+
+	cl->agg = NULL;
+	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
+		qfq_destroy_agg(q, agg);
+		return;
+	}
+	qfq_update_agg(q, agg, agg->num_classes-1);
+}
+
+/* Deschedule class and remove it from its parent aggregate. */
+static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
+{
+	if (cl->qdisc->q.qlen > 0) /* class is active */
+		qfq_deactivate_class(q, cl);
+
+	qfq_rm_from_agg(q, cl);
+}
+
+/* Move class to a new aggregate, matching the new class weight and/or lmax */
+static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
+			   u32 lmax)
+{
+	struct qfq_sched *q = qdisc_priv(sch);
+	struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
+
+	if (new_agg == NULL) { /* create new aggregate */
+		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
+		if (new_agg == NULL)
+			return -ENOBUFS;
+		qfq_init_agg(q, new_agg, lmax, weight);
+	}
+	qfq_deact_rm_from_agg(q, cl);
+	qfq_add_to_agg(q, new_agg, cl);
+
+	return 0;
+}
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl = (struct qfq_class *)*arg;
+	bool existing = false;
 	struct nlattr *tb[TCA_QFQ_MAX + 1];
+	struct qfq_aggregate *new_agg = NULL;
 	u32 weight, lmax, inv_w;
 	int err;
 	int delta_w;
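
qfq_add_to_agg()/qfq_update_agg() above size an aggregate's budget as num_classes * lmax, and the classes inside the aggregate are then served round-robin with per-class deficit counters (cl->deficit, initialised to lmax). A toy DRR rotation over one aggregate, with made-up backlogs and a fixed packet size:

#include <stdio.h>

#define NCLASSES 3

int main(void)
{
	int lmax = 1500;			/* per-class DRR quantum */
	int deficit[NCLASSES] = { 1500, 1500, 1500 };
	int backlog[NCLASSES] = { 4000, 1200, 2600 };	/* bytes queued */
	int pkt = 1000;				/* fixed packet size, for brevity */
	int busy = NCLASSES;

	while (busy) {
		for (int i = 0; i < NCLASSES; i++) {
			if (!backlog[i])
				continue;

			while (backlog[i]) {
				int take = backlog[i] < pkt ? backlog[i] : pkt;

				if (deficit[i] < take)
					break;		/* quantum used up */
				deficit[i] -= take;
				backlog[i] -= take;
				printf("class %d sends %d bytes\n", i, take);
			}

			if (!backlog[i])
				busy--;
			else
				deficit[i] += lmax;	/* next round: add quantum */
		}
	}
	return 0;
}
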
@@ -286,15 +424,6 @@
 	} else
 		weight = 1;
 
-	inv_w = ONE_FP / weight;
-	weight = ONE_FP / inv_w;
-	delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
-	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
-		pr_notice("qfq: total weight out of range (%u + %u)\n",
-			  delta_w, q->wsum);
-		return -EINVAL;
-	}
-
 	if (tb[TCA_QFQ_LMAX]) {
 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
 		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
@@ -304,7 +433,23 @@
 	} else
 		lmax = psched_mtu(qdisc_dev(sch));
 
-	if (cl != NULL) {
+	inv_w = ONE_FP / weight;
+	weight = ONE_FP / inv_w;
+
+	if (cl != NULL &&
+	    lmax == cl->agg->lmax &&
+	    weight == cl->agg->class_weight)
+		return 0; /* nothing to change */
+
+	delta_w = weight - (cl ? cl->agg->class_weight : 0);
+
+	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
+		pr_notice("qfq: total weight out of range (%d + %u)\n",
+			  delta_w, q->wsum);
+		return -EINVAL;
+	}
+
+	if (cl != NULL) { /* modify existing class */
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -312,25 +457,18 @@
 			if (err)
 				return err;
 		}
-
-		if (lmax == cl->lmax && inv_w == cl->inv_w)
-			return 0; /* nothing to update */
-
-		sch_tree_lock(sch);
-		qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
-		sch_tree_unlock(sch);
-
-		return 0;
+		existing = true;
+		goto set_change_agg;
 	}
 
+	/* create and init new class */
 	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
 	if (cl == NULL)
 		return -ENOBUFS;
 
 	cl->refcnt = 1;
 	cl->common.classid = classid;
-
-	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+	cl->deficit = lmax;
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -341,11 +479,8 @@
 		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
 					qdisc_root_sleeping_lock(sch),
 					tca[TCA_RATE]);
-		if (err) {
-			qdisc_destroy(cl->qdisc);
-			kfree(cl);
-			return err;
-		}
+		if (err)
+			goto destroy_class;
 	}
 
 	sch_tree_lock(sch);
@@ -354,19 +489,39 @@
 
 	qdisc_class_hash_grow(sch, &q->clhash);
 
+set_change_agg:
+	sch_tree_lock(sch);
+	new_agg = qfq_find_agg(q, lmax, weight);
+	if (new_agg == NULL) { /* create new aggregate */
+		sch_tree_unlock(sch);
+		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
+		if (new_agg == NULL) {
+			err = -ENOBUFS;
+			gen_kill_estimator(&cl->bstats, &cl->rate_est);
+			goto destroy_class;
+		}
+		sch_tree_lock(sch);
+		qfq_init_agg(q, new_agg, lmax, weight);
+	}
+	if (existing)
+		qfq_deact_rm_from_agg(q, cl);
+	qfq_add_to_agg(q, new_agg, cl);
+	sch_tree_unlock(sch);
+
 	*arg = (unsigned long)cl;
 	return 0;
+
+destroy_class:
+	qdisc_destroy(cl->qdisc);
+	kfree(cl);
+	return err;
 }
 
 static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 
-	if (cl->inv_w) {
-		q->wsum -= ONE_FP / cl->inv_w;
-		cl->inv_w = 0;
-	}
-
+	qfq_rm_from_agg(q, cl);
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 	qdisc_destroy(cl->qdisc);
 	kfree(cl);
@@ -481,8 +636,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
-	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
+	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
 		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
@@ -500,8 +655,8 @@
 	memset(&xstats, 0, sizeof(xstats));
 	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
 
-	xstats.weight = ONE_FP/cl->inv_w;
-	xstats.lmax = cl->lmax;
+	xstats.weight = cl->agg->class_weight;
+	xstats.lmax = cl->agg->lmax;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
@@ -652,16 +807,16 @@
  * perhaps
  *
 	old_V ^= q->V;
-	old_V >>= QFQ_MIN_SLOT_SHIFT;
+	old_V >>= q->min_slot_shift;
 	if (old_V) {
 		...
 	}
  *
  */
-static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
+static void qfq_make_eligible(struct qfq_sched *q)
 {
-	unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
-	unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
+	unsigned long vslot = q->V >> q->min_slot_shift;
+	unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 
 	if (vslot != old_vslot) {
 		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
@@ -672,34 +827,38 @@
 
 
 /*
- * If the weight and lmax (max_pkt_size) of the classes do not change,
- * then QFQ guarantees that the slot index is never higher than
- * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ * The index of the slot in which the aggregate is to be inserted must
+ * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
+ * because the start time of the group may be moved backward by one
+ * slot after the aggregate has been inserted, and this would cause
+ * non-empty slots to be right-shifted by one position.
  *
- * With the current values of the above constants, the index is
- * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ+ does meet the above constraint according to the current
+ * values of its parameters. In fact, if the weight and lmax of the
+ * classes do not change, then, from the theory, QFQ+ guarantees that
+ * the slot index is never higher than
+ * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
+ * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
  *
  * When the weight of a class is increased or the lmax of the class is
- * decreased, a new class with smaller slot size may happen to be
- * activated. The activation of this class should be properly delayed
- * to when the service of the class has finished in the ideal system
- * tracked by QFQ. If the activation of the class is not delayed to
- * this reference time instant, then this class may be unjustly served
- * before other classes waiting for service. This may cause
- * (unfrequently) the above bound to the slot index to be violated for
- * some of these unlucky classes.
+ * decreased, a new aggregate with smaller slot size than the original
+ * parent aggregate of the class may happen to be activated. The
+ * activation of this aggregate should be properly delayed to when the
+ * service of the class has finished in the ideal system tracked by
+ * QFQ+. If the activation of the aggregate is not delayed to this
+ * reference time instant, then this aggregate may be unjustly served
+ * before other aggregates waiting for service. This may cause the
+ * above bound to the slot index to be violated for some of these
+ * unlucky aggregates.
  *
- * Instead of delaying the activation of the new class, which is quite
- * complex, the following inaccurate but simple solution is used: if
- * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
- * of the class are shifted backward so as to let the slot index
- * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
- * the slot index is above it, then the data structure implementing
- * the bucket list either gets immediately corrupted or may get
- * corrupted on a possible next packet arrival that causes the start
- * time of the group to be shifted backward.
+ * Instead of delaying the activation of the new aggregate, which is
+ * quite complex, the following inaccurate but simple solution is used:
+ * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
+ * timestamps of the aggregate are shifted backward so as to let the
+ * slot index become equal to QFQ_MAX_SLOTS-2.
  */
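To check that the constants quoted above really give 18, here is a quick
stand-alone sketch; the values 8, 128 and 1/64 are copied from the comment
text itself rather than from the scheduler headers, so treat this purely as
an illustration of the arithmetic.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_agg_classes = 8;	/* QFQ_MAX_AGG_CLASSES, per the comment */
	unsigned int mtu_over_min_lmax = 128;	/* (1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX */
	unsigned int wsum_over_max_weight = 64;	/* QFQ_MAX_WSUM/QFQ_MAX_WEIGHT */
	unsigned int bound;

	bound = 2 + max_agg_classes * mtu_over_min_lmax / wsum_over_max_weight;
	assert(bound == 18);			/* matches the "= 18" worked out above */
	printf("worst-case slot index: %u\n", bound);
	return 0;
}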
-static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
+static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
 			    u64 roundedS)
 {
 	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
@@ -708,22 +867,22 @@
 	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
 		u64 deltaS = roundedS - grp->S -
 			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
-		cl->S -= deltaS;
-		cl->F -= deltaS;
+		agg->S -= deltaS;
+		agg->F -= deltaS;
 		slot = QFQ_MAX_SLOTS - 2;
 	}
 
 	i = (grp->front + slot) % QFQ_MAX_SLOTS;
 
-	hlist_add_head(&cl->next, &grp->slots[i]);
+	hlist_add_head(&agg->next, &grp->slots[i]);
 	__set_bit(slot, &grp->full_slots);
 }
 
 /* Maybe introduce hlist_first_entry?? */
-static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
+static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
 {
 	return hlist_entry(grp->slots[grp->front].first,
-			   struct qfq_class, next);
+			   struct qfq_aggregate, next);
 }
 
 /*
@@ -731,20 +890,20 @@
  */
 static void qfq_front_slot_remove(struct qfq_group *grp)
 {
-	struct qfq_class *cl = qfq_slot_head(grp);
+	struct qfq_aggregate *agg = qfq_slot_head(grp);
 
-	BUG_ON(!cl);
-	hlist_del(&cl->next);
+	BUG_ON(!agg);
+	hlist_del(&agg->next);
 	if (hlist_empty(&grp->slots[grp->front]))
 		__clear_bit(0, &grp->full_slots);
 }
 
 /*
- * Returns the first full queue in a group. As a side effect,
- * adjust the bucket list so the first non-empty bucket is at
- * position 0 in full_slots.
+ * Returns the first aggregate in the first non-empty bucket of the
+ * group. As a side effect, adjusts the bucket list so the first
+ * non-empty bucket is at position 0 in full_slots.
  */
-static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
+static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
 {
 	unsigned int i;
 
@@ -780,7 +939,7 @@
 	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
 }
 
-static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
+static void qfq_update_eligible(struct qfq_sched *q)
 {
 	struct qfq_group *grp;
 	unsigned long ineligible;
@@ -792,137 +951,226 @@
 			if (qfq_gt(grp->S, q->V))
 				q->V = grp->S;
 		}
-		qfq_make_eligible(q, old_V);
+		qfq_make_eligible(q);
 	}
 }
 
-/*
- * Updates the class, returns true if also the group needs to be updated.
- */
-static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
+/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
+static void agg_dequeue(struct qfq_aggregate *agg,
+			struct qfq_class *cl, unsigned int len)
 {
-	unsigned int len = qdisc_peek_len(cl->qdisc);
+	qdisc_dequeue_peeked(cl->qdisc);
 
-	cl->S = cl->F;
-	if (!len)
-		qfq_front_slot_remove(grp);	/* queue is empty */
-	else {
-		u64 roundedS;
+	cl->deficit -= (int) len;
 
-		cl->F = cl->S + (u64)len * cl->inv_w;
-		roundedS = qfq_round_down(cl->S, grp->slot_shift);
-		if (roundedS == grp->S)
-			return false;
-
-		qfq_front_slot_remove(grp);
-		qfq_slot_insert(grp, cl, roundedS);
+	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
+		list_del(&cl->alist);
+	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
+		cl->deficit += agg->lmax;
+		list_move_tail(&cl->alist, &agg->active);
 	}
+}
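The deficit handling above is ordinary DRR inside the aggregate: each class
carries a deficit that is topped up by the aggregate's lmax, and a class whose
head packet no longer fits is refilled and rotated to the tail of the active
list. A rough user-space sketch of the refill rule, with made-up packet sizes
and the list rotation reduced to the refill step:

#include <stdio.h>

struct cls {
	const char *name;
	int deficit;	/* bytes the class may still send this round */
	int head_len;	/* length of its current head packet */
};

int main(void)
{
	int lmax = 1500;	/* quantum: the aggregate's max packet size */
	struct cls c[] = { { "A", 1500, 900 }, { "B", 1500, 1200 } };
	int round, i;

	for (round = 0; round < 2; round++) {
		for (i = 0; i < 2; i++) {
			if (c[i].deficit >= c[i].head_len) {
				c[i].deficit -= c[i].head_len;
				printf("%s sends %d bytes, deficit now %d\n",
				       c[i].name, c[i].head_len, c[i].deficit);
			} else {
				/* the qdisc also moves the class to the tail here */
				c[i].deficit += lmax;
				printf("%s refilled to %d\n",
				       c[i].name, c[i].deficit);
			}
		}
	}
	return 0;
}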
 
-	return true;
+static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
+					   struct qfq_class **cl,
+					   unsigned int *len)
+{
+	struct sk_buff *skb;
+
+	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
+	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
+	if (skb == NULL)
+		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
+	else
+		*len = qdisc_pkt_len(skb);
+
+	return skb;
+}
+
+/* Update F according to the actual service received by the aggregate. */
+static inline void charge_actual_service(struct qfq_aggregate *agg)
+{
+	/* compute the service received by the aggregate */
+	u32 service_received = agg->initial_budget - agg->budget;
+
+	agg->F = agg->S + (u64)service_received * agg->inv_w;
 }
 
 static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
+	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
 	struct qfq_class *cl;
-	struct sk_buff *skb;
-	unsigned int len;
-	u64 old_V;
+	struct sk_buff *skb = NULL;
+	/* next-packet len, 0 means no more active classes in in-service agg */
+	unsigned int len = 0;
+
+	if (in_serv_agg == NULL)
+		return NULL;
+
+	if (!list_empty(&in_serv_agg->active))
+		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
+
+	/*
+	 * If there are no active classes in the in-service aggregate,
+	 * or if the aggregate does not have enough budget to serve its next
+	 * class, then choose the next aggregate to serve.
+	 */
+	if (len == 0 || in_serv_agg->budget < len) {
+		charge_actual_service(in_serv_agg);
+
+		/* recharge the budget of the aggregate */
+		in_serv_agg->initial_budget = in_serv_agg->budget =
+			in_serv_agg->budgetmax;
+
+		if (!list_empty(&in_serv_agg->active))
+			/*
+			 * Still active: reschedule for
+			 * service. Possible optimization: if no other
+			 * aggregate is active, then there is no point
+			 * in rescheduling this aggregate, and we can
+			 * just keep it as the in-service one. This
+			 * should be however a corner case, and to
+			 * handle it, we would need to maintain an
+			 * extra num_active_aggs field.
+			 */
+			qfq_activate_agg(q, in_serv_agg, requeue);
+		else if (sch->q.qlen == 0) { /* no aggregate to serve */
+			q->in_serv_agg = NULL;
+			return NULL;
+		}
+
+		/*
+		 * If we get here, there are other aggregates queued:
+		 * choose the new aggregate to serve.
+		 */
+		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
+		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
+	}
+	if (!skb)
+		return NULL;
+
+	sch->q.qlen--;
+	qdisc_bstats_update(sch, skb);
+
+	agg_dequeue(in_serv_agg, cl, len);
+	in_serv_agg->budget -= len;
+	q->V += (u64)len * IWSUM;
+	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
+		 len, (unsigned long long) in_serv_agg->F,
+		 (unsigned long long) q->V);
+
+	return skb;
+}
+
+static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
+{
+	struct qfq_group *grp;
+	struct qfq_aggregate *agg, *new_front_agg;
+	u64 old_F;
+
+	qfq_update_eligible(q);
+	q->oldV = q->V;
 
 	if (!q->bitmaps[ER])
 		return NULL;
 
 	grp = qfq_ffs(q, q->bitmaps[ER]);
+	old_F = grp->F;
 
-	cl = qfq_slot_head(grp);
-	skb = qdisc_dequeue_peeked(cl->qdisc);
-	if (!skb) {
-		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
-		return NULL;
+	agg = qfq_slot_head(grp);
+
+	/* agg starts to be served, remove it from schedule */
+	qfq_front_slot_remove(grp);
+
+	new_front_agg = qfq_slot_scan(grp);
+
+	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
+		__clear_bit(grp->index, &q->bitmaps[ER]);
+	else {
+		u64 roundedS = qfq_round_down(new_front_agg->S,
+					      grp->slot_shift);
+		unsigned int s;
+
+		if (grp->S == roundedS)
+			return agg;
+		grp->S = roundedS;
+		grp->F = roundedS + (2ULL << grp->slot_shift);
+		__clear_bit(grp->index, &q->bitmaps[ER]);
+		s = qfq_calc_state(q, grp);
+		__set_bit(grp->index, &q->bitmaps[s]);
 	}
 
-	sch->q.qlen--;
-	qdisc_bstats_update(sch, skb);
+	qfq_unblock_groups(q, grp->index, old_F);
 
-	old_V = q->V;
-	len = qdisc_pkt_len(skb);
-	q->V += (u64)len * IWSUM;
-	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
-		 len, (unsigned long long) cl->F, (unsigned long long) q->V);
-
-	if (qfq_update_class(grp, cl)) {
-		u64 old_F = grp->F;
-
-		cl = qfq_slot_scan(grp);
-		if (!cl)
-			__clear_bit(grp->index, &q->bitmaps[ER]);
-		else {
-			u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
-			unsigned int s;
-
-			if (grp->S == roundedS)
-				goto skip_unblock;
-			grp->S = roundedS;
-			grp->F = roundedS + (2ULL << grp->slot_shift);
-			__clear_bit(grp->index, &q->bitmaps[ER]);
-			s = qfq_calc_state(q, grp);
-			__set_bit(grp->index, &q->bitmaps[s]);
-		}
-
-		qfq_unblock_groups(q, grp->index, old_F);
-	}
-
-skip_unblock:
-	qfq_update_eligible(q, old_V);
-
-	return skb;
+	return agg;
 }
 
 /*
- * Assign a reasonable start time for a new flow k in group i.
+ * Assign a reasonable start time for a new aggregate in group i.
  * Admissible values for \hat(F) are multiples of \sigma_i
  * no greater than V+\sigma_i . Larger values mean that
  * we had a wraparound so we consider the timestamp to be stale.
  *
  * If F is not stale and F >= V then we set S = F.
  * Otherwise we should assign S = V, but this may violate
- * the ordering in ER. So, if we have groups in ER, set S to
- * the F_j of the first group j which would be blocking us.
+ * the ordering in EB (see [2]). So, if we have groups in ER,
+ * set S to the F_j of the first group j which would be blocking us.
  * We are guaranteed not to move S backward because
  * otherwise our group i would still be blocked.
  */
-static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
+static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
 	unsigned long mask;
 	u64 limit, roundedF;
-	int slot_shift = cl->grp->slot_shift;
+	int slot_shift = agg->grp->slot_shift;
 
-	roundedF = qfq_round_down(cl->F, slot_shift);
+	roundedF = qfq_round_down(agg->F, slot_shift);
 	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
-	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
+	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
 		/* timestamp was stale */
-		mask = mask_from(q->bitmaps[ER], cl->grp->index);
+		mask = mask_from(q->bitmaps[ER], agg->grp->index);
 		if (mask) {
 			struct qfq_group *next = qfq_ffs(q, mask);
 			if (qfq_gt(roundedF, next->F)) {
 				if (qfq_gt(limit, next->F))
-					cl->S = next->F;
+					agg->S = next->F;
 				else /* preserve timestamp correctness */
-					cl->S = limit;
+					agg->S = limit;
 				return;
 			}
 		}
-		cl->S = q->V;
+		agg->S = q->V;
 	} else  /* timestamp is not stale */
-		cl->S = cl->F;
+		agg->S = agg->F;
 }
 
+/*
+ * Update the timestamps of agg before scheduling/rescheduling it for
+ * service.  In particular, assign to agg->F its maximum possible
+ * value, i.e., the virtual finish time with which the aggregate
+ * should be labeled if it used all its budget once in service.
+ */
+static inline void
+qfq_update_agg_ts(struct qfq_sched *q,
+		    struct qfq_aggregate *agg, enum update_reason reason)
+{
+	if (reason != requeue)
+		qfq_update_start(q, agg);
+	else /* just charge agg for the service received */
+		agg->S = agg->F;
+
+	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
+}
+
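Read together with charge_actual_service() above, the finish timestamp is
handled in two steps: when an aggregate is (re)scheduled it is labeled
optimistically with F = S + budgetmax * inv_w, and when its service round ends
F is pulled back to S + (initial_budget - budget) * inv_w, i.e. to the service
it actually received. For example, an aggregate with a 3000-byte budgetmax that
is descheduled after sending only 1700 bytes ends up charged for 1700 bytes,
not 3000.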
+static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
+
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
+	struct qfq_aggregate *agg;
 	int err = 0;
 
 	cl = qfq_classify(skb, sch, &err);
@@ -934,11 +1182,13 @@
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-	if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
 		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-			  cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
-		qfq_update_reactivate_class(q, cl, cl->inv_w,
-					    qdisc_pkt_len(skb), 0);
+			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
+		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
+				     qdisc_pkt_len(skb));
+		if (err)
+			return err;
 	}
 
 	err = qdisc_enqueue(skb, cl->qdisc);
@@ -954,35 +1204,50 @@
 	bstats_update(&cl->bstats, skb);
 	++sch->q.qlen;
 
-	/* If the new skb is not the head of queue, then done here. */
-	if (cl->qdisc->q.qlen != 1)
-		return err;
+	agg = cl->agg;
+	/* if the queue was not empty, then done here */
+	if (cl->qdisc->q.qlen != 1) {
+		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+		    list_first_entry(&agg->active, struct qfq_class, alist)
+		    == cl && cl->deficit < qdisc_pkt_len(skb))
+			list_move_tail(&cl->alist, &agg->active);
 
-	/* If reach this point, queue q was idle */
-	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+		return err;
+	}
+
+	/* schedule class for service within the aggregate */
+	cl->deficit = agg->lmax;
+	list_add_tail(&cl->alist, &agg->active);
+
+	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
+		return err; /* aggregate was not empty, nothing else to do */
+
+	/* recharge budget */
+	agg->initial_budget = agg->budget = agg->budgetmax;
+
+	qfq_update_agg_ts(q, agg, enqueue);
+	if (q->in_serv_agg == NULL)
+		q->in_serv_agg = agg;
+	else if (agg != q->in_serv_agg)
+		qfq_schedule_agg(q, agg);
 
 	return err;
 }
 
 /*
- * Handle class switch from idle to backlogged.
+ * Schedule aggregate according to its timestamps.
  */
-static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
-			       unsigned int pkt_len)
+static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-	struct qfq_group *grp = cl->grp;
+	struct qfq_group *grp = agg->grp;
 	u64 roundedS;
 	int s;
 
-	qfq_update_start(q, cl);
-
-	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
-	roundedS = qfq_round_down(cl->S, grp->slot_shift);
+	roundedS = qfq_round_down(agg->S, grp->slot_shift);
 
 	/*
-	 * insert cl in the correct bucket.
-	 * If cl->S >= grp->S we don't need to adjust the
+	 * Insert agg in the correct bucket.
+	 * If agg->S >= grp->S we don't need to adjust the
 	 * bucket list and simply go to the insertion phase.
 	 * Otherwise grp->S is decreasing, we must make room
 	 * in the bucket list, and also recompute the group state.
@@ -990,10 +1255,10 @@
 	 * was in ER make sure to adjust V.
 	 */
 	if (grp->full_slots) {
-		if (!qfq_gt(grp->S, cl->S))
+		if (!qfq_gt(grp->S, agg->S))
 			goto skip_update;
 
-		/* create a slot for this cl->S */
+		/* create a slot for this agg->S */
 		qfq_slot_rotate(grp, roundedS);
 		/* group was surely ineligible, remove */
 		__clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1008,46 +1273,61 @@
 
 	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
 		 s, q->bitmaps[s],
-		 (unsigned long long) cl->S,
-		 (unsigned long long) cl->F,
+		 (unsigned long long) agg->S,
+		 (unsigned long long) agg->F,
 		 (unsigned long long) q->V);
 
 skip_update:
-	qfq_slot_insert(grp, cl, roundedS);
+	qfq_slot_insert(grp, agg, roundedS);
 }
 
 
+/* Update agg ts and schedule agg for service */
+static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+			     enum update_reason reason)
+{
+	qfq_update_agg_ts(q, agg, reason);
+	qfq_schedule_agg(q, agg);
+}
+
 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-			    struct qfq_class *cl)
+			    struct qfq_aggregate *agg)
 {
 	unsigned int i, offset;
 	u64 roundedS;
 
-	roundedS = qfq_round_down(cl->S, grp->slot_shift);
+	roundedS = qfq_round_down(agg->S, grp->slot_shift);
 	offset = (roundedS - grp->S) >> grp->slot_shift;
+
 	i = (grp->front + offset) % QFQ_MAX_SLOTS;
 
-	hlist_del(&cl->next);
+	hlist_del(&agg->next);
 	if (hlist_empty(&grp->slots[i]))
 		__clear_bit(offset, &grp->full_slots);
 }
 
 /*
- * called to forcibly destroy a queue.
- * If the queue is not in the front bucket, or if it has
- * other queues in the front bucket, we can simply remove
- * the queue with no other side effects.
+ * Called to forcibly deschedule an aggregate.  If the aggregate is
+ * not in the front bucket, or if the latter has other aggregates in
+ * the front bucket, we can simply remove the aggregate with no other
+ * side effects.
  * Otherwise we must propagate the event up.
  */
-static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
+static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-	struct qfq_group *grp = cl->grp;
+	struct qfq_group *grp = agg->grp;
 	unsigned long mask;
 	u64 roundedS;
 	int s;
 
-	cl->F = cl->S;
-	qfq_slot_remove(q, grp, cl);
+	if (agg == q->in_serv_agg) {
+		charge_actual_service(agg);
+		q->in_serv_agg = qfq_choose_next_agg(q);
+		return;
+	}
+
+	agg->F = agg->S;
+	qfq_slot_remove(q, grp, agg);
 
 	if (!grp->full_slots) {
 		__clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1066,8 +1346,8 @@
 		}
 		__clear_bit(grp->index, &q->bitmaps[ER]);
 	} else if (hlist_empty(&grp->slots[grp->front])) {
-		cl = qfq_slot_scan(grp);
-		roundedS = qfq_round_down(cl->S, grp->slot_shift);
+		agg = qfq_slot_scan(grp);
+		roundedS = qfq_round_down(agg->S, grp->slot_shift);
 		if (grp->S != roundedS) {
 			__clear_bit(grp->index, &q->bitmaps[ER]);
 			__clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1080,7 +1360,7 @@
 		}
 	}
 
-	qfq_update_eligible(q, q->V);
+	qfq_update_eligible(q);
 }
 
 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1092,6 +1372,32 @@
 		qfq_deactivate_class(q, cl);
 }
 
+static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
+				       struct hlist_head *slot)
+{
+	struct qfq_aggregate *agg;
+	struct hlist_node *n;
+	struct qfq_class *cl;
+	unsigned int len;
+
+	hlist_for_each_entry(agg, n, slot, next) {
+		list_for_each_entry(cl, &agg->active, alist) {
+
+			if (!cl->qdisc->ops->drop)
+				continue;
+
+			len = cl->qdisc->ops->drop(cl->qdisc);
+			if (len > 0) {
+				if (cl->qdisc->q.qlen == 0)
+					qfq_deactivate_class(q, cl);
+
+				return len;
+			}
+		}
+	}
+	return 0;
+}
+
 static unsigned int qfq_drop(struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
@@ -1101,24 +1407,13 @@
 	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
 		grp = &q->groups[i];
 		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
-			struct qfq_class *cl;
-			struct hlist_node *n;
-
-			hlist_for_each_entry(cl, n, &grp->slots[j], next) {
-
-				if (!cl->qdisc->ops->drop)
-					continue;
-
-				len = cl->qdisc->ops->drop(cl->qdisc);
-				if (len > 0) {
-					sch->q.qlen--;
-					if (!cl->qdisc->q.qlen)
-						qfq_deactivate_class(q, cl);
-
-					return len;
-				}
+			len = qfq_drop_from_slot(q, &grp->slots[j]);
+			if (len > 0) {
+				sch->q.qlen--;
+				return len;
 			}
 		}
+
 	}
 
 	return 0;
@@ -1129,44 +1424,51 @@
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_group *grp;
 	int i, j, err;
+	u32 max_cl_shift, maxbudg_shift, max_classes;
 
 	err = qdisc_class_hash_init(&q->clhash);
 	if (err < 0)
 		return err;
 
+	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
+		max_classes = QFQ_MAX_AGG_CLASSES;
+	else
+		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+	/* max_cl_shift = floor(log_2(max_classes)) */
+	max_cl_shift = __fls(max_classes);
+	q->max_agg_classes = 1<<max_cl_shift;
+
+	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
+	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
+	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
+
 	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
 		grp = &q->groups[i];
 		grp->index = i;
-		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
-				   - (QFQ_MAX_INDEX - i);
+		grp->slot_shift = q->min_slot_shift + i;
 		for (j = 0; j < QFQ_MAX_SLOTS; j++)
 			INIT_HLIST_HEAD(&grp->slots[j]);
 	}
 
+	INIT_HLIST_HEAD(&q->nonfull_aggs);
+
 	return 0;
 }
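As a quick illustration of the shift computation in qfq_init_qdisc(), assume
QFQ_MAX_AGG_CLASSES is 8 (the value used in the slot-index bound comment
earlier) and a hypothetical device with a tx_queue_len of 1000; the sketch
below reproduces the max_classes / max_cl_shift arithmetic with a plain C
substitute for __fls():

#include <stdio.h>

/* plain C substitute for __fls(): index of the highest set bit */
static unsigned int fls_index(unsigned int x)
{
	unsigned int i = 0;

	while (x >>= 1)
		i++;
	return i;
}

int main(void)
{
	unsigned int qfq_max_agg_classes = 8;	/* assumed, see bound comment */
	unsigned int tx_queue_len = 1000;	/* hypothetical device value */
	unsigned int max_classes, max_cl_shift;

	if (tx_queue_len + 1 > qfq_max_agg_classes)
		max_classes = qfq_max_agg_classes;
	else
		max_classes = tx_queue_len + 1;

	max_cl_shift = fls_index(max_classes);	/* floor(log2(max_classes)) */
	printf("max_classes=%u max_cl_shift=%u max_agg_classes=%u\n",
	       max_classes, max_cl_shift, 1u << max_cl_shift);
	return 0;
}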
 
 static void qfq_reset_qdisc(struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
 	struct qfq_class *cl;
-	struct hlist_node *n, *tmp;
-	unsigned int i, j;
-
-	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
-		grp = &q->groups[i];
-		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
-			hlist_for_each_entry_safe(cl, n, tmp,
-						  &grp->slots[j], next) {
-				qfq_deactivate_class(q, cl);
-			}
-		}
-	}
+	struct hlist_node *n;
+	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+			if (cl->qdisc->q.qlen > 0)
+				qfq_deactivate_class(q, cl);
+
 			qdisc_reset(cl->qdisc);
+		}
 	}
 	sch->q.qlen = 0;
 }
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 126b014..a9edd2e 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -9,7 +9,6 @@
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_SHA1
-	select CRYPTO_MD5 if SCTP_HMAC_MD5
 	select LIBCRC32C
 	---help---
 	  Stream Control Transmission Protocol
@@ -68,33 +67,21 @@
 
 	  If unsure, say N
 
-choice
-	prompt "SCTP: Cookie HMAC Algorithm"
-	default SCTP_HMAC_MD5
+config SCTP_COOKIE_HMAC_MD5
+	bool "Enable optional MD5 hmac cookie generation"
 	help
-	  HMAC algorithm to be used during association initialization.  It
-	  is strongly recommended to use HMAC-SHA1 or HMAC-MD5.  See 
-	  configuration for Cryptographic API and enable those algorithms
-          to make usable by SCTP. 
+	  Enable optional MD5 hmac based SCTP cookie generation
+	default y
+	select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
+	select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
 
-config SCTP_HMAC_NONE
-	bool "None"
-	help 
-	  Choosing this disables the use of an HMAC during association 
-	  establishment.  It is advised to use either HMAC-MD5 or HMAC-SHA1.
-
-config SCTP_HMAC_SHA1
-	bool "HMAC-SHA1"
-	help 
-	  Enable the use of HMAC-SHA1 during association establishment.  It 
-	  is advised to use either HMAC-MD5 or HMAC-SHA1.
-
-config SCTP_HMAC_MD5
-	bool "HMAC-MD5"
+config SCTP_COOKIE_HMAC_SHA1
+	bool "Enable optional SHA1 hmac cookie generation"
 	help
-	  Enable the use of HMAC-MD5 during association establishment.  It is 
-	  advised to use either HMAC-MD5 or HMAC-SHA1.
+	  Enable optional SHA1 hmac based SCTP cookie generation
+	default y
+	select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
+	select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
 
-endchoice
 
 endif # IP_SCTP
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2d51842..456bc3d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1190,6 +1190,15 @@
 	/* Whether Cookie Preservative is enabled(1) or not(0) */
 	net->sctp.cookie_preserve_enable 	= 1;
 
+	/* Default sctp sockets to use md5 as their hmac alg */
+#if defined (CONFIG_CRYPTO_MD5)
+	net->sctp.sctp_hmac_alg			= "md5";
+#elif defined (CONFIG_CRYPTO_SHA1)
+	net->sctp.sctp_hmac_alg			= "sha1";
+#else
+	net->sctp.sctp_hmac_alg			= NULL;
+#endif
+
 	/* Max.Burst		    - 4 */
 	net->sctp.max_burst			= SCTP_DEFAULT_MAX_BURST;
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fbe1636..e0f01a4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1090,6 +1090,25 @@
 	return retval;
 }
 
+struct sctp_chunk *sctp_make_violation_max_retrans(
+	const struct sctp_association *asoc,
+	const struct sctp_chunk *chunk)
+{
+	struct sctp_chunk *retval;
+	static const char error[] = "Association exceeded its max_retrans count";
+	size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t);
+
+	retval = sctp_make_abort(asoc, chunk, payload_len);
+	if (!retval)
+		goto nodata;
+
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
+	sctp_addto_chunk(retval, sizeof(error), error);
+
+nodata:
+	return retval;
+}
+
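One detail worth noting in the helper above: error is a char array, so
sizeof(error) includes the terminating NUL, and both the cause length passed to
sctp_init_cause() and the bytes copied by sctp_addto_chunk() carry it. A
trivial stand-alone check of that sizeof behaviour:

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char error[] = "Association exceeded its max_retrans count";

	/* sizeof is strlen + 1 because of the NUL terminator */
	printf("strlen=%zu sizeof=%zu\n", strlen(error), sizeof(error));
	return 0;
}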
 /* Make a HEARTBEAT chunk.  */
 struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
 				  const struct sctp_transport *transport)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 6773d78..c076956 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -577,7 +577,7 @@
 				  unsigned int error)
 {
 	struct sctp_ulpevent *event;
-
+	struct sctp_chunk *abort;
 	/* Cancel any partial delivery in progress. */
 	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
@@ -593,6 +593,13 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
 				SCTP_ULPEVENT(event));
 
+	if (asoc->overall_error_count >= asoc->max_retrans) {
+		abort = sctp_make_violation_max_retrans(asoc, chunk);
+		if (abort)
+			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+					SCTP_CHUNK(abort));
+	}
+
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
@@ -1268,14 +1275,14 @@
 				sctp_outq_uncork(&asoc->outqueue);
 				local_cork = 0;
 			}
-			asoc = cmd->obj.ptr;
+			asoc = cmd->obj.asoc;
 			/* Register with the endpoint.  */
 			sctp_endpoint_add_asoc(ep, asoc);
 			sctp_hash_established(asoc);
 			break;
 
 		case SCTP_CMD_UPDATE_ASSOC:
-		       sctp_assoc_update(asoc, cmd->obj.ptr);
+		       sctp_assoc_update(asoc, cmd->obj.asoc);
 		       break;
 
 		case SCTP_CMD_PURGE_OUTQUEUE:
@@ -1315,7 +1322,7 @@
 			break;
 
 		case SCTP_CMD_PROCESS_FWDTSN:
-			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
+			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_GEN_SACK:
@@ -1331,7 +1338,7 @@
 		case SCTP_CMD_PROCESS_SACK:
 			/* Process an inbound SACK.  */
 			error = sctp_cmd_process_sack(commands, asoc,
-						      cmd->obj.ptr);
+						      cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_GEN_INIT_ACK:
@@ -1352,15 +1359,15 @@
 			 * layer which will bail.
 			 */
 			error = sctp_cmd_process_init(commands, asoc, chunk,
-						      cmd->obj.ptr, gfp);
+						      cmd->obj.init, gfp);
 			break;
 
 		case SCTP_CMD_GEN_COOKIE_ECHO:
 			/* Generate a COOKIE ECHO chunk.  */
 			new_obj = sctp_make_cookie_echo(asoc, chunk);
 			if (!new_obj) {
-				if (cmd->obj.ptr)
-					sctp_chunk_free(cmd->obj.ptr);
+				if (cmd->obj.chunk)
+					sctp_chunk_free(cmd->obj.chunk);
 				goto nomem;
 			}
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
@@ -1369,9 +1376,9 @@
 			/* If there is an ERROR chunk to be sent along with
 			 * the COOKIE_ECHO, send it, too.
 			 */
-			if (cmd->obj.ptr)
+			if (cmd->obj.chunk)
 				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-						SCTP_CHUNK(cmd->obj.ptr));
+						SCTP_CHUNK(cmd->obj.chunk));
 
 			if (new_obj->transport) {
 				new_obj->transport->init_sent_count++;
@@ -1417,18 +1424,18 @@
 		case SCTP_CMD_CHUNK_ULP:
 			/* Send a chunk to the sockets layer.  */
 			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
-					  "chunk_up:", cmd->obj.ptr,
+					  "chunk_up:", cmd->obj.chunk,
 					  "ulpq:", &asoc->ulpq);
-			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
+			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
 					    GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_EVENT_ULP:
 			/* Send a notification to the sockets layer.  */
 			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
-					  "event_up:",cmd->obj.ptr,
+					  "event_up:",cmd->obj.ulpevent,
 					  "ulpq:",&asoc->ulpq);
-			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
+			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
 			break;
 
 		case SCTP_CMD_REPLY:
@@ -1438,12 +1445,12 @@
 				local_cork = 1;
 			}
 			/* Send a chunk to our peer.  */
-			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
+			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_SEND_PKT:
 			/* Send a full packet to our peer.  */
-			packet = cmd->obj.ptr;
+			packet = cmd->obj.packet;
 			sctp_packet_transmit(packet);
 			sctp_ootb_pkt_free(packet);
 			break;
@@ -1480,7 +1487,7 @@
 			break;
 
 		case SCTP_CMD_SETUP_T2:
-			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
+			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_TIMER_START_ONCE:
@@ -1514,7 +1521,7 @@
 			break;
 
 		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
-			chunk = cmd->obj.ptr;
+			chunk = cmd->obj.chunk;
 			t = sctp_assoc_choose_alter_transport(asoc,
 						asoc->init_last_sent_to);
 			asoc->init_last_sent_to = t;
@@ -1665,17 +1672,16 @@
 			break;
 
 		case SCTP_CMD_PART_DELIVER:
-			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
-						   GFP_ATOMIC);
+			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_RENEGE:
-			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
+			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
 					 GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_SETUP_T4:
-			sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr);
+			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_PROCESS_OPERR:
@@ -1734,8 +1740,8 @@
 			break;
 
 		default:
-			pr_warn("Impossible command: %u, %p\n",
-				cmd->verb, cmd->obj.ptr);
+			pr_warn("Impossible command: %u\n",
+				cmd->verb);
 			break;
 		}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 406d957..bc16249 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -110,7 +110,6 @@
 static int sctp_autobind(struct sock *sk);
 static void sctp_sock_migrate(struct sock *, struct sock *,
 			      struct sctp_association *, sctp_socket_type_t);
-static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
 extern struct kmem_cache *sctp_bucket_cachep;
 extern long sysctl_sctp_mem[3];
@@ -336,6 +335,7 @@
 /* Bind a local address either to an endpoint or to an association.  */
 SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock *sp = sctp_sk(sk);
 	struct sctp_endpoint *ep = sp->ep;
 	struct sctp_bind_addr *bp = &ep->base.bind_addr;
@@ -379,7 +379,8 @@
 		}
 	}
 
-	if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+	if (snum && snum < PROT_SOCK &&
+	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
 	/* See if the address matches any of the addresses we may have
@@ -1162,7 +1163,7 @@
 				 * be permitted to open new associations.
 				 */
 				if (ep->base.bind_addr.port < PROT_SOCK &&
-				    !capable(CAP_NET_BIND_SERVICE)) {
+				    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
 					err = -EACCES;
 					goto out_free;
 				}
@@ -1791,7 +1792,7 @@
 			 * associations.
 			 */
 			if (ep->base.bind_addr.port < PROT_SOCK &&
-			    !capable(CAP_NET_BIND_SERVICE)) {
+			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
 				err = -EACCES;
 				goto out_unlock;
 			}
@@ -3890,6 +3891,8 @@
 	sp->default_rcv_context = 0;
 	sp->max_burst = net->sctp.max_burst;
 
+	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
+
 	/* Initialize default setup parameters. These parameters
 	 * can be modified with the SCTP_INITMSG socket option or
 	 * overridden by the SCTP_INIT CMSG.
@@ -5981,13 +5984,15 @@
 	struct sctp_sock *sp = sctp_sk(sk);
 	struct sctp_endpoint *ep = sp->ep;
 	struct crypto_hash *tfm = NULL;
+	char alg[32];
 
 	/* Allocate HMAC for generating cookie. */
-	if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
-		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (!sp->hmac && sp->sctp_hmac_alg) {
+		sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
+		tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
 			net_info_ratelimited("failed to load transform for %s: %ld\n",
-					     sctp_hmac_alg, PTR_ERR(tfm));
+					     sp->sctp_hmac_alg, PTR_ERR(tfm));
 			return -ENOSYS;
 		}
 		sctp_sk(sk)->hmac = tfm;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 70e3ba5..043889a 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -62,6 +62,10 @@
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
+static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+				int write,
+				void __user *buffer, size_t *lenp,
+				loff_t *ppos);
 static ctl_table sctp_table[] = {
 	{
 		.procname	= "sctp_mem",
@@ -147,6 +152,12 @@
 		.proc_handler	= proc_dointvec,
 	},
 	{
+		.procname	= "cookie_hmac_alg",
+		.maxlen		= 8,
+		.mode		= 0644,
+		.proc_handler	= proc_sctp_do_hmac_alg,
+	},
+	{
 		.procname	= "valid_cookie_life",
 		.data		= &init_net.sctp.valid_cookie_life,
 		.maxlen		= sizeof(unsigned int),
@@ -289,6 +300,54 @@
 	{ /* sentinel */ }
 };
 
+static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+				int write,
+				void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	struct net *net = current->nsproxy->net_ns;
+	char tmp[8];
+	ctl_table tbl;
+	int ret;
+	int changed = 0;
+	char *none = "none";
+
+	memset(&tbl, 0, sizeof(struct ctl_table));
+
+	if (write) {
+		tbl.data = tmp;
+		tbl.maxlen = 8;
+	} else {
+		tbl.data = net->sctp.sctp_hmac_alg ? : none;
+		tbl.maxlen = strlen(tbl.data);
+	}
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+
+	if (write) {
+#ifdef CONFIG_CRYPTO_MD5
+		if (!strncmp(tmp, "md5", 3)) {
+			net->sctp.sctp_hmac_alg = "md5";
+			changed = 1;
+		}
+#endif
+#ifdef CONFIG_CRYPTO_SHA1
+		if (!strncmp(tmp, "sha1", 4)) {
+			net->sctp.sctp_hmac_alg = "sha1";
+			changed = 1;
+		}
+#endif
+		if (!strncmp(tmp, "none", 4)) {
+			net->sctp.sctp_hmac_alg = NULL;
+			changed = 1;
+		}
+
+		if (!changed)
+			ret = -EINVAL;
+	}
+
+	return ret;
+}
+
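A minimal user-space sketch of exercising the handler above, assuming the
entry shows up under the usual /proc/sys/net/sctp/ path like its neighbours;
reads report the current algorithm ("none" when unset), and writes other than
md5, sha1 or none are rejected with EINVAL:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/net/sctp/cookie_hmac_alg";
	char cur[16] = "";
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open for read");
		return 1;
	}
	if (read(fd, cur, sizeof(cur) - 1) > 0)
		printf("current: %s", cur);
	close(fd);

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open for write");
		return 1;
	}
	if (write(fd, "sha1", 4) < 0)	/* needs CONFIG_CRYPTO_SHA1 */
		perror("write");
	close(fd);
	return 0;
}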
 int sctp_sysctl_net_register(struct net *net)
 {
 	struct ctl_table *table;
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index b5fb7c40..5f25e0c 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -272,7 +272,7 @@
 	__u32 max_tsn = map->max_tsn_seen;
 	__u32 base_tsn = map->base_tsn;
 	__u16 pending_data;
-	u32 gap, i;
+	u32 gap;
 
 	pending_data = max_tsn - cum_tsn;
 	gap = max_tsn - base_tsn;
@@ -280,11 +280,7 @@
 	if (gap == 0 || gap >= map->len)
 		goto out;
 
-	for (i = 0; i < gap+1; i++) {
-		if (test_bit(i, map->tsn_map))
-			pending_data--;
-	}
-
+	pending_data -= bitmap_weight(map->tsn_map, gap + 1);
 out:
 	return pending_data;
 }
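The replacement above relies on bitmap_weight() returning the number of bits
set in the first gap+1 positions, which is exactly what the removed loop
counted. A quick user-space equivalence check with a popcount-based stand-in
(byte-oriented, not the kernel bitmap API):

#include <assert.h>
#include <stdio.h>

/* count the bits set in the first nbits bits of a byte array */
static unsigned int weight(const unsigned char *map, unsigned int nbits)
{
	unsigned int full = nbits / 8, rest = nbits % 8, n = 0, i;

	for (i = 0; i < full; i++)
		n += __builtin_popcount(map[i]);
	if (rest)
		n += __builtin_popcount(map[full] & ((1u << rest) - 1));
	return n;
}

int main(void)
{
	unsigned char tsn_map[] = { 0xb5, 0x03 };	/* arbitrary test pattern */
	unsigned int gap = 9, i, looped = 0;

	for (i = 0; i < gap + 1; i++)		/* the old per-bit loop */
		if (tsn_map[i / 8] & (1u << (i % 8)))
			looped++;

	assert(looped == weight(tsn_map, gap + 1));
	printf("bits set in the first %u positions: %u\n", gap + 1, looped);
	return 0;
}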
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 360d869..ada1746 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -997,7 +997,6 @@
 
 /* Partial deliver the first message as there is pressure on rwnd. */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
-				struct sctp_chunk *chunk,
 				gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
@@ -1060,7 +1059,7 @@
 		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
 		sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
-		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
+		sctp_ulpq_partial_delivery(ulpq, gfp);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
diff --git a/net/socket.c b/net/socket.c
index d92c490..2ca51c7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -620,8 +620,6 @@
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 
-	sock_update_classid(sock->sk);
-
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -784,8 +782,6 @@
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 
-	sock_update_classid(sock->sk);
-
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -896,8 +892,6 @@
 	if (unlikely(!sock->ops->splice_read))
 		return -EINVAL;
 
-	sock_update_classid(sock->sk);
-
 	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
@@ -3437,8 +3431,6 @@
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		    size_t size, int flags)
 {
-	sock_update_classid(sock->sk);
-
 	if (sock->ops->sendpage)
 		return sock->ops->sendpage(sock, page, offset, size, flags);
 
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e3a6e37..9bc6db04 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -38,15 +38,24 @@
 }
 
 /* Return standard mode bits for table entry. */
-static int net_ctl_permissions(struct ctl_table_root *root,
-			       struct nsproxy *nsproxy,
+static int net_ctl_permissions(struct ctl_table_header *head,
 			       struct ctl_table *table)
 {
+	struct net *net = container_of(head->set, struct net, sysctls);
+	kuid_t root_uid = make_kuid(net->user_ns, 0);
+	kgid_t root_gid = make_kgid(net->user_ns, 0);
+
 	/* Allow network administrator to have same access as root. */
-	if (capable(CAP_NET_ADMIN)) {
+	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
+	    uid_eq(root_uid, current_uid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
 	}
+	/* Allow netns root group to have the same access as the root group */
+	if (gid_eq(root_gid, current_gid())) {
+		int mode = (table->mode >> 3) & 7;
+		return (mode << 3) | mode;
+	}
 	return table->mode;
 }
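Worked example of the mode expansion above for a 0644 sysctl entry: the
namespace's CAP_NET_ADMIN holder (or its root uid) gets the owner bits
replicated everywhere, while the namespace's root group gets the group bits
replicated into the group and other positions:

#include <stdio.h>

int main(void)
{
	unsigned int table_mode = 0644;
	unsigned int owner = (table_mode >> 6) & 7;	/* 6: rw */
	unsigned int group = (table_mode >> 3) & 7;	/* 4: r  */

	printf("netns admin sees      %03o\n",
	       (owner << 6) | (owner << 3) | owner);	/* 0666 */
	printf("netns root group sees %03o\n",
	       (group << 3) | group);			/* 0044 */
	return 0;
}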
 
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 5854601..bc41bd3 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -20,18 +20,9 @@
 
 	  If in doubt, say N.
 
-if TIPC
-
-config TIPC_ADVANCED
-	bool "Advanced TIPC configuration"
-	default n
-	help
-	  Saying Y here will open some advanced configuration for TIPC.
-	  Most users do not need to bother; if unsure, just say N.
-
 config TIPC_PORTS
 	int "Maximum number of ports in a node"
-	depends on TIPC_ADVANCED
+	depends on TIPC
 	range 127 65535
 	default "8191"
 	help
@@ -40,5 +31,3 @@
 
 	  Setting this to a smaller value saves some memory,
 	  setting it to higher allows for more ports.
-
-endif # TIPC
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e4e6d8c..54f89f9 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -347,7 +347,7 @@
 
 	tipc_node_lock(n_ptr);
 
-	if (n_ptr->bclink.supported &&
+	if (n_ptr->bclink.recv_permitted &&
 	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
 	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
 		n_ptr->bclink.oos_state = 2;
@@ -429,7 +429,7 @@
 		goto exit;
 
 	tipc_node_lock(node);
-	if (unlikely(!node->bclink.supported))
+	if (unlikely(!node->bclink.recv_permitted))
 		goto unlock;
 
 	/* Handle broadcast protocol message */
@@ -564,7 +564,7 @@
 
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
-	return (n_ptr->bclink.supported &&
+	return (n_ptr->bclink.recv_permitted &&
 		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
 }
 
@@ -619,16 +619,14 @@
 		if (bcbearer->remains_new.count == bcbearer->remains.count)
 			continue;	/* bearer pair doesn't add anything */
 
-		if (p->blocked ||
-		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
+		if (!tipc_bearer_blocked(p))
+			tipc_bearer_send(p, buf, &p->media->bcast_addr);
+		else if (s && !tipc_bearer_blocked(s))
 			/* unable to send on primary bearer */
-			if (!s || s->blocked ||
-			    s->media->send_msg(buf, s,
-					       &s->media->bcast_addr)) {
-				/* unable to send on either bearer */
-				continue;
-			}
-		}
+			tipc_bearer_send(s, buf, &s->media->bcast_addr);
+		else
+			/* unable to send on either bearer */
+			continue;
 
 		if (s) {
 			bcbearer->bpairs[bp_index].primary = s;
@@ -731,8 +729,8 @@
 			     "  TX naks:%u acks:%u dups:%u\n",
 			     s->sent_nacks, s->sent_acks, s->retransmitted);
 	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
-			     s->bearer_congs, s->link_congs, s->max_queue_sz,
+			     "  Congestion link:%u  Send queue max:%u avg:%u\n",
+			     s->link_congs, s->max_queue_sz,
 			     s->queue_sz_counts ?
 			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
@@ -766,7 +764,6 @@
 
 void tipc_bclink_init(void)
 {
-	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
 	bcbearer->media.send_msg = tipc_bcbearer_send;
 	sprintf(bcbearer->media.name, "tipc-broadcast");
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 4ec5c80..aa62f93 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -279,116 +279,31 @@
 }
 
 /*
- * bearer_push(): Resolve bearer congestion. Force the waiting
- * links to push out their unsent packets, one packet per link
- * per iteration, until all packets are gone or congestion reoccurs.
- * 'tipc_net_lock' is read_locked when this function is called
- * bearer.lock must be taken before calling
- * Returns binary true(1) ore false(0)
- */
-static int bearer_push(struct tipc_bearer *b_ptr)
-{
-	u32 res = 0;
-	struct tipc_link *ln, *tln;
-
-	if (b_ptr->blocked)
-		return 0;
-
-	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
-		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
-			res = tipc_link_push_packet(ln);
-			if (res == PUSH_FAILED)
-				break;
-			if (res == PUSH_FINISHED)
-				list_move_tail(&ln->link_list, &b_ptr->links);
-		}
-	}
-	return list_empty(&b_ptr->cong_links);
-}
-
-void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
-{
-	spin_lock_bh(&b_ptr->lock);
-	bearer_push(b_ptr);
-	spin_unlock_bh(&b_ptr->lock);
-}
-
-
-/*
- * Interrupt enabling new requests after bearer congestion or blocking:
+ * Interrupt enabling new requests after bearer blocking:
  * See bearer_send().
  */
-void tipc_continue(struct tipc_bearer *b_ptr)
+void tipc_continue(struct tipc_bearer *b)
 {
-	spin_lock_bh(&b_ptr->lock);
-	if (!list_empty(&b_ptr->cong_links))
-		tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
-	b_ptr->blocked = 0;
-	spin_unlock_bh(&b_ptr->lock);
+	spin_lock_bh(&b->lock);
+	b->blocked = 0;
+	spin_unlock_bh(&b->lock);
 }
 
 /*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here
- * bearer.lock is busy
+ * tipc_bearer_blocked - determines if bearer is currently blocked
  */
-static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
-						struct tipc_link *l_ptr)
+int tipc_bearer_blocked(struct tipc_bearer *b)
 {
-	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
-}
+	int res;
 
-/*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here,
- * bearer.lock is free
- */
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
-{
-	spin_lock_bh(&b_ptr->lock);
-	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-	spin_unlock_bh(&b_ptr->lock);
-}
+	spin_lock_bh(&b->lock);
+	res = b->blocked;
+	spin_unlock_bh(&b->lock);
 
-
-/*
- * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
- * and if there is, try to resolve it before returning.
- * 'tipc_net_lock' is read_locked when this function is called
- */
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
-					struct tipc_link *l_ptr)
-{
-	int res = 1;
-
-	if (list_empty(&b_ptr->cong_links))
-		return 1;
-	spin_lock_bh(&b_ptr->lock);
-	if (!bearer_push(b_ptr)) {
-		tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-		res = 0;
-	}
-	spin_unlock_bh(&b_ptr->lock);
 	return res;
 }
 
 /**
- * tipc_bearer_congested - determines if bearer is currently congested
- */
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
-{
-	if (unlikely(b_ptr->blocked))
-		return 1;
-	if (likely(list_empty(&b_ptr->cong_links)))
-		return 0;
-	return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
-}
-
-/**
  * tipc_enable_bearer - enable bearer with the given name
  */
 int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
@@ -489,7 +404,6 @@
 	b_ptr->net_plane = bearer_id + 'A';
 	b_ptr->active = 1;
 	b_ptr->priority = priority;
-	INIT_LIST_HEAD(&b_ptr->cong_links);
 	INIT_LIST_HEAD(&b_ptr->links);
 	spin_lock_init(&b_ptr->lock);
 
@@ -528,7 +442,6 @@
 	pr_info("Blocking bearer <%s>\n", name);
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
-	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		struct tipc_node *n_ptr = l_ptr->owner;
 
@@ -555,7 +468,6 @@
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
 	b_ptr->media->disable_bearer(b_ptr);
-	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		tipc_link_delete(l_ptr);
 	}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dd4c2ab..39f1192 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -120,7 +120,6 @@
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @links: list of non-congested links associated with bearer
- * @cong_links: list of congested links associated with bearer
  * @active: non-zero if bearer structure is represents a bearer
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
  * @nodes: indicates which nodes in cluster can be reached through bearer
@@ -143,7 +142,6 @@
 	u32 identity;
 	struct tipc_link_req *link_req;
 	struct list_head links;
-	struct list_head cong_links;
 	int active;
 	char net_plane;
 	struct tipc_node_map nodes;
@@ -185,39 +183,23 @@
 struct sk_buff *tipc_bearer_get_names(void);
 void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
 void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
 struct tipc_bearer *tipc_bearer_find(const char *name);
 struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
 struct tipc_media *tipc_media_find(const char *name);
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
-				   struct tipc_link *l_ptr);
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
+int tipc_bearer_blocked(struct tipc_bearer *b_ptr);
 void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
-
 
 /**
  * tipc_bearer_send- sends buffer to destination over bearer
  *
- * Returns true (1) if successful, or false (0) if unable to send
- *
  * IMPORTANT:
  * The media send routine must not alter the buffer being passed in
  * as it may be needed for later retransmission!
- *
- * If the media send routine returns a non-zero value (indicating that
- * it was unable to send the buffer), it must:
- *   1) mark the bearer as blocked,
- *   2) call tipc_continue() once the bearer is able to send again.
- * Media types that are unable to meet these two critera must ensure their
- * send routine always returns success -- even if the buffer was not sent --
- * and let TIPC's link code deal with the undelivered message.
  */
-static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
-				   struct sk_buff *buf,
+static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
 				   struct tipc_media_addr *dest)
 {
-	return !b_ptr->media->send_msg(buf, b_ptr, dest);
+	b->media->send_msg(buf, b, dest);
 }
 
 #endif	/* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index bfe8af8..fc05cec 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -42,11 +42,6 @@
 
 #include <linux/module.h>
 
-#ifndef CONFIG_TIPC_PORTS
-#define CONFIG_TIPC_PORTS 8191
-#endif
-
-
 /* global variables used by multiple sub-systems within TIPC */
 int tipc_random __read_mostly;
 
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 50eaa40..1074b95 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -243,7 +243,7 @@
 	if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
 		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
 		if (rbuf) {
-			b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
+			tipc_bearer_send(b_ptr, rbuf, &media_addr);
 			kfree_skb(rbuf);
 		}
 	}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a79c755..87bf5aa 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.c: TIPC link code
  *
- * Copyright (c) 1996-2007, Ericsson AB
+ * Copyright (c) 1996-2007, 2012, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -103,6 +103,8 @@
 static void link_print(struct tipc_link *l_ptr, const char *str);
 static void link_start(struct tipc_link *l_ptr);
 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
+static void tipc_link_send_sync(struct tipc_link *l);
+static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
 
 /*
  *  Simple link routines
@@ -712,6 +714,8 @@
 			link_activate(l_ptr);
 			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 			l_ptr->fsm_msg_cnt++;
+			if (l_ptr->owner->working_links == 1)
+				tipc_link_send_sync(l_ptr);
 			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
@@ -745,6 +749,8 @@
 			link_activate(l_ptr);
 			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 			l_ptr->fsm_msg_cnt++;
+			if (l_ptr->owner->working_links == 1)
+				tipc_link_send_sync(l_ptr);
 			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
@@ -872,17 +878,12 @@
 		return link_send_long_buf(l_ptr, buf);
 
 	/* Packet can be queued or sent. */
-	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
+	if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
 		   !link_congested(l_ptr))) {
 		link_add_to_outqueue(l_ptr, buf, msg);
 
-		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
-			l_ptr->unacked_window = 0;
-		} else {
-			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-			l_ptr->stats.bearer_congs++;
-			l_ptr->next_out = buf;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->unacked_window = 0;
 		return dsz;
 	}
 	/* Congestion: can message be bundled ? */
@@ -891,10 +892,8 @@
 
 		/* Try adding message to an existing bundle */
 		if (l_ptr->next_out &&
-		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
-			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
 			return dsz;
-		}
 
 		/* Try creating a new bundle */
 		if (size <= max_packet * 2 / 3) {
@@ -917,7 +916,6 @@
 	if (!l_ptr->next_out)
 		l_ptr->next_out = buf;
 	link_add_to_outqueue(l_ptr, buf, msg);
-	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 	return dsz;
 }
 
@@ -949,7 +947,48 @@
 	return res;
 }
 
-/**
+/*
+ * tipc_link_send_sync - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ *
+ * Called with node locked
+ */
+static void tipc_link_send_sync(struct tipc_link *l)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+
+	buf = tipc_buf_acquire(INT_H_SIZE);
+	if (!buf)
+		return;
+
+	msg = buf_msg(buf);
+	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
+	msg_set_last_bcast(msg, l->owner->bclink.acked);
+	link_add_chain_to_outqueue(l, buf, 0);
+	tipc_link_push_queue(l);
+}
+
+/*
+ * tipc_link_recv_sync - synchronize broadcast link endpoints.
+ * Receive the sequence number where we should start receiving and
+ * acking broadcast packets from a newly added peer node, and open
+ * up for reception of such packets.
+ *
+ * Called with node locked
+ */
+static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+
+	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
+	n->bclink.recv_permitted = true;
+	kfree_skb(buf);
+}
+
+/*
  * tipc_link_send_names - send name table entries to new neighbor
  *
  * Send routine for bulk delivery of name table messages when contact
@@ -1006,16 +1045,11 @@
 
 	if (likely(!link_congested(l_ptr))) {
 		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
-			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+			if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
 				link_add_to_outqueue(l_ptr, buf, msg);
-				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
-							    &l_ptr->media_addr))) {
-					l_ptr->unacked_window = 0;
-					return res;
-				}
-				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-				l_ptr->stats.bearer_congs++;
-				l_ptr->next_out = buf;
+				tipc_bearer_send(l_ptr->b_ptr, buf,
+						 &l_ptr->media_addr);
+				l_ptr->unacked_window = 0;
 				return res;
 			}
 		} else
@@ -1106,7 +1140,7 @@
 
 			/* Exit if link (or bearer) is congested */
 			if (link_congested(l_ptr) ||
-			    !list_empty(&l_ptr->b_ptr->cong_links)) {
+			    tipc_bearer_blocked(l_ptr->b_ptr)) {
 				res = link_schedule_port(l_ptr,
 							 sender->ref, res);
 				goto exit;
@@ -1329,15 +1363,11 @@
 	if (r_q_size && buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			l_ptr->retransm_queue_head = mod(++r_q_head);
-			l_ptr->retransm_queue_size = --r_q_size;
-			l_ptr->stats.retransmitted++;
-			return 0;
-		} else {
-			l_ptr->stats.bearer_congs++;
-			return PUSH_FAILED;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->retransm_queue_head = mod(++r_q_head);
+		l_ptr->retransm_queue_size = --r_q_size;
+		l_ptr->stats.retransmitted++;
+		return 0;
 	}
 
 	/* Send deferred protocol message, if any: */
@@ -1345,15 +1375,11 @@
 	if (buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			l_ptr->unacked_window = 0;
-			kfree_skb(buf);
-			l_ptr->proto_msg_queue = NULL;
-			return 0;
-		} else {
-			l_ptr->stats.bearer_congs++;
-			return PUSH_FAILED;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->unacked_window = 0;
+		kfree_skb(buf);
+		l_ptr->proto_msg_queue = NULL;
+		return 0;
 	}
 
 	/* Send one deferred data message, if send window not full: */
@@ -1366,18 +1392,14 @@
 		if (mod(next - first) < l_ptr->queue_limit[0]) {
 			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
 			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-				if (msg_user(msg) == MSG_BUNDLER)
-					msg_set_type(msg, CLOSED_MSG);
-				l_ptr->next_out = buf->next;
-				return 0;
-			} else {
-				l_ptr->stats.bearer_congs++;
-				return PUSH_FAILED;
-			}
+			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+			if (msg_user(msg) == MSG_BUNDLER)
+				msg_set_type(msg, CLOSED_MSG);
+			l_ptr->next_out = buf->next;
+			return 0;
 		}
 	}
-	return PUSH_FINISHED;
+	return 1;
 }
 
 /*
@@ -1388,15 +1410,12 @@
 {
 	u32 res;
 
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
+	if (tipc_bearer_blocked(l_ptr->b_ptr))
 		return;
 
 	do {
 		res = tipc_link_push_packet(l_ptr);
 	} while (!res);
-
-	if (res == PUSH_FAILED)
-		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 }
 
 static void link_reset_all(unsigned long addr)
@@ -1454,9 +1473,8 @@
 
 		tipc_addr_string_fill(addr_string, n_ptr->addr);
 		pr_info("Broadcast link info for %s\n", addr_string);
-		pr_info("Supportable: %d,  Supported: %d,  Acked: %u\n",
-			n_ptr->bclink.supportable,
-			n_ptr->bclink.supported,
+		pr_info("Reception permitted: %d,  Acked: %u\n",
+			n_ptr->bclink.recv_permitted,
 			n_ptr->bclink.acked);
 		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
 			n_ptr->bclink.last_in,
@@ -1481,7 +1499,7 @@
 
 	msg = buf_msg(buf);
 
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
+	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 		if (l_ptr->retransm_queue_size == 0) {
 			l_ptr->retransm_queue_head = msg_seqno(msg);
 			l_ptr->retransm_queue_size = retransmits;
@@ -1491,7 +1509,7 @@
 		}
 		return;
 	} else {
-		/* Detect repeated retransmit failures on uncongested bearer */
+		/* Detect repeated retransmit failures on unblocked bearer */
 		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
 			if (++l_ptr->stale_count > 100) {
 				link_retransmit_failure(l_ptr, buf);
@@ -1507,17 +1525,10 @@
 		msg = buf_msg(buf);
 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			buf = buf->next;
-			retransmits--;
-			l_ptr->stats.retransmitted++;
-		} else {
-			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-			l_ptr->stats.bearer_congs++;
-			l_ptr->retransm_queue_head = buf_seqno(buf);
-			l_ptr->retransm_queue_size = retransmits;
-			return;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		buf = buf->next;
+		retransmits--;
+		l_ptr->stats.retransmitted++;
 	}
 
 	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
@@ -1676,7 +1687,7 @@
 		ackd = msg_ack(msg);
 
 		/* Release acked messages */
-		if (n_ptr->bclink.supported)
+		if (n_ptr->bclink.recv_permitted)
 			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
 		crs = l_ptr->first_out;
@@ -1727,9 +1738,14 @@
 					tipc_link_recv_bundle(buf);
 					continue;
 				case NAME_DISTRIBUTOR:
+					n_ptr->bclink.recv_permitted = true;
 					tipc_node_unlock(n_ptr);
 					tipc_named_recv(buf);
 					continue;
+				case BCAST_PROTOCOL:
+					tipc_link_recv_sync(n_ptr, buf);
+					tipc_node_unlock(n_ptr);
+					continue;
 				case CONN_MANAGER:
 					tipc_node_unlock(n_ptr);
 					tipc_port_recv_proto_msg(buf);
@@ -1772,16 +1788,19 @@
 			continue;
 		}
 
+		/* Link is not in state WORKING_WORKING */
 		if (msg_user(msg) == LINK_PROTOCOL) {
 			link_recv_proto_msg(l_ptr, buf);
 			head = link_insert_deferred_queue(l_ptr, head);
 			tipc_node_unlock(n_ptr);
 			continue;
 		}
+
+		/* Traffic message. Conditionally activate link */
 		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
 
 		if (link_working_working(l_ptr)) {
-			/* Re-insert in front of queue */
+			/* Re-insert buffer in front of queue */
 			buf->next = head;
 			head = buf;
 			tipc_node_unlock(n_ptr);
@@ -1972,21 +1991,13 @@
 
 	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
 
-	/* Defer message if bearer is already congested */
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
+	/* Defer message if bearer is already blocked */
+	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 		l_ptr->proto_msg_queue = buf;
 		return;
 	}
 
-	/* Defer message if attempting to send results in bearer congestion */
-	if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-		l_ptr->proto_msg_queue = buf;
-		l_ptr->stats.bearer_congs++;
-		return;
-	}
-
-	/* Discard message if it was sent successfully */
+	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
 	l_ptr->unacked_window = 0;
 	kfree_skb(buf);
 }
@@ -2057,7 +2068,6 @@
 		} else {
 			l_ptr->max_pkt = l_ptr->max_pkt_target;
 		}
-		l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
 
 		/* Synchronize broadcast link info, if not done previously */
 		if (!tipc_node_is_up(l_ptr->owner)) {
@@ -2112,7 +2122,7 @@
 		}
 
 		/* Protocol message before retransmits, reduce loss risk */
-		if (l_ptr->owner->bclink.supported)
+		if (l_ptr->owner->bclink.recv_permitted)
 			tipc_bclink_update_link_state(l_ptr->owner,
 						      msg_last_bcast(msg));
 
@@ -2937,8 +2947,8 @@
 			     s->sent_nacks, s->sent_acks, s->retransmitted);
 
 	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     "  Congestion bearer:%u link:%u  Send queue"
-			     " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
+			     "  Congestion link:%u  Send queue"
+			     " max:%u avg:%u\n", s->link_congs,
 			     s->max_queue_sz, s->queue_sz_counts ?
 			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 6e92112..c048ed1 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,9 +40,6 @@
 #include "msg.h"
 #include "node.h"
 
-#define PUSH_FAILED   1
-#define PUSH_FINISHED 2
-
 /*
  * Out-of-range value for link sequence numbers
  */
@@ -82,7 +79,6 @@
 	u32 recv_fragmented;
 	u32 recv_fragments;
 	u32 link_congs;		/* # port sends blocked by congestion */
-	u32 bearer_congs;
 	u32 deferred_recv;
 	u32 duplicates;
 	u32 max_queue_sz;	/* send queue size high water mark */
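
Editor note on the simplified push path: with PUSH_FAILED/PUSH_FINISHED removed, tipc_link_push_packet() only distinguishes "sent one packet" (0) from "nothing left to push" (non-zero), and tipc_link_push_queue() collapses into a plain loop guarded by tipc_bearer_blocked(). The following is a minimal userspace sketch of that control flow, not kernel code; the queue counters and names are illustrative only.

/*
 * Standalone model (not kernel code) of the new return convention:
 * push_packet() returns 0 while it still has something to send and
 * non-zero once every queue is drained, so push_queue() is a bare loop.
 */
#include <stdio.h>

static int retransmit_queue = 2;	/* pretend: 2 queued retransmits */
static int deferred_data = 3;		/* pretend: 3 deferred data msgs */

static int push_packet(void)
{
	if (retransmit_queue) {
		retransmit_queue--;
		printf("sent retransmit, %d left\n", retransmit_queue);
		return 0;
	}
	if (deferred_data) {
		deferred_data--;
		printf("sent deferred data, %d left\n", deferred_data);
		return 0;
	}
	return 1;			/* nothing left to push */
}

static void push_queue(int bearer_blocked)
{
	if (bearer_blocked)
		return;			/* mirrors the tipc_bearer_blocked() test */
	while (!push_packet())
		;
}

int main(void)
{
	push_queue(0);
	return 0;
}

Running it drains the two pretend queues in order, mirroring the retransmit-then-deferred-data ordering in tipc_link_push_packet().
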
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 55d3928..e0d0805 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -262,7 +262,7 @@
 	named_distribute(&message_list, node, &publ_zone, max_item_buf);
 	read_unlock_bh(&tipc_nametbl_lock);
 
-	tipc_link_send_names(&message_list, (u32)node);
+	tipc_link_send_names(&message_list, node);
 }
 
 /**
diff --git a/net/tipc/node.c b/net/tipc/node.c
index d21db20..48f39dd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2012 Ericsson AB
  * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -263,12 +263,9 @@
 static void node_established_contact(struct tipc_node *n_ptr)
 {
 	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
-
-	if (n_ptr->bclink.supportable) {
-		n_ptr->bclink.acked = tipc_bclink_get_last_sent();
-		tipc_bclink_add_node(n_ptr->addr);
-		n_ptr->bclink.supported = 1;
-	}
+	n_ptr->bclink.oos_state = 0;
+	n_ptr->bclink.acked = tipc_bclink_get_last_sent();
+	tipc_bclink_add_node(n_ptr->addr);
 }
 
 static void node_name_purge_complete(unsigned long node_addr)
@@ -294,7 +291,7 @@
 		tipc_addr_string_fill(addr_string, n_ptr->addr));
 
 	/* Flush broadcast link info associated with lost node */
-	if (n_ptr->bclink.supported) {
+	if (n_ptr->bclink.recv_permitted) {
 		while (n_ptr->bclink.deferred_head) {
 			struct sk_buff *buf = n_ptr->bclink.deferred_head;
 			n_ptr->bclink.deferred_head = buf->next;
@@ -310,7 +307,7 @@
 		tipc_bclink_remove_node(n_ptr->addr);
 		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
 
-		n_ptr->bclink.supported = 0;
+		n_ptr->bclink.recv_permitted = false;
 	}
 
 	/* Abort link changeover */
diff --git a/net/tipc/node.h b/net/tipc/node.h
index cfcaf4d..3c189b3 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -67,8 +67,6 @@
  * @permit_changeover: non-zero if node has redundant links to this system
  * @signature: node instance identifier
  * @bclink: broadcast-related info
- *    @supportable: non-zero if node supports TIPC b'cast link capability
- *    @supported: non-zero if node supports TIPC b'cast capability
  *    @acked: sequence # of last outbound b'cast message acknowledged by node
  *    @last_in: sequence # of last in-sequence b'cast message received from node
  *    @last_sent: sequence # of last b'cast message sent by node
@@ -77,6 +75,7 @@
  *    @deferred_head: oldest OOS b'cast message received from node
  *    @deferred_tail: newest OOS b'cast message received from node
  *    @defragm: list of partially reassembled b'cast message fragments from node
+ *    @recv_permitted: true if node is allowed to receive b'cast messages
  */
 struct tipc_node {
 	u32 addr;
@@ -92,8 +91,6 @@
 	int permit_changeover;
 	u32 signature;
 	struct {
-		u8 supportable;
-		u8 supported;
 		u32 acked;
 		u32 last_in;
 		u32 last_sent;
@@ -102,6 +99,7 @@
 		struct sk_buff *deferred_head;
 		struct sk_buff *deferred_tail;
 		struct sk_buff *defragm;
+		bool recv_permitted;
 	} bclink;
 };
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fd5f042..1a720c8 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -62,6 +62,8 @@
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
+static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_write_space(struct sock *sk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -221,6 +223,8 @@
 	sock_init_data(sock, sk);
 	sk->sk_backlog_rcv = backlog_rcv;
 	sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
+	sk->sk_data_ready = tipc_data_ready;
+	sk->sk_write_space = tipc_write_space;
 	tipc_sk(sk)->p = tp_ptr;
 	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
 
@@ -408,7 +412,7 @@
  * socket state		flags set
  * ------------		---------
  * unconnected		no read flags
- *			no write flags
+ *			POLLOUT if port is not congested
  *
  * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
  *			no write flags
@@ -435,9 +439,13 @@
 	struct sock *sk = sock->sk;
 	u32 mask = 0;
 
-	poll_wait(file, sk_sleep(sk), wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 
 	switch ((int)sock->state) {
+	case SS_UNCONNECTED:
+		if (!tipc_sk_port(sk)->congested)
+			mask |= POLLOUT;
+		break;
 	case SS_READY:
 	case SS_CONNECTED:
 		if (!tipc_sk_port(sk)->congested)
@@ -1126,6 +1134,39 @@
 }
 
 /**
+ * tipc_write_space - wake up thread if port congestion is released
+ * @sk: socket
+ */
+static void tipc_write_space(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+						POLLWRNORM | POLLWRBAND);
+	rcu_read_unlock();
+}
+
+/**
+ * tipc_data_ready - wake up threads to indicate messages have been received
+ * @sk: socket
+ * @len: length of the newly received data (not used here)
+ */
+static void tipc_data_ready(struct sock *sk, int len)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+						POLLRDNORM | POLLRDBAND);
+	rcu_read_unlock();
+}
+
+/**
  * rx_queue_full - determine if receive queue can accept another message
  * @msg: message to be added to queue
  * @queue_size: current size of queue
@@ -1222,8 +1263,7 @@
 		tipc_disconnect_port(tipc_sk_port(sk));
 	}
 
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_data_ready(sk, 0);
 	return TIPC_OK;
 }
 
@@ -1290,8 +1330,7 @@
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
 
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_write_space(sk);
 }
 
 /**
@@ -1556,10 +1595,11 @@
 
 	case SS_DISCONNECTING:
 
-		/* Discard any unreceived messages; wake up sleeping tasks */
+		/* Discard any unreceived messages */
 		discard_rx_queue(sk);
-		if (waitqueue_active(sk_sleep(sk)))
-			wake_up_interruptible(sk_sleep(sk));
+
+		/* Wake up anyone sleeping in poll */
+		sk->sk_state_change(sk);
 		res = 0;
 		break;
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 06748f1..5ac19dc 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -151,6 +151,9 @@
 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
 		goto out_nlmsg_trim;
 
+	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+		goto out_nlmsg_trim;
+
 	return nlmsg_end(skb, nlh);
 
 out_nlmsg_trim:
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index b34b5b9..8800604 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -34,6 +34,10 @@
 	if (table == NULL)
 		goto err_alloc;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	table[0].data = &net->unx.sysctl_max_dgram_qlen;
 	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
 	if (net->unx.ctl == NULL)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index fe4adb1..16d08b3 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -140,14 +140,13 @@
 	  extensions with cfg80211-based drivers.
 
 config LIB80211
-	tristate "Common routines for IEEE802.11 drivers"
+	tristate
 	default n
 	help
 	  This option enables a library of common routines used
 	  by IEEE802.11 wireless LAN drivers.
 
-	  Drivers should select this themselves if needed.  Say Y if
-	  you want this built into your kernel.
+	  Drivers should select this themselves if needed.
 
 config LIB80211_CRYPT_WEP
 	tristate
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 0f7e0d6..a761670 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,11 +10,13 @@
 obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
 
 cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o
 cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
 cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
 cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
 
+CFLAGS_trace.o := -I$(src)
+
 ccflags-y += -D__CHECK_ENDIAN__
 
 $(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index fcc60d8..324e8d8 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -3,6 +3,7 @@
 #include <net/cfg80211.h>
 #include "nl80211.h"
 #include "core.h"
+#include "rdev-ops.h"
 
 
 static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
@@ -23,10 +24,11 @@
 	if (!wdev->beacon_interval)
 		return -ENOENT;
 
-	err = rdev->ops->stop_ap(&rdev->wiphy, dev);
+	err = rdev_stop_ap(rdev, dev);
 	if (!err) {
 		wdev->beacon_interval = 0;
 		wdev->channel = NULL;
+		wdev->ssid_len = 0;
 	}
 
 	return err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2f876b9..bf2dfd5 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -9,90 +9,266 @@
 #include <linux/export.h>
 #include <net/cfg80211.h>
 #include "core.h"
+#include "rdev-ops.h"
 
-struct ieee80211_channel *
-rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
-		  int freq, enum nl80211_channel_type channel_type)
+void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
+			     struct ieee80211_channel *chan,
+			     enum nl80211_channel_type chan_type)
 {
-	struct ieee80211_channel *chan;
-	struct ieee80211_sta_ht_cap *ht_cap;
+	if (WARN_ON(!chan))
+		return;
 
-	chan = ieee80211_get_channel(&rdev->wiphy, freq);
+	chandef->chan = chan;
+	chandef->center_freq2 = 0;
 
-	/* Primary channel not allowed */
-	if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
-		return NULL;
-
-	if (channel_type == NL80211_CHAN_HT40MINUS &&
-	    chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
-		return NULL;
-	else if (channel_type == NL80211_CHAN_HT40PLUS &&
-		 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
-		return NULL;
-
-	ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
-
-	if (channel_type != NL80211_CHAN_NO_HT) {
-		if (!ht_cap->ht_supported)
-			return NULL;
-
-		if (channel_type != NL80211_CHAN_HT20 &&
-		    (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
-		    ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
-			return NULL;
-	}
-
-	return chan;
-}
-
-bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
-				  struct ieee80211_channel *chan,
-				  enum nl80211_channel_type channel_type)
-{
-	struct ieee80211_channel *sec_chan;
-	int diff;
-
-	switch (channel_type) {
+	switch (chan_type) {
+	case NL80211_CHAN_NO_HT:
+		chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+		chandef->center_freq1 = chan->center_freq;
+		break;
+	case NL80211_CHAN_HT20:
+		chandef->width = NL80211_CHAN_WIDTH_20;
+		chandef->center_freq1 = chan->center_freq;
+		break;
 	case NL80211_CHAN_HT40PLUS:
-		diff = 20;
+		chandef->width = NL80211_CHAN_WIDTH_40;
+		chandef->center_freq1 = chan->center_freq + 10;
 		break;
 	case NL80211_CHAN_HT40MINUS:
-		diff = -20;
+		chandef->width = NL80211_CHAN_WIDTH_40;
+		chandef->center_freq1 = chan->center_freq - 10;
 		break;
 	default:
-		return true;
+		WARN_ON(1);
 	}
+}
+EXPORT_SYMBOL(cfg80211_chandef_create);
 
-	sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
-	if (!sec_chan)
+bool cfg80211_chan_def_valid(const struct cfg80211_chan_def *chandef)
+{
+	u32 control_freq;
+
+	if (!chandef->chan)
 		return false;
 
-	/* we'll need a DFS capability later */
-	if (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
-			       IEEE80211_CHAN_PASSIVE_SCAN |
-			       IEEE80211_CHAN_NO_IBSS |
-			       IEEE80211_CHAN_RADAR))
+	control_freq = chandef->chan->center_freq;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		if (chandef->center_freq1 != control_freq)
+			return false;
+		if (chandef->center_freq2)
+			return false;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		if (chandef->center_freq1 != control_freq + 10 &&
+		    chandef->center_freq1 != control_freq - 10)
+			return false;
+		if (chandef->center_freq2)
+			return false;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		if (chandef->center_freq1 != control_freq + 30 &&
+		    chandef->center_freq1 != control_freq + 10 &&
+		    chandef->center_freq1 != control_freq - 10 &&
+		    chandef->center_freq1 != control_freq - 30)
+			return false;
+		if (!chandef->center_freq2)
+			return false;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		if (chandef->center_freq1 != control_freq + 30 &&
+		    chandef->center_freq1 != control_freq + 10 &&
+		    chandef->center_freq1 != control_freq - 10 &&
+		    chandef->center_freq1 != control_freq - 30)
+			return false;
+		if (chandef->center_freq2)
+			return false;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		if (chandef->center_freq1 != control_freq + 70 &&
+		    chandef->center_freq1 != control_freq + 50 &&
+		    chandef->center_freq1 != control_freq + 30 &&
+		    chandef->center_freq1 != control_freq + 10 &&
+		    chandef->center_freq1 != control_freq - 10 &&
+		    chandef->center_freq1 != control_freq - 30 &&
+		    chandef->center_freq1 != control_freq - 50 &&
+		    chandef->center_freq1 != control_freq - 70)
+			return false;
+		if (chandef->center_freq2)
+			return false;
+		break;
+	default:
 		return false;
+	}
 
 	return true;
 }
-EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
+
+static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
+				  int *pri40, int *pri80)
+{
+	int tmp;
+
+	switch (c->width) {
+	case NL80211_CHAN_WIDTH_40:
+		*pri40 = c->center_freq1;
+		*pri80 = 0;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+	case NL80211_CHAN_WIDTH_80P80:
+		*pri80 = c->center_freq1;
+		/* n_P20 */
+		tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P40 */
+		tmp /= 2;
+		/* freq_P40 */
+		*pri40 = c->center_freq1 - 20 + 40 * tmp;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		/* n_P20 */
+		tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P40 */
+		tmp /= 2;
+		/* freq_P40 */
+		*pri40 = c->center_freq1 - 60 + 40 * tmp;
+		/* n_P80 */
+		tmp /= 2;
+		*pri80 = c->center_freq1 - 40 + 80 * tmp;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+			    const struct cfg80211_chan_def *c2)
+{
+	u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80;
+
+	/* If they are identical, return */
+	if (cfg80211_chandef_identical(c1, c2))
+		return c1;
+
+	/* otherwise, must have same control channel */
+	if (c1->chan != c2->chan)
+		return NULL;
+
+	/*
+	 * If they have the same width, but aren't identical,
+	 * then they can't be compatible.
+	 */
+	if (c1->width == c2->width)
+		return NULL;
+
+	if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    c1->width == NL80211_CHAN_WIDTH_20)
+		return c2;
+
+	if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    c2->width == NL80211_CHAN_WIDTH_20)
+		return c1;
+
+	chandef_primary_freqs(c1, &c1_pri40, &c1_pri80);
+	chandef_primary_freqs(c2, &c2_pri40, &c2_pri80);
+
+	if (c1_pri40 != c2_pri40)
+		return NULL;
+
+	WARN_ON(!c1_pri80 && !c2_pri80);
+	if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80)
+		return NULL;
+
+	if (c1->width > c2->width)
+		return c1;
+	return c2;
+}
+EXPORT_SYMBOL(cfg80211_chandef_compatible);
+
+bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
+				 u32 center_freq, u32 bandwidth,
+				 u32 prohibited_flags)
+{
+	struct ieee80211_channel *c;
+	u32 freq;
+
+	for (freq = center_freq - bandwidth/2 + 10;
+	     freq <= center_freq + bandwidth/2 - 10;
+	     freq += 20) {
+		c = ieee80211_get_channel(wiphy, freq);
+		if (!c || c->flags & prohibited_flags)
+			return false;
+	}
+
+	return true;
+}
+
+static bool cfg80211_check_beacon_chans(struct wiphy *wiphy,
+					u32 center_freq, u32 bw)
+{
+	return cfg80211_secondary_chans_ok(wiphy, center_freq, bw,
+					   IEEE80211_CHAN_DISABLED |
+					   IEEE80211_CHAN_PASSIVE_SCAN |
+					   IEEE80211_CHAN_NO_IBSS |
+					   IEEE80211_CHAN_RADAR);
+}
+
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+			     struct cfg80211_chan_def *chandef)
+{
+	u32 width;
+	bool res;
+
+	trace_cfg80211_reg_can_beacon(wiphy, chandef);
+
+	if (WARN_ON(!cfg80211_chan_def_valid(chandef))) {
+		trace_cfg80211_return_bool(false);
+		return false;
+	}
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		width = 20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		width = 40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+	case NL80211_CHAN_WIDTH_80P80:
+		width = 80;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		width = 160;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		trace_cfg80211_return_bool(false);
+		return false;
+	}
+
+	res = cfg80211_check_beacon_chans(wiphy, chandef->center_freq1, width);
+
+	if (res && chandef->center_freq2)
+		res = cfg80211_check_beacon_chans(wiphy, chandef->center_freq2,
+						  width);
+
+	trace_cfg80211_return_bool(res);
+	return res;
+}
+EXPORT_SYMBOL(cfg80211_reg_can_beacon);
 
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
-				 int freq, enum nl80211_channel_type chantype)
+				 struct cfg80211_chan_def *chandef)
 {
-	struct ieee80211_channel *chan;
-
 	if (!rdev->ops->set_monitor_channel)
 		return -EOPNOTSUPP;
 	if (!cfg80211_has_monitors_only(rdev))
 		return -EBUSY;
 
-	chan = rdev_freq_to_chan(rdev, freq, chantype);
-	if (!chan)
-		return -EINVAL;
-
-	return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
+	return rdev_set_monitor_channel(rdev, chandef);
 }
 
 void
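
Editor note: the chandef conversion above is mostly arithmetic. cfg80211_chandef_create() turns the legacy NL80211_CHAN_* types into a width plus center_freq1 (offset by +/-10 MHz for HT40), and chandef_primary_freqs() recovers the primary 40/80 MHz centres from the control channel via the n_P20/n_P40 indices. Below is a standalone userspace model of the two calculations; the example frequencies (5180/5210 MHz) are illustrative and not taken from the patch.

/*
 * Userspace model of the chandef arithmetic in chan.c: the HT40
 * mapping from cfg80211_chandef_create() and the 80 MHz case of
 * chandef_primary_freqs().
 */
#include <stdio.h>

enum width { W20_NOHT, W20, W40, W80, W160 };

struct chan_def {
	int control_freq;	/* control (primary 20 MHz) channel */
	enum width width;
	int center_freq1;
};

/* cfg80211_chandef_create() for HT20/HT40+/HT40- */
static void chandef_create(struct chan_def *c, int control, int ht40_offset)
{
	c->control_freq = control;
	if (ht40_offset == 0) {
		c->width = W20;
		c->center_freq1 = control;
	} else {
		c->width = W40;
		c->center_freq1 = control + (ht40_offset > 0 ? 10 : -10);
	}
}

/* chandef_primary_freqs() for the 80 MHz case */
static void primary_freqs_80(const struct chan_def *c, int *pri40, int *pri80)
{
	int n_p20 = (30 + c->control_freq - c->center_freq1) / 20;
	int n_p40 = n_p20 / 2;

	*pri80 = c->center_freq1;
	*pri40 = c->center_freq1 - 20 + 40 * n_p40;
}

int main(void)
{
	struct chan_def ht40, vht80;
	int pri40, pri80;

	chandef_create(&ht40, 5180, +1);	/* HT40+ on 5180 MHz */
	printf("HT40+: center_freq1 = %d\n", ht40.center_freq1);

	vht80.control_freq = 5180;		/* 80 MHz centred on 5210 MHz */
	vht80.width = W80;
	vht80.center_freq1 = 5210;
	primary_freqs_80(&vht80, &pri40, &pri80);
	printf("80 MHz: primary 40 = %d, primary 80 = %d\n", pri40, pri80);
	return 0;
}

For an 80 MHz definition centred on 5210 MHz with the control channel at 5180 MHz it prints a primary 40 MHz centre of 5190 MHz, matching the kernel helper.
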
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3f72530..14d9904 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -26,6 +26,7 @@
 #include "debugfs.h"
 #include "wext-compat.h"
 #include "ethtool.h"
+#include "rdev-ops.h"
 
 /* name for sysfs, %d is appended */
 #define PHY_NAME "phy"
@@ -216,7 +217,7 @@
 {
 	struct cfg80211_registered_device *rdev = data;
 
-	rdev->ops->rfkill_poll(&rdev->wiphy);
+	rdev_rfkill_poll(rdev);
 }
 
 static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -240,7 +241,7 @@
 		case NL80211_IFTYPE_P2P_DEVICE:
 			if (!wdev->p2p_started)
 				break;
-			rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+			rdev_stop_p2p_device(rdev, wdev);
 			wdev->p2p_started = false;
 			rdev->opencount--;
 			break;
@@ -325,6 +326,8 @@
 	mutex_init(&rdev->devlist_mtx);
 	mutex_init(&rdev->sched_scan_mtx);
 	INIT_LIST_HEAD(&rdev->wdev_list);
+	INIT_LIST_HEAD(&rdev->beacon_registrations);
+	spin_lock_init(&rdev->beacon_registrations_lock);
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);
 	INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
@@ -370,6 +373,8 @@
 	rdev->wiphy.rts_threshold = (u32) -1;
 	rdev->wiphy.coverage_class = 0;
 
+	rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
+
 	return &rdev->wiphy;
 }
 EXPORT_SYMBOL(wiphy_new);
@@ -687,7 +692,7 @@
 	flush_work(&rdev->event_work);
 
 	if (rdev->wowlan && rdev->ops->set_wakeup)
-		rdev->ops->set_wakeup(&rdev->wiphy, false);
+		rdev_set_wakeup(rdev, false);
 	cfg80211_rdev_free_wowlan(rdev);
 }
 EXPORT_SYMBOL(wiphy_unregister);
@@ -695,10 +700,15 @@
 void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
 {
 	struct cfg80211_internal_bss *scan, *tmp;
+	struct cfg80211_beacon_registration *reg, *treg;
 	rfkill_destroy(rdev->rfkill);
 	mutex_destroy(&rdev->mtx);
 	mutex_destroy(&rdev->devlist_mtx);
 	mutex_destroy(&rdev->sched_scan_mtx);
+	list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
+		list_del(&reg->list);
+		kfree(reg);
+	}
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&scan->pub);
 	kfree(rdev);
@@ -770,7 +780,7 @@
 	case NL80211_IFTYPE_P2P_DEVICE:
 		if (!wdev->p2p_started)
 			break;
-		rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+		rdev_stop_p2p_device(rdev, wdev);
 		wdev->p2p_started = false;
 		rdev->opencount--;
 		break;
@@ -961,9 +971,8 @@
 		if ((wdev->iftype == NL80211_IFTYPE_STATION ||
 		     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
 		    rdev->ops->set_power_mgmt)
-			if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
-						      wdev->ps,
-						      wdev->ps_timeout)) {
+			if (rdev_set_power_mgmt(rdev, dev, wdev->ps,
+						wdev->ps_timeout)) {
 				/* assume this means it's off */
 				wdev->ps = false;
 			}
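
Editor note: call sites throughout cfg80211 are being switched from rdev->ops->foo(&rdev->wiphy, ...) to rdev_foo(rdev, ...). rdev-ops.h itself is not part of this diff, so the sketch below only illustrates the presumed shape of such a wrapper: a static inline that forwards to the driver op and provides one place to hang tracing. The structs and the printf tracepoint stand-in are simplified, not the real cfg80211 definitions.

/*
 * Minimal model of the rdev_*() wrapper style: one inline per driver
 * callback, so every call goes through a single traceable choke point.
 */
#include <stdio.h>

struct wiphy { const char *name; };

struct cfg80211_ops {
	void (*rfkill_poll)(struct wiphy *wiphy);
};

struct cfg80211_registered_device {
	struct wiphy wiphy;
	const struct cfg80211_ops *ops;
};

static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
{
	printf("trace: rfkill_poll on %s\n", rdev->wiphy.name);	/* tracepoint stand-in */
	rdev->ops->rfkill_poll(&rdev->wiphy);
}

static void demo_rfkill_poll(struct wiphy *wiphy)
{
	printf("driver rfkill_poll called for %s\n", wiphy->name);
}

static const struct cfg80211_ops demo_ops = {
	.rfkill_poll = demo_rfkill_poll,
};

int main(void)
{
	struct cfg80211_registered_device rdev = {
		.wiphy = { .name = "phy0" },
		.ops = &demo_ops,
	};

	rdev_rfkill_poll(&rdev);	/* replaces rdev->ops->rfkill_poll(&rdev->wiphy) */
	return 0;
}

The benefit is purely mechanical: every driver callback gains a single point where the new trace.o events added to the wireless Makefile can be emitted.
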
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a343be4..a0c8dec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -55,7 +55,8 @@
 	int opencount; /* also protected by devlist_mtx */
 	wait_queue_head_t dev_wait;
 
-	u32 ap_beacons_nlportid;
+	struct list_head beacon_registrations;
+	spinlock_t beacon_registrations_lock;
 
 	/* protected by RTNL only */
 	int num_running_ifaces;
@@ -260,6 +261,10 @@
 	CHAN_MODE_EXCLUSIVE,
 };
 
+struct cfg80211_beacon_registration {
+	struct list_head list;
+	u32 nlportid;
+};
 
 /* free object */
 extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
@@ -304,9 +309,9 @@
 		       const struct mesh_config *conf);
 int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
 			struct net_device *dev);
-int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
-			   struct wireless_dev *wdev, int freq,
-			   enum nl80211_channel_type channel_type);
+int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
+			      struct wireless_dev *wdev,
+			      struct cfg80211_chan_def *chandef);
 
 /* AP */
 int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
@@ -320,13 +325,15 @@
 			 const u8 *bssid,
 			 const u8 *ssid, int ssid_len,
 			 const u8 *ie, int ie_len,
-			 const u8 *key, int key_len, int key_idx);
+			 const u8 *key, int key_len, int key_idx,
+			 const u8 *sae_data, int sae_data_len);
 int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
 		       struct net_device *dev, struct ieee80211_channel *chan,
 		       enum nl80211_auth_type auth_type, const u8 *bssid,
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
-		       const u8 *key, int key_len, int key_idx);
+		       const u8 *key, int key_len, int key_idx,
+		       const u8 *sae_data, int sae_data_len);
 int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			  struct net_device *dev,
 			  struct ieee80211_channel *chan,
@@ -371,10 +378,8 @@
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 			  struct wireless_dev *wdev,
 			  struct ieee80211_channel *chan, bool offchan,
-			  enum nl80211_channel_type channel_type,
-			  bool channel_type_valid, unsigned int wait,
-			  const u8 *buf, size_t len, bool no_cck,
-			  bool dont_wait_for_ack, u64 *cookie);
+			  unsigned int wait, const u8 *buf, size_t len,
+			  bool no_cck, bool dont_wait_for_ack, u64 *cookie);
 void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
 			       const struct ieee80211_ht_cap *ht_capa_mask);
 
@@ -465,11 +470,8 @@
 		        struct ieee80211_channel **chan,
 		        enum cfg80211_chan_mode *chanmode);
 
-struct ieee80211_channel *
-rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
-		  int freq, enum nl80211_channel_type channel_type);
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
-				 int freq, enum nl80211_channel_type chantype);
+				 struct cfg80211_chan_def *chandef);
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
 			   const u8 *rates, unsigned int n_rates,
@@ -481,6 +483,12 @@
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
 			       enum nl80211_iftype iftype, int num);
 
+bool cfg80211_chan_def_valid(const struct cfg80211_chan_def *chandef);
+
+bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
+				 u32 center_freq, u32 bandwidth,
+				 u32 prohibited_flags);
+
 #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
 
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 7eecdf4..48c48ff 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -2,6 +2,7 @@
 #include <net/cfg80211.h>
 #include "core.h"
 #include "ethtool.h"
+#include "rdev-ops.h"
 
 static void cfg80211_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
@@ -47,9 +48,8 @@
 	memset(rp, 0, sizeof(*rp));
 
 	if (rdev->ops->get_ringparam)
-		rdev->ops->get_ringparam(wdev->wiphy,
-					 &rp->tx_pending, &rp->tx_max_pending,
-					 &rp->rx_pending, &rp->rx_max_pending);
+		rdev_get_ringparam(rdev, &rp->tx_pending, &rp->tx_max_pending,
+				   &rp->rx_pending, &rp->rx_max_pending);
 }
 
 static int cfg80211_set_ringparam(struct net_device *dev,
@@ -62,8 +62,7 @@
 		return -EINVAL;
 
 	if (rdev->ops->set_ringparam)
-		return rdev->ops->set_ringparam(wdev->wiphy,
-						rp->tx_pending, rp->rx_pending);
+		return rdev_set_ringparam(rdev, rp->tx_pending, rp->rx_pending);
 
 	return -ENOTSUPP;
 }
@@ -73,7 +72,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 	if (rdev->ops->get_et_sset_count)
-		return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset);
+		return rdev_get_et_sset_count(rdev, dev, sset);
 	return -EOPNOTSUPP;
 }
 
@@ -83,7 +82,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 	if (rdev->ops->get_et_stats)
-		rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data);
+		rdev_get_et_stats(rdev, dev, stats, data);
 }
 
 static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -91,7 +90,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 	if (rdev->ops->get_et_strings)
-		rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data);
+		rdev_get_et_strings(rdev, dev, sset, data);
 }
 
 const struct ethtool_ops cfg80211_ethtool_ops = {
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index ca5672f..9b9551e 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -11,6 +11,7 @@
 #include <net/cfg80211.h>
 #include "wext-compat.h"
 #include "nl80211.h"
+#include "rdev-ops.h"
 
 
 void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
@@ -61,6 +62,8 @@
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
+	trace_cfg80211_ibss_joined(dev, bssid);
+
 	CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
 	ev = kzalloc(sizeof(*ev), gfp);
@@ -97,9 +100,9 @@
 		* 11a for maximum compatibility.
 		*/
 		struct ieee80211_supported_band *sband =
-			rdev->wiphy.bands[params->channel->band];
+			rdev->wiphy.bands[params->chandef.chan->band];
 		int j;
-		u32 flag = params->channel->band == IEEE80211_BAND_5GHZ ?
+		u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ?
 			IEEE80211_RATE_MANDATORY_A :
 			IEEE80211_RATE_MANDATORY_B;
 
@@ -115,11 +118,11 @@
 
 	wdev->ibss_fixed = params->channel_fixed;
 #ifdef CONFIG_CFG80211_WEXT
-	wdev->wext.ibss.channel = params->channel;
+	wdev->wext.ibss.chandef = params->chandef;
 #endif
 	wdev->sme_state = CFG80211_SME_CONNECTING;
 
-	err = cfg80211_can_use_chan(rdev, wdev, params->channel,
+	err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan,
 				    params->channel_fixed
 				    ? CHAN_MODE_SHARED
 				    : CHAN_MODE_EXCLUSIVE);
@@ -128,7 +131,7 @@
 		return err;
 	}
 
-	err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
+	err = rdev_join_ibss(rdev, dev, params);
 	if (err) {
 		wdev->connect_keys = NULL;
 		wdev->sme_state = CFG80211_SME_IDLE;
@@ -175,7 +178,7 @@
 	 */
 	if (rdev->ops->del_key)
 		for (i = 0; i < 6; i++)
-			rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL);
+			rdev_del_key(rdev, dev, i, false, NULL);
 
 	if (wdev->current_bss) {
 		cfg80211_unhold_bss(wdev->current_bss);
@@ -211,7 +214,7 @@
 	if (!wdev->ssid_len)
 		return -ENOLINK;
 
-	err = rdev->ops->leave_ibss(&rdev->wiphy, dev);
+	err = rdev_leave_ibss(rdev, dev);
 
 	if (err)
 		return err;
@@ -248,7 +251,9 @@
 		wdev->wext.ibss.beacon_interval = 100;
 
 	/* try to find an IBSS channel if none requested ... */
-	if (!wdev->wext.ibss.channel) {
+	if (!wdev->wext.ibss.chandef.chan) {
+		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+
 		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 			struct ieee80211_supported_band *sband;
 			struct ieee80211_channel *chan;
@@ -263,15 +268,15 @@
 					continue;
 				if (chan->flags & IEEE80211_CHAN_DISABLED)
 					continue;
-				wdev->wext.ibss.channel = chan;
+				wdev->wext.ibss.chandef.chan = chan;
 				break;
 			}
 
-			if (wdev->wext.ibss.channel)
+			if (wdev->wext.ibss.chandef.chan)
 				break;
 		}
 
-		if (!wdev->wext.ibss.channel)
+		if (!wdev->wext.ibss.chandef.chan)
 			return -EINVAL;
 	}
 
@@ -333,7 +338,7 @@
 			return -EINVAL;
 	}
 
-	if (wdev->wext.ibss.channel == chan)
+	if (wdev->wext.ibss.chandef.chan == chan)
 		return 0;
 
 	wdev_lock(wdev);
@@ -346,7 +351,8 @@
 		return err;
 
 	if (chan) {
-		wdev->wext.ibss.channel = chan;
+		wdev->wext.ibss.chandef.chan = chan;
+		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
 		wdev->wext.ibss.channel_fixed = true;
 	} else {
 		/* cfg80211_ibss_wext_join will pick one if needed */
@@ -376,8 +382,8 @@
 	wdev_lock(wdev);
 	if (wdev->current_bss)
 		chan = wdev->current_bss->pub.channel;
-	else if (wdev->wext.ibss.channel)
-		chan = wdev->wext.ibss.channel;
+	else if (wdev->wext.ibss.chandef.chan)
+		chan = wdev->wext.ibss.chandef.chan;
 	wdev_unlock(wdev);
 
 	if (chan) {
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index c384e77..3ee5a72 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -3,6 +3,7 @@
 #include <net/cfg80211.h>
 #include "nl80211.h"
 #include "core.h"
+#include "rdev-ops.h"
 
 /* Default values, timeouts in ms */
 #define MESH_TTL 		31
@@ -72,8 +73,6 @@
 
 const struct mesh_setup default_mesh_setup = {
 	/* cfg80211_join_mesh() will pick a channel if needed */
-	.channel = NULL,
-	.channel_type = NL80211_CHAN_NO_HT,
 	.sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
 	.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
 	.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -110,13 +109,12 @@
 	if (!rdev->ops->join_mesh)
 		return -EOPNOTSUPP;
 
-	if (!setup->channel) {
+	if (!setup->chandef.chan) {
 		/* if no channel explicitly given, use preset channel */
-		setup->channel = wdev->preset_chan;
-		setup->channel_type = wdev->preset_chantype;
+		setup->chandef = wdev->preset_chandef;
 	}
 
-	if (!setup->channel) {
+	if (!setup->chandef.chan) {
 		/* if we don't have that either, use the first usable channel */
 		enum ieee80211_band band;
 
@@ -136,35 +134,34 @@
 						   IEEE80211_CHAN_DISABLED |
 						   IEEE80211_CHAN_RADAR))
 					continue;
-				setup->channel = chan;
+				setup->chandef.chan = chan;
 				break;
 			}
 
-			if (setup->channel)
+			if (setup->chandef.chan)
 				break;
 		}
 
 		/* no usable channel ... */
-		if (!setup->channel)
+		if (!setup->chandef.chan)
 			return -EINVAL;
 
-		setup->channel_type = NL80211_CHAN_NO_HT;
+		setup->chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
 	}
 
-	if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel,
-					  setup->channel_type))
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
 		return -EINVAL;
 
-	err = cfg80211_can_use_chan(rdev, wdev, setup->channel,
+	err = cfg80211_can_use_chan(rdev, wdev, setup->chandef.chan,
 				    CHAN_MODE_SHARED);
 	if (err)
 		return err;
 
-	err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
+	err = rdev_join_mesh(rdev, dev, conf, setup);
 	if (!err) {
 		memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
 		wdev->mesh_id_len = setup->mesh_id_len;
-		wdev->channel = setup->channel;
+		wdev->channel = setup->chandef.chan;
 	}
 
 	return err;
@@ -187,20 +184,12 @@
 	return err;
 }
 
-int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
-			   struct wireless_dev *wdev, int freq,
-			   enum nl80211_channel_type channel_type)
+int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
+			      struct wireless_dev *wdev,
+			      struct cfg80211_chan_def *chandef)
 {
-	struct ieee80211_channel *channel;
 	int err;
 
-	channel = rdev_freq_to_chan(rdev, freq, channel_type);
-	if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
-						      channel,
-						      channel_type)) {
-		return -EINVAL;
-	}
-
 	/*
 	 * Workaround for libertas (only!), it puts the interface
 	 * into mesh mode but doesn't implement join_mesh. Instead,
@@ -209,22 +198,21 @@
 	 * compatible with 802.11 mesh.
 	 */
 	if (rdev->ops->libertas_set_mesh_channel) {
-		if (channel_type != NL80211_CHAN_NO_HT)
+		if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
 			return -EINVAL;
 
 		if (!netif_running(wdev->netdev))
 			return -ENETDOWN;
 
-		err = cfg80211_can_use_chan(rdev, wdev, channel,
+		err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
 					    CHAN_MODE_SHARED);
 		if (err)
 			return err;
 
-		err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy,
-							   wdev->netdev,
-							   channel);
+		err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
+						     chandef->chan);
 		if (!err)
-			wdev->channel = channel;
+			wdev->channel = chandef->chan;
 
 		return err;
 	}
@@ -232,8 +220,7 @@
 	if (wdev->mesh_id_len)
 		return -EBUSY;
 
-	wdev->preset_chan = channel;
-	wdev->preset_chantype = channel_type;
+	wdev->preset_chandef = *chandef;
 	return 0;
 }
 
@@ -242,6 +229,7 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 
+	trace_cfg80211_notify_new_peer_candidate(dev, macaddr);
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
 		return;
 
@@ -267,7 +255,7 @@
 	if (!wdev->mesh_id_len)
 		return -ENOTCONN;
 
-	err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
+	err = rdev_leave_mesh(rdev, dev);
 	if (!err) {
 		wdev->mesh_id_len = 0;
 		wdev->channel = NULL;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 904a7f3..5e8123e 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -15,6 +15,8 @@
 #include <net/iw_handler.h>
 #include "core.h"
 #include "nl80211.h"
+#include "rdev-ops.h"
+
 
 void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
 {
@@ -22,6 +24,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_send_rx_auth(dev);
 	wdev_lock(wdev);
 
 	nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
@@ -42,6 +45,7 @@
 	u8 *ie = mgmt->u.assoc_resp.variable;
 	int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
 
+	trace_cfg80211_send_rx_assoc(dev, bss);
 	wdev_lock(wdev);
 
 	status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -98,6 +102,7 @@
 	const u8 *bssid = mgmt->bssid;
 	bool was_current = false;
 
+	trace___cfg80211_send_deauth(dev);
 	ASSERT_WDEV_LOCK(wdev);
 
 	if (wdev->current_bss &&
@@ -147,6 +152,7 @@
 	u16 reason_code;
 	bool from_ap;
 
+	trace___cfg80211_send_disassoc(dev);
 	ASSERT_WDEV_LOCK(wdev);
 
 	nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL);
@@ -188,6 +194,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_send_unprot_deauth(dev);
 	nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
@@ -199,6 +206,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_send_unprot_disassoc(dev);
 	nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
@@ -209,6 +217,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_send_auth_timeout(dev, addr);
 	wdev_lock(wdev);
 
 	nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
@@ -227,6 +236,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_send_assoc_timeout(dev, addr);
 	wdev_lock(wdev);
 
 	nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL);
@@ -261,6 +271,7 @@
 	}
 #endif
 
+	trace_cfg80211_michael_mic_failure(dev, addr, key_type, key_id, tsc);
 	nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp);
 }
 EXPORT_SYMBOL(cfg80211_michael_mic_failure);
@@ -273,7 +284,8 @@
 			 const u8 *bssid,
 			 const u8 *ssid, int ssid_len,
 			 const u8 *ie, int ie_len,
-			 const u8 *key, int key_len, int key_idx)
+			 const u8 *key, int key_len, int key_idx,
+			 const u8 *sae_data, int sae_data_len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_auth_request req;
@@ -293,6 +305,8 @@
 
 	req.ie = ie;
 	req.ie_len = ie_len;
+	req.sae_data = sae_data;
+	req.sae_data_len = sae_data_len;
 	req.auth_type = auth_type;
 	req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
 				   WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -307,7 +321,7 @@
 	if (err)
 		goto out;
 
-	err = rdev->ops->auth(&rdev->wiphy, dev, &req);
+	err = rdev_auth(rdev, dev, &req);
 
 out:
 	cfg80211_put_bss(req.bss);
@@ -319,7 +333,8 @@
 		       enum nl80211_auth_type auth_type, const u8 *bssid,
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
-		       const u8 *key, int key_len, int key_idx)
+		       const u8 *key, int key_len, int key_idx,
+		       const u8 *sae_data, int sae_data_len)
 {
 	int err;
 
@@ -327,7 +342,8 @@
 	wdev_lock(dev->ieee80211_ptr);
 	err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				   ssid, ssid_len, ie, ie_len,
-				   key, key_len, key_idx);
+				   key, key_len, key_idx,
+				   sae_data, sae_data_len);
 	wdev_unlock(dev->ieee80211_ptr);
 	mutex_unlock(&rdev->devlist_mtx);
 
@@ -410,7 +426,7 @@
 	if (err)
 		goto out;
 
-	err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
+	err = rdev_assoc(rdev, dev, &req);
 
 out:
 	if (err) {
@@ -466,7 +482,7 @@
 	    !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
 		return 0;
 
-	return rdev->ops->deauth(&rdev->wiphy, dev, &req);
+	return rdev_deauth(rdev, dev, &req);
 }
 
 int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
@@ -511,7 +527,7 @@
 	else
 		return -ENOTCONN;
 
-	return rdev->ops->disassoc(&rdev->wiphy, dev, &req);
+	return rdev_disassoc(rdev, dev, &req);
 }
 
 int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
@@ -552,7 +568,7 @@
 
 	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
 	req.bssid = bssid;
-	rdev->ops->deauth(&rdev->wiphy, dev, &req);
+	rdev_deauth(rdev, dev, &req);
 
 	if (wdev->current_bss) {
 		cfg80211_unhold_bss(wdev->current_bss);
@@ -563,27 +579,25 @@
 
 void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
 			       struct ieee80211_channel *chan,
-			       enum nl80211_channel_type channel_type,
 			       unsigned int duration, gfp_t gfp)
 {
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type,
-				       duration, gfp);
+	trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
+	nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, duration, gfp);
 }
 EXPORT_SYMBOL(cfg80211_ready_on_channel);
 
 void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
 					struct ieee80211_channel *chan,
-					enum nl80211_channel_type channel_type,
 					gfp_t gfp)
 {
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan,
-					      channel_type, gfp);
+	trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
+	nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, gfp);
 }
 EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
 
@@ -593,6 +607,7 @@
 	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_new_sta(dev, mac_addr, sinfo);
 	nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
 }
 EXPORT_SYMBOL(cfg80211_new_sta);
@@ -602,6 +617,7 @@
 	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_del_sta(dev, mac_addr);
 	nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
 }
 EXPORT_SYMBOL(cfg80211_del_sta);
@@ -682,7 +698,7 @@
 	list_add(&nreg->list, &wdev->mgmt_registrations);
 
 	if (rdev->ops->mgmt_frame_register)
-		rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true);
+		rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
 
  out:
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -705,8 +721,8 @@
 		if (rdev->ops->mgmt_frame_register) {
 			u16 frame_type = le16_to_cpu(reg->frame_type);
 
-			rdev->ops->mgmt_frame_register(wiphy, wdev,
-						       frame_type, false);
+			rdev_mgmt_frame_register(rdev, wdev,
+						 frame_type, false);
 		}
 
 		list_del(&reg->list);
@@ -736,10 +752,8 @@
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 			  struct wireless_dev *wdev,
 			  struct ieee80211_channel *chan, bool offchan,
-			  enum nl80211_channel_type channel_type,
-			  bool channel_type_valid, unsigned int wait,
-			  const u8 *buf, size_t len, bool no_cck,
-			  bool dont_wait_for_ack, u64 *cookie)
+			  unsigned int wait, const u8 *buf, size_t len,
+			  bool no_cck, bool dont_wait_for_ack, u64 *cookie)
 {
 	const struct ieee80211_mgmt *mgmt;
 	u16 stype;
@@ -832,10 +846,9 @@
 		return -EINVAL;
 
 	/* Transmit the Action frame as requested by user space */
-	return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
-				  channel_type, channel_type_valid,
-				  wait, buf, len, no_cck, dont_wait_for_ack,
-				  cookie);
+	return rdev_mgmt_tx(rdev, wdev, chan, offchan,
+			    wait, buf, len, no_cck, dont_wait_for_ack,
+			    cookie);
 }
 
 bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
@@ -854,10 +867,13 @@
 		cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
 	u16 stype;
 
+	trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
 	stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
 
-	if (!(stypes->rx & BIT(stype)))
+	if (!(stypes->rx & BIT(stype))) {
+		trace_cfg80211_return_bool(false);
 		return false;
+	}
 
 	data = buf + ieee80211_hdrlen(mgmt->frame_control);
 	data_len = len - ieee80211_hdrlen(mgmt->frame_control);
@@ -888,6 +904,7 @@
 
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
+	trace_cfg80211_return_bool(result);
 	return result;
 }
 EXPORT_SYMBOL(cfg80211_rx_mgmt);
@@ -898,6 +915,8 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
+
 	/* Indicate TX status of the Action frame to user space */
 	nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
 }
@@ -911,6 +930,8 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
+
 	/* Indicate roaming trigger event to user space */
 	nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
 }
@@ -923,6 +944,8 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
+
 	/* Indicate roaming trigger event to user space */
 	nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
 }
@@ -948,6 +971,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_gtk_rekey_notify(dev, bssid);
 	nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
 }
 EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
@@ -959,17 +983,19 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
 	nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
 }
 EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
 
-void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
-			       enum nl80211_channel_type type)
+void cfg80211_ch_switch_notify(struct net_device *dev,
+			       struct cfg80211_chan_def *chandef)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-	struct ieee80211_channel *chan;
+
+	trace_cfg80211_ch_switch_notify(dev, chandef);
 
 	wdev_lock(wdev);
 
@@ -977,12 +1003,8 @@
 		    wdev->iftype != NL80211_IFTYPE_P2P_GO))
 		goto out;
 
-	chan = rdev_freq_to_chan(rdev, freq, type);
-	if (WARN_ON(!chan))
-		goto out;
-
-	wdev->channel = chan;
-	nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
+	wdev->channel = chandef->chan;
+	nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
 out:
 	wdev_unlock(wdev);
 	return;
@@ -993,12 +1015,18 @@
 				const u8 *addr, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	bool ret;
+
+	trace_cfg80211_rx_spurious_frame(dev, addr);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
-		    wdev->iftype != NL80211_IFTYPE_P2P_GO))
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
+		trace_cfg80211_return_bool(false);
 		return false;
-
-	return nl80211_unexpected_frame(dev, addr, gfp);
+	}
+	ret = nl80211_unexpected_frame(dev, addr, gfp);
+	trace_cfg80211_return_bool(ret);
+	return ret;
 }
 EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
 
@@ -1006,12 +1034,18 @@
 					const u8 *addr, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	bool ret;
+
+	trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_GO &&
-		    wdev->iftype != NL80211_IFTYPE_AP_VLAN))
+		    wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
+		trace_cfg80211_return_bool(false);
 		return false;
-
-	return nl80211_unexpected_4addr_frame(dev, addr, gfp);
+	}
+	ret = nl80211_unexpected_4addr_frame(dev, addr, gfp);
+	trace_cfg80211_return_bool(ret);
+	return ret;
 }
 EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
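
Editor note: __cfg80211_mlme_auth() now carries sae_data/sae_data_len through to the driver, which user space supplies via NL80211_ATTR_SAE_DATA on NL80211_CMD_AUTHENTICATE. A hedged libnl-3 sketch of such a request follows; it assumes libnl-3 headers and an SAE-capable driver, the interface name, BSSID, SSID and frequency are placeholders, and the sae_commit bytes would in practice come from an SAE state machine (e.g. wpa_supplicant), not a literal array.

/*
 * Sketch of an NL80211_CMD_AUTHENTICATE request carrying SAE data.
 * Attribute values are placeholders; error handling is minimal.
 */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int main(void)
{
	/* Placeholder SAE payload: transaction seq, status, then SAE fields. */
	static const unsigned char sae_commit[] = { 0x01, 0x00, 0x00, 0x00 };
	static const unsigned char bssid[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_AUTHENTICATE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
	nla_put(msg, NL80211_ATTR_MAC, sizeof(bssid), bssid);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);
	nla_put(msg, NL80211_ATTR_SSID, 7, "example");
	nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_SAE);
	nla_put(msg, NL80211_ATTR_SAE_DATA, sizeof(sae_commit), sae_commit);

	/* The kernel hands the SAE payload to the driver via req.sae_data. */
	if (nl_send_auto(sk, msg) < 0)
		return 1;

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
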
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 0418a6d..d038fa4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -22,8 +22,8 @@
 #include "core.h"
 #include "nl80211.h"
 #include "reg.h"
+#include "rdev-ops.h"
 
-static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
 static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 				   struct genl_info *info,
 				   struct cfg80211_crypto_settings *settings,
@@ -223,8 +223,13 @@
 	[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
 				      .len = 20-1 },
 	[NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED },
+
 	[NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
 	[NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
+	[NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 },
+	[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
+	[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
+
 	[NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
 	[NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
 	[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
@@ -355,6 +360,9 @@
 	[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
 	[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
 	[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
+	[NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
+	[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
+	[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -690,7 +698,7 @@
 
 static struct cfg80211_cached_keys *
 nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
-		       struct nlattr *keys)
+		       struct nlattr *keys, bool *no_ht)
 {
 	struct key_parse parse;
 	struct nlattr *key;
@@ -733,6 +741,12 @@
 		result->params[parse.idx].key_len = parse.p.key_len;
 		result->params[parse.idx].key = result->data[parse.idx];
 		memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
+
+		if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 ||
+		    parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) {
+			if (no_ht)
+				*no_ht = true;
+		}
 	}
 
 	return result;
@@ -943,7 +957,7 @@
 	     dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
 		u32 tx_ant = 0, rx_ant = 0;
 		int res;
-		res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
+		res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
 		if (!res) {
 			if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
 					tx_ant) ||
@@ -1101,6 +1115,7 @@
 			goto nla_put_failure;
 	}
 	CMD(start_p2p_device, START_P2P_DEVICE);
+	CMD(set_mcast_rate, SET_MCAST_RATE);
 
 #ifdef CONFIG_NL80211_TESTMODE
 	CMD(testmode_cmd, TESTMODE);
@@ -1350,51 +1365,139 @@
 		wdev->iftype == NL80211_IFTYPE_P2P_GO;
 }
 
-static bool nl80211_valid_channel_type(struct genl_info *info,
-				       enum nl80211_channel_type *channel_type)
+static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+				 struct genl_info *info,
+				 struct cfg80211_chan_def *chandef)
 {
-	enum nl80211_channel_type tmp;
+	struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	u32 control_freq, width;
 
-	if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
-		return false;
+	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+		return -EINVAL;
 
-	tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
-	if (tmp != NL80211_CHAN_NO_HT &&
-	    tmp != NL80211_CHAN_HT20 &&
-	    tmp != NL80211_CHAN_HT40PLUS &&
-	    tmp != NL80211_CHAN_HT40MINUS)
-		return false;
+	control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
-	if (channel_type)
-		*channel_type = tmp;
+	chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
+	chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+	chandef->center_freq1 = control_freq;
+	chandef->center_freq2 = 0;
 
-	return true;
+	/* Primary channel not allowed */
+	if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED)
+		return -EINVAL;
+
+	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+		enum nl80211_channel_type chantype;
+
+		chantype = nla_get_u32(
+				info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+
+		switch (chantype) {
+		case NL80211_CHAN_NO_HT:
+		case NL80211_CHAN_HT20:
+		case NL80211_CHAN_HT40PLUS:
+		case NL80211_CHAN_HT40MINUS:
+			cfg80211_chandef_create(chandef, chandef->chan,
+						chantype);
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
+		chandef->width =
+			nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+		if (info->attrs[NL80211_ATTR_CENTER_FREQ1])
+			chandef->center_freq1 =
+				nla_get_u32(
+					info->attrs[NL80211_ATTR_CENTER_FREQ1]);
+		if (info->attrs[NL80211_ATTR_CENTER_FREQ2])
+			chandef->center_freq2 =
+				nla_get_u32(
+					info->attrs[NL80211_ATTR_CENTER_FREQ2]);
+	}
+
+	ht_cap = &rdev->wiphy.bands[chandef->chan->band]->ht_cap;
+	vht_cap = &rdev->wiphy.bands[chandef->chan->band]->vht_cap;
+
+	if (!cfg80211_chan_def_valid(chandef))
+		return -EINVAL;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20:
+		if (!ht_cap->ht_supported)
+			return -EINVAL;
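+		/* fall through to the common 20 MHz handling */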
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		width = 20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		width = 40;
+		/* quick early regulatory check */
+		if (chandef->center_freq1 < control_freq &&
+		    chandef->chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
+			return -EINVAL;
+		if (chandef->center_freq1 > control_freq &&
+		    chandef->chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
+			return -EINVAL;
+		if (!ht_cap->ht_supported)
+			return -EINVAL;
+		if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
+		    ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
+			return -EINVAL;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		width = 80;
+		if (!vht_cap->vht_supported)
+			return -EINVAL;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		width = 80;
+		if (!vht_cap->vht_supported)
+			return -EINVAL;
+		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+			return -EINVAL;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		width = 160;
+		if (!vht_cap->vht_supported)
+			return -EINVAL;
+		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!cfg80211_secondary_chans_ok(&rdev->wiphy, chandef->center_freq1,
+					 width, IEEE80211_CHAN_DISABLED))
+		return -EINVAL;
+	if (chandef->center_freq2 &&
+	    !cfg80211_secondary_chans_ok(&rdev->wiphy, chandef->center_freq2,
+					 width, IEEE80211_CHAN_DISABLED))
+		return -EINVAL;
+
+	/* TODO: missing regulatory check on bandwidth */
+
+	return 0;
 }
 
 static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev,
 				 struct genl_info *info)
 {
-	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-	u32 freq;
+	struct cfg80211_chan_def chandef;
 	int result;
 	enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
 
 	if (wdev)
 		iftype = wdev->iftype;
 
-	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
-		return -EINVAL;
-
 	if (!nl80211_can_set_dev_channel(wdev))
 		return -EOPNOTSUPP;
 
-	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
-	    !nl80211_valid_channel_type(info, &channel_type))
-		return -EINVAL;
-
-	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+	result = nl80211_parse_chandef(rdev, info, &chandef);
+	if (result)
+		return result;
 
 	mutex_lock(&rdev->devlist_mtx);
 	switch (iftype) {
@@ -1404,22 +1507,18 @@
 			result = -EBUSY;
 			break;
 		}
-		channel = rdev_freq_to_chan(rdev, freq, channel_type);
-		if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
-							      channel,
-							      channel_type)) {
+		if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
 			result = -EINVAL;
 			break;
 		}
-		wdev->preset_chan = channel;
-		wdev->preset_chantype = channel_type;
+		wdev->preset_chandef = chandef;
 		result = 0;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
-		result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type);
+		result = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		result = cfg80211_set_monitor_channel(rdev, freq, channel_type);
+		result = cfg80211_set_monitor_channel(rdev, &chandef);
 		break;
 	default:
 		result = -EINVAL;
@@ -1457,7 +1556,7 @@
 		return -EOPNOTSUPP;
 
 	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
-	return rdev->ops->set_wds_peer(wdev->wiphy, dev, bssid);
+	return rdev_set_wds_peer(rdev, dev, bssid);
 }
 
 
@@ -1507,10 +1606,8 @@
 		result = 0;
 
 		mutex_lock(&rdev->mtx);
-	} else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+	} else
 		wdev = netdev->ieee80211_ptr;
-	else
-		wdev = NULL;
 
 	/*
 	 * end workaround code, by now the rdev is available
@@ -1562,24 +1659,29 @@
 			if (result)
 				goto bad_res;
 
-			result = rdev->ops->set_txq_params(&rdev->wiphy,
-							   netdev,
-							   &txq_params);
+			result = rdev_set_txq_params(rdev, netdev,
+						     &txq_params);
 			if (result)
 				goto bad_res;
 		}
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-		result = __nl80211_set_channel(rdev, wdev, info);
+		result = __nl80211_set_channel(rdev,
+				nl80211_can_set_dev_channel(wdev) ? wdev : NULL,
+				info);
 		if (result)
 			goto bad_res;
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
+		struct wireless_dev *txp_wdev = wdev;
 		enum nl80211_tx_power_setting type;
 		int idx, mbm = 0;
 
+		if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER))
+			txp_wdev = NULL;
+
 		if (!rdev->ops->set_tx_power) {
 			result = -EOPNOTSUPP;
 			goto bad_res;
@@ -1599,7 +1701,7 @@
 			mbm = nla_get_u32(info->attrs[idx]);
 		}
 
-		result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm);
+		result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
 		if (result)
 			goto bad_res;
 	}
@@ -1628,7 +1730,7 @@
 		tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
 		rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
 
-		result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+		result = rdev_set_antenna(rdev, tx_ant, rx_ant);
 		if (result)
 			goto bad_res;
 	}
@@ -1713,7 +1815,7 @@
 		if (changed & WIPHY_PARAM_COVERAGE_CLASS)
 			rdev->wiphy.coverage_class = coverage_class;
 
-		result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+		result = rdev_set_wiphy_params(rdev, changed);
 		if (result) {
 			rdev->wiphy.retry_short = old_retry_short;
 			rdev->wiphy.retry_long = old_retry_long;
@@ -1736,6 +1838,35 @@
 	       ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
 }
 
+static int nl80211_send_chandef(struct sk_buff *msg,
+				 struct cfg80211_chan_def *chandef)
+{
+	WARN_ON(!cfg80211_chan_def_valid(chandef));
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
+			chandef->chan->center_freq))
+		return -ENOBUFS;
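+	/* also emit the legacy channel-type attribute for widths it can express */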
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+	case NL80211_CHAN_WIDTH_40:
+		if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+				cfg80211_get_chandef_type(chandef)))
+			return -ENOBUFS;
+		break;
+	default:
+		break;
+	}
+	if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width))
+		return -ENOBUFS;
+	if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1))
+		return -ENOBUFS;
+	if (chandef->center_freq2 &&
+	    nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
+		return -ENOBUFS;
+	return 0;
+}
+
 static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 			      struct cfg80211_registered_device *rdev,
 			      struct wireless_dev *wdev)
@@ -1762,16 +1893,18 @@
 		goto nla_put_failure;
 
 	if (rdev->ops->get_channel) {
-		struct ieee80211_channel *chan;
-		enum nl80211_channel_type channel_type;
+		int ret;
+		struct cfg80211_chan_def chandef;
 
-		chan = rdev->ops->get_channel(&rdev->wiphy, wdev,
-					      &channel_type);
-		if (chan &&
-		    (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
-				 chan->center_freq) ||
-		     nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
-				 channel_type)))
+		ret = rdev_get_channel(rdev, wdev, &chandef);
+		if (ret == 0) {
+			if (nl80211_send_chandef(msg, &chandef))
+				goto nla_put_failure;
+		}
+	}
+
+	if (wdev->ssid_len) {
+		if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
 			goto nla_put_failure;
 	}
 
@@ -2014,9 +2147,9 @@
 	err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
 				  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
 				  &flags);
-	wdev = rdev->ops->add_virtual_intf(&rdev->wiphy,
-		nla_data(info->attrs[NL80211_ATTR_IFNAME]),
-		type, err ? NULL : &flags, &params);
+	wdev = rdev_add_virtual_intf(rdev,
+				nla_data(info->attrs[NL80211_ATTR_IFNAME]),
+				type, err ? NULL : &flags, &params);
 	if (IS_ERR(wdev)) {
 		nlmsg_free(msg);
 		return PTR_ERR(wdev);
@@ -2083,7 +2216,7 @@
 	if (!wdev->netdev)
 		info->user_ptr[1] = NULL;
 
-	return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
+	return rdev_del_virtual_intf(rdev, wdev);
 }
 
 static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -2100,7 +2233,7 @@
 
 	noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
 
-	return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+	return rdev_set_noack_map(rdev, dev, noack_map);
 }
 
 struct get_key_cookie {
@@ -2210,8 +2343,8 @@
 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
 		return -ENOENT;
 
-	err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, pairwise,
-				 mac_addr, &cookie, get_key_callback);
+	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
+			   get_key_callback);
 
 	if (err)
 		goto free_msg;
@@ -2259,7 +2392,7 @@
 		if (err)
 			goto out;
 
-		err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
+		err = rdev_set_default_key(rdev, dev, key.idx,
 						 key.def_uni, key.def_multi);
 
 		if (err)
@@ -2283,8 +2416,7 @@
 		if (err)
 			goto out;
 
-		err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
-						      dev, key.idx);
+		err = rdev_set_default_mgmt_key(rdev, dev, key.idx);
 		if (err)
 			goto out;
 
@@ -2340,9 +2472,9 @@
 	wdev_lock(dev->ieee80211_ptr);
 	err = nl80211_key_allowed(dev->ieee80211_ptr);
 	if (!err)
-		err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx,
-					 key.type == NL80211_KEYTYPE_PAIRWISE,
-					 mac_addr, &key.p);
+		err = rdev_add_key(rdev, dev, key.idx,
+				   key.type == NL80211_KEYTYPE_PAIRWISE,
+				    mac_addr, &key.p);
 	wdev_unlock(dev->ieee80211_ptr);
 
 	return err;
@@ -2386,9 +2518,9 @@
 		err = -ENOENT;
 
 	if (!err)
-		err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx,
-					 key.type == NL80211_KEYTYPE_PAIRWISE,
-					 mac_addr);
+		err = rdev_del_key(rdev, dev, key.idx,
+				   key.type == NL80211_KEYTYPE_PAIRWISE,
+				   mac_addr);
 
 #ifdef CONFIG_CFG80211_WEXT
 	if (!err) {
@@ -2476,11 +2608,10 @@
 		    wdev->iftype != NL80211_IFTYPE_P2P_GO)
 			continue;
 
-		if (!wdev->preset_chan)
+		if (!wdev->preset_chandef.chan)
 			continue;
 
-		params->channel = wdev->preset_chan;
-		params->channel_type = wdev->preset_chantype;
+		params->chandef = wdev->preset_chandef;
 		ret = true;
 		break;
 	}
@@ -2490,6 +2621,30 @@
 	return ret;
 }
 
+static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
+				    enum nl80211_auth_type auth_type,
+				    enum nl80211_commands cmd)
+{
+	if (auth_type > NL80211_AUTHTYPE_MAX)
+		return false;
+
+	switch (cmd) {
+	case NL80211_CMD_AUTHENTICATE:
+		if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
+		    auth_type == NL80211_AUTHTYPE_SAE)
+			return false;
+		return true;
+	case NL80211_CMD_CONNECT:
+	case NL80211_CMD_START_AP:
+		/* SAE not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_SAE)
+			return false;
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2559,7 +2714,8 @@
 	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
 		params.auth_type = nla_get_u32(
 			info->attrs[NL80211_ATTR_AUTH_TYPE]);
-		if (!nl80211_valid_auth_type(params.auth_type))
+		if (!nl80211_valid_auth_type(rdev, params.auth_type,
+					     NL80211_CMD_START_AP))
 			return -EINVAL;
 	} else
 		params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
@@ -2577,42 +2733,32 @@
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-		enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-
-		if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
-		    !nl80211_valid_channel_type(info, &channel_type))
-			return -EINVAL;
-
-		params.channel = rdev_freq_to_chan(rdev,
-			nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
-			channel_type);
-		if (!params.channel)
-			return -EINVAL;
-		params.channel_type = channel_type;
-	} else if (wdev->preset_chan) {
-		params.channel = wdev->preset_chan;
-		params.channel_type = wdev->preset_chantype;
+		err = nl80211_parse_chandef(rdev, info, &params.chandef);
+		if (err)
+			return err;
+	} else if (wdev->preset_chandef.chan) {
+		params.chandef = wdev->preset_chandef;
 	} else if (!nl80211_get_ap_channel(rdev, &params))
 		return -EINVAL;
 
-	if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel,
-					  params.channel_type))
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
 		return -EINVAL;
 
 	mutex_lock(&rdev->devlist_mtx);
-	err = cfg80211_can_use_chan(rdev, wdev, params.channel,
+	err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan,
 				    CHAN_MODE_SHARED);
 	mutex_unlock(&rdev->devlist_mtx);
 
 	if (err)
 		return err;
 
-	err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
+	err = rdev_start_ap(rdev, dev, &params);
 	if (!err) {
-		wdev->preset_chan = params.channel;
-		wdev->preset_chantype = params.channel_type;
+		wdev->preset_chandef = params.chandef;
 		wdev->beacon_interval = params.beacon_interval;
-		wdev->channel = params.channel;
+		wdev->channel = params.chandef.chan;
+		wdev->ssid_len = params.ssid_len;
+		memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
 	}
 	return err;
 }
@@ -2639,7 +2785,7 @@
 	if (err)
 		return err;
 
-	return rdev->ops->change_beacon(&rdev->wiphy, dev, &params);
+	return rdev_change_beacon(rdev, dev, &params);
 }
 
 static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
@@ -2744,29 +2890,52 @@
 
 	rate = nla_nest_start(msg, attr);
 	if (!rate)
-		goto nla_put_failure;
+		return false;
 
 	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
 	bitrate = cfg80211_calculate_bitrate(info);
 	/* report 16-bit bitrate only if we can */
 	bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
-	if ((bitrate > 0 &&
-	     nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) ||
-	    (bitrate_compat > 0 &&
-	     nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) ||
-	    ((info->flags & RATE_INFO_FLAGS_MCS) &&
-	     nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
-	    ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
-	     nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
-	    ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
-	     nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
-		goto nla_put_failure;
+	if (bitrate > 0 &&
+	    nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate))
+		return false;
+	if (bitrate_compat > 0 &&
+	    nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat))
+		return false;
+
+	if (info->flags & RATE_INFO_FLAGS_MCS) {
+		if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
+			return false;
+	} else if (info->flags & RATE_INFO_FLAGS_VHT_MCS) {
+		if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs))
+			return false;
+		if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_80_MHZ_WIDTH))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_80P80_MHZ_WIDTH &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_80P80_MHZ_WIDTH))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_160_MHZ_WIDTH &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_160_MHZ_WIDTH))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
+			return false;
+	}
 
 	nla_nest_end(msg, rate);
 	return true;
-
-nla_put_failure:
-	return false;
 }
 
 static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
@@ -2923,8 +3092,8 @@
 
 	while (1) {
 		memset(&sinfo, 0, sizeof(sinfo));
-		err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
-					     mac_addr, &sinfo);
+		err = rdev_dump_station(dev, netdev, sta_idx,
+					mac_addr, &sinfo);
 		if (err == -ENOENT)
 			break;
 		if (err)
@@ -2969,7 +3138,7 @@
 	if (!rdev->ops->get_station)
 		return -EOPNOTSUPP;
 
-	err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo);
+	err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
 	if (err)
 		return err;
 
@@ -3146,7 +3315,7 @@
 
 	/* be aware of params.vlan when changing code here */
 
-	err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);
+	err = rdev_change_station(rdev, dev, mac_addr, &params);
 
 	if (params.vlan)
 		dev_put(params.vlan);
@@ -3198,6 +3367,10 @@
 		params.ht_capa =
 			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 
+	if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
+		params.vht_capa =
+			nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
+
 	if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
 		params.plink_action =
 		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
@@ -3275,7 +3448,7 @@
 
 	/* be aware of params.vlan when changing code here */
 
-	err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);
+	err = rdev_add_station(rdev, dev, mac_addr, &params);
 
 	if (params.vlan)
 		dev_put(params.vlan);
@@ -3300,7 +3473,7 @@
 	if (!rdev->ops->del_station)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
+	return rdev_del_station(rdev, dev, mac_addr);
 }
 
 static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
@@ -3382,8 +3555,8 @@
 	}
 
 	while (1) {
-		err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx,
-					   dst, next_hop, &pinfo);
+		err = rdev_dump_mpath(dev, netdev, path_idx, dst, next_hop,
+				      &pinfo);
 		if (err == -ENOENT)
 			break;
 		if (err)
@@ -3430,7 +3603,7 @@
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
 
-	err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo);
+	err = rdev_get_mpath(rdev, dev, dst, next_hop, &pinfo);
 	if (err)
 		return err;
 
@@ -3469,7 +3642,7 @@
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop);
+	return rdev_change_mpath(rdev, dev, dst, next_hop);
 }
 
 static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -3494,7 +3667,7 @@
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop);
+	return rdev_add_mpath(rdev, dev, dst, next_hop);
 }
 
 static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -3509,7 +3682,7 @@
 	if (!rdev->ops->del_mpath)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->del_mpath(&rdev->wiphy, dev, dst);
+	return rdev_del_mpath(rdev, dev, dst);
 }
 
 static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
@@ -3554,7 +3727,7 @@
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->change_bss(&rdev->wiphy, dev, &params);
+	return rdev_change_bss(rdev, dev, &params);
 }
 
 static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
@@ -3668,8 +3841,7 @@
 	if (!wdev->mesh_id_len)
 		memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
 	else
-		err = rdev->ops->get_mesh_config(&rdev->wiphy, dev,
-						 &cur_params);
+		err = rdev_get_mesh_config(rdev, dev, &cur_params);
 	wdev_unlock(wdev);
 
 	if (err)
@@ -3971,8 +4143,7 @@
 		err = -ENOLINK;
 
 	if (!err)
-		err = rdev->ops->update_mesh_config(&rdev->wiphy, dev,
-						    mask, &cfg);
+		err = rdev_update_mesh_config(rdev, dev, mask, &cfg);
 
 	wdev_unlock(wdev);
 
@@ -4337,14 +4508,27 @@
 		}
 	}
 
+	if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
+		request->flags = nla_get_u32(
+			info->attrs[NL80211_ATTR_SCAN_FLAGS]);
+		if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+		     !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
+		    ((request->flags & NL80211_SCAN_FLAG_FLUSH) &&
+		     !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) {
+			err = -EOPNOTSUPP;
+			goto out_free;
+		}
+	}
+
 	request->no_cck =
 		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
+	request->scan_start = jiffies;
 
 	rdev->scan_req = request;
-	err = rdev->ops->scan(&rdev->wiphy, request);
+	err = rdev_scan(rdev, request);
 
 	if (!err) {
 		nl80211_send_scan_start(rdev, wdev);
@@ -4568,11 +4752,24 @@
 		       request->ie_len);
 	}
 
+	if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
+		request->flags = nla_get_u32(
+			info->attrs[NL80211_ATTR_SCAN_FLAGS]);
+		if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+		     !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
+		    ((request->flags & NL80211_SCAN_FLAG_FLUSH) &&
+		     !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) {
+			err = -EOPNOTSUPP;
+			goto out_free;
+		}
+	}
+
 	request->dev = dev;
 	request->wiphy = &rdev->wiphy;
 	request->interval = interval;
+	request->scan_start = jiffies;
 
-	err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
+	err = rdev_sched_scan_start(rdev, dev, request);
 	if (!err) {
 		rdev->sched_scan_req = request;
 		nl80211_send_sched_scan(rdev, dev,
@@ -4815,8 +5012,7 @@
 	while (1) {
 		struct ieee80211_channel *chan;
 
-		res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
-					    &survey);
+		res = rdev_dump_survey(dev, netdev, survey_idx, &survey);
 		if (res == -ENOENT)
 			break;
 		if (res)
@@ -4852,11 +5048,6 @@
 	return res;
 }
 
-static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
-{
-	return auth_type <= NL80211_AUTHTYPE_MAX;
-}
-
 static bool nl80211_valid_wpa_versions(u32 wpa_versions)
 {
 	return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
@@ -4868,8 +5059,8 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct ieee80211_channel *chan;
-	const u8 *bssid, *ssid, *ie = NULL;
-	int err, ssid_len, ie_len = 0;
+	const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
+	int err, ssid_len, ie_len = 0, sae_data_len = 0;
 	enum nl80211_auth_type auth_type;
 	struct key_parse key;
 	bool local_state_change;
@@ -4945,9 +5136,23 @@
 	}
 
 	auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
-	if (!nl80211_valid_auth_type(auth_type))
+	if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
 		return -EINVAL;
 
+	if (auth_type == NL80211_AUTHTYPE_SAE &&
+	    !info->attrs[NL80211_ATTR_SAE_DATA])
+		return -EINVAL;
+
+	if (info->attrs[NL80211_ATTR_SAE_DATA]) {
+		if (auth_type != NL80211_AUTHTYPE_SAE)
+			return -EINVAL;
+		sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
+		sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
+		/* need to include at least Auth Transaction and Status Code */
+		if (sae_data_len < 4)
+			return -EINVAL;
+	}
+
 	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
 
 	/*
@@ -4959,7 +5164,8 @@
 
 	return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				  ssid, ssid_len, ie, ie_len,
-				  key.p.key, key.p.key_len, key.idx);
+				  key.p.key, key.p.key_len, key.idx,
+				  sae_data, sae_data_len);
 }
 
 static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -5250,8 +5456,7 @@
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
-	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
-	    !info->attrs[NL80211_ATTR_SSID] ||
+	if (!info->attrs[NL80211_ATTR_SSID] ||
 	    !nla_len(info->attrs[NL80211_ATTR_SSID]))
 		return -EINVAL;
 
@@ -5286,35 +5491,17 @@
 		ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
-	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
-		enum nl80211_channel_type channel_type;
+	err = nl80211_parse_chandef(rdev, info, &ibss.chandef);
+	if (err)
+		return err;
 
-		if (!nl80211_valid_channel_type(info, &channel_type))
-			return -EINVAL;
-
-		if (channel_type != NL80211_CHAN_NO_HT &&
-		    !(wiphy->features & NL80211_FEATURE_HT_IBSS))
-			return -EINVAL;
-
-		ibss.channel_type = channel_type;
-	} else {
-		ibss.channel_type = NL80211_CHAN_NO_HT;
-	}
-
-	ibss.channel = rdev_freq_to_chan(rdev,
-		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
-		ibss.channel_type);
-	if (!ibss.channel ||
-	    ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
-	    ibss.channel->flags & IEEE80211_CHAN_DISABLED)
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
 		return -EINVAL;
 
-	/* Both channels should be able to initiate communication */
-	if ((ibss.channel_type == NL80211_CHAN_HT40PLUS ||
-	     ibss.channel_type == NL80211_CHAN_HT40MINUS) &&
-	    !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel,
-					  ibss.channel_type))
+	if (ibss.chandef.width > NL80211_CHAN_WIDTH_40)
 		return -EINVAL;
+	if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+	    !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+		return -EINVAL;
 
 	ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
 	ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
@@ -5325,7 +5512,7 @@
 		int n_rates =
 			nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
 		struct ieee80211_supported_band *sband =
-			wiphy->bands[ibss.channel->band];
+			wiphy->bands[ibss.chandef.chan->band];
 
 		err = ieee80211_get_ratemask(sband, rates, n_rates,
 					     &ibss.basic_rates);
@@ -5339,10 +5526,19 @@
 		return -EINVAL;
 
 	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
+		bool no_ht = false;
+
 		connkeys = nl80211_parse_connkeys(rdev,
-					info->attrs[NL80211_ATTR_KEYS]);
+					  info->attrs[NL80211_ATTR_KEYS],
+					  &no_ht);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
+
+		if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) &&
+		    no_ht) {
+			kfree(connkeys);
+			return -EINVAL;
+		}
 	}
 
 	ibss.control_port =
@@ -5368,6 +5564,36 @@
 	return cfg80211_leave_ibss(rdev, dev, false);
 }
 
+static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	int mcast_rate[IEEE80211_NUM_BANDS];
+	u32 nla_rate;
+	int err;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	if (!rdev->ops->set_mcast_rate)
+		return -EOPNOTSUPP;
+
+	memset(mcast_rate, 0, sizeof(mcast_rate));
+
+	if (!info->attrs[NL80211_ATTR_MCAST_RATE])
+		return -EINVAL;
+
+	nla_rate = nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]);
+	if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate))
+		return -EINVAL;
+
+	err = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate);
+
+	return err;
+}
+
+
 #ifdef CONFIG_NL80211_TESTMODE
 static struct genl_multicast_group nl80211_testmode_mcgrp = {
 	.name = "testmode",
@@ -5384,7 +5610,7 @@
 	err = -EOPNOTSUPP;
 	if (rdev->ops->testmode_cmd) {
 		rdev->testmode_info = info;
-		err = rdev->ops->testmode_cmd(&rdev->wiphy,
+		err = rdev_testmode_cmd(rdev,
 				nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
 				nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
 		rdev->testmode_info = NULL;
@@ -5466,8 +5692,7 @@
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
-		err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb,
-					       data, data_len);
+		err = rdev_testmode_dump(rdev, skb, cb, data, data_len);
 		nla_nest_end(skb, tmdata);
 
 		if (err == -ENOBUFS || err == -ENOENT) {
@@ -5596,7 +5821,8 @@
 	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
 		connect.auth_type =
 			nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
-		if (!nl80211_valid_auth_type(connect.auth_type))
+		if (!nl80211_valid_auth_type(rdev, connect.auth_type,
+					     NL80211_CMD_CONNECT))
 			return -EINVAL;
 	} else
 		connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
@@ -5642,7 +5868,7 @@
 
 	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
 		connkeys = nl80211_parse_connkeys(rdev,
-					info->attrs[NL80211_ATTR_KEYS]);
+					  info->attrs[NL80211_ATTR_KEYS], NULL);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
 	}
@@ -5771,7 +5997,7 @@
 	if (!rdev->ops->flush_pmksa)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
+	return rdev_flush_pmksa(rdev, dev);
 }
 
 static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -5798,10 +6024,10 @@
 	status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
 	dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
 
-	return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
-				    dialog_token, status_code,
-				    nla_data(info->attrs[NL80211_ATTR_IE]),
-				    nla_len(info->attrs[NL80211_ATTR_IE]));
+	return rdev_tdls_mgmt(rdev, dev, peer, action_code,
+			      dialog_token, status_code,
+			      nla_data(info->attrs[NL80211_ATTR_IE]),
+			      nla_len(info->attrs[NL80211_ATTR_IE]));
 }
 
 static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
@@ -5822,7 +6048,7 @@
 	operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation);
+	return rdev_tdls_oper(rdev, dev, peer, operation);
 }
 
 static int nl80211_remain_on_channel(struct sk_buff *skb,
@@ -5830,12 +6056,11 @@
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct wireless_dev *wdev = info->user_ptr[1];
-	struct ieee80211_channel *chan;
+	struct cfg80211_chan_def chandef;
 	struct sk_buff *msg;
 	void *hdr;
 	u64 cookie;
-	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-	u32 freq, duration;
+	u32 duration;
 	int err;
 
 	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
@@ -5856,14 +6081,9 @@
 	    duration > rdev->wiphy.max_remain_on_channel_duration)
 		return -EINVAL;
 
-	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
-	    !nl80211_valid_channel_type(info, &channel_type))
-		return -EINVAL;
-
-	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
-	chan = rdev_freq_to_chan(rdev, freq, channel_type);
-	if (chan == NULL)
-		return -EINVAL;
+	err = nl80211_parse_chandef(rdev, info, &chandef);
+	if (err)
+		return err;
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
@@ -5877,8 +6097,8 @@
 		goto free_msg;
 	}
 
-	err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
-					   channel_type, duration, &cookie);
+	err = rdev_remain_on_channel(rdev, wdev, chandef.chan,
+				     duration, &cookie);
 
 	if (err)
 		goto free_msg;
@@ -5912,7 +6132,7 @@
 
 	cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
 
-	return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
+	return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
 }
 
 static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
@@ -6055,7 +6275,7 @@
 		}
 	}
 
-	return rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
+	return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
 }
 
 static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -6097,10 +6317,7 @@
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct wireless_dev *wdev = info->user_ptr[1];
-	struct ieee80211_channel *chan;
-	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-	bool channel_type_valid = false;
-	u32 freq;
+	struct cfg80211_chan_def chandef;
 	int err;
 	void *hdr = NULL;
 	u64 cookie;
@@ -6110,8 +6327,7 @@
 
 	dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
 
-	if (!info->attrs[NL80211_ATTR_FRAME] ||
-	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
+	if (!info->attrs[NL80211_ATTR_FRAME])
 		return -EINVAL;
 
 	if (!rdev->ops->mgmt_tx)
@@ -6146,12 +6362,6 @@
 
 	}
 
-	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
-		if (!nl80211_valid_channel_type(info, &channel_type))
-			return -EINVAL;
-		channel_type_valid = true;
-	}
-
 	offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
 
 	if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
@@ -6159,10 +6369,9 @@
 
 	no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
-	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
-	chan = rdev_freq_to_chan(rdev, freq, channel_type);
-	if (chan == NULL)
-		return -EINVAL;
+	err = nl80211_parse_chandef(rdev, info, &chandef);
+	if (err)
+		return err;
 
 	if (!dont_wait_for_ack) {
 		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -6178,8 +6387,7 @@
 		}
 	}
 
-	err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type,
-				    channel_type_valid, wait,
+	err = cfg80211_mlme_mgmt_tx(rdev, wdev, chandef.chan, offchan, wait,
 				    nla_data(info->attrs[NL80211_ATTR_FRAME]),
 				    nla_len(info->attrs[NL80211_ATTR_FRAME]),
 				    no_cck, dont_wait_for_ack, &cookie);
@@ -6230,7 +6438,7 @@
 
 	cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
 
-	return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
+	return rdev_mgmt_tx_cancel_wait(rdev, wdev, cookie);
 }
 
 static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
@@ -6260,8 +6468,7 @@
 	if (state == wdev->ps)
 		return 0;
 
-	err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, state,
-					wdev->ps_timeout);
+	err = rdev_set_power_mgmt(rdev, dev, state, wdev->ps_timeout);
 	if (!err)
 		wdev->ps = state;
 	return err;
@@ -6341,8 +6548,7 @@
 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev,
-					     rate, pkts, intvl);
+	return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl);
 }
 
 static int nl80211_set_cqm_rssi(struct genl_info *info,
@@ -6364,8 +6570,7 @@
 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
-					      threshold, hysteresis);
+	return rdev_set_cqm_rssi_config(rdev, dev, threshold, hysteresis);
 }
 
 static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
@@ -6446,21 +6651,12 @@
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-		enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
-
-		if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
-		    !nl80211_valid_channel_type(info, &channel_type))
-			return -EINVAL;
-
-		setup.channel = rdev_freq_to_chan(rdev,
-			nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
-			channel_type);
-		if (!setup.channel)
-			return -EINVAL;
-		setup.channel_type = channel_type;
+		err = nl80211_parse_chandef(rdev, info, &setup.chandef);
+		if (err)
+			return err;
 	} else {
 		/* cfg80211_join_mesh() will sort it out */
-		setup.channel = NULL;
+		setup.chandef.chan = NULL;
 	}
 
 	return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
@@ -6690,7 +6886,7 @@
 
  set_wakeup:
 	if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
-		rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
+		rdev_set_wakeup(rdev, rdev->wowlan);
 
 	return 0;
  error:
@@ -6746,7 +6942,7 @@
 		goto out;
 	}
 
-	err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data);
+	err = rdev_set_rekey_data(rdev, dev, &rekey_data);
  out:
 	wdev_unlock(wdev);
 	return err;
@@ -6805,7 +7001,7 @@
 
 	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
+	err = rdev_probe_client(rdev, dev, addr, &cookie);
 	if (err)
 		goto free_msg;
 
@@ -6826,16 +7022,35 @@
 static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct cfg80211_beacon_registration *reg, *nreg;
+	int rv;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
 		return -EOPNOTSUPP;
 
-	if (rdev->ap_beacons_nlportid)
-		return -EBUSY;
+	nreg = kzalloc(sizeof(*nreg), GFP_KERNEL);
+	if (!nreg)
+		return -ENOMEM;
 
-	rdev->ap_beacons_nlportid = info->snd_portid;
+	/* First, check if already registered. */
+	spin_lock_bh(&rdev->beacon_registrations_lock);
+	list_for_each_entry(reg, &rdev->beacon_registrations, list) {
+		if (reg->nlportid == info->snd_portid) {
+			rv = -EALREADY;
+			goto out_err;
+		}
+	}
+	/* Add it to the list */
+	nreg->nlportid = info->snd_portid;
+	list_add(&nreg->list, &rdev->beacon_registrations);
+
+	spin_unlock_bh(&rdev->beacon_registrations_lock);
 
 	return 0;
+out_err:
+	spin_unlock_bh(&rdev->beacon_registrations_lock);
+	kfree(nreg);
+	return rv;
 }
 
 static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
@@ -6859,7 +7074,7 @@
 	if (err)
 		return err;
 
-	err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+	err = rdev_start_p2p_device(rdev, wdev);
 	if (err)
 		return err;
 
@@ -6885,7 +7100,7 @@
 	if (!wdev->p2p_started)
 		return 0;
 
-	rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+	rdev_stop_p2p_device(rdev, wdev);
 	wdev->p2p_started = false;
 
 	mutex_lock(&rdev->devlist_mtx);
@@ -7552,6 +7767,14 @@
 		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_SET_MCAST_RATE,
+		.doit = nl80211_set_mcast_rate,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7622,6 +7845,9 @@
 	    nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
 		goto nla_put_failure;
 
+	if (req->flags)
+		nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags);
+
 	return 0;
  nla_put_failure:
 	return -ENOBUFS;
@@ -8250,7 +8476,6 @@
 	int cmd, struct cfg80211_registered_device *rdev,
 	struct wireless_dev *wdev, u64 cookie,
 	struct ieee80211_channel *chan,
-	enum nl80211_channel_type channel_type,
 	unsigned int duration, gfp_t gfp)
 {
 	struct sk_buff *msg;
@@ -8271,7 +8496,8 @@
 					 wdev->netdev->ifindex)) ||
 	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
 	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
-	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+			NL80211_CHAN_NO_HT) ||
 	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
 		goto nla_put_failure;
 
@@ -8293,23 +8519,20 @@
 void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
 				    struct wireless_dev *wdev, u64 cookie,
 				    struct ieee80211_channel *chan,
-				    enum nl80211_channel_type channel_type,
 				    unsigned int duration, gfp_t gfp)
 {
 	nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
 					  rdev, wdev, cookie, chan,
-					  channel_type, duration, gfp);
+					  duration, gfp);
 }
 
 void nl80211_send_remain_on_channel_cancel(
 	struct cfg80211_registered_device *rdev,
 	struct wireless_dev *wdev,
-	u64 cookie, struct ieee80211_channel *chan,
-	enum nl80211_channel_type channel_type, gfp_t gfp)
+	u64 cookie, struct ieee80211_channel *chan, gfp_t gfp)
 {
 	nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
-					  rdev, wdev, cookie, chan,
-					  channel_type, 0, gfp);
+					  rdev, wdev, cookie, chan, 0, gfp);
 }
 
 void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
@@ -8665,8 +8888,8 @@
 }
 
 void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
-			      struct net_device *netdev, int freq,
-			      enum nl80211_channel_type type, gfp_t gfp)
+			      struct net_device *netdev,
+			      struct cfg80211_chan_def *chandef, gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
@@ -8681,9 +8904,10 @@
 		return;
 	}
 
-	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
-	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
+
+	if (nl80211_send_chandef(msg, chandef))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -8800,7 +9024,10 @@
 	void *hdr;
 	int err;
 
+	trace_cfg80211_probe_status(dev, addr, cookie, acked);
+
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+
 	if (!msg)
 		return;
 
@@ -8835,44 +9062,96 @@
 
 void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 				 const u8 *frame, size_t len,
-				 int freq, int sig_dbm, gfp_t gfp)
+				 int freq, int sig_dbm)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 	struct sk_buff *msg;
 	void *hdr;
-	u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid);
+	struct cfg80211_beacon_registration *reg;
 
-	if (!nlportid)
-		return;
+	trace_cfg80211_report_obss_beacon(wiphy, frame, len, freq, sig_dbm);
 
-	msg = nlmsg_new(len + 100, gfp);
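+	/* deliver the beacon frame to every netlink port registered for reports */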
+	spin_lock_bh(&rdev->beacon_registrations_lock);
+	list_for_each_entry(reg, &rdev->beacon_registrations, list) {
+		msg = nlmsg_new(len + 100, GFP_ATOMIC);
+		if (!msg) {
+			spin_unlock_bh(&rdev->beacon_registrations_lock);
+			return;
+		}
+
+		hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+		if (!hdr)
+			goto nla_put_failure;
+
+		if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+		    (freq &&
+		     nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
+		    (sig_dbm &&
+		     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+		    nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+			goto nla_put_failure;
+
+		genlmsg_end(msg, hdr);
+
+		genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid);
+	}
+	spin_unlock_bh(&rdev->beacon_registrations_lock);
+	return;
+
+ nla_put_failure:
+	spin_unlock_bh(&rdev->beacon_registrations_lock);
+	if (hdr)
+		genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_obss_beacon);
+
+void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
+				enum nl80211_tdls_operation oper,
+				u16 reason_code, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper,
+					 reason_code);
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 	if (!msg)
 		return;
 
-	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER);
 	if (!hdr) {
 		nlmsg_free(msg);
 		return;
 	}
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-	    (freq &&
-	     nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
-	    (sig_dbm &&
-	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
-	    nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) ||
+	    (reason_code > 0 &&
+	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code)))
 		goto nla_put_failure;
 
-	genlmsg_end(msg, hdr);
+	err = genlmsg_end(msg, hdr);
+	if (err < 0) {
+		nlmsg_free(msg);
+		return;
+	}
 
-	genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
 	return;
 
  nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	nlmsg_free(msg);
 }
-EXPORT_SYMBOL(cfg80211_report_obss_beacon);
+EXPORT_SYMBOL(cfg80211_tdls_oper_request);
 
 static int nl80211_netlink_notify(struct notifier_block * nb,
 				  unsigned long state,
@@ -8881,6 +9160,7 @@
 	struct netlink_notify *notify = _notify;
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
+	struct cfg80211_beacon_registration *reg, *tmp;
 
 	if (state != NETLINK_URELEASE)
 		return NOTIFY_DONE;
@@ -8890,8 +9170,17 @@
 	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
 		list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
 			cfg80211_mlme_unregister_socket(wdev, notify->portid);
-		if (rdev->ap_beacons_nlportid == notify->portid)
-			rdev->ap_beacons_nlportid = 0;
+
+		spin_lock_bh(&rdev->beacon_registrations_lock);
+		list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
+					 list) {
+			if (reg->nlportid == notify->portid) {
+				list_del(&reg->list);
+				kfree(reg);
+				break;
+			}
+		}
+		spin_unlock_bh(&rdev->beacon_registrations_lock);
 	}
 
 	rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f615351..2acba84 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -76,13 +76,11 @@
 void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
 				    struct wireless_dev *wdev, u64 cookie,
 				    struct ieee80211_channel *chan,
-				    enum nl80211_channel_type channel_type,
 				    unsigned int duration, gfp_t gfp);
 void nl80211_send_remain_on_channel_cancel(
 	struct cfg80211_registered_device *rdev,
 	struct wireless_dev *wdev,
-	u64 cookie, struct ieee80211_channel *chan,
-	enum nl80211_channel_type channel_type, gfp_t gfp);
+	u64 cookie, struct ieee80211_channel *chan, gfp_t gfp);
 
 void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
 			    struct net_device *dev, const u8 *mac_addr,
@@ -129,8 +127,8 @@
 				    const u8 *bssid, bool preauth, gfp_t gfp);
 
 void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
-			      struct net_device *dev, int freq,
-			      enum nl80211_channel_type type, gfp_t gfp);
+			      struct net_device *dev,
+			      struct cfg80211_chan_def *chandef, gfp_t gfp);
 
 bool nl80211_unexpected_frame(struct net_device *dev,
 			      const u8 *addr, gfp_t gfp);
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
new file mode 100644
index 0000000..6c0c819
--- /dev/null
+++ b/net/wireless/rdev-ops.h
@@ -0,0 +1,878 @@
+#ifndef __CFG80211_RDEV_OPS
+#define __CFG80211_RDEV_OPS
+
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+#include "core.h"
+#include "trace.h"
+
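+/*
+ * Thin inline wrappers around the driver callbacks in rdev->ops: each one
+ * emits a tracepoint for the call, invokes the corresponding callback and
+ * traces its return value, so every driver entry point is traced uniformly.
+ */
+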
+static inline int rdev_suspend(struct cfg80211_registered_device *rdev)
+{
+	int ret;
+	trace_rdev_suspend(&rdev->wiphy, rdev->wowlan);
+	ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_resume(struct cfg80211_registered_device *rdev)
+{
+	int ret;
+	trace_rdev_resume(&rdev->wiphy);
+	ret = rdev->ops->resume(&rdev->wiphy);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev,
+				   bool enabled)
+{
+	trace_rdev_set_wakeup(&rdev->wiphy, enabled);
+	rdev->ops->set_wakeup(&rdev->wiphy, enabled);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline struct wireless_dev
+*rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name,
+		       enum nl80211_iftype type, u32 *flags,
+		       struct vif_params *params)
+{
+	struct wireless_dev *ret;
+	trace_rdev_add_virtual_intf(&rdev->wiphy, name, type);
+	ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, type, flags,
+					  params);
+	trace_rdev_return_wdev(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_del_virtual_intf(struct cfg80211_registered_device *rdev,
+		      struct wireless_dev *wdev)
+{
+	int ret;
+	trace_rdev_del_virtual_intf(&rdev->wiphy, wdev);
+	ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_change_virtual_intf(struct cfg80211_registered_device *rdev,
+			 struct net_device *dev, enum nl80211_iftype type,
+			 u32 *flags, struct vif_params *params)
+{
+	int ret;
+	trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type);
+	ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, flags,
+					     params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_add_key(struct cfg80211_registered_device *rdev,
+			       struct net_device *netdev, u8 key_index,
+			       bool pairwise, const u8 *mac_addr,
+			       struct key_params *params)
+{
+	int ret;
+	trace_rdev_add_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
+	ret = rdev->ops->add_key(&rdev->wiphy, netdev, key_index, pairwise,
+				  mac_addr, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev,
+	     u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
+	     void (*callback)(void *cookie, struct key_params*))
+{
+	int ret;
+	trace_rdev_get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
+	ret = rdev->ops->get_key(&rdev->wiphy, netdev, key_index, pairwise,
+				  mac_addr, cookie, callback);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_del_key(struct cfg80211_registered_device *rdev,
+			       struct net_device *netdev, u8 key_index,
+			       bool pairwise, const u8 *mac_addr)
+{
+	int ret;
+	trace_rdev_del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
+	ret = rdev->ops->del_key(&rdev->wiphy, netdev, key_index, pairwise,
+				  mac_addr);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_default_key(struct cfg80211_registered_device *rdev,
+		     struct net_device *netdev, u8 key_index, bool unicast,
+		     bool multicast)
+{
+	int ret;
+	trace_rdev_set_default_key(&rdev->wiphy, netdev, key_index,
+				   unicast, multicast);
+	ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, key_index,
+					  unicast, multicast);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev,
+			  struct net_device *netdev, u8 key_index)
+{
+	int ret;
+	trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, key_index);
+	ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev,
+					       key_index);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_start_ap(struct cfg80211_registered_device *rdev,
+				struct net_device *dev,
+				struct cfg80211_ap_settings *settings)
+{
+	int ret;
+	trace_rdev_start_ap(&rdev->wiphy, dev, settings);
+	ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev,
+				     struct net_device *dev,
+				     struct cfg80211_beacon_data *info)
+{
+	int ret;
+	trace_rdev_change_beacon(&rdev->wiphy, dev, info);
+	ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev,
+			       struct net_device *dev)
+{
+	int ret;
+	trace_rdev_stop_ap(&rdev->wiphy, dev);
+	ret = rdev->ops->stop_ap(&rdev->wiphy, dev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_add_station(struct cfg80211_registered_device *rdev,
+				   struct net_device *dev, u8 *mac,
+				   struct station_parameters *params)
+{
+	int ret;
+	trace_rdev_add_station(&rdev->wiphy, dev, mac, params);
+	ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_del_station(struct cfg80211_registered_device *rdev,
+				   struct net_device *dev, u8 *mac)
+{
+	int ret;
+	trace_rdev_del_station(&rdev->wiphy, dev, mac);
+	ret = rdev->ops->del_station(&rdev->wiphy, dev, mac);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_change_station(struct cfg80211_registered_device *rdev,
+				      struct net_device *dev, u8 *mac,
+				      struct station_parameters *params)
+{
+	int ret;
+	trace_rdev_change_station(&rdev->wiphy, dev, mac, params);
+	ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_get_station(struct cfg80211_registered_device *rdev,
+				   struct net_device *dev, u8 *mac,
+				   struct station_info *sinfo)
+{
+	int ret;
+	trace_rdev_get_station(&rdev->wiphy, dev, mac);
+	ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo);
+	trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo);
+	return ret;
+}
+
+static inline int rdev_dump_station(struct cfg80211_registered_device *rdev,
+				    struct net_device *dev, int idx, u8 *mac,
+				    struct station_info *sinfo)
+{
+	int ret;
+	trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac);
+	ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo);
+	trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo);
+	return ret;
+}
+
+static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev, u8 *dst, u8 *next_hop)
+{
+	int ret;
+	trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop);
+	ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev, u8 *dst)
+{
+	int ret;
+	trace_rdev_del_mpath(&rdev->wiphy, dev, dst);
+	ret = rdev->ops->del_mpath(&rdev->wiphy, dev, dst);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev,
+				    struct net_device *dev, u8 *dst,
+				    u8 *next_hop)
+{
+	int ret;
+	trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop);
+	ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev, u8 *dst, u8 *next_hop,
+				 struct mpath_info *pinfo)
+{
+	int ret;
+	trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop);
+	ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo);
+	trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
+	return ret;
+
+}
+
+static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev,
+				  struct net_device *dev, int idx, u8 *dst,
+				  u8 *next_hop, struct mpath_info *pinfo)
+
+{
+	int ret;
+	trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop);
+	ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop,
+				     pinfo);
+	trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
+	return ret;
+}
+
+static inline int
+rdev_get_mesh_config(struct cfg80211_registered_device *rdev,
+		     struct net_device *dev, struct mesh_config *conf)
+{
+	int ret;
+	trace_rdev_get_mesh_config(&rdev->wiphy, dev);
+	ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf);
+	trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf);
+	return ret;
+}
+
+static inline int
+rdev_update_mesh_config(struct cfg80211_registered_device *rdev,
+			struct net_device *dev, u32 mask,
+			const struct mesh_config *nconf)
+{
+	int ret;
+	trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf);
+	ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev,
+				 const struct mesh_config *conf,
+				 const struct mesh_setup *setup)
+{
+	int ret;
+	trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup);
+	ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+
+static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev,
+				  struct net_device *dev)
+{
+	int ret;
+	trace_rdev_leave_mesh(&rdev->wiphy, dev);
+	ret = rdev->ops->leave_mesh(&rdev->wiphy, dev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_change_bss(struct cfg80211_registered_device *rdev,
+				  struct net_device *dev,
+				  struct bss_parameters *params)
+
+{
+	int ret;
+	trace_rdev_change_bss(&rdev->wiphy, dev, params);
+	ret = rdev->ops->change_bss(&rdev->wiphy, dev, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev,
+				      struct net_device *dev,
+				      struct ieee80211_txq_params *params)
+
+{
+	int ret;
+	trace_rdev_set_txq_params(&rdev->wiphy, dev, params);
+	ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev,
+			       struct net_device *dev,
+			       struct ieee80211_channel *chan)
+{
+	int ret;
+	trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan);
+	ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_monitor_channel(struct cfg80211_registered_device *rdev,
+			 struct cfg80211_chan_def *chandef)
+{
+	int ret;
+	trace_rdev_set_monitor_channel(&rdev->wiphy, chandef);
+	ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_scan(struct cfg80211_registered_device *rdev,
+			    struct cfg80211_scan_request *request)
+{
+	int ret;
+	trace_rdev_scan(&rdev->wiphy, request);
+	ret = rdev->ops->scan(&rdev->wiphy, request);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_auth(struct cfg80211_registered_device *rdev,
+			    struct net_device *dev,
+			    struct cfg80211_auth_request *req)
+{
+	int ret;
+	trace_rdev_auth(&rdev->wiphy, dev, req);
+	ret = rdev->ops->auth(&rdev->wiphy, dev, req);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_assoc(struct cfg80211_registered_device *rdev,
+			     struct net_device *dev,
+			     struct cfg80211_assoc_request *req)
+{
+	int ret;
+	trace_rdev_assoc(&rdev->wiphy, dev, req);
+	ret = rdev->ops->assoc(&rdev->wiphy, dev, req);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_deauth(struct cfg80211_registered_device *rdev,
+			      struct net_device *dev,
+			      struct cfg80211_deauth_request *req)
+{
+	int ret;
+	trace_rdev_deauth(&rdev->wiphy, dev, req);
+	ret = rdev->ops->deauth(&rdev->wiphy, dev, req);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_disassoc(struct cfg80211_registered_device *rdev,
+				struct net_device *dev,
+				struct cfg80211_disassoc_request *req)
+{
+	int ret;
+	trace_rdev_disassoc(&rdev->wiphy, dev, req);
+	ret = rdev->ops->disassoc(&rdev->wiphy, dev, req);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_connect(struct cfg80211_registered_device *rdev,
+			       struct net_device *dev,
+			       struct cfg80211_connect_params *sme)
+{
+	int ret;
+	trace_rdev_connect(&rdev->wiphy, dev, sme);
+	ret = rdev->ops->connect(&rdev->wiphy, dev, sme);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
+				  struct net_device *dev, u16 reason_code)
+{
+	int ret;
+	trace_rdev_disconnect(&rdev->wiphy, dev, reason_code);
+	ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev,
+				 struct cfg80211_ibss_params *params)
+{
+	int ret;
+	trace_rdev_join_ibss(&rdev->wiphy, dev, params);
+	ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
+				  struct net_device *dev)
+{
+	int ret;
+	trace_rdev_leave_ibss(&rdev->wiphy, dev);
+	ret = rdev->ops->leave_ibss(&rdev->wiphy, dev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
+{
+	int ret;
+	trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
+	ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
+				    struct wireless_dev *wdev,
+				    enum nl80211_tx_power_setting type, int mbm)
+{
+	int ret;
+	trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm);
+	ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
+				    struct wireless_dev *wdev, int *dbm)
+{
+	int ret;
+	trace_rdev_get_tx_power(&rdev->wiphy, wdev);
+	ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm);
+	trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
+	return ret;
+}
+
+static inline int rdev_set_wds_peer(struct cfg80211_registered_device *rdev,
+				    struct net_device *dev, const u8 *addr)
+{
+	int ret;
+	trace_rdev_set_wds_peer(&rdev->wiphy, dev, addr);
+	ret = rdev->ops->set_wds_peer(&rdev->wiphy, dev, addr);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
+{
+	trace_rdev_rfkill_poll(&rdev->wiphy);
+	rdev->ops->rfkill_poll(&rdev->wiphy);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
+#ifdef CONFIG_NL80211_TESTMODE
+static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev,
+				    void *data, int len)
+{
+	int ret;
+	trace_rdev_testmode_cmd(&rdev->wiphy);
+	ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev,
+				     struct sk_buff *skb,
+				     struct netlink_callback *cb, void *data,
+				     int len)
+{
+	int ret;
+	trace_rdev_testmode_dump(&rdev->wiphy);
+	ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+#endif
+
+static inline int
+rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev,
+		      struct net_device *dev, const u8 *peer,
+		      const struct cfg80211_bitrate_mask *mask)
+{
+	int ret;
+	trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
+	ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev,
+				   struct net_device *netdev, int idx,
+				   struct survey_info *info)
+{
+	int ret;
+	trace_rdev_dump_survey(&rdev->wiphy, netdev, idx);
+	ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info);
+	if (ret < 0)
+		trace_rdev_return_int(&rdev->wiphy, ret);
+	else
+		trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info);
+	return ret;
+}
+
+static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev,
+				 struct net_device *netdev,
+				 struct cfg80211_pmksa *pmksa)
+{
+	int ret;
+	trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa);
+	ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev,
+				 struct net_device *netdev,
+				 struct cfg80211_pmksa *pmksa)
+{
+	int ret;
+	trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa);
+	ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev,
+				   struct net_device *netdev)
+{
+	int ret;
+	trace_rdev_flush_pmksa(&rdev->wiphy, netdev);
+	ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_remain_on_channel(struct cfg80211_registered_device *rdev,
+		       struct wireless_dev *wdev,
+		       struct ieee80211_channel *chan,
+		       unsigned int duration, u64 *cookie)
+{
+	int ret;
+	trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration);
+	ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
+					   duration, cookie);
+	trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
+	return ret;
+}
+
+static inline int
+rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev,
+			      struct wireless_dev *wdev, u64 cookie)
+{
+	int ret;
+	trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
+	ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev,
+			       struct wireless_dev *wdev,
+			       struct ieee80211_channel *chan, bool offchan,
+			       unsigned int wait, const u8 *buf, size_t len,
+			       bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+	int ret;
+	trace_rdev_mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
+			   wait, no_cck, dont_wait_for_ack);
+	ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
+				  wait, buf, len, no_cck,
+				  dont_wait_for_ack, cookie);
+	trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
+	return ret;
+}
+
+static inline int
+rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev,
+			 struct wireless_dev *wdev, u64 cookie)
+{
+	int ret;
+	trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
+	ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev,
+				      struct net_device *dev, bool enabled,
+				      int timeout)
+{
+	int ret;
+	trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout);
+	ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev,
+			 struct net_device *dev, s32 rssi_thold, u32 rssi_hyst)
+{
+	int ret;
+	trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold,
+				       rssi_hyst);
+	ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold,
+				       rssi_hyst);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev,
+			struct net_device *dev, u32 rate, u32 pkts, u32 intvl)
+{
+	int ret;
+	trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl);
+	ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts,
+					     intvl);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void
+rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
+			 struct wireless_dev *wdev, u16 frame_type, bool reg)
+{
+	trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg);
+	rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev,
+				   u32 tx_ant, u32 rx_ant)
+{
+	int ret;
+	trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+	ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev,
+				   u32 *tx_ant, u32 *rx_ant)
+{
+	int ret;
+	trace_rdev_get_antenna(&rdev->wiphy);
+	ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant);
+	if (ret)
+		trace_rdev_return_int(&rdev->wiphy, ret);
+	else
+		trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant,
+					    *rx_ant);
+	return ret;
+}
+
+static inline int rdev_set_ringparam(struct cfg80211_registered_device *rdev,
+				     u32 tx, u32 rx)
+{
+	int ret;
+	trace_rdev_set_ringparam(&rdev->wiphy, tx, rx);
+	ret = rdev->ops->set_ringparam(&rdev->wiphy, tx, rx);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void rdev_get_ringparam(struct cfg80211_registered_device *rdev,
+				      u32 *tx, u32 *tx_max, u32 *rx,
+				      u32 *rx_max)
+{
+	trace_rdev_get_ringparam(&rdev->wiphy);
+	rdev->ops->get_ringparam(&rdev->wiphy, tx, tx_max, rx, rx_max);
+	trace_rdev_return_void_tx_rx(&rdev->wiphy, *tx, *tx_max, *rx, *rx_max);
+}
+
+static inline int
+rdev_sched_scan_start(struct cfg80211_registered_device *rdev,
+		      struct net_device *dev,
+		      struct cfg80211_sched_scan_request *request)
+{
+	int ret;
+	trace_rdev_sched_scan_start(&rdev->wiphy, dev, request);
+	ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev,
+				       struct net_device *dev)
+{
+	int ret;
+	trace_rdev_sched_scan_stop(&rdev->wiphy, dev);
+	ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev,
+				      struct net_device *dev,
+				      struct cfg80211_gtk_rekey_data *data)
+{
+	int ret;
+	trace_rdev_set_rekey_data(&rdev->wiphy, dev);
+	ret = rdev->ops->set_rekey_data(&rdev->wiphy, dev, data);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev, u8 *peer,
+				 u8 action_code, u8 dialog_token,
+				 u16 status_code, const u8 *buf, size_t len)
+{
+	int ret;
+	trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+			     dialog_token, status_code, buf, len);
+	ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+				   dialog_token, status_code, buf, len);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev,
+				 struct net_device *dev, u8 *peer,
+				 enum nl80211_tdls_operation oper)
+{
+	int ret;
+	trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper);
+	ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int rdev_probe_client(struct cfg80211_registered_device *rdev,
+				    struct net_device *dev, const u8 *peer,
+				    u64 *cookie)
+{
+	int ret;
+	trace_rdev_probe_client(&rdev->wiphy, dev, peer);
+	ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie);
+	trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
+	return ret;
+}
+
+static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev,
+				     struct net_device *dev, u16 noack_map)
+{
+	int ret;
+	trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map);
+	ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline int
+rdev_get_et_sset_count(struct cfg80211_registered_device *rdev,
+		       struct net_device *dev, int sset)
+{
+	int ret;
+	trace_rdev_get_et_sset_count(&rdev->wiphy, dev, sset);
+	ret = rdev->ops->get_et_sset_count(&rdev->wiphy, dev, sset);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void rdev_get_et_stats(struct cfg80211_registered_device *rdev,
+				     struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	trace_rdev_get_et_stats(&rdev->wiphy, dev);
+	rdev->ops->get_et_stats(&rdev->wiphy, dev, stats, data);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline void rdev_get_et_strings(struct cfg80211_registered_device *rdev,
+				       struct net_device *dev, u32 sset,
+				       u8 *data)
+{
+	trace_rdev_get_et_strings(&rdev->wiphy, dev, sset);
+	rdev->ops->get_et_strings(&rdev->wiphy, dev, sset, data);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline int
+rdev_get_channel(struct cfg80211_registered_device *rdev,
+		 struct wireless_dev *wdev,
+		 struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	trace_rdev_get_channel(&rdev->wiphy, wdev);
+	ret = rdev->ops->get_channel(&rdev->wiphy, wdev, chandef);
+	trace_rdev_return_chandef(&rdev->wiphy, ret, chandef);
+
+	return ret;
+}
+
+static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev,
+					struct wireless_dev *wdev)
+{
+	int ret;
+
+	trace_rdev_start_p2p_device(&rdev->wiphy, wdev);
+	ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
+static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
+					struct wireless_dev *wdev)
+{
+	trace_rdev_stop_p2p_device(&rdev->wiphy, wdev);
+	rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 9730c98..9596015 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -17,9 +17,58 @@
 #include "core.h"
 #include "nl80211.h"
 #include "wext-compat.h"
+#include "rdev-ops.h"
 
 #define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
 
+static void bss_release(struct kref *ref)
+{
+	struct cfg80211_internal_bss *bss;
+
+	bss = container_of(ref, struct cfg80211_internal_bss, ref);
+	if (bss->pub.free_priv)
+		bss->pub.free_priv(&bss->pub);
+
+	if (bss->beacon_ies_allocated)
+		kfree(bss->pub.beacon_ies);
+	if (bss->proberesp_ies_allocated)
+		kfree(bss->pub.proberesp_ies);
+
+	BUG_ON(atomic_read(&bss->hold));
+
+	kfree(bss);
+}
+
+/* must hold dev->bss_lock! */
+static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+				  struct cfg80211_internal_bss *bss)
+{
+	list_del_init(&bss->list);
+	rb_erase(&bss->rbn, &dev->bss_tree);
+	kref_put(&bss->ref, bss_release);
+}
+
+/* must hold dev->bss_lock! */
+static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
+				  unsigned long expire_time)
+{
+	struct cfg80211_internal_bss *bss, *tmp;
+	bool expired = false;
+
+	list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
+		if (atomic_read(&bss->hold))
+			continue;
+		if (!time_after(expire_time, bss->ts))
+			continue;
+
+		__cfg80211_unlink_bss(dev, bss);
+		expired = true;
+	}
+
+	if (expired)
+		dev->bss_generation++;
+}
+
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
 {
 	struct cfg80211_scan_request *request;
@@ -45,10 +94,17 @@
 	if (wdev->netdev)
 		cfg80211_sme_scan_done(wdev->netdev);
 
-	if (request->aborted)
+	if (request->aborted) {
 		nl80211_send_scan_aborted(rdev, wdev);
-	else
+	} else {
+		if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
+			/* flush entries from previous scans */
+			spin_lock_bh(&rdev->bss_lock);
+			__cfg80211_bss_expire(rdev, request->scan_start);
+			spin_unlock_bh(&rdev->bss_lock);
+		}
 		nl80211_send_scan_done(rdev, wdev);
+	}
 
 #ifdef CONFIG_CFG80211_WEXT
 	if (wdev->netdev && !request->aborted) {
@@ -89,6 +145,7 @@
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
 {
+	trace_cfg80211_scan_done(request, aborted);
 	WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
 
 	request->aborted = aborted;
@@ -99,22 +156,34 @@
 void __cfg80211_sched_scan_results(struct work_struct *wk)
 {
 	struct cfg80211_registered_device *rdev;
+	struct cfg80211_sched_scan_request *request;
 
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
+	request = rdev->sched_scan_req;
+
 	mutex_lock(&rdev->sched_scan_mtx);
 
 	/* we don't have sched_scan_req anymore if the scan is stopping */
-	if (rdev->sched_scan_req)
-		nl80211_send_sched_scan_results(rdev,
-						rdev->sched_scan_req->dev);
+	if (request) {
+		if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
+			/* flush entries from previous scans */
+			spin_lock_bh(&rdev->bss_lock);
+			__cfg80211_bss_expire(rdev, request->scan_start);
+			spin_unlock_bh(&rdev->bss_lock);
+			request->scan_start =
+				jiffies + msecs_to_jiffies(request->interval);
+		}
+		nl80211_send_sched_scan_results(rdev, request->dev);
+	}
 
 	mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
 {
+	trace_cfg80211_sched_scan_results(wiphy);
 	/* ignore if we're not scanning */
 	if (wiphy_to_dev(wiphy)->sched_scan_req)
 		queue_work(cfg80211_wq,
@@ -126,6 +195,8 @@
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	trace_cfg80211_sched_scan_stopped(wiphy);
+
 	mutex_lock(&rdev->sched_scan_mtx);
 	__cfg80211_stop_sched_scan(rdev, true);
 	mutex_unlock(&rdev->sched_scan_mtx);
@@ -145,7 +216,7 @@
 	dev = rdev->sched_scan_req->dev;
 
 	if (!driver_initiated) {
-		int err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
+		int err = rdev_sched_scan_stop(rdev, dev);
 		if (err)
 			return err;
 	}
@@ -158,24 +229,6 @@
 	return 0;
 }
 
-static void bss_release(struct kref *ref)
-{
-	struct cfg80211_internal_bss *bss;
-
-	bss = container_of(ref, struct cfg80211_internal_bss, ref);
-	if (bss->pub.free_priv)
-		bss->pub.free_priv(&bss->pub);
-
-	if (bss->beacon_ies_allocated)
-		kfree(bss->pub.beacon_ies);
-	if (bss->proberesp_ies_allocated)
-		kfree(bss->pub.proberesp_ies);
-
-	BUG_ON(atomic_read(&bss->hold));
-
-	kfree(bss);
-}
-
 /* must hold dev->bss_lock! */
 void cfg80211_bss_age(struct cfg80211_registered_device *dev,
                       unsigned long age_secs)
@@ -188,32 +241,9 @@
 	}
 }
 
-/* must hold dev->bss_lock! */
-static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
-				  struct cfg80211_internal_bss *bss)
-{
-	list_del_init(&bss->list);
-	rb_erase(&bss->rbn, &dev->bss_tree);
-	kref_put(&bss->ref, bss_release);
-}
-
-/* must hold dev->bss_lock! */
 void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
 {
-	struct cfg80211_internal_bss *bss, *tmp;
-	bool expired = false;
-
-	list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
-		if (atomic_read(&bss->hold))
-			continue;
-		if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
-			continue;
-		__cfg80211_unlink_bss(dev, bss);
-		expired = true;
-	}
-
-	if (expired)
-		dev->bss_generation++;
+	__cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
 }
 
 const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -459,6 +489,9 @@
 	struct cfg80211_internal_bss *bss, *res = NULL;
 	unsigned long now = jiffies;
 
+	trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
+			       capa_val);
+
 	spin_lock_bh(&dev->bss_lock);
 
 	list_for_each_entry(bss, &dev->bss_list, list) {
@@ -480,6 +513,7 @@
 	spin_unlock_bh(&dev->bss_lock);
 	if (!res)
 		return NULL;
+	trace_cfg80211_return_bss(&res->pub);
 	return &res->pub;
 }
 EXPORT_SYMBOL(cfg80211_get_bss);
@@ -737,6 +771,38 @@
 	return found;
 }
 
+static struct ieee80211_channel *
+cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+			 struct ieee80211_channel *channel)
+{
+	const u8 *tmp;
+	u32 freq;
+	int channel_number = -1;
+
+	tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
+	if (tmp && tmp[1] == 1) {
+		channel_number = tmp[2];
+	} else {
+		tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
+		if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
+			struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
+
+			channel_number = htop->primary_chan;
+		}
+	}
+
+	if (channel_number < 0)
+		return channel;
+
+	freq = ieee80211_channel_to_frequency(channel_number, channel->band);
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (!channel)
+		return NULL;
+	if (channel->flags & IEEE80211_CHAN_DISABLED)
+		return NULL;
+	return channel;
+}
+
 struct cfg80211_bss*
 cfg80211_inform_bss(struct wiphy *wiphy,
 		    struct ieee80211_channel *channel,
@@ -756,6 +822,10 @@
 			(signal < 0 || signal > 100)))
 		return NULL;
 
+	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, channel);
+	if (!channel)
+		return NULL;
+
 	res = kzalloc(sizeof(*res) + privsz + ielen, gfp);
 	if (!res)
 		return NULL;
@@ -792,6 +862,7 @@
 	if (res->pub.capability & WLAN_CAPABILITY_ESS)
 		regulatory_hint_found_beacon(wiphy, channel, gfp);
 
+	trace_cfg80211_return_bss(&res->pub);
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
@@ -808,6 +879,11 @@
 				      u.probe_resp.variable);
 	size_t privsz;
 
+	BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
+			offsetof(struct ieee80211_mgmt, u.beacon.variable));
+
+	trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
+
 	if (WARN_ON(!mgmt))
 		return NULL;
 
@@ -823,6 +899,11 @@
 
 	privsz = wiphy->bss_priv_size;
 
+	channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
+					   ielen, channel);
+	if (!channel)
+		return NULL;
+
 	res = kzalloc(sizeof(*res) + privsz + ielen, gfp);
 	if (!res)
 		return NULL;
@@ -861,6 +942,7 @@
 	if (res->pub.capability & WLAN_CAPABILITY_ESS)
 		regulatory_hint_found_beacon(wiphy, channel, gfp);
 
+	trace_cfg80211_return_bss(&res->pub);
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
@@ -962,6 +1044,7 @@
 	creq->ssids = (void *)&creq->channels[n_channels];
 	creq->n_channels = n_channels;
 	creq->n_ssids = 1;
+	creq->scan_start = jiffies;
 
 	/* translate "Scan on frequencies" request */
 	i = 0;
@@ -1026,7 +1109,7 @@
 			creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
 	rdev->scan_req = creq;
-	err = rdev->ops->scan(wiphy, creq);
+	err = rdev_scan(rdev, creq);
 	if (err) {
 		rdev->scan_req = NULL;
 		/* creq will be freed below */
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 6f39cb8..c749002 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -16,6 +16,7 @@
 #include <net/rtnetlink.h>
 #include "nl80211.h"
 #include "reg.h"
+#include "rdev-ops.h"
 
 struct cfg80211_conn {
 	struct cfg80211_connect_params params;
@@ -138,10 +139,11 @@
 
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
+	request->scan_start = jiffies;
 
 	rdev->scan_req = request;
 
-	err = rdev->ops->scan(wdev->wiphy, request);
+	err = rdev_scan(rdev, request);
 	if (!err) {
 		wdev->conn->state = CFG80211_CONN_SCANNING;
 		nl80211_send_scan_start(rdev, wdev);
@@ -179,7 +181,7 @@
 					    params->ssid, params->ssid_len,
 					    NULL, 0,
 					    params->key, params->key_len,
-					    params->key_idx);
+					    params->key_idx, NULL, 0);
 	case CFG80211_CONN_ASSOCIATE_NEXT:
 		BUG_ON(!rdev->ops->assoc);
 		wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -716,7 +718,7 @@
 	 */
 	if (rdev->ops->del_key)
 		for (i = 0; i < 6; i++)
-			rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL);
+			rdev_del_key(rdev, dev, i, false, NULL);
 
 #ifdef CONFIG_CFG80211_WEXT
 	memset(&wrqu, 0, sizeof(wrqu));
@@ -892,7 +894,7 @@
 	} else {
 		wdev->sme_state = CFG80211_SME_CONNECTING;
 		wdev->connect_keys = connkeys;
-		err = rdev->ops->connect(&rdev->wiphy, dev, connect);
+		err = rdev_connect(rdev, dev, connect);
 		if (err) {
 			wdev->connect_keys = NULL;
 			wdev->sme_state = CFG80211_SME_IDLE;
@@ -964,7 +966,7 @@
 		if (err)
 			return err;
 	} else {
-		err = rdev->ops->disconnect(&rdev->wiphy, dev, reason);
+		err = rdev_disconnect(rdev, dev, reason);
 		if (err)
 			return err;
 	}
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index ff57459..9bf6d5e 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -16,6 +16,7 @@
 #include <net/cfg80211.h>
 #include "sysfs.h"
 #include "core.h"
+#include "rdev-ops.h"
 
 static inline struct cfg80211_registered_device *dev_to_rdev(
 	struct device *dev)
@@ -94,7 +95,7 @@
 	if (rdev->ops->suspend) {
 		rtnl_lock();
 		if (rdev->wiphy.registered)
-			ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
+			ret = rdev_suspend(rdev);
 		rtnl_unlock();
 	}
 
@@ -114,7 +115,7 @@
 	if (rdev->ops->resume) {
 		rtnl_lock();
 		if (rdev->wiphy.registered)
-			ret = rdev->ops->resume(&rdev->wiphy);
+			ret = rdev_resume(rdev);
 		rtnl_unlock();
 	}
 
diff --git a/net/wireless/trace.c b/net/wireless/trace.c
new file mode 100644
index 0000000..95f997f
--- /dev/null
+++ b/net/wireless/trace.c
@@ -0,0 +1,7 @@
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
new file mode 100644
index 0000000..2134576
--- /dev/null
+++ b/net/wireless/trace.h
@@ -0,0 +1,2324 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cfg80211
+
+#if !defined(__RDEV_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __RDEV_OPS_TRACE
+
+#include <linux/tracepoint.h>
+
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+#include "core.h"
+
+#define MAC_ENTRY(entry_mac) __array(u8, entry_mac, ETH_ALEN)
+#define MAC_ASSIGN(entry_mac, given_mac) do {			     \
+	if (given_mac)						     \
+		memcpy(__entry->entry_mac, given_mac, ETH_ALEN);     \
+	else							     \
+		memset(__entry->entry_mac, 0, ETH_ALEN);	     \
+	} while (0)
+#define MAC_PR_FMT "%pM"
+#define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
+
+#define MAXNAME		32
+#define WIPHY_ENTRY	__array(char, wiphy_name, MAXNAME)
+#define WIPHY_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME)
+#define WIPHY_PR_FMT	"%s"
+#define WIPHY_PR_ARG	__entry->wiphy_name
+
+#define WDEV_ENTRY	__field(u32, id)
+#define WDEV_ASSIGN	(__entry->id) = (wdev ? wdev->identifier : 0)
+#define WDEV_PR_FMT	"wdev(%u)"
+#define WDEV_PR_ARG	(__entry->id)
+
+#define NETDEV_ENTRY	__array(char, name, IFNAMSIZ) \
+			__field(int, ifindex)
+#define NETDEV_ASSIGN					       \
+	do {						       \
+		memcpy(__entry->name, netdev->name, IFNAMSIZ); \
+		(__entry->ifindex) = (netdev->ifindex);	       \
+	} while (0)
+#define NETDEV_PR_FMT	"netdev:%s(%d)"
+#define NETDEV_PR_ARG	__entry->name, __entry->ifindex
+
+#define MESH_CFG_ENTRY __field(u16, dot11MeshRetryTimeout)		   \
+		       __field(u16, dot11MeshConfirmTimeout)		   \
+		       __field(u16, dot11MeshHoldingTimeout)		   \
+		       __field(u16, dot11MeshMaxPeerLinks)		   \
+		       __field(u8, dot11MeshMaxRetries)			   \
+		       __field(u8, dot11MeshTTL)			   \
+		       __field(u8, element_ttl)				   \
+		       __field(bool, auto_open_plinks)			   \
+		       __field(u32, dot11MeshNbrOffsetMaxNeighbor)	   \
+		       __field(u8, dot11MeshHWMPmaxPREQretries)		   \
+		       __field(u32, path_refresh_time)			   \
+		       __field(u32, dot11MeshHWMPactivePathTimeout)	   \
+		       __field(u16, min_discovery_timeout)		   \
+		       __field(u16, dot11MeshHWMPpreqMinInterval)	   \
+		       __field(u16, dot11MeshHWMPperrMinInterval)	   \
+		       __field(u16, dot11MeshHWMPnetDiameterTraversalTime) \
+		       __field(u8, dot11MeshHWMPRootMode)		   \
+		       __field(u16, dot11MeshHWMPRannInterval)		   \
+		       __field(bool, dot11MeshGateAnnouncementProtocol)	   \
+		       __field(bool, dot11MeshForwarding)		   \
+		       __field(s32, rssi_threshold)			   \
+		       __field(u16, ht_opmode)				   \
+		       __field(u32, dot11MeshHWMPactivePathToRootTimeout)  \
+		       __field(u16, dot11MeshHWMProotInterval)		   \
+		       __field(u16, dot11MeshHWMPconfirmationInterval)
+#define MESH_CFG_ASSIGN							      \
+	do {								      \
+		__entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \
+		__entry->dot11MeshConfirmTimeout =			      \
+				conf->dot11MeshConfirmTimeout;		      \
+		__entry->dot11MeshHoldingTimeout =			      \
+				conf->dot11MeshHoldingTimeout;		      \
+		__entry->dot11MeshMaxPeerLinks = conf->dot11MeshMaxPeerLinks; \
+		__entry->dot11MeshMaxRetries = conf->dot11MeshMaxRetries;     \
+		__entry->dot11MeshTTL = conf->dot11MeshTTL;		      \
+		__entry->element_ttl = conf->element_ttl;		      \
+		__entry->auto_open_plinks = conf->auto_open_plinks;	      \
+		__entry->dot11MeshNbrOffsetMaxNeighbor =		      \
+				conf->dot11MeshNbrOffsetMaxNeighbor;	      \
+		__entry->dot11MeshHWMPmaxPREQretries =			      \
+				conf->dot11MeshHWMPmaxPREQretries;	      \
+		__entry->path_refresh_time = conf->path_refresh_time;	      \
+		__entry->dot11MeshHWMPactivePathTimeout =		      \
+				conf->dot11MeshHWMPactivePathTimeout;	      \
+		__entry->min_discovery_timeout = conf->min_discovery_timeout; \
+		__entry->dot11MeshHWMPpreqMinInterval =			      \
+				conf->dot11MeshHWMPpreqMinInterval;	      \
+		__entry->dot11MeshHWMPperrMinInterval =			      \
+				conf->dot11MeshHWMPperrMinInterval;	      \
+		__entry->dot11MeshHWMPnetDiameterTraversalTime =	      \
+				conf->dot11MeshHWMPnetDiameterTraversalTime;  \
+		__entry->dot11MeshHWMPRootMode = conf->dot11MeshHWMPRootMode; \
+		__entry->dot11MeshHWMPRannInterval =			      \
+				conf->dot11MeshHWMPRannInterval;	      \
+		__entry->dot11MeshGateAnnouncementProtocol =		      \
+				conf->dot11MeshGateAnnouncementProtocol;      \
+		__entry->dot11MeshForwarding = conf->dot11MeshForwarding;     \
+		__entry->rssi_threshold = conf->rssi_threshold;		      \
+		__entry->ht_opmode = conf->ht_opmode;			      \
+		__entry->dot11MeshHWMPactivePathToRootTimeout =		      \
+				conf->dot11MeshHWMPactivePathToRootTimeout;   \
+		__entry->dot11MeshHWMProotInterval =			      \
+				conf->dot11MeshHWMProotInterval;	      \
+		__entry->dot11MeshHWMPconfirmationInterval =		      \
+				conf->dot11MeshHWMPconfirmationInterval;      \
+	} while (0)
+
+#define CHAN_ENTRY __field(enum ieee80211_band, band) \
+		   __field(u16, center_freq)
+#define CHAN_ASSIGN(chan)					  \
+	do {							  \
+		if (chan) {					  \
+			__entry->band = chan->band;		  \
+			__entry->center_freq = chan->center_freq; \
+		} else {					  \
+			__entry->band = 0;			  \
+			__entry->center_freq = 0;		  \
+		}						  \
+	} while (0)
+#define CHAN_PR_FMT "band: %d, freq: %u"
+#define CHAN_PR_ARG __entry->band, __entry->center_freq
+
+#define CHAN_DEF_ENTRY __field(enum ieee80211_band, band)		\
+		       __field(u32, control_freq)			\
+		       __field(u32, width)				\
+		       __field(u32, center_freq1)			\
+		       __field(u32, center_freq2)
+#define CHAN_DEF_ASSIGN(chandef)					\
+	do {								\
+		if ((chandef) && (chandef)->chan) {			\
+			__entry->band = (chandef)->chan->band;		\
+			__entry->control_freq =				\
+				(chandef)->chan->center_freq;		\
+			__entry->width = (chandef)->width;		\
+			__entry->center_freq1 = (chandef)->center_freq1;\
+			__entry->center_freq2 = (chandef)->center_freq2;\
+		} else {						\
+			__entry->band = 0;				\
+			__entry->control_freq = 0;			\
+			__entry->width = 0;				\
+			__entry->center_freq1 = 0;			\
+			__entry->center_freq2 = 0;			\
+		}							\
+	} while (0)
+#define CHAN_DEF_PR_FMT							\
+	"band: %d, control freq: %u, width: %d, cf1: %u, cf2: %u"
+#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq,		\
+			__entry->width, __entry->center_freq1,		\
+			__entry->center_freq2
+
+#define SINFO_ENTRY __field(int, generation)	    \
+		    __field(u32, connected_time)    \
+		    __field(u32, inactive_time)	    \
+		    __field(u32, rx_bytes)	    \
+		    __field(u32, tx_bytes)	    \
+		    __field(u32, rx_packets)	    \
+		    __field(u32, tx_packets)	    \
+		    __field(u32, tx_retries)	    \
+		    __field(u32, tx_failed)	    \
+		    __field(u32, rx_dropped_misc)   \
+		    __field(u32, beacon_loss_count) \
+		    __field(u16, llid)		    \
+		    __field(u16, plid)		    \
+		    __field(u8, plink_state)
+#define SINFO_ASSIGN						       \
+	do {							       \
+		__entry->generation = sinfo->generation;	       \
+		__entry->connected_time = sinfo->connected_time;       \
+		__entry->inactive_time = sinfo->inactive_time;	       \
+		__entry->rx_bytes = sinfo->rx_bytes;		       \
+		__entry->tx_bytes = sinfo->tx_bytes;		       \
+		__entry->rx_packets = sinfo->rx_packets;	       \
+		__entry->tx_packets = sinfo->tx_packets;	       \
+		__entry->tx_retries = sinfo->tx_retries;	       \
+		__entry->tx_failed = sinfo->tx_failed;		       \
+		__entry->rx_dropped_misc = sinfo->rx_dropped_misc;     \
+		__entry->beacon_loss_count = sinfo->beacon_loss_count; \
+		__entry->llid = sinfo->llid;			       \
+		__entry->plid = sinfo->plid;			       \
+		__entry->plink_state = sinfo->plink_state;	       \
+	} while (0)
+
+#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
+
+/*************************************************************
+ *			rdev->ops traces		     *
+ *************************************************************/
+
+TRACE_EVENT(rdev_suspend,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_wowlan *wow),
+	TP_ARGS(wiphy, wow),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(bool, any)
+		__field(bool, disconnect)
+		__field(bool, magic_pkt)
+		__field(bool, gtk_rekey_failure)
+		__field(bool, eap_identity_req)
+		__field(bool, four_way_handshake)
+		__field(bool, rfkill_release)
+		__field(bool, valid_wow)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		if (wow) {
+			__entry->any = wow->any;
+			__entry->disconnect = wow->disconnect;
+			__entry->magic_pkt = wow->magic_pkt;
+			__entry->gtk_rekey_failure = wow->gtk_rekey_failure;
+			__entry->eap_identity_req = wow->eap_identity_req;
+			__entry->four_way_handshake = wow->four_way_handshake;
+			__entry->rfkill_release = wow->rfkill_release;
+			__entry->valid_wow = true;
+		} else {
+			__entry->valid_wow = false;
+		}
+	),
+	TP_printk(WIPHY_PR_FMT ", wow%s - any: %d, disconnect: %d, "
+		  "magic pkt: %d, gtk rekey failure: %d, eap identify req: %d, "
+		  "four way handshake: %d, rfkill release: %d.",
+		  WIPHY_PR_ARG, __entry->valid_wow ? "" : "(Not configured!)",
+		  __entry->any, __entry->disconnect, __entry->magic_pkt,
+		  __entry->gtk_rekey_failure, __entry->eap_identity_req,
+		  __entry->four_way_handshake, __entry->rfkill_release)
+);
+
+TRACE_EVENT(rdev_return_int,
+	TP_PROTO(struct wiphy *wiphy, int ret),
+	TP_ARGS(wiphy, ret),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(rdev_scan,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request *request),
+	TP_ARGS(wiphy, request),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(wiphy_only_evt,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+
+DEFINE_EVENT(wiphy_only_evt, rdev_resume,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DEFINE_EVENT(wiphy_only_evt, rdev_return_void,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DEFINE_EVENT(wiphy_only_evt, rdev_get_ringparam,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DECLARE_EVENT_CLASS(wiphy_enabled_evt,
+	TP_PROTO(struct wiphy *wiphy, bool enabled),
+	TP_ARGS(wiphy, enabled),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(bool, enabled)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->enabled = enabled;
+	),
+	TP_printk(WIPHY_PR_FMT ", %senabled ",
+		  WIPHY_PR_ARG, __entry->enabled ? "" : "not ")
+);
+
+DEFINE_EVENT(wiphy_enabled_evt, rdev_set_wakeup,
+	TP_PROTO(struct wiphy *wiphy, bool enabled),
+	TP_ARGS(wiphy, enabled)
+);
+
+TRACE_EVENT(rdev_add_virtual_intf,
+	TP_PROTO(struct wiphy *wiphy, char *name, enum nl80211_iftype type),
+	TP_ARGS(wiphy, name, type),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__string(vir_intf_name, name ? name : "<noname>")
+		__field(enum nl80211_iftype, type)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__assign_str(vir_intf_name, name ? name : "<noname>");
+		__entry->type = type;
+	),
+	TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d",
+		  WIPHY_PR_ARG, __get_str(vir_intf_name), __entry->type)
+);
+
+DECLARE_EVENT_CLASS(wiphy_wdev_evt,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_del_virtual_intf,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_change_virtual_intf,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 enum nl80211_iftype type),
+	TP_ARGS(wiphy, netdev, type),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(enum nl80211_iftype, type)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->type = type;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", type: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->type)
+);
+
+DECLARE_EVENT_CLASS(key_handle,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
+		 bool pairwise, const u8 *mac_addr),
+	TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(mac_addr)
+		__field(u8, key_index)
+		__field(bool, pairwise)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(mac_addr, mac_addr);
+		__entry->key_index = key_index;
+		__entry->pairwise = pairwise;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index,
+		  BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr))
+);
+
+DEFINE_EVENT(key_handle, rdev_add_key,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
+		 bool pairwise, const u8 *mac_addr),
+	TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
+);
+
+DEFINE_EVENT(key_handle, rdev_get_key,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
+		 bool pairwise, const u8 *mac_addr),
+	TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
+);
+
+DEFINE_EVENT(key_handle, rdev_del_key,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
+		 bool pairwise, const u8 *mac_addr),
+	TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
+);
+
+TRACE_EVENT(rdev_set_default_key,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
+		 bool unicast, bool multicast),
+	TP_ARGS(wiphy, netdev, key_index, unicast, multicast),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u8, key_index)
+		__field(bool, unicast)
+		__field(bool, multicast)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->key_index = key_index;
+		__entry->unicast = unicast;
+		__entry->multicast = multicast;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u, unicast: %s, multicast: %s",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index,
+		  BOOL_TO_STR(__entry->unicast),
+		  BOOL_TO_STR(__entry->multicast))
+);
+
+TRACE_EVENT(rdev_set_default_mgmt_key,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index),
+	TP_ARGS(wiphy, netdev, key_index),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u8, key_index)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->key_index = key_index;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index)
+);
+
+TRACE_EVENT(rdev_start_ap,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_ap_settings *settings),
+	TP_ARGS(wiphy, netdev, settings),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		CHAN_DEF_ENTRY
+		__field(int, beacon_interval)
+		__field(int, dtim_period)
+		__array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
+		__field(enum nl80211_hidden_ssid, hidden_ssid)
+		__field(u32, wpa_ver)
+		__field(bool, privacy)
+		__field(enum nl80211_auth_type, auth_type)
+		__field(int, inactivity_timeout)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		CHAN_DEF_ASSIGN(&settings->chandef);
+		__entry->beacon_interval = settings->beacon_interval;
+		__entry->dtim_period = settings->dtim_period;
+		__entry->hidden_ssid = settings->hidden_ssid;
+		__entry->wpa_ver = settings->crypto.wpa_versions;
+		__entry->privacy = settings->privacy;
+		__entry->auth_type = settings->auth_type;
+		__entry->inactivity_timeout = settings->inactivity_timeout;
+		memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
+		memcpy(__entry->ssid, settings->ssid, settings->ssid_len);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, "
+		  CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, "
+		  "hidden ssid: %d, wpa versions: %u, privacy: %s, "
+		  "auth type: %d, inactivity timeout: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG,
+		  __entry->beacon_interval, __entry->dtim_period,
+		  __entry->hidden_ssid, __entry->wpa_ver,
+		  BOOL_TO_STR(__entry->privacy), __entry->auth_type,
+		  __entry->inactivity_timeout)
+);
+
+TRACE_EVENT(rdev_change_beacon,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_beacon_data *info),
+	TP_ARGS(wiphy, netdev, info),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__dynamic_array(u8, head, info ? info->head_len : 0)
+		__dynamic_array(u8, tail, info ? info->tail_len : 0)
+		__dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0)
+		__dynamic_array(u8, proberesp_ies,
+				info ? info->proberesp_ies_len : 0)
+		__dynamic_array(u8, assocresp_ies,
+				info ? info->assocresp_ies_len : 0)
+		__dynamic_array(u8, probe_resp, info ? info->probe_resp_len : 0)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		if (info) {
+			if (info->head)
+				memcpy(__get_dynamic_array(head), info->head,
+				       info->head_len);
+			if (info->tail)
+				memcpy(__get_dynamic_array(tail), info->tail,
+				       info->tail_len);
+			if (info->beacon_ies)
+				memcpy(__get_dynamic_array(beacon_ies),
+				       info->beacon_ies, info->beacon_ies_len);
+			if (info->proberesp_ies)
+				memcpy(__get_dynamic_array(proberesp_ies),
+				       info->proberesp_ies,
+				       info->proberesp_ies_len);
+			if (info->assocresp_ies)
+				memcpy(__get_dynamic_array(assocresp_ies),
+				       info->assocresp_ies,
+				       info->assocresp_ies_len);
+			if (info->probe_resp)
+				memcpy(__get_dynamic_array(probe_resp),
+				       info->probe_resp, info->probe_resp_len);
+		}
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(wiphy_netdev_evt,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_get_et_stats,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_get_mesh_config,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_mesh,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+	TP_ARGS(wiphy, netdev)
+);
+
+DECLARE_EVENT_CLASS(station_add_change,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
+		 struct station_parameters *params),
+	TP_ARGS(wiphy, netdev, mac, params),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(sta_mac)
+		__field(u32, sta_flags_mask)
+		__field(u32, sta_flags_set)
+		__field(u32, sta_modify_mask)
+		__field(int, listen_interval)
+		__field(u16, aid)
+		__field(u8, plink_action)
+		__field(u8, plink_state)
+		__field(u8, uapsd_queues)
+		__array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap))
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(sta_mac, mac);
+		__entry->sta_flags_mask = params->sta_flags_mask;
+		__entry->sta_flags_set = params->sta_flags_set;
+		__entry->sta_modify_mask = params->sta_modify_mask;
+		__entry->listen_interval = params->listen_interval;
+		__entry->aid = params->aid;
+		__entry->plink_action = params->plink_action;
+		__entry->plink_state = params->plink_state;
+		__entry->uapsd_queues = params->uapsd_queues;
+		memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap));
+		if (params->ht_capa)
+			memcpy(__entry->ht_capa, params->ht_capa,
+			       sizeof(struct ieee80211_ht_cap));
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
+		  ", station flags mask: %u, station flags set: %u, "
+		  "station modify mask: %u, listen interval: %d, aid: %u, "
+		  "plink action: %u, plink state: %u, uapsd queues: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+		  __entry->sta_flags_mask, __entry->sta_flags_set,
+		  __entry->sta_modify_mask, __entry->listen_interval,
+		  __entry->aid, __entry->plink_action, __entry->plink_state,
+		  __entry->uapsd_queues)
+);
+
+DEFINE_EVENT(station_add_change, rdev_add_station,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
+		 struct station_parameters *params),
+	TP_ARGS(wiphy, netdev, mac, params)
+);
+
+DEFINE_EVENT(station_add_change, rdev_change_station,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
+		 struct station_parameters *params),
+	TP_ARGS(wiphy, netdev, mac, params)
+);
+
+DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
+	TP_ARGS(wiphy, netdev, mac),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(sta_mac)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(sta_mac, mac);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac))
+);
+
+DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_station,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
+	TP_ARGS(wiphy, netdev, mac)
+);
+
+DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
+	TP_ARGS(wiphy, netdev, mac)
+);
+
+DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
+	TP_ARGS(wiphy, netdev, mac)
+);
+
+DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
+	TP_ARGS(wiphy, netdev, mac)
+);
+
+TRACE_EVENT(rdev_dump_station,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+		 u8 *mac),
+	TP_ARGS(wiphy, netdev, idx, mac),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(sta_mac)
+		__field(int, idx)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(sta_mac, mac);
+		__entry->idx = idx;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+		  __entry->idx)
+);
+
+TRACE_EVENT(rdev_return_int_station_info,
+	TP_PROTO(struct wiphy *wiphy, int ret, struct station_info *sinfo),
+	TP_ARGS(wiphy, ret, sinfo),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+		SINFO_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->ret = ret;
+		SINFO_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned %d",
+		  WIPHY_PR_ARG, __entry->ret)
+);
+
+DECLARE_EVENT_CLASS(mpath_evt,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
+		 u8 *next_hop),
+	TP_ARGS(wiphy, netdev, dst, next_hop),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(dst)
+		MAC_ENTRY(next_hop)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(dst, dst);
+		MAC_ASSIGN(next_hop, next_hop);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst),
+		  MAC_PR_ARG(next_hop))
+);
+
+DEFINE_EVENT(mpath_evt, rdev_add_mpath,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
+		 u8 *next_hop),
+	TP_ARGS(wiphy, netdev, dst, next_hop)
+);
+
+DEFINE_EVENT(mpath_evt, rdev_change_mpath,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
+		 u8 *next_hop),
+	TP_ARGS(wiphy, netdev, dst, next_hop)
+);
+
+DEFINE_EVENT(mpath_evt, rdev_get_mpath,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
+		 u8 *next_hop),
+	TP_ARGS(wiphy, netdev, dst, next_hop)
+);
+
+TRACE_EVENT(rdev_dump_mpath,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+		 u8 *dst, u8 *next_hop),
+	TP_ARGS(wiphy, netdev, idx, dst, next_hop),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(dst)
+		MAC_ENTRY(next_hop)
+		__field(int, idx)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(dst, dst);
+		MAC_ASSIGN(next_hop, next_hop);
+		__entry->idx = idx;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
+		  MAC_PR_FMT ", next hop: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst),
+		  MAC_PR_ARG(next_hop))
+);
+
+TRACE_EVENT(rdev_return_int_mpath_info,
+	TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo),
+	TP_ARGS(wiphy, ret, pinfo),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+		__field(int, generation)
+		__field(u32, filled)
+		__field(u32, frame_qlen)
+		__field(u32, sn)
+		__field(u32, metric)
+		__field(u32, exptime)
+		__field(u32, discovery_timeout)
+		__field(u8, discovery_retries)
+		__field(u8, flags)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->ret = ret;
+		__entry->generation = pinfo->generation;
+		__entry->filled = pinfo->filled;
+		__entry->frame_qlen = pinfo->frame_qlen;
+		__entry->sn = pinfo->sn;
+		__entry->metric = pinfo->metric;
+		__entry->exptime = pinfo->exptime;
+		__entry->discovery_timeout = pinfo->discovery_timeout;
+		__entry->discovery_retries = pinfo->discovery_retries;
+		__entry->flags = pinfo->flags;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, "
+		  "filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u,"
+		  " discovery timeout: %u, discovery retries: %u, flags: %u",
+		  WIPHY_PR_ARG, __entry->ret, __entry->generation,
+		  __entry->filled, __entry->frame_qlen, __entry->sn,
+		  __entry->metric, __entry->exptime, __entry->discovery_timeout,
+		  __entry->discovery_retries, __entry->flags)
+);
+
+TRACE_EVENT(rdev_return_int_mesh_config,
+	TP_PROTO(struct wiphy *wiphy, int ret, struct mesh_config *conf),
+	TP_ARGS(wiphy, ret, conf),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		MESH_CFG_ENTRY
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		MESH_CFG_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned: %d",
+		  WIPHY_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(rdev_update_mesh_config,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 mask,
+		 const struct mesh_config *conf),
+	TP_ARGS(wiphy, netdev, mask, conf),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MESH_CFG_ENTRY
+		__field(u32, mask)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MESH_CFG_ASSIGN;
+		__entry->mask = mask;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mask: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mask)
+);
+
+TRACE_EVENT(rdev_join_mesh,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 const struct mesh_config *conf,
+		 const struct mesh_setup *setup),
+	TP_ARGS(wiphy, netdev, conf, setup),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MESH_CFG_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MESH_CFG_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
+TRACE_EVENT(rdev_change_bss,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct bss_parameters *params),
+	TP_ARGS(wiphy, netdev, params),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(int, use_cts_prot)
+		__field(int, use_short_preamble)
+		__field(int, use_short_slot_time)
+		__field(int, ap_isolate)
+		__field(int, ht_opmode)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->use_cts_prot = params->use_cts_prot;
+		__entry->use_short_preamble = params->use_short_preamble;
+		__entry->use_short_slot_time = params->use_short_slot_time;
+		__entry->ap_isolate = params->ap_isolate;
+		__entry->ht_opmode = params->ht_opmode;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", use cts prot: %d, "
+		  "use short preamble: %d, use short slot time: %d, "
+		  "ap isolate: %d, ht opmode: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->use_cts_prot,
+		  __entry->use_short_preamble, __entry->use_short_slot_time,
+		  __entry->ap_isolate, __entry->ht_opmode)
+);
+
+TRACE_EVENT(rdev_set_txq_params,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct ieee80211_txq_params *params),
+	TP_ARGS(wiphy, netdev, params),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(enum nl80211_ac, ac)
+		__field(u16, txop)
+		__field(u16, cwmin)
+		__field(u16, cwmax)
+		__field(u8, aifs)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->ac = params->ac;
+		__entry->txop = params->txop;
+		__entry->cwmin = params->cwmin;
+		__entry->cwmax = params->cwmax;
+		__entry->aifs = params->aifs;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", ac: %d, txop: %u, cwmin: %u, cwmax: %u, aifs: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ac, __entry->txop,
+		  __entry->cwmin, __entry->cwmax, __entry->aifs)
+);
+
+TRACE_EVENT(rdev_libertas_set_mesh_channel,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct ieee80211_channel *chan),
+	TP_ARGS(wiphy, netdev, chan),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		CHAN_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		CHAN_ASSIGN(chan);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_PR_FMT, WIPHY_PR_ARG,
+		  NETDEV_PR_ARG, CHAN_PR_ARG)
+);
+
+TRACE_EVENT(rdev_set_monitor_channel,
+	TP_PROTO(struct wiphy *wiphy,
+		 struct cfg80211_chan_def *chandef),
+	TP_ARGS(wiphy, chandef),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		CHAN_DEF_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		CHAN_DEF_ASSIGN(chandef);
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(rdev_auth,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_auth_request *req),
+	TP_ARGS(wiphy, netdev, req),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		__field(enum nl80211_auth_type, auth_type)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		if (req->bss)
+			MAC_ASSIGN(bssid, req->bss->bssid);
+		else
+			memset(__entry->bssid, 0, ETH_ALEN);
+		__entry->auth_type = req->auth_type;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type,
+		  MAC_PR_ARG(bssid))
+);
+
+TRACE_EVENT(rdev_assoc,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_assoc_request *req),
+	TP_ARGS(wiphy, netdev, req),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		MAC_ENTRY(prev_bssid)
+		__field(bool, use_mfp)
+		__field(u32, flags)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		if (req->bss)
+			MAC_ASSIGN(bssid, req->bss->bssid);
+		else
+			memset(__entry->bssid, 0, ETH_ALEN);
+		MAC_ASSIGN(prev_bssid, req->prev_bssid);
+		__entry->use_mfp = req->use_mfp;
+		__entry->flags = req->flags;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
+		  ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
+		  MAC_PR_ARG(prev_bssid), BOOL_TO_STR(__entry->use_mfp),
+		  __entry->flags)
+);
+
+TRACE_EVENT(rdev_deauth,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_deauth_request *req),
+	TP_ARGS(wiphy, netdev, req),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		__field(u16, reason_code)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(bssid, req->bssid);
+		__entry->reason_code = req->reason_code;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
+		  __entry->reason_code)
+);
+
+TRACE_EVENT(rdev_disassoc,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_disassoc_request *req),
+	TP_ARGS(wiphy, netdev, req),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		__field(u16, reason_code)
+		__field(bool, local_state_change)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		if (req->bss)
+			MAC_ASSIGN(bssid, req->bss->bssid);
+		else
+			memset(__entry->bssid, 0, ETH_ALEN);
+		__entry->reason_code = req->reason_code;
+		__entry->local_state_change = req->local_state_change;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
+		  ", reason: %u, local state change: %s",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
+		  __entry->reason_code,
+		  BOOL_TO_STR(__entry->local_state_change))
+);
+
+TRACE_EVENT(rdev_mgmt_tx_cancel_wait,
+	TP_PROTO(struct wiphy *wiphy,
+		 struct wireless_dev *wdev, u64 cookie),
+	TP_ARGS(wiphy, wdev, cookie),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		__field(u64, cookie)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		__entry->cookie = cookie;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu ",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie)
+);
+
+TRACE_EVENT(rdev_set_power_mgmt,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 bool enabled, int timeout),
+	TP_ARGS(wiphy, netdev, enabled, timeout),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(bool, enabled)
+		__field(int, timeout)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->enabled = enabled;
+		__entry->timeout = timeout;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %senabled, timeout: %d ",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG,
+		  __entry->enabled ? "" : "not ", __entry->timeout)
+);
+
+TRACE_EVENT(rdev_connect,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_connect_params *sme),
+	TP_ARGS(wiphy, netdev, sme),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		__array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
+		__field(enum nl80211_auth_type, auth_type)
+		__field(bool, privacy)
+		__field(u32, wpa_versions)
+		__field(u32, flags)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(bssid, sme->bssid);
+		memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
+		memcpy(__entry->ssid, sme->ssid, sme->ssid_len);
+		__entry->auth_type = sme->auth_type;
+		__entry->privacy = sme->privacy;
+		__entry->wpa_versions = sme->crypto.wpa_versions;
+		__entry->flags = sme->flags;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
+		  ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
+		  "flags: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
+		  __entry->auth_type, BOOL_TO_STR(__entry->privacy),
+		  __entry->wpa_versions, __entry->flags)
+);
+
+TRACE_EVENT(rdev_set_cqm_rssi_config,
+	TP_PROTO(struct wiphy *wiphy,
+		 struct net_device *netdev, s32 rssi_thold,
+		 u32 rssi_hyst),
+	TP_ARGS(wiphy, netdev, rssi_thold, rssi_hyst),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(s32, rssi_thold)
+		__field(u32, rssi_hyst)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->rssi_thold = rssi_thold;
+		__entry->rssi_hyst = rssi_hyst;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
+		  ", rssi_thold: %d, rssi_hyst: %u ",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG,
+		 __entry->rssi_thold, __entry->rssi_hyst)
+);
+
+TRACE_EVENT(rdev_set_cqm_txe_config,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate,
+		 u32 pkts, u32 intvl),
+	TP_ARGS(wiphy, netdev, rate, pkts, intvl),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u32, rate)
+		__field(u32, pkts)
+		__field(u32, intvl)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->rate = rate;
+		__entry->pkts = pkts;
+		__entry->intvl = intvl;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rate: %u, packets: %u, interval: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts,
+		  __entry->intvl)
+);
+
+TRACE_EVENT(rdev_disconnect,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 u16 reason_code),
+	TP_ARGS(wiphy, netdev, reason_code),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u16, reason_code)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->reason_code = reason_code;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", reason code: %u", WIPHY_PR_ARG,
+		  NETDEV_PR_ARG, __entry->reason_code)
+);
+
+TRACE_EVENT(rdev_join_ibss,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_ibss_params *params),
+	TP_ARGS(wiphy, netdev, params),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		__array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(bssid, params->bssid);
+		memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
+		memcpy(__entry->ssid, params->ssid, params->ssid_len);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid)
+);
+
+TRACE_EVENT(rdev_set_wiphy_params,
+	TP_PROTO(struct wiphy *wiphy, u32 changed),
+	TP_ARGS(wiphy, changed),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(u32, changed)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->changed = changed;
+	),
+	TP_printk(WIPHY_PR_FMT ", changed: %u",
+		  WIPHY_PR_ARG, __entry->changed)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_set_tx_power,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 enum nl80211_tx_power_setting type, int mbm),
+	TP_ARGS(wiphy, wdev, type, mbm),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		__field(enum nl80211_tx_power_setting, type)
+		__field(int, mbm)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		__entry->type = type;
+		__entry->mbm = mbm;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->type, __entry->mbm)
+);
+
+TRACE_EVENT(rdev_return_int_int,
+	TP_PROTO(struct wiphy *wiphy, int func_ret, int func_fill),
+	TP_ARGS(wiphy, func_ret, func_fill),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, func_ret)
+		__field(int, func_fill)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->func_ret = func_ret;
+		__entry->func_fill = func_fill;
+	),
+	TP_printk(WIPHY_PR_FMT ", function returns: %d, function filled: %d",
+		  WIPHY_PR_ARG, __entry->func_ret, __entry->func_fill)
+);
+
+#ifdef CONFIG_NL80211_TESTMODE
+TRACE_EVENT(rdev_testmode_cmd,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+
+TRACE_EVENT(rdev_testmode_dump,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+#endif /* CONFIG_NL80211_TESTMODE */
+
+TRACE_EVENT(rdev_set_bitrate_mask,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 const u8 *peer, const struct cfg80211_bitrate_mask *mask),
+	TP_ARGS(wiphy, netdev, peer, mask),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
+TRACE_EVENT(rdev_mgmt_frame_register,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 u16 frame_type, bool reg),
+	TP_ARGS(wiphy, wdev, frame_type, reg),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		__field(u16, frame_type)
+		__field(bool, reg)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		__entry->frame_type = frame_type;
+		__entry->reg = reg;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", frame_type: 0x%.2x, reg: %s ",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->frame_type,
+		  __entry->reg ? "true" : "false")
+);
+
+TRACE_EVENT(rdev_return_int_tx_rx,
+	TP_PROTO(struct wiphy *wiphy, int ret, u32 tx, u32 rx),
+	TP_ARGS(wiphy, ret, tx, rx),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+		__field(u32, tx)
+		__field(u32, rx)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->ret = ret;
+		__entry->tx = tx;
+		__entry->rx = rx;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned %d, tx: %u, rx: %u",
+		  WIPHY_PR_ARG, __entry->ret, __entry->tx, __entry->rx)
+);
+
+TRACE_EVENT(rdev_return_void_tx_rx,
+	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 tx_max,
+		 u32 rx, u32 rx_max),
+	TP_ARGS(wiphy, tx, tx_max, rx, rx_max),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(u32, tx)
+		__field(u32, tx_max)
+		__field(u32, rx)
+		__field(u32, rx_max)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->tx = tx;
+		__entry->tx_max = tx_max;
+		__entry->rx = rx;
+		__entry->rx_max = rx_max;
+	),
+	TP_printk(WIPHY_PR_FMT ", tx: %u, tx_max: %u, rx: %u, rx_max: %u ",
+		  WIPHY_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx,
+		  __entry->rx_max)
+);
+
+DECLARE_EVENT_CLASS(tx_rx_evt,
+	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+	TP_ARGS(wiphy, tx, rx),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(u32, tx)
+		__field(u32, rx)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->tx = tx;
+		__entry->rx = rx;
+	),
+	TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ",
+		  WIPHY_PR_ARG, __entry->tx, __entry->rx)
+);
+
+DEFINE_EVENT(tx_rx_evt, rdev_set_ringparam,
+	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+	TP_ARGS(wiphy, tx, rx)
+);
+
+DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
+	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+	TP_ARGS(wiphy, tx, rx)
+);
+
+TRACE_EVENT(rdev_sched_scan_start,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_sched_scan_request *request),
+	TP_ARGS(wiphy, netdev, request),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
+TRACE_EVENT(rdev_tdls_mgmt,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 u8 *peer, u8 action_code, u8 dialog_token,
+		 u16 status_code, const u8 *buf, size_t len),
+	TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
+		buf, len),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+		__field(u8, action_code)
+		__field(u8, dialog_token)
+		__field(u16, status_code)
+		__dynamic_array(u8, buf, len)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+		__entry->action_code = action_code;
+		__entry->dialog_token = dialog_token;
+		__entry->status_code = status_code;
+		memcpy(__get_dynamic_array(buf), buf, len);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, "
+		  "dialog_token: %u, status_code: %u, buf: %#.2x ",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+		  __entry->action_code, __entry->dialog_token,
+		  __entry->status_code, ((u8 *)__get_dynamic_array(buf))[0])
+);
+
+TRACE_EVENT(rdev_dump_survey,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx),
+	TP_ARGS(wiphy, netdev, idx),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(int, idx)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->idx = idx;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx)
+);
+
+TRACE_EVENT(rdev_return_int_survey_info,
+	TP_PROTO(struct wiphy *wiphy, int ret, struct survey_info *info),
+	TP_ARGS(wiphy, ret, info),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		CHAN_ENTRY
+		__field(int, ret)
+		__field(u64, channel_time)
+		__field(u64, channel_time_busy)
+		__field(u64, channel_time_ext_busy)
+		__field(u64, channel_time_rx)
+		__field(u64, channel_time_tx)
+		__field(u32, filled)
+		__field(s8, noise)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		CHAN_ASSIGN(info->channel);
+		__entry->ret = ret;
+		__entry->channel_time = info->channel_time;
+		__entry->channel_time_busy = info->channel_time_busy;
+		__entry->channel_time_ext_busy = info->channel_time_ext_busy;
+		__entry->channel_time_rx = info->channel_time_rx;
+		__entry->channel_time_tx = info->channel_time_tx;
+		__entry->filled = info->filled;
+		__entry->noise = info->noise;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT
+		  ", channel time: %llu, channel time busy: %llu, "
+		  "channel time extension busy: %llu, channel time rx: %llu, "
+		  "channel time tx: %llu, filled: %u, noise: %d",
+		  WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG,
+		  __entry->channel_time, __entry->channel_time_busy,
+		  __entry->channel_time_ext_busy, __entry->channel_time_rx,
+		  __entry->channel_time_tx, __entry->filled, __entry->noise)
+);
+
+TRACE_EVENT(rdev_tdls_oper,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 u8 *peer, enum nl80211_tdls_operation oper),
+	TP_ARGS(wiphy, netdev, peer, oper),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+		__field(enum nl80211_tdls_operation, oper)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+		__entry->oper = oper;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", oper: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper)
+);
+
+DECLARE_EVENT_CLASS(rdev_pmksa,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_pmksa *pmksa),
+	TP_ARGS(wiphy, netdev, pmksa),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(bssid, pmksa->bssid);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid))
+);
+
+TRACE_EVENT(rdev_probe_client,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 const u8 *peer),
+	TP_ARGS(wiphy, netdev, peer),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
+DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_pmksa *pmksa),
+	TP_ARGS(wiphy, netdev, pmksa)
+);
+
+DEFINE_EVENT(rdev_pmksa, rdev_del_pmksa,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_pmksa *pmksa),
+	TP_ARGS(wiphy, netdev, pmksa)
+);
+
+TRACE_EVENT(rdev_remain_on_channel,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 struct ieee80211_channel *chan,
+		 unsigned int duration),
+	TP_ARGS(wiphy, wdev, chan, duration),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		CHAN_ENTRY
+		__field(unsigned int, duration)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		CHAN_ASSIGN(chan);
+		__entry->duration = duration;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", duration: %u",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, __entry->duration)
+);
+
+TRACE_EVENT(rdev_return_int_cookie,
+	TP_PROTO(struct wiphy *wiphy, int ret, u64 cookie),
+	TP_ARGS(wiphy, ret, cookie),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+		__field(u64, cookie)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->ret = ret;
+		__entry->cookie = cookie;
+	),
+	TP_printk(WIPHY_PR_FMT ", returned %d, cookie: %llu",
+		  WIPHY_PR_ARG, __entry->ret, __entry->cookie)
+);
+
+TRACE_EVENT(rdev_cancel_remain_on_channel,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
+	TP_ARGS(wiphy, wdev, cookie),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		__field(u64, cookie)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		__entry->cookie = cookie;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie)
+);
+
+TRACE_EVENT(rdev_mgmt_tx,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 struct ieee80211_channel *chan, bool offchan,
+		 unsigned int wait, bool no_cck, bool dont_wait_for_ack),
+	TP_ARGS(wiphy, wdev, chan, offchan, wait, no_cck, dont_wait_for_ack),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		CHAN_ENTRY
+		__field(bool, offchan)
+		__field(unsigned int, wait)
+		__field(bool, no_cck)
+		__field(bool, dont_wait_for_ack)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		CHAN_ASSIGN(chan);
+		__entry->offchan = offchan;
+		__entry->wait = wait;
+		__entry->no_cck = no_cck;
+		__entry->dont_wait_for_ack = dont_wait_for_ack;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", offchan: %s,"
+		  " wait: %u, no cck: %s, dont wait for ack: %s",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG,
+		  BOOL_TO_STR(__entry->offchan), __entry->wait,
+		  BOOL_TO_STR(__entry->no_cck),
+		  BOOL_TO_STR(__entry->dont_wait_for_ack))
+);
+
+TRACE_EVENT(rdev_set_noack_map,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 u16 noack_map),
+	TP_ARGS(wiphy, netdev, noack_map),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u16, noack_map)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->noack_map = noack_map;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
+);
+
+TRACE_EVENT(rdev_get_et_sset_count,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int sset),
+	TP_ARGS(wiphy, netdev, sset),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(int, sset)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->sset = sset;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %d",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
+);
+
+TRACE_EVENT(rdev_get_et_strings,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 sset),
+	TP_ARGS(wiphy, netdev, sset),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u32, sset)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->sset = sset;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_return_chandef,
+	TP_PROTO(struct wiphy *wiphy, int ret,
+		 struct cfg80211_chan_def *chandef),
+	TP_ARGS(wiphy, ret, chandef),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, ret)
+		CHAN_DEF_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
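+		/* an error return means there is no valid chandef to record */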
+		if (ret == 0)
+			CHAN_DEF_ASSIGN(chandef);
+		else
+			CHAN_DEF_ASSIGN((struct cfg80211_chan_def *)NULL);
+		__entry->ret = ret;
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", ret: %d",
+		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->ret)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_start_p2p_device,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev)
+);
+
+/*************************************************************
+ *	     cfg80211 exported functions traces		     *
+ *************************************************************/
+
+TRACE_EVENT(cfg80211_return_bool,
+	TP_PROTO(bool ret),
+	TP_ARGS(ret),
+	TP_STRUCT__entry(
+		__field(bool, ret)
+	),
+	TP_fast_assign(
+		__entry->ret = ret;
+	),
+	TP_printk("returned %s", BOOL_TO_STR(__entry->ret))
+);
+
+DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt,
+	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
+	TP_ARGS(netdev, macaddr),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(macaddr)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(macaddr, macaddr);
+	),
+	TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
+		  NETDEV_PR_ARG, MAC_PR_ARG(macaddr))
+);
+
+DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate,
+	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
+	TP_ARGS(netdev, macaddr)
+);
+
+DECLARE_EVENT_CLASS(netdev_evt_only,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+	),
+	TP_printk(NETDEV_PR_FMT, NETDEV_PR_ARG)
+);
+
+DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev)
+);
+
+TRACE_EVENT(cfg80211_send_rx_assoc,
+	TP_PROTO(struct net_device *netdev, struct cfg80211_bss *bss),
+	TP_ARGS(netdev, bss),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(bssid)
+		CHAN_ENTRY
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(bssid, bss->bssid);
+		CHAN_ASSIGN(bss->channel);
+	),
+	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", " CHAN_PR_FMT,
+		  NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+);
+
+DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev)
+);
+
+DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev)
+);
+
+DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev)
+);
+
+DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc,
+	TP_PROTO(struct net_device *netdev),
+	TP_ARGS(netdev)
+);
+
+DECLARE_EVENT_CLASS(netdev_mac_evt,
+	TP_PROTO(struct net_device *netdev, const u8 *mac),
+	TP_ARGS(netdev, mac),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(mac)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(mac, mac);
+	),
+	TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
+		  NETDEV_PR_ARG, MAC_PR_ARG(mac))
+);
+
+DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout,
+	TP_PROTO(struct net_device *netdev, const u8 *mac),
+	TP_ARGS(netdev, mac)
+);
+
+DEFINE_EVENT(netdev_mac_evt, cfg80211_send_assoc_timeout,
+	TP_PROTO(struct net_device *netdev, const u8 *mac),
+	TP_ARGS(netdev, mac)
+);
+
+TRACE_EVENT(cfg80211_michael_mic_failure,
+	TP_PROTO(struct net_device *netdev, const u8 *addr,
+		 enum nl80211_key_type key_type, int key_id, const u8 *tsc),
+	TP_ARGS(netdev, addr, key_type, key_id, tsc),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(addr)
+		__field(enum nl80211_key_type, key_type)
+		__field(int, key_id)
+		__array(u8, tsc, 6)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(addr, addr);
+		__entry->key_type = key_type;
+		__entry->key_id = key_id;
+		memcpy(__entry->tsc, tsc, 6);
+	),
+	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
+		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
+		  __entry->key_id, __entry->tsc)
+);
+
+TRACE_EVENT(cfg80211_ready_on_channel,
+	TP_PROTO(struct wireless_dev *wdev, u64 cookie,
+		 struct ieee80211_channel *chan,
+		 unsigned int duration),
+	TP_ARGS(wdev, cookie, chan, duration),
+	TP_STRUCT__entry(
+		WDEV_ENTRY
+		__field(u64, cookie)
+		CHAN_ENTRY
+		__field(unsigned int, duration)
+	),
+	TP_fast_assign(
+		WDEV_ASSIGN;
+		__entry->cookie = cookie;
+		CHAN_ASSIGN(chan);
+		__entry->duration = duration;
+	),
+	TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT ", duration: %u",
+		  WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG,
+		  __entry->duration)
+);
+
+TRACE_EVENT(cfg80211_ready_on_channel_expired,
+	TP_PROTO(struct wireless_dev *wdev, u64 cookie,
+		 struct ieee80211_channel *chan),
+	TP_ARGS(wdev, cookie, chan),
+	TP_STRUCT__entry(
+		WDEV_ENTRY
+		__field(u64, cookie)
+		CHAN_ENTRY
+	),
+	TP_fast_assign(
+		WDEV_ASSIGN;
+		__entry->cookie = cookie;
+		CHAN_ASSIGN(chan);
+	),
+	TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT,
+		  WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_new_sta,
+	TP_PROTO(struct net_device *netdev, const u8 *mac_addr,
+		 struct station_info *sinfo),
+	TP_ARGS(netdev, mac_addr, sinfo),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(mac_addr)
+		SINFO_ENTRY
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(mac_addr, mac_addr);
+		SINFO_ASSIGN;
+	),
+	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT,
+		  NETDEV_PR_ARG, MAC_PR_ARG(mac_addr))
+);
+
+DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
+	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
+	TP_ARGS(netdev, macaddr)
+);
+
+TRACE_EVENT(cfg80211_rx_mgmt,
+	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
+	TP_ARGS(wdev, freq, sig_mbm),
+	TP_STRUCT__entry(
+		WDEV_ENTRY
+		__field(int, freq)
+		__field(int, sig_mbm)
+	),
+	TP_fast_assign(
+		WDEV_ASSIGN;
+		__entry->freq = freq;
+		__entry->sig_mbm = sig_mbm;
+	),
+	TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
+		  WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
+);
+
+TRACE_EVENT(cfg80211_mgmt_tx_status,
+	TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack),
+	TP_ARGS(wdev, cookie, ack),
+	TP_STRUCT__entry(
+		WDEV_ENTRY
+		__field(u64, cookie)
+		__field(bool, ack)
+	),
+	TP_fast_assign(
+		WDEV_ASSIGN;
+		__entry->cookie = cookie;
+		__entry->ack = ack;
+	),
+	TP_printk(WDEV_PR_FMT", cookie: %llu, ack: %s",
+		  WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack))
+);
+
+TRACE_EVENT(cfg80211_cqm_rssi_notify,
+	TP_PROTO(struct net_device *netdev,
+		 enum nl80211_cqm_rssi_threshold_event rssi_event),
+	TP_ARGS(netdev, rssi_event),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		__field(enum nl80211_cqm_rssi_threshold_event, rssi_event)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		__entry->rssi_event = rssi_event;
+	),
+	TP_printk(NETDEV_PR_FMT ", rssi event: %d",
+		  NETDEV_PR_ARG, __entry->rssi_event)
+);
+
+TRACE_EVENT(cfg80211_reg_can_beacon,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+	TP_ARGS(wiphy, chandef),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		CHAN_DEF_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		CHAN_DEF_ASSIGN(chandef);
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_ch_switch_notify,
+	TP_PROTO(struct net_device *netdev,
+		 struct cfg80211_chan_def *chandef),
+	TP_ARGS(netdev, chandef),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		CHAN_DEF_ENTRY
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		CHAN_DEF_ASSIGN(chandef);
+	),
+	TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+		  NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(cfg80211_rx_evt,
+	TP_PROTO(struct net_device *netdev, const u8 *addr),
+	TP_ARGS(netdev, addr),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(addr)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(addr, addr);
+	),
+	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+);
+
+DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
+	TP_PROTO(struct net_device *netdev, const u8 *addr),
+	TP_ARGS(netdev, addr)
+);
+
+DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
+	TP_PROTO(struct net_device *netdev, const u8 *addr),
+	TP_ARGS(netdev, addr)
+);
+
+DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
+	TP_PROTO(struct net_device *netdev, const u8 *addr),
+	TP_ARGS(netdev, addr)
+);
+
+TRACE_EVENT(cfg80211_probe_status,
+	TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
+		 bool acked),
+	TP_ARGS(netdev, addr, cookie, acked),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(addr)
+		__field(u64, cookie)
+		__field(bool, acked)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(addr, addr);
+		__entry->cookie = cookie;
+		__entry->acked = acked;
+	),
+	TP_printk(NETDEV_PR_FMT " addr:" MAC_PR_FMT ", cookie: %llu, acked: %s",
+		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->cookie,
+		  BOOL_TO_STR(__entry->acked))
+);
+
+TRACE_EVENT(cfg80211_cqm_pktloss_notify,
+	TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets),
+	TP_ARGS(netdev, peer, num_packets),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+		__field(u32, num_packets)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+		__entry->num_packets = num_packets;
+	),
+	TP_printk(NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", num of lost packets: %u",
+		  NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets)
+);
+
+DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify,
+	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
+	TP_ARGS(netdev, macaddr)
+);
+
+TRACE_EVENT(cfg80211_pmksa_candidate_notify,
+	TP_PROTO(struct net_device *netdev, int index, const u8 *bssid,
+		 bool preauth),
+	TP_ARGS(netdev, index, bssid, preauth),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		__field(int, index)
+		MAC_ENTRY(bssid)
+		__field(bool, preauth)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		__entry->index = index;
+		MAC_ASSIGN(bssid, bssid);
+		__entry->preauth = preauth;
+	),
+	TP_printk(NETDEV_PR_FMT ", index:%d, bssid: " MAC_PR_FMT ", pre auth: %s",
+		  NETDEV_PR_ARG, __entry->index, MAC_PR_ARG(bssid),
+		  BOOL_TO_STR(__entry->preauth))
+);
+
+TRACE_EVENT(cfg80211_report_obss_beacon,
+	TP_PROTO(struct wiphy *wiphy, const u8 *frame, size_t len,
+		 int freq, int sig_dbm),
+	TP_ARGS(wiphy, frame, len, freq, sig_dbm),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		__field(int, freq)
+		__field(int, sig_dbm)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		__entry->freq = freq;
+		__entry->sig_dbm = sig_dbm;
+	),
+	TP_printk(WIPHY_PR_FMT ", freq: %d, sig_dbm: %d",
+		  WIPHY_PR_ARG, __entry->freq, __entry->sig_dbm)
+);
+
+TRACE_EVENT(cfg80211_tdls_oper_request,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer,
+		 enum nl80211_tdls_operation oper, u16 reason_code),
+	TP_ARGS(wiphy, netdev, peer, oper, reason_code),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		MAC_ENTRY(peer)
+		__field(enum nl80211_tdls_operation, oper)
+		__field(u16, reason_code)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		MAC_ASSIGN(peer, peer);
+		__entry->oper = oper;
+		__entry->reason_code = reason_code;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", oper: %d, reason_code %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper,
+		  __entry->reason_code)
+);
+
+TRACE_EVENT(cfg80211_scan_done,
+	TP_PROTO(struct cfg80211_scan_request *request, bool aborted),
+	TP_ARGS(request, aborted),
+	TP_STRUCT__entry(
+		__field(u32, n_channels)
+		__dynamic_array(u8, ie, request ? request->ie_len : 0)
+		__array(u32, rates, IEEE80211_NUM_BANDS)
+		__field(u32, wdev_id)
+		MAC_ENTRY(wiphy_mac)
+		__field(bool, no_cck)
+		__field(bool, aborted)
+	),
+	TP_fast_assign(
+		if (request) {
+			memcpy(__get_dynamic_array(ie), request->ie,
+			       request->ie_len);
+			memcpy(__entry->rates, request->rates,
+			       IEEE80211_NUM_BANDS);
+			__entry->wdev_id = request->wdev ?
+					request->wdev->identifier : 0;
+			if (request->wiphy)
+				MAC_ASSIGN(wiphy_mac,
+					   request->wiphy->perm_addr);
+			__entry->no_cck = request->no_cck;
+		}
+		__entry->aborted = aborted;
+	),
+	TP_printk("aborted: %s", BOOL_TO_STR(__entry->aborted))
+);
+
+DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_results,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped,
+	TP_PROTO(struct wiphy *wiphy),
+	TP_ARGS(wiphy)
+);
+
+TRACE_EVENT(cfg80211_get_bss,
+	TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+		 const u8 *bssid, const u8 *ssid, size_t ssid_len,
+		 u16 capa_mask, u16 capa_val),
+	TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, capa_mask, capa_val),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		CHAN_ENTRY
+		MAC_ENTRY(bssid)
+		__dynamic_array(u8, ssid, ssid_len)
+		__field(u16, capa_mask)
+		__field(u16, capa_val)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		CHAN_ASSIGN(channel);
+		MAC_ASSIGN(bssid, bssid);
+		memcpy(__get_dynamic_array(ssid), ssid, ssid_len);
+		__entry->capa_mask = capa_mask;
+		__entry->capa_val = capa_val;
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", buf: %#.2x, "
+		  "capa_mask: %d, capa_val: %u", WIPHY_PR_ARG, CHAN_PR_ARG,
+		  MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0],
+		  __entry->capa_mask, __entry->capa_val)
+);
+
+TRACE_EVENT(cfg80211_inform_bss_frame,
+	TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+		 struct ieee80211_mgmt *mgmt, size_t len,
+		 s32 signal),
+	TP_ARGS(wiphy, channel, mgmt, len, signal),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		CHAN_ENTRY
+		__dynamic_array(u8, mgmt, len)
+		__field(s32, signal)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		CHAN_ASSIGN(channel);
+		if (mgmt)
+			memcpy(__get_dynamic_array(mgmt), mgmt, len);
+		__entry->signal = signal;
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
+		  WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
+);
+
+DECLARE_EVENT_CLASS(cfg80211_bss_evt,
+	TP_PROTO(struct cfg80211_bss *pub),
+	TP_ARGS(pub),
+	TP_STRUCT__entry(
+		MAC_ENTRY(bssid)
+		CHAN_ENTRY
+	),
+	TP_fast_assign(
+		MAC_ASSIGN(bssid, pub->bssid);
+		CHAN_ASSIGN(pub->channel);
+	),
+	TP_printk(MAC_PR_FMT ", " CHAN_PR_FMT, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+);
+
+DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss,
+	TP_PROTO(struct cfg80211_bss *pub),
+	TP_ARGS(pub)
+);
+
+TRACE_EVENT(cfg80211_return_uint,
+	TP_PROTO(unsigned int ret),
+	TP_ARGS(ret),
+	TP_STRUCT__entry(
+		__field(unsigned int, ret)
+	),
+	TP_fast_assign(
+		__entry->ret = ret;
+	),
+	TP_printk("ret: %d", __entry->ret)
+);
+
+TRACE_EVENT(cfg80211_return_u32,
+	TP_PROTO(u32 ret),
+	TP_ARGS(ret),
+	TP_STRUCT__entry(
+		__field(u32, ret)
+	),
+	TP_fast_assign(
+		__entry->ret = ret;
+	),
+	TP_printk("ret: %u", __entry->ret)
+);
+
+#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2762e83..3cce6e4 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -11,6 +11,8 @@
 #include <net/ip.h>
 #include <net/dsfield.h>
 #include "core.h"
+#include "rdev-ops.h"
+
 
 struct ieee80211_rate *
 ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
@@ -705,19 +707,18 @@
 	for (i = 0; i < 6; i++) {
 		if (!wdev->connect_keys->params[i].cipher)
 			continue;
-		if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL,
-					&wdev->connect_keys->params[i])) {
+		if (rdev_add_key(rdev, dev, i, false, NULL,
+				 &wdev->connect_keys->params[i])) {
 			netdev_err(dev, "failed to set key %d\n", i);
 			continue;
 		}
 		if (wdev->connect_keys->def == i)
-			if (rdev->ops->set_default_key(wdev->wiphy, dev,
-						       i, true, true)) {
+			if (rdev_set_default_key(rdev, dev, i, true, true)) {
 				netdev_err(dev, "failed to set defkey %d\n", i);
 				continue;
 			}
 		if (wdev->connect_keys->defmgmt == i)
-			if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i))
+			if (rdev_set_default_mgmt_key(rdev, dev, i))
 				netdev_err(dev, "failed to set mgtdef %d\n", i);
 	}
 
@@ -850,8 +851,7 @@
 		cfg80211_process_rdev_events(rdev);
 	}
 
-	err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev,
-					     ntype, flags, params);
+	err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params);
 
 	WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
 
@@ -944,14 +944,86 @@
 	return __mcs2bitrate[rate->mcs];
 }
 
+static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
+{
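+	/* Base bitrates in bit/s for one spatial stream: rows are the
+	 * 20/40/80/160 (incl. 80+80) MHz widths, columns are VHT MCS 0-9;
+	 * a zero entry marks an MCS that is not valid at that width.
+	 */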
+	static const u32 base[4][10] = {
+		{   6500000,
+		   13000000,
+		   19500000,
+		   26000000,
+		   39000000,
+		   52000000,
+		   58500000,
+		   65000000,
+		   78000000,
+		   0,
+		},
+		{  13500000,
+		   27000000,
+		   40500000,
+		   54000000,
+		   81000000,
+		  108000000,
+		  121500000,
+		  135000000,
+		  162000000,
+		  180000000,
+		},
+		{  29300000,
+		   58500000,
+		   87800000,
+		  117000000,
+		  175500000,
+		  234000000,
+		  263300000,
+		  292500000,
+		  351000000,
+		  390000000,
+		},
+		{  58500000,
+		  117000000,
+		  175500000,
+		  234000000,
+		  351000000,
+		  468000000,
+		  526500000,
+		  585000000,
+		  702000000,
+		  780000000,
+		},
+	};
+	u32 bitrate;
+	int idx;
+
+	if (WARN_ON_ONCE(rate->mcs > 9))
+		return 0;
+
+	idx = rate->flags & (RATE_INFO_FLAGS_160_MHZ_WIDTH |
+			     RATE_INFO_FLAGS_80P80_MHZ_WIDTH) ? 3 :
+		  rate->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH ? 2 :
+		  rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH ? 1 : 0;
+
+	bitrate = base[idx][rate->mcs];
+	bitrate *= rate->nss;
+
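+	/* a short (400 ns) guard interval scales the rate by 10/9 */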
+	if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+		bitrate = (bitrate / 9) * 10;
+
+	/* do NOT round down here */
+	return (bitrate + 50000) / 100000;
+}
+
 u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
 	int modulation, streams, bitrate;
 
-	if (!(rate->flags & RATE_INFO_FLAGS_MCS))
+	if (!(rate->flags & RATE_INFO_FLAGS_MCS) &&
+	    !(rate->flags & RATE_INFO_FLAGS_VHT_MCS))
 		return rate->legacy;
 	if (rate->flags & RATE_INFO_FLAGS_60G)
 		return cfg80211_calculate_bitrate_60g(rate);
+	if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+		return cfg80211_calculate_bitrate_vht(rate);
 
 	/* the formula below does only work for MCS values smaller than 32 */
 	if (WARN_ON_ONCE(rate->mcs >= 32))
@@ -980,6 +1052,106 @@
 }
 EXPORT_SYMBOL(cfg80211_calculate_bitrate);
 
+int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
+			  enum ieee80211_p2p_attr_id attr,
+			  u8 *buf, unsigned int bufsize)
+{
+	u8 *out = buf;
+	u16 attr_remaining = 0;
+	bool desired_attr = false;
+	u16 desired_len = 0;
+
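+	/* A P2P attribute may be fragmented across several consecutive WFA
+	 * vendor IEs; attr_remaining carries the number of attribute bytes
+	 * still expected in the next IE.
+	 */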
+	while (len > 0) {
+		unsigned int iedatalen;
+		unsigned int copy;
+		const u8 *iedata;
+
+		if (len < 2)
+			return -EILSEQ;
+		iedatalen = ies[1];
+		if (iedatalen + 2 > len)
+			return -EILSEQ;
+
+		if (ies[0] != WLAN_EID_VENDOR_SPECIFIC)
+			goto cont;
+
+		if (iedatalen < 4)
+			goto cont;
+
+		iedata = ies + 2;
+
+		/* check WFA OUI, P2P subtype */
+		if (iedata[0] != 0x50 || iedata[1] != 0x6f ||
+		    iedata[2] != 0x9a || iedata[3] != 0x09)
+			goto cont;
+
+		iedatalen -= 4;
+		iedata += 4;
+
+		/* check attribute continuation into this IE */
+		copy = min_t(unsigned int, attr_remaining, iedatalen);
+		if (copy && desired_attr) {
+			desired_len += copy;
+			if (out) {
+				memcpy(out, iedata, min(bufsize, copy));
+				out += min(bufsize, copy);
+				bufsize -= min(bufsize, copy);
+			}
+
+			if (copy == attr_remaining)
+				return desired_len;
+		}
+
+		attr_remaining -= copy;
+		if (attr_remaining)
+			goto cont;
+
+		iedatalen -= copy;
+		iedata += copy;
+
+		while (iedatalen > 0) {
+			u16 attr_len;
+
+			/* P2P attribute ID & size must fit */
+			if (iedatalen < 3)
+				return -EILSEQ;
+			desired_attr = iedata[0] == attr;
+			attr_len = get_unaligned_le16(iedata + 1);
+			iedatalen -= 3;
+			iedata += 3;
+
+			copy = min_t(unsigned int, attr_len, iedatalen);
+
+			if (desired_attr) {
+				desired_len += copy;
+				if (out) {
+					memcpy(out, iedata, min(bufsize, copy));
+					out += min(bufsize, copy);
+					bufsize -= min(bufsize, copy);
+				}
+
+				if (copy == attr_len)
+					return desired_len;
+			}
+
+			iedata += copy;
+			iedatalen -= copy;
+			attr_remaining = attr_len - copy;
+		}
+
+ cont:
+		len -= ies[1] + 2;
+		ies += ies[1] + 2;
+	}
+
+	if (attr_remaining && desired_attr)
+		return -EILSEQ;
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL(cfg80211_get_p2p_attr);
+
 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 				 u32 beacon_int)
 {
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 494379e..f9680c9 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -19,6 +19,7 @@
 #include <net/cfg80211-wext.h>
 #include "wext-compat.h"
 #include "core.h"
+#include "rdev-ops.h"
 
 int cfg80211_wext_giwname(struct net_device *dev,
 			  struct iw_request_info *info,
@@ -301,8 +302,7 @@
 	else
 		wdev->wiphy->rts_threshold = rts->value;
 
-	err = rdev->ops->set_wiphy_params(wdev->wiphy,
-					  WIPHY_PARAM_RTS_THRESHOLD);
+	err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD);
 	if (err)
 		wdev->wiphy->rts_threshold = orts;
 
@@ -342,8 +342,7 @@
 		wdev->wiphy->frag_threshold = frag->value & ~0x1;
 	}
 
-	err = rdev->ops->set_wiphy_params(wdev->wiphy,
-					  WIPHY_PARAM_FRAG_THRESHOLD);
+	err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD);
 	if (err)
 		wdev->wiphy->frag_threshold = ofrag;
 
@@ -396,7 +395,7 @@
 	if (!changed)
 		return 0;
 
-	err = rdev->ops->set_wiphy_params(wdev->wiphy, changed);
+	err = rdev_set_wiphy_params(rdev, changed);
 	if (err) {
 		wdev->wiphy->retry_short = oshort;
 		wdev->wiphy->retry_long = olong;
@@ -490,8 +489,8 @@
 			    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
 				err = -ENOENT;
 			else
-				err = rdev->ops->del_key(&rdev->wiphy, dev, idx,
-							 pairwise, addr);
+				err = rdev_del_key(rdev, dev, idx, pairwise,
+						   addr);
 		}
 		wdev->wext.connect.privacy = false;
 		/*
@@ -525,8 +524,7 @@
 
 	err = 0;
 	if (wdev->current_bss)
-		err = rdev->ops->add_key(&rdev->wiphy, dev, idx,
-					 pairwise, addr, params);
+		err = rdev_add_key(rdev, dev, idx, pairwise, addr, params);
 	if (err)
 		return err;
 
@@ -552,8 +550,7 @@
 				__cfg80211_leave_ibss(rdev, wdev->netdev, true);
 				rejoin = true;
 			}
-			err = rdev->ops->set_default_key(&rdev->wiphy, dev,
-							 idx, true, true);
+			err = rdev_set_default_key(rdev, dev, idx, true, true);
 		}
 		if (!err) {
 			wdev->wext.default_key = idx;
@@ -566,8 +563,7 @@
 	if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
 	    (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
 		if (wdev->current_bss)
-			err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
-							      dev, idx);
+			err = rdev_set_default_mgmt_key(rdev, dev, idx);
 		if (!err)
 			wdev->wext.default_mgmt_key = idx;
 		return err;
@@ -631,8 +627,8 @@
 		err = 0;
 		wdev_lock(wdev);
 		if (wdev->current_bss)
-			err = rdev->ops->set_default_key(&rdev->wiphy, dev,
-							 idx, true, true);
+			err = rdev_set_default_key(rdev, dev, idx, true,
+						   true);
 		if (!err)
 			wdev->wext.default_key = idx;
 		wdev_unlock(wdev);
@@ -788,6 +784,9 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_chan_def chandef = {
+		.width = NL80211_CHAN_WIDTH_20_NOHT,
+	};
 	int freq, err;
 
 	switch (wdev->iftype) {
@@ -801,8 +800,12 @@
 			return freq;
 		if (freq == 0)
 			return -EINVAL;
+		chandef.center_freq1 = freq;
+		chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
+		if (!chandef.chan)
+			return -EINVAL;
 		mutex_lock(&rdev->devlist_mtx);
-		err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
+		err = cfg80211_set_monitor_channel(rdev, &chandef);
 		mutex_unlock(&rdev->devlist_mtx);
 		return err;
 	case NL80211_IFTYPE_MESH_POINT:
@@ -811,9 +814,12 @@
 			return freq;
 		if (freq == 0)
 			return -EINVAL;
+		chandef.center_freq1 = freq;
+		chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
+		if (!chandef.chan)
+			return -EINVAL;
 		mutex_lock(&rdev->devlist_mtx);
-		err = cfg80211_set_mesh_freq(rdev, wdev, freq,
-					     NL80211_CHAN_NO_HT);
+		err = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
 		mutex_unlock(&rdev->devlist_mtx);
 		return err;
 	default:
@@ -827,8 +833,8 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
-	struct ieee80211_channel *chan;
-	enum nl80211_channel_type channel_type;
+	struct cfg80211_chan_def chandef;
+	int ret;
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
@@ -839,10 +845,10 @@
 		if (!rdev->ops->get_channel)
 			return -EINVAL;
 
-		chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type);
-		if (!chan)
-			return -EINVAL;
-		freq->m = chan->center_freq;
+		ret = rdev_get_channel(rdev, wdev, &chandef);
+		if (ret)
+			return ret;
+		freq->m = chandef.chan->center_freq;
 		freq->e = 6;
 		return 0;
 	default:
@@ -899,7 +905,7 @@
 		return 0;
 	}
 
-	return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
+	return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
 }
 
 static int cfg80211_wext_giwtxpower(struct net_device *dev,
@@ -918,7 +924,7 @@
 	if (!rdev->ops->get_tx_power)
 		return -EOPNOTSUPP;
 
-	err = rdev->ops->get_tx_power(wdev->wiphy, &val);
+	err = rdev_get_tx_power(rdev, wdev, &val);
 	if (err)
 		return err;
 
@@ -1158,7 +1164,7 @@
 			timeout = wrq->value / 1000;
 	}
 
-	err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout);
+	err = rdev_set_power_mgmt(rdev, dev, ps, timeout);
 	if (err)
 		return err;
 
@@ -1200,7 +1206,7 @@
 	if (!rdev->ops->set_wds_peer)
 		return -EOPNOTSUPP;
 
-	err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data);
+	err = rdev_set_wds_peer(rdev, dev, (u8 *)&addr->sa_data);
 	if (err)
 		return err;
 
@@ -1272,7 +1278,7 @@
 	if (!match)
 		return -EINVAL;
 
-	return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
+	return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
 }
 
 static int cfg80211_wext_giwrate(struct net_device *dev,
@@ -1302,7 +1308,7 @@
 	if (err)
 		return err;
 
-	err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo);
+	err = rdev_get_station(rdev, dev, addr, &sinfo);
 	if (err)
 		return err;
 
@@ -1339,7 +1345,7 @@
 	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
 	wdev_unlock(wdev);
 
-	if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo))
+	if (rdev_get_station(rdev, dev, bssid, &sinfo))
 		return NULL;
 
 	memset(&wstats, 0, sizeof(wstats));
@@ -1474,19 +1480,19 @@
 		if (!rdev->ops->set_pmksa)
 			return -EOPNOTSUPP;
 
-		return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
+		return rdev_set_pmksa(rdev, dev, &cfg_pmksa);
 
 	case IW_PMKSA_REMOVE:
 		if (!rdev->ops->del_pmksa)
 			return -EOPNOTSUPP;
 
-		return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
+		return rdev_del_pmksa(rdev, dev, &cfg_pmksa);
 
 	case IW_PMKSA_FLUSH:
 		if (!rdev->ops->flush_pmksa)
 			return -EOPNOTSUPP;
 
-		return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
+		return rdev_flush_pmksa(rdev, dev);
 
 	default:
 		return -EOPNOTSUPP;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 1f773f6..873af63 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -119,7 +119,16 @@
 	 * channel we disconnected above and reconnect below.
 	 */
 	if (chan && !wdev->wext.connect.ssid_len) {
-		err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
+		struct cfg80211_chan_def chandef = {
+			.width = NL80211_CHAN_WIDTH_20_NOHT,
+			.center_freq1 = freq,
+		};
+
+		chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
+		if (chandef.chan)
+			err = cfg80211_set_monitor_channel(rdev, &chandef);
+		else
+			err = -EINVAL;
 		goto out;
 	}
 
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index e5246fb..2906d52 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -276,18 +276,16 @@
 	struct crypto_comp * __percpu *tfms;
 	int cpu;
 
-	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
 		struct crypto_comp *tfm;
 
-		tfms = pos->tfms;
-		tfm = *per_cpu_ptr(tfms, cpu);
+		/* This can be any valid CPU ID so we don't need locking. */
+		tfm = __this_cpu_read(*pos->tfms);
 
 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
-			return tfms;
+			return pos->tfms;
 		}
 	}
 
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 3efb07d..765f6fe 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -521,13 +521,12 @@
 		    replay_esn->bmp_len * sizeof(__u32) * 8)
 			return -EINVAL;
 
-	if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
-		return -EINVAL;
-
-	if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
-		x->repl = &xfrm_replay_esn;
-	else
-		x->repl = &xfrm_replay_bmp;
+		if (x->props.flags & XFRM_STATE_ESN) {
+			if (replay_esn->replay_window == 0)
+				return -EINVAL;
+			x->repl = &xfrm_replay_esn;
+		} else
+			x->repl = &xfrm_replay_bmp;
 	} else
 		x->repl = &xfrm_replay_legacy;
 
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 380976f..05a6e3d 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,6 +54,10 @@
 	table[2].data = &net->xfrm.sysctl_larval_drop;
 	table[3].data = &net->xfrm.sysctl_acq_expires;
 
+	/* Don't export sysctls to unprivileged users */
+	if (net->user_ns != &init_user_ns)
+		table[0].procname = NULL;
+
 	net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
 	if (!net->xfrm.sysctl_hdr)
 		goto out_register;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 421f984..eb872b2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2349,7 +2349,7 @@
 	link = &xfrm_dispatch[type];
 
 	/* All operations require privileges, even GET */
-	if (!capable(CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||