Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

	drivers/net/wireless/ath9k/core.c
	drivers/net/wireless/ath9k/main.c
	net/core/dev.c
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index b651e0a..77c3c20 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -145,7 +145,6 @@
         this though and the recommendation to allow only a single
         interface in STA mode at first!
       </para>
-!Finclude/net/mac80211.h ieee80211_if_types
 !Finclude/net/mac80211.h ieee80211_if_init_conf
 !Finclude/net/mac80211.h ieee80211_if_conf
     </chapter>
@@ -177,8 +176,7 @@
         <title>functions/definitions</title>
 !Finclude/net/mac80211.h ieee80211_rx_status
 !Finclude/net/mac80211.h mac80211_rx_flags
-!Finclude/net/mac80211.h ieee80211_tx_control
-!Finclude/net/mac80211.h ieee80211_tx_status_flags
+!Finclude/net/mac80211.h ieee80211_tx_info
 !Finclude/net/mac80211.h ieee80211_rx
 !Finclude/net/mac80211.h ieee80211_rx_irqsafe
 !Finclude/net/mac80211.h ieee80211_tx_status
@@ -189,12 +187,11 @@
 !Finclude/net/mac80211.h ieee80211_ctstoself_duration
 !Finclude/net/mac80211.h ieee80211_generic_frame_duration
 !Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
-!Finclude/net/mac80211.h ieee80211_get_hdrlen
+!Finclude/net/mac80211.h ieee80211_hdrlen
 !Finclude/net/mac80211.h ieee80211_wake_queue
 !Finclude/net/mac80211.h ieee80211_stop_queue
-!Finclude/net/mac80211.h ieee80211_start_queues
-!Finclude/net/mac80211.h ieee80211_stop_queues
 !Finclude/net/mac80211.h ieee80211_wake_queues
+!Finclude/net/mac80211.h ieee80211_stop_queues
       </sect1>
     </chapter>
 
@@ -230,8 +227,7 @@
       <title>Multiple queues and QoS support</title>
       <para>TBD</para>
 !Finclude/net/mac80211.h ieee80211_tx_queue_params
-!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data
-!Finclude/net/mac80211.h ieee80211_tx_queue
+!Finclude/net/mac80211.h ieee80211_tx_queue_stats
     </chapter>
 
     <chapter id="AP">
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 83c88ca..d0f22fa 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,24 @@
 
 ---------------------------
 
+What:	old static regulatory information and ieee80211_regdom module parameter
+When:	2.6.29
+Why:	The old regulatory infrastructure has been replaced with a new one
+	which does not require statically defined regulatory domains. We do
+	not want to keep static regulatory domains in the kernel due to
+	the dynamic nature of regulatory law and localization. We kept around
+	the old static definitions for the regulatory domains of:
+		* US
+		* JP
+		* EU
+	and used the US by default when CONFIG_WIRELESS_OLD_REGULATORY was
+	set. We also kept around the ieee80211_regdom module parameter in case
+	some applications were relying on it. Changing regulatory domains
+	can now be done instead by using nl80211, as is done with iw.
+Who:	Luis R. Rodriguez <lrodriguez@atheros.com>
+
+---------------------------
+
 What:	dev->power.power_state
 When:	July 2007
 Why:	Broken design for runtime control over driver power states, confusing
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
new file mode 100644
index 0000000..123b6ed
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlge
@@ -0,0 +1,46 @@
+Copyright (c)  2003-2008 QLogic Corporation
+QLogic Linux Networking HBA Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License as published by the Free Software
+Foundation (version 2 or a later version).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+	1. Redistribution of source code (only if applicable),
+	   must retain the above copyright notice, this list of
+	   conditions and the following disclaimer.
+
+	2. Redistribution in binary form must reproduce the above
+	   copyright notice, this list of conditions and the
+	   following disclaimer in the documentation and/or other
+	   materials provided with the distribution.
+
+	3. The name of QLogic Corporation may not be used to
+	   endorse or promote products derived from this software
+	   without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 297ba7b..2035bc4 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -35,8 +35,9 @@
     6.1 general settings
     6.2 local loopback of sent frames
     6.3 CAN controller hardware filters
-    6.4 currently supported CAN hardware
-    6.5 todo
+    6.4 The virtual CAN driver (vcan)
+    6.5 currently supported CAN hardware
+    6.6 todo
 
   7 Credits
 
@@ -584,7 +585,42 @@
   @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
   load without any problems ...
 
-  6.4 currently supported CAN hardware (September 2007)
+  6.4 The virtual CAN driver (vcan)
+
+  Similar to the network loopback devices, vcan offers a virtual local
+  CAN interface. A fully qualified address on CAN consists of
+
+  - a unique CAN Identifier (CAN ID)
+  - the CAN bus this CAN ID is transmitted on (e.g. can0)
+
+  so in common use cases more than one virtual CAN interface is needed.
+
+  The virtual CAN interfaces allow the transmission and reception of CAN
+  frames without real CAN controller hardware. Virtual CAN network
+  devices are usually named 'vcanX', like vcan0, vcan1, vcan2, ...
+  When compiled as a module, the virtual CAN driver is called vcan.ko.
+
+  Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
+  netlink interface to create vcan network devices. The creation and
+  removal of vcan network devices can be managed with the ip(8) tool:
+
+  - Create a virtual CAN network interface:
+       ip link add type vcan
+
+  - Create a virtual CAN network interface with a specific name 'vcan42':
+       ip link add dev vcan42 type vcan
+
+  - Remove a (virtual CAN) network interface 'vcan42':
+       ip link del vcan42
+
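+  - Bring a created interface up before using it (newly created interfaces
+    start in the down state; this is standard ip(8) usage, not vcan-specific):
+       ip link set vcan42 up
+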
+  The tool 'vcan' from the SocketCAN SVN repository on BerliOS is obsolete.
+
+  Virtual CAN network device creation in older Kernels:
+  In Linux Kernel versions < 2.6.24 the vcan driver creates 4 vcan
+  netdevices at module load time by default. This value can be changed
+  with the module parameter 'numdev'. E.g. 'modprobe vcan numdev=8'
+
+  6.5 currently supported CAN hardware
 
   On the project website http://developer.berlios.de/projects/socketcan
   there are different drivers available:
@@ -603,7 +639,7 @@
 
   Please check the Mailing Lists on the berlios OSS project website.
 
-  6.5 todo (September 2007)
+  6.6 todo
 
   The configuration interface for CAN network drivers is still an open
   issue that has not been finalized in the socketcan project. Also the
diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
index d391ea6..4caa0e3 100644
--- a/Documentation/networking/multiqueue.txt
+++ b/Documentation/networking/multiqueue.txt
@@ -24,4 +24,56 @@
 device is still operational.  netdev->queue_lock is still used when the device
 comes online or when it's completely shut down (unregister_netdev(), etc.).
 
-Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
+
+Section 2: Qdisc support for multiqueue devices
+
+-----------------------------------------------
+
+Currently two qdiscs are optimized for multiqueue devices.  The first is the
+default pfifo_fast qdisc.  This qdisc supports one qdisc per hardware queue.
+A new round-robin qdisc, sch_multiq, also supports multiple hardware queues. The
+qdisc is responsible for classifying the skbs and then directing them to
+bands and queues based on the value in skb->queue_mapping.  Use this field in
+the base driver to determine which queue to send the skb to.
+
+sch_multiq has been added for hardware that wishes to avoid head-of-line
+blocking.  It will cycle through the bands and verify that the hardware queue
+associated with the band is not stopped prior to dequeuing a packet.
+
+On qdisc load, the number of bands is based on the number of queues on the
+hardware.  Once the association is made, any skb with skb->queue_mapping set
+will be queued to the band associated with the hardware queue.
+
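+As a driver-side illustration, here is a minimal sketch (hypothetical mydrv_*
+names; skb_get_queue_mapping() and the 2.6.27-era hard_start_xmit signature
+are the only real interfaces assumed) of using skb->queue_mapping on transmit:
+
+/* Pick the TX ring that matches the queue mapping chosen by the stack
+ * (via simple_tx_hash or the driver's own select_queue hook). */
+static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mydrv_priv *priv = netdev_priv(dev);
+	u16 queue = skb_get_queue_mapping(skb);
+
+	return mydrv_xmit_on_ring(&priv->tx_ring[queue], skb);
+}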
+
+Section 3: Brief howto using MULTIQ for multiqueue devices
+---------------------------------------------------------------
+
+The userspace command 'tc', part of the iproute2 package, is used to configure
+qdiscs.  To add the MULTIQ qdisc to your network device, assuming the device
+is called eth0, run the following command:
+
+# tc qdisc add dev eth0 root handle 1: multiq
+
+The qdisc will allocate the number of bands to equal the number of queues that
+the device reports, and bring the qdisc online.  Assuming eth0 has 4 Tx
+queues, the band mapping would look like:
+
+band 0 => queue 0
+band 1 => queue 1
+band 2 => queue 2
+band 3 => queue 3
+
+Traffic will begin flowing through each queue based on either the simple_tx_hash
+function or on netdev->select_queue(), if you have it defined.
+
+The behavior of tc filters remains the same.  However, a new tc action,
+skbedit, has been added.  Assuming you wanted to route all traffic to a
+specific host, for example 192.168.0.3, through a specific queue, you could use
+this action and establish a filter such as:
+
+tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
+	match ip dst 192.168.0.3 \
+	action skbedit queue_mapping 3
+
+Author: Alexander Duyck <alexander.h.duyck@intel.com>
+Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
new file mode 100644
index 0000000..57d3e59
--- /dev/null
+++ b/Documentation/networking/phonet.txt
@@ -0,0 +1,121 @@
+Linux Phonet protocol family
+============================
+
+Introduction
+------------
+
+Phonet is a packet protocol used by Nokia cellular modems for both IPC
+and RPC. With the Linux Phonet socket family, Linux host processes can
+receive and send messages from/to the modem, or any other external
+device attached to the modem. The modem takes care of routing.
+
+Phonet packets can be exchanged through various hardware connections
+depending on the device, such as:
+  - USB with the CDC Phonet interface,
+  - infrared,
+  - Bluetooth,
+  - an RS232 serial port (with a dedicated "FBUS" line discipline),
+  - the SSI bus with some TI OMAP processors.
+
+
+Packet format
+-------------
+
+Phonet packets have a common header as follows:
+
+  struct phonethdr {
+    uint8_t  pn_media;  /* Media type (link-layer identifier) */
+    uint8_t  pn_rdev;   /* Receiver device ID */
+    uint8_t  pn_sdev;   /* Sender device ID */
+    uint8_t  pn_res;    /* Resource ID or function */
+    uint16_t pn_length; /* Big-endian message byte length (minus 6) */
+    uint8_t  pn_robj;   /* Receiver object ID */
+    uint8_t  pn_sobj;   /* Sender object ID */
+  };
+
+On Linux, the link-layer header includes the pn_media byte (see below).
+The next 7 bytes are part of the network-layer header.
+
+The device ID is split: the 6 higher-order bits constitute the device
+address, while the 2 lower-order bits are used for multiplexing, as are
+the 8-bit object identifiers. As such, Phonet can be considered a
+network layer with 6 bits of address space and 10 bits for the transport
+protocol (much like port numbers in the IP world).
+
+The modem always has address number zero. All other devices have their
+own 6-bit address.
+
+
+Link layer
+----------
+
+Phonet links are always point-to-point links. The link layer header
+consists of a single Phonet media type byte. It uniquely identifies the
+link through which the packet is transmitted, from the modem's
+perspective. Each Phonet network device shall prepend and set the media
+type byte as appropriate. For convenience, a common phonet_header_ops
+link-layer header operations structure is provided. It sets the
+media type according to the network device hardware address.
+
+Linux Phonet network interfaces support a dedicated link-layer packet
+type (ETH_P_PHONET) which is outside the Ethernet type range. They can
+only send and receive Phonet packets.
+
+The virtual TUN tunnel device driver can also be used for Phonet. This
+requires IFF_TUN mode, _without_ the IFF_NO_PI flag. In this case,
+there is no link-layer header, so there is no Phonet media type byte.
+
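+A minimal sketch of creating such an interface with the standard
+/dev/net/tun API (the helper name phonet_tun_open is hypothetical, and any
+further Phonet-specific setup of the interface is not shown):
+
+  #include <fcntl.h>
+  #include <string.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int phonet_tun_open(const char *name)
+  {
+    struct ifreq ifr;
+    int fd = open("/dev/net/tun", O_RDWR);
+
+    if (fd < 0)
+      return -1;
+    memset(&ifr, 0, sizeof(ifr));
+    ifr.ifr_flags = IFF_TUN;  /* no IFF_NO_PI, as required for Phonet */
+    strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
+    if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
+      close(fd);
+      return -1;
+    }
+    return fd;
+  }
+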
+Note that Phonet interfaces are not allowed to re-order packets, so
+only the (default) Linux FIFO qdisc should be used with them.
+
+
+Network layer
+-------------
+
+The Phonet socket address family maps the Phonet packet header:
+
+  struct sockaddr_pn {
+    sa_family_t spn_family;    /* AF_PHONET */
+    uint8_t     spn_obj;       /* Object ID */
+    uint8_t     spn_dev;       /* Device ID */
+    uint8_t     spn_resource;  /* Resource or function */
+    uint8_t     spn_zero[...]; /* Padding */
+  };
+
+The resource field is only used when sending and receiving;
+it is ignored by bind() and getsockname().
+
+
+Low-level datagram protocol
+---------------------------
+
+Applications can send Phonet messages using the Phonet datagram socket
+protocol from the PF_PHONET family. Each socket is bound to one of the
+2^10 object IDs available, and can send and receive packets with any
+other peer.
+
+  struct sockaddr_pn addr = { .spn_family = AF_PHONET, };
+  ssize_t len;
+  socklen_t addrlen = sizeof(addr);
+  int fd;
+
+  fd = socket(PF_PHONET, SOCK_DGRAM, 0);
+  bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+  /* ... */
+
+  sendto(fd, msg, msglen, 0, (struct sockaddr *)&addr, sizeof(addr));
+  len = recvfrom(fd, buf, sizeof(buf), 0,
+                 (struct sockaddr *)&addr, &addrlen);
+
+This protocol follows the SOCK_DGRAM connectionless semantics.
+However, connect() and getpeername() are not supported, as they did
+not seem useful for Phonet usage (they could be added easily).
+
+
+Authors
+-------
+
+Linux Phonet was initially written by Sakari Ailus.
+Other contributors include Mikä Liljeberg, Andras Domokos,
+Carlos Chinea and Rémi Denis-Courmont.
+Copyright (C) 2008 Nokia Corporation.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
new file mode 100644
index 0000000..a96989a
--- /dev/null
+++ b/Documentation/networking/regulatory.txt
@@ -0,0 +1,194 @@
+Linux wireless regulatory documentation
+---------------------------------------
+
+This document gives a brief overview of how the Linux wireless
+regulatory infrastructure works.
+
+More up-to-date information can be obtained at the project's web page:
+
+http://wireless.kernel.org/en/developers/Regulatory
+
+Keeping regulatory domains in userspace
+---------------------------------------
+
+Due to the dynamic nature of regulatory domains, we keep them
+in userspace and provide a framework for userspace to upload
+to the kernel one regulatory domain to be used as the central
+core regulatory domain that all wireless devices should adhere to.
+
+How to get regulatory domains to the kernel
+-------------------------------------------
+
+Userspace gets a regulatory domain in the kernel by having
+a userspace agent build it and send it via nl80211. Only
+expected regulatory domains will be respected by the kernel.
+
+A currently available userspace agent which can accomplish this
+is CRDA - the central regulatory domain agent. It is documented here:
+
+http://wireless.kernel.org/en/developers/Regulatory/CRDA
+
+Essentially the kernel will send a udev event when it knows
+it needs a new regulatory domain. A udev rule can be put in place
+to trigger crda to send the respective regulatory domain for a
+specific ISO/IEC 3166 alpha2.
+
+Below is an example udev rule which can be used:
+
+# Example file, should be put in /etc/udev/rules.d/regulatory.rules
+KERNEL=="regulatory*", ACTION=="change", SUBSYSTEM=="platform", RUN+="/sbin/crda"
+
+The alpha2 is passed to crda in the COUNTRY environment variable.
+
+Who asks for regulatory domains?
+--------------------------------
+
+* Users
+
+Users can use iw:
+
+http://wireless.kernel.org/en/users/Documentation/iw
+
+An example:
+
+  # set regulatory domain to "Costa Rica"
+  iw reg set CR
+
+This will request the kernel to set the regulatory domain to
+the specified alpha2. The kernel in turn will then ask userspace
+to provide a regulatory domain for the alpha2 specified by the user
+by sending a uevent.
+
+* Wireless subsystems for Country Information elements
+
+The kernel will send a uevent to inform userspace that a new
+regulatory domain is required. More on this will be added
+as its integration is completed.
+
+* Drivers
+
+If drivers determine they need a specific regulatory domain
+set, they can inform the wireless core using regulatory_hint().
+They have two options -- they can either provide an alpha2 so that
+crda can provide back a regulatory domain for that country, or
+they can build their own regulatory domain based on internal
+custom knowledge so the wireless core can respect it.
+
+*Most* drivers will rely on the first mechanism of providing a
+regulatory hint with an alpha2. For these drivers there is an additional
+check that can be used to ensure compliance based on custom EEPROM
+regulatory data. This additional check can be used by drivers by
+registering a reg_notifier() callback on their struct wiphy. This notifier
+is called when the core's regulatory domain has been changed. The driver
+can use this to review the changes made and also review who made them
+(driver, user, country IE) and determine what to allow based on its
+internal EEPROM data. Device drivers wishing to be capable of world
+roaming should use this callback. More on world roaming will be
+added to this document when its support is enabled.
+
+Device drivers that provide their own built-in regulatory domain
+do not need a callback, as the channels registered by them are
+the only ones that will be allowed and therefore *additional*
+channels cannot be enabled.
+
+Example code - drivers hinting an alpha2:
+------------------------------------------
+
+This example comes from the zd1211rw device driver. You can start
+by having a mapping of your device's EEPROM country/regulatory
+domain value to a specific alpha2 as follows:
+
+static struct zd_reg_alpha2_map reg_alpha2_map[] = {
+	{ ZD_REGDOMAIN_FCC, "US" },
+	{ ZD_REGDOMAIN_IC, "CA" },
+	{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
+	{ ZD_REGDOMAIN_JAPAN, "JP" },
+	{ ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+	{ ZD_REGDOMAIN_SPAIN, "ES" },
+	{ ZD_REGDOMAIN_FRANCE, "FR" },
+};
+
+Then you can define a routine to map your read EEPROM value to an alpha2,
+as follows:
+
+static int zd_reg2alpha2(u8 regdomain, char *alpha2)
+{
+	unsigned int i;
+	struct zd_reg_alpha2_map *reg_map;
+
+	for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
+		reg_map = &reg_alpha2_map[i];
+		if (regdomain == reg_map->reg) {
+			alpha2[0] = reg_map->alpha2[0];
+			alpha2[1] = reg_map->alpha2[1];
+			return 0;
+		}
+	}
+	return 1;
+}
+
+Lastly, you can then hint the core about your discovered alpha2, if a match
+was found. You need to do this after you have registered your wiphy. You
+are expected to do this during initialization.
+
+	r = zd_reg2alpha2(mac->regdomain, alpha2);
+	if (!r)
+		regulatory_hint(hw->wiphy, alpha2, NULL);
+
+Example code - drivers providing a built-in regulatory domain:
+--------------------------------------------------------------
+
+If you have regulatory information you can obtain from your
+driver and you *need* to use it, we let you build a regulatory domain
+structure and pass it to the wireless core. To do this you should
+kmalloc() a structure big enough to hold your regulatory domain
+structure and then fill it with your data. Finally, you simply
+call regulatory_hint() with the regulatory domain structure.
+
+Below is a simple example, with a regulatory domain cached using the stack.
+Your implementation may vary (read EEPROM cache instead, for example).
+
+Example cache of some regulatory domain
+
+struct ieee80211_regdomain mydriver_jp_regdom = {
+	.n_reg_rules = 3,
+	.alpha2 =  "JP",
+	//.alpha2 =  "99", /* If I have no alpha2 to map it to */
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..14 */
+		REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
+		/* IEEE 802.11a, channels 34..48 */
+		REG_RULE(5170-20, 5240+20, 40, 6, 20,
+			NL80211_RRF_PASSIVE_SCAN),
+		/* IEEE 802.11a, channels 52..64 */
+		REG_RULE(5260-20, 5320+20, 40, 6, 20,
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
+	}
+};
+
+Then in some part of your code after your wiphy has been registered:
+
+	int r;
+	struct ieee80211_regdomain *rd;
+	int size_of_regd;
+	int num_rules = mydriver_jp_regdom.n_reg_rules;
+	unsigned int i;
+
+	size_of_regd = sizeof(struct ieee80211_regdomain) +
+		(num_rules * sizeof(struct ieee80211_reg_rule));
+
+	rd = kzalloc(size_of_regd, GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+
+	memcpy(rd, &mydriver_jp_regdom, sizeof(struct ieee80211_regdomain));
+
+	for (i = 0; i < num_rules; i++) {
+		memcpy(&rd->reg_rules[i], &mydriver_jp_regdom.reg_rules[i],
+			sizeof(struct ieee80211_reg_rule));
+	}
+	r = regulatory_hint(hw->wiphy, NULL, rd);
+	if (r) {
+		kfree(rd);
+		return r;
+	}
+
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 6fcb306..b65f079 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -341,6 +341,8 @@
 3.1 Guidelines for wireless device drivers
 ------------------------------------------
 
+(in this text, rfkill->foo means the foo field of struct rfkill).
+
 1. Each independent transmitter in a wireless device (usually there is only one
 transmitter per device) should have a SINGLE rfkill class attached to it.
 
@@ -363,10 +365,32 @@
 when possible) the overall transmitter rfkill state, not of a particular rfkill
 line.
 
-5. During suspend, the rfkill class will attempt to soft-block the radio
-through a call to rfkill->toggle_radio, and will try to restore its previous
-state during resume.  After a rfkill class is suspended, it will *not* call
-rfkill->toggle_radio until it is resumed.
+5. The wireless device driver MUST NOT leave the transmitter enabled during
+suspend and hibernation unless:
+
+	5.1. The transmitter has to be enabled for some sort of functionality
+	like wake-on-wireless-packet or autonomous packet forwarding in a mesh
+	network, and that functionality is enabled for this suspend/hibernation
+	cycle.
+
+AND
+
+	5.2. The device was not in a user-requested BLOCKED state before
+	the suspend (i.e. the driver must NOT unblock a device, not even
+	to support wake-on-wireless-packet or remain in the mesh).
+
+In other words, there is absolutely no allowed scenario where a driver can
+automatically take action to unblock a rfkill controller (obviously, this deals
+with scenarios where soft-blocking or both soft and hard blocking is happening.
+Scenarios where hardware rfkill lines are the only ones blocking the
+transmitter are outside of this rule, since the wireless device driver does not
+control its input hardware rfkill lines in the first place).
+
+6. During resume, rfkill will try to restore its previous state.
+
+7. After a rfkill class is suspended, it will *not* call rfkill->toggle_radio
+until it is resumed.
+
 
 Example of a WLAN wireless driver connected to the rfkill subsystem:
 --------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 3596d17..e6aa6aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1048,6 +1048,13 @@
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 
+CISCO 10G ETHERNET DRIVER
+P:	Scott Feldman
+M:	scofeldm@cisco.com
+P:	Joe Eykholt
+M:	jeykholt@cisco.com
+S:	Supported
+
 CFAG12864B LCD DRIVER
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
@@ -2321,6 +2328,12 @@
 W:	http://www.ivtvdriver.org
 S:	Maintained
 
+JME NETWORK DRIVER
+P:	Guo-Fu Tseng
+M:	cooldavid@cooldavid.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
 P:	David Woodhouse
 M:	dwmw2@infradead.org
@@ -3385,6 +3398,13 @@
 L:	netdev@vger.kernel.org
 S:	Supported
 
+QLOGIC QLGE 10Gb ETHERNET DRIVER
+P:	Ron Mercer
+M:	linux-driver@qlogic.com
+M:	ron.mercer@qlogic.com
+L:	netdev@vger.kernel.org
+S:	Supported
+
 QNX4 FILESYSTEM
 P:	Anders Larsen
 M:	al@alarsen.net
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index 610fb24..cd31779 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -25,7 +25,7 @@
 #include "common.h"
 
 static struct mv643xx_eth_platform_data db88f6281_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data db88f6281_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index a3012d4..b1d1a87 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -30,7 +30,7 @@
 #define RD88F6192_GPIO_USB_VBUS		10
 
 static struct mv643xx_eth_platform_data rd88f6192_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data rd88f6192_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index d96487a0..b641661 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,7 +69,7 @@
 };
 
 static struct mv643xx_eth_platform_data rd88f6281_ge00_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-loki/lb88rc8480-setup.c b/arch/arm/mach-loki/lb88rc8480-setup.c
index 2cc9ac9..85f9c12 100644
--- a/arch/arm/mach-loki/lb88rc8480-setup.c
+++ b/arch/arm/mach-loki/lb88rc8480-setup.c
@@ -67,7 +67,7 @@
 };
 
 static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = {
-	.phy_addr	= 1,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(1),
 	.mac_addr	= { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 },
 };
 
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 953a26c..5842d3b 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -330,6 +330,7 @@
 struct mv643xx_eth_shared_platform_data mv78xx0_ge01_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge01_shared_resources[] = {
@@ -370,7 +371,6 @@
 void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge01_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge01.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge01_shared);
@@ -384,6 +384,7 @@
 struct mv643xx_eth_shared_platform_data mv78xx0_ge10_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge10_shared_resources[] = {
@@ -424,7 +425,6 @@
 void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge10_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge10.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge10_shared);
@@ -438,6 +438,7 @@
 struct mv643xx_eth_shared_platform_data mv78xx0_ge11_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge11_shared_resources[] = {
@@ -478,7 +479,6 @@
 void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge11_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge11.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge11_shared);
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index a2d0c97..49f434c 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -19,19 +19,19 @@
 #include "common.h"
 
 static struct mv643xx_eth_platform_data db78x00_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge01_data = {
-	.phy_addr	= 9,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(9),
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge10_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge11_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 };
 
 static struct mv_sata_platform_data db78x00_sata_data = {
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index ff13e90..d318bea 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -285,7 +285,7 @@
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data db88f5281_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index b38c65c..3e66098 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -79,7 +79,7 @@
  */
 
 static struct mv643xx_eth_platform_data dns323_eth_data = {
-	.phy_addr = 8,
+	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
 /****************************************************************************
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index e321ec3..610f2a6 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -161,7 +161,7 @@
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data kurobox_pro_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 53ff189..68acca9 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -109,7 +109,7 @@
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data mss2_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 978d4d5..97c9ccb 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -39,7 +39,7 @@
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data mv2120_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data mv2120_sata_data = {
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index e72fe1e..500cdad 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -88,7 +88,7 @@
 };
 
 static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index a1fe325..ebde814 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -89,7 +89,7 @@
 };
 
 static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index 4c3bcd7..a04f9e4 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -221,7 +221,7 @@
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data rd88f5182_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index ae0a5dc..1368e9f 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -103,8 +103,7 @@
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data ts78xx_eth_data = {
-	.phy_addr	= 0,
-	.force_phy_addr = 1,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 83feac3..19cde24 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -48,7 +48,7 @@
  ****************************************************************************/
 
 struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static int __init qnap_tsx09_parse_hex_nibble(char n)
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index b6bc43e..7ddc22c 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -92,7 +92,7 @@
 };
 
 static struct mv643xx_eth_platform_data wnr854t_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index b10da17..9a4fd52 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -100,7 +100,7 @@
 };
 
 static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 32e0ad0..b6bd775 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -293,10 +293,8 @@
 		return -ENODEV;
 
 	prop = of_get_property(phy, "reg", NULL);
-	if (prop) {
-		pdata.force_phy_addr = 1;
-		pdata.phy_addr = *prop;
-	}
+	if (prop)
+		pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop);
 
 	of_node_put(phy);
 
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 735f5ea..12cf5d4 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -260,6 +260,9 @@
 config ACPI_TOSHIBA
 	tristate "Toshiba Laptop Extras"
 	depends on X86
+	select INPUT_POLLDEV
+	select NET
+	select RFKILL
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver adds support for access to certain system settings
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 0a43c8e..8a649f4 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -3,6 +3,7 @@
  *
  *
  *  Copyright (C) 2002-2004 John Belmonte
+ *  Copyright (C) 2008 Philip Langdale
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -33,7 +34,7 @@
  *
  */
 
-#define TOSHIBA_ACPI_VERSION	"0.18"
+#define TOSHIBA_ACPI_VERSION	"0.19"
 #define PROC_INTERFACE_VERSION	1
 
 #include <linux/kernel.h>
@@ -42,6 +43,9 @@
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/input-polldev.h>
 
 #include <asm/uaccess.h>
 
@@ -90,6 +94,7 @@
 #define HCI_VIDEO_OUT			0x001c
 #define HCI_HOTKEY_EVENT		0x001e
 #define HCI_LCD_BRIGHTNESS		0x002a
+#define HCI_WIRELESS			0x0056
 
 /* field definitions */
 #define HCI_LCD_BRIGHTNESS_BITS		3
@@ -98,9 +103,14 @@
 #define HCI_VIDEO_OUT_LCD		0x1
 #define HCI_VIDEO_OUT_CRT		0x2
 #define HCI_VIDEO_OUT_TV		0x4
+#define HCI_WIRELESS_KILL_SWITCH	0x01
+#define HCI_WIRELESS_BT_PRESENT		0x0f
+#define HCI_WIRELESS_BT_ATTACH		0x40
+#define HCI_WIRELESS_BT_POWER		0x80
 
 static const struct acpi_device_id toshiba_device_ids[] = {
 	{"TOS6200", 0},
+	{"TOS6208", 0},
 	{"TOS1900", 0},
 	{"", 0},
 };
@@ -193,7 +203,7 @@
 	return status;
 }
 
-/* common hci tasks (get or set one value)
+/* common hci tasks (get or set one or two values)
  *
  * In addition to the ACPI status, the HCI system returns a result which
  * may be useful (such as "not supported").
@@ -218,6 +228,152 @@
 	return status;
 }
 
+static acpi_status hci_write2(u32 reg, u32 in1, u32 in2, u32 *result)
+{
+	u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status = hci_raw(in, out);
+	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+	return status;
+}
+
+static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
+{
+	u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status = hci_raw(in, out);
+	*out1 = out[2];
+	*out2 = out[3];
+	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+	return status;
+}
+
+struct toshiba_acpi_dev {
+	struct platform_device *p_dev;
+	struct rfkill *rfk_dev;
+	struct input_polled_dev *poll_dev;
+
+	const char *bt_name;
+	const char *rfk_name;
+
+	bool last_rfk_state;
+
+	struct mutex mutex;
+};
+
+static struct toshiba_acpi_dev toshiba_acpi = {
+	.bt_name = "Toshiba Bluetooth",
+	.rfk_name = "Toshiba RFKill Switch",
+	.last_rfk_state = false,
+};
+
+/* Bluetooth rfkill handlers */
+
+static u32 hci_get_bt_present(bool *present)
+{
+	u32 hci_result;
+	u32 value, value2;
+
+	value = 0;
+	value2 = 0;
+	hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+	if (hci_result == HCI_SUCCESS)
+		*present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+
+	return hci_result;
+}
+
+static u32 hci_get_bt_on(bool *on)
+{
+	u32 hci_result;
+	u32 value, value2;
+
+	value = 0;
+	value2 = 0x0001;
+	hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+	if (hci_result == HCI_SUCCESS)
+		*on = (value & HCI_WIRELESS_BT_POWER) &&
+		      (value & HCI_WIRELESS_BT_ATTACH);
+
+	return hci_result;
+}
+
+static u32 hci_get_radio_state(bool *radio_state)
+{
+	u32 hci_result;
+	u32 value, value2;
+
+	value = 0;
+	value2 = 0x0001;
+	hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+
+	*radio_state = value & HCI_WIRELESS_KILL_SWITCH;
+	return hci_result;
+}
+
+static int bt_rfkill_toggle_radio(void *data, enum rfkill_state state)
+{
+	u32 result1, result2;
+	u32 value;
+	bool radio_state;
+	struct toshiba_acpi_dev *dev = data;
+
+	value = (state == RFKILL_STATE_UNBLOCKED);
+
+	if (hci_get_radio_state(&radio_state) != HCI_SUCCESS)
+		return -EFAULT;
+
+	switch (state) {
+	case RFKILL_STATE_UNBLOCKED:
+		if (!radio_state)
+			return -EPERM;
+		break;
+	case RFKILL_STATE_SOFT_BLOCKED:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->mutex);
+	hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
+	hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
+	mutex_unlock(&dev->mutex);
+
+	if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
+		return -EFAULT;
+
+	return 0;
+}
+
+static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
+{
+	bool state_changed;
+	bool new_rfk_state;
+	bool value;
+	u32 hci_result;
+	struct toshiba_acpi_dev *dev = poll_dev->private;
+
+	hci_result = hci_get_radio_state(&value);
+	if (hci_result != HCI_SUCCESS)
+		return; /* Can't do anything useful */
+
+	new_rfk_state = value;
+
+	mutex_lock(&dev->mutex);
+	state_changed = new_rfk_state != dev->last_rfk_state;
+	dev->last_rfk_state = new_rfk_state;
+	mutex_unlock(&dev->mutex);
+
+	if (unlikely(state_changed)) {
+		rfkill_force_state(dev->rfk_dev,
+				   new_rfk_state ?
+				   RFKILL_STATE_SOFT_BLOCKED :
+				   RFKILL_STATE_HARD_BLOCKED);
+		input_report_switch(poll_dev->input, SW_RFKILL_ALL,
+				    new_rfk_state);
+	}
+}
+
 static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
 static struct backlight_device *toshiba_backlight_device;
 static int force_fan;
@@ -547,6 +703,14 @@
 
 static void toshiba_acpi_exit(void)
 {
+	if (toshiba_acpi.poll_dev) {
+		input_unregister_polled_device(toshiba_acpi.poll_dev);
+		input_free_polled_device(toshiba_acpi.poll_dev);
+	}
+
+	if (toshiba_acpi.rfk_dev)
+		rfkill_unregister(toshiba_acpi.rfk_dev);
+
 	if (toshiba_backlight_device)
 		backlight_device_unregister(toshiba_backlight_device);
 
@@ -555,6 +719,8 @@
 	if (toshiba_proc_dir)
 		remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
 
+	platform_device_unregister(toshiba_acpi.p_dev);
+
 	return;
 }
 
@@ -562,6 +728,10 @@
 {
 	acpi_status status = AE_OK;
 	u32 hci_result;
+	bool bt_present;
+	bool bt_on;
+	bool radio_on;
+	int ret = 0;
 
 	if (acpi_disabled)
 		return -ENODEV;
@@ -578,6 +748,18 @@
 	       TOSHIBA_ACPI_VERSION);
 	printk(MY_INFO "    HCI method: %s\n", method_hci);
 
+	mutex_init(&toshiba_acpi.mutex);
+
+	toshiba_acpi.p_dev = platform_device_register_simple("toshiba_acpi",
+							      -1, NULL, 0);
+	if (IS_ERR(toshiba_acpi.p_dev)) {
+		ret = PTR_ERR(toshiba_acpi.p_dev);
+		printk(MY_ERR "unable to register platform device\n");
+		toshiba_acpi.p_dev = NULL;
+		toshiba_acpi_exit();
+		return ret;
+	}
+
 	force_fan = 0;
 	key_event_valid = 0;
 
@@ -586,19 +768,23 @@
 
 	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
 	if (!toshiba_proc_dir) {
-		status = AE_ERROR;
+		toshiba_acpi_exit();
+		return -ENODEV;
 	} else {
 		toshiba_proc_dir->owner = THIS_MODULE;
 		status = add_device();
-		if (ACPI_FAILURE(status))
-			remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+		if (ACPI_FAILURE(status)) {
+			toshiba_acpi_exit();
+			return -ENODEV;
+		}
 	}
 
-	toshiba_backlight_device = backlight_device_register("toshiba",NULL,
+	toshiba_backlight_device = backlight_device_register("toshiba",
+						&toshiba_acpi.p_dev->dev,
 						NULL,
 						&toshiba_backlight_data);
         if (IS_ERR(toshiba_backlight_device)) {
-		int ret = PTR_ERR(toshiba_backlight_device);
+		ret = PTR_ERR(toshiba_backlight_device);
 
 		printk(KERN_ERR "Could not register toshiba backlight device\n");
 		toshiba_backlight_device = NULL;
@@ -607,7 +793,66 @@
 	}
         toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 
-	return (ACPI_SUCCESS(status)) ? 0 : -ENODEV;
+	/* Register rfkill switch for Bluetooth */
+	if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
+		toshiba_acpi.rfk_dev = rfkill_allocate(&toshiba_acpi.p_dev->dev,
+							RFKILL_TYPE_BLUETOOTH);
+		if (!toshiba_acpi.rfk_dev) {
+			printk(MY_ERR "unable to allocate rfkill device\n");
+			toshiba_acpi_exit();
+			return -ENOMEM;
+		}
+
+		toshiba_acpi.rfk_dev->name = toshiba_acpi.bt_name;
+		toshiba_acpi.rfk_dev->toggle_radio = bt_rfkill_toggle_radio;
+		toshiba_acpi.rfk_dev->user_claim_unsupported = 1;
+		toshiba_acpi.rfk_dev->data = &toshiba_acpi;
+
+		if (hci_get_bt_on(&bt_on) == HCI_SUCCESS && bt_on) {
+			toshiba_acpi.rfk_dev->state = RFKILL_STATE_UNBLOCKED;
+		} else if (hci_get_radio_state(&radio_on) == HCI_SUCCESS &&
+			   radio_on) {
+			toshiba_acpi.rfk_dev->state = RFKILL_STATE_SOFT_BLOCKED;
+		} else {
+			toshiba_acpi.rfk_dev->state = RFKILL_STATE_HARD_BLOCKED;
+		}
+
+		ret = rfkill_register(toshiba_acpi.rfk_dev);
+		if (ret) {
+			printk(MY_ERR "unable to register rfkill device\n");
+			toshiba_acpi_exit();
+			return -ENOMEM;
+		}
+	}
+
+	/* Register input device for kill switch */
+	toshiba_acpi.poll_dev = input_allocate_polled_device();
+	if (!toshiba_acpi.poll_dev) {
+		printk(MY_ERR "unable to allocate kill-switch input device\n");
+		toshiba_acpi_exit();
+		return -ENOMEM;
+	}
+	toshiba_acpi.poll_dev->private = &toshiba_acpi;
+	toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
+	toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
+
+	toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
+	toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
+	toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
+	set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
+	set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
+	input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
+
+	ret = input_register_polled_device(toshiba_acpi.poll_dev);
+	if (ret) {
+		printk(MY_ERR "unable to register kill-switch input device\n");
+		rfkill_free(toshiba_acpi.rfk_dev);
+		toshiba_acpi.rfk_dev = NULL;
+		toshiba_acpi_exit();
+		return ret;
+	}
+
+	return 0;
 }
 
 module_init(toshiba_acpi_init);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 41b2204..5503bfc 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1270,7 +1270,7 @@
 			if (*pre < 3) (*pre)++; /* else fail later */
 			div = pre_div[*pre]*-*pcr;
 			DPRINTK("max div %d\n",div);
-			*res = (TS_CLOCK+div-1)/div-1;
+			*res = DIV_ROUND_UP(TS_CLOCK, div)-1;
 		}
 		if (*res < 0) *res = 0;
 		if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE;
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index c0ac728..6154123 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -635,7 +635,7 @@
 		// take care of rounding
 		switch (r) {
 			case round_down:
-				pre = (br+(c<<div)-1)/(c<<div);
+				pre = DIV_ROUND_UP(br, c<<div);
 				// but p must be non-zero
 				if (!pre)
 					pre = 1;
@@ -668,7 +668,7 @@
 			// take care of rounding
 			switch (r) {
 				case round_down:
-					pre = (br+(c<<div)-1)/(c<<div);
+					pre = DIV_ROUND_UP(br, c<<div);
 					break;
 				case round_nearest:
 					pre = (br+(c<<div)/2)/(c<<div);
@@ -698,7 +698,7 @@
 		if (bits)
 			*bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
 		if (actual) {
-			*actual = (br + (pre<<div) - 1) / (pre<<div);
+			*actual = DIV_ROUND_UP(br, pre<<div);
 			PRINTD (DBG_QOS, "actual rate: %u", *actual);
 		}
 		return 0;
@@ -1967,7 +1967,7 @@
   // Set the max AAL5 cell count to be just enough to contain the
   // largest AAL5 frame that the user wants to receive
   wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
-	   (max_rx_size + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD);
+	   DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
   
   // Enable receive
   wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 3a504e9..e33ae00 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1114,11 +1114,8 @@
 
 	rpp = &vc->rcv.rx_pool;
 
+	__skb_queue_tail(&rpp->queue, skb);
 	rpp->len += skb->len;
-	if (!rpp->count++)
-		rpp->first = skb;
-	*rpp->last = skb;
-	rpp->last = &skb->next;
 
 	if (stat & SAR_RSQE_EPDU) {
 		unsigned char *l1l2;
@@ -1145,7 +1142,7 @@
 			atomic_inc(&vcc->stats->rx_err);
 			return;
 		}
-		if (rpp->count > 1) {
+		if (skb_queue_len(&rpp->queue) > 1) {
 			struct sk_buff *sb;
 
 			skb = dev_alloc_skb(rpp->len);
@@ -1161,12 +1158,9 @@
 				dev_kfree_skb(skb);
 				return;
 			}
-			sb = rpp->first;
-			for (i = 0; i < rpp->count; i++) {
+			skb_queue_walk(&rpp->queue, sb)
 				memcpy(skb_put(skb, sb->len),
 				       sb->data, sb->len);
-				sb = sb->next;
-			}
 
 			recycle_rx_pool_skb(card, rpp);
 
@@ -1180,7 +1174,6 @@
 			return;
 		}
 
-		skb->next = NULL;
 		flush_rx_pool(card, rpp);
 
 		if (!atm_charge(vcc, skb->truesize)) {
@@ -1918,25 +1911,18 @@
 static void
 flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
 {
+	skb_queue_head_init(&rpp->queue);
 	rpp->len = 0;
-	rpp->count = 0;
-	rpp->first = NULL;
-	rpp->last = &rpp->first;
 }
 
 static void
 recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp)
 {
-	struct sk_buff *skb, *next;
-	int i;
+	struct sk_buff *skb, *tmp;
 
-	skb = rpp->first;
-	for (i = 0; i < rpp->count; i++) {
-		next = skb->next;
-		skb->next = NULL;
+	skb_queue_walk_safe(&rpp->queue, skb, tmp)
 		recycle_rx_skb(card, skb);
-		skb = next;
-	}
+
 	flush_rx_pool(card, rpp);
 }
 
@@ -2537,7 +2523,7 @@
 		waitfor_idle(card);
 		spin_unlock_irqrestore(&card->cmd_lock, flags);
 
-		if (vc->rcv.rx_pool.count) {
+		if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
 			DPRINTK("%s: closing a VC with pending rx buffers.\n",
 				card->name);
 
@@ -2970,7 +2956,7 @@
 			waitfor_idle(card);
 			spin_unlock_irqrestore(&card->cmd_lock, flags);
 
-			if (vc->rcv.rx_pool.count) {
+			if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
 				DPRINTK("%s: closing a VC "
 					"with pending rx buffers.\n",
 					card->name);
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index e83eaf1..5042bb2 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -173,10 +173,8 @@
 };
 
 struct rx_pool {
-	struct sk_buff		*first;
-	struct sk_buff		**last;
+	struct sk_buff_head	queue;
 	unsigned int		len;
-	unsigned int		count;
 };
 
 struct aal1 {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 58583c6..752b1ba 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -496,8 +496,8 @@
 			vcc->qos.rxtp.max_sdu = 65464;
 			/* fix this - we may want to receive 64kB SDUs
 			   later */
-		cells = (vcc->qos.rxtp.max_sdu+ATM_AAL5_TRAILER+
-		    ATM_CELL_PAYLOAD-1)/ATM_CELL_PAYLOAD;
+		cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
+				ATM_CELL_PAYLOAD);
 		zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
 	}
 	else {
@@ -820,7 +820,7 @@
 			}
 			else {
 				i = 255;
-				m = (ATM_OC3_PCR*255+max-1)/max;
+				m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
 			}
 		}
 		if (i > m) {
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5b4c6e6..93f3690 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,11 +159,8 @@
 	sector_t ssize;
 	struct timer_list timer;
 	spinlock_t lock;
-	struct sk_buff *sendq_hd; /* packets needing to be sent, list head */
-	struct sk_buff *sendq_tl;
-	struct sk_buff *skbpool_hd;
-	struct sk_buff *skbpool_tl;
-	int nskbpool;
+	struct sk_buff_head sendq;
+	struct sk_buff_head skbpool;
 	mempool_t *bufpool;	/* for deadlock-free Buf allocation */
 	struct list_head bufq;	/* queue of bios to work on */
 	struct buf *inprocess;	/* the one we're currently working on */
@@ -199,7 +196,7 @@
 
 int aoenet_init(void);
 void aoenet_exit(void);
-void aoenet_xmit(struct sk_buff *);
+void aoenet_xmit(struct sk_buff_head *);
 int is_aoe_netif(struct net_device *ifp);
 int set_aoe_iflist(const char __user *str, size_t size);
 
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782..fd2cf54 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -158,9 +158,9 @@
 static int
 aoeblk_make_request(struct request_queue *q, struct bio *bio)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct buf *buf;
-	struct sk_buff *sl;
 	ulong flags;
 
 	blk_queue_bounce(q, &bio);
@@ -213,11 +213,11 @@
 	list_add_tail(&buf->bufs, &d->bufq);
 
 	aoecmd_work(d);
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	spin_unlock_irqrestore(&d->lock, flags);
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 
 	return 0;
 }
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 181ebb8..1f56d2c 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -9,6 +9,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/smp_lock.h>
+#include <linux/skbuff.h>
 #include "aoe.h"
 
 enum {
@@ -103,7 +104,12 @@
 		spin_lock_irqsave(&d->lock, flags);
 		goto loop;
 	}
-	aoenet_xmit(skb);
+	if (skb) {
+		struct sk_buff_head queue;
+		__skb_queue_head_init(&queue);
+		__skb_queue_tail(&queue, skb);
+		aoenet_xmit(&queue);
+	}
 	aoecmd_cfg(major, minor);
 	return 0;
 }
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f17462..e33da30 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@
 static void
 skb_pool_put(struct aoedev *d, struct sk_buff *skb)
 {
-	if (!d->skbpool_hd)
-		d->skbpool_hd = skb;
-	else
-		d->skbpool_tl->next = skb;
-	d->skbpool_tl = skb;
+	__skb_queue_tail(&d->skbpool, skb);
 }
 
 static struct sk_buff *
 skb_pool_get(struct aoedev *d)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek(&d->skbpool);
 
-	skb = d->skbpool_hd;
 	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
-		d->skbpool_hd = skb->next;
-		skb->next = NULL;
+		__skb_unlink(skb, &d->skbpool);
 		return skb;
 	}
-	if (d->nskbpool < NSKBPOOLMAX
-	&& (skb = new_skb(ETH_ZLEN))) {
-		d->nskbpool++;
+	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
+	    (skb = new_skb(ETH_ZLEN)))
 		return skb;
-	}
+
 	return NULL;
 }
 
@@ -293,29 +286,22 @@
 
 	skb->dev = t->ifp->nd;
 	skb = skb_clone(skb, GFP_ATOMIC);
-	if (skb) {
-		if (d->sendq_hd)
-			d->sendq_tl->next = skb;
-		else
-			d->sendq_hd = skb;
-		d->sendq_tl = skb;
-	}
+	if (skb)
+		__skb_queue_tail(&d->sendq, skb);
 	return 1;
 }
 
 /* some callers cannot sleep, and they can call this function,
  * transmitting the packets later, when interrupts are on
  */
-static struct sk_buff *
-aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+static void
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
 {
 	struct aoe_hdr *h;
 	struct aoe_cfghdr *ch;
-	struct sk_buff *skb, *sl, *sl_tail;
+	struct sk_buff *skb;
 	struct net_device *ifp;
 
-	sl = sl_tail = NULL;
-
 	read_lock(&dev_base_lock);
 	for_each_netdev(&init_net, ifp) {
 		dev_hold(ifp);
@@ -329,8 +315,7 @@
 		}
 		skb_put(skb, sizeof *h + sizeof *ch);
 		skb->dev = ifp;
-		if (sl_tail == NULL)
-			sl_tail = skb;
+		__skb_queue_tail(queue, skb);
 		h = (struct aoe_hdr *) skb_mac_header(skb);
 		memset(h, 0, sizeof *h + sizeof *ch);
 
@@ -342,16 +327,10 @@
 		h->minor = aoeminor;
 		h->cmd = AOECMD_CFG;
 
-		skb->next = sl;
-		sl = skb;
 cont:
 		dev_put(ifp);
 	}
 	read_unlock(&dev_base_lock);
-
-	if (tail != NULL)
-		*tail = sl_tail;
-	return sl;
 }
 
 static void
@@ -406,11 +385,7 @@
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (skb == NULL)
 		return;
-	if (d->sendq_hd)
-		d->sendq_tl->next = skb;
-	else
-		d->sendq_hd = skb;
-	d->sendq_tl = skb;
+	__skb_queue_tail(&d->sendq, skb);
 }
 
 static int
@@ -508,16 +483,15 @@
 static void
 rexmit_timer(ulong vp)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoetgt *t, **tt, **te;
 	struct aoeif *ifp;
 	struct frame *f, *e;
-	struct sk_buff *sl;
 	register long timeout;
 	ulong flags, n;
 
 	d = (struct aoedev *) vp;
-	sl = NULL;
 
 	/* timeout is always ~150% of the moving average */
 	timeout = d->rttavg;
@@ -589,7 +563,7 @@
 		}
 	}
 
-	if (d->sendq_hd) {
+	if (!skb_queue_empty(&d->sendq)) {
 		n = d->rttavg <<= 1;
 		if (n > MAXTIMER)
 			d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@
 		aoecmd_work(d);
 	}
 
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	d->timer.expires = jiffies + TIMERTICK;
 	add_timer(&d->timer);
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 /* enters with d->lock held */
@@ -767,12 +741,12 @@
 void
 aoecmd_ata_rsp(struct sk_buff *skb)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoe_hdr *hin, *hout;
 	struct aoe_atahdr *ahin, *ahout;
 	struct frame *f;
 	struct buf *buf;
-	struct sk_buff *sl;
 	struct aoetgt *t;
 	struct aoeif *ifp;
 	register long n;
@@ -893,21 +867,21 @@
 
 	aoecmd_work(d);
 xmit:
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	spin_unlock_irqrestore(&d->lock, flags);
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 void
 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
 {
-	struct sk_buff *sl;
+	struct sk_buff_head queue;
 
-	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
-
-	aoenet_xmit(sl);
+	__skb_queue_head_init(&queue);
+	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
+	aoenet_xmit(&queue);
 }
  
 struct sk_buff *
@@ -1076,7 +1050,12 @@
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	if (sl) {
+		struct sk_buff_head queue;
+		__skb_queue_head_init(&queue);
+		__skb_queue_tail(&queue, sl);
+		aoenet_xmit(&queue);
+	}
 }
 
 void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813a..75a610a 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -188,14 +188,12 @@
 static void
 skbpoolfree(struct aoedev *d)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb, *tmp;
 
-	while ((skb = d->skbpool_hd)) {
-		d->skbpool_hd = skb->next;
-		skb->next = NULL;
+	skb_queue_walk_safe(&d->skbpool, skb, tmp)
 		skbfree(skb);
-	}
-	d->skbpool_tl = NULL;
+
+	__skb_queue_head_init(&d->skbpool);
 }
 
 /* find it or malloc it */
@@ -217,6 +215,8 @@
 		goto out;
 	INIT_WORK(&d->work, aoecmd_sleepwork);
 	spin_lock_init(&d->lock);
+	skb_queue_head_init(&d->sendq);
+	skb_queue_head_init(&d->skbpool);
 	init_timer(&d->timer);
 	d->timer.data = (ulong) d;
 	d->timer.function = dummy_timer;
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 7b15a5e..7f83ad9 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -7,6 +7,7 @@
 #include <linux/hdreg.h>
 #include <linux/blkdev.h>
 #include <linux/module.h>
+#include <linux/skbuff.h>
 #include "aoe.h"
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 0c81ca7..9157d64 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -95,13 +95,12 @@
 }
 
 void
-aoenet_xmit(struct sk_buff *sl)
+aoenet_xmit(struct sk_buff_head *queue)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb, *tmp;
 
-	while ((skb = sl)) {
-		sl = sl->next;
-		skb->next = skb->prev = NULL;
+	skb_queue_walk_safe(queue, skb, tmp) {
+		__skb_unlink(skb, queue);
 		dev_queue_xmit(skb);
 	}
 }
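
The aoe hunks above all apply the same conversion: the driver's hand-rolled skb lists (head/tail pointer pairs chained through skb->next) become struct sk_buff_head objects managed with the helpers from <linux/skbuff.h>. A minimal sketch of the pattern follows; the example_* names are hypothetical, and the unlocked (double-underscore) helpers assume the caller serializes queue access with its own lock, as aoe does with d->lock.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct example_dev {
	struct sk_buff_head sendq;	/* replaces the sendq_hd/sendq_tl pair */
};

static void example_init(struct example_dev *d)
{
	skb_queue_head_init(&d->sendq);	/* also initializes the queue's own lock */
}

static void example_queue(struct example_dev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->sendq, skb);	/* caller already holds its own lock */
}

static void example_xmit_all(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	/* _safe variant because each skb is unlinked inside the loop */
	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		dev_queue_xmit(skb);
	}
}

skb_queue_splice_init(), used in rexmit_timer() and aoecmd_ata_rsp() above, moves the whole pending queue onto a stack-local sk_buff_head in O(1) so the device lock can be dropped before transmitting.
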
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 4d37bb3..7938062 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -352,14 +352,14 @@
 /* Remove ack'ed packets */
 static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
 {
+	struct sk_buff *skb, *tmp;
 	unsigned long flags;
-	struct sk_buff *skb;
 	int i, pkts_to_be_removed;
 	u8 seqno;
 
 	spin_lock_irqsave(&bcsp->unack.lock, flags);
 
-	pkts_to_be_removed = bcsp->unack.qlen;
+	pkts_to_be_removed = skb_queue_len(&bcsp->unack);
 	seqno = bcsp->msgq_txseq;
 
 	while (pkts_to_be_removed) {
@@ -373,19 +373,19 @@
 		BT_ERR("Peer acked invalid packet");
 
 	BT_DBG("Removing %u pkts out of %u, up to seqno %u",
-		pkts_to_be_removed, bcsp->unack.qlen, (seqno - 1) & 0x07);
+	       pkts_to_be_removed, skb_queue_len(&bcsp->unack),
+	       (seqno - 1) & 0x07);
 
-	for (i = 0, skb = ((struct sk_buff *) &bcsp->unack)->next; i < pkts_to_be_removed
-			&& skb != (struct sk_buff *) &bcsp->unack; i++) {
-		struct sk_buff *nskb;
+	i = 0;
+	skb_queue_walk_safe(&bcsp->unack, skb, tmp) {
+		if (i++ >= pkts_to_be_removed)
+			break;
 
-		nskb = skb->next;
 		__skb_unlink(skb, &bcsp->unack);
 		kfree_skb(skb);
-		skb = nskb;
 	}
 
-	if (bcsp->unack.qlen == 0)
+	if (skb_queue_empty(&bcsp->unack))
 		del_timer(&bcsp->tbcsp);
 
 	spin_unlock_irqrestore(&bcsp->unack.lock, flags);
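
The hci_bcsp change follows the same queue discipline, and additionally replaces direct reads of sk_buff_head.qlen with the skb_queue_len()/skb_queue_empty() accessors, keeping the caller away from the struct internals. A hedged sketch of the culling loop under the queue's own lock; example_cull and the 'n' limit are hypothetical stand-ins:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void example_cull(struct sk_buff_head *unack, int n)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&unack->lock, flags);

	skb_queue_walk_safe(unack, skb, tmp) {
		if (i++ >= n)
			break;
		/* double-underscore variants: the queue lock is already held */
		__skb_unlink(skb, unack);
		kfree_skb(skb);
	}

	/* accessor instead of poking unack->qlen directly */
	if (skb_queue_empty(unack))
		pr_debug("unack queue drained\n");

	spin_unlock_irqrestore(&unack->lock, flags);
}
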
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 1790cc8..8e65991 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -70,8 +70,8 @@
 {
 	unsigned long flags;
 	spin_lock_irqsave(&q->lock, flags);
-	/* _urb_unlink needs to know which spinlock to use, thus mb(). */
-	_urb->queue = q; mb(); list_add(&_urb->list, &q->head);
+	/* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
+	_urb->queue = q; smp_mb(); list_add(&_urb->list, &q->head);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -79,8 +79,8 @@
 {
 	unsigned long flags;
 	spin_lock_irqsave(&q->lock, flags);
-	/* _urb_unlink needs to know which spinlock to use, thus mb(). */
-	_urb->queue = q; mb(); list_add_tail(&_urb->list, &q->head);
+	/* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
+	_urb->queue = q; smp_mb(); list_add_tail(&_urb->list, &q->head);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -89,7 +89,7 @@
 	struct _urb_queue *q;
 	unsigned long flags;
 
-	mb();
+	smp_mb();
 	q = _urb->queue;
 	/* If q is NULL, it will die at easy-to-debug NULL pointer dereference.
 	   No need to BUG(). */
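
mb() orders accesses against devices as well as other CPUs; here the barrier only has to make _urb->queue visible to another CPU before the item appears on the list, so the cheaper smp_mb() (a plain compiler barrier on UP builds) is sufficient. A hedged sketch of the publish side with hypothetical example_* types; smp_mb() comes from the architecture barrier headers pulled in by <linux/spinlock.h>:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_queue {
	spinlock_t lock;
	struct list_head head;
};

struct example_item {
	struct list_head list;
	struct example_queue *queue;	/* tells the unlink path which lock to take */
};

static void example_enqueue(struct example_item *it, struct example_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	it->queue = q;
	smp_mb();	/* publish ->queue before the item becomes reachable */
	list_add_tail(&it->list, &q->head);
	spin_unlock_irqrestore(&q->lock, flags);
}
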
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 75726ea..5360c4f 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -828,15 +828,18 @@
 			return -ESRCH;
 		if (card->load_firmware == NULL) {
 			printk(KERN_DEBUG "kcapi: load: no load function\n");
+			capi_ctr_put(card);
 			return -ESRCH;
 		}
 
 		if (ldef.t4file.len <= 0) {
 			printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
+			capi_ctr_put(card);
 			return -EINVAL;
 		}
 		if (ldef.t4file.data == NULL) {
 			printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
+			capi_ctr_put(card);
 			return -EINVAL;
 		}
 
@@ -849,6 +852,7 @@
 
 		if (card->cardstate != CARD_DETECTED) {
 			printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
+			capi_ctr_put(card);
 			return -EBUSY;
 		}
 		card->cardstate = CARD_LOADING;
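
The kcapi hunk plugs a reference leak: a reference on the controller is taken earlier in the function, and the early error returns were skipping the matching capi_ctr_put(). The patch drops it on each path individually; a common alternative is a single exit label, sketched below with hypothetical example_* helpers standing in for the kcapi API (illustrative prototypes only):

#include <linux/errno.h>

struct example_ctr;
struct example_ctr *example_ctr_get(int nr);
void example_ctr_put(struct example_ctr *ctr);
int example_ctr_ready(struct example_ctr *ctr);

static int example_load(int nr, const void *data, int len)
{
	struct example_ctr *ctr;
	int ret = 0;

	ctr = example_ctr_get(nr);
	if (!ctr)
		return -ESRCH;

	if (len <= 0 || data == NULL) {
		ret = -EINVAL;
		goto out_put;
	}
	if (!example_ctr_ready(ctr)) {
		ret = -EBUSY;
		goto out_put;
	}
	/* ... hand the firmware to the controller here ... */
out_put:
	example_ctr_put(ctr);	/* dropped exactly once on every path */
	return ret;
}
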
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
index fd2c9be..5783d22 100644
--- a/drivers/isdn/hardware/mISDN/hfc_pci.h
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -183,8 +183,8 @@
 #define D_FREG_MASK  0xF
 
 struct zt {
-	unsigned short z1;  /* Z1 pointer 16 Bit */
-	unsigned short z2;  /* Z2 pointer 16 Bit */
+	__le16 z1;  /* Z1 pointer 16 Bit */
+	__le16 z2;  /* Z2 pointer 16 Bit */
 };
 
 struct dfifo {
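
Declaring the Z1/Z2 pointers as __le16 documents that the HFC-PCI hardware keeps them little-endian and lets sparse (make C=1) flag accesses that skip the byte-order helpers, which is the class of bug fixed in hfcpci.c below. A small hypothetical example of that discipline; example_zt and example_* functions are illustrative:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_zt {
	__le16 z1;
	__le16 z2;
};

static void example_init(struct example_zt *zt, u16 fifo_end)
{
	zt->z1 = cpu_to_le16(fifo_end - 1);
	zt->z2 = cpu_to_le16(fifo_end - 2);
}

static u16 example_fill(const struct example_zt *zt)
{
	/* convert to CPU order before doing arithmetic */
	return le16_to_cpu(zt->z1) - le16_to_cpu(zt->z2);
}
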
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 9cf5edb..cd8302a 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -43,7 +43,7 @@
 module_param(debug, uint, 0);
 
 static LIST_HEAD(HFClist);
-DEFINE_RWLOCK(HFClock);
+static DEFINE_RWLOCK(HFClock);
 
 enum {
 	HFC_CCD_2BD0,
@@ -88,7 +88,7 @@
 	unsigned char		bswapped;
 	unsigned char		protocol;
 	int			nt_timer;
-	unsigned char		*pci_io; /* start of PCI IO memory */
+	unsigned char __iomem 	*pci_io; /* start of PCI IO memory */
 	dma_addr_t		dmahandle;
 	void			*fifos; /* FIFO memory */
 	int			last_bfifo_cnt[2];
@@ -153,7 +153,7 @@
 	pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
 	del_timer(&hc->hw.timer);
 	pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
-	iounmap((void *)hc->hw.pci_io);
+	iounmap(hc->hw.pci_io);
 }
 
 /*
@@ -366,8 +366,7 @@
 	bzt->f2 = MAX_B_FRAMES;
 	bzt->f1 = bzt->f2;	/* init F pointers to remain constant */
 	bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
-	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(
-	    le16_to_cpu(bzt->za[MAX_B_FRAMES].z1 - 1));
+	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
 	if (fifo_state)
 		hc->hw.fifo_en |= fifo_state;
 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
@@ -482,7 +481,7 @@
 			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
 			    (MAX_D_FRAMES + 1);	/* next buffer */
 			df->za[df->f2 & D_FREG_MASK].z2 =
-			    cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1));
+			    cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1));
 		} else {
 			dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
 			if (!dch->rx_skb) {
@@ -523,10 +522,10 @@
 /*
  * check for transparent receive data and read max one threshold size if avail
  */
-int
+static int
 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
 {
-	unsigned short	*z1r, *z2r;
+	 __le16 *z1r, *z2r;
 	int		new_z2, fcnt, maxlen;
 	u_char		*ptr, *ptr1;
 
@@ -576,7 +575,7 @@
 /*
  * B-channel main receive routine
  */
-void
+static void
 main_rec_hfcpci(struct bchannel *bch)
 {
 	struct hfc_pci	*hc = bch->hw;
@@ -724,7 +723,7 @@
 	struct bzfifo	*bz;
 	u_char		*bdata;
 	u_char		new_f1, *src, *dst;
-	unsigned short	*z1t, *z2t;
+	__le16 *z1t, *z2t;
 
 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
 		printk(KERN_DEBUG "%s\n", __func__);
@@ -1679,7 +1678,7 @@
  * called for card init message
  */
 
-void
+static void
 inithfcpci(struct hfc_pci *hc)
 {
 	printk(KERN_DEBUG "inithfcpci: entered\n");
@@ -1966,7 +1965,7 @@
 		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
 		return 1;
 	}
-	hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start;
+	hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
 
 	if (!hc->hw.pci_io) {
 		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 127cfda..77c280e 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1533,8 +1533,10 @@
 	int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
 	if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
 		return -ENOMEM;
-	for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
+	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
 		spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
+		skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
+	}
 	return 0;
 }
 
@@ -1567,7 +1569,7 @@
 		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
 			return -ENOMEM;
 		lp->next = lp->last = lp;	/* nobody else in a queue */
-		lp->netdev->pb->frags = NULL;
+		skb_queue_head_init(&lp->netdev->pb->frags);
 		lp->netdev->pb->frames = 0;
 		lp->netdev->pb->seq = UINT_MAX;
 	}
@@ -1579,28 +1581,29 @@
 
 static u32 isdn_ppp_mp_get_seq( int short_seq, 
 					struct sk_buff * skb, u32 last_seq );
-static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
-			struct sk_buff * from, struct sk_buff * to );
-static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
-				struct sk_buff * from, struct sk_buff * to );
-static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
+static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
+				struct sk_buff *to);
+static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
+				   struct sk_buff *from, struct sk_buff *to,
+				   u32 lastseq);
+static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
 static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
 
 static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 
-							struct sk_buff *skb)
+				struct sk_buff *skb)
 {
-	struct ippp_struct *is;
-	isdn_net_local * lpq;
-	ippp_bundle * mp;
-	isdn_mppp_stats * stats;
-	struct sk_buff * newfrag, * frag, * start, *nextf;
+	struct sk_buff *newfrag, *frag, *start, *nextf;
 	u32 newseq, minseq, thisseq;
+	isdn_mppp_stats *stats;
+	struct ippp_struct *is;
 	unsigned long flags;
+	isdn_net_local *lpq;
+	ippp_bundle *mp;
 	int slot;
 
 	spin_lock_irqsave(&net_dev->pb->lock, flags);
-    	mp = net_dev->pb;
-        stats = &mp->stats;
+	mp = net_dev->pb;
+	stats = &mp->stats;
 	slot = lp->ppp_slot;
 	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
 		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1611,20 +1614,19 @@
 		return;
 	}
 	is = ippp_table[slot];
-    	if( ++mp->frames > stats->max_queue_len )
+	if (++mp->frames > stats->max_queue_len)
 		stats->max_queue_len = mp->frames;
-	
+
 	if (is->debug & 0x8)
 		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
 
-	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 
-						skb, is->last_link_seqno);
-
+	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
+				     skb, is->last_link_seqno);
 
 	/* if this packet seq # is less than last already processed one,
 	 * toss it right away, but check for sequence start case first 
 	 */
-	if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
+	if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
 		mp->seq = newseq;	/* the first packet: required for
 					 * rfc1990 non-compliant clients --
 					 * prevents constant packet toss */
@@ -1634,7 +1636,7 @@
 		spin_unlock_irqrestore(&mp->lock, flags);
 		return;
 	}
-	
+
 	/* find the minimum received sequence number over all links */
 	is->last_link_seqno = minseq = newseq;
 	for (lpq = net_dev->queue;;) {
@@ -1655,22 +1657,31 @@
 					 * packets */
 	newfrag = skb;
 
-  	/* if this new fragment is before the first one, then enqueue it now. */
-  	if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
-		newfrag->next = frag;
-    		mp->frags = frag = newfrag;
-    		newfrag = NULL;
-  	}
+	/* Insert new fragment into the proper sequence slot.  */
+	skb_queue_walk(&mp->frags, frag) {
+		if (MP_SEQ(frag) == newseq) {
+			isdn_ppp_mp_free_skb(mp, newfrag);
+			newfrag = NULL;
+			break;
+		}
+		if (MP_LT(newseq, MP_SEQ(frag))) {
+			__skb_queue_before(&mp->frags, frag, newfrag);
+			newfrag = NULL;
+			break;
+		}
+	}
+	if (newfrag)
+		__skb_queue_tail(&mp->frags, newfrag);
 
-  	start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
-				MP_SEQ(frag) == mp->seq ? frag : NULL;
+	frag = skb_peek(&mp->frags);
+	start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
+		 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
+	if (!start)
+		goto check_overflow;
 
-	/* 
-	 * main fragment traversing loop
+	/* main fragment traversing loop
 	 *
 	 * try to accomplish several tasks:
-	 * - insert new fragment into the proper sequence slot (once that's done
-	 *   newfrag will be set to NULL)
 	 * - reassemble any complete fragment sequence (non-null 'start'
 *   indicates there is a contiguous sequence present)
 	 * - discard any incomplete sequences that are below minseq -- due
@@ -1679,71 +1690,46 @@
 	 *   come to complete such sequence and it should be discarded
 	 *
 	 * loop completes when we accomplished the following tasks:
-	 * - new fragment is inserted in the proper sequence ('newfrag' is 
-	 *   set to NULL)
 	 * - we hit a gap in the sequence, so no reassembly/processing is 
 	 *   possible ('start' would be set to NULL)
 	 *
 	 * algorithm for this code is derived from code in the book
 	 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
 	 */
-  	while (start != NULL || newfrag != NULL) {
+	skb_queue_walk_safe(&mp->frags, frag, nextf) {
+		thisseq = MP_SEQ(frag);
 
-    		thisseq = MP_SEQ(frag);
-    		nextf = frag->next;
-
-    		/* drop any duplicate fragments */
-    		if (newfrag != NULL && thisseq == newseq) {
-      			isdn_ppp_mp_free_skb(mp, newfrag);
-      			newfrag = NULL;
-    		}
-
-    		/* insert new fragment before next element if possible. */
-    		if (newfrag != NULL && (nextf == NULL || 
-						MP_LT(newseq, MP_SEQ(nextf)))) {
-      			newfrag->next = nextf;
-      			frag->next = nextf = newfrag;
-      			newfrag = NULL;
-    		}
-
-    		if (start != NULL) {
-	    		/* check for misplaced start */
-      			if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
-				printk(KERN_WARNING"isdn_mppp(seq %d): new "
-				      "BEGIN flag with no prior END", thisseq);
-				stats->seqerrs++;
-				stats->frame_drops++;
-				start = isdn_ppp_mp_discard(mp, start,frag);
-				nextf = frag->next;
-      			}
-    		} else if (MP_LE(thisseq, minseq)) {		
-      			if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
+		/* check for misplaced start */
+		if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
+			printk(KERN_WARNING"isdn_mppp(seq %d): new "
+			       "BEGIN flag with no prior END", thisseq);
+			stats->seqerrs++;
+			stats->frame_drops++;
+			isdn_ppp_mp_discard(mp, start, frag);
+			start = frag;
+		} else if (MP_LE(thisseq, minseq)) {		
+			if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
 				start = frag;
-      			else {
+			else {
 				if (MP_FLAGS(frag) & MP_END_FRAG)
-	  				stats->frame_drops++;
-				if( mp->frags == frag )
-					mp->frags = nextf;	
+					stats->frame_drops++;
+				__skb_unlink(frag, &mp->frags);
 				isdn_ppp_mp_free_skb(mp, frag);
-				frag = nextf;
 				continue;
-      			}
+			}
 		}
-		
-		/* if start is non-null and we have end fragment, then
-		 * we have full reassembly sequence -- reassemble 
-		 * and process packet now
-		 */
-    		if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
-      			minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
-      			/* Reassemble the packet then dispatch it */
-			isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
-      
-      			start = NULL;
-      			frag = NULL;
 
-      			mp->frags = nextf;
-    		}
+		/* if we have end fragment, then we have full reassembly
+		 * sequence -- reassemble and process packet now
+		 */
+		if (MP_FLAGS(frag) & MP_END_FRAG) {
+			minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
+			/* Reassemble the packet then dispatch it */
+			isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
+
+			start = NULL;
+			frag = NULL;
+		}
 
 		/* check if need to update start pointer: if we just
 		 * reassembled the packet and sequence is contiguous
@@ -1754,26 +1740,25 @@
 		 * below low watermark and set start to the next frag or
 		 * clear start ptr.
 		 */ 
-    		if (nextf != NULL && 
+		if (nextf != (struct sk_buff *)&mp->frags && 
 		    ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
-      			/* if we just reassembled and the next one is here, 
-			 * then start another reassembly. */
-
-      			if (frag == NULL) {
+			/* if we just reassembled and the next one is here, 
+			 * then start another reassembly.
+			 */
+			if (frag == NULL) {
 				if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
-	  				start = nextf;
-				else
-				{
-	  				printk(KERN_WARNING"isdn_mppp(seq %d):"
-						" END flag with no following "
-						"BEGIN", thisseq);
+					start = nextf;
+				else {
+					printk(KERN_WARNING"isdn_mppp(seq %d):"
+					       " END flag with no following "
+					       "BEGIN", thisseq);
 					stats->seqerrs++;
 				}
 			}
-
-    		} else {
-			if ( nextf != NULL && frag != NULL &&
-						MP_LT(thisseq, minseq)) {
+		} else {
+			if (nextf != (struct sk_buff *)&mp->frags &&
+			    frag != NULL &&
+			    MP_LT(thisseq, minseq)) {
 				/* we've got a break in the sequence
 				 * and we not at the end yet
 				 * and we did not just reassembled
@@ -1782,41 +1767,39 @@
 			 	 * discard all the frames below low watermark 
 				 * and start over */
 				stats->frame_drops++;
-				mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
+				isdn_ppp_mp_discard(mp, start, nextf);
 			}
 			/* break in the sequence, no reassembly */
-      			start = NULL;
-    		}
-	  			
-    		frag = nextf;
-  	}	/* while -- main loop */
-	
-  	if (mp->frags == NULL)
-    		mp->frags = frag;
-		
+			start = NULL;
+		}
+		if (!start)
+			break;
+	}
+
+check_overflow:
 	/* rather straightforward way to deal with (not very) possible
-	 * queue overflow */
+	 * queue overflow
+	 */
 	if (mp->frames > MP_MAX_QUEUE_LEN) {
 		stats->overflows++;
-		while (mp->frames > MP_MAX_QUEUE_LEN) {
-			frag = mp->frags->next;
-			isdn_ppp_mp_free_skb(mp, mp->frags);
-			mp->frags = frag;
+		skb_queue_walk_safe(&mp->frags, frag, nextf) {
+			if (mp->frames <= MP_MAX_QUEUE_LEN)
+				break;
+			__skb_unlink(frag, &mp->frags);
+			isdn_ppp_mp_free_skb(mp, frag);
 		}
 	}
 	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
+static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
 {
-	struct sk_buff * frag = lp->netdev->pb->frags;
-	struct sk_buff * nextfrag;
-    	while( frag ) {
-		nextfrag = frag->next;
-		isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
-		frag = nextfrag;
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
+		__skb_unlink(skb, &lp->netdev->pb->frags);
+		isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
 	}
-	lp->netdev->pb->frags = NULL;
 }
 
 static u32 isdn_ppp_mp_get_seq( int short_seq, 
@@ -1853,72 +1836,115 @@
 	return seq;
 }
 
-struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
-			struct sk_buff * from, struct sk_buff * to )
+static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
+				struct sk_buff *to)
 {
-	if( from )
-		while (from != to) {
-	  		struct sk_buff * next = from->next;
-			isdn_ppp_mp_free_skb(mp, from);
-	  		from = next;
+	if (from) {
+		struct sk_buff *skb, *tmp;
+		int freeing = 0;
+
+		skb_queue_walk_safe(&mp->frags, skb, tmp) {
+			if (skb == to)
+				break;
+			if (skb == from)
+				freeing = 1;
+			if (!freeing)
+				continue;
+			__skb_unlink(skb, &mp->frags);
+			isdn_ppp_mp_free_skb(mp, skb);
 		}
-	return from;
+	}
 }
 
-void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
-				struct sk_buff * from, struct sk_buff * to )
+static unsigned int calc_tot_len(struct sk_buff_head *queue,
+				 struct sk_buff *from, struct sk_buff *to)
 {
-	ippp_bundle * mp = net_dev->pb;
-	int proto;
-	struct sk_buff * skb;
+	unsigned int tot_len = 0;
+	struct sk_buff *skb;
+	int found_start = 0;
+
+	skb_queue_walk(queue, skb) {
+		if (skb == from)
+			found_start = 1;
+		if (!found_start)
+			continue;
+		tot_len += skb->len - MP_HEADER_LEN;
+		if (skb == to)
+			break;
+	}
+	return tot_len;
+}
+
+/* Reassemble packet using fragments in the reassembly queue from
+ * 'from' until 'to', inclusive.
+ */
+static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
+				   struct sk_buff *from, struct sk_buff *to,
+				   u32 lastseq)
+{
+	ippp_bundle *mp = net_dev->pb;
 	unsigned int tot_len;
+	struct sk_buff *skb;
+	int proto;
 
 	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
 		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
 			__func__, lp->ppp_slot);
 		return;
 	}
-	if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
-		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
+
+	tot_len = calc_tot_len(&mp->frags, from, to);
+
+	if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
+		if (ippp_table[lp->ppp_slot]->debug & 0x40)
 			printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
-					"len %d\n", MP_SEQ(from), from->len );
+			       "len %d\n", MP_SEQ(from), from->len);
 		skb = from;
 		skb_pull(skb, MP_HEADER_LEN);
+		__skb_unlink(skb, &mp->frags);
 		mp->frames--;	
 	} else {
-		struct sk_buff * frag;
-		int n;
+		struct sk_buff *walk, *tmp;
+		int found_start = 0;
 
-		for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
-			tot_len += frag->len - MP_HEADER_LEN;
-
-		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
+		if (ippp_table[lp->ppp_slot]->debug & 0x40)
 			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
-				"to %d, len %d\n", MP_SEQ(from), 
-				(MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
-		if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
+			       "to %d, len %d\n", MP_SEQ(from), lastseq,
+			       tot_len);
+
+		skb = dev_alloc_skb(tot_len);
+		if (!skb)
 			printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
-					"of size %d\n", tot_len);
-			isdn_ppp_mp_discard(mp, from, to);
-			return;
-		}
+			       "of size %d\n", tot_len);
 
-		while( from != to ) {
-			unsigned int len = from->len - MP_HEADER_LEN;
+		found_start = 0;
+		skb_queue_walk_safe(&mp->frags, walk, tmp) {
+			if (walk == from)
+				found_start = 1;
+			if (!found_start)
+				continue;
 
-			skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
-							 skb_put(skb,len),
-							 len);
-			frag = from->next;
-			isdn_ppp_mp_free_skb(mp, from);
-			from = frag; 
+			if (skb) {
+				unsigned int len = walk->len - MP_HEADER_LEN;
+				skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
+								 skb_put(skb, len),
+								 len);
+			}
+			__skb_unlink(walk, &mp->frags);
+			isdn_ppp_mp_free_skb(mp, walk);
+
+			if (walk == to)
+				break;
 		}
 	}
+	if (!skb)
+		return;
+
    	proto = isdn_ppp_strip_proto(skb);
 	isdn_ppp_push_higher(net_dev, lp, skb, proto);
 }
 
-static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
+static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
 {
 	dev_kfree_skb(skb);
 	mp->frames--;
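
The core of the isdn_ppp rework is that the per-bundle fragment list becomes a struct sk_buff_head kept sorted by multilink sequence number. The insertion step reduces to the sketch below; example_seq() is a hypothetical stand-in for MP_SEQ(), and the plain '<' ignores the sequence-number wraparound that MP_LT() handles in the driver:

#include <linux/skbuff.h>

/* illustrative: the driver derives the sequence number from the MP header */
static u32 example_seq(const struct sk_buff *skb)
{
	return *(const u32 *)skb->cb;
}

static void example_insert_sorted(struct sk_buff_head *frags,
				  struct sk_buff *newfrag)
{
	struct sk_buff *frag;

	skb_queue_walk(frags, frag) {
		if (example_seq(frag) == example_seq(newfrag)) {
			kfree_skb(newfrag);		/* duplicate fragment */
			return;
		}
		if (example_seq(newfrag) < example_seq(frag)) {
			__skb_queue_before(frags, frag, newfrag);
			return;
		}
	}
	__skb_queue_tail(frags, newfrag);	/* sorts after everything queued */
}
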
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index b5fabc7..e746292 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -124,18 +124,6 @@
 	return ret;
 }
 
-static loff_t
-mISDN_llseek(struct file *filep, loff_t offset, int orig)
-{
-	return -ESPIPE;
-}
-
-static ssize_t
-mISDN_write(struct file *filep, const char *buf, size_t count, loff_t *off)
-{
-	return -EOPNOTSUPP;
-}
-
 static unsigned int
 mISDN_poll(struct file *filep, poll_table *wait)
 {
@@ -157,8 +145,9 @@
 }
 
 static void
-dev_expire_timer(struct mISDNtimer *timer)
+dev_expire_timer(unsigned long data)
 {
+	struct mISDNtimer *timer = (void *)data;
 	u_long			flags;
 
 	spin_lock_irqsave(&timer->dev->lock, flags);
@@ -191,7 +180,7 @@
 		spin_unlock_irqrestore(&dev->lock, flags);
 		timer->dev = dev;
 		timer->tl.data = (long)timer;
-		timer->tl.function = (void *) dev_expire_timer;
+		timer->tl.function = dev_expire_timer;
 		init_timer(&timer->tl);
 		timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
 		add_timer(&timer->tl);
@@ -211,6 +200,9 @@
 	list_for_each_entry(timer, &dev->pending, list) {
 		if (timer->id == id) {
 			list_del_init(&timer->list);
+			/* RED-PEN AK: race -- timer can be still running on
+			 * other CPU. Needs reference count I think
+			 */
 			del_timer(&timer->tl);
 			ret = timer->id;
 			kfree(timer);
@@ -268,9 +260,7 @@
 }
 
 static struct file_operations mISDN_fops = {
-	.llseek		= mISDN_llseek,
 	.read		= mISDN_read,
-	.write		= mISDN_write,
 	.poll		= mISDN_poll,
 	.ioctl		= mISDN_ioctl,
 	.open		= mISDN_open,
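
Two details in the timerdev hunks: the callback now has the exact 'unsigned long data' signature struct timer_list expects in this kernel era instead of being cast to an incompatible function type, and the RED-PEN comment flags that del_timer() plus kfree() can race with a handler still running on another CPU (del_timer_sync(), or the reference count the comment suggests, is the usual remedy). A hypothetical sketch of the callback shape:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>

struct example_timer {
	struct timer_list tl;
	int id;
};

static void example_expire(unsigned long data)
{
	struct example_timer *t = (struct example_timer *)data;

	pr_debug("timer %d fired\n", t->id);
}

static void example_arm(struct example_timer *t, unsigned long ms)
{
	/* equivalent to init_timer() plus assigning .function and .data */
	setup_timer(&t->tl, example_expire, (unsigned long)t);
	mod_timer(&t->tl, jiffies + msecs_to_jiffies(ms));
}
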
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index fdfb2b2..a424869 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -130,12 +130,12 @@
 
 static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
 #define TIMEOUT_MSG(lineno) \
-	printk(timeout_msg, filename,__FUNCTION__,(lineno))
+	printk(timeout_msg, filename,__func__,(lineno))
 
 static const char invalid_pcb_msg[] =
 "*** invalid pcb length %d at %s:%s (line %d) ***\n";
 #define INVALID_PCB_MSG(len) \
-	printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+	printk(invalid_pcb_msg, (len),filename,__func__,__LINE__)
 
 static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
 
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 6011d6f..85fa40a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -127,7 +127,6 @@
 	  (CP)->tx_tail - (CP)->tx_head - 1)
 
 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
-#define RX_OFFSET		2
 #define CP_INTERNAL_PHY		32
 
 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -552,14 +551,14 @@
 			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
 			       dev->name, rx_tail, status, len);
 
-		buflen = cp->rx_buf_sz + RX_OFFSET;
-		new_skb = dev_alloc_skb (buflen);
+		buflen = cp->rx_buf_sz + NET_IP_ALIGN;
+		new_skb = netdev_alloc_skb(dev, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
 		}
 
-		skb_reserve(new_skb, RX_OFFSET);
+		skb_reserve(new_skb, NET_IP_ALIGN);
 
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
@@ -1051,19 +1050,20 @@
 	cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
-static int cp_refill_rx (struct cp_private *cp)
+static int cp_refill_rx(struct cp_private *cp)
 {
+	struct net_device *dev = cp->dev;
 	unsigned i;
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
 		dma_addr_t mapping;
 
-		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
 		if (!skb)
 			goto err_out;
 
-		skb_reserve(skb, RX_OFFSET);
+		skb_reserve(skb, NET_IP_ALIGN);
 
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
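
Beyond replacing the driver-private RX_OFFSET of 2 with NET_IP_ALIGN (which defaults to 2 but may be overridden per architecture), the hunks switch to netdev_alloc_skb(), which associates the buffer with the receiving device. The pattern, as a small hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int buf_sz)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, buf_sz + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* 14-byte Ethernet header + padding => IP header lands aligned */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
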
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 8a5b0d2..32e66f0 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -309,7 +309,7 @@
 	Cfg9346		= 0x50,
 	Config0		= 0x51,
 	Config1		= 0x52,
-	FlashReg	= 0x54,
+	TimerInt	= 0x54,
 	MediaStatus	= 0x58,
 	Config3		= 0x59,
 	Config4		= 0x5A,	 /* absent on RTL-8139A */
@@ -325,6 +325,7 @@
 	FIFOTMS		= 0x70,	 /* FIFO Control and test. */
 	CSCR		= 0x74,	 /* Chip Status and Configuration Register. */
 	PARA78		= 0x78,
+	FlashReg	= 0xD4,	/* Communication with Flash ROM, four bytes. */
 	PARA7c		= 0x7c,	 /* Magic transceiver parameter register. */
 	Config5		= 0xD8,	 /* absent on RTL-8139A */
 };
@@ -2009,9 +2010,9 @@
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = dev_alloc_skb (pkt_size + 2);
+		skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
 		if (likely(skb)) {
-			skb_reserve (skb, 2);	/* 16 byte align the IP fields. */
+			skb_reserve (skb, NET_IP_ALIGN);	/* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4a11296..2d6a060 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1813,7 +1813,7 @@
 
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
-	depends on PPC_MERGE && PPC_MPC52xx && PPC_BESTCOMM_FEC
+	depends on PPC_MPC52xx && PPC_BESTCOMM_FEC
 	select CRC32
 	select PHYLIB
 	---help---
@@ -1840,6 +1840,17 @@
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
 
+config ATL2
+	tristate "Atheros L2 Fast Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	help
+	  This driver supports the Atheros L2 fast ethernet adapter.
+
+	  To compile this driver as a module, choose M here.  The module
+	  will be called atl2.
+
 source "drivers/net/fs_enet/Kconfig"
 
 endif # NET_ETHERNET
@@ -1927,15 +1938,6 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called e1000.
 
-config E1000_DISABLE_PACKET_SPLIT
-	bool "Disable Packet Split for PCI express adapters"
-	depends on E1000
-	help
-	  Say Y here if you want to use the legacy receive path for PCI express
-	  hardware.
-
-	  If in doubt, say N.
-
 config E1000E
 	tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
 	depends on PCI && (!SPARC32 || BROKEN)
@@ -2046,6 +2048,7 @@
 	tristate "Realtek 8169 gigabit ethernet support"
 	depends on PCI
 	select CRC32
+	select MII
 	---help---
 	  Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
 
@@ -2262,7 +2265,7 @@
 config MV643XX_ETH
 	tristate "Marvell Discovery (643XX) and Orion ethernet support"
 	depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION
-	select MII
+	select PHYLIB
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -2302,6 +2305,18 @@
 	  To compile this driver as a module, choose M here.  The module
 	  will be called atl1e.
 
+config JME
+	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  This driver supports the PCI-Express gigabit ethernet adapters
+	  based on JMicron JMC250 chipset.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called jme.
+
 endif # NETDEV_1000
 
 #
@@ -2377,10 +2392,18 @@
 	  To compile the driver as a module, choose M here. The module
 	  will be called ehea.
 
+config ENIC
+	tristate "E, the Cisco 10G Ethernet NIC"
+	depends on PCI && INET
+	select INET_LRO
+	help
+	  This enables the support for the Cisco 10G Ethernet card.
+
 config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI && INET
 	select INET_LRO
+	select INTEL_IOATDMA
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
 	  adapters.  For more information on how to identify your adapter, go
@@ -2432,6 +2455,7 @@
 	select FW_LOADER
 	select CRC32
 	select INET_LRO
+	select INTEL_IOATDMA
 	---help---
 	  This driver supports Myricom Myri-10G Dual Protocol interface in
 	  Ethernet mode. If the eeprom on your board is not recent enough,
@@ -2496,6 +2520,15 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2x.  This is recommended.
 
+config QLGE
+	tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qlge.
+
 source "drivers/net/sfc/Kconfig"
 
 endif # NETDEV_10000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7629c90..fa2510b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,9 +15,12 @@
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
+obj-$(CONFIG_ENIC) += enic/
+obj-$(CONFIG_JME) += jme.o
 
 gianfar_driver-objs := gianfar.o \
 		gianfar_ethtool.o \
@@ -111,7 +114,7 @@
 obj-$(CONFIG_NE2000) += ne.o 8390p.o
 obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
 obj-$(CONFIG_HPLAN) += hp.o 8390p.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
+obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o
 obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
 obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
 obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -128,6 +131,7 @@
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLGE) += qlge/
 
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index bdc4c0b..a5b0769 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -442,24 +442,24 @@
 		BUGMSG(D_NORMAL, "WARNING!  Station address FF may confuse "
 		       "DOS networking programs!\n");
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	if (ASTATUS() & RESETflag) {
-	  	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	  	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 		ACOMMAND(CFLAGScmd | RESETclear);
 	}
 
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	/* make sure we're ready to receive IRQ's. */
 	AINTMASK(0);
 	udelay(1);		/* give it time to set the mask before
 				 * we reset it again. (may not even be
 				 * necessary)
 				 */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->intmask = NORXflag | RECONflag;
 	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	netif_start_queue(dev);
 
@@ -670,14 +670,14 @@
 		freeskb = 0;
 	}
 
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
 	/* make sure we didn't ignore a TX IRQ while we were in here */
 	AINTMASK(0);
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->intmask |= TXFREEflag|EXCNAKflag;
 	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 	if (freeskb) {
@@ -798,7 +798,7 @@
                 diagstatus = (status >> 8) & 0xFF;
 
 		BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
-			__FILE__,__LINE__,__FUNCTION__,status);
+			__FILE__,__LINE__,__func__,status);
 		didsomething = 0;
 
 		/*
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 8b51313..70124a9 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -238,15 +238,15 @@
 	u_char inbyte;
 
 	BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
-		__FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name);
+		__FILE__,__LINE__,__func__,dev,lp,dev->name);
 	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
 	       dev->name, ASTATUS());
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
 	/* power-up defaults */
 	SETCONF;
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	if (really_reset) {
 		/* reset the card */
@@ -254,22 +254,22 @@
 		mdelay(RESETtime * 2);	/* COM20020 seems to be slower sometimes */
 	}
 	/* clear flags & end reset */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
 
 	/* verify that the ARCnet signature byte is present */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	if (inbyte != TESTvalue) {
-		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 		BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
 		return 1;
 	}
 	/* enable extended (512-byte) packets */
 	ACOMMAND(CONFIGcmd | EXTconf);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	/* done!  return success. */
 	return 0;
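
The 3c505 and arcnet hunks above are the same mechanical conversion: __FUNCTION__ is the old GNU spelling that newer gcc releases warn about, while __func__ is the C99 equivalent the tree is converging on. For illustration:

#include <linux/kernel.h>

/* both expand to the enclosing function's name; __func__ is the C99 form */
static void example_trace(void)
{
	printk(KERN_DEBUG "%s: %d: %s\n", __FILE__, __LINE__, __func__);
}
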
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 949e753..8cbc1b5 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -397,7 +397,7 @@
  */
 int atl1e_phy_commit(struct atl1e_hw *hw)
 {
-	struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+	struct atl1e_adapter *adapter = hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 	int ret_val;
 	u16 phy_data;
@@ -431,7 +431,7 @@
 
 int atl1e_phy_init(struct atl1e_hw *hw)
 {
-	struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+	struct atl1e_adapter *adapter = hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 	s32 ret_val;
 	u16 phy_val;
@@ -525,7 +525,7 @@
  */
 int atl1e_reset_hw(struct atl1e_hw *hw)
 {
-	struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
+	struct atl1e_adapter *adapter = hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 
 	u32 idle_status_data = 0;
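
The casts removed above add nothing: whether hw->adapter is a void * back-pointer (as assumed in the hypothetical sketch below) or already has the right type, C converts it implicitly, and the explicit cast would only hide a future type change from the compiler.

struct example_adapter { int dummy; };
struct example_hw { void *adapter; };

static struct example_adapter *example_get(struct example_hw *hw)
{
	return hw->adapter;	/* implicit conversion; no cast needed */
}
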
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 7685b99..9b60352 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2390,9 +2390,7 @@
 	}
 
 	/* Init GPHY as early as possible due to power saving issue  */
-	spin_lock(&adapter->mdio_lock);
 	atl1e_phy_init(&adapter->hw);
-	spin_unlock(&adapter->mdio_lock);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1e_reset_hw(&adapter->hw);
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
index ca45553..e4f6022 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/atlx/Makefile
@@ -1 +1,3 @@
 obj-$(CONFIG_ATL1)	+= atl1.o
+obj-$(CONFIG_ATL2)	+= atl2.o
+
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
new file mode 100644
index 0000000..5ab9c76
--- /dev/null
+++ b/drivers/net/atlx/atl2.c
@@ -0,0 +1,3129 @@
+/*
+ * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <asm/atomic.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/mii.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atl2.h"
+
+#define ATL2_DRV_VERSION "2.2.3"
+
+static char atl2_driver_name[] = "atl2";
+static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
+static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static char atl2_driver_version[] = ATL2_DRV_VERSION;
+
+MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
+MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL2_DRV_VERSION);
+
+/*
+ * atl2_pci_tbl - PCI Device ID Table
+ */
+static struct pci_device_id atl2_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
+
+static void atl2_set_ethtool_ops(struct net_device *netdev);
+
+static void atl2_check_options(struct atl2_adapter *adapter);
+
+/*
+ * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl2_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->wol = 0;
+	adapter->ict = 50000;  /* ~100ms */
+	adapter->link_speed = SPEED_0;   /* hardware init */
+	adapter->link_duplex = FULL_DUPLEX;
+
+	hw->phy_configured = false;
+	hw->preamble_len = 7;
+	hw->ipgt = 0x60;
+	hw->min_ifg = 0x50;
+	hw->ipgr1 = 0x40;
+	hw->ipgr2 = 0x60;
+	hw->retry_buf = 2;
+	hw->max_retry = 0xf;
+	hw->lcol = 0x37;
+	hw->jam_ipg = 7;
+	hw->fc_rxd_hi = 0;
+	hw->fc_rxd_lo = 0;
+	hw->max_frame_size = adapter->netdev->mtu;
+
+	spin_lock_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->tx_lock);
+
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+	return 0;
+}
+
+/*
+ * atl2_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl2_set_multi(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	struct dev_mc_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= MAC_CTRL_PROMIS_EN;
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= MAC_CTRL_MC_ALL_EN;
+		rctl &= ~MAC_CTRL_PROMIS_EN;
+	} else
+		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+	ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
+
+	/* clear the old settings from the multicast hash table */
+	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	/* compute mc addresses' hash value, and put it into hash table */
+	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+		hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+		atl2_hash_set(hw, hash_value);
+	}
+}
+
+static void init_ring_ptrs(struct atl2_adapter *adapter)
+{
+	/* Read / Write Ptr Initialize: */
+	adapter->txd_write_ptr = 0;
+	atomic_set(&adapter->txd_read_ptr, 0);
+
+	adapter->rxd_read_ptr = 0;
+	adapter->rxd_write_ptr = 0;
+
+	atomic_set(&adapter->txs_write_ptr, 0);
+	adapter->txs_next_clear = 0;
+}
+
+/*
+ * atl2_configure - Configure Transmit&Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx /Rx unit of the MAC after a reset.
+ */
+static int atl2_configure(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	u32 value;
+
+	/* clear interrupt status */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
+
+	/* set MAC Address */
+	value = (((u32)hw->mac_addr[2]) << 24) |
+		(((u32)hw->mac_addr[3]) << 16) |
+		(((u32)hw->mac_addr[4]) << 8) |
+		(((u32)hw->mac_addr[5]));
+	ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
+	value = (((u32)hw->mac_addr[0]) << 8) |
+		(((u32)hw->mac_addr[1]));
+	ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
+
+	/* HI base address */
+	ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
+		(u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
+
+	/* LO base address */
+	ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
+		(u32)(adapter->txd_dma & 0x00000000ffffffffULL));
+	ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
+		(u32)(adapter->txs_dma & 0x00000000ffffffffULL));
+	ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
+		(u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
+
+	/* element count */
+	ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
+	ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
+	ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM,  (u16)adapter->rxd_ring_size);
+
+	/* config Internal SRAM */
+/*
+    ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
+    ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
+*/
+
+	/* config IPG/IFG */
+	value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
+		MAC_IPG_IFG_IPGT_SHIFT) |
+		(((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
+		MAC_IPG_IFG_MIFG_SHIFT) |
+		(((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
+		MAC_IPG_IFG_IPGR1_SHIFT)|
+		(((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
+		MAC_IPG_IFG_IPGR2_SHIFT);
+	ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
+
+	/* config  Half-Duplex Control */
+	value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+		(((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
+		MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+		(((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
+		MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+	ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
+
+	/* set Interrupt Moderator Timer */
+	ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
+	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
+
+	/* set Interrupt Clear Timer */
+	ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
+
+	/* set MTU */
+	ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
+		ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
+
+	/* 1590 */
+	ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
+
+	/* flow control */
+	ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
+	ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
+
+	/* Init mailbox */
+	ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
+	ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
+
+	/* enable DMA read/write */
+	ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
+	ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
+
+	value = ATL2_READ_REG(&adapter->hw, REG_ISR);
+	if ((value & ISR_PHY_LINKDOWN) != 0)
+		value = 1; /* config failed */
+	else
+		value = 0;
+
+	/* clear all interrupt status */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+	return value;
+}
+
+/*
+ * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+	u8 offset = 0;
+
+	/* real ring DMA buffer */
+	adapter->ring_size = size =
+		adapter->txd_ring_size * 1 + 7 +	/* dword align */
+		adapter->txs_ring_size * 4 + 7 +	/* dword align */
+		adapter->rxd_ring_size * 1536 + 127;	/* 128bytes align */
+
+	adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
+		&adapter->ring_dma);
+	if (!adapter->ring_vir_addr)
+		return -ENOMEM;
+	memset(adapter->ring_vir_addr, 0, adapter->ring_size);
+
+	/* Init TXD Ring */
+	adapter->txd_dma = adapter->ring_dma ;
+	offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
+	adapter->txd_dma += offset;
+	adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
+		offset);
+
+	/* Init TXS Ring */
+	adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
+	offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
+	adapter->txs_dma += offset;
+	adapter->txs_ring = (struct tx_pkt_status *)
+		(((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
+
+	/* Init RXD Ring */
+	adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
+	offset = (adapter->rxd_dma & 127) ?
+		(128 - (adapter->rxd_dma & 127)) : 0;
+	if (offset > 7)
+		offset -= 8;
+	else
+		offset += (128 - 8);
+
+	adapter->rxd_dma += offset;
+	adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
+		(adapter->txs_ring_size * 4 + offset));
+
+/*
+ * Read / Write Ptr Initialize:
+ *      init_ring_ptrs(adapter);
+ */
+	return 0;
+}
+
+/*
+ * atl2_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_enable(struct atl2_adapter *adapter)
+{
+	ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
+	ATL2_WRITE_FLUSH(&adapter->hw);
+}
+
+/*
+ * atl2_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_disable(struct atl2_adapter *adapter)
+{
+	ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
+	ATL2_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void atl2_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl;
+
+	atl2_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+		ctrl |= MAC_CTRL_RMV_VLAN;
+		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+		ctrl &= ~MAC_CTRL_RMV_VLAN;
+		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
+	}
+
+	atl2_irq_enable(adapter);
+}
+
+static void atl2_restore_vlan(struct atl2_adapter *adapter)
+{
+	atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
+#endif
+
+static void atl2_intr_rx(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rx_desc *rxd;
+	struct sk_buff *skb;
+
+	do {
+		rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
+		if (!rxd->status.update)
+			break; /* end of tx */
+
+		/* clear this flag at once */
+		rxd->status.update = 0;
+
+		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
+			int rx_size = (int)(rxd->status.pkt_size - 4);
+			/* alloc new buffer */
+			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+			if (NULL == skb) {
+				printk(KERN_WARNING
+					"%s: Mem squeeze, deferring packet.\n",
+					netdev->name);
+				/*
+				 * Check that some rx space is free. If not,
+				 * free one and mark stats->rx_dropped++.
+				 */
+				adapter->net_stats.rx_dropped++;
+				break;
+			}
+			skb_reserve(skb, NET_IP_ALIGN);
+			skb->dev = netdev;
+			memcpy(skb->data, rxd->packet, rx_size);
+			skb_put(skb, rx_size);
+			skb->protocol = eth_type_trans(skb, netdev);
+#ifdef NETIF_F_HW_VLAN_TX
+			if (adapter->vlgrp && (rxd->status.vlan)) {
+				u16 vlan_tag = (rxd->status.vtag>>4) |
+					((rxd->status.vtag&7) << 13) |
+					((rxd->status.vtag&8) << 9);
+				vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
+			} else
+#endif
+			netif_rx(skb);
+			adapter->net_stats.rx_bytes += rx_size;
+			adapter->net_stats.rx_packets++;
+			netdev->last_rx = jiffies;
+		} else {
+			adapter->net_stats.rx_errors++;
+
+			if (rxd->status.ok && rxd->status.pkt_size <= 60)
+				adapter->net_stats.rx_length_errors++;
+			if (rxd->status.mcast)
+				adapter->net_stats.multicast++;
+			if (rxd->status.crc)
+				adapter->net_stats.rx_crc_errors++;
+			if (rxd->status.align)
+				adapter->net_stats.rx_frame_errors++;
+		}
+
+		/* advance write ptr */
+		if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
+			adapter->rxd_write_ptr = 0;
+	} while (1);
+
+	/* update mailbox? */
+	adapter->rxd_read_ptr = adapter->rxd_write_ptr;
+	ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
+}
+
+static void atl2_intr_tx(struct atl2_adapter *adapter)
+{
+	u32 txd_read_ptr;
+	u32 txs_write_ptr;
+	struct tx_pkt_status *txs;
+	struct tx_pkt_header *txph;
+	int free_hole = 0;
+
+	do {
+		txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+		txs = adapter->txs_ring + txs_write_ptr;
+		if (!txs->update)
+			break; /* tx stop here */
+
+		free_hole = 1;
+		txs->update = 0;
+
+		if (++txs_write_ptr == adapter->txs_ring_size)
+			txs_write_ptr = 0;
+		atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
+
+		txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
+		txph = (struct tx_pkt_header *)
+			(((u8 *)adapter->txd_ring) + txd_read_ptr);
+
+		if (txph->pkt_size != txs->pkt_size) {
+			struct tx_pkt_status *old_txs = txs;
+			printk(KERN_WARNING
+				"%s: txs packet size not consistent with txd"
+				" txd_:0x%08x, txs_:0x%08x!\n",
+				adapter->netdev->name,
+				*(u32 *)txph, *(u32 *)txs);
+			printk(KERN_WARNING
+				"txd read ptr: 0x%x\n",
+				txd_read_ptr);
+			txs = adapter->txs_ring + txs_write_ptr;
+			printk(KERN_WARNING
+				"txs-behind:0x%08x\n",
+				*(u32 *)txs);
+			if (txs_write_ptr < 2) {
+				txs = adapter->txs_ring +
+					(adapter->txs_ring_size +
+					txs_write_ptr - 2);
+			} else {
+				txs = adapter->txs_ring + (txs_write_ptr - 2);
+			}
+			printk(KERN_WARNING
+				"txs-before:0x%08x\n",
+				*(u32 *)txs);
+			txs = old_txs;
+		}
+
+		/* skip the packet plus its 4-byte TX packet header (TPH), 4-byte aligned */
+		txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
+		if (txd_read_ptr >= adapter->txd_ring_size)
+			txd_read_ptr -= adapter->txd_ring_size;
+
+		atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
+
+		/* tx statistics: */
+		if (txs->ok) {
+			adapter->net_stats.tx_bytes += txs->pkt_size;
+			adapter->net_stats.tx_packets++;
+		} else
+			adapter->net_stats.tx_errors++;
+
+		if (txs->defer)
+			adapter->net_stats.collisions++;
+		if (txs->abort_col)
+			adapter->net_stats.tx_aborted_errors++;
+		if (txs->late_col)
+			adapter->net_stats.tx_window_errors++;
+		if (txs->underun)
+			adapter->net_stats.tx_fifo_errors++;
+	} while (1);
+
+	if (free_hole) {
+		if (netif_queue_stopped(adapter->netdev) &&
+			netif_carrier_ok(adapter->netdev))
+			netif_wake_queue(adapter->netdev);
+	}
+}
+
+static void atl2_check_for_link(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 phy_data = 0;
+
+	spin_lock(&adapter->stats_lock);
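+	/* the link status bit in MII_BMSR is latched, so read the register
+	 * twice to get the current link state */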
+	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	spin_unlock(&adapter->stats_lock);
+
+	/* notify upper layer link down ASAP */
+	if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
+		if (netif_carrier_ok(netdev)) { /* old link state: Up */
+			printk(KERN_INFO "%s: %s NIC Link is Down\n",
+				atl2_driver_name, netdev->name);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+	schedule_work(&adapter->link_chg_task);
+}
+
+static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
+{
+	u16 phy_data;
+	spin_lock(&adapter->stats_lock);
+	atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
+	spin_unlock(&adapter->stats_lock);
+}
+
+/*
+ * atl2_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl2_intr(int irq, void *data)
+{
+	struct atl2_adapter *adapter = netdev_priv(data);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 status;
+
+	status = ATL2_READ_REG(hw, REG_ISR);
+	if (0 == status)
+		return IRQ_NONE;
+
+	/* link event */
+	if (status & ISR_PHY)
+		atl2_clear_phy_int(adapter);
+
+	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+	ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+
+	/* check if PCIE PHY Link down */
+	if (status & ISR_PHY_LINKDOWN) {
+		if (netif_running(adapter->netdev)) { /* reset MAC */
+			ATL2_WRITE_REG(hw, REG_ISR, 0);
+			ATL2_WRITE_REG(hw, REG_IMR, 0);
+			ATL2_WRITE_FLUSH(hw);
+			schedule_work(&adapter->reset_task);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* check if DMA read/write error? */
+	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+		ATL2_WRITE_REG(hw, REG_ISR, 0);
+		ATL2_WRITE_REG(hw, REG_IMR, 0);
+		ATL2_WRITE_FLUSH(hw);
+		schedule_work(&adapter->reset_task);
+		return IRQ_HANDLED;
+	}
+
+	/* link event */
+	if (status & (ISR_PHY | ISR_MANUAL)) {
+		adapter->net_stats.tx_carrier_errors++;
+		atl2_check_for_link(adapter);
+	}
+
+	/* transmit event */
+	if (status & ISR_TX_EVENT)
+		atl2_intr_tx(adapter);
+
+	/* rx exception */
+	if (status & ISR_RX_EVENT)
+		atl2_intr_rx(adapter);
+
+	/* re-enable Interrupt */
+	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+	return IRQ_HANDLED;
+}
+
+static int atl2_request_irq(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int flags, err = 0;
+
+	flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
+	adapter->have_msi = true;
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		adapter->have_msi = false;
+
+	if (adapter->have_msi)
+		flags &= ~IRQF_SHARED;
+#endif
+
+	return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
+		netdev);
+}
+
+/*
+ * atl2_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+static void atl2_free_ring_resources(struct atl2_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
+		adapter->ring_dma);
+}
+
+/*
+ * atl2_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl2_open(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	int err;
+	u32 val;
+
+	/* disallow open during test */
+	if (test_bit(__ATL2_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = atl2_setup_ring_resources(adapter);
+	if (err)
+		return err;
+
+	err = atl2_init_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		goto err_init_hw;
+	}
+
+	/* hardware has been reset, we need to reload some things */
+	atl2_set_multi(netdev);
+	init_ring_ptrs(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	atl2_restore_vlan(adapter);
+#endif
+
+	if (atl2_configure(adapter)) {
+		err = -EIO;
+		goto err_config;
+	}
+
+	err = atl2_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	clear_bit(__ATL2_DOWN, &adapter->flags);
+
+	mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);
+
+	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
+		val | MASTER_CTRL_MANUAL_INT);
+
+	atl2_irq_enable(adapter);
+
+	return 0;
+
+err_init_hw:
+err_req_irq:
+err_config:
+	atl2_free_ring_resources(adapter);
+	atl2_reset_hw(&adapter->hw);
+
+	return err;
+}
+
+static void atl2_down(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+#ifdef NETIF_F_LLTX
+	netif_stop_queue(netdev);
+#else
+	netif_tx_disable(netdev);
+#endif
+
+	/* reset MAC to disable all RX/TX */
+	atl2_reset_hw(&adapter->hw);
+	msleep(1);
+
+	atl2_irq_disable(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_config_timer);
+	clear_bit(0, &adapter->cfg_phy);
+
+	netif_carrier_off(netdev);
+	adapter->link_speed = SPEED_0;
+	adapter->link_duplex = -1;
+}
+
+static void atl2_free_irq(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+#endif
+}
+
+/*
+ * atl2_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl2_close(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+
+	atl2_down(adapter);
+	atl2_free_irq(adapter);
+	atl2_free_ring_resources(adapter);
+
+	return 0;
+}
+
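+/*
+ * Free-space helpers for the TX status and TX data rings. One entry (or
+ * byte) is always kept unused so that a full ring can be told apart from an
+ * empty one.
+ */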
+static inline int TxsFreeUnit(struct atl2_adapter *adapter)
+{
+	u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+
+	return (adapter->txs_next_clear >= txs_write_ptr) ?
+		(int) (adapter->txs_ring_size - adapter->txs_next_clear +
+		txs_write_ptr - 1) :
+		(int) (txs_write_ptr - adapter->txs_next_clear - 1);
+}
+
+static inline int TxdFreeBytes(struct atl2_adapter *adapter)
+{
+	u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
+
+	return (adapter->txd_write_ptr >= txd_read_ptr) ?
+		(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
+		txd_read_ptr - 1) :
+		(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
+}
+
+static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	struct tx_pkt_header *txph;
+	u32 offset, copy_len;
+	int txs_unused;
+	int txbuf_unused;
+
+	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+#ifdef NETIF_F_LLTX
+	local_irq_save(flags);
+	if (!spin_trylock(&adapter->tx_lock)) {
+		/* Collision - tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+#else
+	spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+	txs_unused = TxsFreeUnit(adapter);
+	txbuf_unused = TxdFreeBytes(adapter);
+
+	if (skb->len + sizeof(struct tx_pkt_header) + 4  > txbuf_unused ||
+		txs_unused < 1) {
+		/* not enough resources */
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	offset = adapter->txd_write_ptr;
+
+	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
+
+	*(u32 *)txph = 0;
+	txph->pkt_size = skb->len;
+
+	offset += 4;
+	if (offset >= adapter->txd_ring_size)
+		offset -= adapter->txd_ring_size;
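+	/*
+	 * Copy the frame into the TX data ring; if it would run past the end
+	 * of the ring, split the copy and wrap the remainder around to the
+	 * start.
+	 */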
+	copy_len = adapter->txd_ring_size - offset;
+	if (copy_len >= skb->len) {
+		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
+		offset += ((u32)(skb->len + 3) & ~3);
+	} else {
+		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
+		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
+			skb->len-copy_len);
+		offset = ((u32)(skb->len-copy_len + 3) & ~3);
+	}
+#ifdef NETIF_F_HW_VLAN_TX
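+	/*
+	 * Rotate the VLAN TCI into the bit layout the hardware expects in the
+	 * TX packet header (the inverse of the remapping done on receive).
+	 */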
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		u16 vlan_tag = vlan_tx_tag_get(skb);
+		vlan_tag = (vlan_tag << 4) |
+			(vlan_tag >> 13) |
+			((vlan_tag >> 9) & 0x8);
+		txph->ins_vlan = 1;
+		txph->vlan = vlan_tag;
+	}
+#endif
+	if (offset >= adapter->txd_ring_size)
+		offset -= adapter->txd_ring_size;
+	adapter->txd_write_ptr = offset;
+
+	/* clear txs before send */
+	adapter->txs_ring[adapter->txs_next_clear].update = 0;
+	if (++adapter->txs_next_clear == adapter->txs_ring_size)
+		adapter->txs_next_clear = 0;
+
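+	/* hand the new TX descriptor write pointer to the hardware; the
+	 * mailbox index appears to be in 4-byte units, hence the shift */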
+	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
+		(adapter->txd_write_ptr >> 2));
+
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+	netdev->trans_start = jiffies;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/*
+ * atl2_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated from the interrupt handlers and the watchdog
+ * timer callback.
+ */
+static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	return &adapter->net_stats;
+}
+
+/*
+ * atl2_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
+		return -EINVAL;
+
+	/* set MTU */
+	if (hw->max_frame_size != new_mtu) {
+		netdev->mtu = new_mtu;
+		ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
+			VLAN_SIZE + ETHERNET_FCS_SIZE);
+	}
+
+	return 0;
+}
+
+/*
+ * atl2_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_set_mac(struct net_device *netdev, void *p)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+	atl2_set_mac_addr(&adapter->hw);
+
+	return 0;
+}
+
+/*
+ * atl2_mii_ioctl - MII register ioctl handler
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ifreq supplied by the ioctl
+ * @cmd: MII ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
+ */
+static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+	unsigned long flags;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = 0;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (atl2_read_phy_reg(&adapter->hw,
+			data->reg_num & 0x1F, &data->val_out)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data->reg_num & ~(0x1F))
+			return -EFAULT;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
+			data->val_in)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * atl2_ioctl - ioctl entry point
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ifreq supplied by the ioctl
+ * @cmd: ioctl command
+ */
+static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return atl2_mii_ioctl(netdev, ifr, cmd);
+#ifdef ETHTOOL_OPS_COMPAT
+	case SIOCETHTOOL:
+		return ethtool_ioctl(ifr);
+#endif
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*
+ * atl2_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl2_tx_timeout(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl2_watchdog - Timer Call-back
+ * @data: pointer to the adapter private structure cast to an unsigned long
+ */
+static void atl2_watchdog(unsigned long data)
+{
+	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+	u32 drop_rxd, drop_rxs;
+	unsigned long flags;
+
+	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
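+		/* fold the hardware RXD/RXS overflow counters into
+		 * rx_over_errors once per watchdog tick */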
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
+		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
+		adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
+		/* Reset the timer */
+		mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ);
+	}
+}
+
+/*
+ * atl2_phy_config - Timer Call-back
+ * @data: pointer to the adapter private structure cast to an unsigned long
+ */
+static void atl2_phy_config(unsigned long data)
+{
+	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+	struct atl2_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->stats_lock, flags);
+	atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+	atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
+		MII_CR_RESTART_AUTO_NEG);
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+	clear_bit(0, &adapter->cfg_phy);
+}
+
+static int atl2_up(struct atl2_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+	u32 val;
+
+	/* hardware has been reset, we need to reload some things */
+
+	err = atl2_init_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		return err;
+	}
+
+	atl2_set_multi(netdev);
+	init_ring_ptrs(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	atl2_restore_vlan(adapter);
+#endif
+
+	if (atl2_configure(adapter)) {
+		err = -EIO;
+		goto err_up;
+	}
+
+	clear_bit(__ATL2_DOWN, &adapter->flags);
+
+	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
+		MASTER_CTRL_MANUAL_INT);
+
+	atl2_irq_enable(adapter);
+
+err_up:
+	return err;
+}
+
+static void atl2_reinit_locked(struct atl2_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+		msleep(1);
+	atl2_down(adapter);
+	atl2_up(adapter);
+	clear_bit(__ATL2_RESETTING, &adapter->flags);
+}
+
+static void atl2_reset_task(struct work_struct *work)
+{
+	struct atl2_adapter *adapter;
+	adapter = container_of(work, struct atl2_adapter, reset_task);
+
+	atl2_reinit_locked(adapter);
+}
+
+static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
+{
+	u32 value;
+	struct atl2_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	/* Config MAC CTRL Register */
+	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+
+	/* duplex */
+	if (FULL_DUPLEX == adapter->link_duplex)
+		value |= MAC_CTRL_DUPLX;
+
+	/* flow control */
+	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+	/* PAD & CRC */
+	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+
+	/* preamble length */
+	value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
+		MAC_CTRL_PRMLEN_SHIFT);
+
+	/* vlan */
+	if (adapter->vlgrp)
+		value |= MAC_CTRL_RMV_VLAN;
+
+	/* filter mode */
+	value |= MAC_CTRL_BC_EN;
+	if (netdev->flags & IFF_PROMISC)
+		value |= MAC_CTRL_PROMIS_EN;
+	else if (netdev->flags & IFF_ALLMULTI)
+		value |= MAC_CTRL_MC_ALL_EN;
+
+	/* half retry buffer */
+	value |= (((u32)(adapter->hw.retry_buf &
+		MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+
+	ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+static int atl2_check_link(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int ret_val;
+	u16 speed, duplex, phy_data;
+	int reconfig = 0;
+
+	/* the link status bit in MII_BMSR is latched, so read the register
+	 * twice to get the current link state */
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	if (!(phy_data&BMSR_LSTATUS)) { /* link down */
+		if (netif_carrier_ok(netdev)) { /* old link state: Up */
+			u32 value;
+			/* disable rx */
+			value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+			value &= ~MAC_CTRL_RX_EN;
+			ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+		return 0;
+	}
+
+	/* Link Up */
+	ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+	if (ret_val)
+		return ret_val;
+	switch (hw->MediaType) {
+	case MEDIA_TYPE_100M_FULL:
+		if (speed  != SPEED_100 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_100M_HALF:
+		if (speed  != SPEED_100 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_FULL:
+		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_HALF:
+		if (speed  != SPEED_10 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	}
+	/* link result is our setting */
+	if (reconfig == 0) {
+		if (adapter->link_speed != speed ||
+			adapter->link_duplex != duplex) {
+			adapter->link_speed = speed;
+			adapter->link_duplex = duplex;
+			atl2_setup_mac_ctrl(adapter);
+			printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
+				atl2_driver_name, netdev->name,
+				adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+					"Full Duplex" : "Half Duplex");
+		}
+
+		if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		}
+		return 0;
+	}
+
+	/* change original link status */
+	if (netif_carrier_ok(netdev)) {
+		u32 value;
+		/* disable rx */
+		value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+		value &= ~MAC_CTRL_RX_EN;
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+
+		adapter->link_speed = SPEED_0;
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+	}
+
+	/* our settings do not match the negotiated link: arm the PHY config
+	 * timer to restart auto-negotiation (re-triggering it more often than
+	 * every 5 seconds would indicate something is wrong) */
+	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
+		if (!test_and_set_bit(0, &adapter->cfg_phy))
+			mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ);
+	}
+
+	return 0;
+}
+
+/*
+ * atl2_link_chg_task - deal with link change event Out of interrupt context
+ * @netdev: network interface device structure
+ */
+static void atl2_link_chg_task(struct work_struct *work)
+{
+	struct atl2_adapter *adapter;
+	unsigned long flags;
+
+	adapter = container_of(work, struct atl2_adapter, link_chg_task);
+
+	spin_lock_irqsave(&adapter->stats_lock, flags);
+	atl2_check_link(adapter);
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+static void atl2_setup_pcicmd(struct pci_dev *pdev)
+{
+	u16 cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+
+	if (cmd & PCI_COMMAND_INTX_DISABLE)
+		cmd &= ~PCI_COMMAND_INTX_DISABLE;
+	if (cmd & PCI_COMMAND_IO)
+		cmd &= ~PCI_COMMAND_IO;
+	if (0 == (cmd & PCI_COMMAND_MEMORY))
+		cmd |= PCI_COMMAND_MEMORY;
+	if (0 == (cmd & PCI_COMMAND_MASTER))
+		cmd |= PCI_COMMAND_MASTER;
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+	/*
+	 * Some motherboard BIOS (PXE/EFI) drivers may leave PME set when
+	 * they hand control over to the OS, so clear it here before the
+	 * NIC is brought up.
+	 */
+	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
+}
+
+/*
+ * atl2_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl2_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl2_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl2_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct atl2_adapter *adapter;
+	static int cards_found;
+	unsigned long mmio_start;
+	int mmio_len;
+	int err;
+
+	cards_found = 0;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	/*
+	 * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
+	 * until the kernel has the proper infrastructure to support 64-bit DMA
+	 * on these devices.
+	 */
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
+		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
+		goto err_dma;
+	}
+
+	/* Mark all PCI regions associated with PCI device
+	 * pdev as being reserved by owner atl2_driver_name */
+	err = pci_request_regions(pdev, atl2_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	/* Enables bus-mastering on the device and calls
+	 * pcibios_set_master to do the needed arch specific settings */
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct atl2_adapter));
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
+
+	mmio_start = pci_resource_start(pdev, 0x0);
+	mmio_len = pci_resource_len(pdev, 0x0);
+
+	adapter->hw.mem_rang = (u32)mmio_len;
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	atl2_setup_pcicmd(pdev);
+
+	netdev->open = &atl2_open;
+	netdev->stop = &atl2_close;
+	netdev->hard_start_xmit = &atl2_xmit_frame;
+	netdev->get_stats = &atl2_get_stats;
+	netdev->set_multicast_list = &atl2_set_multi;
+	netdev->set_mac_address = &atl2_set_mac;
+	netdev->change_mtu = &atl2_change_mtu;
+	netdev->do_ioctl = &atl2_ioctl;
+	atl2_set_ethtool_ops(netdev);
+
+#ifdef HAVE_TX_TIMEOUT
+	netdev->tx_timeout = &atl2_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->vlan_rx_register = atl2_vlan_rx_register;
+#endif
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+	adapter->bd_number = cards_found;
+	adapter->pci_using_64 = false;
+
+	/* setup the private structure */
+	err = atl2_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = -EIO;
+
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+#endif
+
+#ifdef NETIF_F_LLTX
+	netdev->features |= NETIF_F_LLTX;
+#endif
+
+	/* Init PHY as early as possible due to power saving issue  */
+	atl2_phy_init(&adapter->hw);
+
+	/* reset the controller to
+	 * put the device in a known good starting state */
+
+	if (atl2_reset_hw(&adapter->hw)) {
+		err = -EIO;
+		goto err_reset;
+	}
+
+	/* copy the MAC address out of the EEPROM */
+	atl2_read_mac_addr(&adapter->hw);
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+/* FIXME: do we still need this? */
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	atl2_check_options(adapter);
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &atl2_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_config_timer);
+	adapter->phy_config_timer.function = &atl2_phy_config;
+	adapter->phy_config_timer.data = (unsigned long) adapter;
+
+	INIT_WORK(&adapter->reset_task, atl2_reset_task);
+	INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
+
+	strcpy(netdev->name, "eth%d"); /* ?? */
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* assume we have no link for now */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	cards_found++;
+
+	return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/*
+ * atl2_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl2_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+/* FIXME: write the original MAC address back in case it was changed from a
+ * BIOS-set value, as in atl1 -- CHS */
+static void __devexit atl2_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	/* the work items flushed by flush_scheduled_work() below may
+	 * reschedule our watchdog timer, so explicitly keep it from being
+	 * re-armed */
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_config_timer);
+
+	flush_scheduled_work();
+
+	unregister_netdev(netdev);
+
+	atl2_force_ps(&adapter->hw);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u16 speed, duplex;
+	u32 ctrl = 0;
+	u32 wufc = adapter->wol;
+
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+		atl2_down(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+	if (ctrl & BMSR_LSTATUS)
+		wufc &= ~ATLX_WUFC_LNKC;
+
+	if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
+		u32 ret_val;
+		/* get current link speed & duplex */
+		ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			printk(KERN_DEBUG
+				"%s: failed to get speed and duplex while suspending\n",
+				atl2_driver_name);
+			goto wol_dis;
+		}
+
+		ctrl = 0;
+
+		/* turn on magic packet wol */
+		if (wufc & ATLX_WUFC_MAG)
+			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
+
+		/* ignore Link Chg event when Link is up */
+		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+
+		/* Config MAC CTRL Register */
+		ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+		if (FULL_DUPLEX == adapter->link_duplex)
+			ctrl |= MAC_CTRL_DUPLX;
+		ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+		ctrl |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+		ctrl |= (((u32)(adapter->hw.retry_buf &
+			MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
+			MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+		if (wufc & ATLX_WUFC_MAG) {
+			/* a magic packet may arrive as broadcast, multicast
+			 * or unicast */
+			ctrl |= MAC_CTRL_BC_EN;
+		}
+
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
+
+		/* pcie patch */
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto suspend_exit;
+	}
+
+	if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
+		/* link is down, so only LINK CHG WOL event enable */
+		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+		ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
+
+		/* pcie patch */
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+		hw->phy_configured = false; /* re-init PHY when resume */
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+
+		goto suspend_exit;
+	}
+
+wol_dis:
+	/* WOL disabled */
+	ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* pcie patch */
+	ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+	ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+	ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+	atl2_force_ps(hw);
+	hw->phy_configured = false; /* re-init PHY when resume */
+
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+suspend_exit:
+	if (netif_running(netdev))
+		atl2_free_irq(adapter);
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl2_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR
+			"atl2: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+
+	err = atl2_request_irq(adapter);
+	if (netif_running(netdev) && err)
+		return err;
+
+	atl2_reset_hw(&adapter->hw);
+
+	if (netif_running(netdev))
+		atl2_up(adapter);
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+#endif
+
+static void atl2_shutdown(struct pci_dev *pdev)
+{
+	atl2_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver atl2_driver = {
+	.name     = atl2_driver_name,
+	.id_table = atl2_pci_tbl,
+	.probe    = atl2_probe,
+	.remove   = __devexit_p(atl2_remove),
+	/* Power Management Hooks */
+	.suspend  = atl2_suspend,
+#ifdef CONFIG_PM
+	.resume   = atl2_resume,
+#endif
+	.shutdown = atl2_shutdown,
+};
+
+/*
+ * atl2_init_module - Driver Registration Routine
+ *
+ * atl2_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl2_init_module(void)
+{
+	printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
+		atl2_driver_version);
+	printk(KERN_INFO "%s\n", atl2_copyright);
+	return pci_register_driver(&atl2_driver);
+}
+module_init(atl2_init_module);
+
+/*
+ * atl2_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl2_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl2_exit_module(void)
+{
+	pci_unregister_driver(&atl2_driver);
+}
+module_exit(atl2_exit_module);
+
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+	struct atl2_adapter *adapter = hw->back;
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+	struct atl2_adapter *adapter = hw->back;
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+static int atl2_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	ecmd->supported = (SUPPORTED_10baseT_Half |
+		SUPPORTED_10baseT_Full |
+		SUPPORTED_100baseT_Half |
+		SUPPORTED_100baseT_Full |
+		SUPPORTED_Autoneg |
+		SUPPORTED_TP);
+	ecmd->advertising = ADVERTISED_TP;
+
+	ecmd->advertising |= ADVERTISED_Autoneg;
+	ecmd->advertising |= hw->autoneg_advertised;
+
+	ecmd->port = PORT_TP;
+	ecmd->phy_address = 0;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (adapter->link_speed != SPEED_0) {
+		ecmd->speed = adapter->link_speed;
+		if (adapter->link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_ENABLE;
+	return 0;
+}
+
+static int atl2_set_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+
+	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+#define MY_ADV_MASK	(ADVERTISE_10_HALF | \
+			 ADVERTISE_10_FULL | \
+			 ADVERTISE_100_HALF| \
+			 ADVERTISE_100_FULL)
+
+		if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
+			hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
+			hw->autoneg_advertised =  MY_ADV_MASK;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_100_FULL) {
+			hw->MediaType = MEDIA_TYPE_100M_FULL;
+			hw->autoneg_advertised = ADVERTISE_100_FULL;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_100_HALF) {
+			hw->MediaType = MEDIA_TYPE_100M_HALF;
+			hw->autoneg_advertised = ADVERTISE_100_HALF;
+		} else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_10_FULL) {
+			hw->MediaType = MEDIA_TYPE_10M_FULL;
+			hw->autoneg_advertised = ADVERTISE_10_FULL;
+		}  else if ((ecmd->advertising & MY_ADV_MASK) ==
+				ADVERTISE_10_HALF) {
+			hw->MediaType = MEDIA_TYPE_10M_HALF;
+			hw->autoneg_advertised = ADVERTISE_10_HALF;
+		} else {
+			clear_bit(__ATL2_RESETTING, &adapter->flags);
+			return -EINVAL;
+		}
+		ecmd->advertising = hw->autoneg_advertised |
+			ADVERTISED_TP | ADVERTISED_Autoneg;
+	} else {
+		clear_bit(__ATL2_RESETTING, &adapter->flags);
+		return -EINVAL;
+	}
+
+	/* reset the link */
+	if (netif_running(adapter->netdev)) {
+		atl2_down(adapter);
+		atl2_up(adapter);
+	} else
+		atl2_reset_hw(&adapter->hw);
+
+	clear_bit(__ATL2_RESETTING, &adapter->flags);
+	return 0;
+}
+
+static u32 atl2_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static u32 atl2_get_msglevel(struct net_device *netdev)
+{
+	return 0;
+}
+
+/*
+ * It's sane for this to be empty, but we might want to take advantage of this.
+ */
+static void atl2_set_msglevel(struct net_device *netdev, u32 data)
+{
+}
+
+static int atl2_get_regs_len(struct net_device *netdev)
+{
+#define ATL2_REGS_LEN 42
+	return sizeof(u32) * ATL2_REGS_LEN;
+}
+
+static void atl2_get_regs(struct net_device *netdev,
+	struct ethtool_regs *regs, void *p)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = ATL2_READ_REG(hw, REG_VPD_CAP);
+	regs_buff[1]  = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+	regs_buff[2]  = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
+	regs_buff[3]  = ATL2_READ_REG(hw, REG_TWSI_CTRL);
+	regs_buff[4]  = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
+	regs_buff[5]  = ATL2_READ_REG(hw, REG_MASTER_CTRL);
+	regs_buff[6]  = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
+	regs_buff[7]  = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
+	regs_buff[8]  = ATL2_READ_REG(hw, REG_PHY_ENABLE);
+	regs_buff[9]  = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
+	regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+	regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+	regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
+	regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
+	regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
+	regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+	regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
+	regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
+	regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
+	regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
+	regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
+	regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
+	regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
+	regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
+	regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
+	regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
+	regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
+	regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
+	regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
+	regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
+	regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
+	regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
+	regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
+	regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
+	regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
+	regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
+	regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
+	regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
+	regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
+
+	atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
+	regs_buff[40] = (u32)phy_data;
+	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+	regs_buff[41] = (u32)phy_data;
+}
+
+static int atl2_get_eeprom_len(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	if (!atl2_check_eeprom_exist(&adapter->hw))
+		return 512;
+	else
+		return 0;
+}
+
+static int atl2_get_eeprom(struct net_device *netdev,
+	struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	int first_dword, last_dword;
+	int ret_val = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	if (atl2_check_eeprom_exist(hw))
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+
+	eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
+		GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	for (i = first_dword; i < last_dword; i++) {
+		if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+		eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int atl2_set_eeprom(struct net_device *netdev,
+	struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	struct atl2_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	u32 *ptr;
+	int max_len, first_dword, last_dword, ret_val = 0;
+	int i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = 512;
+
+	first_dword = eeprom->offset >> 2;
+	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (u32 *)eeprom_buff;
+
+	if (eeprom->offset & 3) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 3)) {
+		/*
+		 * need read/modify/write of last changed EEPROM word
+		 * only the first byte of the word is being modified
+		 */
+		if (!atl2_read_eeprom(hw, last_dword * 4,
+			&(eeprom_buff[last_dword - first_dword]))) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_dword - first_dword + 1; i++) {
+		if (!atl2_write_eeprom(hw, ((first_dword+i)*4),
+				eeprom_buff[i])) {
+			kfree(eeprom_buff);
+			return -EIO;
+		}
+	}
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void atl2_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	strncpy(drvinfo->driver,  atl2_driver_name, 32);
+	strncpy(drvinfo->version, atl2_driver_version, 32);
+	strncpy(drvinfo->fw_version, "L2", 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = atl2_get_regs_len(netdev);
+	drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
+}
+
+static void atl2_get_wol(struct net_device *netdev,
+	struct ethtool_wolinfo *wol)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	if (adapter->wol & ATLX_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & ATLX_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & ATLX_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & ATLX_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+	if (adapter->wol & ATLX_WUFC_LNKC)
+		wol->wolopts |= WAKE_PHY;
+}
+
+static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+		return -EOPNOTSUPP;
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= ATLX_WUFC_MAG;
+	if (wol->wolopts & WAKE_PHY)
+		adapter->wol |= ATLX_WUFC_LNKC;
+
+	return 0;
+}
+
+static int atl2_nway_reset(struct net_device *netdev)
+{
+	struct atl2_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		atl2_reinit_locked(adapter);
+	return 0;
+}
+
+static struct ethtool_ops atl2_ethtool_ops = {
+	.get_settings		= atl2_get_settings,
+	.set_settings		= atl2_set_settings,
+	.get_drvinfo		= atl2_get_drvinfo,
+	.get_regs_len		= atl2_get_regs_len,
+	.get_regs		= atl2_get_regs,
+	.get_wol		= atl2_get_wol,
+	.set_wol		= atl2_set_wol,
+	.get_msglevel		= atl2_get_msglevel,
+	.set_msglevel		= atl2_set_msglevel,
+	.nway_reset		= atl2_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_eeprom_len		= atl2_get_eeprom_len,
+	.get_eeprom		= atl2_get_eeprom,
+	.set_eeprom		= atl2_set_eeprom,
+	.get_tx_csum		= atl2_get_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso		= ethtool_op_get_tso,
+#endif
+};
+
+static void atl2_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+}
+
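+/*
+ * Byte-swapping helpers used below to rearrange the MAC address words read
+ * from EEPROM, SPI flash or the station address registers into the on-wire
+ * byte order.
+ */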
+#define LBYTESWAP(a)  ((((a) & 0x00ff00ff) << 8) | \
+	(((a) & 0xff00ff00) >> 8))
+#define LONGSWAP(a)   ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
+#define SHORTSWAP(a)  (((a) << 8) | ((a) >> 8))
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * return: 0 on success, or the idle status register value on error
+ */
+static s32 atl2_reset_hw(struct atl2_hw *hw)
+{
+	u32 icr;
+	u16 pci_cfg_cmd_word;
+	int i;
+
+	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+	atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+	if ((pci_cfg_cmd_word &
+		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
+		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
+		pci_cfg_cmd_word |=
+			(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
+		atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+	}
+
+	/* Clear Interrupt mask to stop board from generating
+	 * interrupts & Clear any pending interrupt events
+	 */
+	/* FIXME */
+	/* ATL2_WRITE_REG(hw, REG_IMR, 0); */
+	/* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */
+
+	/* Issue Soft Reset to the MAC.  This will reset the chip's
+	 * transmit, receive, DMA.  It will not affect
+	 * the current PCI configuration.  The global reset bit is self-
+	 * clearing, and should clear within a microsecond.
+	 */
+	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+	wmb();
+	msleep(1); /* delay about 1ms */
+
+	/* Wait at least 10ms for All module to be Idle */
+	for (i = 0; i < 10; i++) {
+		icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+		if (!icr)
+			break;
+		msleep(1); /* delay 1 ms */
+		cpu_relax();
+	}
+
+	if (icr)
+		return icr;
+
+	return 0;
+}
+
+#define CUSTOM_SPI_CS_SETUP        2
+#define CUSTOM_SPI_CLK_HI          2
+#define CUSTOM_SPI_CLK_LO          2
+#define CUSTOM_SPI_CS_HOLD         2
+#define CUSTOM_SPI_CS_HI           3
+
+static struct atl2_spi_flash_dev flash_table[] =
+{
+/* MFR    WRSR  READ  PROGRAM WREN  WRDI  RDSR  RDID  SECTOR_ERASE CHIP_ERASE */
+{"Atmel", 0x0,  0x03, 0x02,   0x06, 0x04, 0x05, 0x15, 0x52,        0x62 },
+{"SST",   0x01, 0x03, 0x02,   0x06, 0x04, 0x05, 0x90, 0x20,        0x60 },
+{"ST",    0x01, 0x03, 0x02,   0x06, 0x04, 0x05, 0xAB, 0xD8,        0xC7 },
+};
+
+static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
+{
+	int i;
+	u32 value;
+
+	ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
+	ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
+
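+	/*
+	 * Assemble the SPI flash control word from the chip-select and clock
+	 * timing constants above, write it once, then set the START bit and
+	 * poll until the controller clears it.
+	 */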
+	value = SPI_FLASH_CTRL_WAIT_READY |
+		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+			SPI_FLASH_CTRL_CS_SETUP_SHIFT |
+		(CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
+			SPI_FLASH_CTRL_CLK_HI_SHIFT |
+		(CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
+			SPI_FLASH_CTRL_CLK_LO_SHIFT |
+		(CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+			SPI_FLASH_CTRL_CS_HOLD_SHIFT |
+		(CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
+			SPI_FLASH_CTRL_CS_HI_SHIFT |
+		(0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
+
+	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+	value |= SPI_FLASH_CTRL_START;
+
+	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+	for (i = 0; i < 10; i++) {
+		msleep(1);
+		value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+		if (!(value & SPI_FLASH_CTRL_START))
+			break;
+	}
+
+	if (value & SPI_FLASH_CTRL_START)
+		return false;
+
+	*buf = ATL2_READ_REG(hw, REG_SPI_DATA);
+
+	return true;
+}
+
+/*
+ * get_permanent_address
+ * return 0 if get valid mac address,
+ */
+static int get_permanent_address(struct atl2_hw *hw)
+{
+	u32 Addr[2];
+	u32 i, Control;
+	u16 Register;
+	u8  EthAddr[NODE_ADDRESS_SIZE];
+	bool KeyValid;
+
+	if (is_valid_ether_addr(hw->perm_mac_addr))
+		return 0;
+
+	Addr[0] = 0;
+	Addr[1] = 0;
+
+	if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
+		Register = 0;
+		KeyValid = false;
+
+		/* Read out all EEPROM content */
+		i = 0;
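+		/*
+		 * The EEPROM area at offset 0x100 holds (keyword, value)
+		 * pairs: a dword whose low byte is 0x5A carries a register
+		 * offset in its upper 16 bits, and the next dword is the
+		 * value for that register. Scan it for the two station
+		 * address words.
+		 */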
+		while (1) {
+			if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
+				if (KeyValid) {
+					if (Register == REG_MAC_STA_ADDR)
+						Addr[0] = Control;
+					else if (Register ==
+						(REG_MAC_STA_ADDR + 4))
+						Addr[1] = Control;
+					KeyValid = false;
+				} else if ((Control & 0xff) == 0x5A) {
+					KeyValid = true;
+					Register = (u16) (Control >> 16);
+				} else {
+					/* assume end of data when an invalid
+					 * keyword is encountered */
+					break;
+				}
+			} else {
+				break; /* read error */
+			}
+			i += 4;
+		}
+
+		*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+		*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+		if (is_valid_ether_addr(EthAddr)) {
+			memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+			return 0;
+		}
+		return 1;
+	}
+
+	/* see if SPI flash exists? */
+	Addr[0] = 0;
+	Addr[1] = 0;
+	Register = 0;
+	KeyValid = false;
+	i = 0;
+	while (1) {
+		if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
+			if (KeyValid) {
+				if (Register == REG_MAC_STA_ADDR)
+					Addr[0] = Control;
+				else if (Register == (REG_MAC_STA_ADDR + 4))
+					Addr[1] = Control;
+				KeyValid = false;
+			} else if ((Control & 0xff) == 0x5A) {
+				KeyValid = true;
+				Register = (u16) (Control >> 16);
+			} else {
+				break; /* data end */
+			}
+		} else {
+			break; /* read error */
+		}
+		i += 4;
+	}
+
+	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
+	if (is_valid_ether_addr(EthAddr)) {
+		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+		return 0;
+	}
+	/* maybe MAC-address is from BIOS */
+	Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+	Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
+	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+	if (is_valid_ether_addr(EthAddr)) {
+		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_read_mac_addr(struct atl2_hw *hw)
+{
+	u16 i;
+
+	if (get_permanent_address(hw)) {
+		/* for test */
+		/* FIXME: shouldn't we use random_ether_addr() here? */
+		hw->perm_mac_addr[0] = 0x00;
+		hw->perm_mac_addr[1] = 0x13;
+		hw->perm_mac_addr[2] = 0x74;
+		hw->perm_mac_addr[3] = 0x00;
+		hw->perm_mac_addr[4] = 0x5c;
+		hw->perm_mac_addr[5] = 0x38;
+	}
+
+	for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+		hw->mac_addr[i] = hw->perm_mac_addr[i];
+
+	return 0;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * atl2_hash_mc_addr
+ *  purpose
+ *      compute the hash value for a multicast address:
+ *          1. calculate the 32-bit CRC of the multicast address
+ *          2. reverse the CRC bit order (MSB to LSB)
+ */
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
+{
+	u32 crc32, value;
+	int i;
+
+	value = 0;
+	crc32 = ether_crc_le(6, mc_addr);
+
+	for (i = 0; i < 32; i++)
+		value |= (((crc32 >> i) & 1) << (31 - i));
+
+	return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg;
+	u32 mta;
+
+	/* The HASH Table  is a register array of 2 32-bit registers.
+	 * It is treated like an array of 64 bits.  We want to set
+	 * bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The register is selected by the most
+	 * significant bit of the hash value, and the bit within that
+	 * register by the next five bits (bits 30:26 of the hash value).
+	 */
+	hash_reg = (hash_value >> 31) & 0x1;
+	hash_bit = (hash_value >> 26) & 0x1F;
+
+	mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
+
+/*
+ * atl2_init_pcie - init PCIE module
+ */
+static void atl2_init_pcie(struct atl2_hw *hw)
+{
+	u32 value;
+
+	value = LTSSM_TEST_MODE_DEF;
+	ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
+
+	value = PCIE_DLL_TX_CTRL1_DEF;
+	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
+}
+
+static void atl2_init_flash_opcode(struct atl2_hw *hw)
+{
+	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
+		hw->flash_vendor = 0; /* ATMEL */
+
+	/* Init OP table */
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
+		flash_table[hw->flash_vendor].cmdPROGRAM);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
+		flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
+		flash_table[hw->flash_vendor].cmdCHIP_ERASE);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
+		flash_table[hw->flash_vendor].cmdRDID);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
+		flash_table[hw->flash_vendor].cmdWREN);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
+		flash_table[hw->flash_vendor].cmdRDSR);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
+		flash_table[hw->flash_vendor].cmdWRSR);
+	ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
+		flash_table[hw->flash_vendor].cmdREAD);
+}
+
+/********************************************************************
+* Performs basic configuration of the adapter.
+*
+* hw - Struct containing variables accessed by shared code
+* Assumes that the controller has previously been reset and is in a
+* post-reset uninitialized state. Initializes multicast table,
+* and calls the routines that set up the link.
+* Leaves the transmit and receive units disabled and uninitialized.
+********************************************************************/
+static s32 atl2_init_hw(struct atl2_hw *hw)
+{
+	u32 ret_val = 0;
+
+	atl2_init_pcie(hw);
+
+	/* Zero out the Multicast HASH table */
+	/* clear the old settings from the multicast hash table */
+	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	atl2_init_flash_opcode(hw);
+
+	ret_val = atl2_phy_init(hw);
+
+	return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+	u16 *duplex)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Read PHY Specific Status Register (17) */
+	ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
+		return ATLX_ERR_PHY_RES;
+
+	switch (phy_data & MII_ATLX_PSSR_SPEED) {
+	case MII_ATLX_PSSR_100MBS:
+		*speed = SPEED_100;
+		break;
+	case MII_ATLX_PSSR_10MBS:
+		*speed = SPEED_10;
+		break;
+	default:
+		return ATLX_ERR_PHY_SPEED;
+	}
+
+	if (phy_data & MII_ATLX_PSSR_DPLX)
+		*duplex = FULL_DUPLEX;
+	else
+		*duplex = HALF_DUPLEX;
+
+	return 0;
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	u32 val;
+	int i;
+
+	val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+		MDIO_START |
+		MDIO_SUP_PREAMBLE |
+		MDIO_RW |
+		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	wmb();
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+		wmb();
+	}
+	if (!(val & (MDIO_START | MDIO_BUSY))) {
+		*phy_data = (u16)val;
+		return 0;
+	}
+
+	return ATLX_ERR_PHY;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
+{
+	int i;
+	u32 val;
+
+	val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+		(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+		MDIO_SUP_PREAMBLE |
+		MDIO_START |
+		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	wmb();
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+
+		wmb();
+	}
+
+	if (!(val & (MDIO_START | MDIO_BUSY)))
+		return 0;
+
+	return ATLX_ERR_PHY;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	s16 mii_autoneg_adv_reg;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	/* Start from the default capability mask for the MII Auto-Neg
+	 * Advertisement Register (Address 4). */
+
+	/* Need to parse autoneg_advertised  and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/* First, clear all the 10/100 Mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4). */
+	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+
+	/* Need to parse MediaType and setup the
+	 * appropriate PHY registers. */
+	switch (hw->MediaType) {
+	case MEDIA_TYPE_AUTO_SENSOR:
+		mii_autoneg_adv_reg |=
+			(MII_AR_10T_HD_CAPS |
+			MII_AR_10T_FD_CAPS  |
+			MII_AR_100TX_HD_CAPS|
+			MII_AR_100TX_FD_CAPS);
+		hw->autoneg_advertised =
+			ADVERTISE_10_HALF |
+			ADVERTISE_10_FULL |
+			ADVERTISE_100_HALF|
+			ADVERTISE_100_FULL;
+		break;
+	case MEDIA_TYPE_100M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_100_FULL;
+		break;
+	case MEDIA_TYPE_100M_HALF:
+		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_100_HALF;
+		break;
+	case MEDIA_TYPE_10M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_10_FULL;
+		break;
+	default:
+		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		hw->autoneg_advertised = ADVERTISE_10_HALF;
+		break;
+	}
+
+	/* flow control fixed to enable all */
+	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+
+	ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/*
+ * Resets the PHY so that all configuration takes effect
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl2_phy_commit(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
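+	/* BMCR: reset (bit 15), enable auto-negotiation (bit 12) and restart
+	 * auto-negotiation (bit 9) in a single write. */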
+	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
+	if (ret_val) {
+		u32 val;
+		int i;
+		/* the PCIe serdes link may be down! */
+		for (i = 0; i < 25; i++) {
+			msleep(1);
+			val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+			if (!(val & (MDIO_START | MDIO_BUSY)))
+				break;
+		}
+
+		if (val & (MDIO_START | MDIO_BUSY)) {
+			printk(KERN_ERR "atl2: PCIe link down for at least 25ms!\n");
+			return ret_val;
+		}
+	}
+	return 0;
+}
+
+static s32 atl2_phy_init(struct atl2_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_val;
+
+	if (hw->phy_configured)
+		return 0;
+
+	/* Enable PHY */
+	ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
+	ATL2_WRITE_FLUSH(hw);
+	msleep(1);
+
+	/* check if the PHY is in powersaving mode */
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+
+	/* 024E / 124E or 0274 / 1274 ? */
+	if (phy_val & 0x1000) {
+		phy_val &= ~0x1000;
+		atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
+	}
+
+	msleep(1);
+
+	/* Enable PHY LinkChange Interrupt */
+	ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
+	if (ret_val)
+		return ret_val;
+
+	/* setup AutoNeg parameters */
+	ret_val = atl2_phy_setup_autoneg_adv(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
+	ret_val = atl2_phy_commit(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy_configured = true;
+
+	return ret_val;
+}
+
+static void atl2_set_mac_addr(struct atl2_hw *hw)
+{
+	u32 value;
+	/* Example: 00-0B-6A-F6-00-DC is written as
+	 * register 0: 6AF600DC, register 1: 000B
+	 * low dword */
+	value = (((u32)hw->mac_addr[2]) << 24) |
+		(((u32)hw->mac_addr[3]) << 16) |
+		(((u32)hw->mac_addr[4]) << 8)  |
+		(((u32)hw->mac_addr[5]));
+	ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+	/* high dword */
+	value = (((u32)hw->mac_addr[0]) << 8) |
+		(((u32)hw->mac_addr[1]));
+	ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * atl2_check_eeprom_exist
+ * Returns 0 if an eeprom is present
+ */
+static int atl2_check_eeprom_exist(struct atl2_hw *hw)
+{
+	u32 value;
+
+	value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+	if (value & SPI_FLASH_CTRL_EN_VPD) {
+		value &= ~SPI_FLASH_CTRL_EN_VPD;
+		ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+	}
+	value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
+	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+/* FIXME: This doesn't look right. -- CHS */
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
+{
+	return true;
+}
+
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
+{
+	int i;
+	u32    Control;
+
+	if (Offset & 0x3)
+		return false; /* address is not dword-aligned */
+
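+	/* EEPROM reads go through the PCI VPD capability: latch the address,
+	 * then poll for the completion flag before reading the data word. */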
+	ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
+	Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+	ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
+
+	for (i = 0; i < 10; i++) {
+		msleep(2);
+		Control = ATL2_READ_REG(hw, REG_VPD_CAP);
+		if (Control & VPD_CAP_VPD_FLAG)
+			break;
+	}
+
+	if (Control & VPD_CAP_VPD_FLAG) {
+		*pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
+		return true;
+	}
+	return false; /* timeout */
+}
+
+static void atl2_force_ps(struct atl2_hw *hw)
+{
+	u16 phy_val;
+
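+	/* Force the PHY into power-saving mode via the vendor debug registers:
+	 * set bit 12 of debug register 0 (the bit atl2_phy_init clears) and
+	 * program debug registers 2 and 3. */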
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
+
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+	atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
+	atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
+}
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL2_MAX_NIC 4
+
+#define OPTION_UNSET    -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when atl2_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define ATL2_PARAM(X, desc) \
+    static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+    MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
+    MODULE_PARM_DESC(X, desc);
+#else
+#define ATL2_PARAM(X, desc) \
+    static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
+    static int num_##X = 0; \
+    module_param_array_named(X, X, int, &num_##X, 0); \
+    MODULE_PARM_DESC(X, desc);
+#endif
+
+/*
+ * Transmit Memory Size (in KB)
+ * Valid Range: 4-64
+ * Default Value: 8
+ */
+#define ATL2_MIN_TX_MEMSIZE		4	/* 4KB */
+#define ATL2_MAX_TX_MEMSIZE		64	/* 64KB */
+#define ATL2_DEFAULT_TX_MEMSIZE		8	/* 8KB */
+ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
+
+/*
+ * Receive Memory Block Count
+ * Valid Range: 16-512
+ * Default Value: 64
+ */
+#define ATL2_MIN_RXD_COUNT		16
+#define ATL2_MAX_RXD_COUNT		512
+#define ATL2_DEFAULT_RXD_COUNT		64
+ATL2_PARAM(RxMemBlock, "Number of receive memory block");
+
+/*
+ * User Specified MediaType Override
+ *
+ * Valid Range: 0-4
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 1    - only link at 100Mbps Full Duplex
+ *  - 2    - only link at 100Mbps Half Duplex
+ *  - 3    - only link at 10Mbps Full Duplex
+ *  - 4    - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+ATL2_PARAM(MediaType, "MediaType Select");
+
+/*
+ * Interrupt Moderate Timer in units of 2048 ns (~2 us)
+ * Valid Range: 50-65000
+ * Default Value: 100 (~200 us)
+ */
+#define INT_MOD_DEFAULT_CNT	100 /* 200us */
+#define INT_MOD_MAX_CNT		65000
+#define INT_MOD_MIN_CNT		50
+ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
+
+/*
+ * FlashVendor
+ * Valid Range: 0-2
+ * 0 - Atmel
+ * 1 - SST
+ * 2 - ST
+ */
+ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
+
+#define AUTONEG_ADV_DEFAULT	0x2F
+#define AUTONEG_ADV_MASK	0x2F
+#define FLOW_CONTROL_DEFAULT	FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT	0
+#define FLASH_VENDOR_MIN	0
+#define FLASH_VENDOR_MAX	2
+
+struct atl2_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct atl2_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
+{
+	int i;
+	struct atl2_opt_list *ent;
+
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			printk(KERN_INFO "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			printk(KERN_INFO "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option:
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					printk(KERN_INFO "%s\n", ent->str);
+				return 0;
+			}
+		}
+		break;
+	default:
+		BUG();
+	}
+
+	printk(KERN_INFO "Invalid %s specified (%i) %s\n",
+		opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/*
+ * atl2_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ */
+static void __devinit atl2_check_options(struct atl2_adapter *adapter)
+{
+	int val;
+	struct atl2_option opt;
+	int bd = adapter->bd_number;
+	if (bd >= ATL2_MAX_NIC) {
+		printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
+			bd);
+		printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = ATL2_MAX_NIC;
+#endif
+	}
+
+	/* Bytes of Transmit Memory */
+	opt.type = range_option;
+	opt.name = "Bytes of Transmit Memory";
+	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
+	opt.def = ATL2_DEFAULT_TX_MEMSIZE;
+	opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
+	opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
+#ifdef module_param_array
+	if (num_TxMemSize > bd) {
+#endif
+		val = TxMemSize[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->txd_ring_size = ((u32) val) * 1024;
+#ifdef module_param_array
+	} else
+		adapter->txd_ring_size = ((u32)opt.def) * 1024;
+#endif
+	/* txs ring size: one status entry per 128 bytes of TX data memory,
+	 * capped at 160 entries */
+	adapter->txs_ring_size = adapter->txd_ring_size / 128;
+	if (adapter->txs_ring_size > 160)
+		adapter->txs_ring_size = 160;
+
+	/* Receive Memory Block Count */
+	opt.type = range_option;
+	opt.name = "Number of receive memory block";
+	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
+	opt.def = ATL2_DEFAULT_RXD_COUNT;
+	opt.arg.r.min = ATL2_MIN_RXD_COUNT;
+	opt.arg.r.max = ATL2_MAX_RXD_COUNT;
+#ifdef module_param_array
+	if (num_RxMemBlock > bd) {
+#endif
+		val = RxMemBlock[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->rxd_ring_size = (u32)val;
+		/* FIXME */
+		/* ((u16)val)&~1; */	/* even number */
+#ifdef module_param_array
+	} else
+		adapter->rxd_ring_size = (u32)opt.def;
+#endif
+	/* init RXD Flow control value */
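+	/* (high watermark: 7/8 of the ring; low watermark: the larger of
+	 * ATL2_MIN_RXD_COUNT/8 and rxd_ring_size/12) */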
+	adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
+	adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
+		(adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
+		(adapter->rxd_ring_size / 12);
+
+	/* Interrupt Moderate Timer */
+	opt.type = range_option;
+	opt.name = "Interrupt Moderate Timer";
+	opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
+	opt.def = INT_MOD_DEFAULT_CNT;
+	opt.arg.r.min = INT_MOD_MIN_CNT;
+	opt.arg.r.max = INT_MOD_MAX_CNT;
+#ifdef module_param_array
+	if (num_IntModTimer > bd) {
+#endif
+		val = IntModTimer[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->imt = (u16) val;
+#ifdef module_param_array
+	} else
+		adapter->imt = (u16)(opt.def);
+#endif
+	/* Flash Vendor */
+	opt.type = range_option;
+	opt.name = "SPI Flash Vendor";
+	opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
+	opt.def = FLASH_VENDOR_DEFAULT;
+	opt.arg.r.min = FLASH_VENDOR_MIN;
+	opt.arg.r.max = FLASH_VENDOR_MAX;
+#ifdef module_param_array
+	if (num_FlashVendor > bd) {
+#endif
+		val = FlashVendor[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->hw.flash_vendor = (u8) val;
+#ifdef module_param_array
+	} else
+		adapter->hw.flash_vendor = (u8)(opt.def);
+#endif
+	/* MediaType */
+	opt.type = range_option;
+	opt.name = "Speed/Duplex Selection";
+	opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
+	opt.def = MEDIA_TYPE_AUTO_SENSOR;
+	opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
+	opt.arg.r.max = MEDIA_TYPE_10M_HALF;
+#ifdef module_param_array
+	if (num_MediaType > bd) {
+#endif
+		val = MediaType[bd];
+		atl2_validate_option(&val, &opt);
+		adapter->hw.MediaType = (u16) val;
+#ifdef module_param_array
+	} else
+		adapter->hw.MediaType = (u16)(opt.def);
+#endif
+}
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
new file mode 100644
index 0000000..6e1f28f
--- /dev/null
+++ b/drivers/net/atlx/atl2.h
@@ -0,0 +1,530 @@
+/* atl2.h -- atl2 driver definitions
+ *
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
+ * Copyright(c) 2007 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef _ATL2_H_
+#define _ATL2_H_
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+
+#ifndef _ATL2_HW_H_
+#define _ATL2_HW_H_
+
+#ifndef _ATL2_OSDEP_H_
+#define _ATL2_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "atlx.h"
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#define PCI_COMMAND_REGISTER	PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE	PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN		ETH_ALEN
+
+#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
+
+#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
+	((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
+	(iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
+
+#define ATL2_READ_REG_ARRAY(a, reg, offset) \
+	(ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
+
+#endif /* _ATL2_OSDEP_H_ */
+
+struct atl2_adapter;
+struct atl2_hw;
+
+/* function prototype */
+static s32 atl2_reset_hw(struct atl2_hw *hw);
+static s32 atl2_read_mac_addr(struct atl2_hw *hw);
+static s32 atl2_init_hw(struct atl2_hw *hw);
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+	u16 *duplex);
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_set_mac_addr(struct atl2_hw *hw);
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
+static s32 atl2_phy_init(struct atl2_hw *hw);
+static int atl2_check_eeprom_exist(struct atl2_hw *hw);
+static void atl2_force_ps(struct atl2_hw *hw);
+
+/* register definition */
+
+/* Block IDLE Status Register */
+#define IDLE_STATUS_RXMAC	1	/* 1: RXMAC is non-IDLE */
+#define IDLE_STATUS_TXMAC	2	/* 1: TXMAC is non-IDLE */
+#define IDLE_STATUS_DMAR	8	/* 1: DMAR is non-IDLE */
+#define IDLE_STATUS_DMAW	4	/* 1: DMAW is non-IDLE */
+
+/* MDIO Control Register */
+#define MDIO_WAIT_TIMES		10
+
+/* MAC Control Register */
+#define MAC_CTRL_DBG_TX_BKPRESURE	0x100000	/* 1: TX max backoff */
+#define MAC_CTRL_MACLP_CLK_PHY		0x8000000	/* 1: 25MHz from phy */
+#define MAC_CTRL_HALF_LEFT_BUF_SHIFT	28
+#define MAC_CTRL_HALF_LEFT_BUF_MASK	0xF		/* MAC retry buf x32B */
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_TXRAM_END	0x1500	/* Internal tail address of TXRAM
+					 * default: 2byte*1024 */
+#define REG_SRAM_RXRAM_END	0x1502	/* Internal tail address of RXRAM
+					 * default: 2byte*1024 */
+
+/* Descriptor Control register */
+#define REG_TXD_BASE_ADDR_LO	0x1544	/* Base address of the Transmit Data
+					 * Memory, low 32 bits (dword aligned) */
+#define REG_TXD_MEM_SIZE	0x1548	/* Transmit Data Memory size (in
+					 * double words, max 256KB) */
+#define REG_TXS_BASE_ADDR_LO	0x154C	/* Base address of the Transmit Status
+					 * Memory, low 32 bits (dword aligned) */
+#define REG_TXS_MEM_SIZE	0x1550	/* double word unit, max 4*2047
+					 * bytes. */
+#define REG_RXD_BASE_ADDR_LO	0x1554	/* Base address of the Receive Data &
+					 * Status Memory, low 32 bits (unit 8
+					 * bytes) */
+#define REG_RXD_BUF_NUM		0x1558	/* Receive Data & Status Memory buffer
+					 * number (unit 1536 bytes, max
+					 * 1536*2047) */
+
+/* DMAR Control Register */
+#define REG_DMAR	0x1580
+#define     DMAR_EN	0x1	/* 1: Enable DMAR */
+
+/* TX Cut-Through (early tx threshold) Control Register */
+#define REG_TX_CUT_THRESH	0x1590	/* Threshold at which TxMac begins
+					 * transmitting the packet (unit: word) */
+
+/* DMAW Control Register */
+#define REG_DMAW	0x15A0
+#define     DMAW_EN	0x1
+
+/* Flow control register */
+#define REG_PAUSE_ON_TH		0x15A8	/* RXD high watermark of overflow
+					 * threshold configuration register */
+#define REG_PAUSE_OFF_TH	0x15AA	/* RXD lower watermark of overflow
+					 * threshold configuration register */
+
+/* Mailbox Register */
+#define REG_MB_TXD_WR_IDX	0x15f0	/* double word align */
+#define REG_MB_RXD_RD_IDX	0x15F4	/* RXD Read index (unit: 1536 bytes) */
+
+/* Interrupt Status Register */
+#define ISR_TIMER	1	/* Interrupt when Timer counts down to zero */
+#define ISR_MANUAL	2	/* Software manual interrupt, for debug. Set
+				 * when SW_MAN_INT_EN is set in the Selene
+				 * Master Control Register (Offset 0x1400,
+				 * Table 51). */
+#define ISR_RXF_OV	4	/* RXF overflow interrupt */
+#define ISR_TXF_UR	8	/* TXF underrun interrupt */
+#define ISR_TXS_OV	0x10	/* Internal transmit status buffer full
+				 * interrupt */
+#define ISR_RXS_OV	0x20	/* Internal receive status buffer full
+				 * interrupt */
+#define ISR_LINK_CHG	0x40	/* Link Status Change Interrupt */
+#define ISR_HOST_TXD_UR	0x80
+#define ISR_HOST_RXD_OV	0x100	/* Host rx data memory full, one pulse */
+#define ISR_DMAR_TO_RST	0x200	/* DMAR op timeout interrupt. SW should
+				 * do Reset */
+#define ISR_DMAW_TO_RST	0x400
+#define ISR_PHY		0x800	/* phy interrupt */
+#define ISR_TS_UPDATE	0x10000	/* interrupt after new tx pkt status written
+				 * to host */
+#define ISR_RS_UPDATE	0x20000	/* interrupt after new rx pkt status written
+				 * to host. */
+#define ISR_TX_EARLY	0x40000	/* interrupt when txmac begin transmit one
+				 * packet */
+
+#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
+	ISR_TS_UPDATE | ISR_TX_EARLY)
+#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
+	 ISR_RS_UPDATE)
+
+#define IMR_NORMAL_MASK		(\
+	/*ISR_LINK_CHG		|*/\
+	ISR_MANUAL		|\
+	ISR_DMAR_TO_RST		|\
+	ISR_DMAW_TO_RST		|\
+	ISR_PHY			|\
+	ISR_PHY_LINKDOWN	|\
+	ISR_TS_UPDATE		|\
+	ISR_RS_UPDATE)
+
+/* Receive MAC Statistics Registers */
+#define REG_STS_RX_PAUSE	0x1700	/* Num pause packets received */
+#define REG_STS_RXD_OV		0x1704	/* Num frames dropped due to RX
+					 * FIFO overflow */
+#define REG_STS_RXS_OV		0x1708	/* Num frames dropped due to RX
+					 * Status Buffer Overflow */
+#define REG_STS_RX_FILTER	0x170C	/* Num packets dropped due to
+					 * address filtering */
+
+/* MII definitions */
+
+/* PHY Common Register */
+#define MII_SMARTSPEED	0x14
+#define MII_DBG_ADDR	0x1D
+#define MII_DBG_DATA	0x1E
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND		0x04
+#define CMD_IO_SPACE		0x0001
+#define CMD_MEMORY_SPACE	0x0002
+#define CMD_BUS_MASTER		0x0004
+
+#define MEDIA_TYPE_100M_FULL	1
+#define MEDIA_TYPE_100M_HALF	2
+#define MEDIA_TYPE_10M_FULL	3
+#define MEDIA_TYPE_10M_HALF	4
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT	0x000F	/* Everything */
+
+/* The size (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE		14
+#define MAXIMUM_ETHERNET_FRAME_SIZE	1518	/* with FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE	64	/* with FCS */
+#define ETHERNET_FCS_SIZE		4
+#define MAX_JUMBO_FRAME_SIZE		0x2000
+#define VLAN_SIZE			4
+
+struct tx_pkt_header {
+	unsigned pkt_size:11;
+	unsigned:4;			/* reserved */
+	unsigned ins_vlan:1;		/* txmac should insert vlan */
+	unsigned short vlan;		/* vlan tag */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_HEADER_SIZE_MASK		0x7FF
+#define TX_PKT_HEADER_SIZE_SHIFT	0
+#define TX_PKT_HEADER_INS_VLAN_MASK	0x1
+#define TX_PKT_HEADER_INS_VLAN_SHIFT	15
+#define TX_PKT_HEADER_VLAN_TAG_MASK	0xFFFF
+#define TX_PKT_HEADER_VLAN_TAG_SHIFT	16
+
+struct tx_pkt_status {
+	unsigned pkt_size:11;
+	unsigned:5;		/* reserved */
+	unsigned ok:1;		/* current packet transmitted without error */
+	unsigned bcast:1;	/* broadcast packet */
+	unsigned mcast:1;	/* multicast packet */
+	unsigned pause:1;	/* transmitted a pause frame */
+	unsigned ctrl:1;
+	unsigned defer:1;	/* current packet was transmitted with deferral */
+	unsigned exc_defer:1;
+	unsigned single_col:1;
+	unsigned multi_col:1;
+	unsigned late_col:1;
+	unsigned abort_col:1;
+	unsigned underun:1;	/* current packet is aborted
+				 * due to txram underrun */
+	unsigned:3;		/* reserved */
+	unsigned update:1;	/* always 1'b1 in tx_status_buf */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_STATUS_SIZE_MASK		0x7FF
+#define TX_PKT_STATUS_SIZE_SHIFT	0
+#define TX_PKT_STATUS_OK_MASK		0x1
+#define TX_PKT_STATUS_OK_SHIFT		16
+#define TX_PKT_STATUS_BCAST_MASK	0x1
+#define TX_PKT_STATUS_BCAST_SHIFT	17
+#define TX_PKT_STATUS_MCAST_MASK	0x1
+#define TX_PKT_STATUS_MCAST_SHIFT	18
+#define TX_PKT_STATUS_PAUSE_MASK	0x1
+#define TX_PKT_STATUS_PAUSE_SHIFT	19
+#define TX_PKT_STATUS_CTRL_MASK		0x1
+#define TX_PKT_STATUS_CTRL_SHIFT	20
+#define TX_PKT_STATUS_DEFER_MASK	0x1
+#define TX_PKT_STATUS_DEFER_SHIFT	21
+#define TX_PKT_STATUS_EXC_DEFER_MASK	0x1
+#define TX_PKT_STATUS_EXC_DEFER_SHIFT	22
+#define TX_PKT_STATUS_SINGLE_COL_MASK	0x1
+#define TX_PKT_STATUS_SINGLE_COL_SHIFT	23
+#define TX_PKT_STATUS_MULTI_COL_MASK	0x1
+#define TX_PKT_STATUS_MULTI_COL_SHIFT	24
+#define TX_PKT_STATUS_LATE_COL_MASK	0x1
+#define TX_PKT_STATUS_LATE_COL_SHIFT	25
+#define TX_PKT_STATUS_ABORT_COL_MASK	0x1
+#define TX_PKT_STATUS_ABORT_COL_SHIFT	26
+#define TX_PKT_STATUS_UNDERRUN_MASK	0x1
+#define TX_PKT_STATUS_UNDERRUN_SHIFT	27
+#define TX_PKT_STATUS_UPDATE_MASK	0x1
+#define TX_PKT_STATUS_UPDATE_SHIFT	31
+
+struct rx_pkt_status {
+	unsigned pkt_size:11;	/* packet size, max 2047 bytes */
+	unsigned:5;		/* reserved */
+	unsigned ok:1;		/* current packet received ok without error */
+	unsigned bcast:1;	/* current packet is broadcast */
+	unsigned mcast:1;	/* current packet is multicast */
+	unsigned pause:1;
+	unsigned ctrl:1;
+	unsigned crc:1;		/* received a packet with crc error */
+	unsigned code:1;	/* received a packet with code error */
+	unsigned runt:1;	/* received a packet less than 64 bytes
+				 * with good crc */
+	unsigned frag:1;	/* received a packet less than 64 bytes
+				 * with bad crc */
+	unsigned trunc:1;	/* current frame truncated due to rxram full */
+	unsigned align:1;	/* this packet has an alignment error */
+	unsigned vlan:1;	/* this packet has a vlan tag */
+	unsigned:3;		/* reserved */
+	unsigned update:1;
+	unsigned short vtag;	/* vlan tag */
+	unsigned:16;
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define RX_PKT_STATUS_SIZE_MASK		0x7FF
+#define RX_PKT_STATUS_SIZE_SHIFT	0
+#define RX_PKT_STATUS_OK_MASK		0x1
+#define RX_PKT_STATUS_OK_SHIFT		16
+#define RX_PKT_STATUS_BCAST_MASK	0x1
+#define RX_PKT_STATUS_BCAST_SHIFT	17
+#define RX_PKT_STATUS_MCAST_MASK	0x1
+#define RX_PKT_STATUS_MCAST_SHIFT	18
+#define RX_PKT_STATUS_PAUSE_MASK	0x1
+#define RX_PKT_STATUS_PAUSE_SHIFT	19
+#define RX_PKT_STATUS_CTRL_MASK		0x1
+#define RX_PKT_STATUS_CTRL_SHIFT	20
+#define RX_PKT_STATUS_CRC_MASK		0x1
+#define RX_PKT_STATUS_CRC_SHIFT		21
+#define RX_PKT_STATUS_CODE_MASK		0x1
+#define RX_PKT_STATUS_CODE_SHIFT	22
+#define RX_PKT_STATUS_RUNT_MASK		0x1
+#define RX_PKT_STATUS_RUNT_SHIFT	23
+#define RX_PKT_STATUS_FRAG_MASK		0x1
+#define RX_PKT_STATUS_FRAG_SHIFT	24
+#define RX_PKT_STATUS_TRUNK_MASK	0x1
+#define RX_PKT_STATUS_TRUNK_SHIFT	25
+#define RX_PKT_STATUS_ALIGN_MASK	0x1
+#define RX_PKT_STATUS_ALIGN_SHIFT	26
+#define RX_PKT_STATUS_VLAN_MASK		0x1
+#define RX_PKT_STATUS_VLAN_SHIFT	27
+#define RX_PKT_STATUS_UPDATE_MASK	0x1
+#define RX_PKT_STATUS_UPDATE_SHIFT	31
+#define RX_PKT_STATUS_VLAN_TAG_MASK	0xFFFF
+#define RX_PKT_STATUS_VLAN_TAG_SHIFT	32
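+/* The vlan tag lives in the second rx status dword, hence the shift of 32. */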
+
+struct rx_desc {
+	struct rx_pkt_status	status;
+	unsigned char		packet[1536 - sizeof(struct rx_pkt_status)];
+};
+
+enum atl2_speed_duplex {
+	atl2_10_half = 0,
+	atl2_10_full = 1,
+	atl2_100_half = 2,
+	atl2_100_full = 3
+};
+
+struct atl2_spi_flash_dev {
+	const char *manu_name;	/* manufacturer name */
+	/* op-code */
+	u8 cmdWRSR;
+	u8 cmdREAD;
+	u8 cmdPROGRAM;
+	u8 cmdWREN;
+	u8 cmdWRDI;
+	u8 cmdRDSR;
+	u8 cmdRDID;
+	u8 cmdSECTOR_ERASE;
+	u8 cmdCHIP_ERASE;
+};
+
+/* Structure containing variables used by the shared code (atl2_hw.c) */
+struct atl2_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+
+	u8 preamble_len;
+	u8 max_retry;          /* Maximum number of retransmissions; after
+				* that, the packet is discarded. */
+	u8 jam_ipg;            /* IPG to start JAM for collision based flow
+				* control in half-duplex mode. In unit of
+				* 8-bit time. */
+	u8 ipgt;               /* Desired back to back inter-packet gap. The
+				* default is 96-bit time. */
+	u8 min_ifg;            /* Minimum IFG to enforce in between RX
+				* frames. Frames with a gap below this IFG
+				* are dropped. */
+	u8 ipgr1;              /* 64bit Carrier-Sense window */
+	u8 ipgr2;              /* 96-bit IPG window */
+	u8 retry_buf;          /* In half-duplex mode, hold some bytes for
+				* MAC retry. (unit: 8*4 bytes) */
+
+	u16 fc_rxd_hi;
+	u16 fc_rxd_lo;
+	u16 lcol;              /* Collision Window */
+	u16 max_frame_size;
+
+	u16 MediaType;
+	u16 autoneg_advertised;
+	u16 pci_cmd_word;
+
+	u16 mii_autoneg_adv_reg;
+
+	u32 mem_rang;
+	u32 txcw;
+	u32 mc_filter_type;
+	u32 num_mc_addrs;
+	u32 collision_delta;
+	u32 tx_packet_delta;
+	u16 phy_spd_default;
+
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+
+	/* spi flash */
+	u8 flash_vendor;
+
+	u8 dma_fairness;
+	u8 mac_addr[NODE_ADDRESS_SIZE];
+	u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+
+	/* FIXME */
+	/* bool phy_preamble_sup; */
+	bool phy_configured;
+};
+
+#endif /* _ATL2_HW_H_ */
+
+struct atl2_ring_header {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+};
+
+/* board specific private data structure */
+struct atl2_adapter {
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+#endif
+	u32 wol;
+	u16 link_speed;
+	u16 link_duplex;
+
+	spinlock_t stats_lock;
+	spinlock_t tx_lock;
+
+	struct work_struct reset_task;
+	struct work_struct link_chg_task;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_config_timer;
+
+	unsigned long cfg_phy;
+	bool mac_disabled;
+
+	/* All Descriptor memory */
+	dma_addr_t	ring_dma;
+	void		*ring_vir_addr;
+	int		ring_size;
+
+	struct tx_pkt_header	*txd_ring;
+	dma_addr_t	txd_dma;
+
+	struct tx_pkt_status	*txs_ring;
+	dma_addr_t	txs_dma;
+
+	struct rx_desc	*rxd_ring;
+	dma_addr_t	rxd_dma;
+
+	u32 txd_ring_size;         /* bytes per unit */
+	u32 txs_ring_size;         /* dwords per unit */
+	u32 rxd_ring_size;         /* 1536 bytes per unit */
+
+	/* read /write ptr: */
+	/* host */
+	u32 txd_write_ptr;
+	u32 txs_next_clear;
+	u32 rxd_read_ptr;
+
+	/* nic */
+	atomic_t txd_read_ptr;
+	atomic_t txs_write_ptr;
+	u32 rxd_write_ptr;
+
+	/* Interrupt Moderator timer (2us resolution) */
+	u16 imt;
+	/* Interrupt Clear timer (2us resolution) */
+	u16 ict;
+
+	unsigned long flags;
+	/* structs defined in atl2_hw.h */
+	u32 bd_number;     /* board number */
+	bool pci_using_64;
+	bool have_msi;
+	struct atl2_hw hw;
+
+	u32 usr_cmd;
+	/* FIXME */
+	/* u32 regs_buff[ATL2_REGS_LEN]; */
+	u32 pci_state[16];
+
+	u32 *config_space;
+};
+
+enum atl2_state_t {
+	__ATL2_TESTING,
+	__ATL2_RESETTING,
+	__ATL2_DOWN
+};
+
+#endif /* _ATL2_H_ */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 5ee1b05..92c16c3 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -653,6 +653,8 @@
 
 	aup = dev->priv;
 
+	spin_lock_init(&aup->lock);
+
 	/* Allocate the data buffers */
 	/* Snooping works fine with eth on all au1xxx */
 	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
@@ -753,7 +755,6 @@
 		aup->tx_db_inuse[i] = pDB;
 	}
 
-	spin_lock_init(&aup->lock);
 	dev->base_addr = base;
 	dev->irq = irq;
 	dev->open = au1000_open;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index a886a4b..4207d6e 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -153,7 +153,7 @@
 	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
 		if (jiffies - reset_start_time > 2*HZ/100) {
 			dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
-			       __FUNCTION__, dev->name);
+			       __func__, dev->name);
 			break;
 		}
 	}
@@ -173,7 +173,7 @@
 	if (ei_status.dmaing) {
 		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 			ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -215,7 +215,7 @@
 		dev_err(&ax->dev->dev,
 			"%s: DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 			ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -260,7 +260,7 @@
 	if (ei_status.dmaing) {
 		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
 			"[DMAstat:%d][irqlock:%d]\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 		       ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -396,7 +396,7 @@
 {
 	if (phy_debug)
 		pr_debug("%s: dev %p, %04x, %04x, %d\n",
-			__FUNCTION__, dev, phy_addr, reg, opc);
+			__func__, dev, phy_addr, reg, opc);
 
 	ax_mii_ei_outbits(dev, 0x3f, 6);	/* pre-amble */
 	ax_mii_ei_outbits(dev, 1, 2);		/* frame-start */
@@ -422,7 +422,7 @@
       	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 
 	if (phy_debug)
-		pr_debug("%s: %04x.%04x => read %04x\n", __FUNCTION__,
+		pr_debug("%s: %04x.%04x => read %04x\n", __func__,
 			 phy_addr, reg, result);
 
 	return result;
@@ -436,7 +436,7 @@
 	unsigned long flags;
 
 	dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-		__FUNCTION__, dev, phy_addr, reg, value);
+		__func__, dev, phy_addr, reg, value);
 
       	spin_lock_irqsave(&ei->page_lock, flags);
 
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 3db7db1..df896e2 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -811,7 +811,7 @@
 {
 	u32 opmode;
 
-	pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__);
+	pr_debug("%s: %s\n", DRV_NAME, __func__);
 
 	/* Set RX DMA */
 	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -847,7 +847,7 @@
 /* Our watchdog timed out. Called by the networking layer */
 static void bfin_mac_timeout(struct net_device *dev)
 {
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	bfin_mac_disable();
 
@@ -949,7 +949,7 @@
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	int retval;
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	/*
 	 * Check that the address is valid.  If its not, refuse
@@ -989,7 +989,7 @@
 static int bfin_mac_close(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 2486a65..883e0a7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -69,7 +69,7 @@
 	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
@@ -1127,7 +1127,7 @@
 	}
 }
 
-static int
+static void
 bnx2_set_mac_link(struct bnx2 *bp)
 {
 	u32 val;
@@ -1193,8 +1193,6 @@
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
 		bnx2_init_all_rx_contexts(bp);
-
-	return 0;
 }
 
 static void
@@ -5600,7 +5598,7 @@
 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
 		u32 bmcr;
 
-		bp->current_interval = bp->timer_interval;
+		bp->current_interval = BNX2_TIMER_INTERVAL;
 
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
@@ -5629,7 +5627,7 @@
 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
 		}
 	} else
-		bp->current_interval = bp->timer_interval;
+		bp->current_interval = BNX2_TIMER_INTERVAL;
 
 	if (check_link) {
 		u32 val;
@@ -5674,11 +5672,11 @@
 		} else {
 			bnx2_disable_forced_2g5(bp);
 			bp->serdes_an_pending = 2;
-			bp->current_interval = bp->timer_interval;
+			bp->current_interval = BNX2_TIMER_INTERVAL;
 		}
 
 	} else
-		bp->current_interval = bp->timer_interval;
+		bp->current_interval = BNX2_TIMER_INTERVAL;
 
 	spin_unlock(&bp->phy_lock);
 }
@@ -7516,8 +7514,7 @@
 
 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
-	bp->timer_interval =  HZ;
-	bp->current_interval =  HZ;
+	bp->current_interval = BNX2_TIMER_INTERVAL;
 
 	bp->phy_addr = 1;
 
@@ -7607,7 +7604,7 @@
 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
 	init_timer(&bp->timer);
-	bp->timer.expires = RUN_AT(bp->timer_interval);
+	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
 	bp->timer.data = (unsigned long) bp;
 	bp->timer.function = bnx2_timer;
 
@@ -7720,7 +7717,6 @@
 
 	memcpy(dev->dev_addr, bp->mac_addr, 6);
 	memcpy(dev->perm_addr, bp->mac_addr, 6);
-	bp->name = board_info[ent->driver_data].name;
 
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
@@ -7747,7 +7743,7 @@
 	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
 		"IRQ %d, node addr %s\n",
 		dev->name,
-		bp->name,
+		board_info[ent->driver_data].name,
 		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
 		((CHIP_ID(bp) & 0x0ff0) >> 4),
 		bnx2_bus_string(bp, str),
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index dfacd31..edc7774 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6654,6 +6654,8 @@
 	struct bnx2_tx_ring_info	tx_ring;
 };
 
+#define BNX2_TIMER_INTERVAL			HZ
+
 struct bnx2 {
 	/* Fields used in the tx and intr/napi performance paths are grouped */
 	/* together in the beginning of the structure. */
@@ -6701,9 +6703,6 @@
 
 	/* End of fields used in the performance code paths. */
 
-	char			*name;
-
-	int			timer_interval;
 	int			current_interval;
 	struct			timer_list timer;
 	struct work_struct	reset_task;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index a8eb3c4..fce7451 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.21"
-#define DRV_MODULE_RELDATE	"2008/09/03"
+#define DRV_MODULE_VERSION	"1.45.22"
+#define DRV_MODULE_RELDATE	"2008/09/09"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -649,15 +649,16 @@
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-static void bnx2x_int_disable_sync(struct bnx2x *bp)
+static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 {
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 	int i;
 
 	/* disable interrupt handling */
 	atomic_inc(&bp->intr_sem);
-	/* prevent the HW from sending interrupts */
-	bnx2x_int_disable(bp);
+	if (disable_hw)
+		/* prevent the HW from sending interrupts */
+		bnx2x_int_disable(bp);
 
 	/* make sure all ISRs are done */
 	if (msix) {
@@ -6086,9 +6087,9 @@
 	}
 }
 
-static void bnx2x_netif_stop(struct bnx2x *bp)
+static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 {
-	bnx2x_int_disable_sync(bp);
+	bnx2x_int_disable_sync(bp, disable_hw);
 	if (netif_running(bp->dev)) {
 		bnx2x_napi_disable(bp);
 		netif_tx_disable(bp->dev);
@@ -6475,7 +6476,7 @@
 	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
-	bnx2x_int_disable_sync(bp);
+	bnx2x_int_disable_sync(bp, 1);
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 load_error:
@@ -6650,7 +6651,7 @@
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	bnx2x_netif_stop(bp);
+	bnx2x_netif_stop(bp, 1);
 	if (!netif_running(bp->dev))
 		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
@@ -8791,7 +8792,7 @@
 	if (!netif_running(bp->dev))
 		return BNX2X_LOOPBACK_FAILED;
 
-	bnx2x_netif_stop(bp);
+	bnx2x_netif_stop(bp, 1);
 
 	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
 		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -10346,6 +10347,74 @@
 	return rc;
 }
 
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+	int i;
+
+	bp->state = BNX2X_STATE_ERROR;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	bnx2x_netif_stop(bp, 0);
+
+	del_timer_sync(&bp->timer);
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		for (i = 0; i < config->hdr.length_6b; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+	}
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	netif_carrier_off(bp->dev);
+
+	return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+	u32 val;
+
+	mutex_init(&bp->port.phy_mutex);
+
+	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+	if (!bp->common.shmem_base ||
+	    (bp->common.shmem_base < 0xA0000) ||
+	    (bp->common.shmem_base >= 0xC0000)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		BNX2X_ERR("BAD MCP validity signature\n");
+
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
+			      & DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+}
+
 /**
  * bnx2x_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -10365,7 +10434,7 @@
 	netif_device_detach(dev);
 
 	if (netif_running(dev))
-		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		bnx2x_eeh_nic_unload(bp);
 
 	pci_disable_device(pdev);
 
@@ -10420,8 +10489,10 @@
 
 	rtnl_lock();
 
+	bnx2x_eeh_recover(bp);
+
 	if (netif_running(dev))
-		bnx2x_nic_load(bp, LOAD_OPEN);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
 
 	netif_device_attach(dev);
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index b211486..ade5f3f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <net/ipx.h>
 #include <net/arp.h>
+#include <net/ipv6.h>
 #include <asm/byteorder.h>
 #include "bonding.h"
 #include "bond_alb.h"
@@ -81,6 +82,7 @@
 #define RLB_PROMISC_TIMEOUT	10*ALB_TIMER_TICKS_PER_SEC
 
 static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
+static const u8 mac_v6_allmcast[ETH_ALEN] = {0x33,0x33,0x00,0x00,0x00,0x01};
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
 
 #pragma pack(1)
@@ -710,7 +712,7 @@
 	struct arp_pkt *arp = arp_pkt(skb);
 	struct slave *tx_slave = NULL;
 
-	if (arp->op_code == __constant_htons(ARPOP_REPLY)) {
+	if (arp->op_code == htons(ARPOP_REPLY)) {
 		/* the arp must be sent on the selected
 		* rx channel
 		*/
@@ -719,7 +721,7 @@
 			memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
 		}
 		dprintk("Server sent ARP Reply packet\n");
-	} else if (arp->op_code == __constant_htons(ARPOP_REQUEST)) {
+	} else if (arp->op_code == htons(ARPOP_REQUEST)) {
 		/* Create an entry in the rx_hashtbl for this client as a
 		 * place holder.
 		 * When the arp reply is received the entry will be updated
@@ -1290,6 +1292,7 @@
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
 	int res = 1;
+	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
@@ -1319,11 +1322,32 @@
 	}
 		break;
 	case ETH_P_IPV6:
+		/* IPv6 doesn't really use broadcast mac address, but leave
+		 * that here just in case.
+		 */
 		if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
 			do_tx_balance = 0;
 			break;
 		}
 
+		/* IPv6 uses all-nodes multicast as an equivalent to
+		 * broadcasts in IPv4.
+		 */
+		if (memcmp(eth_data->h_dest, mac_v6_allmcast, ETH_ALEN) == 0) {
+			do_tx_balance = 0;
+			break;
+		}
+
+		/* Additionally, DAD probes should not be tx-balanced as that
+		 * will lead to false positives for duplicate addresses and
+		 * prevent address configuration from working.
+		 */
+		ip6hdr = ipv6_hdr(skb);
+		if (ipv6_addr_any(&ip6hdr->saddr)) {
+			do_tx_balance = 0;
+			break;
+		}
+
 		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
 		hash_size = sizeof(ipv6_hdr(skb)->daddr);
 		break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c792138..8e2be24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3702,7 +3702,7 @@
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 	struct iphdr *iph = ip_hdr(skb);
 
-	if (skb->protocol == __constant_htons(ETH_P_IP)) {
+	if (skb->protocol == htons(ETH_P_IP)) {
 		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
 			(data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
 	}
@@ -3723,8 +3723,8 @@
 	__be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
 	int layer4_xor = 0;
 
-	if (skb->protocol == __constant_htons(ETH_P_IP)) {
-		if (!(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) &&
+	if (skb->protocol == htons(ETH_P_IP)) {
+		if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
 		    (iph->protocol == IPPROTO_TCP ||
 		     iph->protocol == IPPROTO_UDP)) {
 			layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
@@ -4493,6 +4493,12 @@
 
 static const struct ethtool_ops bond_ethtool_ops = {
 	.get_drvinfo		= bond_ethtool_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.get_ufo		= ethtool_op_get_ufo,
+	.get_flags		= ethtool_op_get_flags,
 };
 
 /*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index fb730ec..ffb668d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -32,7 +32,7 @@
 #ifdef BONDING_DEBUG
 #define dprintk(fmt, args...) \
 	printk(KERN_DEBUG     \
-	       DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args )
+	       DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args )
 #else
 #define dprintk(fmt, args...)
 #endif /* BONDING_DEBUG */
@@ -333,5 +333,13 @@
 void bond_register_arp(struct bonding *);
 void bond_unregister_arp(struct bonding *);
 
+/* exported from bond_main.c */
+extern struct list_head bond_dev_list;
+extern struct bond_parm_tbl bond_lacp_tbl[];
+extern struct bond_parm_tbl bond_mode_tbl[];
+extern struct bond_parm_tbl xmit_hashtype_tbl[];
+extern struct bond_parm_tbl arp_validate_tbl[];
+extern struct bond_parm_tbl fail_over_mac_tbl[];
+
 #endif /* _LINUX_BONDING_H */
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f1936d5..86909cf 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -74,6 +74,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/vmalloc.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/mm.h>
@@ -91,6 +92,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/mutex.h>
+#include <linux/firmware.h>
 
 #include <net/checksum.h>
 
@@ -197,6 +199,7 @@
 MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
 module_param(cassini_debug, int, 0);
 MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 module_param(link_mode, int, 0);
@@ -812,9 +815,44 @@
 	return (limit <= 0);
 }
 
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+	const struct firmware *fw;
+	const char fw_name[] = "sun/cassini.bin";
+	int err;
+
+	if (PHY_NS_DP83065 != cp->phy_id)
+		return 0;
+
+	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+	if (err) {
+		printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		return err;
+	}
+	if (fw->size < 2) {
+		printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
+		       fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
+	cp->fw_size = fw->size - 2;
+	cp->fw_data = vmalloc(cp->fw_size);
+	if (!cp->fw_data) {
+		err = -ENOMEM;
+		printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
+		goto out;
+	}
+	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+	release_firmware(fw);
+	return err;
+}
+
 static void cas_saturn_firmware_load(struct cas *cp)
 {
-	cas_saturn_patch_t *patch = cas_saturn_patch;
+	int i;
 
 	cas_phy_powerdown(cp);
 
@@ -833,11 +871,9 @@
 
 	/* download new firmware */
 	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
-	cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
-	while (patch->addr) {
-		cas_phy_write(cp, DP83065_MII_REGD, patch->val);
-		patch++;
-	}
+	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+	for (i = 0; i < cp->fw_size; i++)
+		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 
 	/* enable firmware */
 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
@@ -2182,7 +2218,7 @@
 	 * do any additional locking here. stick the buffer
 	 * at the end.
 	 */
-	__skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
+	__skb_queue_tail(flow, skb);
 	if (words[0] & RX_COMP1_RELEASE_FLOW) {
 		while ((skb = __skb_dequeue(flow))) {
 			cas_skb_release(skb);
@@ -5108,6 +5144,9 @@
 	cas_reset(cp, 0);
 	if (cas_check_invariants(cp))
 		goto err_out_iounmap;
+	if (cp->cas_flags & CAS_FLAG_SATURN)
+		if (cas_saturn_firmware_init(cp))
+			goto err_out_iounmap;
 
 	cp->init_block = (struct cas_init_block *)
 		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
@@ -5217,6 +5256,9 @@
 	cp = netdev_priv(dev);
 	unregister_netdev(dev);
 
+	if (cp->fw_data)
+		vfree(cp->fw_data);
+
 	mutex_lock(&cp->pm_mutex);
 	flush_scheduled_work();
 	if (cp->hw_running)
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index 552af89..fd17a00 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2514,1523 +2514,6 @@
 #define CAS_HP_FIRMWARE               cas_prog_null
 #endif
 
-/* firmware patch for NS_DP83065 */
-typedef struct cas_saturn_patch {
-	u16 addr;
-	u16 val;
-} cas_saturn_patch_t;
-
-#if 1
-cas_saturn_patch_t cas_saturn_patch[] = {
-{0x8200,    0x007e}, {0x8201,    0x0082}, {0x8202,    0x0009},
-{0x8203,    0x0000}, {0x8204,    0x0000}, {0x8205,    0x0000},
-{0x8206,    0x0000}, {0x8207,    0x0000}, {0x8208,    0x0000},
-{0x8209,    0x008e}, {0x820a,    0x008e}, {0x820b,    0x00ff},
-{0x820c,    0x00ce}, {0x820d,    0x0082}, {0x820e,    0x0025},
-{0x820f,    0x00ff}, {0x8210,    0x0001}, {0x8211,    0x000f},
-{0x8212,    0x00ce}, {0x8213,    0x0084}, {0x8214,    0x0026},
-{0x8215,    0x00ff}, {0x8216,    0x0001}, {0x8217,    0x0011},
-{0x8218,    0x00ce}, {0x8219,    0x0085}, {0x821a,    0x003d},
-{0x821b,    0x00df}, {0x821c,    0x00e5}, {0x821d,    0x0086},
-{0x821e,    0x0039}, {0x821f,    0x00b7}, {0x8220,    0x008f},
-{0x8221,    0x00f8}, {0x8222,    0x007e}, {0x8223,    0x00c3},
-{0x8224,    0x00c2}, {0x8225,    0x0096}, {0x8226,    0x0047},
-{0x8227,    0x0084}, {0x8228,    0x00f3}, {0x8229,    0x008a},
-{0x822a,    0x0000}, {0x822b,    0x0097}, {0x822c,    0x0047},
-{0x822d,    0x00ce}, {0x822e,    0x0082}, {0x822f,    0x0033},
-{0x8230,    0x00ff}, {0x8231,    0x0001}, {0x8232,    0x000f},
-{0x8233,    0x0096}, {0x8234,    0x0046}, {0x8235,    0x0084},
-{0x8236,    0x000c}, {0x8237,    0x0081}, {0x8238,    0x0004},
-{0x8239,    0x0027}, {0x823a,    0x000b}, {0x823b,    0x0096},
-{0x823c,    0x0046}, {0x823d,    0x0084}, {0x823e,    0x000c},
-{0x823f,    0x0081}, {0x8240,    0x0008}, {0x8241,    0x0027},
-{0x8242,    0x0057}, {0x8243,    0x007e}, {0x8244,    0x0084},
-{0x8245,    0x0025}, {0x8246,    0x0096}, {0x8247,    0x0047},
-{0x8248,    0x0084}, {0x8249,    0x00f3}, {0x824a,    0x008a},
-{0x824b,    0x0004}, {0x824c,    0x0097}, {0x824d,    0x0047},
-{0x824e,    0x00ce}, {0x824f,    0x0082}, {0x8250,    0x0054},
-{0x8251,    0x00ff}, {0x8252,    0x0001}, {0x8253,    0x000f},
-{0x8254,    0x0096}, {0x8255,    0x0046}, {0x8256,    0x0084},
-{0x8257,    0x000c}, {0x8258,    0x0081}, {0x8259,    0x0004},
-{0x825a,    0x0026}, {0x825b,    0x0038}, {0x825c,    0x00b6},
-{0x825d,    0x0012}, {0x825e,    0x0020}, {0x825f,    0x0084},
-{0x8260,    0x0020}, {0x8261,    0x0026}, {0x8262,    0x0003},
-{0x8263,    0x007e}, {0x8264,    0x0084}, {0x8265,    0x0025},
-{0x8266,    0x0096}, {0x8267,    0x007b}, {0x8268,    0x00d6},
-{0x8269,    0x007c}, {0x826a,    0x00fe}, {0x826b,    0x008f},
-{0x826c,    0x0056}, {0x826d,    0x00bd}, {0x826e,    0x00f7},
-{0x826f,    0x00b6}, {0x8270,    0x00fe}, {0x8271,    0x008f},
-{0x8272,    0x004e}, {0x8273,    0x00bd}, {0x8274,    0x00ec},
-{0x8275,    0x008e}, {0x8276,    0x00bd}, {0x8277,    0x00fa},
-{0x8278,    0x00f7}, {0x8279,    0x00bd}, {0x827a,    0x00f7},
-{0x827b,    0x0028}, {0x827c,    0x00ce}, {0x827d,    0x0082},
-{0x827e,    0x0082}, {0x827f,    0x00ff}, {0x8280,    0x0001},
-{0x8281,    0x000f}, {0x8282,    0x0096}, {0x8283,    0x0046},
-{0x8284,    0x0084}, {0x8285,    0x000c}, {0x8286,    0x0081},
-{0x8287,    0x0004}, {0x8288,    0x0026}, {0x8289,    0x000a},
-{0x828a,    0x00b6}, {0x828b,    0x0012}, {0x828c,    0x0020},
-{0x828d,    0x0084}, {0x828e,    0x0020}, {0x828f,    0x0027},
-{0x8290,    0x00b5}, {0x8291,    0x007e}, {0x8292,    0x0084},
-{0x8293,    0x0025}, {0x8294,    0x00bd}, {0x8295,    0x00f7},
-{0x8296,    0x001f}, {0x8297,    0x007e}, {0x8298,    0x0084},
-{0x8299,    0x001f}, {0x829a,    0x0096}, {0x829b,    0x0047},
-{0x829c,    0x0084}, {0x829d,    0x00f3}, {0x829e,    0x008a},
-{0x829f,    0x0008}, {0x82a0,    0x0097}, {0x82a1,    0x0047},
-{0x82a2,    0x00de}, {0x82a3,    0x00e1}, {0x82a4,    0x00ad},
-{0x82a5,    0x0000}, {0x82a6,    0x00ce}, {0x82a7,    0x0082},
-{0x82a8,    0x00af}, {0x82a9,    0x00ff}, {0x82aa,    0x0001},
-{0x82ab,    0x000f}, {0x82ac,    0x007e}, {0x82ad,    0x0084},
-{0x82ae,    0x0025}, {0x82af,    0x0096}, {0x82b0,    0x0041},
-{0x82b1,    0x0085}, {0x82b2,    0x0010}, {0x82b3,    0x0026},
-{0x82b4,    0x0006}, {0x82b5,    0x0096}, {0x82b6,    0x0023},
-{0x82b7,    0x0085}, {0x82b8,    0x0040}, {0x82b9,    0x0027},
-{0x82ba,    0x0006}, {0x82bb,    0x00bd}, {0x82bc,    0x00ed},
-{0x82bd,    0x0000}, {0x82be,    0x007e}, {0x82bf,    0x0083},
-{0x82c0,    0x00a2}, {0x82c1,    0x00de}, {0x82c2,    0x0042},
-{0x82c3,    0x00bd}, {0x82c4,    0x00eb}, {0x82c5,    0x008e},
-{0x82c6,    0x0096}, {0x82c7,    0x0024}, {0x82c8,    0x0084},
-{0x82c9,    0x0008}, {0x82ca,    0x0027}, {0x82cb,    0x0003},
-{0x82cc,    0x007e}, {0x82cd,    0x0083}, {0x82ce,    0x00df},
-{0x82cf,    0x0096}, {0x82d0,    0x007b}, {0x82d1,    0x00d6},
-{0x82d2,    0x007c}, {0x82d3,    0x00fe}, {0x82d4,    0x008f},
-{0x82d5,    0x0056}, {0x82d6,    0x00bd}, {0x82d7,    0x00f7},
-{0x82d8,    0x00b6}, {0x82d9,    0x00fe}, {0x82da,    0x008f},
-{0x82db,    0x0050}, {0x82dc,    0x00bd}, {0x82dd,    0x00ec},
-{0x82de,    0x008e}, {0x82df,    0x00bd}, {0x82e0,    0x00fa},
-{0x82e1,    0x00f7}, {0x82e2,    0x0086}, {0x82e3,    0x0011},
-{0x82e4,    0x00c6}, {0x82e5,    0x0049}, {0x82e6,    0x00bd},
-{0x82e7,    0x00e4}, {0x82e8,    0x0012}, {0x82e9,    0x00ce},
-{0x82ea,    0x0082}, {0x82eb,    0x00ef}, {0x82ec,    0x00ff},
-{0x82ed,    0x0001}, {0x82ee,    0x000f}, {0x82ef,    0x0096},
-{0x82f0,    0x0046}, {0x82f1,    0x0084}, {0x82f2,    0x000c},
-{0x82f3,    0x0081}, {0x82f4,    0x0000}, {0x82f5,    0x0027},
-{0x82f6,    0x0017}, {0x82f7,    0x00c6}, {0x82f8,    0x0049},
-{0x82f9,    0x00bd}, {0x82fa,    0x00e4}, {0x82fb,    0x0091},
-{0x82fc,    0x0024}, {0x82fd,    0x000d}, {0x82fe,    0x00b6},
-{0x82ff,    0x0012}, {0x8300,    0x0020}, {0x8301,    0x0085},
-{0x8302,    0x0020}, {0x8303,    0x0026}, {0x8304,    0x000c},
-{0x8305,    0x00ce}, {0x8306,    0x0082}, {0x8307,    0x00c1},
-{0x8308,    0x00ff}, {0x8309,    0x0001}, {0x830a,    0x000f},
-{0x830b,    0x007e}, {0x830c,    0x0084}, {0x830d,    0x0025},
-{0x830e,    0x007e}, {0x830f,    0x0084}, {0x8310,    0x0016},
-{0x8311,    0x00fe}, {0x8312,    0x008f}, {0x8313,    0x0052},
-{0x8314,    0x00bd}, {0x8315,    0x00ec}, {0x8316,    0x008e},
-{0x8317,    0x00bd}, {0x8318,    0x00fa}, {0x8319,    0x00f7},
-{0x831a,    0x0086}, {0x831b,    0x006a}, {0x831c,    0x00c6},
-{0x831d,    0x0049}, {0x831e,    0x00bd}, {0x831f,    0x00e4},
-{0x8320,    0x0012}, {0x8321,    0x00ce}, {0x8322,    0x0083},
-{0x8323,    0x0027}, {0x8324,    0x00ff}, {0x8325,    0x0001},
-{0x8326,    0x000f}, {0x8327,    0x0096}, {0x8328,    0x0046},
-{0x8329,    0x0084}, {0x832a,    0x000c}, {0x832b,    0x0081},
-{0x832c,    0x0000}, {0x832d,    0x0027}, {0x832e,    0x000a},
-{0x832f,    0x00c6}, {0x8330,    0x0049}, {0x8331,    0x00bd},
-{0x8332,    0x00e4}, {0x8333,    0x0091}, {0x8334,    0x0025},
-{0x8335,    0x0006}, {0x8336,    0x007e}, {0x8337,    0x0084},
-{0x8338,    0x0025}, {0x8339,    0x007e}, {0x833a,    0x0084},
-{0x833b,    0x0016}, {0x833c,    0x00b6}, {0x833d,    0x0018},
-{0x833e,    0x0070}, {0x833f,    0x00bb}, {0x8340,    0x0019},
-{0x8341,    0x0070}, {0x8342,    0x002a}, {0x8343,    0x0004},
-{0x8344,    0x0081}, {0x8345,    0x00af}, {0x8346,    0x002e},
-{0x8347,    0x0019}, {0x8348,    0x0096}, {0x8349,    0x007b},
-{0x834a,    0x00f6}, {0x834b,    0x0020}, {0x834c,    0x0007},
-{0x834d,    0x00fa}, {0x834e,    0x0020}, {0x834f,    0x0027},
-{0x8350,    0x00c4}, {0x8351,    0x0038}, {0x8352,    0x0081},
-{0x8353,    0x0038}, {0x8354,    0x0027}, {0x8355,    0x000b},
-{0x8356,    0x00f6}, {0x8357,    0x0020}, {0x8358,    0x0007},
-{0x8359,    0x00fa}, {0x835a,    0x0020}, {0x835b,    0x0027},
-{0x835c,    0x00cb}, {0x835d,    0x0008}, {0x835e,    0x007e},
-{0x835f,    0x0082}, {0x8360,    0x00d3}, {0x8361,    0x00bd},
-{0x8362,    0x00f7}, {0x8363,    0x0066}, {0x8364,    0x0086},
-{0x8365,    0x0074}, {0x8366,    0x00c6}, {0x8367,    0x0049},
-{0x8368,    0x00bd}, {0x8369,    0x00e4}, {0x836a,    0x0012},
-{0x836b,    0x00ce}, {0x836c,    0x0083}, {0x836d,    0x0071},
-{0x836e,    0x00ff}, {0x836f,    0x0001}, {0x8370,    0x000f},
-{0x8371,    0x0096}, {0x8372,    0x0046}, {0x8373,    0x0084},
-{0x8374,    0x000c}, {0x8375,    0x0081}, {0x8376,    0x0008},
-{0x8377,    0x0026}, {0x8378,    0x000a}, {0x8379,    0x00c6},
-{0x837a,    0x0049}, {0x837b,    0x00bd}, {0x837c,    0x00e4},
-{0x837d,    0x0091}, {0x837e,    0x0025}, {0x837f,    0x0006},
-{0x8380,    0x007e}, {0x8381,    0x0084}, {0x8382,    0x0025},
-{0x8383,    0x007e}, {0x8384,    0x0084}, {0x8385,    0x0016},
-{0x8386,    0x00bd}, {0x8387,    0x00f7}, {0x8388,    0x003e},
-{0x8389,    0x0026}, {0x838a,    0x000e}, {0x838b,    0x00bd},
-{0x838c,    0x00e5}, {0x838d,    0x0009}, {0x838e,    0x0026},
-{0x838f,    0x0006}, {0x8390,    0x00ce}, {0x8391,    0x0082},
-{0x8392,    0x00c1}, {0x8393,    0x00ff}, {0x8394,    0x0001},
-{0x8395,    0x000f}, {0x8396,    0x007e}, {0x8397,    0x0084},
-{0x8398,    0x0025}, {0x8399,    0x00fe}, {0x839a,    0x008f},
-{0x839b,    0x0054}, {0x839c,    0x00bd}, {0x839d,    0x00ec},
-{0x839e,    0x008e}, {0x839f,    0x00bd}, {0x83a0,    0x00fa},
-{0x83a1,    0x00f7}, {0x83a2,    0x00bd}, {0x83a3,    0x00f7},
-{0x83a4,    0x0033}, {0x83a5,    0x0086}, {0x83a6,    0x000f},
-{0x83a7,    0x00c6}, {0x83a8,    0x0051}, {0x83a9,    0x00bd},
-{0x83aa,    0x00e4}, {0x83ab,    0x0012}, {0x83ac,    0x00ce},
-{0x83ad,    0x0083}, {0x83ae,    0x00b2}, {0x83af,    0x00ff},
-{0x83b0,    0x0001}, {0x83b1,    0x000f}, {0x83b2,    0x0096},
-{0x83b3,    0x0046}, {0x83b4,    0x0084}, {0x83b5,    0x000c},
-{0x83b6,    0x0081}, {0x83b7,    0x0008}, {0x83b8,    0x0026},
-{0x83b9,    0x005c}, {0x83ba,    0x00b6}, {0x83bb,    0x0012},
-{0x83bc,    0x0020}, {0x83bd,    0x0084}, {0x83be,    0x003f},
-{0x83bf,    0x0081}, {0x83c0,    0x003a}, {0x83c1,    0x0027},
-{0x83c2,    0x001c}, {0x83c3,    0x0096}, {0x83c4,    0x0023},
-{0x83c5,    0x0085}, {0x83c6,    0x0040}, {0x83c7,    0x0027},
-{0x83c8,    0x0003}, {0x83c9,    0x007e}, {0x83ca,    0x0084},
-{0x83cb,    0x0025}, {0x83cc,    0x00c6}, {0x83cd,    0x0051},
-{0x83ce,    0x00bd}, {0x83cf,    0x00e4}, {0x83d0,    0x0091},
-{0x83d1,    0x0025}, {0x83d2,    0x0003}, {0x83d3,    0x007e},
-{0x83d4,    0x0084}, {0x83d5,    0x0025}, {0x83d6,    0x00ce},
-{0x83d7,    0x0082}, {0x83d8,    0x00c1}, {0x83d9,    0x00ff},
-{0x83da,    0x0001}, {0x83db,    0x000f}, {0x83dc,    0x007e},
-{0x83dd,    0x0084}, {0x83de,    0x0025}, {0x83df,    0x00bd},
-{0x83e0,    0x00f8}, {0x83e1,    0x0037}, {0x83e2,    0x007c},
-{0x83e3,    0x0000}, {0x83e4,    0x007a}, {0x83e5,    0x00ce},
-{0x83e6,    0x0083}, {0x83e7,    0x00ee}, {0x83e8,    0x00ff},
-{0x83e9,    0x0001}, {0x83ea,    0x000f}, {0x83eb,    0x007e},
-{0x83ec,    0x0084}, {0x83ed,    0x0025}, {0x83ee,    0x0096},
-{0x83ef,    0x0046}, {0x83f0,    0x0084}, {0x83f1,    0x000c},
-{0x83f2,    0x0081}, {0x83f3,    0x0008}, {0x83f4,    0x0026},
-{0x83f5,    0x0020}, {0x83f6,    0x0096}, {0x83f7,    0x0024},
-{0x83f8,    0x0084}, {0x83f9,    0x0008}, {0x83fa,    0x0026},
-{0x83fb,    0x0029}, {0x83fc,    0x00b6}, {0x83fd,    0x0018},
-{0x83fe,    0x0082}, {0x83ff,    0x00bb}, {0x8400,    0x0019},
-{0x8401,    0x0082}, {0x8402,    0x00b1}, {0x8403,    0x0001},
-{0x8404,    0x003b}, {0x8405,    0x0022}, {0x8406,    0x0009},
-{0x8407,    0x00b6}, {0x8408,    0x0012}, {0x8409,    0x0020},
-{0x840a,    0x0084}, {0x840b,    0x0037}, {0x840c,    0x0081},
-{0x840d,    0x0032}, {0x840e,    0x0027}, {0x840f,    0x0015},
-{0x8410,    0x00bd}, {0x8411,    0x00f8}, {0x8412,    0x0044},
-{0x8413,    0x007e}, {0x8414,    0x0082}, {0x8415,    0x00c1},
-{0x8416,    0x00bd}, {0x8417,    0x00f7}, {0x8418,    0x001f},
-{0x8419,    0x00bd}, {0x841a,    0x00f8}, {0x841b,    0x0044},
-{0x841c,    0x00bd}, {0x841d,    0x00fc}, {0x841e,    0x0029},
-{0x841f,    0x00ce}, {0x8420,    0x0082}, {0x8421,    0x0025},
-{0x8422,    0x00ff}, {0x8423,    0x0001}, {0x8424,    0x000f},
-{0x8425,    0x0039}, {0x8426,    0x0096}, {0x8427,    0x0047},
-{0x8428,    0x0084}, {0x8429,    0x00fc}, {0x842a,    0x008a},
-{0x842b,    0x0000}, {0x842c,    0x0097}, {0x842d,    0x0047},
-{0x842e,    0x00ce}, {0x842f,    0x0084}, {0x8430,    0x0034},
-{0x8431,    0x00ff}, {0x8432,    0x0001}, {0x8433,    0x0011},
-{0x8434,    0x0096}, {0x8435,    0x0046}, {0x8436,    0x0084},
-{0x8437,    0x0003}, {0x8438,    0x0081}, {0x8439,    0x0002},
-{0x843a,    0x0027}, {0x843b,    0x0003}, {0x843c,    0x007e},
-{0x843d,    0x0085}, {0x843e,    0x001e}, {0x843f,    0x0096},
-{0x8440,    0x0047}, {0x8441,    0x0084}, {0x8442,    0x00fc},
-{0x8443,    0x008a}, {0x8444,    0x0002}, {0x8445,    0x0097},
-{0x8446,    0x0047}, {0x8447,    0x00de}, {0x8448,    0x00e1},
-{0x8449,    0x00ad}, {0x844a,    0x0000}, {0x844b,    0x0086},
-{0x844c,    0x0001}, {0x844d,    0x00b7}, {0x844e,    0x0012},
-{0x844f,    0x0051}, {0x8450,    0x00bd}, {0x8451,    0x00f7},
-{0x8452,    0x0014}, {0x8453,    0x00b6}, {0x8454,    0x0010},
-{0x8455,    0x0031}, {0x8456,    0x0084}, {0x8457,    0x00fd},
-{0x8458,    0x00b7}, {0x8459,    0x0010}, {0x845a,    0x0031},
-{0x845b,    0x00bd}, {0x845c,    0x00f8}, {0x845d,    0x001e},
-{0x845e,    0x0096}, {0x845f,    0x0081}, {0x8460,    0x00d6},
-{0x8461,    0x0082}, {0x8462,    0x00fe}, {0x8463,    0x008f},
-{0x8464,    0x005a}, {0x8465,    0x00bd}, {0x8466,    0x00f7},
-{0x8467,    0x00b6}, {0x8468,    0x00fe}, {0x8469,    0x008f},
-{0x846a,    0x005c}, {0x846b,    0x00bd}, {0x846c,    0x00ec},
-{0x846d,    0x008e}, {0x846e,    0x00bd}, {0x846f,    0x00fa},
-{0x8470,    0x00f7}, {0x8471,    0x0086}, {0x8472,    0x0008},
-{0x8473,    0x00d6}, {0x8474,    0x0000}, {0x8475,    0x00c5},
-{0x8476,    0x0010}, {0x8477,    0x0026}, {0x8478,    0x0002},
-{0x8479,    0x008b}, {0x847a,    0x0020}, {0x847b,    0x00c6},
-{0x847c,    0x0051}, {0x847d,    0x00bd}, {0x847e,    0x00e4},
-{0x847f,    0x0012}, {0x8480,    0x00ce}, {0x8481,    0x0084},
-{0x8482,    0x0086}, {0x8483,    0x00ff}, {0x8484,    0x0001},
-{0x8485,    0x0011}, {0x8486,    0x0096}, {0x8487,    0x0046},
-{0x8488,    0x0084}, {0x8489,    0x0003}, {0x848a,    0x0081},
-{0x848b,    0x0002}, {0x848c,    0x0027}, {0x848d,    0x0003},
-{0x848e,    0x007e}, {0x848f,    0x0085}, {0x8490,    0x000f},
-{0x8491,    0x00c6}, {0x8492,    0x0051}, {0x8493,    0x00bd},
-{0x8494,    0x00e4}, {0x8495,    0x0091}, {0x8496,    0x0025},
-{0x8497,    0x0003}, {0x8498,    0x007e}, {0x8499,    0x0085},
-{0x849a,    0x001e}, {0x849b,    0x0096}, {0x849c,    0x0044},
-{0x849d,    0x0085}, {0x849e,    0x0010}, {0x849f,    0x0026},
-{0x84a0,    0x000a}, {0x84a1,    0x00b6}, {0x84a2,    0x0012},
-{0x84a3,    0x0050}, {0x84a4,    0x00ba}, {0x84a5,    0x0001},
-{0x84a6,    0x003c}, {0x84a7,    0x0085}, {0x84a8,    0x0010},
-{0x84a9,    0x0027}, {0x84aa,    0x00a8}, {0x84ab,    0x00bd},
-{0x84ac,    0x00f7}, {0x84ad,    0x0066}, {0x84ae,    0x00ce},
-{0x84af,    0x0084}, {0x84b0,    0x00b7}, {0x84b1,    0x00ff},
-{0x84b2,    0x0001}, {0x84b3,    0x0011}, {0x84b4,    0x007e},
-{0x84b5,    0x0085}, {0x84b6,    0x001e}, {0x84b7,    0x0096},
-{0x84b8,    0x0046}, {0x84b9,    0x0084}, {0x84ba,    0x0003},
-{0x84bb,    0x0081}, {0x84bc,    0x0002}, {0x84bd,    0x0026},
-{0x84be,    0x0050}, {0x84bf,    0x00b6}, {0x84c0,    0x0012},
-{0x84c1,    0x0030}, {0x84c2,    0x0084}, {0x84c3,    0x0003},
-{0x84c4,    0x0081}, {0x84c5,    0x0001}, {0x84c6,    0x0027},
-{0x84c7,    0x0003}, {0x84c8,    0x007e}, {0x84c9,    0x0085},
-{0x84ca,    0x001e}, {0x84cb,    0x0096}, {0x84cc,    0x0044},
-{0x84cd,    0x0085}, {0x84ce,    0x0010}, {0x84cf,    0x0026},
-{0x84d0,    0x0013}, {0x84d1,    0x00b6}, {0x84d2,    0x0012},
-{0x84d3,    0x0050}, {0x84d4,    0x00ba}, {0x84d5,    0x0001},
-{0x84d6,    0x003c}, {0x84d7,    0x0085}, {0x84d8,    0x0010},
-{0x84d9,    0x0026}, {0x84da,    0x0009}, {0x84db,    0x00ce},
-{0x84dc,    0x0084}, {0x84dd,    0x0053}, {0x84de,    0x00ff},
-{0x84df,    0x0001}, {0x84e0,    0x0011}, {0x84e1,    0x007e},
-{0x84e2,    0x0085}, {0x84e3,    0x001e}, {0x84e4,    0x00b6},
-{0x84e5,    0x0010}, {0x84e6,    0x0031}, {0x84e7,    0x008a},
-{0x84e8,    0x0002}, {0x84e9,    0x00b7}, {0x84ea,    0x0010},
-{0x84eb,    0x0031}, {0x84ec,    0x00bd}, {0x84ed,    0x0085},
-{0x84ee,    0x001f}, {0x84ef,    0x00bd}, {0x84f0,    0x00f8},
-{0x84f1,    0x0037}, {0x84f2,    0x007c}, {0x84f3,    0x0000},
-{0x84f4,    0x0080}, {0x84f5,    0x00ce}, {0x84f6,    0x0084},
-{0x84f7,    0x00fe}, {0x84f8,    0x00ff}, {0x84f9,    0x0001},
-{0x84fa,    0x0011}, {0x84fb,    0x007e}, {0x84fc,    0x0085},
-{0x84fd,    0x001e}, {0x84fe,    0x0096}, {0x84ff,    0x0046},
-{0x8500,    0x0084}, {0x8501,    0x0003}, {0x8502,    0x0081},
-{0x8503,    0x0002}, {0x8504,    0x0026}, {0x8505,    0x0009},
-{0x8506,    0x00b6}, {0x8507,    0x0012}, {0x8508,    0x0030},
-{0x8509,    0x0084}, {0x850a,    0x0003}, {0x850b,    0x0081},
-{0x850c,    0x0001}, {0x850d,    0x0027}, {0x850e,    0x000f},
-{0x850f,    0x00bd}, {0x8510,    0x00f8}, {0x8511,    0x0044},
-{0x8512,    0x00bd}, {0x8513,    0x00f7}, {0x8514,    0x000b},
-{0x8515,    0x00bd}, {0x8516,    0x00fc}, {0x8517,    0x0029},
-{0x8518,    0x00ce}, {0x8519,    0x0084}, {0x851a,    0x0026},
-{0x851b,    0x00ff}, {0x851c,    0x0001}, {0x851d,    0x0011},
-{0x851e,    0x0039}, {0x851f,    0x00d6}, {0x8520,    0x0022},
-{0x8521,    0x00c4}, {0x8522,    0x000f}, {0x8523,    0x00b6},
-{0x8524,    0x0012}, {0x8525,    0x0030}, {0x8526,    0x00ba},
-{0x8527,    0x0012}, {0x8528,    0x0032}, {0x8529,    0x0084},
-{0x852a,    0x0004}, {0x852b,    0x0027}, {0x852c,    0x000d},
-{0x852d,    0x0096}, {0x852e,    0x0022}, {0x852f,    0x0085},
-{0x8530,    0x0004}, {0x8531,    0x0027}, {0x8532,    0x0005},
-{0x8533,    0x00ca}, {0x8534,    0x0010}, {0x8535,    0x007e},
-{0x8536,    0x0085}, {0x8537,    0x003a}, {0x8538,    0x00ca},
-{0x8539,    0x0020}, {0x853a,    0x00d7}, {0x853b,    0x0022},
-{0x853c,    0x0039}, {0x853d,    0x0086}, {0x853e,    0x0000},
-{0x853f,    0x0097}, {0x8540,    0x0083}, {0x8541,    0x0018},
-{0x8542,    0x00ce}, {0x8543,    0x001c}, {0x8544,    0x0000},
-{0x8545,    0x00bd}, {0x8546,    0x00eb}, {0x8547,    0x0046},
-{0x8548,    0x0096}, {0x8549,    0x0057}, {0x854a,    0x0085},
-{0x854b,    0x0001}, {0x854c,    0x0027}, {0x854d,    0x0002},
-{0x854e,    0x004f}, {0x854f,    0x0039}, {0x8550,    0x0085},
-{0x8551,    0x0002}, {0x8552,    0x0027}, {0x8553,    0x0001},
-{0x8554,    0x0039}, {0x8555,    0x007f}, {0x8556,    0x008f},
-{0x8557,    0x007d}, {0x8558,    0x0086}, {0x8559,    0x0004},
-{0x855a,    0x00b7}, {0x855b,    0x0012}, {0x855c,    0x0004},
-{0x855d,    0x0086}, {0x855e,    0x0008}, {0x855f,    0x00b7},
-{0x8560,    0x0012}, {0x8561,    0x0007}, {0x8562,    0x0086},
-{0x8563,    0x0010}, {0x8564,    0x00b7}, {0x8565,    0x0012},
-{0x8566,    0x000c}, {0x8567,    0x0086}, {0x8568,    0x0007},
-{0x8569,    0x00b7}, {0x856a,    0x0012}, {0x856b,    0x0006},
-{0x856c,    0x00b6}, {0x856d,    0x008f}, {0x856e,    0x007d},
-{0x856f,    0x00b7}, {0x8570,    0x0012}, {0x8571,    0x0070},
-{0x8572,    0x0086}, {0x8573,    0x0001}, {0x8574,    0x00ba},
-{0x8575,    0x0012}, {0x8576,    0x0004}, {0x8577,    0x00b7},
-{0x8578,    0x0012}, {0x8579,    0x0004}, {0x857a,    0x0001},
-{0x857b,    0x0001}, {0x857c,    0x0001}, {0x857d,    0x0001},
-{0x857e,    0x0001}, {0x857f,    0x0001}, {0x8580,    0x00b6},
-{0x8581,    0x0012}, {0x8582,    0x0004}, {0x8583,    0x0084},
-{0x8584,    0x00fe}, {0x8585,    0x008a}, {0x8586,    0x0002},
-{0x8587,    0x00b7}, {0x8588,    0x0012}, {0x8589,    0x0004},
-{0x858a,    0x0001}, {0x858b,    0x0001}, {0x858c,    0x0001},
-{0x858d,    0x0001}, {0x858e,    0x0001}, {0x858f,    0x0001},
-{0x8590,    0x0086}, {0x8591,    0x00fd}, {0x8592,    0x00b4},
-{0x8593,    0x0012}, {0x8594,    0x0004}, {0x8595,    0x00b7},
-{0x8596,    0x0012}, {0x8597,    0x0004}, {0x8598,    0x00b6},
-{0x8599,    0x0012}, {0x859a,    0x0000}, {0x859b,    0x0084},
-{0x859c,    0x0008}, {0x859d,    0x0081}, {0x859e,    0x0008},
-{0x859f,    0x0027}, {0x85a0,    0x0016}, {0x85a1,    0x00b6},
-{0x85a2,    0x008f}, {0x85a3,    0x007d}, {0x85a4,    0x0081},
-{0x85a5,    0x000c}, {0x85a6,    0x0027}, {0x85a7,    0x0008},
-{0x85a8,    0x008b}, {0x85a9,    0x0004}, {0x85aa,    0x00b7},
-{0x85ab,    0x008f}, {0x85ac,    0x007d}, {0x85ad,    0x007e},
-{0x85ae,    0x0085}, {0x85af,    0x006c}, {0x85b0,    0x0086},
-{0x85b1,    0x0003}, {0x85b2,    0x0097}, {0x85b3,    0x0040},
-{0x85b4,    0x007e}, {0x85b5,    0x0089}, {0x85b6,    0x006e},
-{0x85b7,    0x0086}, {0x85b8,    0x0007}, {0x85b9,    0x00b7},
-{0x85ba,    0x0012}, {0x85bb,    0x0006}, {0x85bc,    0x005f},
-{0x85bd,    0x00f7}, {0x85be,    0x008f}, {0x85bf,    0x0082},
-{0x85c0,    0x005f}, {0x85c1,    0x00f7}, {0x85c2,    0x008f},
-{0x85c3,    0x007f}, {0x85c4,    0x00f7}, {0x85c5,    0x008f},
-{0x85c6,    0x0070}, {0x85c7,    0x00f7}, {0x85c8,    0x008f},
-{0x85c9,    0x0071}, {0x85ca,    0x00f7}, {0x85cb,    0x008f},
-{0x85cc,    0x0072}, {0x85cd,    0x00f7}, {0x85ce,    0x008f},
-{0x85cf,    0x0073}, {0x85d0,    0x00f7}, {0x85d1,    0x008f},
-{0x85d2,    0x0074}, {0x85d3,    0x00f7}, {0x85d4,    0x008f},
-{0x85d5,    0x0075}, {0x85d6,    0x00f7}, {0x85d7,    0x008f},
-{0x85d8,    0x0076}, {0x85d9,    0x00f7}, {0x85da,    0x008f},
-{0x85db,    0x0077}, {0x85dc,    0x00f7}, {0x85dd,    0x008f},
-{0x85de,    0x0078}, {0x85df,    0x00f7}, {0x85e0,    0x008f},
-{0x85e1,    0x0079}, {0x85e2,    0x00f7}, {0x85e3,    0x008f},
-{0x85e4,    0x007a}, {0x85e5,    0x00f7}, {0x85e6,    0x008f},
-{0x85e7,    0x007b}, {0x85e8,    0x00b6}, {0x85e9,    0x0012},
-{0x85ea,    0x0004}, {0x85eb,    0x008a}, {0x85ec,    0x0010},
-{0x85ed,    0x00b7}, {0x85ee,    0x0012}, {0x85ef,    0x0004},
-{0x85f0,    0x0086}, {0x85f1,    0x00e4}, {0x85f2,    0x00b7},
-{0x85f3,    0x0012}, {0x85f4,    0x0070}, {0x85f5,    0x00b7},
-{0x85f6,    0x0012}, {0x85f7,    0x0007}, {0x85f8,    0x00f7},
-{0x85f9,    0x0012}, {0x85fa,    0x0005}, {0x85fb,    0x00f7},
-{0x85fc,    0x0012}, {0x85fd,    0x0009}, {0x85fe,    0x0086},
-{0x85ff,    0x0008}, {0x8600,    0x00ba}, {0x8601,    0x0012},
-{0x8602,    0x0004}, {0x8603,    0x00b7}, {0x8604,    0x0012},
-{0x8605,    0x0004}, {0x8606,    0x0086}, {0x8607,    0x00f7},
-{0x8608,    0x00b4}, {0x8609,    0x0012}, {0x860a,    0x0004},
-{0x860b,    0x00b7}, {0x860c,    0x0012}, {0x860d,    0x0004},
-{0x860e,    0x0001}, {0x860f,    0x0001}, {0x8610,    0x0001},
-{0x8611,    0x0001}, {0x8612,    0x0001}, {0x8613,    0x0001},
-{0x8614,    0x00b6}, {0x8615,    0x0012}, {0x8616,    0x0008},
-{0x8617,    0x0027}, {0x8618,    0x007f}, {0x8619,    0x0081},
-{0x861a,    0x0080}, {0x861b,    0x0026}, {0x861c,    0x000b},
-{0x861d,    0x0086}, {0x861e,    0x0008}, {0x861f,    0x00ce},
-{0x8620,    0x008f}, {0x8621,    0x0079}, {0x8622,    0x00bd},
-{0x8623,    0x0089}, {0x8624,    0x007b}, {0x8625,    0x007e},
-{0x8626,    0x0086}, {0x8627,    0x008e}, {0x8628,    0x0081},
-{0x8629,    0x0040}, {0x862a,    0x0026}, {0x862b,    0x000b},
-{0x862c,    0x0086}, {0x862d,    0x0004}, {0x862e,    0x00ce},
-{0x862f,    0x008f}, {0x8630,    0x0076}, {0x8631,    0x00bd},
-{0x8632,    0x0089}, {0x8633,    0x007b}, {0x8634,    0x007e},
-{0x8635,    0x0086}, {0x8636,    0x008e}, {0x8637,    0x0081},
-{0x8638,    0x0020}, {0x8639,    0x0026}, {0x863a,    0x000b},
-{0x863b,    0x0086}, {0x863c,    0x0002}, {0x863d,    0x00ce},
-{0x863e,    0x008f}, {0x863f,    0x0073}, {0x8640,    0x00bd},
-{0x8641,    0x0089}, {0x8642,    0x007b}, {0x8643,    0x007e},
-{0x8644,    0x0086}, {0x8645,    0x008e}, {0x8646,    0x0081},
-{0x8647,    0x0010}, {0x8648,    0x0026}, {0x8649,    0x000b},
-{0x864a,    0x0086}, {0x864b,    0x0001}, {0x864c,    0x00ce},
-{0x864d,    0x008f}, {0x864e,    0x0070}, {0x864f,    0x00bd},
-{0x8650,    0x0089}, {0x8651,    0x007b}, {0x8652,    0x007e},
-{0x8653,    0x0086}, {0x8654,    0x008e}, {0x8655,    0x0081},
-{0x8656,    0x0008}, {0x8657,    0x0026}, {0x8658,    0x000b},
-{0x8659,    0x0086}, {0x865a,    0x0008}, {0x865b,    0x00ce},
-{0x865c,    0x008f}, {0x865d,    0x0079}, {0x865e,    0x00bd},
-{0x865f,    0x0089}, {0x8660,    0x007f}, {0x8661,    0x007e},
-{0x8662,    0x0086}, {0x8663,    0x008e}, {0x8664,    0x0081},
-{0x8665,    0x0004}, {0x8666,    0x0026}, {0x8667,    0x000b},
-{0x8668,    0x0086}, {0x8669,    0x0004}, {0x866a,    0x00ce},
-{0x866b,    0x008f}, {0x866c,    0x0076}, {0x866d,    0x00bd},
-{0x866e,    0x0089}, {0x866f,    0x007f}, {0x8670,    0x007e},
-{0x8671,    0x0086}, {0x8672,    0x008e}, {0x8673,    0x0081},
-{0x8674,    0x0002}, {0x8675,    0x0026}, {0x8676,    0x000b},
-{0x8677,    0x008a}, {0x8678,    0x0002}, {0x8679,    0x00ce},
-{0x867a,    0x008f}, {0x867b,    0x0073}, {0x867c,    0x00bd},
-{0x867d,    0x0089}, {0x867e,    0x007f}, {0x867f,    0x007e},
-{0x8680,    0x0086}, {0x8681,    0x008e}, {0x8682,    0x0081},
-{0x8683,    0x0001}, {0x8684,    0x0026}, {0x8685,    0x0008},
-{0x8686,    0x0086}, {0x8687,    0x0001}, {0x8688,    0x00ce},
-{0x8689,    0x008f}, {0x868a,    0x0070}, {0x868b,    0x00bd},
-{0x868c,    0x0089}, {0x868d,    0x007f}, {0x868e,    0x00b6},
-{0x868f,    0x008f}, {0x8690,    0x007f}, {0x8691,    0x0081},
-{0x8692,    0x000f}, {0x8693,    0x0026}, {0x8694,    0x0003},
-{0x8695,    0x007e}, {0x8696,    0x0087}, {0x8697,    0x0047},
-{0x8698,    0x00b6}, {0x8699,    0x0012}, {0x869a,    0x0009},
-{0x869b,    0x0084}, {0x869c,    0x0003}, {0x869d,    0x0081},
-{0x869e,    0x0003}, {0x869f,    0x0027}, {0x86a0,    0x0006},
-{0x86a1,    0x007c}, {0x86a2,    0x0012}, {0x86a3,    0x0009},
-{0x86a4,    0x007e}, {0x86a5,    0x0085}, {0x86a6,    0x00fe},
-{0x86a7,    0x00b6}, {0x86a8,    0x0012}, {0x86a9,    0x0006},
-{0x86aa,    0x0084}, {0x86ab,    0x0007}, {0x86ac,    0x0081},
-{0x86ad,    0x0007}, {0x86ae,    0x0027}, {0x86af,    0x0008},
-{0x86b0,    0x008b}, {0x86b1,    0x0001}, {0x86b2,    0x00b7},
-{0x86b3,    0x0012}, {0x86b4,    0x0006}, {0x86b5,    0x007e},
-{0x86b6,    0x0086}, {0x86b7,    0x00d5}, {0x86b8,    0x00b6},
-{0x86b9,    0x008f}, {0x86ba,    0x0082}, {0x86bb,    0x0026},
-{0x86bc,    0x000a}, {0x86bd,    0x007c}, {0x86be,    0x008f},
-{0x86bf,    0x0082}, {0x86c0,    0x004f}, {0x86c1,    0x00b7},
-{0x86c2,    0x0012}, {0x86c3,    0x0006}, {0x86c4,    0x007e},
-{0x86c5,    0x0085}, {0x86c6,    0x00c0}, {0x86c7,    0x00b6},
-{0x86c8,    0x0012}, {0x86c9,    0x0006}, {0x86ca,    0x0084},
-{0x86cb,    0x003f}, {0x86cc,    0x0081}, {0x86cd,    0x003f},
-{0x86ce,    0x0027}, {0x86cf,    0x0010}, {0x86d0,    0x008b},
-{0x86d1,    0x0008}, {0x86d2,    0x00b7}, {0x86d3,    0x0012},
-{0x86d4,    0x0006}, {0x86d5,    0x00b6}, {0x86d6,    0x0012},
-{0x86d7,    0x0009}, {0x86d8,    0x0084}, {0x86d9,    0x00fc},
-{0x86da,    0x00b7}, {0x86db,    0x0012}, {0x86dc,    0x0009},
-{0x86dd,    0x007e}, {0x86de,    0x0085}, {0x86df,    0x00fe},
-{0x86e0,    0x00ce}, {0x86e1,    0x008f}, {0x86e2,    0x0070},
-{0x86e3,    0x0018}, {0x86e4,    0x00ce}, {0x86e5,    0x008f},
-{0x86e6,    0x0084}, {0x86e7,    0x00c6}, {0x86e8,    0x000c},
-{0x86e9,    0x00bd}, {0x86ea,    0x0089}, {0x86eb,    0x006f},
-{0x86ec,    0x00ce}, {0x86ed,    0x008f}, {0x86ee,    0x0084},
-{0x86ef,    0x0018}, {0x86f0,    0x00ce}, {0x86f1,    0x008f},
-{0x86f2,    0x0070}, {0x86f3,    0x00c6}, {0x86f4,    0x000c},
-{0x86f5,    0x00bd}, {0x86f6,    0x0089}, {0x86f7,    0x006f},
-{0x86f8,    0x00d6}, {0x86f9,    0x0083}, {0x86fa,    0x00c1},
-{0x86fb,    0x004f}, {0x86fc,    0x002d}, {0x86fd,    0x0003},
-{0x86fe,    0x007e}, {0x86ff,    0x0087}, {0x8700,    0x0040},
-{0x8701,    0x00b6}, {0x8702,    0x008f}, {0x8703,    0x007f},
-{0x8704,    0x0081}, {0x8705,    0x0007}, {0x8706,    0x0027},
-{0x8707,    0x000f}, {0x8708,    0x0081}, {0x8709,    0x000b},
-{0x870a,    0x0027}, {0x870b,    0x0015}, {0x870c,    0x0081},
-{0x870d,    0x000d}, {0x870e,    0x0027}, {0x870f,    0x001b},
-{0x8710,    0x0081}, {0x8711,    0x000e}, {0x8712,    0x0027},
-{0x8713,    0x0021}, {0x8714,    0x007e}, {0x8715,    0x0087},
-{0x8716,    0x0040}, {0x8717,    0x00f7}, {0x8718,    0x008f},
-{0x8719,    0x007b}, {0x871a,    0x0086}, {0x871b,    0x0002},
-{0x871c,    0x00b7}, {0x871d,    0x008f}, {0x871e,    0x007a},
-{0x871f,    0x0020}, {0x8720,    0x001c}, {0x8721,    0x00f7},
-{0x8722,    0x008f}, {0x8723,    0x0078}, {0x8724,    0x0086},
-{0x8725,    0x0002}, {0x8726,    0x00b7}, {0x8727,    0x008f},
-{0x8728,    0x0077}, {0x8729,    0x0020}, {0x872a,    0x0012},
-{0x872b,    0x00f7}, {0x872c,    0x008f}, {0x872d,    0x0075},
-{0x872e,    0x0086}, {0x872f,    0x0002}, {0x8730,    0x00b7},
-{0x8731,    0x008f}, {0x8732,    0x0074}, {0x8733,    0x0020},
-{0x8734,    0x0008}, {0x8735,    0x00f7}, {0x8736,    0x008f},
-{0x8737,    0x0072}, {0x8738,    0x0086}, {0x8739,    0x0002},
-{0x873a,    0x00b7}, {0x873b,    0x008f}, {0x873c,    0x0071},
-{0x873d,    0x007e}, {0x873e,    0x0087}, {0x873f,    0x0047},
-{0x8740,    0x0086}, {0x8741,    0x0004}, {0x8742,    0x0097},
-{0x8743,    0x0040}, {0x8744,    0x007e}, {0x8745,    0x0089},
-{0x8746,    0x006e}, {0x8747,    0x00ce}, {0x8748,    0x008f},
-{0x8749,    0x0072}, {0x874a,    0x00bd}, {0x874b,    0x0089},
-{0x874c,    0x00f7}, {0x874d,    0x00ce}, {0x874e,    0x008f},
-{0x874f,    0x0075}, {0x8750,    0x00bd}, {0x8751,    0x0089},
-{0x8752,    0x00f7}, {0x8753,    0x00ce}, {0x8754,    0x008f},
-{0x8755,    0x0078}, {0x8756,    0x00bd}, {0x8757,    0x0089},
-{0x8758,    0x00f7}, {0x8759,    0x00ce}, {0x875a,    0x008f},
-{0x875b,    0x007b}, {0x875c,    0x00bd}, {0x875d,    0x0089},
-{0x875e,    0x00f7}, {0x875f,    0x004f}, {0x8760,    0x00b7},
-{0x8761,    0x008f}, {0x8762,    0x007d}, {0x8763,    0x00b7},
-{0x8764,    0x008f}, {0x8765,    0x0081}, {0x8766,    0x00b6},
-{0x8767,    0x008f}, {0x8768,    0x0072}, {0x8769,    0x0027},
-{0x876a,    0x0047}, {0x876b,    0x007c}, {0x876c,    0x008f},
-{0x876d,    0x007d}, {0x876e,    0x00b6}, {0x876f,    0x008f},
-{0x8770,    0x0075}, {0x8771,    0x0027}, {0x8772,    0x003f},
-{0x8773,    0x007c}, {0x8774,    0x008f}, {0x8775,    0x007d},
-{0x8776,    0x00b6}, {0x8777,    0x008f}, {0x8778,    0x0078},
-{0x8779,    0x0027}, {0x877a,    0x0037}, {0x877b,    0x007c},
-{0x877c,    0x008f}, {0x877d,    0x007d}, {0x877e,    0x00b6},
-{0x877f,    0x008f}, {0x8780,    0x007b}, {0x8781,    0x0027},
-{0x8782,    0x002f}, {0x8783,    0x007f}, {0x8784,    0x008f},
-{0x8785,    0x007d}, {0x8786,    0x007c}, {0x8787,    0x008f},
-{0x8788,    0x0081}, {0x8789,    0x007a}, {0x878a,    0x008f},
-{0x878b,    0x0072}, {0x878c,    0x0027}, {0x878d,    0x001b},
-{0x878e,    0x007c}, {0x878f,    0x008f}, {0x8790,    0x007d},
-{0x8791,    0x007a}, {0x8792,    0x008f}, {0x8793,    0x0075},
-{0x8794,    0x0027}, {0x8795,    0x0016}, {0x8796,    0x007c},
-{0x8797,    0x008f}, {0x8798,    0x007d}, {0x8799,    0x007a},
-{0x879a,    0x008f}, {0x879b,    0x0078}, {0x879c,    0x0027},
-{0x879d,    0x0011}, {0x879e,    0x007c}, {0x879f,    0x008f},
-{0x87a0,    0x007d}, {0x87a1,    0x007a}, {0x87a2,    0x008f},
-{0x87a3,    0x007b}, {0x87a4,    0x0027}, {0x87a5,    0x000c},
-{0x87a6,    0x007e}, {0x87a7,    0x0087}, {0x87a8,    0x0083},
-{0x87a9,    0x007a}, {0x87aa,    0x008f}, {0x87ab,    0x0075},
-{0x87ac,    0x007a}, {0x87ad,    0x008f}, {0x87ae,    0x0078},
-{0x87af,    0x007a}, {0x87b0,    0x008f}, {0x87b1,    0x007b},
-{0x87b2,    0x00ce}, {0x87b3,    0x00c1}, {0x87b4,    0x00fc},
-{0x87b5,    0x00f6}, {0x87b6,    0x008f}, {0x87b7,    0x007d},
-{0x87b8,    0x003a}, {0x87b9,    0x00a6}, {0x87ba,    0x0000},
-{0x87bb,    0x00b7}, {0x87bc,    0x0012}, {0x87bd,    0x0070},
-{0x87be,    0x00b6}, {0x87bf,    0x008f}, {0x87c0,    0x0072},
-{0x87c1,    0x0026}, {0x87c2,    0x0003}, {0x87c3,    0x007e},
-{0x87c4,    0x0087}, {0x87c5,    0x00fa}, {0x87c6,    0x00b6},
-{0x87c7,    0x008f}, {0x87c8,    0x0075}, {0x87c9,    0x0026},
-{0x87ca,    0x000a}, {0x87cb,    0x0018}, {0x87cc,    0x00ce},
-{0x87cd,    0x008f}, {0x87ce,    0x0073}, {0x87cf,    0x00bd},
-{0x87d0,    0x0089}, {0x87d1,    0x00d5}, {0x87d2,    0x007e},
-{0x87d3,    0x0087}, {0x87d4,    0x00fa}, {0x87d5,    0x00b6},
-{0x87d6,    0x008f}, {0x87d7,    0x0078}, {0x87d8,    0x0026},
-{0x87d9,    0x000a}, {0x87da,    0x0018}, {0x87db,    0x00ce},
-{0x87dc,    0x008f}, {0x87dd,    0x0076}, {0x87de,    0x00bd},
-{0x87df,    0x0089}, {0x87e0,    0x00d5}, {0x87e1,    0x007e},
-{0x87e2,    0x0087}, {0x87e3,    0x00fa}, {0x87e4,    0x00b6},
-{0x87e5,    0x008f}, {0x87e6,    0x007b}, {0x87e7,    0x0026},
-{0x87e8,    0x000a}, {0x87e9,    0x0018}, {0x87ea,    0x00ce},
-{0x87eb,    0x008f}, {0x87ec,    0x0079}, {0x87ed,    0x00bd},
-{0x87ee,    0x0089}, {0x87ef,    0x00d5}, {0x87f0,    0x007e},
-{0x87f1,    0x0087}, {0x87f2,    0x00fa}, {0x87f3,    0x0086},
-{0x87f4,    0x0005}, {0x87f5,    0x0097}, {0x87f6,    0x0040},
-{0x87f7,    0x007e}, {0x87f8,    0x0089}, {0x87f9,    0x0000},
-{0x87fa,    0x00b6}, {0x87fb,    0x008f}, {0x87fc,    0x0075},
-{0x87fd,    0x0081}, {0x87fe,    0x0007}, {0x87ff,    0x002e},
-{0x8800,    0x00f2}, {0x8801,    0x00f6}, {0x8802,    0x0012},
-{0x8803,    0x0006}, {0x8804,    0x00c4}, {0x8805,    0x00f8},
-{0x8806,    0x001b}, {0x8807,    0x00b7}, {0x8808,    0x0012},
-{0x8809,    0x0006}, {0x880a,    0x00b6}, {0x880b,    0x008f},
-{0x880c,    0x0078}, {0x880d,    0x0081}, {0x880e,    0x0007},
-{0x880f,    0x002e}, {0x8810,    0x00e2}, {0x8811,    0x0048},
-{0x8812,    0x0048}, {0x8813,    0x0048}, {0x8814,    0x00f6},
-{0x8815,    0x0012}, {0x8816,    0x0006}, {0x8817,    0x00c4},
-{0x8818,    0x00c7}, {0x8819,    0x001b}, {0x881a,    0x00b7},
-{0x881b,    0x0012}, {0x881c,    0x0006}, {0x881d,    0x00b6},
-{0x881e,    0x008f}, {0x881f,    0x007b}, {0x8820,    0x0081},
-{0x8821,    0x0007}, {0x8822,    0x002e}, {0x8823,    0x00cf},
-{0x8824,    0x00f6}, {0x8825,    0x0012}, {0x8826,    0x0005},
-{0x8827,    0x00c4}, {0x8828,    0x00f8}, {0x8829,    0x001b},
-{0x882a,    0x00b7}, {0x882b,    0x0012}, {0x882c,    0x0005},
-{0x882d,    0x0086}, {0x882e,    0x0000}, {0x882f,    0x00f6},
-{0x8830,    0x008f}, {0x8831,    0x0071}, {0x8832,    0x00bd},
-{0x8833,    0x0089}, {0x8834,    0x0094}, {0x8835,    0x0086},
-{0x8836,    0x0001}, {0x8837,    0x00f6}, {0x8838,    0x008f},
-{0x8839,    0x0074}, {0x883a,    0x00bd}, {0x883b,    0x0089},
-{0x883c,    0x0094}, {0x883d,    0x0086}, {0x883e,    0x0002},
-{0x883f,    0x00f6}, {0x8840,    0x008f}, {0x8841,    0x0077},
-{0x8842,    0x00bd}, {0x8843,    0x0089}, {0x8844,    0x0094},
-{0x8845,    0x0086}, {0x8846,    0x0003}, {0x8847,    0x00f6},
-{0x8848,    0x008f}, {0x8849,    0x007a}, {0x884a,    0x00bd},
-{0x884b,    0x0089}, {0x884c,    0x0094}, {0x884d,    0x00ce},
-{0x884e,    0x008f}, {0x884f,    0x0070}, {0x8850,    0x00a6},
-{0x8851,    0x0001}, {0x8852,    0x0081}, {0x8853,    0x0001},
-{0x8854,    0x0027}, {0x8855,    0x0007}, {0x8856,    0x0081},
-{0x8857,    0x0003}, {0x8858,    0x0027}, {0x8859,    0x0003},
-{0x885a,    0x007e}, {0x885b,    0x0088}, {0x885c,    0x0066},
-{0x885d,    0x00a6}, {0x885e,    0x0000}, {0x885f,    0x00b8},
-{0x8860,    0x008f}, {0x8861,    0x0081}, {0x8862,    0x0084},
-{0x8863,    0x0001}, {0x8864,    0x0026}, {0x8865,    0x000b},
-{0x8866,    0x008c}, {0x8867,    0x008f}, {0x8868,    0x0079},
-{0x8869,    0x002c}, {0x886a,    0x000e}, {0x886b,    0x0008},
-{0x886c,    0x0008}, {0x886d,    0x0008}, {0x886e,    0x007e},
-{0x886f,    0x0088}, {0x8870,    0x0050}, {0x8871,    0x00b6},
-{0x8872,    0x0012}, {0x8873,    0x0004}, {0x8874,    0x008a},
-{0x8875,    0x0040}, {0x8876,    0x00b7}, {0x8877,    0x0012},
-{0x8878,    0x0004}, {0x8879,    0x00b6}, {0x887a,    0x0012},
-{0x887b,    0x0004}, {0x887c,    0x0084}, {0x887d,    0x00fb},
-{0x887e,    0x0084}, {0x887f,    0x00ef}, {0x8880,    0x00b7},
-{0x8881,    0x0012}, {0x8882,    0x0004}, {0x8883,    0x00b6},
-{0x8884,    0x0012}, {0x8885,    0x0007}, {0x8886,    0x0036},
-{0x8887,    0x00b6}, {0x8888,    0x008f}, {0x8889,    0x007c},
-{0x888a,    0x0048}, {0x888b,    0x0048}, {0x888c,    0x00b7},
-{0x888d,    0x0012}, {0x888e,    0x0007}, {0x888f,    0x0086},
-{0x8890,    0x0001}, {0x8891,    0x00ba}, {0x8892,    0x0012},
-{0x8893,    0x0004}, {0x8894,    0x00b7}, {0x8895,    0x0012},
-{0x8896,    0x0004}, {0x8897,    0x0001}, {0x8898,    0x0001},
-{0x8899,    0x0001}, {0x889a,    0x0001}, {0x889b,    0x0001},
-{0x889c,    0x0001}, {0x889d,    0x0086}, {0x889e,    0x00fe},
-{0x889f,    0x00b4}, {0x88a0,    0x0012}, {0x88a1,    0x0004},
-{0x88a2,    0x00b7}, {0x88a3,    0x0012}, {0x88a4,    0x0004},
-{0x88a5,    0x0086}, {0x88a6,    0x0002}, {0x88a7,    0x00ba},
-{0x88a8,    0x0012}, {0x88a9,    0x0004}, {0x88aa,    0x00b7},
-{0x88ab,    0x0012}, {0x88ac,    0x0004}, {0x88ad,    0x0086},
-{0x88ae,    0x00fd}, {0x88af,    0x00b4}, {0x88b0,    0x0012},
-{0x88b1,    0x0004}, {0x88b2,    0x00b7}, {0x88b3,    0x0012},
-{0x88b4,    0x0004}, {0x88b5,    0x0032}, {0x88b6,    0x00b7},
-{0x88b7,    0x0012}, {0x88b8,    0x0007}, {0x88b9,    0x00b6},
-{0x88ba,    0x0012}, {0x88bb,    0x0000}, {0x88bc,    0x0084},
-{0x88bd,    0x0008}, {0x88be,    0x0081}, {0x88bf,    0x0008},
-{0x88c0,    0x0027}, {0x88c1,    0x000f}, {0x88c2,    0x007c},
-{0x88c3,    0x0082}, {0x88c4,    0x0008}, {0x88c5,    0x0026},
-{0x88c6,    0x0007}, {0x88c7,    0x0086}, {0x88c8,    0x0076},
-{0x88c9,    0x0097}, {0x88ca,    0x0040}, {0x88cb,    0x007e},
-{0x88cc,    0x0089}, {0x88cd,    0x006e}, {0x88ce,    0x007e},
-{0x88cf,    0x0086}, {0x88d0,    0x00ec}, {0x88d1,    0x00b6},
-{0x88d2,    0x008f}, {0x88d3,    0x007f}, {0x88d4,    0x0081},
-{0x88d5,    0x000f}, {0x88d6,    0x0027}, {0x88d7,    0x003c},
-{0x88d8,    0x00bd}, {0x88d9,    0x00e6}, {0x88da,    0x00c7},
-{0x88db,    0x00b7}, {0x88dc,    0x0012}, {0x88dd,    0x000d},
-{0x88de,    0x00bd}, {0x88df,    0x00e6}, {0x88e0,    0x00cb},
-{0x88e1,    0x00b6}, {0x88e2,    0x0012}, {0x88e3,    0x0004},
-{0x88e4,    0x008a}, {0x88e5,    0x0020}, {0x88e6,    0x00b7},
-{0x88e7,    0x0012}, {0x88e8,    0x0004}, {0x88e9,    0x00ce},
-{0x88ea,    0x00ff}, {0x88eb,    0x00ff}, {0x88ec,    0x00b6},
-{0x88ed,    0x0012}, {0x88ee,    0x0000}, {0x88ef,    0x0081},
-{0x88f0,    0x000c}, {0x88f1,    0x0026}, {0x88f2,    0x0005},
-{0x88f3,    0x0009}, {0x88f4,    0x0026}, {0x88f5,    0x00f6},
-{0x88f6,    0x0027}, {0x88f7,    0x001c}, {0x88f8,    0x00b6},
-{0x88f9,    0x0012}, {0x88fa,    0x0004}, {0x88fb,    0x0084},
-{0x88fc,    0x00df}, {0x88fd,    0x00b7}, {0x88fe,    0x0012},
-{0x88ff,    0x0004}, {0x8900,    0x0096}, {0x8901,    0x0083},
-{0x8902,    0x0081}, {0x8903,    0x0007}, {0x8904,    0x002c},
-{0x8905,    0x0005}, {0x8906,    0x007c}, {0x8907,    0x0000},
-{0x8908,    0x0083}, {0x8909,    0x0020}, {0x890a,    0x0006},
-{0x890b,    0x0096}, {0x890c,    0x0083}, {0x890d,    0x008b},
-{0x890e,    0x0008}, {0x890f,    0x0097}, {0x8910,    0x0083},
-{0x8911,    0x007e}, {0x8912,    0x0085}, {0x8913,    0x0041},
-{0x8914,    0x007f}, {0x8915,    0x008f}, {0x8916,    0x007e},
-{0x8917,    0x0086}, {0x8918,    0x0080}, {0x8919,    0x00b7},
-{0x891a,    0x0012}, {0x891b,    0x000c}, {0x891c,    0x0086},
-{0x891d,    0x0001}, {0x891e,    0x00b7}, {0x891f,    0x008f},
-{0x8920,    0x007d}, {0x8921,    0x00b6}, {0x8922,    0x0012},
-{0x8923,    0x000c}, {0x8924,    0x0084}, {0x8925,    0x007f},
-{0x8926,    0x00b7}, {0x8927,    0x0012}, {0x8928,    0x000c},
-{0x8929,    0x008a}, {0x892a,    0x0080}, {0x892b,    0x00b7},
-{0x892c,    0x0012}, {0x892d,    0x000c}, {0x892e,    0x0086},
-{0x892f,    0x000a}, {0x8930,    0x00bd}, {0x8931,    0x008a},
-{0x8932,    0x0006}, {0x8933,    0x00b6}, {0x8934,    0x0012},
-{0x8935,    0x000a}, {0x8936,    0x002a}, {0x8937,    0x0009},
-{0x8938,    0x00b6}, {0x8939,    0x0012}, {0x893a,    0x000c},
-{0x893b,    0x00ba}, {0x893c,    0x008f}, {0x893d,    0x007d},
-{0x893e,    0x00b7}, {0x893f,    0x0012}, {0x8940,    0x000c},
-{0x8941,    0x00b6}, {0x8942,    0x008f}, {0x8943,    0x007e},
-{0x8944,    0x0081}, {0x8945,    0x0060}, {0x8946,    0x0027},
-{0x8947,    0x001a}, {0x8948,    0x008b}, {0x8949,    0x0020},
-{0x894a,    0x00b7}, {0x894b,    0x008f}, {0x894c,    0x007e},
-{0x894d,    0x00b6}, {0x894e,    0x0012}, {0x894f,    0x000c},
-{0x8950,    0x0084}, {0x8951,    0x009f}, {0x8952,    0x00ba},
-{0x8953,    0x008f}, {0x8954,    0x007e}, {0x8955,    0x00b7},
-{0x8956,    0x0012}, {0x8957,    0x000c}, {0x8958,    0x00b6},
-{0x8959,    0x008f}, {0x895a,    0x007d}, {0x895b,    0x0048},
-{0x895c,    0x00b7}, {0x895d,    0x008f}, {0x895e,    0x007d},
-{0x895f,    0x007e}, {0x8960,    0x0089}, {0x8961,    0x0021},
-{0x8962,    0x00b6}, {0x8963,    0x0012}, {0x8964,    0x0004},
-{0x8965,    0x008a}, {0x8966,    0x0020}, {0x8967,    0x00b7},
-{0x8968,    0x0012}, {0x8969,    0x0004}, {0x896a,    0x00bd},
-{0x896b,    0x008a}, {0x896c,    0x000a}, {0x896d,    0x004f},
-{0x896e,    0x0039}, {0x896f,    0x00a6}, {0x8970,    0x0000},
-{0x8971,    0x0018}, {0x8972,    0x00a7}, {0x8973,    0x0000},
-{0x8974,    0x0008}, {0x8975,    0x0018}, {0x8976,    0x0008},
-{0x8977,    0x005a}, {0x8978,    0x0026}, {0x8979,    0x00f5},
-{0x897a,    0x0039}, {0x897b,    0x0036}, {0x897c,    0x006c},
-{0x897d,    0x0000}, {0x897e,    0x0032}, {0x897f,    0x00ba},
-{0x8980,    0x008f}, {0x8981,    0x007f}, {0x8982,    0x00b7},
-{0x8983,    0x008f}, {0x8984,    0x007f}, {0x8985,    0x00b6},
-{0x8986,    0x0012}, {0x8987,    0x0009}, {0x8988,    0x0084},
-{0x8989,    0x0003}, {0x898a,    0x00a7}, {0x898b,    0x0001},
-{0x898c,    0x00b6}, {0x898d,    0x0012}, {0x898e,    0x0006},
-{0x898f,    0x0084}, {0x8990,    0x003f}, {0x8991,    0x00a7},
-{0x8992,    0x0002}, {0x8993,    0x0039}, {0x8994,    0x0036},
-{0x8995,    0x0086}, {0x8996,    0x0003}, {0x8997,    0x00b7},
-{0x8998,    0x008f}, {0x8999,    0x0080}, {0x899a,    0x0032},
-{0x899b,    0x00c1}, {0x899c,    0x0000}, {0x899d,    0x0026},
-{0x899e,    0x0006}, {0x899f,    0x00b7}, {0x89a0,    0x008f},
-{0x89a1,    0x007c}, {0x89a2,    0x007e}, {0x89a3,    0x0089},
-{0x89a4,    0x00c9}, {0x89a5,    0x00c1}, {0x89a6,    0x0001},
-{0x89a7,    0x0027}, {0x89a8,    0x0018}, {0x89a9,    0x00c1},
-{0x89aa,    0x0002}, {0x89ab,    0x0027}, {0x89ac,    0x000c},
-{0x89ad,    0x00c1}, {0x89ae,    0x0003}, {0x89af,    0x0027},
-{0x89b0,    0x0000}, {0x89b1,    0x00f6}, {0x89b2,    0x008f},
-{0x89b3,    0x0080}, {0x89b4,    0x0005}, {0x89b5,    0x0005},
-{0x89b6,    0x00f7}, {0x89b7,    0x008f}, {0x89b8,    0x0080},
-{0x89b9,    0x00f6}, {0x89ba,    0x008f}, {0x89bb,    0x0080},
-{0x89bc,    0x0005}, {0x89bd,    0x0005}, {0x89be,    0x00f7},
-{0x89bf,    0x008f}, {0x89c0,    0x0080}, {0x89c1,    0x00f6},
-{0x89c2,    0x008f}, {0x89c3,    0x0080}, {0x89c4,    0x0005},
-{0x89c5,    0x0005}, {0x89c6,    0x00f7}, {0x89c7,    0x008f},
-{0x89c8,    0x0080}, {0x89c9,    0x00f6}, {0x89ca,    0x008f},
-{0x89cb,    0x0080}, {0x89cc,    0x0053}, {0x89cd,    0x00f4},
-{0x89ce,    0x0012}, {0x89cf,    0x0007}, {0x89d0,    0x001b},
-{0x89d1,    0x00b7}, {0x89d2,    0x0012}, {0x89d3,    0x0007},
-{0x89d4,    0x0039}, {0x89d5,    0x00ce}, {0x89d6,    0x008f},
-{0x89d7,    0x0070}, {0x89d8,    0x00a6}, {0x89d9,    0x0000},
-{0x89da,    0x0018}, {0x89db,    0x00e6}, {0x89dc,    0x0000},
-{0x89dd,    0x0018}, {0x89de,    0x00a7}, {0x89df,    0x0000},
-{0x89e0,    0x00e7}, {0x89e1,    0x0000}, {0x89e2,    0x00a6},
-{0x89e3,    0x0001}, {0x89e4,    0x0018}, {0x89e5,    0x00e6},
-{0x89e6,    0x0001}, {0x89e7,    0x0018}, {0x89e8,    0x00a7},
-{0x89e9,    0x0001}, {0x89ea,    0x00e7}, {0x89eb,    0x0001},
-{0x89ec,    0x00a6}, {0x89ed,    0x0002}, {0x89ee,    0x0018},
-{0x89ef,    0x00e6}, {0x89f0,    0x0002}, {0x89f1,    0x0018},
-{0x89f2,    0x00a7}, {0x89f3,    0x0002}, {0x89f4,    0x00e7},
-{0x89f5,    0x0002}, {0x89f6,    0x0039}, {0x89f7,    0x00a6},
-{0x89f8,    0x0000}, {0x89f9,    0x0084}, {0x89fa,    0x0007},
-{0x89fb,    0x00e6}, {0x89fc,    0x0000}, {0x89fd,    0x00c4},
-{0x89fe,    0x0038}, {0x89ff,    0x0054}, {0x8a00,    0x0054},
-{0x8a01,    0x0054}, {0x8a02,    0x001b}, {0x8a03,    0x00a7},
-{0x8a04,    0x0000}, {0x8a05,    0x0039}, {0x8a06,    0x004a},
-{0x8a07,    0x0026}, {0x8a08,    0x00fd}, {0x8a09,    0x0039},
-{0x8a0a,    0x0096}, {0x8a0b,    0x0022}, {0x8a0c,    0x0084},
-{0x8a0d,    0x000f}, {0x8a0e,    0x0097}, {0x8a0f,    0x0022},
-{0x8a10,    0x0086}, {0x8a11,    0x0001}, {0x8a12,    0x00b7},
-{0x8a13,    0x008f}, {0x8a14,    0x0070}, {0x8a15,    0x00b6},
-{0x8a16,    0x0012}, {0x8a17,    0x0007}, {0x8a18,    0x00b7},
-{0x8a19,    0x008f}, {0x8a1a,    0x0071}, {0x8a1b,    0x00f6},
-{0x8a1c,    0x0012}, {0x8a1d,    0x000c}, {0x8a1e,    0x00c4},
-{0x8a1f,    0x000f}, {0x8a20,    0x00c8}, {0x8a21,    0x000f},
-{0x8a22,    0x00f7}, {0x8a23,    0x008f}, {0x8a24,    0x0072},
-{0x8a25,    0x00f6}, {0x8a26,    0x008f}, {0x8a27,    0x0072},
-{0x8a28,    0x00b6}, {0x8a29,    0x008f}, {0x8a2a,    0x0071},
-{0x8a2b,    0x0084}, {0x8a2c,    0x0003}, {0x8a2d,    0x0027},
-{0x8a2e,    0x0014}, {0x8a2f,    0x0081}, {0x8a30,    0x0001},
-{0x8a31,    0x0027}, {0x8a32,    0x001c}, {0x8a33,    0x0081},
-{0x8a34,    0x0002}, {0x8a35,    0x0027}, {0x8a36,    0x0024},
-{0x8a37,    0x00f4}, {0x8a38,    0x008f}, {0x8a39,    0x0070},
-{0x8a3a,    0x0027}, {0x8a3b,    0x002a}, {0x8a3c,    0x0096},
-{0x8a3d,    0x0022}, {0x8a3e,    0x008a}, {0x8a3f,    0x0080},
-{0x8a40,    0x007e}, {0x8a41,    0x008a}, {0x8a42,    0x0064},
-{0x8a43,    0x00f4}, {0x8a44,    0x008f}, {0x8a45,    0x0070},
-{0x8a46,    0x0027}, {0x8a47,    0x001e}, {0x8a48,    0x0096},
-{0x8a49,    0x0022}, {0x8a4a,    0x008a}, {0x8a4b,    0x0010},
-{0x8a4c,    0x007e}, {0x8a4d,    0x008a}, {0x8a4e,    0x0064},
-{0x8a4f,    0x00f4}, {0x8a50,    0x008f}, {0x8a51,    0x0070},
-{0x8a52,    0x0027}, {0x8a53,    0x0012}, {0x8a54,    0x0096},
-{0x8a55,    0x0022}, {0x8a56,    0x008a}, {0x8a57,    0x0020},
-{0x8a58,    0x007e}, {0x8a59,    0x008a}, {0x8a5a,    0x0064},
-{0x8a5b,    0x00f4}, {0x8a5c,    0x008f}, {0x8a5d,    0x0070},
-{0x8a5e,    0x0027}, {0x8a5f,    0x0006}, {0x8a60,    0x0096},
-{0x8a61,    0x0022}, {0x8a62,    0x008a}, {0x8a63,    0x0040},
-{0x8a64,    0x0097}, {0x8a65,    0x0022}, {0x8a66,    0x0074},
-{0x8a67,    0x008f}, {0x8a68,    0x0071}, {0x8a69,    0x0074},
-{0x8a6a,    0x008f}, {0x8a6b,    0x0071}, {0x8a6c,    0x0078},
-{0x8a6d,    0x008f}, {0x8a6e,    0x0070}, {0x8a6f,    0x00b6},
-{0x8a70,    0x008f}, {0x8a71,    0x0070}, {0x8a72,    0x0085},
-{0x8a73,    0x0010}, {0x8a74,    0x0027}, {0x8a75,    0x00af},
-{0x8a76,    0x00d6}, {0x8a77,    0x0022}, {0x8a78,    0x00c4},
-{0x8a79,    0x0010}, {0x8a7a,    0x0058}, {0x8a7b,    0x00b6},
-{0x8a7c,    0x0012}, {0x8a7d,    0x0070}, {0x8a7e,    0x0081},
-{0x8a7f,    0x00e4}, {0x8a80,    0x0027}, {0x8a81,    0x0036},
-{0x8a82,    0x0081}, {0x8a83,    0x00e1}, {0x8a84,    0x0026},
-{0x8a85,    0x000c}, {0x8a86,    0x0096}, {0x8a87,    0x0022},
-{0x8a88,    0x0084}, {0x8a89,    0x0020}, {0x8a8a,    0x0044},
-{0x8a8b,    0x001b}, {0x8a8c,    0x00d6}, {0x8a8d,    0x0022},
-{0x8a8e,    0x00c4}, {0x8a8f,    0x00cf}, {0x8a90,    0x0020},
-{0x8a91,    0x0023}, {0x8a92,    0x0058}, {0x8a93,    0x0081},
-{0x8a94,    0x00c6}, {0x8a95,    0x0026}, {0x8a96,    0x000d},
-{0x8a97,    0x0096}, {0x8a98,    0x0022}, {0x8a99,    0x0084},
-{0x8a9a,    0x0040}, {0x8a9b,    0x0044}, {0x8a9c,    0x0044},
-{0x8a9d,    0x001b}, {0x8a9e,    0x00d6}, {0x8a9f,    0x0022},
-{0x8aa0,    0x00c4}, {0x8aa1,    0x00af}, {0x8aa2,    0x0020},
-{0x8aa3,    0x0011}, {0x8aa4,    0x0058}, {0x8aa5,    0x0081},
-{0x8aa6,    0x0027}, {0x8aa7,    0x0026}, {0x8aa8,    0x000f},
-{0x8aa9,    0x0096}, {0x8aaa,    0x0022}, {0x8aab,    0x0084},
-{0x8aac,    0x0080}, {0x8aad,    0x0044}, {0x8aae,    0x0044},
-{0x8aaf,    0x0044}, {0x8ab0,    0x001b}, {0x8ab1,    0x00d6},
-{0x8ab2,    0x0022}, {0x8ab3,    0x00c4}, {0x8ab4,    0x006f},
-{0x8ab5,    0x001b}, {0x8ab6,    0x0097}, {0x8ab7,    0x0022},
-{0x8ab8,    0x0039}, {0x8ab9,    0x0027}, {0x8aba,    0x000c},
-{0x8abb,    0x007c}, {0x8abc,    0x0082}, {0x8abd,    0x0006},
-{0x8abe,    0x00bd}, {0x8abf,    0x00d9}, {0x8ac0,    0x00ed},
-{0x8ac1,    0x00b6}, {0x8ac2,    0x0082}, {0x8ac3,    0x0007},
-{0x8ac4,    0x007e}, {0x8ac5,    0x008a}, {0x8ac6,    0x00b9},
-{0x8ac7,    0x007f}, {0x8ac8,    0x0082}, {0x8ac9,    0x0006},
-{0x8aca,    0x0039}, { 0x0, 0x0 }
-};
-#else
-cas_saturn_patch_t cas_saturn_patch[] = {
-{0x8200,    0x007e}, {0x8201,    0x0082}, {0x8202,    0x0009},
-{0x8203,    0x0000}, {0x8204,    0x0000}, {0x8205,    0x0000},
-{0x8206,    0x0000}, {0x8207,    0x0000}, {0x8208,    0x0000},
-{0x8209,    0x008e}, {0x820a,    0x008e}, {0x820b,    0x00ff},
-{0x820c,    0x00ce}, {0x820d,    0x0082}, {0x820e,    0x0025},
-{0x820f,    0x00ff}, {0x8210,    0x0001}, {0x8211,    0x000f},
-{0x8212,    0x00ce}, {0x8213,    0x0084}, {0x8214,    0x0026},
-{0x8215,    0x00ff}, {0x8216,    0x0001}, {0x8217,    0x0011},
-{0x8218,    0x00ce}, {0x8219,    0x0085}, {0x821a,    0x003d},
-{0x821b,    0x00df}, {0x821c,    0x00e5}, {0x821d,    0x0086},
-{0x821e,    0x0039}, {0x821f,    0x00b7}, {0x8220,    0x008f},
-{0x8221,    0x00f8}, {0x8222,    0x007e}, {0x8223,    0x00c3},
-{0x8224,    0x00c2}, {0x8225,    0x0096}, {0x8226,    0x0047},
-{0x8227,    0x0084}, {0x8228,    0x00f3}, {0x8229,    0x008a},
-{0x822a,    0x0000}, {0x822b,    0x0097}, {0x822c,    0x0047},
-{0x822d,    0x00ce}, {0x822e,    0x0082}, {0x822f,    0x0033},
-{0x8230,    0x00ff}, {0x8231,    0x0001}, {0x8232,    0x000f},
-{0x8233,    0x0096}, {0x8234,    0x0046}, {0x8235,    0x0084},
-{0x8236,    0x000c}, {0x8237,    0x0081}, {0x8238,    0x0004},
-{0x8239,    0x0027}, {0x823a,    0x000b}, {0x823b,    0x0096},
-{0x823c,    0x0046}, {0x823d,    0x0084}, {0x823e,    0x000c},
-{0x823f,    0x0081}, {0x8240,    0x0008}, {0x8241,    0x0027},
-{0x8242,    0x0057}, {0x8243,    0x007e}, {0x8244,    0x0084},
-{0x8245,    0x0025}, {0x8246,    0x0096}, {0x8247,    0x0047},
-{0x8248,    0x0084}, {0x8249,    0x00f3}, {0x824a,    0x008a},
-{0x824b,    0x0004}, {0x824c,    0x0097}, {0x824d,    0x0047},
-{0x824e,    0x00ce}, {0x824f,    0x0082}, {0x8250,    0x0054},
-{0x8251,    0x00ff}, {0x8252,    0x0001}, {0x8253,    0x000f},
-{0x8254,    0x0096}, {0x8255,    0x0046}, {0x8256,    0x0084},
-{0x8257,    0x000c}, {0x8258,    0x0081}, {0x8259,    0x0004},
-{0x825a,    0x0026}, {0x825b,    0x0038}, {0x825c,    0x00b6},
-{0x825d,    0x0012}, {0x825e,    0x0020}, {0x825f,    0x0084},
-{0x8260,    0x0020}, {0x8261,    0x0026}, {0x8262,    0x0003},
-{0x8263,    0x007e}, {0x8264,    0x0084}, {0x8265,    0x0025},
-{0x8266,    0x0096}, {0x8267,    0x007b}, {0x8268,    0x00d6},
-{0x8269,    0x007c}, {0x826a,    0x00fe}, {0x826b,    0x008f},
-{0x826c,    0x0056}, {0x826d,    0x00bd}, {0x826e,    0x00f7},
-{0x826f,    0x00b6}, {0x8270,    0x00fe}, {0x8271,    0x008f},
-{0x8272,    0x004e}, {0x8273,    0x00bd}, {0x8274,    0x00ec},
-{0x8275,    0x008e}, {0x8276,    0x00bd}, {0x8277,    0x00fa},
-{0x8278,    0x00f7}, {0x8279,    0x00bd}, {0x827a,    0x00f7},
-{0x827b,    0x0028}, {0x827c,    0x00ce}, {0x827d,    0x0082},
-{0x827e,    0x0082}, {0x827f,    0x00ff}, {0x8280,    0x0001},
-{0x8281,    0x000f}, {0x8282,    0x0096}, {0x8283,    0x0046},
-{0x8284,    0x0084}, {0x8285,    0x000c}, {0x8286,    0x0081},
-{0x8287,    0x0004}, {0x8288,    0x0026}, {0x8289,    0x000a},
-{0x828a,    0x00b6}, {0x828b,    0x0012}, {0x828c,    0x0020},
-{0x828d,    0x0084}, {0x828e,    0x0020}, {0x828f,    0x0027},
-{0x8290,    0x00b5}, {0x8291,    0x007e}, {0x8292,    0x0084},
-{0x8293,    0x0025}, {0x8294,    0x00bd}, {0x8295,    0x00f7},
-{0x8296,    0x001f}, {0x8297,    0x007e}, {0x8298,    0x0084},
-{0x8299,    0x001f}, {0x829a,    0x0096}, {0x829b,    0x0047},
-{0x829c,    0x0084}, {0x829d,    0x00f3}, {0x829e,    0x008a},
-{0x829f,    0x0008}, {0x82a0,    0x0097}, {0x82a1,    0x0047},
-{0x82a2,    0x00de}, {0x82a3,    0x00e1}, {0x82a4,    0x00ad},
-{0x82a5,    0x0000}, {0x82a6,    0x00ce}, {0x82a7,    0x0082},
-{0x82a8,    0x00af}, {0x82a9,    0x00ff}, {0x82aa,    0x0001},
-{0x82ab,    0x000f}, {0x82ac,    0x007e}, {0x82ad,    0x0084},
-{0x82ae,    0x0025}, {0x82af,    0x0096}, {0x82b0,    0x0041},
-{0x82b1,    0x0085}, {0x82b2,    0x0010}, {0x82b3,    0x0026},
-{0x82b4,    0x0006}, {0x82b5,    0x0096}, {0x82b6,    0x0023},
-{0x82b7,    0x0085}, {0x82b8,    0x0040}, {0x82b9,    0x0027},
-{0x82ba,    0x0006}, {0x82bb,    0x00bd}, {0x82bc,    0x00ed},
-{0x82bd,    0x0000}, {0x82be,    0x007e}, {0x82bf,    0x0083},
-{0x82c0,    0x00a2}, {0x82c1,    0x00de}, {0x82c2,    0x0042},
-{0x82c3,    0x00bd}, {0x82c4,    0x00eb}, {0x82c5,    0x008e},
-{0x82c6,    0x0096}, {0x82c7,    0x0024}, {0x82c8,    0x0084},
-{0x82c9,    0x0008}, {0x82ca,    0x0027}, {0x82cb,    0x0003},
-{0x82cc,    0x007e}, {0x82cd,    0x0083}, {0x82ce,    0x00df},
-{0x82cf,    0x0096}, {0x82d0,    0x007b}, {0x82d1,    0x00d6},
-{0x82d2,    0x007c}, {0x82d3,    0x00fe}, {0x82d4,    0x008f},
-{0x82d5,    0x0056}, {0x82d6,    0x00bd}, {0x82d7,    0x00f7},
-{0x82d8,    0x00b6}, {0x82d9,    0x00fe}, {0x82da,    0x008f},
-{0x82db,    0x0050}, {0x82dc,    0x00bd}, {0x82dd,    0x00ec},
-{0x82de,    0x008e}, {0x82df,    0x00bd}, {0x82e0,    0x00fa},
-{0x82e1,    0x00f7}, {0x82e2,    0x0086}, {0x82e3,    0x0011},
-{0x82e4,    0x00c6}, {0x82e5,    0x0049}, {0x82e6,    0x00bd},
-{0x82e7,    0x00e4}, {0x82e8,    0x0012}, {0x82e9,    0x00ce},
-{0x82ea,    0x0082}, {0x82eb,    0x00ef}, {0x82ec,    0x00ff},
-{0x82ed,    0x0001}, {0x82ee,    0x000f}, {0x82ef,    0x0096},
-{0x82f0,    0x0046}, {0x82f1,    0x0084}, {0x82f2,    0x000c},
-{0x82f3,    0x0081}, {0x82f4,    0x0000}, {0x82f5,    0x0027},
-{0x82f6,    0x0017}, {0x82f7,    0x00c6}, {0x82f8,    0x0049},
-{0x82f9,    0x00bd}, {0x82fa,    0x00e4}, {0x82fb,    0x0091},
-{0x82fc,    0x0024}, {0x82fd,    0x000d}, {0x82fe,    0x00b6},
-{0x82ff,    0x0012}, {0x8300,    0x0020}, {0x8301,    0x0085},
-{0x8302,    0x0020}, {0x8303,    0x0026}, {0x8304,    0x000c},
-{0x8305,    0x00ce}, {0x8306,    0x0082}, {0x8307,    0x00c1},
-{0x8308,    0x00ff}, {0x8309,    0x0001}, {0x830a,    0x000f},
-{0x830b,    0x007e}, {0x830c,    0x0084}, {0x830d,    0x0025},
-{0x830e,    0x007e}, {0x830f,    0x0084}, {0x8310,    0x0016},
-{0x8311,    0x00fe}, {0x8312,    0x008f}, {0x8313,    0x0052},
-{0x8314,    0x00bd}, {0x8315,    0x00ec}, {0x8316,    0x008e},
-{0x8317,    0x00bd}, {0x8318,    0x00fa}, {0x8319,    0x00f7},
-{0x831a,    0x0086}, {0x831b,    0x006a}, {0x831c,    0x00c6},
-{0x831d,    0x0049}, {0x831e,    0x00bd}, {0x831f,    0x00e4},
-{0x8320,    0x0012}, {0x8321,    0x00ce}, {0x8322,    0x0083},
-{0x8323,    0x0027}, {0x8324,    0x00ff}, {0x8325,    0x0001},
-{0x8326,    0x000f}, {0x8327,    0x0096}, {0x8328,    0x0046},
-{0x8329,    0x0084}, {0x832a,    0x000c}, {0x832b,    0x0081},
-{0x832c,    0x0000}, {0x832d,    0x0027}, {0x832e,    0x000a},
-{0x832f,    0x00c6}, {0x8330,    0x0049}, {0x8331,    0x00bd},
-{0x8332,    0x00e4}, {0x8333,    0x0091}, {0x8334,    0x0025},
-{0x8335,    0x0006}, {0x8336,    0x007e}, {0x8337,    0x0084},
-{0x8338,    0x0025}, {0x8339,    0x007e}, {0x833a,    0x0084},
-{0x833b,    0x0016}, {0x833c,    0x00b6}, {0x833d,    0x0018},
-{0x833e,    0x0070}, {0x833f,    0x00bb}, {0x8340,    0x0019},
-{0x8341,    0x0070}, {0x8342,    0x002a}, {0x8343,    0x0004},
-{0x8344,    0x0081}, {0x8345,    0x00af}, {0x8346,    0x002e},
-{0x8347,    0x0019}, {0x8348,    0x0096}, {0x8349,    0x007b},
-{0x834a,    0x00f6}, {0x834b,    0x0020}, {0x834c,    0x0007},
-{0x834d,    0x00fa}, {0x834e,    0x0020}, {0x834f,    0x0027},
-{0x8350,    0x00c4}, {0x8351,    0x0038}, {0x8352,    0x0081},
-{0x8353,    0x0038}, {0x8354,    0x0027}, {0x8355,    0x000b},
-{0x8356,    0x00f6}, {0x8357,    0x0020}, {0x8358,    0x0007},
-{0x8359,    0x00fa}, {0x835a,    0x0020}, {0x835b,    0x0027},
-{0x835c,    0x00cb}, {0x835d,    0x0008}, {0x835e,    0x007e},
-{0x835f,    0x0082}, {0x8360,    0x00d3}, {0x8361,    0x00bd},
-{0x8362,    0x00f7}, {0x8363,    0x0066}, {0x8364,    0x0086},
-{0x8365,    0x0074}, {0x8366,    0x00c6}, {0x8367,    0x0049},
-{0x8368,    0x00bd}, {0x8369,    0x00e4}, {0x836a,    0x0012},
-{0x836b,    0x00ce}, {0x836c,    0x0083}, {0x836d,    0x0071},
-{0x836e,    0x00ff}, {0x836f,    0x0001}, {0x8370,    0x000f},
-{0x8371,    0x0096}, {0x8372,    0x0046}, {0x8373,    0x0084},
-{0x8374,    0x000c}, {0x8375,    0x0081}, {0x8376,    0x0008},
-{0x8377,    0x0026}, {0x8378,    0x000a}, {0x8379,    0x00c6},
-{0x837a,    0x0049}, {0x837b,    0x00bd}, {0x837c,    0x00e4},
-{0x837d,    0x0091}, {0x837e,    0x0025}, {0x837f,    0x0006},
-{0x8380,    0x007e}, {0x8381,    0x0084}, {0x8382,    0x0025},
-{0x8383,    0x007e}, {0x8384,    0x0084}, {0x8385,    0x0016},
-{0x8386,    0x00bd}, {0x8387,    0x00f7}, {0x8388,    0x003e},
-{0x8389,    0x0026}, {0x838a,    0x000e}, {0x838b,    0x00bd},
-{0x838c,    0x00e5}, {0x838d,    0x0009}, {0x838e,    0x0026},
-{0x838f,    0x0006}, {0x8390,    0x00ce}, {0x8391,    0x0082},
-{0x8392,    0x00c1}, {0x8393,    0x00ff}, {0x8394,    0x0001},
-{0x8395,    0x000f}, {0x8396,    0x007e}, {0x8397,    0x0084},
-{0x8398,    0x0025}, {0x8399,    0x00fe}, {0x839a,    0x008f},
-{0x839b,    0x0054}, {0x839c,    0x00bd}, {0x839d,    0x00ec},
-{0x839e,    0x008e}, {0x839f,    0x00bd}, {0x83a0,    0x00fa},
-{0x83a1,    0x00f7}, {0x83a2,    0x00bd}, {0x83a3,    0x00f7},
-{0x83a4,    0x0033}, {0x83a5,    0x0086}, {0x83a6,    0x000f},
-{0x83a7,    0x00c6}, {0x83a8,    0x0051}, {0x83a9,    0x00bd},
-{0x83aa,    0x00e4}, {0x83ab,    0x0012}, {0x83ac,    0x00ce},
-{0x83ad,    0x0083}, {0x83ae,    0x00b2}, {0x83af,    0x00ff},
-{0x83b0,    0x0001}, {0x83b1,    0x000f}, {0x83b2,    0x0096},
-{0x83b3,    0x0046}, {0x83b4,    0x0084}, {0x83b5,    0x000c},
-{0x83b6,    0x0081}, {0x83b7,    0x0008}, {0x83b8,    0x0026},
-{0x83b9,    0x005c}, {0x83ba,    0x00b6}, {0x83bb,    0x0012},
-{0x83bc,    0x0020}, {0x83bd,    0x0084}, {0x83be,    0x003f},
-{0x83bf,    0x0081}, {0x83c0,    0x003a}, {0x83c1,    0x0027},
-{0x83c2,    0x001c}, {0x83c3,    0x0096}, {0x83c4,    0x0023},
-{0x83c5,    0x0085}, {0x83c6,    0x0040}, {0x83c7,    0x0027},
-{0x83c8,    0x0003}, {0x83c9,    0x007e}, {0x83ca,    0x0084},
-{0x83cb,    0x0025}, {0x83cc,    0x00c6}, {0x83cd,    0x0051},
-{0x83ce,    0x00bd}, {0x83cf,    0x00e4}, {0x83d0,    0x0091},
-{0x83d1,    0x0025}, {0x83d2,    0x0003}, {0x83d3,    0x007e},
-{0x83d4,    0x0084}, {0x83d5,    0x0025}, {0x83d6,    0x00ce},
-{0x83d7,    0x0082}, {0x83d8,    0x00c1}, {0x83d9,    0x00ff},
-{0x83da,    0x0001}, {0x83db,    0x000f}, {0x83dc,    0x007e},
-{0x83dd,    0x0084}, {0x83de,    0x0025}, {0x83df,    0x00bd},
-{0x83e0,    0x00f8}, {0x83e1,    0x0037}, {0x83e2,    0x007c},
-{0x83e3,    0x0000}, {0x83e4,    0x007a}, {0x83e5,    0x00ce},
-{0x83e6,    0x0083}, {0x83e7,    0x00ee}, {0x83e8,    0x00ff},
-{0x83e9,    0x0001}, {0x83ea,    0x000f}, {0x83eb,    0x007e},
-{0x83ec,    0x0084}, {0x83ed,    0x0025}, {0x83ee,    0x0096},
-{0x83ef,    0x0046}, {0x83f0,    0x0084}, {0x83f1,    0x000c},
-{0x83f2,    0x0081}, {0x83f3,    0x0008}, {0x83f4,    0x0026},
-{0x83f5,    0x0020}, {0x83f6,    0x0096}, {0x83f7,    0x0024},
-{0x83f8,    0x0084}, {0x83f9,    0x0008}, {0x83fa,    0x0026},
-{0x83fb,    0x0029}, {0x83fc,    0x00b6}, {0x83fd,    0x0018},
-{0x83fe,    0x0082}, {0x83ff,    0x00bb}, {0x8400,    0x0019},
-{0x8401,    0x0082}, {0x8402,    0x00b1}, {0x8403,    0x0001},
-{0x8404,    0x003b}, {0x8405,    0x0022}, {0x8406,    0x0009},
-{0x8407,    0x00b6}, {0x8408,    0x0012}, {0x8409,    0x0020},
-{0x840a,    0x0084}, {0x840b,    0x0037}, {0x840c,    0x0081},
-{0x840d,    0x0032}, {0x840e,    0x0027}, {0x840f,    0x0015},
-{0x8410,    0x00bd}, {0x8411,    0x00f8}, {0x8412,    0x0044},
-{0x8413,    0x007e}, {0x8414,    0x0082}, {0x8415,    0x00c1},
-{0x8416,    0x00bd}, {0x8417,    0x00f7}, {0x8418,    0x001f},
-{0x8419,    0x00bd}, {0x841a,    0x00f8}, {0x841b,    0x0044},
-{0x841c,    0x00bd}, {0x841d,    0x00fc}, {0x841e,    0x0029},
-{0x841f,    0x00ce}, {0x8420,    0x0082}, {0x8421,    0x0025},
-{0x8422,    0x00ff}, {0x8423,    0x0001}, {0x8424,    0x000f},
-{0x8425,    0x0039}, {0x8426,    0x0096}, {0x8427,    0x0047},
-{0x8428,    0x0084}, {0x8429,    0x00fc}, {0x842a,    0x008a},
-{0x842b,    0x0000}, {0x842c,    0x0097}, {0x842d,    0x0047},
-{0x842e,    0x00ce}, {0x842f,    0x0084}, {0x8430,    0x0034},
-{0x8431,    0x00ff}, {0x8432,    0x0001}, {0x8433,    0x0011},
-{0x8434,    0x0096}, {0x8435,    0x0046}, {0x8436,    0x0084},
-{0x8437,    0x0003}, {0x8438,    0x0081}, {0x8439,    0x0002},
-{0x843a,    0x0027}, {0x843b,    0x0003}, {0x843c,    0x007e},
-{0x843d,    0x0085}, {0x843e,    0x001e}, {0x843f,    0x0096},
-{0x8440,    0x0047}, {0x8441,    0x0084}, {0x8442,    0x00fc},
-{0x8443,    0x008a}, {0x8444,    0x0002}, {0x8445,    0x0097},
-{0x8446,    0x0047}, {0x8447,    0x00de}, {0x8448,    0x00e1},
-{0x8449,    0x00ad}, {0x844a,    0x0000}, {0x844b,    0x0086},
-{0x844c,    0x0001}, {0x844d,    0x00b7}, {0x844e,    0x0012},
-{0x844f,    0x0051}, {0x8450,    0x00bd}, {0x8451,    0x00f7},
-{0x8452,    0x0014}, {0x8453,    0x00b6}, {0x8454,    0x0010},
-{0x8455,    0x0031}, {0x8456,    0x0084}, {0x8457,    0x00fd},
-{0x8458,    0x00b7}, {0x8459,    0x0010}, {0x845a,    0x0031},
-{0x845b,    0x00bd}, {0x845c,    0x00f8}, {0x845d,    0x001e},
-{0x845e,    0x0096}, {0x845f,    0x0081}, {0x8460,    0x00d6},
-{0x8461,    0x0082}, {0x8462,    0x00fe}, {0x8463,    0x008f},
-{0x8464,    0x005a}, {0x8465,    0x00bd}, {0x8466,    0x00f7},
-{0x8467,    0x00b6}, {0x8468,    0x00fe}, {0x8469,    0x008f},
-{0x846a,    0x005c}, {0x846b,    0x00bd}, {0x846c,    0x00ec},
-{0x846d,    0x008e}, {0x846e,    0x00bd}, {0x846f,    0x00fa},
-{0x8470,    0x00f7}, {0x8471,    0x0086}, {0x8472,    0x0008},
-{0x8473,    0x00d6}, {0x8474,    0x0000}, {0x8475,    0x00c5},
-{0x8476,    0x0010}, {0x8477,    0x0026}, {0x8478,    0x0002},
-{0x8479,    0x008b}, {0x847a,    0x0020}, {0x847b,    0x00c6},
-{0x847c,    0x0051}, {0x847d,    0x00bd}, {0x847e,    0x00e4},
-{0x847f,    0x0012}, {0x8480,    0x00ce}, {0x8481,    0x0084},
-{0x8482,    0x0086}, {0x8483,    0x00ff}, {0x8484,    0x0001},
-{0x8485,    0x0011}, {0x8486,    0x0096}, {0x8487,    0x0046},
-{0x8488,    0x0084}, {0x8489,    0x0003}, {0x848a,    0x0081},
-{0x848b,    0x0002}, {0x848c,    0x0027}, {0x848d,    0x0003},
-{0x848e,    0x007e}, {0x848f,    0x0085}, {0x8490,    0x000f},
-{0x8491,    0x00c6}, {0x8492,    0x0051}, {0x8493,    0x00bd},
-{0x8494,    0x00e4}, {0x8495,    0x0091}, {0x8496,    0x0025},
-{0x8497,    0x0003}, {0x8498,    0x007e}, {0x8499,    0x0085},
-{0x849a,    0x001e}, {0x849b,    0x0096}, {0x849c,    0x0044},
-{0x849d,    0x0085}, {0x849e,    0x0010}, {0x849f,    0x0026},
-{0x84a0,    0x000a}, {0x84a1,    0x00b6}, {0x84a2,    0x0012},
-{0x84a3,    0x0050}, {0x84a4,    0x00ba}, {0x84a5,    0x0001},
-{0x84a6,    0x003c}, {0x84a7,    0x0085}, {0x84a8,    0x0010},
-{0x84a9,    0x0027}, {0x84aa,    0x00a8}, {0x84ab,    0x00bd},
-{0x84ac,    0x00f7}, {0x84ad,    0x0066}, {0x84ae,    0x00ce},
-{0x84af,    0x0084}, {0x84b0,    0x00b7}, {0x84b1,    0x00ff},
-{0x84b2,    0x0001}, {0x84b3,    0x0011}, {0x84b4,    0x007e},
-{0x84b5,    0x0085}, {0x84b6,    0x001e}, {0x84b7,    0x0096},
-{0x84b8,    0x0046}, {0x84b9,    0x0084}, {0x84ba,    0x0003},
-{0x84bb,    0x0081}, {0x84bc,    0x0002}, {0x84bd,    0x0026},
-{0x84be,    0x0050}, {0x84bf,    0x00b6}, {0x84c0,    0x0012},
-{0x84c1,    0x0030}, {0x84c2,    0x0084}, {0x84c3,    0x0003},
-{0x84c4,    0x0081}, {0x84c5,    0x0001}, {0x84c6,    0x0027},
-{0x84c7,    0x0003}, {0x84c8,    0x007e}, {0x84c9,    0x0085},
-{0x84ca,    0x001e}, {0x84cb,    0x0096}, {0x84cc,    0x0044},
-{0x84cd,    0x0085}, {0x84ce,    0x0010}, {0x84cf,    0x0026},
-{0x84d0,    0x0013}, {0x84d1,    0x00b6}, {0x84d2,    0x0012},
-{0x84d3,    0x0050}, {0x84d4,    0x00ba}, {0x84d5,    0x0001},
-{0x84d6,    0x003c}, {0x84d7,    0x0085}, {0x84d8,    0x0010},
-{0x84d9,    0x0026}, {0x84da,    0x0009}, {0x84db,    0x00ce},
-{0x84dc,    0x0084}, {0x84dd,    0x0053}, {0x84de,    0x00ff},
-{0x84df,    0x0001}, {0x84e0,    0x0011}, {0x84e1,    0x007e},
-{0x84e2,    0x0085}, {0x84e3,    0x001e}, {0x84e4,    0x00b6},
-{0x84e5,    0x0010}, {0x84e6,    0x0031}, {0x84e7,    0x008a},
-{0x84e8,    0x0002}, {0x84e9,    0x00b7}, {0x84ea,    0x0010},
-{0x84eb,    0x0031}, {0x84ec,    0x00bd}, {0x84ed,    0x0085},
-{0x84ee,    0x001f}, {0x84ef,    0x00bd}, {0x84f0,    0x00f8},
-{0x84f1,    0x0037}, {0x84f2,    0x007c}, {0x84f3,    0x0000},
-{0x84f4,    0x0080}, {0x84f5,    0x00ce}, {0x84f6,    0x0084},
-{0x84f7,    0x00fe}, {0x84f8,    0x00ff}, {0x84f9,    0x0001},
-{0x84fa,    0x0011}, {0x84fb,    0x007e}, {0x84fc,    0x0085},
-{0x84fd,    0x001e}, {0x84fe,    0x0096}, {0x84ff,    0x0046},
-{0x8500,    0x0084}, {0x8501,    0x0003}, {0x8502,    0x0081},
-{0x8503,    0x0002}, {0x8504,    0x0026}, {0x8505,    0x0009},
-{0x8506,    0x00b6}, {0x8507,    0x0012}, {0x8508,    0x0030},
-{0x8509,    0x0084}, {0x850a,    0x0003}, {0x850b,    0x0081},
-{0x850c,    0x0001}, {0x850d,    0x0027}, {0x850e,    0x000f},
-{0x850f,    0x00bd}, {0x8510,    0x00f8}, {0x8511,    0x0044},
-{0x8512,    0x00bd}, {0x8513,    0x00f7}, {0x8514,    0x000b},
-{0x8515,    0x00bd}, {0x8516,    0x00fc}, {0x8517,    0x0029},
-{0x8518,    0x00ce}, {0x8519,    0x0084}, {0x851a,    0x0026},
-{0x851b,    0x00ff}, {0x851c,    0x0001}, {0x851d,    0x0011},
-{0x851e,    0x0039}, {0x851f,    0x00d6}, {0x8520,    0x0022},
-{0x8521,    0x00c4}, {0x8522,    0x000f}, {0x8523,    0x00b6},
-{0x8524,    0x0012}, {0x8525,    0x0030}, {0x8526,    0x00ba},
-{0x8527,    0x0012}, {0x8528,    0x0032}, {0x8529,    0x0084},
-{0x852a,    0x0004}, {0x852b,    0x0027}, {0x852c,    0x000d},
-{0x852d,    0x0096}, {0x852e,    0x0022}, {0x852f,    0x0085},
-{0x8530,    0x0004}, {0x8531,    0x0027}, {0x8532,    0x0005},
-{0x8533,    0x00ca}, {0x8534,    0x0010}, {0x8535,    0x007e},
-{0x8536,    0x0085}, {0x8537,    0x003a}, {0x8538,    0x00ca},
-{0x8539,    0x0020}, {0x853a,    0x00d7}, {0x853b,    0x0022},
-{0x853c,    0x0039}, {0x853d,    0x0086}, {0x853e,    0x0000},
-{0x853f,    0x0097}, {0x8540,    0x0083}, {0x8541,    0x0018},
-{0x8542,    0x00ce}, {0x8543,    0x001c}, {0x8544,    0x0000},
-{0x8545,    0x00bd}, {0x8546,    0x00eb}, {0x8547,    0x0046},
-{0x8548,    0x0096}, {0x8549,    0x0057}, {0x854a,    0x0085},
-{0x854b,    0x0001}, {0x854c,    0x0027}, {0x854d,    0x0002},
-{0x854e,    0x004f}, {0x854f,    0x0039}, {0x8550,    0x0085},
-{0x8551,    0x0002}, {0x8552,    0x0027}, {0x8553,    0x0001},
-{0x8554,    0x0039}, {0x8555,    0x007f}, {0x8556,    0x008f},
-{0x8557,    0x007d}, {0x8558,    0x0086}, {0x8559,    0x0004},
-{0x855a,    0x00b7}, {0x855b,    0x0012}, {0x855c,    0x0004},
-{0x855d,    0x0086}, {0x855e,    0x0008}, {0x855f,    0x00b7},
-{0x8560,    0x0012}, {0x8561,    0x0007}, {0x8562,    0x0086},
-{0x8563,    0x0010}, {0x8564,    0x00b7}, {0x8565,    0x0012},
-{0x8566,    0x000c}, {0x8567,    0x0086}, {0x8568,    0x0007},
-{0x8569,    0x00b7}, {0x856a,    0x0012}, {0x856b,    0x0006},
-{0x856c,    0x00b6}, {0x856d,    0x008f}, {0x856e,    0x007d},
-{0x856f,    0x00b7}, {0x8570,    0x0012}, {0x8571,    0x0070},
-{0x8572,    0x0086}, {0x8573,    0x0001}, {0x8574,    0x00ba},
-{0x8575,    0x0012}, {0x8576,    0x0004}, {0x8577,    0x00b7},
-{0x8578,    0x0012}, {0x8579,    0x0004}, {0x857a,    0x0001},
-{0x857b,    0x0001}, {0x857c,    0x0001}, {0x857d,    0x0001},
-{0x857e,    0x0001}, {0x857f,    0x0001}, {0x8580,    0x00b6},
-{0x8581,    0x0012}, {0x8582,    0x0004}, {0x8583,    0x0084},
-{0x8584,    0x00fe}, {0x8585,    0x008a}, {0x8586,    0x0002},
-{0x8587,    0x00b7}, {0x8588,    0x0012}, {0x8589,    0x0004},
-{0x858a,    0x0001}, {0x858b,    0x0001}, {0x858c,    0x0001},
-{0x858d,    0x0001}, {0x858e,    0x0001}, {0x858f,    0x0001},
-{0x8590,    0x0086}, {0x8591,    0x00fd}, {0x8592,    0x00b4},
-{0x8593,    0x0012}, {0x8594,    0x0004}, {0x8595,    0x00b7},
-{0x8596,    0x0012}, {0x8597,    0x0004}, {0x8598,    0x00b6},
-{0x8599,    0x0012}, {0x859a,    0x0000}, {0x859b,    0x0084},
-{0x859c,    0x0008}, {0x859d,    0x0081}, {0x859e,    0x0008},
-{0x859f,    0x0027}, {0x85a0,    0x0016}, {0x85a1,    0x00b6},
-{0x85a2,    0x008f}, {0x85a3,    0x007d}, {0x85a4,    0x0081},
-{0x85a5,    0x000c}, {0x85a6,    0x0027}, {0x85a7,    0x0008},
-{0x85a8,    0x008b}, {0x85a9,    0x0004}, {0x85aa,    0x00b7},
-{0x85ab,    0x008f}, {0x85ac,    0x007d}, {0x85ad,    0x007e},
-{0x85ae,    0x0085}, {0x85af,    0x006c}, {0x85b0,    0x0086},
-{0x85b1,    0x0003}, {0x85b2,    0x0097}, {0x85b3,    0x0040},
-{0x85b4,    0x007e}, {0x85b5,    0x0089}, {0x85b6,    0x006e},
-{0x85b7,    0x0086}, {0x85b8,    0x0007}, {0x85b9,    0x00b7},
-{0x85ba,    0x0012}, {0x85bb,    0x0006}, {0x85bc,    0x005f},
-{0x85bd,    0x00f7}, {0x85be,    0x008f}, {0x85bf,    0x0082},
-{0x85c0,    0x005f}, {0x85c1,    0x00f7}, {0x85c2,    0x008f},
-{0x85c3,    0x007f}, {0x85c4,    0x00f7}, {0x85c5,    0x008f},
-{0x85c6,    0x0070}, {0x85c7,    0x00f7}, {0x85c8,    0x008f},
-{0x85c9,    0x0071}, {0x85ca,    0x00f7}, {0x85cb,    0x008f},
-{0x85cc,    0x0072}, {0x85cd,    0x00f7}, {0x85ce,    0x008f},
-{0x85cf,    0x0073}, {0x85d0,    0x00f7}, {0x85d1,    0x008f},
-{0x85d2,    0x0074}, {0x85d3,    0x00f7}, {0x85d4,    0x008f},
-{0x85d5,    0x0075}, {0x85d6,    0x00f7}, {0x85d7,    0x008f},
-{0x85d8,    0x0076}, {0x85d9,    0x00f7}, {0x85da,    0x008f},
-{0x85db,    0x0077}, {0x85dc,    0x00f7}, {0x85dd,    0x008f},
-{0x85de,    0x0078}, {0x85df,    0x00f7}, {0x85e0,    0x008f},
-{0x85e1,    0x0079}, {0x85e2,    0x00f7}, {0x85e3,    0x008f},
-{0x85e4,    0x007a}, {0x85e5,    0x00f7}, {0x85e6,    0x008f},
-{0x85e7,    0x007b}, {0x85e8,    0x00b6}, {0x85e9,    0x0012},
-{0x85ea,    0x0004}, {0x85eb,    0x008a}, {0x85ec,    0x0010},
-{0x85ed,    0x00b7}, {0x85ee,    0x0012}, {0x85ef,    0x0004},
-{0x85f0,    0x0086}, {0x85f1,    0x00e4}, {0x85f2,    0x00b7},
-{0x85f3,    0x0012}, {0x85f4,    0x0070}, {0x85f5,    0x00b7},
-{0x85f6,    0x0012}, {0x85f7,    0x0007}, {0x85f8,    0x00f7},
-{0x85f9,    0x0012}, {0x85fa,    0x0005}, {0x85fb,    0x00f7},
-{0x85fc,    0x0012}, {0x85fd,    0x0009}, {0x85fe,    0x0086},
-{0x85ff,    0x0008}, {0x8600,    0x00ba}, {0x8601,    0x0012},
-{0x8602,    0x0004}, {0x8603,    0x00b7}, {0x8604,    0x0012},
-{0x8605,    0x0004}, {0x8606,    0x0086}, {0x8607,    0x00f7},
-{0x8608,    0x00b4}, {0x8609,    0x0012}, {0x860a,    0x0004},
-{0x860b,    0x00b7}, {0x860c,    0x0012}, {0x860d,    0x0004},
-{0x860e,    0x0001}, {0x860f,    0x0001}, {0x8610,    0x0001},
-{0x8611,    0x0001}, {0x8612,    0x0001}, {0x8613,    0x0001},
-{0x8614,    0x00b6}, {0x8615,    0x0012}, {0x8616,    0x0008},
-{0x8617,    0x0027}, {0x8618,    0x007f}, {0x8619,    0x0081},
-{0x861a,    0x0080}, {0x861b,    0x0026}, {0x861c,    0x000b},
-{0x861d,    0x0086}, {0x861e,    0x0008}, {0x861f,    0x00ce},
-{0x8620,    0x008f}, {0x8621,    0x0079}, {0x8622,    0x00bd},
-{0x8623,    0x0089}, {0x8624,    0x007b}, {0x8625,    0x007e},
-{0x8626,    0x0086}, {0x8627,    0x008e}, {0x8628,    0x0081},
-{0x8629,    0x0040}, {0x862a,    0x0026}, {0x862b,    0x000b},
-{0x862c,    0x0086}, {0x862d,    0x0004}, {0x862e,    0x00ce},
-{0x862f,    0x008f}, {0x8630,    0x0076}, {0x8631,    0x00bd},
-{0x8632,    0x0089}, {0x8633,    0x007b}, {0x8634,    0x007e},
-{0x8635,    0x0086}, {0x8636,    0x008e}, {0x8637,    0x0081},
-{0x8638,    0x0020}, {0x8639,    0x0026}, {0x863a,    0x000b},
-{0x863b,    0x0086}, {0x863c,    0x0002}, {0x863d,    0x00ce},
-{0x863e,    0x008f}, {0x863f,    0x0073}, {0x8640,    0x00bd},
-{0x8641,    0x0089}, {0x8642,    0x007b}, {0x8643,    0x007e},
-{0x8644,    0x0086}, {0x8645,    0x008e}, {0x8646,    0x0081},
-{0x8647,    0x0010}, {0x8648,    0x0026}, {0x8649,    0x000b},
-{0x864a,    0x0086}, {0x864b,    0x0001}, {0x864c,    0x00ce},
-{0x864d,    0x008f}, {0x864e,    0x0070}, {0x864f,    0x00bd},
-{0x8650,    0x0089}, {0x8651,    0x007b}, {0x8652,    0x007e},
-{0x8653,    0x0086}, {0x8654,    0x008e}, {0x8655,    0x0081},
-{0x8656,    0x0008}, {0x8657,    0x0026}, {0x8658,    0x000b},
-{0x8659,    0x0086}, {0x865a,    0x0008}, {0x865b,    0x00ce},
-{0x865c,    0x008f}, {0x865d,    0x0079}, {0x865e,    0x00bd},
-{0x865f,    0x0089}, {0x8660,    0x007f}, {0x8661,    0x007e},
-{0x8662,    0x0086}, {0x8663,    0x008e}, {0x8664,    0x0081},
-{0x8665,    0x0004}, {0x8666,    0x0026}, {0x8667,    0x000b},
-{0x8668,    0x0086}, {0x8669,    0x0004}, {0x866a,    0x00ce},
-{0x866b,    0x008f}, {0x866c,    0x0076}, {0x866d,    0x00bd},
-{0x866e,    0x0089}, {0x866f,    0x007f}, {0x8670,    0x007e},
-{0x8671,    0x0086}, {0x8672,    0x008e}, {0x8673,    0x0081},
-{0x8674,    0x0002}, {0x8675,    0x0026}, {0x8676,    0x000b},
-{0x8677,    0x008a}, {0x8678,    0x0002}, {0x8679,    0x00ce},
-{0x867a,    0x008f}, {0x867b,    0x0073}, {0x867c,    0x00bd},
-{0x867d,    0x0089}, {0x867e,    0x007f}, {0x867f,    0x007e},
-{0x8680,    0x0086}, {0x8681,    0x008e}, {0x8682,    0x0081},
-{0x8683,    0x0001}, {0x8684,    0x0026}, {0x8685,    0x0008},
-{0x8686,    0x0086}, {0x8687,    0x0001}, {0x8688,    0x00ce},
-{0x8689,    0x008f}, {0x868a,    0x0070}, {0x868b,    0x00bd},
-{0x868c,    0x0089}, {0x868d,    0x007f}, {0x868e,    0x00b6},
-{0x868f,    0x008f}, {0x8690,    0x007f}, {0x8691,    0x0081},
-{0x8692,    0x000f}, {0x8693,    0x0026}, {0x8694,    0x0003},
-{0x8695,    0x007e}, {0x8696,    0x0087}, {0x8697,    0x0047},
-{0x8698,    0x00b6}, {0x8699,    0x0012}, {0x869a,    0x0009},
-{0x869b,    0x0084}, {0x869c,    0x0003}, {0x869d,    0x0081},
-{0x869e,    0x0003}, {0x869f,    0x0027}, {0x86a0,    0x0006},
-{0x86a1,    0x007c}, {0x86a2,    0x0012}, {0x86a3,    0x0009},
-{0x86a4,    0x007e}, {0x86a5,    0x0085}, {0x86a6,    0x00fe},
-{0x86a7,    0x00b6}, {0x86a8,    0x0012}, {0x86a9,    0x0006},
-{0x86aa,    0x0084}, {0x86ab,    0x0007}, {0x86ac,    0x0081},
-{0x86ad,    0x0007}, {0x86ae,    0x0027}, {0x86af,    0x0008},
-{0x86b0,    0x008b}, {0x86b1,    0x0001}, {0x86b2,    0x00b7},
-{0x86b3,    0x0012}, {0x86b4,    0x0006}, {0x86b5,    0x007e},
-{0x86b6,    0x0086}, {0x86b7,    0x00d5}, {0x86b8,    0x00b6},
-{0x86b9,    0x008f}, {0x86ba,    0x0082}, {0x86bb,    0x0026},
-{0x86bc,    0x000a}, {0x86bd,    0x007c}, {0x86be,    0x008f},
-{0x86bf,    0x0082}, {0x86c0,    0x004f}, {0x86c1,    0x00b7},
-{0x86c2,    0x0012}, {0x86c3,    0x0006}, {0x86c4,    0x007e},
-{0x86c5,    0x0085}, {0x86c6,    0x00c0}, {0x86c7,    0x00b6},
-{0x86c8,    0x0012}, {0x86c9,    0x0006}, {0x86ca,    0x0084},
-{0x86cb,    0x003f}, {0x86cc,    0x0081}, {0x86cd,    0x003f},
-{0x86ce,    0x0027}, {0x86cf,    0x0010}, {0x86d0,    0x008b},
-{0x86d1,    0x0008}, {0x86d2,    0x00b7}, {0x86d3,    0x0012},
-{0x86d4,    0x0006}, {0x86d5,    0x00b6}, {0x86d6,    0x0012},
-{0x86d7,    0x0009}, {0x86d8,    0x0084}, {0x86d9,    0x00fc},
-{0x86da,    0x00b7}, {0x86db,    0x0012}, {0x86dc,    0x0009},
-{0x86dd,    0x007e}, {0x86de,    0x0085}, {0x86df,    0x00fe},
-{0x86e0,    0x00ce}, {0x86e1,    0x008f}, {0x86e2,    0x0070},
-{0x86e3,    0x0018}, {0x86e4,    0x00ce}, {0x86e5,    0x008f},
-{0x86e6,    0x0084}, {0x86e7,    0x00c6}, {0x86e8,    0x000c},
-{0x86e9,    0x00bd}, {0x86ea,    0x0089}, {0x86eb,    0x006f},
-{0x86ec,    0x00ce}, {0x86ed,    0x008f}, {0x86ee,    0x0084},
-{0x86ef,    0x0018}, {0x86f0,    0x00ce}, {0x86f1,    0x008f},
-{0x86f2,    0x0070}, {0x86f3,    0x00c6}, {0x86f4,    0x000c},
-{0x86f5,    0x00bd}, {0x86f6,    0x0089}, {0x86f7,    0x006f},
-{0x86f8,    0x00d6}, {0x86f9,    0x0083}, {0x86fa,    0x00c1},
-{0x86fb,    0x004f}, {0x86fc,    0x002d}, {0x86fd,    0x0003},
-{0x86fe,    0x007e}, {0x86ff,    0x0087}, {0x8700,    0x0040},
-{0x8701,    0x00b6}, {0x8702,    0x008f}, {0x8703,    0x007f},
-{0x8704,    0x0081}, {0x8705,    0x0007}, {0x8706,    0x0027},
-{0x8707,    0x000f}, {0x8708,    0x0081}, {0x8709,    0x000b},
-{0x870a,    0x0027}, {0x870b,    0x0015}, {0x870c,    0x0081},
-{0x870d,    0x000d}, {0x870e,    0x0027}, {0x870f,    0x001b},
-{0x8710,    0x0081}, {0x8711,    0x000e}, {0x8712,    0x0027},
-{0x8713,    0x0021}, {0x8714,    0x007e}, {0x8715,    0x0087},
-{0x8716,    0x0040}, {0x8717,    0x00f7}, {0x8718,    0x008f},
-{0x8719,    0x007b}, {0x871a,    0x0086}, {0x871b,    0x0002},
-{0x871c,    0x00b7}, {0x871d,    0x008f}, {0x871e,    0x007a},
-{0x871f,    0x0020}, {0x8720,    0x001c}, {0x8721,    0x00f7},
-{0x8722,    0x008f}, {0x8723,    0x0078}, {0x8724,    0x0086},
-{0x8725,    0x0002}, {0x8726,    0x00b7}, {0x8727,    0x008f},
-{0x8728,    0x0077}, {0x8729,    0x0020}, {0x872a,    0x0012},
-{0x872b,    0x00f7}, {0x872c,    0x008f}, {0x872d,    0x0075},
-{0x872e,    0x0086}, {0x872f,    0x0002}, {0x8730,    0x00b7},
-{0x8731,    0x008f}, {0x8732,    0x0074}, {0x8733,    0x0020},
-{0x8734,    0x0008}, {0x8735,    0x00f7}, {0x8736,    0x008f},
-{0x8737,    0x0072}, {0x8738,    0x0086}, {0x8739,    0x0002},
-{0x873a,    0x00b7}, {0x873b,    0x008f}, {0x873c,    0x0071},
-{0x873d,    0x007e}, {0x873e,    0x0087}, {0x873f,    0x0047},
-{0x8740,    0x0086}, {0x8741,    0x0004}, {0x8742,    0x0097},
-{0x8743,    0x0040}, {0x8744,    0x007e}, {0x8745,    0x0089},
-{0x8746,    0x006e}, {0x8747,    0x00ce}, {0x8748,    0x008f},
-{0x8749,    0x0072}, {0x874a,    0x00bd}, {0x874b,    0x0089},
-{0x874c,    0x00f7}, {0x874d,    0x00ce}, {0x874e,    0x008f},
-{0x874f,    0x0075}, {0x8750,    0x00bd}, {0x8751,    0x0089},
-{0x8752,    0x00f7}, {0x8753,    0x00ce}, {0x8754,    0x008f},
-{0x8755,    0x0078}, {0x8756,    0x00bd}, {0x8757,    0x0089},
-{0x8758,    0x00f7}, {0x8759,    0x00ce}, {0x875a,    0x008f},
-{0x875b,    0x007b}, {0x875c,    0x00bd}, {0x875d,    0x0089},
-{0x875e,    0x00f7}, {0x875f,    0x004f}, {0x8760,    0x00b7},
-{0x8761,    0x008f}, {0x8762,    0x007d}, {0x8763,    0x00b7},
-{0x8764,    0x008f}, {0x8765,    0x0081}, {0x8766,    0x00b6},
-{0x8767,    0x008f}, {0x8768,    0x0072}, {0x8769,    0x0027},
-{0x876a,    0x0047}, {0x876b,    0x007c}, {0x876c,    0x008f},
-{0x876d,    0x007d}, {0x876e,    0x00b6}, {0x876f,    0x008f},
-{0x8770,    0x0075}, {0x8771,    0x0027}, {0x8772,    0x003f},
-{0x8773,    0x007c}, {0x8774,    0x008f}, {0x8775,    0x007d},
-{0x8776,    0x00b6}, {0x8777,    0x008f}, {0x8778,    0x0078},
-{0x8779,    0x0027}, {0x877a,    0x0037}, {0x877b,    0x007c},
-{0x877c,    0x008f}, {0x877d,    0x007d}, {0x877e,    0x00b6},
-{0x877f,    0x008f}, {0x8780,    0x007b}, {0x8781,    0x0027},
-{0x8782,    0x002f}, {0x8783,    0x007f}, {0x8784,    0x008f},
-{0x8785,    0x007d}, {0x8786,    0x007c}, {0x8787,    0x008f},
-{0x8788,    0x0081}, {0x8789,    0x007a}, {0x878a,    0x008f},
-{0x878b,    0x0072}, {0x878c,    0x0027}, {0x878d,    0x001b},
-{0x878e,    0x007c}, {0x878f,    0x008f}, {0x8790,    0x007d},
-{0x8791,    0x007a}, {0x8792,    0x008f}, {0x8793,    0x0075},
-{0x8794,    0x0027}, {0x8795,    0x0016}, {0x8796,    0x007c},
-{0x8797,    0x008f}, {0x8798,    0x007d}, {0x8799,    0x007a},
-{0x879a,    0x008f}, {0x879b,    0x0078}, {0x879c,    0x0027},
-{0x879d,    0x0011}, {0x879e,    0x007c}, {0x879f,    0x008f},
-{0x87a0,    0x007d}, {0x87a1,    0x007a}, {0x87a2,    0x008f},
-{0x87a3,    0x007b}, {0x87a4,    0x0027}, {0x87a5,    0x000c},
-{0x87a6,    0x007e}, {0x87a7,    0x0087}, {0x87a8,    0x0083},
-{0x87a9,    0x007a}, {0x87aa,    0x008f}, {0x87ab,    0x0075},
-{0x87ac,    0x007a}, {0x87ad,    0x008f}, {0x87ae,    0x0078},
-{0x87af,    0x007a}, {0x87b0,    0x008f}, {0x87b1,    0x007b},
-{0x87b2,    0x00ce}, {0x87b3,    0x00c1}, {0x87b4,    0x00fc},
-{0x87b5,    0x00f6}, {0x87b6,    0x008f}, {0x87b7,    0x007d},
-{0x87b8,    0x003a}, {0x87b9,    0x00a6}, {0x87ba,    0x0000},
-{0x87bb,    0x00b7}, {0x87bc,    0x0012}, {0x87bd,    0x0070},
-{0x87be,    0x00b6}, {0x87bf,    0x008f}, {0x87c0,    0x0072},
-{0x87c1,    0x0026}, {0x87c2,    0x0003}, {0x87c3,    0x007e},
-{0x87c4,    0x0087}, {0x87c5,    0x00fa}, {0x87c6,    0x00b6},
-{0x87c7,    0x008f}, {0x87c8,    0x0075}, {0x87c9,    0x0026},
-{0x87ca,    0x000a}, {0x87cb,    0x0018}, {0x87cc,    0x00ce},
-{0x87cd,    0x008f}, {0x87ce,    0x0073}, {0x87cf,    0x00bd},
-{0x87d0,    0x0089}, {0x87d1,    0x00d5}, {0x87d2,    0x007e},
-{0x87d3,    0x0087}, {0x87d4,    0x00fa}, {0x87d5,    0x00b6},
-{0x87d6,    0x008f}, {0x87d7,    0x0078}, {0x87d8,    0x0026},
-{0x87d9,    0x000a}, {0x87da,    0x0018}, {0x87db,    0x00ce},
-{0x87dc,    0x008f}, {0x87dd,    0x0076}, {0x87de,    0x00bd},
-{0x87df,    0x0089}, {0x87e0,    0x00d5}, {0x87e1,    0x007e},
-{0x87e2,    0x0087}, {0x87e3,    0x00fa}, {0x87e4,    0x00b6},
-{0x87e5,    0x008f}, {0x87e6,    0x007b}, {0x87e7,    0x0026},
-{0x87e8,    0x000a}, {0x87e9,    0x0018}, {0x87ea,    0x00ce},
-{0x87eb,    0x008f}, {0x87ec,    0x0079}, {0x87ed,    0x00bd},
-{0x87ee,    0x0089}, {0x87ef,    0x00d5}, {0x87f0,    0x007e},
-{0x87f1,    0x0087}, {0x87f2,    0x00fa}, {0x87f3,    0x0086},
-{0x87f4,    0x0005}, {0x87f5,    0x0097}, {0x87f6,    0x0040},
-{0x87f7,    0x007e}, {0x87f8,    0x0089}, {0x87f9,    0x006e},
-{0x87fa,    0x00b6}, {0x87fb,    0x008f}, {0x87fc,    0x0075},
-{0x87fd,    0x0081}, {0x87fe,    0x0007}, {0x87ff,    0x002e},
-{0x8800,    0x00f2}, {0x8801,    0x00f6}, {0x8802,    0x0012},
-{0x8803,    0x0006}, {0x8804,    0x00c4}, {0x8805,    0x00f8},
-{0x8806,    0x001b}, {0x8807,    0x00b7}, {0x8808,    0x0012},
-{0x8809,    0x0006}, {0x880a,    0x00b6}, {0x880b,    0x008f},
-{0x880c,    0x0078}, {0x880d,    0x0081}, {0x880e,    0x0007},
-{0x880f,    0x002e}, {0x8810,    0x00e2}, {0x8811,    0x0048},
-{0x8812,    0x0048}, {0x8813,    0x0048}, {0x8814,    0x00f6},
-{0x8815,    0x0012}, {0x8816,    0x0006}, {0x8817,    0x00c4},
-{0x8818,    0x00c7}, {0x8819,    0x001b}, {0x881a,    0x00b7},
-{0x881b,    0x0012}, {0x881c,    0x0006}, {0x881d,    0x00b6},
-{0x881e,    0x008f}, {0x881f,    0x007b}, {0x8820,    0x0081},
-{0x8821,    0x0007}, {0x8822,    0x002e}, {0x8823,    0x00cf},
-{0x8824,    0x00f6}, {0x8825,    0x0012}, {0x8826,    0x0005},
-{0x8827,    0x00c4}, {0x8828,    0x00f8}, {0x8829,    0x001b},
-{0x882a,    0x00b7}, {0x882b,    0x0012}, {0x882c,    0x0005},
-{0x882d,    0x0086}, {0x882e,    0x0000}, {0x882f,    0x00f6},
-{0x8830,    0x008f}, {0x8831,    0x0071}, {0x8832,    0x00bd},
-{0x8833,    0x0089}, {0x8834,    0x0094}, {0x8835,    0x0086},
-{0x8836,    0x0001}, {0x8837,    0x00f6}, {0x8838,    0x008f},
-{0x8839,    0x0074}, {0x883a,    0x00bd}, {0x883b,    0x0089},
-{0x883c,    0x0094}, {0x883d,    0x0086}, {0x883e,    0x0002},
-{0x883f,    0x00f6}, {0x8840,    0x008f}, {0x8841,    0x0077},
-{0x8842,    0x00bd}, {0x8843,    0x0089}, {0x8844,    0x0094},
-{0x8845,    0x0086}, {0x8846,    0x0003}, {0x8847,    0x00f6},
-{0x8848,    0x008f}, {0x8849,    0x007a}, {0x884a,    0x00bd},
-{0x884b,    0x0089}, {0x884c,    0x0094}, {0x884d,    0x00ce},
-{0x884e,    0x008f}, {0x884f,    0x0070}, {0x8850,    0x00a6},
-{0x8851,    0x0001}, {0x8852,    0x0081}, {0x8853,    0x0001},
-{0x8854,    0x0027}, {0x8855,    0x0007}, {0x8856,    0x0081},
-{0x8857,    0x0003}, {0x8858,    0x0027}, {0x8859,    0x0003},
-{0x885a,    0x007e}, {0x885b,    0x0088}, {0x885c,    0x0066},
-{0x885d,    0x00a6}, {0x885e,    0x0000}, {0x885f,    0x00b8},
-{0x8860,    0x008f}, {0x8861,    0x0081}, {0x8862,    0x0084},
-{0x8863,    0x0001}, {0x8864,    0x0026}, {0x8865,    0x000b},
-{0x8866,    0x008c}, {0x8867,    0x008f}, {0x8868,    0x0079},
-{0x8869,    0x002c}, {0x886a,    0x000e}, {0x886b,    0x0008},
-{0x886c,    0x0008}, {0x886d,    0x0008}, {0x886e,    0x007e},
-{0x886f,    0x0088}, {0x8870,    0x0050}, {0x8871,    0x00b6},
-{0x8872,    0x0012}, {0x8873,    0x0004}, {0x8874,    0x008a},
-{0x8875,    0x0040}, {0x8876,    0x00b7}, {0x8877,    0x0012},
-{0x8878,    0x0004}, {0x8879,    0x00b6}, {0x887a,    0x0012},
-{0x887b,    0x0004}, {0x887c,    0x0084}, {0x887d,    0x00fb},
-{0x887e,    0x0084}, {0x887f,    0x00ef}, {0x8880,    0x00b7},
-{0x8881,    0x0012}, {0x8882,    0x0004}, {0x8883,    0x00b6},
-{0x8884,    0x0012}, {0x8885,    0x0007}, {0x8886,    0x0036},
-{0x8887,    0x00b6}, {0x8888,    0x008f}, {0x8889,    0x007c},
-{0x888a,    0x0048}, {0x888b,    0x0048}, {0x888c,    0x00b7},
-{0x888d,    0x0012}, {0x888e,    0x0007}, {0x888f,    0x0086},
-{0x8890,    0x0001}, {0x8891,    0x00ba}, {0x8892,    0x0012},
-{0x8893,    0x0004}, {0x8894,    0x00b7}, {0x8895,    0x0012},
-{0x8896,    0x0004}, {0x8897,    0x0001}, {0x8898,    0x0001},
-{0x8899,    0x0001}, {0x889a,    0x0001}, {0x889b,    0x0001},
-{0x889c,    0x0001}, {0x889d,    0x0086}, {0x889e,    0x00fe},
-{0x889f,    0x00b4}, {0x88a0,    0x0012}, {0x88a1,    0x0004},
-{0x88a2,    0x00b7}, {0x88a3,    0x0012}, {0x88a4,    0x0004},
-{0x88a5,    0x0086}, {0x88a6,    0x0002}, {0x88a7,    0x00ba},
-{0x88a8,    0x0012}, {0x88a9,    0x0004}, {0x88aa,    0x00b7},
-{0x88ab,    0x0012}, {0x88ac,    0x0004}, {0x88ad,    0x0086},
-{0x88ae,    0x00fd}, {0x88af,    0x00b4}, {0x88b0,    0x0012},
-{0x88b1,    0x0004}, {0x88b2,    0x00b7}, {0x88b3,    0x0012},
-{0x88b4,    0x0004}, {0x88b5,    0x0032}, {0x88b6,    0x00b7},
-{0x88b7,    0x0012}, {0x88b8,    0x0007}, {0x88b9,    0x00b6},
-{0x88ba,    0x0012}, {0x88bb,    0x0000}, {0x88bc,    0x0084},
-{0x88bd,    0x0008}, {0x88be,    0x0081}, {0x88bf,    0x0008},
-{0x88c0,    0x0027}, {0x88c1,    0x000f}, {0x88c2,    0x007c},
-{0x88c3,    0x0082}, {0x88c4,    0x0008}, {0x88c5,    0x0026},
-{0x88c6,    0x0007}, {0x88c7,    0x0086}, {0x88c8,    0x0076},
-{0x88c9,    0x0097}, {0x88ca,    0x0040}, {0x88cb,    0x007e},
-{0x88cc,    0x0089}, {0x88cd,    0x006e}, {0x88ce,    0x007e},
-{0x88cf,    0x0086}, {0x88d0,    0x00ec}, {0x88d1,    0x00b6},
-{0x88d2,    0x008f}, {0x88d3,    0x007f}, {0x88d4,    0x0081},
-{0x88d5,    0x000f}, {0x88d6,    0x0027}, {0x88d7,    0x003c},
-{0x88d8,    0x00bd}, {0x88d9,    0x00e6}, {0x88da,    0x00c7},
-{0x88db,    0x00b7}, {0x88dc,    0x0012}, {0x88dd,    0x000d},
-{0x88de,    0x00bd}, {0x88df,    0x00e6}, {0x88e0,    0x00cb},
-{0x88e1,    0x00b6}, {0x88e2,    0x0012}, {0x88e3,    0x0004},
-{0x88e4,    0x008a}, {0x88e5,    0x0020}, {0x88e6,    0x00b7},
-{0x88e7,    0x0012}, {0x88e8,    0x0004}, {0x88e9,    0x00ce},
-{0x88ea,    0x00ff}, {0x88eb,    0x00ff}, {0x88ec,    0x00b6},
-{0x88ed,    0x0012}, {0x88ee,    0x0000}, {0x88ef,    0x0081},
-{0x88f0,    0x000c}, {0x88f1,    0x0026}, {0x88f2,    0x0005},
-{0x88f3,    0x0009}, {0x88f4,    0x0026}, {0x88f5,    0x00f6},
-{0x88f6,    0x0027}, {0x88f7,    0x001c}, {0x88f8,    0x00b6},
-{0x88f9,    0x0012}, {0x88fa,    0x0004}, {0x88fb,    0x0084},
-{0x88fc,    0x00df}, {0x88fd,    0x00b7}, {0x88fe,    0x0012},
-{0x88ff,    0x0004}, {0x8900,    0x0096}, {0x8901,    0x0083},
-{0x8902,    0x0081}, {0x8903,    0x0007}, {0x8904,    0x002c},
-{0x8905,    0x0005}, {0x8906,    0x007c}, {0x8907,    0x0000},
-{0x8908,    0x0083}, {0x8909,    0x0020}, {0x890a,    0x0006},
-{0x890b,    0x0096}, {0x890c,    0x0083}, {0x890d,    0x008b},
-{0x890e,    0x0008}, {0x890f,    0x0097}, {0x8910,    0x0083},
-{0x8911,    0x007e}, {0x8912,    0x0085}, {0x8913,    0x0041},
-{0x8914,    0x007f}, {0x8915,    0x008f}, {0x8916,    0x007e},
-{0x8917,    0x0086}, {0x8918,    0x0080}, {0x8919,    0x00b7},
-{0x891a,    0x0012}, {0x891b,    0x000c}, {0x891c,    0x0086},
-{0x891d,    0x0001}, {0x891e,    0x00b7}, {0x891f,    0x008f},
-{0x8920,    0x007d}, {0x8921,    0x00b6}, {0x8922,    0x0012},
-{0x8923,    0x000c}, {0x8924,    0x0084}, {0x8925,    0x007f},
-{0x8926,    0x00b7}, {0x8927,    0x0012}, {0x8928,    0x000c},
-{0x8929,    0x008a}, {0x892a,    0x0080}, {0x892b,    0x00b7},
-{0x892c,    0x0012}, {0x892d,    0x000c}, {0x892e,    0x0086},
-{0x892f,    0x000a}, {0x8930,    0x00bd}, {0x8931,    0x008a},
-{0x8932,    0x0006}, {0x8933,    0x00b6}, {0x8934,    0x0012},
-{0x8935,    0x000a}, {0x8936,    0x002a}, {0x8937,    0x0009},
-{0x8938,    0x00b6}, {0x8939,    0x0012}, {0x893a,    0x000c},
-{0x893b,    0x00ba}, {0x893c,    0x008f}, {0x893d,    0x007d},
-{0x893e,    0x00b7}, {0x893f,    0x0012}, {0x8940,    0x000c},
-{0x8941,    0x00b6}, {0x8942,    0x008f}, {0x8943,    0x007e},
-{0x8944,    0x0081}, {0x8945,    0x0060}, {0x8946,    0x0027},
-{0x8947,    0x001a}, {0x8948,    0x008b}, {0x8949,    0x0020},
-{0x894a,    0x00b7}, {0x894b,    0x008f}, {0x894c,    0x007e},
-{0x894d,    0x00b6}, {0x894e,    0x0012}, {0x894f,    0x000c},
-{0x8950,    0x0084}, {0x8951,    0x009f}, {0x8952,    0x00ba},
-{0x8953,    0x008f}, {0x8954,    0x007e}, {0x8955,    0x00b7},
-{0x8956,    0x0012}, {0x8957,    0x000c}, {0x8958,    0x00b6},
-{0x8959,    0x008f}, {0x895a,    0x007d}, {0x895b,    0x0048},
-{0x895c,    0x00b7}, {0x895d,    0x008f}, {0x895e,    0x007d},
-{0x895f,    0x007e}, {0x8960,    0x0089}, {0x8961,    0x0021},
-{0x8962,    0x00b6}, {0x8963,    0x0012}, {0x8964,    0x0004},
-{0x8965,    0x008a}, {0x8966,    0x0020}, {0x8967,    0x00b7},
-{0x8968,    0x0012}, {0x8969,    0x0004}, {0x896a,    0x00bd},
-{0x896b,    0x008a}, {0x896c,    0x000a}, {0x896d,    0x004f},
-{0x896e,    0x0039}, {0x896f,    0x00a6}, {0x8970,    0x0000},
-{0x8971,    0x0018}, {0x8972,    0x00a7}, {0x8973,    0x0000},
-{0x8974,    0x0008}, {0x8975,    0x0018}, {0x8976,    0x0008},
-{0x8977,    0x005a}, {0x8978,    0x0026}, {0x8979,    0x00f5},
-{0x897a,    0x0039}, {0x897b,    0x0036}, {0x897c,    0x006c},
-{0x897d,    0x0000}, {0x897e,    0x0032}, {0x897f,    0x00ba},
-{0x8980,    0x008f}, {0x8981,    0x007f}, {0x8982,    0x00b7},
-{0x8983,    0x008f}, {0x8984,    0x007f}, {0x8985,    0x00b6},
-{0x8986,    0x0012}, {0x8987,    0x0009}, {0x8988,    0x0084},
-{0x8989,    0x0003}, {0x898a,    0x00a7}, {0x898b,    0x0001},
-{0x898c,    0x00b6}, {0x898d,    0x0012}, {0x898e,    0x0006},
-{0x898f,    0x0084}, {0x8990,    0x003f}, {0x8991,    0x00a7},
-{0x8992,    0x0002}, {0x8993,    0x0039}, {0x8994,    0x0036},
-{0x8995,    0x0086}, {0x8996,    0x0003}, {0x8997,    0x00b7},
-{0x8998,    0x008f}, {0x8999,    0x0080}, {0x899a,    0x0032},
-{0x899b,    0x00c1}, {0x899c,    0x0000}, {0x899d,    0x0026},
-{0x899e,    0x0006}, {0x899f,    0x00b7}, {0x89a0,    0x008f},
-{0x89a1,    0x007c}, {0x89a2,    0x007e}, {0x89a3,    0x0089},
-{0x89a4,    0x00c9}, {0x89a5,    0x00c1}, {0x89a6,    0x0001},
-{0x89a7,    0x0027}, {0x89a8,    0x0018}, {0x89a9,    0x00c1},
-{0x89aa,    0x0002}, {0x89ab,    0x0027}, {0x89ac,    0x000c},
-{0x89ad,    0x00c1}, {0x89ae,    0x0003}, {0x89af,    0x0027},
-{0x89b0,    0x0000}, {0x89b1,    0x00f6}, {0x89b2,    0x008f},
-{0x89b3,    0x0080}, {0x89b4,    0x0005}, {0x89b5,    0x0005},
-{0x89b6,    0x00f7}, {0x89b7,    0x008f}, {0x89b8,    0x0080},
-{0x89b9,    0x00f6}, {0x89ba,    0x008f}, {0x89bb,    0x0080},
-{0x89bc,    0x0005}, {0x89bd,    0x0005}, {0x89be,    0x00f7},
-{0x89bf,    0x008f}, {0x89c0,    0x0080}, {0x89c1,    0x00f6},
-{0x89c2,    0x008f}, {0x89c3,    0x0080}, {0x89c4,    0x0005},
-{0x89c5,    0x0005}, {0x89c6,    0x00f7}, {0x89c7,    0x008f},
-{0x89c8,    0x0080}, {0x89c9,    0x00f6}, {0x89ca,    0x008f},
-{0x89cb,    0x0080}, {0x89cc,    0x0053}, {0x89cd,    0x00f4},
-{0x89ce,    0x0012}, {0x89cf,    0x0007}, {0x89d0,    0x001b},
-{0x89d1,    0x00b7}, {0x89d2,    0x0012}, {0x89d3,    0x0007},
-{0x89d4,    0x0039}, {0x89d5,    0x00ce}, {0x89d6,    0x008f},
-{0x89d7,    0x0070}, {0x89d8,    0x00a6}, {0x89d9,    0x0000},
-{0x89da,    0x0018}, {0x89db,    0x00e6}, {0x89dc,    0x0000},
-{0x89dd,    0x0018}, {0x89de,    0x00a7}, {0x89df,    0x0000},
-{0x89e0,    0x00e7}, {0x89e1,    0x0000}, {0x89e2,    0x00a6},
-{0x89e3,    0x0001}, {0x89e4,    0x0018}, {0x89e5,    0x00e6},
-{0x89e6,    0x0001}, {0x89e7,    0x0018}, {0x89e8,    0x00a7},
-{0x89e9,    0x0001}, {0x89ea,    0x00e7}, {0x89eb,    0x0001},
-{0x89ec,    0x00a6}, {0x89ed,    0x0002}, {0x89ee,    0x0018},
-{0x89ef,    0x00e6}, {0x89f0,    0x0002}, {0x89f1,    0x0018},
-{0x89f2,    0x00a7}, {0x89f3,    0x0002}, {0x89f4,    0x00e7},
-{0x89f5,    0x0002}, {0x89f6,    0x0039}, {0x89f7,    0x00a6},
-{0x89f8,    0x0000}, {0x89f9,    0x0084}, {0x89fa,    0x0007},
-{0x89fb,    0x00e6}, {0x89fc,    0x0000}, {0x89fd,    0x00c4},
-{0x89fe,    0x0038}, {0x89ff,    0x0054}, {0x8a00,    0x0054},
-{0x8a01,    0x0054}, {0x8a02,    0x001b}, {0x8a03,    0x00a7},
-{0x8a04,    0x0000}, {0x8a05,    0x0039}, {0x8a06,    0x004a},
-{0x8a07,    0x0026}, {0x8a08,    0x00fd}, {0x8a09,    0x0039},
-{0x8a0a,    0x0096}, {0x8a0b,    0x0022}, {0x8a0c,    0x0084},
-{0x8a0d,    0x000f}, {0x8a0e,    0x0097}, {0x8a0f,    0x0022},
-{0x8a10,    0x0086}, {0x8a11,    0x0001}, {0x8a12,    0x00b7},
-{0x8a13,    0x008f}, {0x8a14,    0x0070}, {0x8a15,    0x00b6},
-{0x8a16,    0x0012}, {0x8a17,    0x0007}, {0x8a18,    0x00b7},
-{0x8a19,    0x008f}, {0x8a1a,    0x0071}, {0x8a1b,    0x00f6},
-{0x8a1c,    0x0012}, {0x8a1d,    0x000c}, {0x8a1e,    0x00c4},
-{0x8a1f,    0x000f}, {0x8a20,    0x00c8}, {0x8a21,    0x000f},
-{0x8a22,    0x00f7}, {0x8a23,    0x008f}, {0x8a24,    0x0072},
-{0x8a25,    0x00f6}, {0x8a26,    0x008f}, {0x8a27,    0x0072},
-{0x8a28,    0x00b6}, {0x8a29,    0x008f}, {0x8a2a,    0x0071},
-{0x8a2b,    0x0084}, {0x8a2c,    0x0003}, {0x8a2d,    0x0027},
-{0x8a2e,    0x0014}, {0x8a2f,    0x0081}, {0x8a30,    0x0001},
-{0x8a31,    0x0027}, {0x8a32,    0x001c}, {0x8a33,    0x0081},
-{0x8a34,    0x0002}, {0x8a35,    0x0027}, {0x8a36,    0x0024},
-{0x8a37,    0x00f4}, {0x8a38,    0x008f}, {0x8a39,    0x0070},
-{0x8a3a,    0x0027}, {0x8a3b,    0x002a}, {0x8a3c,    0x0096},
-{0x8a3d,    0x0022}, {0x8a3e,    0x008a}, {0x8a3f,    0x0080},
-{0x8a40,    0x007e}, {0x8a41,    0x008a}, {0x8a42,    0x0064},
-{0x8a43,    0x00f4}, {0x8a44,    0x008f}, {0x8a45,    0x0070},
-{0x8a46,    0x0027}, {0x8a47,    0x001e}, {0x8a48,    0x0096},
-{0x8a49,    0x0022}, {0x8a4a,    0x008a}, {0x8a4b,    0x0010},
-{0x8a4c,    0x007e}, {0x8a4d,    0x008a}, {0x8a4e,    0x0064},
-{0x8a4f,    0x00f4}, {0x8a50,    0x008f}, {0x8a51,    0x0070},
-{0x8a52,    0x0027}, {0x8a53,    0x0012}, {0x8a54,    0x0096},
-{0x8a55,    0x0022}, {0x8a56,    0x008a}, {0x8a57,    0x0020},
-{0x8a58,    0x007e}, {0x8a59,    0x008a}, {0x8a5a,    0x0064},
-{0x8a5b,    0x00f4}, {0x8a5c,    0x008f}, {0x8a5d,    0x0070},
-{0x8a5e,    0x0027}, {0x8a5f,    0x0006}, {0x8a60,    0x0096},
-{0x8a61,    0x0022}, {0x8a62,    0x008a}, {0x8a63,    0x0040},
-{0x8a64,    0x0097}, {0x8a65,    0x0022}, {0x8a66,    0x0074},
-{0x8a67,    0x008f}, {0x8a68,    0x0071}, {0x8a69,    0x0074},
-{0x8a6a,    0x008f}, {0x8a6b,    0x0071}, {0x8a6c,    0x0078},
-{0x8a6d,    0x008f}, {0x8a6e,    0x0070}, {0x8a6f,    0x00b6},
-{0x8a70,    0x008f}, {0x8a71,    0x0070}, {0x8a72,    0x0085},
-{0x8a73,    0x0010}, {0x8a74,    0x0027}, {0x8a75,    0x00af},
-{0x8a76,    0x00d6}, {0x8a77,    0x0022}, {0x8a78,    0x00c4},
-{0x8a79,    0x0010}, {0x8a7a,    0x0058}, {0x8a7b,    0x00b6},
-{0x8a7c,    0x0012}, {0x8a7d,    0x0070}, {0x8a7e,    0x0081},
-{0x8a7f,    0x00e4}, {0x8a80,    0x0027}, {0x8a81,    0x0036},
-{0x8a82,    0x0081}, {0x8a83,    0x00e1}, {0x8a84,    0x0026},
-{0x8a85,    0x000c}, {0x8a86,    0x0096}, {0x8a87,    0x0022},
-{0x8a88,    0x0084}, {0x8a89,    0x0020}, {0x8a8a,    0x0044},
-{0x8a8b,    0x001b}, {0x8a8c,    0x00d6}, {0x8a8d,    0x0022},
-{0x8a8e,    0x00c4}, {0x8a8f,    0x00cf}, {0x8a90,    0x0020},
-{0x8a91,    0x0023}, {0x8a92,    0x0058}, {0x8a93,    0x0081},
-{0x8a94,    0x00c6}, {0x8a95,    0x0026}, {0x8a96,    0x000d},
-{0x8a97,    0x0096}, {0x8a98,    0x0022}, {0x8a99,    0x0084},
-{0x8a9a,    0x0040}, {0x8a9b,    0x0044}, {0x8a9c,    0x0044},
-{0x8a9d,    0x001b}, {0x8a9e,    0x00d6}, {0x8a9f,    0x0022},
-{0x8aa0,    0x00c4}, {0x8aa1,    0x00af}, {0x8aa2,    0x0020},
-{0x8aa3,    0x0011}, {0x8aa4,    0x0058}, {0x8aa5,    0x0081},
-{0x8aa6,    0x0027}, {0x8aa7,    0x0026}, {0x8aa8,    0x000f},
-{0x8aa9,    0x0096}, {0x8aaa,    0x0022}, {0x8aab,    0x0084},
-{0x8aac,    0x0080}, {0x8aad,    0x0044}, {0x8aae,    0x0044},
-{0x8aaf,    0x0044}, {0x8ab0,    0x001b}, {0x8ab1,    0x00d6},
-{0x8ab2,    0x0022}, {0x8ab3,    0x00c4}, {0x8ab4,    0x006f},
-{0x8ab5,    0x001b}, {0x8ab6,    0x0097}, {0x8ab7,    0x0022},
-{0x8ab8,    0x0039}, {0x8ab9,    0x0027}, {0x8aba,    0x000c},
-{0x8abb,    0x007c}, {0x8abc,    0x0082}, {0x8abd,    0x0006},
-{0x8abe,    0x00bd}, {0x8abf,    0x00d9}, {0x8ac0,    0x00ed},
-{0x8ac1,    0x00b6}, {0x8ac2,    0x0082}, {0x8ac3,    0x0007},
-{0x8ac4,    0x007e}, {0x8ac5,    0x008a}, {0x8ac6,    0x00b9},
-{0x8ac7,    0x007f}, {0x8ac8,    0x0082}, {0x8ac9,    0x0006},
-{0x8aca,    0x0039}, { 0x0, 0x0 }
-};
-#endif
-
-
 /* phy types */
 #define   CAS_PHY_UNKNOWN       0x00
 #define   CAS_PHY_SERDES        0x01
@@ -4389,6 +2872,11 @@
 	dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
 	struct pci_dev *pdev;
 	struct net_device *dev;
+
+	/* Firmware Info */
+	u16			fw_load_addr;
+	u32			fw_size;
+	u8			*fw_data;
 };
 
 #define TX_DESC_NEXT(r, x)  (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
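Note (editor, not part of the patch): the hunk above removes the firmware data table that used to be compiled into the driver and adds fw_load_addr/fw_size/fw_data fields to the adapter private structure, which implies the image is now obtained at runtime. A minimal sketch of how those fields could be populated with the in-kernel firmware loader follows; the helper name, the firmware file name supplied by the caller, the "struct cas" type name and the assumption that the first two bytes of the image carry the load address are all illustrative, not taken from this patch.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

static int cas_load_firmware_sketch(struct cas *cp, const char *fw_name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err)
		return err;

	if (fw->size < 2) {		/* need at least the load address */
		err = -EINVAL;
		goto out;
	}
	/* assumed image layout: little-endian load address, then the code */
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data) {
		err = -ENOMEM;
		goto out;
	}
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
	return err;
}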
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ea6144a..b0b6676 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1397,9 +1397,7 @@
 release_dma:
 #if ALLOW_DMA
 		free_dma(dev->dma);
-#endif
 release_irq:
-#if ALLOW_DMA
 		release_dma_buff(lp);
 #endif
                 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 2711404..4f5cc69 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -124,8 +124,7 @@
 	dma_addr_t phys_addr;	/* physical address of the ring */
 	unsigned int cntxt_id;	/* SGE context id for the response q */
 	spinlock_t lock;	/* guards response processing */
-	struct sk_buff *rx_head;	/* offload packet receive queue head */
-	struct sk_buff *rx_tail;	/* offload packet receive queue tail */
+	struct sk_buff_head rx_queue; /* offload packet receive queue */
 	struct sk_buff *pg_skb; /* used to build frag list in napi handler */
 
 	unsigned long offload_pkts;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c5b3de1..0f6fd63 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1018,7 +1018,7 @@
 
 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
+		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
 		return;
 	}
 	skb->priority = CPL_PRIORITY_CONTROL;
@@ -1049,14 +1049,14 @@
 		return;
 	if (!is_offloading(newdev)) {
 		printk(KERN_WARNING "%s: Redirect to non-offload "
-		       "device ignored.\n", __FUNCTION__);
+		       "device ignored.\n", __func__);
 		return;
 	}
 	tdev = dev2t3cdev(olddev);
 	BUG_ON(!tdev);
 	if (tdev != dev2t3cdev(newdev)) {
 		printk(KERN_WARNING "%s: Redirect to different "
-		       "offload device ignored.\n", __FUNCTION__);
+		       "offload device ignored.\n", __func__);
 		return;
 	}
 
@@ -1064,7 +1064,7 @@
 	e = t3_l2t_get(tdev, new->neighbour, newdev);
 	if (!e) {
 		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		return;
 	}
 
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 825e510..b2c5314 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -86,6 +86,7 @@
 				  struct l2t_entry *e)
 {
 	struct cpl_l2t_write_req *req;
+	struct sk_buff *tmp;
 
 	if (!skb) {
 		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
@@ -103,13 +104,11 @@
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 	skb->priority = CPL_PRIORITY_CONTROL;
 	cxgb3_ofld_send(dev, skb);
-	while (e->arpq_head) {
-		skb = e->arpq_head;
-		e->arpq_head = skb->next;
-		skb->next = NULL;
+
+	skb_queue_walk_safe(&e->arpq, skb, tmp) {
+		__skb_unlink(skb, &e->arpq);
 		cxgb3_ofld_send(dev, skb);
 	}
-	e->arpq_tail = NULL;
 	e->state = L2T_STATE_VALID;
 
 	return 0;
@@ -121,12 +120,7 @@
  */
 static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
 {
-	skb->next = NULL;
-	if (e->arpq_head)
-		e->arpq_tail->next = skb;
-	else
-		e->arpq_head = skb;
-	e->arpq_tail = skb;
+	__skb_queue_tail(&e->arpq, skb);
 }
 
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
@@ -167,7 +161,7 @@
 				break;
 
 			spin_lock_bh(&e->lock);
-			if (e->arpq_head)
+			if (!skb_queue_empty(&e->arpq))
 				setup_l2e_send_pending(dev, skb, e);
 			else	/* we lost the race */
 				__kfree_skb(skb);
@@ -357,14 +351,14 @@
  * XXX: maybe we should abandon the latter behavior and just require a failure
  * handler.
  */
-static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
 {
-	while (arpq) {
-		struct sk_buff *skb = arpq;
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(arpq, skb, tmp) {
 		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
 
-		arpq = skb->next;
-		skb->next = NULL;
+		__skb_unlink(skb, arpq);
 		if (cb->arp_failure_handler)
 			cb->arp_failure_handler(dev, skb);
 		else
@@ -378,8 +372,8 @@
  */
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
 {
+	struct sk_buff_head arpq;
 	struct l2t_entry *e;
-	struct sk_buff *arpq = NULL;
 	struct l2t_data *d = L2DATA(dev);
 	u32 addr = *(u32 *) neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
@@ -395,6 +389,8 @@
 	return;
 
 found:
+	__skb_queue_head_init(&arpq);
+
 	read_unlock(&d->lock);
 	if (atomic_read(&e->refcnt)) {
 		if (neigh != e->neigh)
@@ -402,8 +398,7 @@
 
 		if (e->state == L2T_STATE_RESOLVING) {
 			if (neigh->nud_state & NUD_FAILED) {
-				arpq = e->arpq_head;
-				e->arpq_head = e->arpq_tail = NULL;
+				skb_queue_splice_init(&e->arpq, &arpq);
 			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
 				setup_l2e_send_pending(dev, NULL, e);
 		} else {
@@ -415,8 +410,8 @@
 	}
 	spin_unlock_bh(&e->lock);
 
-	if (arpq)
-		handle_failed_resolution(dev, arpq);
+	if (!skb_queue_empty(&arpq))
+		handle_failed_resolution(dev, &arpq);
 }
 
 struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index d790013..42ce65f 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -64,8 +64,7 @@
 	struct neighbour *neigh;	/* associated neighbour */
 	struct l2t_entry *first;	/* start of hash chain */
 	struct l2t_entry *next;	/* next l2t_entry on chain */
-	struct sk_buff *arpq_head;	/* queue of packets awaiting resolution */
-	struct sk_buff *arpq_tail;
+	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
 	spinlock_t lock;
 	atomic_t refcnt;	/* entry reference count */
 	u8 dmac[6];		/* neighbour's MAC address */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d..89efd04 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1704,16 +1704,15 @@
  */
 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
 {
-	skb->next = skb->prev = NULL;
-	if (q->rx_tail)
-		q->rx_tail->next = skb;
-	else {
+	int was_empty = skb_queue_empty(&q->rx_queue);
+
+	__skb_queue_tail(&q->rx_queue, skb);
+
+	if (was_empty) {
 		struct sge_qset *qs = rspq_to_qset(q);
 
 		napi_schedule(&qs->napi);
-		q->rx_head = skb;
 	}
-	q->rx_tail = skb;
 }
 
 /**
@@ -1754,26 +1753,29 @@
 	int work_done = 0;
 
 	while (work_done < budget) {
-		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+		struct sk_buff_head queue;
 		int ngathered;
 
 		spin_lock_irq(&q->lock);
-		head = q->rx_head;
-		if (!head) {
+		__skb_queue_head_init(&queue);
+		skb_queue_splice_init(&q->rx_queue, &queue);
+		if (skb_queue_empty(&queue)) {
 			napi_complete(napi);
 			spin_unlock_irq(&q->lock);
 			return work_done;
 		}
-
-		tail = q->rx_tail;
-		q->rx_head = q->rx_tail = NULL;
 		spin_unlock_irq(&q->lock);
 
-		for (ngathered = 0; work_done < budget && head; work_done++) {
-			prefetch(head->data);
-			skbs[ngathered] = head;
-			head = head->next;
-			skbs[ngathered]->next = NULL;
+		ngathered = 0;
+		skb_queue_walk_safe(&queue, skb, tmp) {
+			if (work_done >= budget)
+				break;
+			work_done++;
+
+			__skb_unlink(skb, &queue);
+			prefetch(skb->data);
+			skbs[ngathered] = skb;
 			if (++ngathered == RX_BUNDLE_SIZE) {
 				q->offload_bundles++;
 				adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1783,10 @@
 				ngathered = 0;
 			}
 		}
-		if (head) {	/* splice remaining packets back onto Rx queue */
+		if (!skb_queue_empty(&queue)) {
+			/* splice remaining packets back onto Rx queue */
 			spin_lock_irq(&q->lock);
-			tail->next = q->rx_head;
-			if (!q->rx_head)
-				q->rx_tail = tail;
-			q->rx_head = head;
+			skb_queue_splice(&queue, &q->rx_queue);
 			spin_unlock_irq(&q->lock);
 		}
 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
@@ -1937,38 +1937,6 @@
 		eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
 }
 
-#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
-                       TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
-		                       TCP_FLAG_SYN | TCP_FLAG_FIN)
-#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
-                     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
-
-/**
- *	lro_segment_ok - check if a TCP segment is eligible for LRO
- *	@tcph: the TCP header of the packet
- *
- *	Returns true if a TCP packet is eligible for LRO.  This requires that
- *	the packet have only the ACK flag set and no TCP options besides
- *	time stamps.
- */
-static inline int lro_segment_ok(const struct tcphdr *tcph)
-{
-	int optlen;
-
-	if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
-		return 0;
-
-	optlen = (tcph->doff << 2) - sizeof(*tcph);
-	if (optlen) {
-		const u32 *opt = (const u32 *)(tcph + 1);
-
-		if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
-		    *opt != htonl(TSTAMP_WORD) || !opt[2])
-			return 0;
-	}
-	return 1;
-}
-
 static int t3_get_lro_header(void **eh,  void **iph, void **tcph,
 			     u64 *hdr_flags, void *priv)
 {
@@ -1981,9 +1949,6 @@
 	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
 	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
 
-	 if (!lro_segment_ok(*tcph))
-		return -1;
-
 	*hdr_flags = LRO_IPV4 | LRO_TCP;
 	return 0;
 }
@@ -2934,6 +2899,7 @@
 	q->rspq.gen = 1;
 	q->rspq.size = p->rspq_size;
 	spin_lock_init(&q->rspq.lock);
+	skb_queue_head_init(&q->rspq.rx_queue);
 
 	q->txq[TXQ_ETH].stop_thres = nports *
 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
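Note (editor, not part of the patch): the cxgb3 changes above (l2t.c, l2t.h, sge.c) replace the hand-rolled rx_head/rx_tail and arpq_head/arpq_tail skb lists with struct sk_buff_head and the generic queue helpers. The sketch below condenses that pattern -- initialize once, enqueue under a lock, then splice the whole backlog off and walk it safely on the consumer side. All function and variable names here are made up for illustration; only the skb queue API calls mirror the patch.

/* Illustrative sketch of the sk_buff_head pattern used above. */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_rxq {
	spinlock_t lock;
	struct sk_buff_head rx_queue;
};

static void demo_rxq_init(struct demo_rxq *q)
{
	spin_lock_init(&q->lock);
	skb_queue_head_init(&q->rx_queue);	/* as in the rspq setup above */
}

/* producer side: append one skb, as offload_enqueue() now does */
static void demo_enqueue(struct demo_rxq *q, struct sk_buff *skb)
{
	spin_lock_bh(&q->lock);
	__skb_queue_tail(&q->rx_queue, skb);
	spin_unlock_bh(&q->lock);
}

/* consumer side: take the whole backlog in one go and process it,
 * mirroring the splice-and-walk loop in the napi poll hunk above
 */
static void demo_drain(struct demo_rxq *q)
{
	struct sk_buff_head local;
	struct sk_buff *skb, *tmp;

	__skb_queue_head_init(&local);

	spin_lock_bh(&q->lock);
	skb_queue_splice_init(&q->rx_queue, &local);
	spin_unlock_bh(&q->lock);

	skb_queue_walk_safe(&local, skb, tmp) {
		__skb_unlink(skb, &local);
		kfree_skb(skb);		/* stand-in for real per-skb work */
	}
}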
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5cf78d6..3d69fae 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -191,7 +191,7 @@
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
-		__FUNCTION__ , ## args))
+		__func__ , ## args))
 
 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 19e317e..62f6297 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -155,8 +155,6 @@
 #endif
 
 #define E1000_MNG_VLAN_NONE (-1)
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
 
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
@@ -168,14 +166,6 @@
 	u16 next_to_watch;
 };
 
-struct e1000_ps_page {
-	struct page *ps_page[PS_PAGE_BUFFERS];
-};
-
-struct e1000_ps_page_dma {
-	u64 ps_page_dma[PS_PAGE_BUFFERS];
-};
-
 struct e1000_tx_ring {
 	/* pointer to the descriptor ring memory */
 	void *desc;
@@ -213,9 +203,6 @@
 	unsigned int next_to_clean;
 	/* array of buffer information structs */
 	struct e1000_buffer *buffer_info;
-	/* arrays of page information for packet split */
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
 
 	/* cpu for rx queue */
 	int cpu;
@@ -228,8 +215,6 @@
 	((((R)->next_to_clean > (R)->next_to_use)			\
 	  ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
 
-#define E1000_RX_DESC_PS(R, i)						\
-	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
 #define E1000_RX_DESC_EXT(R, i)						\
 	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
@@ -311,10 +296,8 @@
 	u32 rx_int_delay;
 	u32 rx_abs_int_delay;
 	bool rx_csum;
-	unsigned int rx_ps_pages;
 	u32 gorcl;
 	u64 gorcl_old;
-	u16 rx_ps_bsize0;
 
 	/* OS defined structs */
 	struct net_device *netdev;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ad6da7b..2ab44db 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -137,15 +137,9 @@
 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do);
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring,
 				   int cleaned_count);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -1331,7 +1325,6 @@
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
 	hw->max_frame_size = netdev->mtu +
 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1815,26 +1808,6 @@
 	}
 	memset(rxdr->buffer_info, 0, size);
 
-	rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
-	                        GFP_KERNEL);
-	if (!rxdr->ps_page) {
-		vfree(rxdr->buffer_info);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
-	rxdr->ps_page_dma = kcalloc(rxdr->count,
-	                            sizeof(struct e1000_ps_page_dma),
-	                            GFP_KERNEL);
-	if (!rxdr->ps_page_dma) {
-		vfree(rxdr->buffer_info);
-		kfree(rxdr->ps_page);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
 	if (hw->mac_type <= e1000_82547_rev_2)
 		desc_len = sizeof(struct e1000_rx_desc);
 	else
@@ -1852,8 +1825,6 @@
 		"Unable to allocate memory for the receive descriptor ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
-		kfree(rxdr->ps_page);
-		kfree(rxdr->ps_page_dma);
 		return -ENOMEM;
 	}
 
@@ -1932,11 +1903,7 @@
 static void e1000_setup_rctl(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rfctl;
-	u32 psrctl = 0;
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	u32 pages = 0;
-#endif
+	u32 rctl;
 
 	rctl = er32(RCTL);
 
@@ -1988,55 +1955,6 @@
 			break;
 	}
 
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	/* 82571 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-	if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
-	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
-		adapter->rx_ps_pages = pages;
-	else
-		adapter->rx_ps_pages = 0;
-#endif
-	if (adapter->rx_ps_pages) {
-		/* Configure extra packet-split registers */
-		rfctl = er32(RFCTL);
-		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the RX */
-		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
-		          E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
-		ew32(RFCTL, rfctl);
-
-		rctl |= E1000_RCTL_DTYP_PS;
-
-		psrctl |= adapter->rx_ps_bsize0 >>
-			E1000_PSRCTL_BSIZE0_SHIFT;
-
-		switch (adapter->rx_ps_pages) {
-		case 3:
-			psrctl |= PAGE_SIZE <<
-				E1000_PSRCTL_BSIZE3_SHIFT;
-		case 2:
-			psrctl |= PAGE_SIZE <<
-				E1000_PSRCTL_BSIZE2_SHIFT;
-		case 1:
-			psrctl |= PAGE_SIZE >>
-				E1000_PSRCTL_BSIZE1_SHIFT;
-			break;
-		}
-
-		ew32(PSRCTL, psrctl);
-	}
-
 	ew32(RCTL, rctl);
 }
 
@@ -2053,18 +1971,10 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rdlen, rctl, rxcsum, ctrl_ext;
 
-	if (adapter->rx_ps_pages) {
-		/* this is a 32 byte descriptor */
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(union e1000_rx_desc_packet_split);
-		adapter->clean_rx = e1000_clean_rx_irq_ps;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
-	} else {
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(struct e1000_rx_desc);
-		adapter->clean_rx = e1000_clean_rx_irq;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
-	}
+	rdlen = adapter->rx_ring[0].count *
+		sizeof(struct e1000_rx_desc);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 
 	/* disable receives while setting up the descriptors */
 	rctl = er32(RCTL);
@@ -2109,28 +2019,14 @@
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = er32(RXCSUM);
-		if (adapter->rx_csum) {
+		if (adapter->rx_csum)
 			rxcsum |= E1000_RXCSUM_TUOFL;
-
-			/* Enable 82571 IPv4 payload checksum for UDP fragments
-			 * Must be used in conjunction with packet-split. */
-			if ((hw->mac_type >= e1000_82571) &&
-			    (adapter->rx_ps_pages)) {
-				rxcsum |= E1000_RXCSUM_IPPCSE;
-			}
-		} else {
-			rxcsum &= ~E1000_RXCSUM_TUOFL;
+		else
 			/* don't need to clear IPPCSE as it defaults to 0 */
-		}
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
 		ew32(RXCSUM, rxcsum);
 	}
 
-	/* enable early receives on 82573, only takes effect if using > 2048
-	 * byte total frame size.  for example only for jumbo frames */
-#define E1000_ERT_2048 0x100
-	if (hw->mac_type == e1000_82573)
-		ew32(ERT, E1000_ERT_2048);
-
 	/* Enable Receives */
 	ew32(RCTL, rctl);
 }
@@ -2256,10 +2152,6 @@
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
-	kfree(rx_ring->ps_page);
-	rx_ring->ps_page = NULL;
-	kfree(rx_ring->ps_page_dma);
-	rx_ring->ps_page_dma = NULL;
 
 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
@@ -2292,11 +2184,9 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
-	unsigned int i, j;
+	unsigned int i;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
@@ -2310,25 +2200,10 @@
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			if (!ps_page->ps_page[j]) break;
-			pci_unmap_page(pdev,
-				       ps_page_dma->ps_page_dma[j],
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			put_page(ps_page->ps_page[j]);
-			ps_page->ps_page[j] = NULL;
-		}
 	}
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
-	size = sizeof(struct e1000_ps_page) * rx_ring->count;
-	memset(rx_ring->ps_page, 0, size);
-	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
-	memset(rx_ring->ps_page_dma, 0, size);
 
 	/* Zero out the descriptor ring */
 
@@ -4235,181 +4110,6 @@
 }
 
 /**
- * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
- * @adapter: board private structure
- **/
-
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do)
-{
-	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_buffer *buffer_info, *next_buffer;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-	u32 length, staterr;
-	int cleaned_count = 0;
-	bool cleaned = false;
-	unsigned int total_rx_bytes=0, total_rx_packets=0;
-
-	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (staterr & E1000_RXD_STAT_DD) {
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-
-		if (unlikely(*work_done >= work_to_do))
-			break;
-		(*work_done)++;
-
-		skb = buffer_info->skb;
-
-		/* in the packet split case this is header only */
-		prefetch(skb->data - NET_IP_ALIGN);
-
-		if (++i == rx_ring->count) i = 0;
-		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
-		prefetch(next_rxd);
-
-		next_buffer = &rx_ring->buffer_info[i];
-
-		cleaned = true;
-		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma,
-				 buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
-
-		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
-			E1000_DBG("%s: Packet Split buffers didn't pick up"
-				  " the full packet\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		length = le16_to_cpu(rx_desc->wb.middle.length0);
-
-		if (unlikely(!length)) {
-			E1000_DBG("%s: Last part of the packet spanning"
-				  " multiple descriptors\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		/* Good Receive */
-		skb_put(skb, length);
-
-		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
-		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
-
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put*/
-		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
-			u8 *vaddr;
-			/* there is no documentation about how to call
-			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
-			pci_dma_sync_single_for_cpu(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE,
-				PCI_DMA_FROMDEVICE);
-			vaddr = kmap_atomic(ps_page->ps_page[0],
-			                    KM_SKB_DATA_SOFTIRQ);
-			memcpy(skb_tail_pointer(skb), vaddr, l1);
-			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-			pci_dma_sync_single_for_device(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			/* remove the CRC */
-			l1 -= 4;
-			skb_put(skb, l1);
-			goto copydone;
-		} /* if */
-		}
-
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
-			if (!length)
-				break;
-			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
-			                   length);
-			ps_page->ps_page[j] = NULL;
-			skb->len += length;
-			skb->data_len += length;
-			skb->truesize += length;
-		}
-
-		/* strip the ethernet crc, problem is we're using pages now so
-		 * this whole operation can get a little cpu intensive */
-		pskb_trim(skb, skb->len - 4);
-
-copydone:
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		e1000_rx_checksum(adapter, staterr,
-				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
-		skb->protocol = eth_type_trans(skb, netdev);
-
-		if (likely(rx_desc->wb.upper.header_status &
-			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
-			adapter->rx_hdr_split++;
-
-		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-				le16_to_cpu(rx_desc->wb.middle.vlan));
-		} else {
-			netif_receive_skb(skb);
-		}
-
-		netdev->last_rx = jiffies;
-
-next_desc:
-		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
-		buffer_info->skb = NULL;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
-			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-		buffer_info = next_buffer;
-
-		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	}
-	rx_ring->next_to_clean = i;
-
-	cleaned_count = E1000_DESC_UNUSED(rx_ring);
-	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-
-	adapter->total_rx_packets += total_rx_packets;
-	adapter->total_rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
-	return cleaned;
-}
-
-/**
  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
  **/
@@ -4521,104 +4221,6 @@
 }
 
 /**
- * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @adapter: address of board private structure
- **/
-
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	union e1000_rx_desc_packet_split *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
-	ps_page = &rx_ring->ps_page[i];
-	ps_page_dma = &rx_ring->ps_page_dma[i];
-
-	while (cleaned_count--) {
-		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-
-		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			if (j < adapter->rx_ps_pages) {
-				if (likely(!ps_page->ps_page[j])) {
-					ps_page->ps_page[j] =
-						alloc_page(GFP_ATOMIC);
-					if (unlikely(!ps_page->ps_page[j])) {
-						adapter->alloc_rx_buff_failed++;
-						goto no_buffers;
-					}
-					ps_page_dma->ps_page_dma[j] =
-						pci_map_page(pdev,
-							    ps_page->ps_page[j],
-							    0, PAGE_SIZE,
-							    PCI_DMA_FROMDEVICE);
-				}
-				/* Refresh the desc even if buffer_addrs didn't
-				 * change because each write-back erases
-				 * this info.
-				 */
-				rx_desc->read.buffer_addr[j+1] =
-				     cpu_to_le64(ps_page_dma->ps_page_dma[j]);
-			} else
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
-		}
-
-		skb = netdev_alloc_skb(netdev,
-		                       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
-
-		if (unlikely(!skb)) {
-			adapter->alloc_rx_buff_failed++;
-			break;
-		}
-
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		buffer_info->skb = skb;
-		buffer_info->length = adapter->rx_ps_bsize0;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-						  adapter->rx_ps_bsize0,
-						  PCI_DMA_FROMDEVICE);
-
-		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
-
-		if (unlikely(++i == rx_ring->count)) i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-	}
-
-no_buffers:
-	if (likely(rx_ring->next_to_use != i)) {
-		rx_ring->next_to_use = i;
-		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
-		wmb();
-		/* Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, hw->hw_addr + rx_ring->rdt);
-	}
-}
-
-/**
  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
  * @adapter:
  **/
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 462351c..b2c910c 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -38,6 +38,7 @@
  * 82573V Gigabit Ethernet Controller (Copper)
  * 82573E Gigabit Ethernet Controller (Copper)
  * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
  */
 
 #include <linux/netdevice.h>
@@ -54,6 +55,8 @@
 
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
 static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
 static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
 static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -63,6 +66,8 @@
 static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
 static s32 e1000_setup_link_82571(struct e1000_hw *hw);
 static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+static s32 e1000_led_on_82574(struct e1000_hw *hw);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -92,6 +97,9 @@
 	case e1000_82573:
 		phy->type		 = e1000_phy_m88;
 		break;
+	case e1000_82574:
+		phy->type		 = e1000_phy_bm;
+		break;
 	default:
 		return -E1000_ERR_PHY;
 		break;
@@ -111,6 +119,10 @@
 		if (phy->id != M88E1111_I_PHY_ID)
 			return -E1000_ERR_PHY;
 		break;
+	case e1000_82574:
+		if (phy->id != BME1000_E_PHY_ID_R2)
+			return -E1000_ERR_PHY;
+		break;
 	default:
 		return -E1000_ERR_PHY;
 		break;
@@ -150,6 +162,7 @@
 
 	switch (hw->mac.type) {
 	case e1000_82573:
+	case e1000_82574:
 		if (((eecd >> 15) & 0x3) == 0x3) {
 			nvm->type = e1000_nvm_flash_hw;
 			nvm->word_size = 2048;
@@ -245,6 +258,17 @@
 		break;
 	}
 
+	switch (hw->mac.type) {
+	case e1000_82574:
+		func->check_mng_mode = e1000_check_mng_mode_82574;
+		func->led_on = e1000_led_on_82574;
+		break;
+	default:
+		func->check_mng_mode = e1000e_check_mng_mode_generic;
+		func->led_on = e1000e_led_on_generic;
+		break;
+	}
+
 	return 0;
 }
 
@@ -330,6 +354,8 @@
 static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_id = 0;
 
 	switch (hw->mac.type) {
 	case e1000_82571:
@@ -345,6 +371,20 @@
 	case e1000_82573:
 		return e1000e_get_phy_id(hw);
 		break;
+	case e1000_82574:
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id |= (u32)(phy_id);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+		break;
 	default:
 		return -E1000_ERR_PHY;
 		break;
@@ -421,7 +461,7 @@
 	if (ret_val)
 		return ret_val;
 
-	if (hw->mac.type != e1000_82573)
+	if (hw->mac.type != e1000_82573 && hw->mac.type != e1000_82574)
 		ret_val = e1000e_acquire_nvm(hw);
 
 	if (ret_val)
@@ -461,6 +501,7 @@
 
 	switch (hw->mac.type) {
 	case e1000_82573:
+	case e1000_82574:
 		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
 		break;
 	case e1000_82571:
@@ -735,7 +776,7 @@
 	 * Must acquire the MDIO ownership before MAC reset.
 	 * Ownership defaults to firmware after a reset.
 	 */
-	if (hw->mac.type == e1000_82573) {
+	if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
 		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 
@@ -776,7 +817,7 @@
 	 * Need to wait for Phy configuration completion before accessing
 	 * NVM and Phy.
 	 */
-	if (hw->mac.type == e1000_82573)
+	if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574)
 		msleep(25);
 
 	/* Clear any pending interrupt events. */
@@ -843,7 +884,7 @@
 	ew32(TXDCTL(0), reg_data);
 
 	/* ...for both queues. */
-	if (mac->type != e1000_82573) {
+	if (mac->type != e1000_82573 && mac->type != e1000_82574) {
 		reg_data = er32(TXDCTL(1));
 		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
 			   E1000_TXDCTL_FULL_TX_DESC_WB |
@@ -918,19 +959,28 @@
 	}
 
 	/* Device Control */
-	if (hw->mac.type == e1000_82573) {
+	if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
 		reg = er32(CTRL);
 		reg &= ~(1 << 29);
 		ew32(CTRL, reg);
 	}
 
 	/* Extended Device Control */
-	if (hw->mac.type == e1000_82573) {
+	if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
 		reg = er32(CTRL_EXT);
 		reg &= ~(1 << 23);
 		reg |= (1 << 22);
 		ew32(CTRL_EXT, reg);
 	}
+
+	/* PCI-Ex Control Register */
+	if (hw->mac.type == e1000_82574) {
+		reg = er32(GCR);
+		reg |= (1 << 22);
+		ew32(GCR, reg);
+	}
+
+	return;
 }
 
 /**
@@ -947,7 +997,7 @@
 	u32 vfta_offset = 0;
 	u32 vfta_bit_in_reg = 0;
 
-	if (hw->mac.type == e1000_82573) {
+	if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
 		if (hw->mng_cookie.vlan_id != 0) {
 			/*
 			 * The VFTA is a 4096b bit-field, each identifying
@@ -976,6 +1026,48 @@
 }
 
 /**
+ *  e1000_check_mng_mode_82574 - Check manageability is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the NVM Initialization Control Word 2 and returns true
+ *  (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+	u16 data;
+
+	e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ *  e1000_led_on_82574 - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u32 i;
+
+	ctrl = hw->mac.ledctl_mode2;
+	if (!(E1000_STATUS_LU & er32(STATUS))) {
+		/*
+		 * If no link, then turn LED on by setting the invert bit
+		 * for each LED that's "on" (0x0E) in ledctl_mode2.
+		 */
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+	}
+	ew32(LEDCTL, ctrl);
+
+	return 0;
+}
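
A note on the LEDCTL manipulation above: each of the four LEDs owns one byte of the register, 0x0E in a byte means "LED on", and bit 6 of each byte is that LED's invert bit, so setting the invert bit lights every "on" LED even without link. A stand-alone sketch; the constant values are restated here as assumptions and should be checked against the driver's defines:

#include <stdint.h>
#include <stdio.h>

#define LEDCTL_MODE_LED_ON	0x0E		/* assumed per-LED "on" mode   */
#define LEDCTL_LED0_IVRT	0x00000040	/* assumed invert bit of LED 0 */

/* With no link, set the invert bit of every LED whose mode is "on",
 * exactly as the function above does. */
static uint32_t ledctl_force_on(uint32_t ledctl_mode2)
{
	uint32_t ctrl = ledctl_mode2;
	int i;

	for (i = 0; i < 4; i++)
		if (((ledctl_mode2 >> (i * 8)) & 0xFF) == LEDCTL_MODE_LED_ON)
			ctrl |= (uint32_t)LEDCTL_LED0_IVRT << (i * 8);
	return ctrl;
}

int main(void)
{
	printf("0x%08x\n", ledctl_force_on(0x000E0E0EU)); /* prints 0x004e4e4e */
	return 0;
}
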
+
+/**
  *  e1000_update_mc_addr_list_82571 - Update Multicast addresses
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
@@ -1018,7 +1110,8 @@
 	 * the default flow control setting, so we explicitly
 	 * set it to full.
 	 */
-	if (hw->mac.type == e1000_82573)
+	if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
+	    hw->fc.type == e1000_fc_default)
 		hw->fc.type = e1000_fc_full;
 
 	return e1000e_setup_link(hw);
@@ -1045,6 +1138,7 @@
 
 	switch (hw->phy.type) {
 	case e1000_phy_m88:
+	case e1000_phy_bm:
 		ret_val = e1000e_copper_link_setup_m88(hw);
 		break;
 	case e1000_phy_igp_2:
@@ -1114,11 +1208,10 @@
 		return ret_val;
 	}
 
-	if (hw->mac.type == e1000_82573 &&
+	if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
 	    *data == ID_LED_RESERVED_F746)
 		*data = ID_LED_DEFAULT_82573;
-	else if (*data == ID_LED_RESERVED_0000 ||
-		 *data == ID_LED_RESERVED_FFFF)
+	else if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
 		*data = ID_LED_DEFAULT;
 
 	return 0;
@@ -1265,13 +1358,13 @@
 }
 
 static struct e1000_mac_operations e82571_mac_ops = {
-	.mng_mode_enab		= E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
+	/* .check_mng_mode: mac type dependent */
 	/* .check_for_link: media type dependent */
 	.cleanup_led		= e1000e_cleanup_led_generic,
 	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
 	.get_bus_info		= e1000e_get_bus_info_pcie,
 	/* .get_link_up_info: media type dependent */
-	.led_on			= e1000e_led_on_generic,
+	/* .led_on: mac type dependent */
 	.led_off		= e1000e_led_off_generic,
 	.update_mc_addr_list	= e1000_update_mc_addr_list_82571,
 	.reset_hw		= e1000_reset_hw_82571,
@@ -1312,6 +1405,22 @@
 	.write_phy_reg		= e1000e_write_phy_reg_m88,
 };
 
+static struct e1000_phy_operations e82_phy_ops_bm = {
+	.acquire_phy		= e1000_get_hw_semaphore_82571,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit_phy		= e1000e_phy_sw_reset,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cable_length	= e1000e_get_cable_length_m88,
+	.get_phy_info		= e1000e_get_phy_info_m88,
+	.read_phy_reg		= e1000e_read_phy_reg_bm2,
+	.release_phy		= e1000_put_hw_semaphore_82571,
+	.reset_phy		= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_phy_reg		= e1000e_write_phy_reg_bm2,
+};
+
 static struct e1000_nvm_operations e82571_nvm_ops = {
 	.acquire_nvm		= e1000_acquire_nvm_82571,
 	.read_nvm		= e1000e_read_nvm_eerd,
@@ -1375,3 +1484,21 @@
 	.nvm_ops		= &e82571_nvm_ops,
 };
 
+struct e1000_info e1000_82574_info = {
+	.mac			= e1000_82574,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_MSIX
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_RX_CSUM_ENABLED
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.pba			= 20,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_bm,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 14b0e6c..48f79ec 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -71,9 +71,11 @@
 #define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_EIAME          0x01000000
 #define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
 #define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
 #define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
 
 /* Receive Descriptor bit definitions */
 #define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
@@ -299,6 +301,7 @@
 #define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
 
 /* Header split receive */
+#define E1000_RFCTL_ACK_DIS             0x00001000
 #define E1000_RFCTL_EXTEN               0x00008000
 #define E1000_RFCTL_IPV6_EX_DIS         0x00010000
 #define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
@@ -363,6 +366,11 @@
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */
 
 /*
  * This defines the bits that are set in the Interrupt Mask
@@ -386,6 +394,11 @@
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1      E1000_ICR_TXQ1      /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER     E1000_ICR_OTHER     /* Other Interrupts */
 
 /* Interrupt Cause Set */
 #define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
@@ -505,6 +518,7 @@
 #define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
 
 /* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS     0x0001 /* LP has Auto Neg Capability */
 
 /* 1000BASE-T Control Register */
 #define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
@@ -540,6 +554,7 @@
 #define E1000_EECD_DO        0x00000008 /* NVM Data Out */
 #define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
 #define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
 #define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
 /* NVM Addressing bits based on type (0-small, 1-large) */
 #define E1000_EECD_ADDR_BITS 0x00000400
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ac4e506..0a1916b 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,6 +62,11 @@
 	e_printk(KERN_NOTICE, adapter, format, ## arg)
 
 
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY		0
+#define E1000E_INT_MODE_MSI		1
+#define E1000E_INT_MODE_MSIX		2
+
 /* Tx/Rx descriptor defines */
 #define E1000_DEFAULT_TXD		256
 #define E1000_MAX_TXD			4096
@@ -95,9 +100,11 @@
 	board_82571,
 	board_82572,
 	board_82573,
+	board_82574,
 	board_80003es2lan,
 	board_ich8lan,
 	board_ich9lan,
+	board_ich10lan,
 };
 
 struct e1000_queue_stats {
@@ -146,6 +153,12 @@
 	/* array of buffer information structs */
 	struct e1000_buffer *buffer_info;
 
+	char name[IFNAMSIZ + 5];
+	u32 ims_val;
+	u32 itr_val;
+	u16 itr_register;
+	int set_itr;
+
 	struct sk_buff *rx_skb_top;
 
 	struct e1000_queue_stats stats;
@@ -274,6 +287,9 @@
 	u32 test_icr;
 
 	u32 msg_enable;
+	struct msix_entry *msix_entries;
+	int int_mode;
+	u32 eiac_mask;
 
 	u32 eeprom_wol;
 	u32 wol;
@@ -306,6 +322,7 @@
 #define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
 #define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
 #define FLAG_IS_ICH                       (1 << 9)
+#define FLAG_HAS_MSIX                     (1 << 10)
 #define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
 #define FLAG_IS_QUAD_PORT_A               (1 << 12)
 #define FLAG_IS_QUAD_PORT                 (1 << 13)
@@ -364,6 +381,8 @@
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 
 extern unsigned int copybreak;
 
@@ -372,8 +391,10 @@
 extern struct e1000_info e1000_82571_info;
 extern struct e1000_info e1000_82572_info;
 extern struct e1000_info e1000_82573_info;
+extern struct e1000_info e1000_82574_info;
 extern struct e1000_info e1000_ich8_info;
 extern struct e1000_info e1000_ich9_info;
+extern struct e1000_info e1000_ich10_info;
 extern struct e1000_info e1000_es2_info;
 
 extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -446,10 +467,13 @@
 extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
 extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
 extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
 extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -520,7 +544,12 @@
 	return hw->phy.ops.get_phy_info(hw);
 }
 
-extern bool e1000e_check_mng_mode(struct e1000_hw *hw);
+static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
+{
+	return hw->mac.ops.check_mng_mode(hw);
+}
+
+extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
 extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
 extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
 
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index dc552d7..da9c09c 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1247,7 +1247,7 @@
 }
 
 static struct e1000_mac_operations es2_mac_ops = {
-	.mng_mode_enab		= E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
+	.check_mng_mode		= e1000e_check_mng_mode_generic,
 	/* check_for_link dependent on media type */
 	.cleanup_led		= e1000e_cleanup_led_generic,
 	.clear_hw_cntrs		= e1000_clear_hw_cntrs_80003es2lan,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e21c9e0..52b762e 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -568,6 +568,7 @@
 	 * and flush shadow RAM for 82573 controllers
 	 */
 	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
+			       (hw->mac.type == e1000_82574) ||
 			       (hw->mac.type == e1000_82573)))
 		e1000e_update_nvm_checksum(hw);
 
@@ -779,8 +780,10 @@
 		toggle = 0x7FFFF3FF;
 		break;
 	case e1000_82573:
+	case e1000_82574:
 	case e1000_ich8lan:
 	case e1000_ich9lan:
+	case e1000_ich10lan:
 		toggle = 0x7FFFF033;
 		break;
 	default:
@@ -833,7 +836,9 @@
 	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
 	for (i = 0; i < mac->rar_entry_count; i++)
 		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
-				       0x8003FFFF, 0xFFFFFFFF);
+				       ((mac->type == e1000_ich10lan) ?
+					   0x8007FFFF : 0x8003FFFF),
+				       0xFFFFFFFF);
 
 	for (i = 0; i < mac->mta_reg_count; i++)
 		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -884,10 +889,18 @@
 	u32 shared_int = 1;
 	u32 irq = adapter->pdev->irq;
 	int i;
+	int ret_val = 0;
+	int int_mode = E1000E_INT_MODE_LEGACY;
 
 	*data = 0;
 
-	/* NOTE: we don't test MSI interrupts here, yet */
+	/* NOTE: we don't test MSI/MSI-X interrupts here, yet */
+	if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
+		int_mode = adapter->int_mode;
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
+		e1000e_set_interrupt_capability(adapter);
+	}
 	/* Hook up test interrupt handler just for this test */
 	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
 			 netdev)) {
@@ -895,7 +908,8 @@
 	} else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
 		 netdev->name, netdev)) {
 		*data = 1;
-		return -1;
+		ret_val = -1;
+		goto out;
 	}
 	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
 
@@ -905,12 +919,23 @@
 
 	/* Test each interrupt */
 	for (i = 0; i < 10; i++) {
-		if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
-			continue;
-
 		/* Interrupt to test */
 		mask = 1 << i;
 
+		if (adapter->flags & FLAG_IS_ICH) {
+			switch (mask) {
+			case E1000_ICR_RXSEQ:
+				continue;
+			case 0x00000100:
+				if (adapter->hw.mac.type == e1000_ich8lan ||
+				    adapter->hw.mac.type == e1000_ich9lan)
+					continue;
+				break;
+			default:
+				break;
+			}
+		}
+
 		if (!shared_int) {
 			/*
 			 * Disable the interrupt to be reported in
@@ -974,7 +999,14 @@
 	/* Unhook test interrupt handler */
 	free_irq(irq, netdev);
 
-	return *data;
+out:
+	if (int_mode == E1000E_INT_MODE_MSIX) {
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = int_mode;
+		e1000e_set_interrupt_capability(adapter);
+	}
+
+	return ret_val;
 }
 
 static void e1000_free_desc_rings(struct e1000_adapter *adapter)
@@ -1755,11 +1787,13 @@
 static int e1000_phys_id(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 
 	if (!data)
 		data = INT_MAX;
 
-	if (adapter->hw.phy.type == e1000_phy_ife) {
+	if ((hw->phy.type == e1000_phy_ife) ||
+	    (hw->mac.type == e1000_82574)) {
 		if (!adapter->blink_timer.function) {
 			init_timer(&adapter->blink_timer);
 			adapter->blink_timer.function =
@@ -1769,16 +1803,16 @@
 		mod_timer(&adapter->blink_timer, jiffies);
 		msleep_interruptible(data * 1000);
 		del_timer_sync(&adapter->blink_timer);
-		e1e_wphy(&adapter->hw,
-				    IFE_PHY_SPECIAL_CONTROL_LED, 0);
+		if (hw->phy.type == e1000_phy_ife)
+			e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
 	} else {
-		e1000e_blink_led(&adapter->hw);
+		e1000e_blink_led(hw);
 		msleep_interruptible(data * 1000);
 	}
 
-	adapter->hw.mac.ops.led_off(&adapter->hw);
+	hw->mac.ops.led_off(hw);
 	clear_bit(E1000_LED_ON, &adapter->led_status);
-	adapter->hw.mac.ops.cleanup_led(&adapter->hw);
+	hw->mac.ops.cleanup_led(hw);
 
 	return 0;
 }
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 74f263a..f66ed37 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -65,7 +65,11 @@
 	E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
 	E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
 	E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
+	E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
 	E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
+	E1000_IVAR     = 0x000E4, /* Interrupt Vector Allocation - RW */
+	E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
+#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
 	E1000_RCTL     = 0x00100, /* Rx Control - RW */
 	E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
 	E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
@@ -332,6 +336,7 @@
 #define E1000_DEV_ID_82573E			0x108B
 #define E1000_DEV_ID_82573E_IAMT		0x108C
 #define E1000_DEV_ID_82573L			0x109A
+#define E1000_DEV_ID_82574L			0x10D3
 
 #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT	0x1096
 #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT	0x1098
@@ -346,6 +351,7 @@
 #define E1000_DEV_ID_ICH8_IFE_G			0x10C5
 #define E1000_DEV_ID_ICH8_IGP_M			0x104D
 #define E1000_DEV_ID_ICH9_IGP_AMT		0x10BD
+#define E1000_DEV_ID_ICH9_BM			0x10E5
 #define E1000_DEV_ID_ICH9_IGP_M_AMT		0x10F5
 #define E1000_DEV_ID_ICH9_IGP_M			0x10BF
 #define E1000_DEV_ID_ICH9_IGP_M_V		0x10CB
@@ -356,6 +362,10 @@
 #define E1000_DEV_ID_ICH10_R_BM_LM		0x10CC
 #define E1000_DEV_ID_ICH10_R_BM_LF		0x10CD
 #define E1000_DEV_ID_ICH10_R_BM_V		0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM		0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF		0x10DF
+
+#define E1000_REVISION_4 4
 
 #define E1000_FUNC_1 1
 
@@ -363,9 +373,11 @@
 	e1000_82571,
 	e1000_82572,
 	e1000_82573,
+	e1000_82574,
 	e1000_80003es2lan,
 	e1000_ich8lan,
 	e1000_ich9lan,
+	e1000_ich10lan,
 };
 
 enum e1000_media_type {
@@ -696,8 +708,7 @@
 
 /* Function pointers and static data for the MAC. */
 struct e1000_mac_operations {
-	u32			mng_mode_enab;
-
+	bool (*check_mng_mode)(struct e1000_hw *);
 	s32  (*check_for_link)(struct e1000_hw *);
 	s32  (*cleanup_led)(struct e1000_hw *);
 	void (*clear_hw_cntrs)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e38452..692251b 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -43,7 +43,9 @@
  * 82567LM-2 Gigabit Network Connection
  * 82567LF-2 Gigabit Network Connection
  * 82567V-2 Gigabit Network Connection
- * 82562GT-3 10/100 Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
  */
 
 #include <linux/netdevice.h>
@@ -157,12 +159,15 @@
 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 						u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data);
 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
 					 u16 *data);
 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 					 u8 size, u16 *data);
 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -393,6 +398,8 @@
 
 	if (!timeout) {
 		hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
 		return -E1000_ERR_CONFIG;
 	}
 
@@ -417,6 +424,22 @@
 }
 
 /**
+ *  e1000_check_mng_mode_ich8lan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm = er32(FWSM);
+
+	return (fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
  *  @hw: pointer to the HW structure
  *
@@ -897,6 +920,56 @@
 }
 
 /**
+ *  e1000_valid_nvm_bank_detect_ich8lan - finds the valid NVM bank (0 or 1)
+ *  @hw: pointer to the HW structure
+ *  @bank:  pointer to the variable that returns the active bank
+ *
+ *  Reads signature byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	/* flash bank size is in words */
+	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
+	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+	u8 bank_high_byte = 0;
+
+	if (hw->mac.type != e1000_ich10lan) {
+		if (er32(EECD) & E1000_EECD_SEC1VAL)
+			*bank = 1;
+		else
+			*bank = 0;
+	} else {
+		/*
+		 * Make sure the signature for bank 0 is valid,
+		 * if not check for bank1
+		 */
+		e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte);
+		if ((bank_high_byte & 0xC0) == 0x80) {
+			*bank = 0;
+		} else {
+			/*
+			 * check whether segment 1 is valid by verifying
+			 * that bits 15:14 read 10b in word 0x13
+			 */
+			e1000_read_flash_byte_ich8lan(hw,
+						      act_offset + bank1_offset,
+						      &bank_high_byte);
+
+			/* bank1 has a valid signature equivalent to SEC1V */
+			if ((bank_high_byte & 0xC0) == 0x80) {
+				*bank = 1;
+			} else {
+				hw_dbg(hw, "ERROR: EEPROM not present\n");
+				return -E1000_ERR_NVM;
+			}
+		}
+	}
+
+	return 0;
+}
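
The signature test above reduces to one bit pattern: a bank is valid when bits 15:14 of its signature word read 10b, which on the high byte means (byte & 0xC0) == 0x80. A minimal sketch of just that check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Valid-bank marker as used above: bits 15:14 of the signature word
 * must be 10b, i.e. (high byte & 0xC0) == 0x80. */
static bool nvm_bank_signature_valid(uint8_t sig_high_byte)
{
	return (sig_high_byte & 0xC0) == 0x80;
}

int main(void)
{
	printf("%d %d\n", nvm_bank_signature_valid(0x80),	/* 1 */
	       nvm_bank_signature_valid(0xC0));			/* 0 */
	return 0;
}
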
+
+/**
  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
  *  @hw: pointer to the HW structure
  *  @offset: The offset (in bytes) of the word(s) to read.
@@ -912,6 +985,7 @@
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u32 act_offset;
 	s32 ret_val;
+	u32 bank = 0;
 	u16 i, word;
 
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -924,10 +998,11 @@
 	if (ret_val)
 		return ret_val;
 
-	/* Start with the bank offset, then add the relative offset. */
-	act_offset = (er32(EECD) & E1000_EECD_SEC1VAL)
-		     ? nvm->flash_bank_size
-		     : 0;
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val)
+		return ret_val;
+
+	act_offset = (bank) ? nvm->flash_bank_size : 0;
 	act_offset += offset;
 
 	for (i = 0; i < words; i++) {
@@ -1075,6 +1150,29 @@
 }
 
 /**
+ *  e1000_read_flash_byte_ich8lan - Read byte from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to read.
+ *  @data: Pointer to a byte to store the value read.
+ *
+ *  Reads a single byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data)
+{
+	s32 ret_val;
+	u16 word = 0;
+
+	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+	if (ret_val)
+		return ret_val;
+
+	*data = (u8)word;
+
+	return 0;
+}
+
+/**
  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
  *  @hw: pointer to the HW structure
  *  @offset: The offset (in bytes) of the byte or word to read.
@@ -1205,7 +1303,7 @@
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-	u32 i, act_offset, new_bank_offset, old_bank_offset;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
 	s32 ret_val;
 	u16 data;
 
@@ -1225,7 +1323,11 @@
 	 * write to bank 0 etc.  We also need to erase the segment that
 	 * is going to be written
 	 */
-	if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
+	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val)
+		return ret_val;
+
+	if (bank == 0) {
 		new_bank_offset = nvm->flash_bank_size;
 		old_bank_offset = 0;
 		e1000_erase_flash_bank_ich8lan(hw, 1);
@@ -2189,13 +2291,14 @@
  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
  *  to a lower speed.
  *
- *  Should only be called for ICH9 devices.
+ *  Should only be called for ICH9 and ICH10 devices.
  **/
 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 {
 	u32 phy_ctrl;
 
-	if (hw->mac.type == e1000_ich9lan) {
+	if ((hw->mac.type == e1000_ich10lan) ||
+	    (hw->mac.type == e1000_ich9lan)) {
 		phy_ctrl = er32(PHY_CTRL);
 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
 		            E1000_PHY_CTRL_GBE_DISABLE;
@@ -2253,6 +2356,39 @@
 }
 
 /**
+ *  e1000_get_cfg_done_ich8lan - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit of the
+ *  completion status.  NOTE: EEPROM-less silicon will fail to read the
+ *  config done bit; in that case the error is *ONLY* logged and
+ *  E1000_SUCCESS is returned.  If an error were returned instead,
+ *  EEPROM-less silicon could never be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	u32 bank = 0;
+
+	e1000e_get_cfg_done(hw);
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (hw->mac.type != e1000_ich10lan) {
+		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+		    (hw->phy.type == e1000_phy_igp_3)) {
+			e1000e_phy_init_script_igp3(hw);
+		}
+	} else {
+		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+			/* Maybe we should do a basic PHY config */
+			hw_dbg(hw, "EEPROM not present\n");
+			return -E1000_ERR_CONFIG;
+		}
+	}
+
+	return 0;
+}
+
+/**
  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
  *  @hw: pointer to the HW structure
  *
@@ -2282,7 +2418,7 @@
 }
 
 static struct e1000_mac_operations ich8_mac_ops = {
-	.mng_mode_enab		= E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT,
+	.check_mng_mode		= e1000_check_mng_mode_ich8lan,
 	.check_for_link		= e1000e_check_for_copper_link,
 	.cleanup_led		= e1000_cleanup_led_ich8lan,
 	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
@@ -2302,7 +2438,7 @@
 	.check_reset_block	= e1000_check_reset_block_ich8lan,
 	.commit_phy		= NULL,
 	.force_speed_duplex	= e1000_phy_force_speed_duplex_ich8lan,
-	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
 	.get_cable_length	= e1000e_get_cable_length_igp_2,
 	.get_phy_info		= e1000_get_phy_info_ich8lan,
 	.read_phy_reg		= e1000e_read_phy_reg_igp,
@@ -2357,3 +2493,20 @@
 	.nvm_ops		= &ich8_nvm_ops,
 };
 
+struct e1000_info e1000_ich10_info = {
+	.mac			= e1000_ich10lan,
+	.flags			= FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_RX_CSUM_ENABLED
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_ERT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 10,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index f1f4e9d..c733730 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2222,17 +2222,18 @@
 }
 
 /**
- *  e1000e_check_mng_mode - check management mode
+ *  e1000e_check_mng_mode_generic - check management mode
  *  @hw: pointer to the HW structure
  *
  *  Reads the firmware semaphore register and returns true (>0) if
  *  manageability is enabled, else false (0).
  **/
-bool e1000e_check_mng_mode(struct e1000_hw *hw)
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
 {
 	u32 fwsm = er32(FWSM);
 
-	return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
+	return (fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
 }
 
 /**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d266510..24d05cb 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -55,9 +55,11 @@
 	[board_82571]		= &e1000_82571_info,
 	[board_82572]		= &e1000_82572_info,
 	[board_82573]		= &e1000_82573_info,
+	[board_82574]		= &e1000_82574_info,
 	[board_80003es2lan]	= &e1000_es2_info,
 	[board_ich8lan]		= &e1000_ich8_info,
 	[board_ich9lan]		= &e1000_ich9_info,
+	[board_ich10lan]	= &e1000_ich10_info,
 };
 
 #ifdef DEBUG
@@ -1179,8 +1181,8 @@
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-
 	u32 rctl, icr = er32(ICR);
+
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
@@ -1236,6 +1238,263 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t e1000_msix_other(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	if (!(icr & E1000_ICR_INT_ASSERTED)) {
+		ew32(IMS, E1000_IMS_OTHER);
+		return IRQ_NONE;
+	}
+
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
+	if (icr & E1000_ICR_OTHER) {
+		if (!(icr & E1000_ICR_LSC))
+			goto no_link_interrupt;
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+no_link_interrupt:
+	ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+
+
+	adapter->total_tx_bytes = 0;
+	adapter->total_tx_packets = 0;
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, tx_ring->ims_val);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
+	if (adapter->rx_ring->set_itr) {
+		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
+		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+		adapter->rx_ring->set_itr = 0;
+	}
+
+	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev, &adapter->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int vector = 0;
+	u32 ctrl_ext, ivar = 0;
+
+	adapter->eiac_mask = 0;
+
+	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+	if (hw->mac.type == e1000_82574) {
+		u32 rfctl = er32(RFCTL);
+		rfctl |= E1000_RFCTL_ACK_DIS;
+		ew32(RFCTL, rfctl);
+	}
+
+#define E1000_IVAR_INT_ALLOC_VALID	0x8
+	/* Configure Rx vector */
+	rx_ring->ims_val = E1000_IMS_RXQ0;
+	adapter->eiac_mask |= rx_ring->ims_val;
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + rx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + rx_ring->itr_register);
+	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+	/* Configure Tx vector */
+	tx_ring->ims_val = E1000_IMS_TXQ0;
+	vector++;
+	if (tx_ring->itr_val)
+		writel(1000000000 / (tx_ring->itr_val * 256),
+		       hw->hw_addr + tx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + tx_ring->itr_register);
+	adapter->eiac_mask |= tx_ring->ims_val;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+	/* set vector for Other Causes, e.g. link changes */
+	vector++;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + E1000_EITR_82574(vector));
+	else
+		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+	/* Cause Tx interrupts on every write back */
+	ivar |= (1 << 31);
+
+	ew32(IVAR, ivar);
+
+	/* enable MSI-X PBA support */
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+	/* Auto-Mask Other interrupts upon ICR read */
+#define E1000_EIAC_MASK_82574   0x01F00000
+	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+	ctrl_ext |= E1000_CTRL_EXT_EIAME;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
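
For clarity, the single IVAR write above packs all three 82574 MSI-X causes into one dword: Rx queue 0 in bits 3:0, Tx queue 0 in bits 11:8, the "other" cause in bits 19:16, each tagged with the INT_ALLOC_VALID bit, plus bit 31 to raise a Tx interrupt on every descriptor write-back. A stand-alone sketch of that packing (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define IVAR_INT_ALLOC_VALID 0x8	/* mirrors E1000_IVAR_INT_ALLOC_VALID */

/* Pack the three 82574 MSI-X causes the same way the function above
 * does: RxQ0 in bits 3:0, TxQ0 in bits 11:8, "other" in bits 19:16,
 * and bit 31 to interrupt on every Tx descriptor write-back. */
static uint32_t build_82574_ivar(unsigned int rx_vec, unsigned int tx_vec,
				 unsigned int other_vec)
{
	uint32_t ivar;

	ivar  = IVAR_INT_ALLOC_VALID | rx_vec;
	ivar |= (IVAR_INT_ALLOC_VALID | tx_vec) << 8;
	ivar |= (IVAR_INT_ALLOC_VALID | other_vec) << 16;
	ivar |= 1u << 31;
	return ivar;
}

int main(void)
{
	/* vectors 0, 1 and 2, as assigned above */
	printf("IVAR = 0x%08x\n", build_82574_ivar(0, 1, 2));
	return 0;
}
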
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~FLAG_MSI_ENABLED;
+	}
+
+	return;
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+	int err;
+	int numvecs, i;
+
+
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		if (adapter->flags & FLAG_HAS_MSIX) {
+			numvecs = 3; /* RxQ0, TxQ0 and other */
+			adapter->msix_entries = kcalloc(numvecs,
+						      sizeof(struct msix_entry),
+						      GFP_KERNEL);
+			if (adapter->msix_entries) {
+				for (i = 0; i < numvecs; i++)
+					adapter->msix_entries[i].entry = i;
+
+				err = pci_enable_msix(adapter->pdev,
+						      adapter->msix_entries,
+						      numvecs);
+				if (err == 0)
+					return;
+			}
+			/* MSI-X failed, so fall through and try MSI */
+			e_err("Failed to initialize MSI-X interrupts.  "
+			      "Falling back to MSI interrupts.\n");
+			e1000e_reset_interrupt_capability(adapter);
+		}
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		/* Fall through */
+	case E1000E_INT_MODE_MSI:
+		if (!pci_enable_msi(adapter->pdev)) {
+			adapter->flags |= FLAG_MSI_ENABLED;
+		} else {
+			adapter->int_mode = E1000E_INT_MODE_LEGACY;
+			e_err("Failed to initialize MSI interrupts.  Falling "
+			      "back to legacy interrupts.\n");
+		}
+		/* Fall through */
+	case E1000E_INT_MODE_LEGACY:
+		/* Don't do anything; this is the system default */
+		break;
+	}
+
+	return;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0, vector = 0;
+
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name);
+	else
+		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+			  netdev);
+	if (err)
+		goto out;
+	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->rx_ring->itr_val = adapter->itr;
+	vector++;
+
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name);
+	else
+		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+			  netdev);
+	if (err)
+		goto out;
+	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->tx_ring->itr_val = adapter->itr;
+	vector++;
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &e1000_msix_other, 0, netdev->name, netdev);
+	if (err)
+		goto out;
+
+	e1000_configure_msix(adapter);
+	return 0;
+out:
+	return err;
+}
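
The naming above depends on the ring name buffer being IFNAMSIZ + 5 bytes (added to struct e1000_ring earlier in this patch): the "-rx0"/"-tx0" suffix is appended only when the interface name leaves room for it, otherwise the bare name is used. A sketch of that guard with an illustrative helper name:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16	/* matches the kernel's interface-name limit */

/* Build "eth0-rx0"-style IRQ names; fall back to the bare interface
 * name when there is no room for the suffix, as the code above does. */
static void ring_irq_name(char dst[IFNAMSIZ + 5], const char *ifname,
			  const char *suffix)
{
	if (strlen(ifname) < IFNAMSIZ - 5)
		snprintf(dst, IFNAMSIZ + 5, "%s%s", ifname, suffix);
	else
		snprintf(dst, IFNAMSIZ, "%s", ifname);
}

int main(void)
{
	char name[IFNAMSIZ + 5];

	ring_irq_name(name, "eth0", "-rx0");
	printf("%s\n", name);	/* eth0-rx0 */
	return 0;
}
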
+
 /**
  * e1000_request_irq - initialize interrupts
  *
@@ -1245,28 +1504,32 @@
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int irq_flags = IRQF_SHARED;
 	int err;
 
-	if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
-		err = pci_enable_msi(adapter->pdev);
-		if (!err) {
-			adapter->flags |= FLAG_MSI_ENABLED;
-			irq_flags = 0;
-		}
+	if (adapter->msix_entries) {
+		err = e1000_request_msix(adapter);
+		if (!err)
+			return err;
+		/* fall back to MSI */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		e1000e_set_interrupt_capability(adapter);
+	}
+	if (adapter->flags & FLAG_MSI_ENABLED) {
+		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
+				  netdev->name, netdev);
+		if (!err)
+			return err;
+
+		/* fall back to legacy interrupt */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
 	}
 
-	err = request_irq(adapter->pdev->irq,
-			  ((adapter->flags & FLAG_MSI_ENABLED) ?
-				&e1000_intr_msi : &e1000_intr),
-			  irq_flags, netdev->name, netdev);
-	if (err) {
-		if (adapter->flags & FLAG_MSI_ENABLED) {
-			pci_disable_msi(adapter->pdev);
-			adapter->flags &= ~FLAG_MSI_ENABLED;
-		}
+	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
+			  netdev->name, netdev);
+	if (err)
 		e_err("Unable to allocate interrupt, Error: %d\n", err);
-	}
 
 	return err;
 }
@@ -1275,11 +1538,21 @@
 {
 	struct net_device *netdev = adapter->netdev;
 
-	free_irq(adapter->pdev->irq, netdev);
-	if (adapter->flags & FLAG_MSI_ENABLED) {
-		pci_disable_msi(adapter->pdev);
-		adapter->flags &= ~FLAG_MSI_ENABLED;
+	if (adapter->msix_entries) {
+		int vector = 0;
+
+		free_irq(adapter->msix_entries[vector].vector, netdev);
+		vector++;
+
+		free_irq(adapter->msix_entries[vector].vector, netdev);
+		vector++;
+
+		/* Other Causes interrupt vector */
+		free_irq(adapter->msix_entries[vector].vector, netdev);
+		return;
 	}
+
+	free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -1290,6 +1563,8 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	ew32(IMC, ~0);
+	if (adapter->msix_entries)
+		ew32(EIAC_82574, 0);
 	e1e_flush();
 	synchronize_irq(adapter->pdev->irq);
 }
@@ -1301,7 +1576,12 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	ew32(IMS, IMS_ENABLE_MASK);
+	if (adapter->msix_entries) {
+		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+	} else {
+		ew32(IMS, IMS_ENABLE_MASK);
+	}
 	e1e_flush();
 }
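
When MSI-X is in use, the enable path above splits the mask in two: the per-queue causes go into EIAC_82574 so they auto-clear, while IMS enables those same causes plus the "other" and link-status-change bits. A small sketch of the resulting masks; the constants are restated from the defines added by this patch, except ICR_LSC, which is assumed to be 0x00000004:

#include <stdint.h>
#include <stdio.h>

#define IMS_RXQ0	0x00100000
#define IMS_TXQ0	0x00400000
#define IMS_OTHER	0x01000000
#define IMS_LSC		0x00000004	/* assumed value of E1000_ICR_LSC */
#define EIAC_MASK_82574	0x01F00000

int main(void)
{
	uint32_t eiac_mask = IMS_RXQ0 | IMS_TXQ0;	/* per-queue causes */

	printf("EIAC = 0x%08x\n", eiac_mask & EIAC_MASK_82574);
	printf("IMS  = 0x%08x\n", eiac_mask | IMS_OTHER | IMS_LSC);
	return 0;
}
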
 
@@ -1551,9 +1831,8 @@
  *      traffic pattern.  Constants in this function were computed
  *      based on theoretical maximum wire speed and thresholds were set based
  *      on testing data as well as attempting to minimize response time
- *      while increasing bulk throughput.
- *      this functionality is controlled by the InterruptThrottleRate module
- *      parameter (see e1000_param.c)
+ *      while increasing bulk throughput.  This functionality is controlled
+ *      by the InterruptThrottleRate module parameter.
  **/
 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 				     u16 itr_setting, int packets,
@@ -1661,11 +1940,37 @@
 			     min(adapter->itr + (new_itr >> 2), new_itr) :
 			     new_itr;
 		adapter->itr = new_itr;
-		ew32(ITR, 1000000000 / (new_itr * 256));
+		adapter->rx_ring->itr_val = new_itr;
+		if (adapter->msix_entries)
+			adapter->rx_ring->set_itr = 1;
+		else
+			ew32(ITR, 1000000000 / (new_itr * 256));
 	}
 }
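
Both the legacy ITR register and the per-vector EITR registers written above are programmed in units of 256 ns, while the driver's itr value is a rate in interrupts per second; the expression 1000000000 / (itr * 256) converts the rate into that interval. A worked example:

#include <stdint.h>
#include <stdio.h>

/* Convert a target interrupt rate (interrupts/second) into the
 * register value used above: the minimum gap between interrupts,
 * expressed in 256 ns units. */
static uint32_t itr_to_reg(uint32_t ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	/* 20000 ints/s -> 1e9 / (20000 * 256) = 195, i.e. ~50 us apart */
	printf("%u\n", itr_to_reg(20000));
	return 0;
}
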
 
 /**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err;
+
+	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err;
+
+	return 0;
+err:
+	e_err("Unable to allocate memory for queues\n");
+	kfree(adapter->rx_ring);
+	kfree(adapter->tx_ring);
+	return -ENOMEM;
+}
+
+/**
  * e1000_clean - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
  * @budget: amount of packets driver is allowed to process this poll
@@ -1673,12 +1978,17 @@
 static int e1000_clean(struct napi_struct *napi, int budget)
 {
 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *poll_dev = adapter->netdev;
 	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
+	if (adapter->msix_entries &&
+	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+		goto clean_rx;
+
 	/*
 	 * e1000_clean is called per-cpu.  This lock protects
 	 * tx_ring from being cleaned by multiple cpus
@@ -1690,6 +2000,7 @@
 		spin_unlock(&adapter->tx_queue_lock);
 	}
 
+clean_rx:
 	adapter->clean_rx(adapter, &work_done, budget);
 
 	if (tx_cleaned)
@@ -1700,7 +2011,10 @@
 		if (adapter->itr_setting & 3)
 			e1000_set_itr(adapter);
 		netif_rx_complete(poll_dev, napi);
-		e1000_irq_enable(adapter);
+		if (adapter->msix_entries)
+			ew32(IMS, adapter->rx_ring->ims_val);
+		else
+			e1000_irq_enable(adapter);
 	}
 
 	return work_done;
@@ -2496,6 +2810,8 @@
 	clear_bit(__E1000_DOWN, &adapter->state);
 
 	napi_enable(&adapter->napi);
+	if (adapter->msix_entries)
+		e1000_configure_msix(adapter);
 	e1000_irq_enable(adapter);
 
 	/* fire a link change interrupt to start the watchdog */
@@ -2579,13 +2895,10 @@
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err;
+	e1000e_set_interrupt_capability(adapter);
 
-	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err;
+	if (e1000_alloc_queues(adapter))
+		return -ENOMEM;
 
 	spin_lock_init(&adapter->tx_queue_lock);
 
@@ -2596,12 +2909,6 @@
 
 	set_bit(__E1000_DOWN, &adapter->state);
 	return 0;
-
-err:
-	e_err("Unable to allocate memory for queues\n");
-	kfree(adapter->rx_ring);
-	kfree(adapter->tx_ring);
-	return -ENOMEM;
 }
 
 /**
@@ -2643,6 +2950,7 @@
 
 	/* free the real vector and request a test handler */
 	e1000_free_irq(adapter);
+	e1000e_reset_interrupt_capability(adapter);
 
 	/* Assume that the test fails, if it succeeds then the test
 	 * MSI irq handler will unset this flag */
@@ -2673,6 +2981,7 @@
 	rmb();
 
 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
 		err = -EIO;
 		e_info("MSI interrupt test failed!\n");
 	}
@@ -2686,7 +2995,7 @@
 	/* okay so the test worked, restore settings */
 	e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
 msi_test_failed:
-	/* restore the original vector, even if it failed */
+	e1000e_set_interrupt_capability(adapter);
 	e1000_request_irq(adapter);
 	return err;
 }
@@ -2796,7 +3105,7 @@
 	 * ignore e1000e MSI messages, which means we need to test our MSI
 	 * interrupt now
 	 */
-	{
+	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
 		err = e1000_test_msi(adapter);
 		if (err) {
 			e_err("Interrupt allocation failed\n");
@@ -2988,7 +3297,8 @@
 
 	adapter->stats.algnerrc += er32(ALGNERRC);
 	adapter->stats.rxerrc += er32(RXERRC);
-	adapter->stats.tncrs += er32(TNCRS);
+	if (hw->mac.type != e1000_82574)
+		adapter->stats.tncrs += er32(TNCRS);
 	adapter->stats.cexterr += er32(CEXTERR);
 	adapter->stats.tsctc += er32(TSCTC);
 	adapter->stats.tsctfc += er32(TSCTFC);
@@ -3201,6 +3511,27 @@
 						   &adapter->link_duplex);
 			e1000_print_link_info(adapter);
 			/*
+			 * On supported PHYs, check for duplex mismatch only
+			 * if link has autonegotiated at 10/100 half
+			 */
+			if ((hw->phy.type == e1000_phy_igp_3 ||
+			     hw->phy.type == e1000_phy_bm) &&
+			    (hw->mac.autoneg == true) &&
+			    (adapter->link_speed == SPEED_10 ||
+			     adapter->link_speed == SPEED_100) &&
+			    (adapter->link_duplex == HALF_DUPLEX)) {
+				u16 autoneg_exp;
+
+				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+
+				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+					e_info("Autonegotiated half duplex but"
+					       " link partner cannot autoneg. "
+					       " Try forcing full duplex if "
+					       "link gets many collisions.\n");
+			}
+
+			/*
 			 * tweak tx_queue_len according to speed/duplex
 			 * and adjust the timeout factor
 			 */
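
The duplex-mismatch warning added in the hunk above comes down to one predicate: the link autonegotiated to 10 or 100 Mb/s half duplex while the partner's autoneg-expansion register (bit 0, NWAY_ER_LP_NWAY_CAPS, defined earlier in this patch) reports no autoneg capability, which usually means the partner was forced to full duplex. A minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LP_NWAY_CAPS 0x0001	/* mirrors NWAY_ER_LP_NWAY_CAPS */

/* True when a 10/100 half-duplex autonegotiated link faces a partner
 * that reports no autoneg capability - the case warned about above. */
static bool duplex_mismatch_likely(unsigned int speed_mbps, bool half_duplex,
				   uint16_t autoneg_exp)
{
	return (speed_mbps == 10 || speed_mbps == 100) && half_duplex &&
	       !(autoneg_exp & LP_NWAY_CAPS);
}

int main(void)
{
	printf("%d\n", duplex_mismatch_likely(100, true, 0x0000)); /* 1 */
	return 0;
}
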
@@ -3315,7 +3646,10 @@
 	}
 
 	/* Cause software interrupt to ensure Rx ring is cleaned */
-	ew32(ICS, E1000_ICS_RXDMT0);
+	if (adapter->msix_entries)
+		ew32(ICS, adapter->rx_ring->ims_val);
+	else
+		ew32(ICS, E1000_ICS_RXDMT0);
 
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
@@ -4032,6 +4366,7 @@
 		e1000e_down(adapter);
 		e1000_free_irq(adapter);
 	}
+	e1000e_reset_interrupt_capability(adapter);
 
 	retval = pci_save_state(pdev);
 	if (retval)
@@ -4158,6 +4493,7 @@
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
+	e1000e_set_interrupt_capability(adapter);
 	if (netif_running(netdev)) {
 		err = e1000_request_irq(adapter);
 		if (err)
@@ -4335,13 +4671,15 @@
 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
 	if (!(le16_to_cpu(buf) & (1 << 0))) {
 		/* Deep Smart Power Down (DSPD) */
-		e_warn("Warning: detected DSPD enabled in EEPROM\n");
+		dev_warn(&adapter->pdev->dev,
+			 "Warning: detected DSPD enabled in EEPROM\n");
 	}
 
 	ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
 	if (le16_to_cpu(buf) & (3 << 2)) {
 		/* ASPM enable */
-		e_warn("Warning: detected ASPM enabled in EEPROM\n");
+		dev_warn(&adapter->pdev->dev,
+			 "Warning: detected ASPM enabled in EEPROM\n");
 	}
 }
 
@@ -4467,6 +4805,8 @@
 
 	adapter->bd_number = cards_found++;
 
+	e1000e_check_options(adapter);
+
 	/* setup adapter struct */
 	err = e1000_sw_init(adapter);
 	if (err)
@@ -4573,8 +4913,6 @@
 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
 
-	e1000e_check_options(adapter);
-
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
 	adapter->fc_autoneg = 1;
@@ -4704,6 +5042,7 @@
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 
+	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 
@@ -4745,6 +5084,8 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
 
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
 	  board_80003es2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4767,6 +5108,7 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
@@ -4775,6 +5117,9 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
 
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+
 	{ }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index ed912e0..f46db6cd 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -114,6 +114,15 @@
 #define DEFAULT_ITR 3
 #define MAX_ITR 100000
 #define MIN_ITR 100
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE	2
+#define MIN_INTMODE	0
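
As with the driver's other options, IntMode is a per-adapter array, so a load along the lines of "modprobe e1000e IntMode=1,1" would request MSI on the first two ports; values outside 0-2 are caught by the range check below and fall back to the default of 2 (MSI-X).
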
 
 /*
  * Enable Smart Power Down of the PHY
@@ -352,6 +361,24 @@
 			adapter->itr = 20000;
 		}
 	}
+	{ /* Interrupt Mode */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Mode",
+			.err  = "defaulting to 2 (MSI-X)",
+			.def  = E1000E_INT_MODE_MSIX,
+			.arg  = { .r = { .min = MIN_INTMODE,
+					 .max = MAX_INTMODE } }
+		};
+
+		if (num_IntMode > bd) {
+			unsigned int int_mode = IntMode[bd];
+			e1000_validate_option(&int_mode, &opt, adapter);
+			adapter->int_mode = int_mode;
+		} else {
+			adapter->int_mode = opt.def;
+		}
+	}
 	{ /* Smart Power Down */
 		const struct e1000_option opt = {
 			.type = enable_option,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b133dcf..6cd333a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -476,7 +476,9 @@
 	if (ret_val)
 		return ret_val;
 
-	if ((phy->type == e1000_phy_m88) && (phy->revision < 4)) {
+	if ((phy->type == e1000_phy_m88) &&
+	    (phy->revision < E1000_REVISION_4) &&
+	    (phy->id != BME1000_E_PHY_ID_R2)) {
 		/*
 		 * Force TX_CLK in the Extended PHY Specific Control Register
 		 * to 25MHz clock.
@@ -504,6 +506,18 @@
 			return ret_val;
 	}
 
+	if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+		/* Set PHY page 0, register 29 to 0x0003 */
+		ret_val = e1e_wphy(hw, 29, 0x0003);
+		if (ret_val)
+			return ret_val;
+
+		/* Set PHY page 0, register 30 to 0x0000 */
+		ret_val = e1e_wphy(hw, 30, 0x0000);
+		if (ret_val)
+			return ret_val;
+	}
+
 	/* Commit the changes. */
 	ret_val = e1000e_commit_phy(hw);
 	if (ret_val)
@@ -1720,6 +1734,91 @@
 	return 0;
 }
 
+/**
+ *  e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	hw_dbg(hw, "Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1e_wphy(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1e_wphy(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1e_wphy(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1e_wphy(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Gig mode */
+	e1e_wphy(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1e_wphy(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1e_wphy(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1e_wphy(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1e_wphy(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1e_wphy(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1e_wphy(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1e_wphy(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1e_wphy(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1e_wphy(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1e_wphy(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1e_wphy(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1e_wphy(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1e_wphy(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1e_wphy(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1e_wphy(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1e_wphy(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1e_wphy(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1e_wphy(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1e_wphy(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1e_wphy(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1e_wphy(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1e_wphy(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1e_wphy(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1e_wphy(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1e_wphy(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1e_wphy(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1e_wphy(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
 /* Internal function pointers */
 
 /**
@@ -1969,6 +2068,99 @@
 }
 
 /**
+ *  e1000e_read_phy_reg_bm2 - Read BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the semaphore, if necessary, then reads the PHY register at
+ *  offset and stores the retrieved information in data.  Releases any
+ *  acquired semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true);
+		return ret_val;
+	}
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val) {
+			hw->phy.ops.release_phy(hw);
+			return ret_val;
+		}
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+	hw->phy.ops.release_phy(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_bm2 - Write BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires the semaphore, if necessary, then writes the data to the PHY
+ *  register at the offset.  Releases any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false);
+		return ret_val;
+	}
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val) {
+			hw->phy.ops.release_phy(hw);
+			return ret_val;
+		}
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+	hw->phy.ops.release_phy(hw);
+
+	return ret_val;
+}
+
+/**
  *  e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read or written
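
Note on the bm2 accessors added above: a single offset encodes both the PHY page and the register within the page ("page x 32" plus the register number), and the helpers split it back apart before selecting the page via BM_PHY_PAGE_SELECT. A minimal stand-alone sketch of that encoding, assuming IGP_PAGE_SHIFT is 5 and MAX_PHY_REG_ADDRESS is 0x1F as the "page x 32" comment implies (the page/register values are made up for illustration):

/* Stand-alone user-space sketch of the paged BM PHY offset encoding. */
#include <stdio.h>

#define IGP_PAGE_SHIFT		5
#define MAX_PHY_REG_ADDRESS	0x1F

static unsigned int bm_phy_offset(unsigned int page, unsigned int reg)
{
	/* callers pass "page * 32 + reg"; the accessors re-derive both halves */
	return (page << IGP_PAGE_SHIFT) | (reg & MAX_PHY_REG_ADDRESS);
}

int main(void)
{
	unsigned int offset = bm_phy_offset(2, 21);	/* hypothetical page/reg */
	unsigned int page = offset >> IGP_PAGE_SHIFT;
	unsigned int reg = offset & MAX_PHY_REG_ADDRESS;

	printf("offset 0x%x -> page %u, reg %u\n", offset, page, reg);
	return 0;
}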
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index e01926b..5524271 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0092"
+#define DRV_VERSION	"EHEA_0093"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD      2
 #define DLPAR_MEM_REM      4
-#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)
+#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 156eb63..2a33a61 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -535,7 +535,7 @@
 				       cb_logaddr,		/* R5 */
 				       0, 0, 0, 0, 0);		/* R6-R10 */
 #ifdef DEBUG
-	ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
+	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
 #endif
 	return hret;
 }
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 140f05b..db8a925 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -595,7 +595,8 @@
 	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
 	mr_len = *(unsigned long *)arg;
 
-	ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+	if (!ehea_bmap)
+		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
 	if (!ehea_bmap)
 		return -ENOMEM;
 
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index aa0bf6e..e1b441e 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -110,7 +110,7 @@
 	}
 	if (ret && netif_msg_drv(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 
 	return ret;
 }
@@ -131,7 +131,7 @@
 		ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
 		if (ret && netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-				__FUNCTION__, ret);
+				__func__, ret);
 	}
 	return ret;
 }
@@ -156,7 +156,7 @@
 	ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
 	if (ret)
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	else
 		val = rx_buf[slen - 1];
 
@@ -176,14 +176,14 @@
 	ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
 	if (ret && netif_msg_drv(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	return ret;
 }
 
 static void enc28j60_soft_reset(struct enc28j60_net *priv)
 {
 	if (netif_msg_hw(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
 	/* Errata workaround #1, CLKRDY check is unreliable,
@@ -357,7 +357,7 @@
 		reg = nolock_regw_read(priv, ERDPTL);
 		if (reg != addr)
 			printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
-				"(0x%04x - 0x%04x)\n", __FUNCTION__, reg, addr);
+				"(0x%04x - 0x%04x)\n", __func__, reg, addr);
 	}
 #endif
 	spi_read_buf(priv, len, data);
@@ -380,7 +380,7 @@
 		if (reg != TXSTART_INIT)
 			printk(KERN_DEBUG DRV_NAME
 				": %s() ERWPT:0x%04x != 0x%04x\n",
-				__FUNCTION__, reg, TXSTART_INIT);
+				__func__, reg, TXSTART_INIT);
 	}
 #endif
 	/* Set the TXND pointer to correspond to the packet size given */
@@ -390,13 +390,13 @@
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME
 			": %s() after control byte ERWPT:0x%04x\n",
-			__FUNCTION__, nolock_regw_read(priv, EWRPTL));
+			__func__, nolock_regw_read(priv, EWRPTL));
 	/* copy the packet into the transmit buffer */
 	spi_write_buf(priv, len, data);
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME
 			 ": %s() after write packet ERWPT:0x%04x, len=%d\n",
-			 __FUNCTION__, nolock_regw_read(priv, EWRPTL), len);
+			 __func__, nolock_regw_read(priv, EWRPTL), len);
 	mutex_unlock(&priv->lock);
 }
 
@@ -495,7 +495,7 @@
 		if (netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME
 				": %s() Hardware must be disabled to set "
-				"Mac address\n", __FUNCTION__);
+				"Mac address\n", __func__);
 		ret = -EBUSY;
 	}
 	mutex_unlock(&priv->lock);
@@ -575,7 +575,7 @@
 	if (start > 0x1FFF || end > 0x1FFF || start > end) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
-				"bad parameters!\n", __FUNCTION__, start, end);
+				"bad parameters!\n", __func__, start, end);
 		return;
 	}
 	/* set receive buffer start + end */
@@ -591,7 +591,7 @@
 	if (start > 0x1FFF || end > 0x1FFF || start > end) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
-				"bad parameters!\n", __FUNCTION__, start, end);
+				"bad parameters!\n", __func__, start, end);
 		return;
 	}
 	/* set transmit buffer start + end */
@@ -630,7 +630,7 @@
 	u8 reg;
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __FUNCTION__,
+		printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
 			priv->full_duplex ? "FullDuplex" : "HalfDuplex");
 
 	mutex_lock(&priv->lock);
@@ -661,7 +661,7 @@
 	if (reg == 0x00 || reg == 0xff) {
 		if (netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
-				__FUNCTION__, reg);
+				__func__, reg);
 		return 0;
 	}
 
@@ -724,7 +724,7 @@
 	/* enable interrupts */
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
-			__FUNCTION__);
+			__func__);
 
 	enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
 
@@ -888,7 +888,7 @@
 		if (netif_msg_rx_err(priv))
 			dev_err(&ndev->dev,
 				"%s() Invalid packet address!! 0x%04x\n",
-				__FUNCTION__, priv->next_pk_ptr);
+				__func__, priv->next_pk_ptr);
 		/* packet address corrupted: reset RX logic */
 		mutex_lock(&priv->lock);
 		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -917,7 +917,7 @@
 	rxstat |= rsv[4];
 
 	if (netif_msg_rx_status(priv))
-		enc28j60_dump_rsv(priv, __FUNCTION__, next_packet, len, rxstat);
+		enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
 
 	if (!RSV_GETBIT(rxstat, RSV_RXOK)) {
 		if (netif_msg_rx_err(priv))
@@ -941,7 +941,7 @@
 			enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
 					len, skb_put(skb, len));
 			if (netif_msg_pktdata(priv))
-				dump_packet(__FUNCTION__, skb->len, skb->data);
+				dump_packet(__func__, skb->len, skb->data);
 			skb->protocol = eth_type_trans(skb, ndev);
 			/* update statistics */
 			ndev->stats.rx_packets++;
@@ -958,7 +958,7 @@
 	erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
-			__FUNCTION__, erxrdpt);
+			__func__, erxrdpt);
 
 	mutex_lock(&priv->lock);
 	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -968,7 +968,7 @@
 		reg = nolock_regw_read(priv, ERXRDPTL);
 		if (reg != erxrdpt)
 			printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
-				"error (0x%04x - 0x%04x)\n", __FUNCTION__,
+				"error (0x%04x - 0x%04x)\n", __func__,
 				reg, erxrdpt);
 	}
 #endif
@@ -1006,7 +1006,7 @@
 	mutex_unlock(&priv->lock);
 	if (netif_msg_rx_status(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
-			__FUNCTION__, free_space);
+			__func__, free_space);
 	return free_space;
 }
 
@@ -1022,7 +1022,7 @@
 	reg = enc28j60_phy_read(priv, PHSTAT2);
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
-			"PHSTAT2: %04x\n", __FUNCTION__,
+			"PHSTAT2: %04x\n", __func__,
 			enc28j60_phy_read(priv, PHSTAT1), reg);
 	duplex = reg & PHSTAT2_DPXSTAT;
 
@@ -1095,7 +1095,7 @@
 	int intflags, loop;
 
 	if (netif_msg_intr(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 	/* disable further interrupts */
 	locked_reg_bfclr(priv, EIE, EIE_INTIE);
 
@@ -1198,7 +1198,7 @@
 	/* re-enable interrupts */
 	locked_reg_bfset(priv, EIE, EIE_INTIE);
 	if (netif_msg_intr(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
 }
 
 /*
@@ -1213,7 +1213,7 @@
 			": Tx Packet Len:%d\n", priv->tx_skb->len);
 
 	if (netif_msg_pktdata(priv))
-		dump_packet(__FUNCTION__,
+		dump_packet(__func__,
 			    priv->tx_skb->len, priv->tx_skb->data);
 	enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
 
@@ -1254,7 +1254,7 @@
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_tx_queued(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	/* If some error occurs while trying to transmit this
 	 * packet, you should return '1' from this function.
@@ -1325,7 +1325,7 @@
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	if (!is_valid_ether_addr(dev->dev_addr)) {
 		if (netif_msg_ifup(priv)) {
@@ -1363,7 +1363,7 @@
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	enc28j60_hw_disable(priv);
 	enc28j60_lowpower(priv, true);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
new file mode 100644
index 0000000..391c3bc
--- /dev/null
+++ b/drivers/net/enic/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ENIC) := enic.o
+
+enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
+	enic_res.o vnic_dev.o vnic_rq.o
+
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
new file mode 100644
index 0000000..c036a8b
--- /dev/null
+++ b/drivers/net/enic/cq_desc.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+	CQ_DESC_TYPE_WQ_ENET = 0,
+	CQ_DESC_TYPE_DESC_COPY = 1,
+	CQ_DESC_TYPE_WQ_EXCH = 2,
+	CQ_DESC_TYPE_RQ_ENET = 3,
+	CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout.  The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 type_specfic[11];
+	u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS        7
+#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK       1
+#define CQ_DESC_Q_NUM_BITS       10
+#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS    12
+#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	const struct cq_desc *desc = desc_arg;
+	const u8 type_color = desc->type_color;
+
+	*color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
+
+	/*
+	 * Make sure color bit is read from desc *before* other fields
+	 * are read from desc.  Hardware guarantees color bit is last
+	 * bit (byte) written.  Adding the rmb() prevents the compiler
+	 * and/or CPU from reordering the reads which would potentially
+	 * result in reading stale values.
+	 */
+
+	rmb();
+
+	*type = type_color & CQ_DESC_TYPE_MASK;
+	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+	*completed_index = le16_to_cpu(desc->completed_index) &
+		CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
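
Note on the color bit above: the consumer keeps an expected color and treats a descriptor as new only while its color bit differs from that expectation, flipping the expectation each time the consumer index wraps (this is the bookkeeping the driver's vnic_cq service code performs around cq_desc_dec()); the rmb() keeps the other fields from being read before the color bit. A self-contained user-space sketch of that convention, with a made-up 4-entry ring and producer:

/* Stand-alone sketch of the completion-queue color convention. */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 4

static bool ring[RING_SIZE];		/* color bit of each descriptor */
static unsigned int prod_idx, prod_pass;
static unsigned int cons_idx;
static bool last_color;			/* matches the all-zero initial ring */

/* "hardware": post one completion; the written color flips on every wrap */
static void hw_post(void)
{
	ring[prod_idx] = (prod_pass % 2) == 0;
	if (++prod_idx == RING_SIZE) {
		prod_idx = 0;
		prod_pass++;
	}
}

/* "driver": an entry is new only while its color differs from last_color */
static unsigned int drain(void)
{
	unsigned int serviced = 0;

	while (ring[cons_idx] != last_color) {
		serviced++;
		if (++cons_idx == RING_SIZE) {
			cons_idx = 0;
			last_color = !last_color;	/* flip on wrap */
		}
	}
	return serviced;
}

int main(void)
{
	unsigned int serviced = 0;
	int i;

	for (i = 0; i < 3; i++)
		hw_post();
	serviced += drain();

	for (i = 0; i < 3; i++)
		hw_post();
	serviced += drain();

	printf("serviced %u completions, stopped at index %u\n",
	       serviced, cons_idx);	/* expect: 6 and index 2 */
	return 0;
}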
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
new file mode 100644
index 0000000..03dce9e
--- /dev/null
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 reserved[11];
+	u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+	__le16 completed_index_flags;
+	__le16 q_number_rss_type_flags;
+	__le32 rss_hash;
+	__le16 bytes_written_flags;
+	__le16 vlan;
+	__le16 checksum_fcoe;
+	u8 flags;
+	u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT          (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE                  (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP                   (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP                   (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS               4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+	((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE               0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4               1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4           2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6               3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6           4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX            5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX        6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC         (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS          14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT              8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK       (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK              (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP                   (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR              (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP                   (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK          (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6                  (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4                  (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT         (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK                (0x1 << 7)
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+	u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+	u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+	u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+	u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+	u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	u16 q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+
+	*ingress_port = (completed_index_flags &
+		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+		1 : 0;
+	*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+		1 : 0;
+	*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+		1 : 0;
+
+	*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+		CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+	*csum_not_calc = (q_number_rss_type_flags &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+	*rss_hash = le32_to_cpu(desc->rss_hash);
+
+	*bytes_written = bytes_written_flags &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_error = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+	*vlan_stripped = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+	*vlan = le16_to_cpu(desc->vlan);
+
+	if (*fcoe) {
+		*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+			CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+		*fcoe_fc_crc_ok = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+		*fcoe_enc_error = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+		*fcoe_eof = (u8)((desc->checksum_fcoe >>
+			CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+			CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+		*checksum = 0;
+	} else {
+		*fcoe_sof = 0;
+		*fcoe_fc_crc_ok = 0;
+		*fcoe_enc_error = 0;
+		*fcoe_eof = 0;
+		*checksum = le16_to_cpu(desc->checksum_fcoe);
+	}
+
+	*tcp_udp_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+	*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+	*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+	*ipv4_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+	*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+	*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+	*ipv4_fragment =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+	*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
new file mode 100644
index 0000000..7f677e8
--- /dev/null
+++ b/drivers/net/enic/enic.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include <linux/inet_lro.h>
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_rss.h"
+
+#define DRV_NAME		"enic"
+#define DRV_DESCRIPTION		"Cisco 10G Ethernet Driver"
+#define DRV_VERSION		"0.0.1-18163.472-k1"
+#define DRV_COPYRIGHT		"Copyright 2008 Cisco Systems, Inc"
+#define PFX			DRV_NAME ": "
+
+#define ENIC_LRO_MAX_DESC	8
+#define ENIC_LRO_MAX_AGGR	64
+
+enum enic_cq_index {
+	ENIC_CQ_RQ,
+	ENIC_CQ_WQ,
+	ENIC_CQ_MAX,
+};
+
+enum enic_intx_intr_index {
+	ENIC_INTX_WQ_RQ,
+	ENIC_INTX_ERR,
+	ENIC_INTX_NOTIFY,
+	ENIC_INTX_MAX,
+};
+
+enum enic_msix_intr_index {
+	ENIC_MSIX_RQ,
+	ENIC_MSIX_WQ,
+	ENIC_MSIX_ERR,
+	ENIC_MSIX_NOTIFY,
+	ENIC_MSIX_MAX,
+};
+
+struct enic_msix_entry {
+	int requested;
+	char devname[IFNAMSIZ];
+	irqreturn_t (*isr)(int, void *);
+	void *devid;
+};
+
+/* Per-instance private data structure */
+struct enic {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct vnic_enet_config config;
+	struct vnic_dev_bar bar0;
+	struct vnic_dev *vdev;
+	struct timer_list notify_timer;
+	struct work_struct reset;
+	struct msix_entry msix_entry[ENIC_MSIX_MAX];
+	struct enic_msix_entry msix[ENIC_MSIX_MAX];
+	u32 msg_enable;
+	spinlock_t devcmd_lock;
+	u8 mac_addr[ETH_ALEN];
+	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int mc_count;
+	int csum_rx_enabled;
+	u32 port_mtu;
+
+	/* work queue cache line section */
+	____cacheline_aligned struct vnic_wq wq[1];
+	spinlock_t wq_lock[1];
+	unsigned int wq_count;
+	struct vlan_group *vlan_group;
+
+	/* receive queue cache line section */
+	____cacheline_aligned struct vnic_rq rq[1];
+	unsigned int rq_count;
+	int (*rq_alloc_buf)(struct vnic_rq *rq);
+	struct napi_struct napi;
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
+
+	/* interrupt resource cache line section */
+	____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
+	unsigned int intr_count;
+	u32 __iomem *legacy_pba;		/* memory-mapped */
+
+	/* completion queue cache line section */
+	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+	unsigned int cq_count;
+};
+
+#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 0000000..f3a47a8
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include "cq_enet_desc.h"
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "enic_res.h"
+#include "enic.h"
+
+#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+
+/* Supported devices */
+static struct pci_device_id enic_id_table[] = {
+	{ PCI_VDEVICE(CISCO, 0x0043) },
+	{ 0, }	/* end of table */
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, enic_id_table);
+
+struct enic_stat {
+	char name[ETH_GSTRING_LEN];
+	unsigned int offset;
+};
+
+#define ENIC_TX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
+#define ENIC_RX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
+
+static const struct enic_stat enic_tx_stats[] = {
+	ENIC_TX_STAT(tx_frames_ok),
+	ENIC_TX_STAT(tx_unicast_frames_ok),
+	ENIC_TX_STAT(tx_multicast_frames_ok),
+	ENIC_TX_STAT(tx_broadcast_frames_ok),
+	ENIC_TX_STAT(tx_bytes_ok),
+	ENIC_TX_STAT(tx_unicast_bytes_ok),
+	ENIC_TX_STAT(tx_multicast_bytes_ok),
+	ENIC_TX_STAT(tx_broadcast_bytes_ok),
+	ENIC_TX_STAT(tx_drops),
+	ENIC_TX_STAT(tx_errors),
+	ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+	ENIC_RX_STAT(rx_frames_ok),
+	ENIC_RX_STAT(rx_frames_total),
+	ENIC_RX_STAT(rx_unicast_frames_ok),
+	ENIC_RX_STAT(rx_multicast_frames_ok),
+	ENIC_RX_STAT(rx_broadcast_frames_ok),
+	ENIC_RX_STAT(rx_bytes_ok),
+	ENIC_RX_STAT(rx_unicast_bytes_ok),
+	ENIC_RX_STAT(rx_multicast_bytes_ok),
+	ENIC_RX_STAT(rx_broadcast_bytes_ok),
+	ENIC_RX_STAT(rx_drop),
+	ENIC_RX_STAT(rx_no_bufs),
+	ENIC_RX_STAT(rx_errors),
+	ENIC_RX_STAT(rx_rss),
+	ENIC_RX_STAT(rx_crc_errors),
+	ENIC_RX_STAT(rx_frames_64),
+	ENIC_RX_STAT(rx_frames_127),
+	ENIC_RX_STAT(rx_frames_255),
+	ENIC_RX_STAT(rx_frames_511),
+	ENIC_RX_STAT(rx_frames_1023),
+	ENIC_RX_STAT(rx_frames_1518),
+	ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+static int enic_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+	ecmd->port = PORT_FIBRE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (netif_carrier_ok(netdev)) {
+		ecmd->speed = vnic_dev_port_speed(enic->vdev);
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_devcmd_fw_info *fw_info;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_fw_info(enic->vdev, &fw_info);
+	spin_unlock(&enic->devcmd_lock);
+
+	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strncpy(drvinfo->fw_version, fw_info->fw_version,
+		sizeof(drvinfo->fw_version));
+	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	unsigned int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < enic_n_tx_stats; i++) {
+			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < enic_n_rx_stats; i++) {
+			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return enic_n_tx_stats + enic_n_rx_stats;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_stats *vstats;
+	unsigned int i;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &vstats);
+	spin_unlock(&enic->devcmd_lock);
+
+	for (i = 0; i < enic_n_tx_stats; i++)
+		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
+	for (i = 0; i < enic_n_rx_stats; i++)
+		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
+}
+
+static u32 enic_get_rx_csum(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->csum_rx_enabled;
+}
+
+static int enic_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && !ENIC_SETTING(enic, RXCSUM))
+		return -EINVAL;
+
+	enic->csum_rx_enabled = !!data;
+
+	return 0;
+}
+
+static int enic_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && !ENIC_SETTING(enic, TXCSUM))
+		return -EINVAL;
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int enic_set_tso(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && !ENIC_SETTING(enic, TSO))
+		return -EINVAL;
+
+	if (data)
+		netdev->features |=
+			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	else
+		netdev->features &=
+			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
+
+	return 0;
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->msg_enable = value;
+}
+
+static struct ethtool_ops enic_ethtool_ops = {
+	.get_settings = enic_get_settings,
+	.get_drvinfo = enic_get_drvinfo,
+	.get_msglevel = enic_get_msglevel,
+	.set_msglevel = enic_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_strings = enic_get_strings,
+	.get_sset_count = enic_get_sset_count,
+	.get_ethtool_stats = enic_get_ethtool_stats,
+	.get_rx_csum = enic_get_rx_csum,
+	.set_rx_csum = enic_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = enic_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = enic_set_tso,
+};
+
+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	if (buf->sop)
+		pci_unmap_single(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+
+	if (buf->os_buf)
+		dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
+{
+	enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	spin_lock(&enic->wq_lock[q_number]);
+
+	vnic_wq_service(&enic->wq[q_number], cq_desc,
+		completed_index, enic_wq_free_buf,
+		opaque);
+
+	if (netif_queue_stopped(enic->netdev) &&
+	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
+		netif_wake_queue(enic->netdev);
+
+	spin_unlock(&enic->wq_lock[q_number]);
+
+	return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+	unsigned int i;
+	u32 error_status;
+
+	for (i = 0; i < enic->wq_count; i++) {
+		error_status = vnic_wq_error_status(&enic->wq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		error_status = vnic_rq_error_status(&enic->rq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+}
+
+static void enic_link_check(struct enic *enic)
+{
+	int link_status = vnic_dev_link_status(enic->vdev);
+	int carrier_ok = netif_carrier_ok(enic->netdev);
+
+	if (link_status && !carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
+		netif_carrier_on(enic->netdev);
+	} else if (!link_status && carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
+		netif_carrier_off(enic->netdev);
+	}
+}
+
+static void enic_mtu_check(struct enic *enic)
+{
+	u32 mtu = vnic_dev_mtu(enic->vdev);
+
+	if (mtu != enic->port_mtu) {
+		if (mtu < enic->netdev->mtu)
+			printk(KERN_WARNING PFX
+				"%s: interface MTU (%d) set higher "
+				"than switch port MTU (%d)\n",
+				enic->netdev->name, enic->netdev->mtu, mtu);
+		enic->port_mtu = mtu;
+	}
+}
+
+static void enic_msglvl_check(struct enic *enic)
+{
+	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+
+	if (msg_enable != enic->msg_enable) {
+		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
+			enic->netdev->name, enic->msg_enable, msg_enable);
+		enic->msg_enable = msg_enable;
+	}
+}
+
+static void enic_notify_check(struct enic *enic)
+{
+	enic_msglvl_check(enic);
+	enic_mtu_check(enic);
+	enic_link_check(enic);
+}
+
+#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
+
+static irqreturn_t enic_isr_legacy(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct enic *enic = netdev_priv(netdev);
+	u32 pba;
+
+	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);
+
+	pba = vnic_intr_legacy_pba(enic->legacy_pba);
+	if (!pba) {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+		return IRQ_NONE;	/* not our interrupt */
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
+		enic_notify_check(enic);
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
+		enic_log_q_error(enic);
+		/* schedule recovery from WQ/RQ error */
+		schedule_work(&enic->reset);
+		return IRQ_HANDLED;
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
+		if (netif_rx_schedule_prep(netdev, &enic->napi))
+			__netif_rx_schedule(netdev, &enic->napi);
+	} else {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msi(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* With MSI, there is no sharing of interrupts, so this is
+	 * our interrupt and there is no need to ack it.  The device
+	 * is not providing per-vector masking, so the OS will not
+	 * write to PCI config space to mask/unmask the interrupt.
+	 * We're using mask_on_assertion for MSI, so the device
+	 * automatically masks the interrupt when the interrupt is
+	 * generated.  Later, when exiting polling, the interrupt
+	 * will be unmasked (see enic_poll).
+	 *
+	 * Also, the device uses the same PCIe Traffic Class (TC)
+	 * for Memory Write data and MSI, so there are no ordering
+	 * issues; the MSI will always arrive at the Root Complex
+	 * _after_ corresponding Memory Writes (i.e. descriptor
+	 * writes).
+	 */
+
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* schedule NAPI polling for RQ cleanup */
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_wq(int irq, void *data)
+{
+	struct enic *enic = data;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int wq_work_done;
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
+		wq_work_done,
+		1 /* unmask intr */,
+		1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_err(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_log_q_error(enic);
+
+	/* schedule recovery from WQ/RQ error */
+	schedule_work(&enic->reset);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_notify(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_notify_check(enic);
+	vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
+
+	return IRQ_HANDLED;
+}
+
+static inline void enic_queue_wq_skb_cont(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	unsigned int len_left)
+{
+	skb_frag_t *frag;
+
+	/* Queue additional data fragments */
+	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+		len_left -= frag->size;
+		enic_queue_wq_desc_cont(wq, skb,
+			pci_map_page(enic->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE),
+			frag->size,
+			(len_left == 0));	/* EOP? */
+	}
+}
+
+static inline void enic_queue_wq_skb_vlan(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb);
+	unsigned int csum_offset = hdr_len + skb->csum_offset;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_csum_l4(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		csum_offset,
+		hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_tso(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int eop = (len_left == 0);
+
+	/* Preload TCP csum field with IP pseudo hdr calculated
+	 * with IP length set to zero.  HW will later add in length
+	 * to each TCP segment resulting from the TSO.
+	 */
+
+	if (skb->protocol == __constant_htons(ETH_P_IP)) {
+		ip_hdr(skb)->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	}
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_tso(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		mss, hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb)
+{
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int vlan_tag = 0;
+	int vlan_tag_insert = 0;
+
+	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
+		/* VLAN tag from trunking driver */
+		vlan_tag_insert = 1;
+		vlan_tag = vlan_tx_tag_get(skb);
+	}
+
+	if (mss)
+		enic_queue_wq_skb_tso(enic, wq, skb, mss,
+			vlan_tag_insert, vlan_tag);
+	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
+		enic_queue_wq_skb_csum_l4(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+	else
+		enic_queue_wq_skb_vlan(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+}
+
+/* netif_tx_lock held, process context with BHs disabled */
+static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_wq *wq = &enic->wq[0];
+	unsigned long flags;
+
+	if (skb->len <= 0) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
+	 * which is very likely.  On the off chance it's going to take
+	 * more than ENIC_NON_TSO_MAX_DESC descriptors, linearize the skb.
+	 */
+
+	if (skb_shinfo(skb)->gso_size == 0 &&
+	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
+	    skb_linearize(skb)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	spin_lock_irqsave(&enic->wq_lock[0], flags);
+
+	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
+		netif_stop_queue(netdev);
+		/* This is a hard error, log it */
+		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
+			"queue awake!\n", netdev->name);
+		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	enic_queue_wq_skb(enic, wq, skb);
+
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
+		netif_stop_queue(netdev);
+
+	netdev->trans_start = jiffies;
+
+	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+
+	return NETDEV_TX_OK;
+}
+
+/* dev_base_lock rwlock held, nominally process context */
+static struct net_device_stats *enic_get_stats(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
+	struct vnic_stats *stats;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &stats);
+	spin_unlock(&enic->devcmd_lock);
+
+	net_stats->tx_packets = stats->tx.tx_frames_ok;
+	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
+	net_stats->tx_errors = stats->tx.tx_errors;
+	net_stats->tx_dropped = stats->tx.tx_drops;
+
+	net_stats->rx_packets = stats->rx.rx_frames_ok;
+	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
+	net_stats->rx_errors = stats->rx.rx_errors;
+	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
+	net_stats->rx_crc_errors = stats->rx.rx_crc_errors;
+	net_stats->rx_dropped = stats->rx.rx_no_bufs;
+
+	return net_stats;
+}
+
+static void enic_reset_mcaddrs(struct enic *enic)
+{
+	enic->mc_count = 0;
+}
+
+static int enic_set_mac_addr(struct net_device *netdev, char *addr)
+{
+	if (!is_valid_ether_addr(addr))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr, netdev->addr_len);
+
+	return 0;
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_multicast_list(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct dev_mc_list *list = netdev->mc_list;
+	int directed = 1;
+	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int mc_count = netdev->mc_count;
+	unsigned int i, j;
+
+	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
+
+	spin_lock(&enic->devcmd_lock);
+
+	vnic_dev_packet_filter(enic->vdev, directed,
+		multicast, broadcast, promisc, allmulti);
+
+	/* Is there an easier way?  Trying to minimize the
+	 * calls to add/del multicast addrs.  We keep the
+	 * addrs from the last call in enic->mc_addr and
+	 * look for changes to add/del.
+	 */
+
+	for (i = 0; list && i < mc_count; i++) {
+		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
+		list = list->next;
+	}
+
+	for (i = 0; i < enic->mc_count; i++) {
+		for (j = 0; j < mc_count; j++)
+			if (compare_ether_addr(enic->mc_addr[i],
+				mc_addr[j]) == 0)
+				break;
+		if (j == mc_count)
+			enic_del_multicast_addr(enic, enic->mc_addr[i]);
+	}
+
+	for (i = 0; i < mc_count; i++) {
+		for (j = 0; j < enic->mc_count; j++)
+			if (compare_ether_addr(mc_addr[i],
+				enic->mc_addr[j]) == 0)
+				break;
+		if (j == enic->mc_count)
+			enic_add_multicast_addr(enic, mc_addr[i]);
+	}
+
+	/* Save the list to compare against next time
+	 */
+
+	for (i = 0; i < mc_count; i++)
+		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
+
+	enic->mc_count = mc_count;
+
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *vlan_group)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->vlan_group = vlan_group;
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_add_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_del_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_tx_timeout(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	schedule_work(&enic->reset);
+}
+
+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+
+	if (!buf->os_buf)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(buf->os_buf);
+}
+
+static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(size + NET_IP_ALIGN);
+
+	if (skb)
+		skb_reserve(skb, NET_IP_ALIGN);
+
+	return skb;
+}
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+	unsigned int len = enic->netdev->mtu + ETH_HLEN;
+	unsigned int os_buf_index = 0;
+	dma_addr_t dma_addr;
+
+	skb = enic_rq_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data,
+		len, PCI_DMA_FROMDEVICE);
+
+	enic_queue_rq_desc(rq, skb, os_buf_index,
+		dma_addr, len);
+
+	return 0;
+}
+
+static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
+	void **tcph, u64 *hdr_flags, void *priv)
+{
+	struct cq_enet_rq_desc *cq_desc = priv;
+	unsigned int ip_len;
+	struct iphdr *iph;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	cq_enet_rq_desc_dec(cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (!(ipv4 && tcp && !ipv4_fragment))
+		return -1;
+
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+
+	ip_len = ip_hdrlen(skb);
+	skb_set_transport_header(skb, ip_len);
+
+	/* check if ip header and tcp header are complete */
+	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+		return -1;
+
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+	*tcph = tcp_hdr(skb);
+	*iphdr = iph;
+
+	return 0;
+}
+
+static void enic_rq_indicate_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	if (skipped)
+		return;
+
+	skb = buf->os_buf;
+	prefetch(skb->data - NET_IP_ALIGN);
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (packet_error) {
+
+		if (bytes_written > 0 && !fcs_ok) {
+			if (net_ratelimit())
+				printk(KERN_ERR PFX
+					"%s: packet error: bad FCS\n",
+					enic->netdev->name);
+		}
+
+		dev_kfree_skb_any(skb);
+
+		return;
+	}
+
+	if (eop && bytes_written > 0) {
+
+		/* Good receive
+		 */
+
+		skb_put(skb, bytes_written);
+		skb->protocol = eth_type_trans(skb, enic->netdev);
+
+		if (enic->csum_rx_enabled && !csum_not_calc) {
+			skb->csum = htons(checksum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+
+		skb->dev = enic->netdev;
+		enic->netdev->last_rx = jiffies;
+
+		if (enic->vlan_group && vlan_stripped) {
+
+			if (ENIC_SETTING(enic, LRO) && ipv4)
+				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
+					skb, enic->vlan_group,
+					vlan, cq_desc);
+			else
+				vlan_hwaccel_receive_skb(skb,
+					enic->vlan_group, vlan);
+
+		} else {
+
+			if (ENIC_SETTING(enic, LRO) && ipv4)
+				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
+			else
+				netif_receive_skb(skb);
+
+		}
+
+	} else {
+
+		/* Buffer overflow
+		 */
+
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_indicate_buf, opaque);
+
+	return 0;
+}
+
+static void enic_rq_drop_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb = buf->os_buf;
+
+	if (skipped)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	dev_kfree_skb_any(skb);
+}
+
+static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_drop_buf, opaque);
+
+	return 0;
+}
+
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int rq_work_to_do = budget;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int  work_done, rq_work_done, wq_work_done;
+
+	/* Service RQ (first) and WQ
+	 */
+
+	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		rq_work_to_do, enic_rq_service, NULL);
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	/* Accumulate intr event credits for this polling
+	 * cycle.  An intr event is the completion of a
+	 * cycle.  An intr event is the completion of
+	 * a WQ or RQ packet.
+
+	work_done = rq_work_done + wq_work_done;
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+
+	if (rq_work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return rq_work_done;
+}
+
+static int enic_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int work_to_do = budget;
+	unsigned int work_done;
+
+	/* Service RQ
+	 */
+
+	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		work_to_do, enic_rq_service, NULL);
+
+	if (work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+		/* Accumulate intr event credits for this polling
+		 * cycle.  An intr event is the completion of a
+	 * WQ or RQ packet.
+		 */
+
+		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return work_done;
+}
+
+static void enic_notify_timer(unsigned long data)
+{
+	struct enic *enic = (struct enic *)data;
+
+	enic_notify_check(enic);
+
+	mod_timer(&enic->notify_timer,
+		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void enic_free_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		free_irq(enic->pdev->irq, netdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		free_irq(enic->pdev->irq, enic);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+			if (enic->msix[i].requested)
+				free_irq(enic->msix_entry[i].vector,
+					enic->msix[i].devid);
+		break;
+	default:
+		break;
+	}
+}
+
+static int enic_request_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+	int err = 0;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+
+	case VNIC_DEV_INTR_MODE_INTX:
+
+		err = request_irq(enic->pdev->irq, enic_isr_legacy,
+			IRQF_SHARED, netdev->name, netdev);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSI:
+
+		err = request_irq(enic->pdev->irq, enic_isr_msi,
+			0, netdev->name, enic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+
+		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
+			"%.11s-rx-0", netdev->name);
+		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
+		enic->msix[ENIC_MSIX_RQ].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
+			"%.11s-tx-0", netdev->name);
+		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
+		enic->msix[ENIC_MSIX_WQ].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
+			"%.11s-err", netdev->name);
+		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
+		enic->msix[ENIC_MSIX_ERR].devid = enic;
+
+		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
+			"%.11s-notify", netdev->name);
+		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
+		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;
+
+		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
+			err = request_irq(enic->msix_entry[i].vector,
+				enic->msix[i].isr, 0,
+				enic->msix[i].devname,
+				enic->msix[i].devid);
+			if (err) {
+				enic_free_intr(enic);
+				break;
+			}
+			enic->msix[i].requested = 1;
+		}
+
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int enic_notify_set(struct enic *enic)
+{
+	int err;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
+		break;
+	default:
+		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
+		break;
+	}
+
+	return err;
+}
+
+static void enic_notify_timer_start(struct enic *enic)
+{
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSI:
+		mod_timer(&enic->notify_timer, jiffies);
+		break;
+	default:
+		/* Using intr for notification for INTx/MSI-X */
+		break;
+	}
+}
+
+/* rtnl lock is held, process context */
+static int enic_open(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int i;
+	int err;
+
+	err = enic_request_intr(enic);
+	if (err) {
+		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
+			netdev->name);
+		return err;
+	}
+
+	err = enic_notify_set(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"%s: Failed to alloc notify buffer, aborting.\n",
+			netdev->name);
+		goto err_out_free_intr;
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+		if (err) {
+			printk(KERN_ERR PFX
+				"%s: Unable to alloc receive buffers.\n",
+				netdev->name);
+			goto err_out_notify_unset;
+		}
+	}
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_enable(&enic->wq[i]);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_enable(&enic->rq[i]);
+
+	enic_add_station_addr(enic);
+	enic_set_multicast_list(netdev);
+
+	netif_wake_queue(netdev);
+	napi_enable(&enic->napi);
+	vnic_dev_enable(enic->vdev);
+
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_unmask(&enic->intr[i]);
+
+	enic_notify_timer_start(enic);
+
+	return 0;
+
+err_out_notify_unset:
+	vnic_dev_notify_unset(enic->vdev);
+err_out_free_intr:
+	enic_free_intr(enic);
+
+	return err;
+}
+
+/* rtnl lock is held, process context */
+static int enic_stop(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int i;
+	int err;
+
+	del_timer_sync(&enic->notify_timer);
+
+	vnic_dev_disable(enic->vdev);
+	napi_disable(&enic->napi);
+	netif_stop_queue(netdev);
+
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_mask(&enic->intr[i]);
+
+	for (i = 0; i < enic->wq_count; i++) {
+		err = vnic_wq_disable(&enic->wq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
+	}
+
+	vnic_dev_notify_unset(enic->vdev);
+	enic_free_intr(enic);
+
+	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		-1, enic_rq_service_drop, NULL);
+	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		-1, enic_wq_service, NULL);
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	for (i = 0; i < enic->cq_count; i++)
+		vnic_cq_clean(&enic->cq[i]);
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_clean(&enic->intr[i]);
+
+	return 0;
+}
+
+static int enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct enic *enic = netdev_priv(netdev);
+	int running = netif_running(netdev);
+
+	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
+		return -EINVAL;
+
+	if (running)
+		enic_stop(netdev);
+
+	netdev->mtu = new_mtu;
+
+	if (netdev->mtu > enic->port_mtu)
+		printk(KERN_WARNING PFX
+			"%s: interface MTU (%d) set higher "
+			"than port MTU (%d)\n",
+			netdev->name, netdev->mtu, enic->port_mtu);
+
+	if (running)
+		enic_open(netdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void enic_poll_controller(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_dev *vdev = enic->vdev;
+
+	switch (vnic_dev_get_intr_mode(vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		enic_isr_msix_rq(enic->pdev->irq, enic);
+		enic_isr_msix_wq(enic->pdev->irq, enic);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		enic_isr_msi(enic->pdev->irq, enic);
+		break;
+	case VNIC_DEV_INTR_MODE_INTX:
+		enic_isr_legacy(enic->pdev->irq, netdev);
+		break;
+	default:
+		break;
+	}
+}
+#endif
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+	int (*start)(struct vnic_dev *, int),
+	int (*finished)(struct vnic_dev *, int *),
+	int arg)
+{
+	unsigned long time;
+	int done;
+	int err;
+
+	BUG_ON(in_interrupt());
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max
+	 */
+
+	time = jiffies + (HZ * 2);
+	do {
+
+		err = finished(vdev, &done);
+		if (err)
+			return err;
+
+		if (done)
+			return 0;
+
+		schedule_timeout_uninterruptible(HZ / 10);
+
+	} while (time_after(time, jiffies));
+
+	return -ETIMEDOUT;
+}
+
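The 2-second deadline above leans on the kernel's wraparound-safe jiffies comparison: time_after(a, b) is essentially a signed subtraction, so "jiffies + 2 * HZ" keeps working even when the counter overflows. A standalone userspace sketch of that arithmetic (the values and the local HZ are made up for illustration; the real macro lives in <linux/jiffies.h>):

#include <stdio.h>

/* Same shape as the kernel's time_after(a, b): true if a is after b. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long HZ = 100;				/* illustrative tick rate */
	unsigned long jiffies = (unsigned long)-50;	/* counter about to wrap */
	unsigned long deadline = jiffies + 2 * HZ;	/* wraps past zero */

	printf("expired at start? %d\n", time_after(jiffies, deadline));	/* 0 */
	jiffies += 300;					/* well past the deadline */
	printf("expired later?    %d\n", time_after(jiffies, deadline));	/* 1 */
	return 0;
}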
+static int enic_dev_open(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_open,
+		vnic_dev_open_done, 0);
+	if (err)
+		printk(KERN_ERR PFX
+			"vNIC device open failed, err %d.\n", err);
+
+	return err;
+}
+
+static int enic_dev_soft_reset(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+		vnic_dev_soft_reset_done, 0);
+	if (err)
+		printk(KERN_ERR PFX
+			"vNIC soft reset failed, err %d.\n", err);
+
+	return err;
+}
+
+static void enic_reset(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, reset);
+
+	if (!netif_running(enic->netdev))
+		return;
+
+	rtnl_lock();
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_hang_notify(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	enic_stop(enic->netdev);
+	enic_dev_soft_reset(enic);
+	enic_reset_mcaddrs(enic);
+	enic_init_vnic_resources(enic);
+	enic_open(enic->netdev);
+
+	rtnl_unlock();
+}
+
+static int enic_set_intr_mode(struct enic *enic)
+{
+	unsigned int n = ARRAY_SIZE(enic->rq);
+	unsigned int m = ARRAY_SIZE(enic->wq);
+	unsigned int i;
+
+	/* Set interrupt mode (INTx, MSI, MSI-X) depending on
+	 * system capabilities.
+	 *
+	 * Try MSI-X first
+	 *
+	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
+	 * (the second to last INTR is used for WQ/RQ errors)
+	 * (the last INTR is used for notifications)
+	 */
+
+	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
+	for (i = 0; i < n + m + 2; i++)
+		enic->msix_entry[i].entry = i;
+
+	if (enic->config.intr_mode < 1 &&
+	    enic->rq_count >= n &&
+	    enic->wq_count >= m &&
+	    enic->cq_count >= n + m &&
+	    enic->intr_count >= n + m + 2 &&
+	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+
+		enic->rq_count = n;
+		enic->wq_count = m;
+		enic->cq_count = n + m;
+		enic->intr_count = n + m + 2;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+
+		return 0;
+	}
+
+	/* Next try MSI
+	 *
+	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+	 */
+
+	if (enic->config.intr_mode < 2 &&
+	    enic->rq_count >= 1 &&
+	    enic->wq_count >= 1 &&
+	    enic->cq_count >= 2 &&
+	    enic->intr_count >= 1 &&
+	    !pci_enable_msi(enic->pdev)) {
+
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->cq_count = 2;
+		enic->intr_count = 1;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+		return 0;
+	}
+
+	/* Next try INTx
+	 *
+	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+	 * (the first INTR is used for WQ/RQ)
+	 * (the second INTR is used for WQ/RQ errors)
+	 * (the last INTR is used for notifications)
+	 */
+
+	if (enic->config.intr_mode < 3 &&
+	    enic->rq_count >= 1 &&
+	    enic->wq_count >= 1 &&
+	    enic->cq_count >= 2 &&
+	    enic->intr_count >= 3) {
+
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->cq_count = 2;
+		enic->intr_count = 3;
+
+		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+		return 0;
+	}
+
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+	return -EINVAL;
+}
+
+static void enic_clear_intr_mode(struct enic *enic)
+{
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		pci_disable_msix(enic->pdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		pci_disable_msi(enic->pdev);
+		break;
+	default:
+		break;
+	}
+
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+}
+
+static void enic_iounmap(struct enic *enic)
+{
+	if (enic->bar0.vaddr)
+		iounmap(enic->bar0.vaddr);
+}
+
+static int __devinit enic_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct enic *enic;
+	int using_dac = 0;
+	unsigned int i;
+	int err;
+
+	const u8 rss_default_cpu = 0;
+	const u8 rss_hash_type = 0;
+	const u8 rss_hash_bits = 0;
+	const u8 rss_base_cpu = 0;
+	const u8 rss_enable = 0;
+	const u8 tso_ipid_split_en = 0;
+	const u8 ig_vlan_strip_en = 1;
+
+	/* Allocate net device structure and initialize.  Private
+	 * instance data is initialized to zero.
+	 */
+
+	netdev = alloc_etherdev(sizeof(struct enic));
+	if (!netdev) {
+		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, netdev);
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	enic = netdev_priv(netdev);
+	enic->netdev = netdev;
+	enic->pdev = pdev;
+
+	/* Setup PCI resources
+	 */
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Cannot enable PCI device, aborting.\n");
+		goto err_out_free_netdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Cannot request PCI regions, aborting.\n");
+		goto err_out_disable_device;
+	}
+
+	pci_set_master(pdev);
+
+	/* Query PCI controller on system for DMA addressing
+	 * limitation for the device.  Try 40-bit first, and
+	 * fall back to 32-bit.
+	 */
+
+	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"No usable DMA configuration, aborting.\n");
+			goto err_out_release_regions;
+		}
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"Unable to obtain 32-bit DMA "
+				"for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+	} else {
+		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX
+				"Unable to obtain 40-bit DMA "
+				"for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+		using_dac = 1;
+	}
+
+	/* Map vNIC resources from BAR0
+	 */
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX
+			"BAR0 not memory-map'able, aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	enic->bar0.bus_addr = pci_resource_start(pdev, 0);
+	enic->bar0.len = pci_resource_len(pdev, 0);
+	enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
+
+	if (!enic->bar0.vaddr) {
+		printk(KERN_ERR PFX
+			"Cannot memory-map BAR0 res hdr, aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	/* Register vNIC device
+	 */
+
+	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
+	if (!enic->vdev) {
+		printk(KERN_ERR PFX
+			"vNIC registration failed, aborting.\n");
+		err = -ENODEV;
+		goto err_out_iounmap;
+	}
+
+	/* Issue device open to get device in known state
+	 */
+
+	err = enic_dev_open(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"vNIC dev open failed, aborting.\n");
+		goto err_out_vnic_unregister;
+	}
+
+	/* Issue device init to initialize the vnic-to-switch link.
+	 * We'll start with carrier off and wait for link UP
+	 * notification later to turn on carrier.  We don't need
+	 * to wait here for the vnic-to-switch link initialization
+	 * to complete; link UP notification is the indication that
+	 * the process is complete.
+	 */
+
+	netif_carrier_off(netdev);
+
+	err = vnic_dev_init(enic->vdev, 0);
+	if (err) {
+		printk(KERN_ERR PFX
+			"vNIC dev init failed, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	/* Get vNIC configuration
+	 */
+
+	err = enic_get_vnic_config(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Get vNIC configuration failed, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	/* Get available resource counts
+	 */
+
+	enic_get_res_counts(enic);
+
+	/* Set interrupt mode based on resource counts and system
+	 * capabilities
+	 */
+
+	err = enic_set_intr_mode(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Failed to set intr mode, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	/* Allocate and configure vNIC resources
+	 */
+
+	err = enic_alloc_vnic_resources(enic);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Failed to alloc vNIC resources, aborting.\n");
+		goto err_out_free_vnic_resources;
+	}
+
+	enic_init_vnic_resources(enic);
+
+	/* Enable VLAN tag stripping.  RSS not enabled (yet).
+	 */
+
+	err = enic_set_nic_cfg(enic,
+		rss_default_cpu, rss_hash_type,
+		rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en,
+		ig_vlan_strip_en);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Failed to config nic, aborting.\n");
+		goto err_out_free_vnic_resources;
+	}
+
+	/* Setup notification timer, HW reset task, and locks
+	 */
+
+	init_timer(&enic->notify_timer);
+	enic->notify_timer.function = enic_notify_timer;
+	enic->notify_timer.data = (unsigned long)enic;
+
+	INIT_WORK(&enic->reset, enic_reset);
+
+	for (i = 0; i < enic->wq_count; i++)
+		spin_lock_init(&enic->wq_lock[i]);
+
+	spin_lock_init(&enic->devcmd_lock);
+
+	/* Register net device
+	 */
+
+	enic->port_mtu = enic->config.mtu;
+	(void)enic_change_mtu(netdev, enic->port_mtu);
+
+	err = enic_set_mac_addr(netdev, enic->mac_addr);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Invalid MAC address, aborting.\n");
+		goto err_out_free_vnic_resources;
+	}
+
+	netdev->open = enic_open;
+	netdev->stop = enic_stop;
+	netdev->hard_start_xmit = enic_hard_start_xmit;
+	netdev->get_stats = enic_get_stats;
+	netdev->set_multicast_list = enic_set_multicast_list;
+	netdev->change_mtu = enic_change_mtu;
+	netdev->vlan_rx_register = enic_vlan_rx_register;
+	netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
+	netdev->tx_timeout = enic_tx_timeout;
+	netdev->watchdog_timeo = 2 * HZ;
+	netdev->ethtool_ops = &enic_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = enic_poll_controller;
+#endif
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	default:
+		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
+		break;
+	}
+
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	if (ENIC_SETTING(enic, TXCSUM))
+		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+	if (ENIC_SETTING(enic, TSO))
+		netdev->features |= NETIF_F_TSO |
+			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
+
+	if (ENIC_SETTING(enic, LRO)) {
+		enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
+		enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
+		enic->lro_mgr.lro_arr = enic->lro_desc;
+		enic->lro_mgr.get_skb_header = enic_get_skb_header;
+		enic->lro_mgr.features	= LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+		enic->lro_mgr.dev = netdev;
+		enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
+		enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR PFX
+			"Cannot register net device, aborting.\n");
+		goto err_out_free_vnic_resources;
+	}
+
+	return 0;
+
+err_out_free_vnic_resources:
+	enic_free_vnic_resources(enic);
+err_out_dev_close:
+	vnic_dev_close(enic->vdev);
+err_out_vnic_unregister:
+	enic_clear_intr_mode(enic);
+	vnic_dev_unregister(enic->vdev);
+err_out_iounmap:
+	enic_iounmap(enic);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_device:
+	pci_disable_device(pdev);
+err_out_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+
+	return err;
+}
+
+static void __devexit enic_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+
+	if (netdev) {
+		struct enic *enic = netdev_priv(netdev);
+
+		flush_scheduled_work();
+		unregister_netdev(netdev);
+		enic_free_vnic_resources(enic);
+		vnic_dev_close(enic->vdev);
+		enic_clear_intr_mode(enic);
+		vnic_dev_unregister(enic->vdev);
+		enic_iounmap(enic);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+		free_netdev(netdev);
+	}
+}
+
+static struct pci_driver enic_driver = {
+	.name = DRV_NAME,
+	.id_table = enic_id_table,
+	.probe = enic_probe,
+	.remove = __devexit_p(enic_remove),
+};
+
+static int __init enic_init_module(void)
+{
+	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+	return pci_register_driver(&enic_driver);
+}
+
+static void __exit enic_cleanup_module(void)
+{
+	pci_unregister_driver(&enic_driver);
+}
+
+module_init(enic_init_module);
+module_exit(enic_cleanup_module);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
new file mode 100644
index 0000000..95184b9
--- /dev/null
+++ b/drivers/net/enic/enic_res.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+	struct vnic_enet_config *c = &enic->config;
+	int err;
+
+	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
+	if (err) {
+		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
+		return err;
+	}
+
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(enic->vdev, \
+			offsetof(struct vnic_enet_config, m), \
+			sizeof(c->m), &c->m); \
+		if (err) { \
+			printk(KERN_ERR PFX \
+				"Error getting %s, %d\n", #m, err); \
+			return err; \
+		} \
+	} while (0)
+
+	GET_CONFIG(flags);
+	GET_CONFIG(wq_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(mtu);
+	GET_CONFIG(intr_timer);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(intr_mode);
+
+	c->wq_desc_count =
+		min_t(u32, ENIC_MAX_WQ_DESCS,
+		max_t(u32, ENIC_MIN_WQ_DESCS,
+		c->wq_desc_count));
+	c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+
+	c->rq_desc_count =
+		min_t(u32, ENIC_MAX_RQ_DESCS,
+		max_t(u32, ENIC_MIN_RQ_DESCS,
+		c->rq_desc_count));
+	c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+
+	if (c->mtu == 0)
+		c->mtu = 1500;
+	c->mtu = min_t(u16, ENIC_MAX_MTU,
+		max_t(u16, ENIC_MIN_MTU,
+		c->mtu));
+
+	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+
+	printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		"wq/rq %d/%d\n",
+		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+		c->wq_desc_count, c->rq_desc_count);
+	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
+		"intr timer %d\n",
+		c->mtu, ENIC_SETTING(enic, TXCSUM),
+		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
+		ENIC_SETTING(enic, LRO), c->intr_timer);
+
+	return 0;
+}
+
+void enic_add_station_addr(struct enic *enic)
+{
+	vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+}
+
+void enic_add_multicast_addr(struct enic *enic, u8 *addr)
+{
+	vnic_dev_add_addr(enic->vdev, addr);
+}
+
+void enic_del_multicast_addr(struct enic *enic, u8 *addr)
+{
+	vnic_dev_del_addr(enic->vdev, addr);
+}
+
+void enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 a0 = vlanid, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
+}
+
+void enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 a0 = vlanid, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
+}
+
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	u64 a0, a1;
+	u32 nic_cfg;
+	int wait = 1000;
+
+	vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+		rss_hash_type, rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+	a0 = nic_cfg;
+	a1 = 0;
+
+	return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+void enic_free_vnic_resources(struct enic *enic)
+{
+	unsigned int i;
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_free(&enic->wq[i]);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_free(&enic->rq[i]);
+	for (i = 0; i < enic->cq_count; i++)
+		vnic_cq_free(&enic->cq[i]);
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_free(&enic->intr[i]);
+}
+
+void enic_get_res_counts(struct enic *enic)
+{
+	enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+	enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+	enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+	enic->intr_count = vnic_dev_get_res_count(enic->vdev,
+		RES_TYPE_INTR_CTRL);
+
+	printk(KERN_INFO PFX "vNIC resources avail: "
+		"wq %d rq %d cq %d intr %d\n",
+		enic->wq_count, enic->rq_count,
+		enic->cq_count, enic->intr_count);
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int mask_on_assertion;
+	unsigned int interrupt_offset;
+	unsigned int error_interrupt_enable;
+	unsigned int error_interrupt_offset;
+	unsigned int cq_index;
+	unsigned int i;
+
+	intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+	/* Init RQ/WQ resources.
+	 *
+	 * RQ[0 - n-1] point to CQ[0 - n-1]
+	 * WQ[0 - m-1] point to CQ[n - n+m-1]
+	 *
+	 * Error interrupt is not enabled for MSI.
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		error_interrupt_enable = 1;
+		error_interrupt_offset = enic->intr_count - 2;
+		break;
+	default:
+		error_interrupt_enable = 0;
+		error_interrupt_offset = 0;
+		break;
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		cq_index = i;
+		vnic_rq_init(&enic->rq[i],
+			cq_index,
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	for (i = 0; i < enic->wq_count; i++) {
+		cq_index = enic->rq_count + i;
+		vnic_wq_init(&enic->wq[i],
+			cq_index,
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	/* Init CQ resources
+	 *
+	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
+	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+	 */
+
+	for (i = 0; i < enic->cq_count; i++) {
+
+		switch (intr_mode) {
+		case VNIC_DEV_INTR_MODE_MSIX:
+			interrupt_offset = i;
+			break;
+		default:
+			interrupt_offset = 0;
+			break;
+		}
+
+		vnic_cq_init(&enic->cq[i],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			1 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			interrupt_offset,
+			0 /* cq_message_addr */);
+	}
+
+	/* Init INTR resources
+	 *
+	 * mask_on_assertion is not used for INTx due to the level-
+	 * triggered nature of INTx
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_MSI:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		mask_on_assertion = 1;
+		break;
+	default:
+		mask_on_assertion = 0;
+		break;
+	}
+
+	for (i = 0; i < enic->intr_count; i++) {
+		vnic_intr_init(&enic->intr[i],
+			enic->config.intr_timer,
+			enic->config.intr_timer_type,
+			mask_on_assertion);
+	}
+
+	/* Clear LIF stats
+	 */
+
+	vnic_dev_stats_clear(enic->vdev);
+}
+
+int enic_alloc_vnic_resources(struct enic *enic)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int i;
+	int err;
+
+	intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+	printk(KERN_INFO PFX "vNIC resources used:  "
+		"wq %d rq %d cq %d intr %d intr mode %s\n",
+		enic->wq_count, enic->rq_count,
+		enic->cq_count, enic->intr_count,
+		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
+		"unknown"
+		);
+
+	/* Allocate queue resources
+	 */
+
+	for (i = 0; i < enic->wq_count; i++) {
+		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+			enic->config.wq_desc_count,
+			sizeof(struct wq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+			enic->config.rq_desc_count,
+			sizeof(struct rq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->cq_count; i++) {
+		if (i < enic->rq_count)
+			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+				enic->config.rq_desc_count,
+				sizeof(struct cq_enet_rq_desc));
+		else
+			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+				enic->config.wq_desc_count,
+				sizeof(struct cq_enet_wq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < enic->intr_count; i++) {
+		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* Hook remaining resource
+	 */
+
+	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
+		RES_TYPE_INTR_PBA_LEGACY, 0);
+	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+		printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
+		err = -ENODEV;
+		goto err_out_cleanup;
+	}
+
+	return 0;
+
+err_out_cleanup:
+	enic_free_vnic_resources(enic);
+
+	return err;
+}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
new file mode 100644
index 0000000..68534a2
--- /dev/null
+++ b/drivers/net/enic/enic_res.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS		64
+#define ENIC_MAX_WQ_DESCS		4096
+#define ENIC_MIN_RQ_DESCS		64
+#define ENIC_MAX_RQ_DESCS		4096
+
+#define ENIC_MIN_MTU			576  /* minimum for IPv4 */
+#define ENIC_MAX_MTU			9000
+
+#define ENIC_MULTICAST_PERFECT_FILTERS	32
+
+#define ENIC_NON_TSO_MAX_DESC		16
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss_or_csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag,
+	int offload_mode, int cq_entry, int sop, int eop)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		(u16)len,
+		(u16)mss_or_csum_offset,
+		(u16)hdr_len, (u8)offload_mode,
+		(u8)eop, (u8)cq_entry,
+		0, /* fcoe_encap */
+		(u8)vlan_tag_insert,
+		(u16)vlan_tag,
+		0 /* loopback */);
+
+	wmb();
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, 0, 0, 0,
+		eop, 0 /* !SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
+	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
+		0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		mss, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_TSO,
+		eop, 1 /* SOP */, eop);
+}
+
+static inline void enic_queue_rq_desc(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+	u8 type = os_buf_index ?
+		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
+
+	rq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		type, (u16)len);
+
+	wmb();
+
+	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+}
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+void enic_add_station_addr(struct enic *enic);
+void enic_add_multicast_addr(struct enic *enic, u8 *addr);
+void enic_del_multicast_addr(struct enic *enic, u8 *addr);
+void enic_add_vlan(struct enic *enic, u16 vlanid);
+void enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
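Reading the helpers above, a plausible multi-fragment transmit posts the head buffer with enic_queue_wq_desc() (SOP set) and each remaining fragment with enic_queue_wq_desc_cont() (SOP clear), with EOP only on the last descriptor. The standalone sketch below just prints that flag pattern for a hypothetical three-fragment packet; the pairing is inferred from the helpers' SOP/EOP arguments, not quoted from the driver's transmit path.

#include <stdio.h>

int main(void)
{
	int frags = 3, i;	/* hypothetical fragment count */

	for (i = 0; i < frags; i++) {
		int sop = (i == 0);		/* only the head descriptor */
		int eop = (i == frags - 1);	/* only the last descriptor */

		printf("frag %d: sop=%d eop=%d (%s)\n", i, sop, eop,
			sop ? "enic_queue_wq_desc" : "enic_queue_wq_desc_cont");
	}
	return 0;
}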
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
new file mode 100644
index 0000000..a06e649
--- /dev/null
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+	__le64 address;
+	__le16 length_type;
+	u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+	RQ_ENET_TYPE_ONLY_SOP = 0,
+	RQ_ENET_TYPE_NOT_SOP = 1,
+	RQ_ENET_TYPE_RESV2 = 2,
+	RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS		64
+#define RQ_ENET_LEN_BITS		14
+#define RQ_ENET_LEN_MASK		((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS		2
+#define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+	u64 address, u8 type, u16 length)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+		((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+	u64 *address, u8 *type, u16 *length)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+	*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+		RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
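A worked example of the 14-bit length / 2-bit type packing done by rq_enet_desc_enc() above, as a self-contained userspace program. Byte-order conversion is left out, so only the bit layout is modeled, and the 1500-byte length is an arbitrary illustration.

#include <stdio.h>
#include <stdint.h>

#define LEN_BITS	14
#define LEN_MASK	((1 << LEN_BITS) - 1)	/* 0x3fff */
#define TYPE_MASK	((1 << 2) - 1)		/* 0x3 */

int main(void)
{
	uint16_t length = 1500;	/* hypothetical buffer length */
	uint8_t type = 1;	/* RQ_ENET_TYPE_NOT_SOP */
	uint16_t length_type = (length & LEN_MASK) |
		((type & TYPE_MASK) << LEN_BITS);

	printf("length_type = 0x%04x\n", (unsigned)length_type);	/* 0x45dc */
	printf("decoded: len=%u type=%u\n",
		(unsigned)(length_type & LEN_MASK),
		(unsigned)((length_type >> LEN_BITS) & TYPE_MASK));
	return 0;
}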
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
new file mode 100644
index 0000000..020ae6c
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+	cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	cq->index = index;
+	cq->vdev = vdev;
+
+	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+	if (!cq->ctrl) {
+		printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int cq_message_enable,
+	unsigned int interrupt_offset, u64 cq_message_addr)
+{
+	u64 paddr;
+
+	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &cq->ctrl->ring_base);
+	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+	iowrite32(color_enable, &cq->ctrl->color_enable);
+	iowrite32(cq_head, &cq->ctrl->cq_head);
+	iowrite32(cq_tail, &cq->ctrl->cq_tail);
+	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+	cq->to_clean = 0;
+	cq->last_color = 0;
+
+	iowrite32(0, &cq->ctrl->cq_head);
+	iowrite32(0, &cq->ctrl->cq_tail);
+	iowrite32(1, &cq->ctrl->cq_tail_color);
+
+	vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
new file mode 100644
index 0000000..114763c
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 flow_control_enable;	/* 0x10 */
+	u32 pad1;
+	u32 color_enable;		/* 0x18 */
+	u32 pad2;
+	u32 cq_head;			/* 0x20 */
+	u32 pad3;
+	u32 cq_tail;			/* 0x28 */
+	u32 pad4;
+	u32 cq_tail_color;		/* 0x30 */
+	u32 pad5;
+	u32 interrupt_enable;		/* 0x38 */
+	u32 pad6;
+	u32 cq_entry_enable;		/* 0x40 */
+	u32 pad7;
+	u32 cq_message_enable;		/* 0x48 */
+	u32 pad8;
+	u32 interrupt_offset;		/* 0x50 */
+	u32 pad9;
+	u64 cq_message_addr;		/* 0x58 */
+	u32 pad10;
+};
+
+struct vnic_cq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_cq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	unsigned int to_clean;
+	unsigned int last_color;
+};
+
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+	unsigned int work_to_do,
+	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque),
+	void *opaque)
+{
+	struct cq_desc *cq_desc;
+	unsigned int work_done = 0;
+	u16 q_number, completed_index;
+	u8 type, color;
+
+	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	cq_desc_dec(cq_desc, &type, &color,
+		&q_number, &completed_index);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq_desc, type,
+			q_number, completed_index, opaque))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		cq_desc_dec(cq_desc, &type, &color,
+			&q_number, &completed_index);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int message_enable,
+	unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
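vnic_cq_service() above avoids reading a hardware tail pointer by using a color bit: the device writes each new completion with the "current" color and the driver flips the color it expects every time it wraps the ring, so an entry carrying the stale color means "no more work". A minimal userspace model of that convention (the ring size, data values and struct layout are invented; there is no hardware here, just an array standing in for the descriptor ring):

#include <stdio.h>

#define RING_SIZE 4

struct entry {
	int color;
	int data;
};

int main(void)
{
	/* pretend the device has posted two completions with color 1 */
	struct entry ring[RING_SIZE] = {
		{ 1, 10 }, { 1, 11 }, { 0, 0 }, { 0, 0 }
	};
	int to_clean = 0, last_color = 0, work_done = 0;

	while (ring[to_clean].color != last_color) {
		printf("service entry %d (data %d)\n",
			to_clean, ring[to_clean].data);
		work_done++;
		if (++to_clean == RING_SIZE) {
			to_clean = 0;
			last_color = !last_color;	/* flip on wrap */
		}
	}
	printf("work_done = %d\n", work_done);	/* 2 */
	return 0;
}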
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
new file mode 100644
index 0000000..4d104f5
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+struct vnic_res {
+	void __iomem *vaddr;
+	unsigned int count;
+};
+
+struct vnic_dev {
+	void *priv;
+	struct pci_dev *pdev;
+	struct vnic_res res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode intr_mode;
+	struct vnic_devcmd __iomem *devcmd;
+	struct vnic_devcmd_notify *notify;
+	struct vnic_devcmd_notify notify_copy;
+	dma_addr_t notify_pa;
+	u32 *linkstatus;
+	dma_addr_t linkstatus_pa;
+	struct vnic_stats *stats;
+	dma_addr_t stats_pa;
+	struct vnic_devcmd_fw_info *fw_info;
+	dma_addr_t fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+	(sizeof(struct vnic_resource_header) + \
+	sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE	128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+	return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+	struct vnic_dev_bar *bar)
+{
+	struct vnic_resource_header __iomem *rh;
+	struct vnic_resource __iomem *r;
+	u8 type;
+
+	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+		return -EINVAL;
+	}
+
+	rh = bar->vaddr;
+	if (!rh) {
+		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+		return -EINVAL;
+	}
+
+	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+	    ioread32(&rh->version) != VNIC_RES_VERSION) {
+		printk(KERN_ERR "vNIC BAR0 res magic/version error "
+			"exp (%lx/%lx) curr (%x/%x)\n",
+			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			ioread32(&rh->magic), ioread32(&rh->version));
+		return -EINVAL;
+	}
+
+	r = (struct vnic_resource __iomem *)(rh + 1);
+
+	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+		u8 bar_num = ioread8(&r->bar);
+		u32 bar_offset = ioread32(&r->bar_offset);
+		u32 count = ioread32(&r->count);
+		u32 len;
+
+		r++;
+
+		if (bar_num != 0)  /* only map BAR0 resources */
+			continue;
+
+		switch (type) {
+		case RES_TYPE_WQ:
+		case RES_TYPE_RQ:
+		case RES_TYPE_CQ:
+		case RES_TYPE_INTR_CTRL:
+			/* each count is stride bytes long */
+			len = count * VNIC_RES_STRIDE;
+			if (len + bar_offset > bar->len) {
+				printk(KERN_ERR "vNIC BAR0 resource %d "
+					"out-of-bounds, offset 0x%x + "
+					"size 0x%x > bar len 0x%lx\n",
+					type, bar_offset,
+					len,
+					bar->len);
+				return -EINVAL;
+			}
+			break;
+		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD:
+			len = count;
+			break;
+		default:
+			continue;
+		}
+
+		vdev->res[type].count = count;
+		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+	}
+
+	return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type)
+{
+	return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index)
+{
+	if (!vdev->res[type].vaddr)
+		return NULL;
+
+	switch (type) {
+	case RES_TYPE_WQ:
+	case RES_TYPE_RQ:
+	case RES_TYPE_CQ:
+	case RES_TYPE_INTR_CTRL:
+		return (char __iomem *)vdev->res[type].vaddr +
+			index * VNIC_RES_STRIDE;
+	default:
+		return (char __iomem *)vdev->res[type].vaddr;
+	}
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	/* The base address of the desc rings must be 512 byte aligned.
+	 * Descriptor count is aligned to groups of 32 descriptors.  A
+	 * count of 0 means the maximum 4096 descriptors.  Descriptor
+	 * size is aligned to 16 bytes.
+	 */
+
+	unsigned int count_align = 32;
+	unsigned int desc_align = 16;
+
+	ring->base_align = 512;
+
+	if (desc_count == 0)
+		desc_count = 4096;
+
+	ring->desc_count = ALIGN(desc_count, count_align);
+
+	ring->desc_size = ALIGN(desc_size, desc_align);
+
+	ring->size = ring->desc_count * ring->desc_size;
+	ring->size_unaligned = ring->size + ring->base_align;
+
+	return ring->size_unaligned;
+}
+
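To make the alignment rules above concrete, here is the same arithmetic as a standalone program for a hypothetical request of 3000 descriptors of 16 bytes each; ALIGN below mirrors the kernel macro for power-of-two alignment.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int desc_count = 3000;			/* hypothetical request */
	unsigned int desc_size = 16;
	unsigned long count = ALIGN(desc_count, 32);	/* -> 3008 */
	unsigned long size = ALIGN(desc_size, 16);	/* -> 16 */
	unsigned long total = count * size;		/* -> 48128 */
	unsigned long alloc = total + 512;		/* room to realign the base */

	printf("count=%lu size=%lu total=%lu alloc=%lu\n",
		count, size, total, alloc);
	return 0;
}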
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+	memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+		ring->size_unaligned,
+		&ring->base_addr_unaligned);
+
+	if (!ring->descs_unaligned) {
+		printk(KERN_ERR
+		  "Failed to allocate ring (size=%d), aborting\n",
+			(int)ring->size);
+		return -ENOMEM;
+	}
+
+	ring->base_addr = ALIGN(ring->base_addr_unaligned,
+		ring->base_align);
+	ring->descs = (u8 *)ring->descs_unaligned +
+		(ring->base_addr - ring->base_addr_unaligned);
+
+	vnic_dev_clear_desc_ring(ring);
+
+	ring->desc_avail = ring->desc_count - 1;
+
+	return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+	if (ring->descs) {
+		pci_free_consistent(vdev->pdev,
+			ring->size_unaligned,
+			ring->descs_unaligned,
+			ring->base_addr_unaligned);
+		ring->descs = NULL;
+	}
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+	int delay;
+	u32 status;
+	int dev_cmd_err[] = {
+		/* convert from fw's version of error.h to host's version */
+		0,	/* ERR_SUCCESS */
+		EINVAL,	/* ERR_EINVAL */
+		EFAULT,	/* ERR_EFAULT */
+		EPERM,	/* ERR_EPERM */
+		EBUSY,  /* ERR_EBUSY */
+	};
+	int err;
+
+	status = ioread32(&devcmd->status);
+	if (status & STAT_BUSY) {
+		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+		return -EBUSY;
+	}
+
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		writeq(*a0, &devcmd->args[0]);
+		writeq(*a1, &devcmd->args[1]);
+		wmb();
+	}
+
+	iowrite32(cmd, &devcmd->cmd);
+
+	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+
+		udelay(100);
+
+		status = ioread32(&devcmd->status);
+		if (!(status & STAT_BUSY)) {
+
+			if (status & STAT_ERROR) {
+				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
+				printk(KERN_ERR "Error %d devcmd %d\n",
+					err, _CMD_N(cmd));
+				return -err;
+			}
+
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				rmb();
+				*a0 = readq(&devcmd->args[0]);
+				*a1 = readq(&devcmd->args[1]);
+			}
+
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
+	return -ETIMEDOUT;
+}
+
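The firmware command path above is a post-and-poll handshake: write the arguments, write the command, then re-read the status register (with a 100us delay per iteration) until the BUSY bit clears or the caller's wait budget runs out. A userspace sketch of just the polling half, where a plain variable stands in for the status register, the fake "firmware" finishes after three polls, and the STAT_* values are illustrative:

#include <stdio.h>
#include <errno.h>

#define STAT_BUSY	0x1	/* illustrative bit values */
#define STAT_ERROR	0x2

static unsigned int fake_status = STAT_BUSY;

static unsigned int read_status(int delay)
{
	if (delay >= 3)			/* "firmware" completes on the 4th poll */
		fake_status = 0;
	return fake_status;
}

static int devcmd_wait(int wait)
{
	int delay;

	for (delay = 0; delay < wait; delay++) {
		unsigned int status = read_status(delay);

		if (!(status & STAT_BUSY))
			return (status & STAT_ERROR) ? -EIO : 0;
		/* the real code does udelay(100) here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("devcmd result = %d\n", devcmd_wait(1000));	/* 0 on success */
	return 0;
}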
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+	struct vnic_devcmd_fw_info **fw_info)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err = 0;
+
+	if (!vdev->fw_info) {
+		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_fw_info),
+			&vdev->fw_info_pa);
+		if (!vdev->fw_info)
+			return -ENOMEM;
+
+		a0 = vdev->fw_info_pa;
+
+		/* only get fw_info once and cache it */
+		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+	}
+
+	*fw_info = vdev->fw_info;
+
+	return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+	void *value)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = offset;
+	a1 = size;
+
+	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+	switch (size) {
+	case 1: *(u8 *)value = (u8)a0; break;
+	case 2: *(u16 *)value = (u16)a0; break;
+	case 4: *(u32 *)value = (u32)a0; break;
+	case 8: *(u64 *)value = a0; break;
+	default: BUG(); break;
+	}
+
+	return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->stats) {
+		vdev->stats = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_stats), &vdev->stats_pa);
+		if (!vdev->stats)
+			return -ENOMEM;
+	}
+
+	*stats = vdev->stats;
+	a0 = vdev->stats_pa;
+	a1 = sizeof(struct vnic_stats);
+
+	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err, i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = ((u8 *)&a0)[i];
+
+	return 0;
+}
+
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR "Can't set packet filter\n");
+}
+
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->notify) {
+		vdev->notify = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_notify),
+			&vdev->notify_pa);
+		if (!vdev->notify)
+			return -ENOMEM;
+	}
+
+	a0 = vdev->notify_pa;
+	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	a0 = 0;  /* paddr = 0 to unset notify buffer */
+	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+	u32 *words;
+	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+	unsigned int i;
+	u32 csum;
+
+	if (!vdev->notify)
+		return 0;
+
+	do {
+		csum = 0;
+		memcpy(&vdev->notify_copy, vdev->notify,
+			sizeof(struct vnic_devcmd_notify));
+		words = (u32 *)&vdev->notify_copy;
+		for (i = 1; i < nwords; i++)
+			csum += words[i];
+	} while (csum != words[0]);
+
+	return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+	if (vdev->linkstatus)
+		return *vdev->linkstatus;
+
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.mtu;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode)
+{
+	vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+	struct vnic_dev *vdev)
+{
+	return vdev->intr_mode;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+	if (vdev) {
+		if (vdev->notify)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_notify),
+				vdev->notify,
+				vdev->notify_pa);
+		if (vdev->linkstatus)
+			pci_free_consistent(vdev->pdev,
+				sizeof(u32),
+				vdev->linkstatus,
+				vdev->linkstatus_pa);
+		if (vdev->stats)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_stats),
+				vdev->stats, vdev->stats_pa);
+		if (vdev->fw_info)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_fw_info),
+				vdev->fw_info, vdev->fw_info_pa);
+		kfree(vdev);
+	}
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+	if (!vdev) {
+		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+		if (!vdev)
+			return NULL;
+	}
+
+	vdev->priv = priv;
+	vdev->pdev = pdev;
+
+	if (vnic_dev_discover_res(vdev, bar))
+		goto err_out;
+
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		goto err_out;
+
+	return vdev;
+
+err_out:
+	vnic_dev_unregister(vdev);
+	return NULL;
+}
+
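The notify block registered by vnic_dev_notify_set() above is rewritten asynchronously by firmware, so vnic_dev_notify_ready() copies it and re-sums it until the stored checksum (word 0) equals the sum of the remaining words, i.e. until a torn read is ruled out. Below is a minimal, standalone sketch of that snapshot-and-verify idea; the struct and function names are illustrative only and are not part of the driver.

#include <stdint.h>
#include <string.h>

struct notify_blk {
	uint32_t csum;		/* sum of all words that follow */
	uint32_t payload[7];	/* link_state, port_speed, mtu, ... */
};

/* Copy the firmware-written block until a self-consistent snapshot is read. */
static void notify_snapshot(const struct notify_blk *shared,
			    struct notify_blk *copy)
{
	const uint32_t *w = (const uint32_t *)copy;
	unsigned int i, nwords = sizeof(*copy) / 4;
	uint32_t sum;

	do {
		sum = 0;
		memcpy(copy, shared, sizeof(*copy));
		for (i = 1; i < nwords; i++)
			sum += w[i];
	} while (sum != w[0]);	/* mismatch: firmware updated mid-copy, retry */
}
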
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
new file mode 100644
index 0000000..2dcffd3
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET	0x0000000000000000ULL
+#endif
+
+enum vnic_dev_intr_mode {
+	VNIC_DEV_INTR_MODE_UNKNOWN,
+	VNIC_DEV_INTR_MODE_INTX,
+	VNIC_DEV_INTR_MODE_MSI,
+	VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+	void __iomem *vaddr;
+	dma_addr_t bus_addr;
+	unsigned long len;
+};
+
+struct vnic_dev_ring {
+	void *descs;
+	size_t size;
+	dma_addr_t base_addr;
+	size_t base_align;
+	void *descs_unaligned;
+	size_t size_unaligned;
+	dma_addr_t base_addr_unaligned;
+	unsigned int desc_size;
+	unsigned int desc_count;
+	unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+	struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+	struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+	void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
new file mode 100644
index 0000000..d8617a3
--- /dev/null
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS      14
+#define _CMD_VTYPEBITS	10
+#define _CMD_FLAGSBITS  6
+#define _CMD_DIRBITS	2
+
+#define _CMD_NMASK      ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK  ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK  ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK    ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT     0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT   (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE   0U
+#define _CMD_DIR_WRITE  1U
+#define _CMD_DIR_READ   2U
+#define _CMD_DIR_RW     (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE  0U
+#define _CMD_VTYPE_ENET  1U
+#define _CMD_VTYPE_FC    2U
+#define _CMD_VTYPE_SCSI  4U
+#define _CMD_VTYPE_ALL   (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr)  \
+	(((dir)   << _CMD_DIRSHIFT) | \
+	((flags) << _CMD_FLAGSSHIFT) | \
+	((vtype) << _CMD_VTYPESHIFT) | \
+	((nr)    << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr)    _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr)  _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd)            (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd)          (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd)          (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd)              (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+	CMD_NONE                = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+	CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+	/* dev-specific block member:
+	 *    in: (u16)a0=offset,(u8)a1=size
+	 *    out: a0=value */
+	CMD_DEV_SPEC            = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+	/* stats clear */
+	CMD_STATS_CLEAR         = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+	/* stats dump in mem: (u64)a0=paddr to stats area,
+	 *                    (u16)a1=sizeof stats area */
+	CMD_STATS_DUMP          = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+	CMD_PACKET_FILTER	= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+	/* hang detection notification */
+	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+	/* MAC address in (u48)a0 */
+	CMD_MAC_ADDR            = _CMDC(_CMD_DIR_READ,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+	/* disable/enable promisc mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_PROMISC_MODE        = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+	/* disable/enable all-multi mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_ALLMULTI_MODE       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+	/* add addr from (u48)a0 */
+	CMD_ADDR_ADD            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+	/* del addr from (u48)a0 */
+	CMD_ADDR_DEL            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+	/* add VLAN id in (u16)a0 */
+	CMD_VLAN_ADD            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+	/* del VLAN id in (u16)a0 */
+	CMD_VLAN_DEL            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+	/* nic_cfg in (u32)a0 */
+	CMD_NIC_CFG             = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+	/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_KEY             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+	/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_CPU             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+	/* initiate softreset */
+	CMD_SOFT_RESET          = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+	/* softreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_SOFT_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+	/* set struct vnic_devcmd_notify buffer in mem:
+	 * in:
+	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
+	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+	 * out:
+	 *   (u32)a1 = effective size
+	 */
+	CMD_NOTIFY              = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+	/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+	 *           (u8)a1=PXENV_UNDI_xxx */
+	CMD_UNDI                = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+	CMD_OPEN		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+	/* open status:
+	 *    out: a0=0 open complete, a0=1 open in progress */
+	CMD_OPEN_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+	/* close vnic */
+	CMD_CLOSE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+	CMD_INIT		= _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+	/* variant of CMD_INIT, with provisioning info
+	 *     (u64)a0=paddr of vnic_devcmd_provinfo
+	 *     (u32)a1=sizeof provision info */
+	CMD_INIT_PROV_INFO	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+	/* enable virtual link */
+	CMD_ENABLE		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+	/* disable virtual link */
+	CMD_DISABLE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+	CMD_STATS_DUMP_ALL	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+	/* init status:
+	 *    out: a0=0 init complete, a0=1 init in progress
+	 *         if a0=0, a1=errno */
+	CMD_INIT_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 *            (u8)a1=INT13_CMD_xxx */
+	CMD_INT13               = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+	/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+	CMD_LOGICAL_UPLINK      = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+	/* undo initialize of virtual link */
+	CMD_DEINIT		= _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM		0x1	/* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED		0x01
+#define CMD_PFILTER_MULTICAST		0x02
+#define CMD_PFILTER_BROADCAST		0x04
+#define CMD_PFILTER_PROMISCUOUS		0x08
+#define CMD_PFILTER_ALL_MULTICAST	0x10
+
+enum vnic_devcmd_status {
+	STAT_NONE = 0,
+	STAT_BUSY = 1 << 0,	/* cmd in progress */
+	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+	ERR_SUCCESS = 0,
+	ERR_EINVAL = 1,
+	ERR_EFAULT = 2,
+	ERR_EPERM = 3,
+	ERR_EBUSY = 4,
+	ERR_ECMDUNKNOWN = 5,
+	ERR_EBADSTATE = 6,
+	ERR_ENOMEM = 7,
+	ERR_ETIMEDOUT = 8,
+	ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+	char fw_version[32];
+	char fw_build[32];
+	char hw_version[32];
+	char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+	u32 csum;		/* checksum over following words */
+
+	u32 link_state;		/* link up == 1 */
+	u32 port_speed;		/* effective port speed (rate limit) */
+	u32 mtu;		/* MTU */
+	u32 msglvl;		/* requested driver msg lvl */
+	u32 uif;		/* uplink interface */
+	u32 status;		/* status bits (see VNIC_STF_*) */
+	u32 error;		/* error code (see ERR_*) for first ERR */
+};
+#define VNIC_STF_FATAL_ERR	0x0001	/* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+	u8 oui[3];
+	u8 type;
+	u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only.  While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+	u32 status;			/* RO */
+	u32 cmd;			/* RW */
+	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
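The comment block above spells out the devcmd handshake: the driver writes the args registers, writes an encoded command (which makes hardware set STAT_BUSY), polls status until STAT_BUSY clears, and then either reads the results from args or, if STAT_ERROR is set, reads the error code from args[0]. The sketch below is an illustrative caller-side view of that sequence, not the driver's vnic_dev_cmd() (which lives in vnic_dev.c); it uses plain volatile accesses in place of ioread32()/iowrite32() and a bounded spin in place of a timed wait.

#include <stdint.h>

#define STAT_BUSY_BIT	(1u << 0)
#define STAT_ERROR_BIT	(1u << 1)

struct devcmd_regs {
	volatile uint32_t status;	/* RO: STAT_BUSY / STAT_ERROR */
	volatile uint32_t cmd;		/* RW: value built with _CMDC()/_CMDCF() */
	volatile uint64_t args[15];	/* RW: little-endian cmd args */
};

/* Returns 0 on success, a firmware ERR_* code on failure, -1 on timeout. */
static int devcmd_issue(struct devcmd_regs *regs, uint32_t cmd,
			uint64_t *a0, uint64_t *a1, unsigned long spins)
{
	regs->args[0] = *a0;
	regs->args[1] = *a1;
	regs->cmd = cmd;			/* hardware raises STAT_BUSY */

	while (spins--) {
		uint32_t status = regs->status;

		if (status & STAT_BUSY_BIT)
			continue;		/* command still executing */
		if (status & STAT_ERROR_BIT)
			return (int)regs->args[0];	/* error code */

		*a0 = regs->args[0];		/* cmd-specific results */
		*a1 = regs->args[1];
		return 0;
	}
	return -1;				/* never completed */
}
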
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
new file mode 100644
index 0000000..6332ac9
--- /dev/null
+++ b/drivers/net/enic/vnic_enet.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+	u32 flags;
+	u32 wq_desc_count;
+	u32 rq_desc_count;
+	u16 mtu;
+	u16 intr_timer;
+	u8 intr_timer_type;
+	u8 intr_mode;
+	char devname[16];
+};
+
+#define VENETF_TSO		0x1	/* TSO enabled */
+#define VENETF_LRO		0x2	/* LRO enabled */
+#define VENETF_RXCSUM		0x4	/* RX csum enabled */
+#define VENETF_TXCSUM		0x8	/* TX csum enabled */
+#define VENETF_RSS		0x10	/* RSS enabled */
+#define VENETF_RSSHASH_IPV4	0x20	/* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4	0x40	/* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6	0x80	/* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6	0x100	/* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX	0x200	/* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400	/* Hash on TCP + IPv6 ext. fields */
+
+#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
new file mode 100644
index 0000000..ddc38f8
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+	intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index)
+{
+	intr->index = index;
+	intr->vdev = vdev;
+
+	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+	if (!intr->ctrl) {
+		printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
+			index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+	iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
new file mode 100644
index 0000000..ccc4081
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_MAX		0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS	0
+#define VNIC_INTR_TIMER_TYPE_QUIET	1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+	u32 coalescing_timer;		/* 0x00 */
+	u32 pad0;
+	u32 coalescing_value;		/* 0x08 */
+	u32 pad1;
+	u32 coalescing_type;		/* 0x10 */
+	u32 pad2;
+	u32 mask_on_assertion;		/* 0x18 */
+	u32 pad3;
+	u32 mask;			/* 0x20 */
+	u32 pad4;
+	u32 int_credits;		/* 0x28 */
+	u32 pad5;
+	u32 int_credit_return;		/* 0x30 */
+	u32 pad6;
+};
+
+struct vnic_intr {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_intr_ctrl __iomem *ctrl;		/* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+	iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+	unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT		16
+#define VNIC_INTR_RESET_TIMER_SHIFT	17
+
+	u32 int_credit_return = (credits & 0xffff) |
+		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+	/* get and ack interrupt in one read (clear-and-ack-on-read) */
+	return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
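vnic_intr_return_credits() above folds three things into a single write of int_credit_return: the count of serviced completions in bits 0-15, an unmask request in bit 16, and a coalescing-timer reset in bit 17. A hypothetical caller (for instance at the end of a poll routine) might use it as sketched below; this is not code from the driver and assumes "vnic_intr.h" is included.

/* Return 'work_done' credits; unmask only once all work is finished. */
static void example_intr_done(struct vnic_intr *intr,
			      unsigned int work_done, int all_done)
{
	vnic_intr_return_credits(intr, work_done,
				 all_done ? 1 : 0,	/* unmask (bit 16) */
				 1);			/* reset timer (bit 17) */
}
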
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
new file mode 100644
index 0000000..dadf26f
--- /dev/null
+++ b/drivers/net/enic/vnic_nic.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT		0
+#define NIC_CFG_RSS_HASH_TYPE			(0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT		8
+#define NIC_CFG_RSS_HASH_BITS			(7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD	7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT		16
+#define NIC_CFG_RSS_BASE_CPU			(7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD		7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT		19
+#define NIC_CFG_RSS_ENABLE			(1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD		1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT		22
+#define NIC_CFG_TSO_IPID_SPLIT_EN		(1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD	1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT		23
+#define NIC_CFG_IG_VLAN_STRIP_EN		(1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD	1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT		24
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+	u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu,
+	u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+		((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+		((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
+		((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+			<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
+		((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+			<< NIC_CFG_RSS_ENABLE_SHIFT) |
+		((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+			<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+		((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+			<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
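vnic_set_nic_cfg() above packs seven RSS/TSO/VLAN fields into the single u32 that is later handed to firmware as the CMD_NIC_CFG argument. An illustrative (not recommended-as-is) configuration with RSS disabled and ingress VLAN stripping enabled would be built as follows, assuming "vnic_nic.h" is included:

static u32 example_nic_cfg(void)
{
	u32 nic_cfg;

	vnic_set_nic_cfg(&nic_cfg,
			 0,	/* rss_default_cpu */
			 0,	/* rss_hash_type: no RSS hashing */
			 0,	/* rss_hash_bits */
			 0,	/* rss_base_cpu */
			 0,	/* rss_enable: RSS off */
			 0,	/* tso_ipid_split_en */
			 1);	/* ig_vlan_strip_en: strip VLAN tags on rx */

	return nic_cfg;		/* passed to firmware via CMD_NIC_CFG */
}
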
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
new file mode 100644
index 0000000..144d281
--- /dev/null
+++ b/drivers/net/enic/vnic_resource.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
+#define VNIC_RES_VERSION	0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+	RES_TYPE_EOL,			/* End-of-list */
+	RES_TYPE_WQ,			/* Work queues */
+	RES_TYPE_RQ,			/* Receive queues */
+	RES_TYPE_CQ,			/* Completion queues */
+	RES_TYPE_RSVD1,
+	RES_TYPE_NIC_CFG,		/* Enet NIC config registers */
+	RES_TYPE_RSVD2,
+	RES_TYPE_RSVD3,
+	RES_TYPE_RSVD4,
+	RES_TYPE_RSVD5,
+	RES_TYPE_INTR_CTRL,		/* Interrupt ctrl table */
+	RES_TYPE_INTR_TABLE,		/* MSI/MSI-X Interrupt table */
+	RES_TYPE_INTR_PBA,		/* MSI/MSI-X PBA table */
+	RES_TYPE_INTR_PBA_LEGACY,	/* Legacy intr status, r2c */
+	RES_TYPE_RSVD6,
+	RES_TYPE_RSVD7,
+	RES_TYPE_DEVCMD,		/* Device command region */
+	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+
+	RES_TYPE_MAX,			/* Count of resource types */
+};
+
+struct vnic_resource_header {
+	u32 magic;
+	u32 version;
+};
+
+struct vnic_resource {
+	u8 type;
+	u8 bar;
+	u8 pad[2];
+	u32 bar_offset;
+	u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
new file mode 100644
index 0000000..9365e63
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+	struct vnic_rq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = rq->ring.desc_count;
+	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+	vdev = rq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!rq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc rq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = rq->bufs[i];
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)rq->ring.descs +
+				rq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = rq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+				buf->next = rq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	rq->to_use = rq->to_clean = rq->bufs[0];
+	rq->buf_index = 0;
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = rq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+		kfree(rq->bufs[i]);
+		rq->bufs[i] = NULL;
+	}
+
+	rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	rq->index = index;
+	rq->vdev = vdev;
+
+	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+	if (!rq->ctrl) {
+		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_rq_disable(rq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_rq_alloc_bufs(rq);
+	if (err) {
+		vnic_rq_free(rq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	u32 fetch_index;
+
+	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &rq->ctrl->ring_base);
+	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(cq_index, &rq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+	iowrite32(0, &rq->ctrl->dropped_packet_count);
+	iowrite32(0, &rq->ctrl->error_status);
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+	return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+	iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &rq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&rq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+	struct vnic_rq_buf *buf;
+	u32 fetch_index;
+
+	BUG_ON(ioread32(&rq->ctrl->enable));
+
+	buf = rq->to_clean;
+
+	while (vnic_rq_desc_used(rq) > 0) {
+
+		(*buf_clean)(rq, buf);
+
+		buf = rq->to_clean = buf->next;
+		rq->ring.desc_avail++;
+	}
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+
+	vnic_dev_clear_desc_ring(&rq->ring);
+}
+
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
new file mode 100644
index 0000000..82bfca6
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 error_interrupt_enable;	/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_offset;	/* 0x40 */
+	u32 pad7;
+	u32 error_status;		/* 0x48 */
+	u32 pad8;
+	u32 dropped_packet_count;	/* 0x50 */
+	u32 pad9;
+	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+	struct vnic_rq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int os_buf_index;
+	unsigned int len;
+	unsigned int index;
+	void *desc;
+};
+
+struct vnic_rq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_rq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+	struct vnic_rq_buf *to_use;
+	struct vnic_rq_buf *to_clean;
+	void *os_buf_head;
+	unsigned int buf_index;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+	/* how many does SW own? */
+	return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+	/* how many does HW own? */
+	return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+	return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+	return rq->to_use->index;
+}
+
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+	return rq->buf_index++;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct vnic_rq_buf *buf = rq->to_use;
+
+	buf->os_buf = os_buf;
+	buf->os_buf_index = os_buf_index;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	rq->to_use = buf;
+	rq->ring.desc_avail--;
+
+	/* Move the posted_index every nth descriptor */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
+#endif
+
+	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
+		iowrite32(buf->index, &rq->ctrl->posted_index);
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+	rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+	VNIC_RQ_RETURN_DESC,
+	VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	int desc_return, void (*buf_service)(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque), void *opaque)
+{
+	struct vnic_rq_buf *buf;
+	int skipped;
+
+	buf = rq->to_clean;
+	while (1) {
+
+		skipped = (buf->index != completed_index);
+
+		(*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+		if (desc_return == VNIC_RQ_RETURN_DESC)
+			rq->ring.desc_avail++;
+
+		rq->to_clean = buf->next;
+
+		if (!skipped)
+			break;
+
+		buf = rq->to_clean;
+	}
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+	int (*buf_fill)(struct vnic_rq *rq))
+{
+	int err;
+
+	while (vnic_rq_desc_avail(rq) > 1) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
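The rq->bufs[] bookkeeping above splits the descriptor ring into blocks of VNIC_RQ_BUF_BLK_ENTRIES (64) vnic_rq_buf entries, and vnic_rq_init() locates the buffer for a flat hardware index as bufs[index / 64][index % 64]. A standalone illustration of that arithmetic (userspace C, mirroring the macro value rather than including the header):

#include <stdio.h>

#define BLK_ENTRIES 64	/* mirrors VNIC_RQ_BUF_BLK_ENTRIES */

int main(void)
{
	unsigned int desc_count = 256;	/* example ring size */
	unsigned int blks = (desc_count + BLK_ENTRIES - 1) / BLK_ENTRIES;
	unsigned int fetch_index = 130;	/* example hardware index */

	printf("%u descriptors -> %u buf blocks\n", desc_count, blks);
	printf("index %u maps to bufs[%u][%u]\n", fetch_index,
	       fetch_index / BLK_ENTRIES, fetch_index % BLK_ENTRIES);
	return 0;
}
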
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
new file mode 100644
index 0000000..e325d65
--- /dev/null
+++ b/drivers/net/enic/vnic_rss.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ */
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+	struct {
+		u8 b[10];
+		u8 b_pad[6];
+	} key[4];
+	u64 raw[8];
+};
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+	struct {
+		u8 b[4];
+		u8 b_pad[4];
+	} cpu[32];
+	u64 raw[32];
+};
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+
+#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
new file mode 100644
index 0000000..9ff9614
--- /dev/null
+++ b/drivers/net/enic/vnic_stats.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+	u64 tx_frames_ok;
+	u64 tx_unicast_frames_ok;
+	u64 tx_multicast_frames_ok;
+	u64 tx_broadcast_frames_ok;
+	u64 tx_bytes_ok;
+	u64 tx_unicast_bytes_ok;
+	u64 tx_multicast_bytes_ok;
+	u64 tx_broadcast_bytes_ok;
+	u64 tx_drops;
+	u64 tx_errors;
+	u64 tx_tso;
+	u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+	u64 rx_frames_ok;
+	u64 rx_frames_total;
+	u64 rx_unicast_frames_ok;
+	u64 rx_multicast_frames_ok;
+	u64 rx_broadcast_frames_ok;
+	u64 rx_bytes_ok;
+	u64 rx_unicast_bytes_ok;
+	u64 rx_multicast_bytes_ok;
+	u64 rx_broadcast_bytes_ok;
+	u64 rx_drop;
+	u64 rx_no_bufs;
+	u64 rx_errors;
+	u64 rx_rss;
+	u64 rx_crc_errors;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_to_max;
+	u64 rsvd[16];
+};
+
+struct vnic_stats {
+	struct vnic_tx_stats tx;
+	struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
new file mode 100644
index 0000000..a576d04
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+	struct vnic_wq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = wq->ring.desc_count;
+	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+	vdev = wq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!wq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc wq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = wq->bufs[i];
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)wq->ring.descs +
+				wq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = wq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+				buf->next = wq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = wq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+		kfree(wq->bufs[i]);
+		wq->bufs[i] = NULL;
+	}
+
+	wq->ctrl = NULL;
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_wq_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_wq_alloc_bufs(wq);
+	if (err) {
+		vnic_wq_free(wq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+	iowrite32(0, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+	return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+	struct vnic_wq_buf *buf;
+
+	BUG_ON(ioread32(&wq->ctrl->enable));
+
+	buf = wq->to_clean;
+
+	while (vnic_wq_desc_used(wq) > 0) {
+
+		(*buf_clean)(wq, buf);
+
+		buf = wq->to_clean = buf->next;
+		wq->ring.desc_avail++;
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
new file mode 100644
index 0000000..7081828
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 dca_value;			/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_enable;	/* 0x40 */
+	u32 pad7;
+	u32 error_interrupt_offset;	/* 0x48 */
+	u32 pad8;
+	u32 error_status;		/* 0x50 */
+	u32 pad9;
+};
+
+struct vnic_wq_buf {
+	struct vnic_wq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int len;
+	unsigned int index;
+	int sop;
+	void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_wq_ctrl __iomem *ctrl;              /* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+	struct vnic_wq_buf *to_use;
+	struct vnic_wq_buf *to_clean;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+	return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr,
+	unsigned int len, int sop, int eop)
+{
+	struct vnic_wq_buf *buf = wq->to_use;
+
+	buf->sop = sop;
+	buf->os_buf = eop ? os_buf : NULL;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	if (eop)
+		iowrite32(buf->index, &wq->ctrl->posted_index);
+	wq->to_use = buf;
+
+	wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	void (*buf_service)(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+	void *opaque)
+{
+	struct vnic_wq_buf *buf;
+
+	buf = wq->to_clean;
+	while (1) {
+
+		(*buf_service)(wq, cq_desc, buf, opaque);
+
+		wq->ring.desc_avail++;
+
+		wq->to_clean = buf->next;
+
+		if (buf->index == completed_index)
+			break;
+
+		buf = wq->to_clean;
+	}
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
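vnic_wq_post() above stores the frame pointer only on the eop fragment and rings posted_index once per frame, while vnic_wq_clean() and vnic_wq_service() walk the completed buffers and hand each one to a caller-supplied callback. A sketch of such a callback follows; in the real driver the buffer would also be DMA-unmapped and released with the appropriate skb helper, details omitted here because struct vnic_dev is opaque outside vnic_dev.c. Assumes "vnic_wq.h" and <linux/slab.h> are included.

static void example_wq_buf_clean(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	/* Non-final fragments carry a NULL os_buf, so there is nothing
	 * to free for them; kfree(NULL) is a no-op. */
	kfree(buf->os_buf);
	buf->os_buf = NULL;
}

/*
 * Typical teardown order for a work queue:
 *
 *	vnic_wq_disable(wq);
 *	vnic_wq_clean(wq, example_wq_buf_clean);
 *	vnic_wq_free(wq);
 */
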
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
new file mode 100644
index 0000000..483596c
--- /dev/null
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+	__le64 address;
+	__le16 length;
+	__le16 mss_loopback;
+	__le16 header_length_flags;
+	__le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS		64
+#define WQ_ENET_LEN_BITS		14
+#define WQ_ENET_LEN_MASK		((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS		14
+#define WQ_ENET_MSS_MASK		((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT		2
+#define WQ_ENET_LOOPBACK_SHIFT		1
+#define WQ_ENET_HDRLEN_BITS		10
+#define WQ_ENET_HDRLEN_MASK		((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS		2
+#define WQ_ENET_FLAGS_OM_MASK		((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT		12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT	13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT	14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT	15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM	0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED	1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4	2
+#define WQ_ENET_OFFLOAD_MODE_TSO	3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+	u64 address, u16 length, u16 mss, u16 header_length,
+	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+	desc->header_length_flags = cpu_to_le16(
+		(header_length & WQ_ENET_HDRLEN_MASK) |
+		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+	desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+	u64 *address, u16 *length, u16 *mss, u16 *header_length,
+	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+		WQ_ENET_MSS_MASK;
+	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+		WQ_ENET_LOOPBACK_SHIFT) & 1);
+	*header_length = le16_to_cpu(desc->header_length_flags) &
+		WQ_ENET_HDRLEN_MASK;
+	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+	*vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
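wq_enet_desc_enc() above packs the 16-byte transmit descriptor; the example below encodes a single-fragment frame with no checksum/TSO offload and no VLAN insertion. The dma_addr and len values are placeholders supplied by the caller, and "wq_enet_desc.h" is assumed to be included.

static void example_fill_tx_desc(struct wq_enet_desc *desc,
				 u64 dma_addr, u16 len)
{
	wq_enet_desc_enc(desc,
			 dma_addr,			/* buffer address */
			 len,				/* buffer length */
			 0,				/* mss: unused without TSO */
			 0,				/* header_length */
			 WQ_ENET_OFFLOAD_MODE_CSUM,	/* no L4 csum, no TSO */
			 1,				/* eop: last fragment */
			 1,				/* cq_entry: want completion */
			 0,				/* fcoe_encap */
			 0,				/* vlan_tag_insert */
			 0,				/* vlan_tag */
			 0);				/* loopback */
}
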
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index eeb55ed..cc7328b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -337,7 +337,7 @@
 	NvRegMSIXIrqStatus = 0x3f0,
 
 	NvRegPowerState2 = 0x600,
-#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
+#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
 #define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 #define NVREG_POWERSTATE2_PHY_RESET		0x0004
 };
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9d46182..cb51c1fb 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -664,23 +664,6 @@
 	return NETDEV_TX_OK;
 }
 
-static int fs_request_irq(struct net_device *dev, int irq, const char *name,
-		irq_handler_t irqf)
-{
-	struct fs_enet_private *fep = netdev_priv(dev);
-
-	(*fep->ops->pre_request_irq)(dev, irq);
-	return request_irq(irq, irqf, IRQF_SHARED, name, dev);
-}
-
-static void fs_free_irq(struct net_device *dev, int irq)
-{
-	struct fs_enet_private *fep = netdev_priv(dev);
-
-	free_irq(irq, dev);
-	(*fep->ops->post_free_irq)(dev, irq);
-}
-
 static void fs_timeout(struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
@@ -800,7 +783,8 @@
 		napi_enable(&fep->napi);
 
 	/* Install our interrupt handler. */
-	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
+	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
+			"fs_enet-mac", dev);
 	if (r != 0) {
 		printk(KERN_ERR DRV_MODULE_NAME
 		       ": %s Could not allocate FS_ENET IRQ!", dev->name);
@@ -842,7 +826,7 @@
 	/* release any irqs */
 	phy_disconnect(fep->phydev);
 	fep->phydev = NULL;
-	fs_free_irq(dev, fep->interrupt);
+	free_irq(fep->interrupt, dev);
 
 	return 0;
 }
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index db46d2e..85a4bab 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -34,8 +34,6 @@
 	void (*adjust_link)(struct net_device *dev);
 	void (*restart)(struct net_device *dev);
 	void (*stop)(struct net_device *dev);
-	void (*pre_request_irq)(struct net_device *dev, int irq);
-	void (*post_free_irq)(struct net_device *dev, int irq);
 	void (*napi_clear_rx_event)(struct net_device *dev);
 	void (*napi_enable_rx)(struct net_device *dev);
 	void (*napi_disable_rx)(struct net_device *dev);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 1c7ef81..22e5a84 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -421,16 +421,6 @@
 	fs_cleanup_bds(dev);
 }
 
-static void pre_request_irq(struct net_device *dev, int irq)
-{
-	/* nothing */
-}
-
-static void post_free_irq(struct net_device *dev, int irq)
-{
-	/* nothing */
-}
-
 static void napi_clear_rx_event(struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
@@ -540,8 +530,6 @@
 	.set_multicast_list	= set_multicast_list,
 	.restart		= restart,
 	.stop			= stop,
-	.pre_request_irq	= pre_request_irq,
-	.post_free_irq		= post_free_irq,
 	.napi_clear_rx_event	= napi_clear_rx_event,
 	.napi_enable_rx		= napi_enable_rx,
 	.napi_disable_rx	= napi_disable_rx,
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 0a7d1c5..14e5753 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -313,11 +313,7 @@
 	 * Clear any outstanding interrupt.
 	 */
 	FW(fecp, ievent, 0xffc0);
-#ifndef CONFIG_PPC_MERGE
-	FW(fecp, ivec, (fep->interrupt / 2) << 29);
-#else
 	FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
-#endif
 
 	/*
 	 * adjust to speed (only for DUET & RMII)
@@ -413,30 +409,6 @@
 	}
 }
 
-static void pre_request_irq(struct net_device *dev, int irq)
-{
-#ifndef CONFIG_PPC_MERGE
-	immap_t *immap = fs_enet_immap;
-	u32 siel;
-
-	/* SIU interrupt */
-	if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
-
-		siel = in_be32(&immap->im_siu_conf.sc_siel);
-		if ((irq & 1) == 0)
-			siel |= (0x80000000 >> irq);
-		else
-			siel &= ~(0x80000000 >> (irq & ~1));
-		out_be32(&immap->im_siu_conf.sc_siel, siel);
-	}
-#endif
-}
-
-static void post_free_irq(struct net_device *dev, int irq)
-{
-	/* nothing */
-}
-
 static void napi_clear_rx_event(struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
@@ -529,8 +501,6 @@
 	.set_multicast_list	= set_multicast_list,
 	.restart		= restart,
 	.stop			= stop,
-	.pre_request_irq	= pre_request_irq,
-	.post_free_irq		= post_free_irq,
 	.napi_clear_rx_event	= napi_clear_rx_event,
 	.napi_enable_rx		= napi_enable_rx,
 	.napi_disable_rx	= napi_disable_rx,
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 22f50dd..008cdd9 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -377,30 +377,6 @@
 	fs_cleanup_bds(dev);
 }
 
-static void pre_request_irq(struct net_device *dev, int irq)
-{
-#ifndef CONFIG_PPC_MERGE
-	immap_t *immap = fs_enet_immap;
-	u32 siel;
-
-	/* SIU interrupt */
-	if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
-
-		siel = in_be32(&immap->im_siu_conf.sc_siel);
-		if ((irq & 1) == 0)
-			siel |= (0x80000000 >> irq);
-		else
-			siel &= ~(0x80000000 >> (irq & ~1));
-		out_be32(&immap->im_siu_conf.sc_siel, siel);
-	}
-#endif
-}
-
-static void post_free_irq(struct net_device *dev, int irq)
-{
-	/* nothing */
-}
-
 static void napi_clear_rx_event(struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
@@ -494,8 +470,6 @@
 	.set_multicast_list	= set_multicast_list,
 	.restart		= restart,
 	.stop			= stop,
-	.pre_request_irq	= pre_request_irq,
-	.post_free_irq		= post_free_irq,
 	.napi_clear_rx_event	= napi_clear_rx_event,
 	.napi_enable_rx		= napi_enable_rx,
 	.napi_disable_rx	= napi_disable_rx,
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index ebcfb27..678f48c 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -136,12 +136,12 @@
 
 	/* Wait until the bus is free */
 	while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) &&
-			timeout--)
+			--timeout)
 		cpu_relax();
 
 	mutex_unlock(&bus->mdio_lock);
 
-	if(timeout <= 0) {
+	if (timeout == 0) {
 		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
 				bus->name);
 		return -EBUSY;
@@ -211,19 +211,21 @@
 	gfar_write(&enet_regs->tbipa, 0);
 	for (i = PHY_MAX_ADDR; i > 0; i--) {
 		u32 phy_id;
-		int r;
 
-		r = get_phy_id(new_bus, i, &phy_id);
-		if (r)
-			return r;
+		err = get_phy_id(new_bus, i, &phy_id);
+		if (err)
+			goto bus_register_fail;
 
 		if (phy_id == 0xffffffff)
 			break;
 	}
 
 	/* The bus is full.  We don't support using 31 PHYs, sorry */
-	if (i == 0)
-		return -EBUSY;
+	if (i == 0) {
+		err = -EBUSY;
+
+		goto bus_register_fail;
+	}
 
 	gfar_write(&enet_regs->tbipa, i);
 
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
index 70a3272..bcec732 100644
--- a/drivers/net/ibm_newemac/Kconfig
+++ b/drivers/net/ibm_newemac/Kconfig
@@ -1,6 +1,6 @@
 config IBM_NEW_EMAC
 	tristate "IBM EMAC Ethernet support"
-	depends on PPC_DCR && PPC_MERGE
+	depends on PPC_DCR
 	select CRC32
 	help
 	  This driver supports the IBM EMAC family of Ethernet controllers
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index eaa7262..717dc38 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -102,7 +102,7 @@
 /* MAL V1 IER bits */
 #define   MAL1_IER_NWE		0x00000008
 #define   MAL1_IER_SOC_EVENTS	MAL1_IER_NWE
-#define   MAL1_IER_EVENTS	(MAL1_IER_SOC_EVENTS | MAL_IER_OTE | \
+#define   MAL1_IER_EVENTS	(MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
 				 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
 /* MAL V2 IER bits */
@@ -110,7 +110,7 @@
 #define   MAL2_IER_PRE		0x00000040
 #define   MAL2_IER_PWE		0x00000020
 #define   MAL2_IER_SOC_EVENTS	(MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
-#define   MAL2_IER_EVENTS	(MAL2_IER_SOC_EVENTS | MAL_IER_OTE | \
+#define   MAL2_IER_EVENTS	(MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
 				 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
 
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index 37bfeea..9164abb 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -321,7 +321,7 @@
 
 static int m88e1111_init(struct mii_phy *phy)
 {
-	pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__);
+	pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
 	phy_write(phy, 0x14, 0x0ce3);
 	phy_write(phy, 0x18, 0x4101);
 	phy_write(phy, 0x09, 0x0e00);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 634c4c9..93d02ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3563,10 +3563,6 @@
 	struct net_device *netdev = adapter->netdev;
 	int work_done = 0;
 
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(netdev))
-		goto quit_polling;
-
 #ifdef CONFIG_DCA
 	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 		igb_update_rx_dca(rx_ring);
@@ -3576,7 +3572,6 @@
 
 	/* If not enough Rx work done, exit the polling mode */
 	if ((work_done == 0) || !netif_running(netdev)) {
-quit_polling:
 		netif_rx_complete(netdev, napi);
 
 		if (adapter->itr_setting & 3) {
@@ -3617,16 +3612,14 @@
 	unsigned int i;
 	u32 head, oldhead;
 	unsigned int count = 0;
-	bool cleaned = false;
-	bool retval = true;
 	unsigned int total_bytes = 0, total_packets = 0;
+	bool retval = true;
 
 	rmb();
 	head = get_head(tx_ring);
 	i = tx_ring->next_to_clean;
 	while (1) {
 		while (i != head) {
-			cleaned = true;
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			skb = buffer_info->skb;
@@ -3643,7 +3636,6 @@
 			}
 
 			igb_unmap_and_free_tx_resource(adapter, buffer_info);
-			tx_desc->upper.data = 0;
 
 			i++;
 			if (i == tx_ring->count)
@@ -3665,7 +3657,7 @@
 done_cleaning:
 	tx_ring->next_to_clean = i;
 
-	if (unlikely(cleaned &&
+	if (unlikely(count &&
 		     netif_carrier_ok(netdev) &&
 		     IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
 		/* Make sure that anybody stopping the queue after this
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 18f4b3a..9c926d2 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -165,7 +165,7 @@
 	unsigned iobase = pci_resource_start(pdev, 0);
 	unsigned i;
 
-	seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n",
+	seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
 		   pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
 	seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
 	seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 804698f..d85717e 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -85,7 +85,7 @@
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__FUNCTION__ , ## args))
+		__func__ , ## args))
 
 
 /* TX/RX descriptor defines */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 956914a..2198b77 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -37,17 +36,15 @@
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 #include <linux/dca.h>
 #endif
 
-#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
-
 #define PFX "ixgbe: "
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__FUNCTION__ , ## args)))
+		__func__ , ## args)))
 
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD		   1024
@@ -58,23 +55,14 @@
 #define IXGBE_MAX_RXD			   4096
 #define IXGBE_MIN_RXD			     64
 
-#define IXGBE_DEFAULT_RXQ			   1
-#define IXGBE_MAX_RXQ				   1
-#define IXGBE_MIN_RXQ				   1
-
-#define IXGBE_DEFAULT_ITR_RX_USECS	    125  /*   8k irqs/sec */
-#define IXGBE_DEFAULT_ITR_TX_USECS	    250  /*   4k irqs/sec */
-#define IXGBE_MIN_ITR_USECS		    100  /* 500k irqs/sec */
-#define IXGBE_MAX_ITR_USECS		  10000  /* 100  irqs/sec */
-
 /* flow control */
 #define IXGBE_DEFAULT_FCRTL		0x10000
-#define IXGBE_MIN_FCRTL			      0
+#define IXGBE_MIN_FCRTL			   0x40
 #define IXGBE_MAX_FCRTL			0x7FF80
 #define IXGBE_DEFAULT_FCRTH		0x20000
-#define IXGBE_MIN_FCRTH			      0
+#define IXGBE_MIN_FCRTH			  0x600
 #define IXGBE_MAX_FCRTH			0x7FFF0
-#define IXGBE_DEFAULT_FCPAUSE		 0x6800  /* may be too long */
+#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
 #define IXGBE_MIN_FCPAUSE		      0
 #define IXGBE_MAX_FCPAUSE		 0xFFFF
 
@@ -88,9 +76,6 @@
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 
-/* How many Tx Descriptors do we need to call netif_wake_queue? */
-#define IXGBE_TX_QUEUE_WAKE 16
-
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 
@@ -119,6 +104,7 @@
 	dma_addr_t dma;
 	struct page *page;
 	dma_addr_t page_dma;
+	unsigned int page_offset;
 };
 
 struct ixgbe_queue_stats {
@@ -150,22 +136,20 @@
 		      * offset associated with this ring, which is different
 		      * for DCE and RSS modes */
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	/* cpu for tx queue */
 	int cpu;
 #endif
 	struct net_lro_mgr lro_mgr;
 	bool lro_used;
 	struct ixgbe_queue_stats stats;
-	u8 v_idx; /* maps directly to the index for this ring in the hardware
-		   * vector array, can also be used for finding the bit in EICR
-		   * and friends that represents the vector for this ring */
+	u16 v_idx; /* maps directly to the index for this ring in the hardware
+	           * vector array, can also be used for finding the bit in EICR
+	           * and friends that represents the vector for this ring */
 
-	u32 eims_value;
-	u16 itr_register;
 
-	char name[IFNAMSIZ + 5];
 	u16 work_limit;                /* max work per interrupt */
+	u16 rx_buf_len;
 };
 
 #define RING_F_VMDQ 1
@@ -190,8 +174,8 @@
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
 	u8 rxr_count;     /* Rx ring count assigned to this vector */
 	u8 txr_count;     /* Tx ring count assigned to this vector */
-	u8 tx_eitr;
-	u8 rx_eitr;
+	u8 tx_itr;
+	u8 rx_itr;
 	u32 eitr;
 };
 
@@ -228,7 +212,6 @@
 	struct timer_list watchdog_timer;
 	struct vlan_group *vlgrp;
 	u16 bd_number;
-	u16 rx_buf_len;
 	struct work_struct reset_task;
 	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
 	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
@@ -240,7 +223,9 @@
 
 	/* TX */
 	struct ixgbe_ring *tx_ring;	/* One per active queue */
+	int num_tx_queues;
 	u64 restart_queue;
+	u64 hw_csum_tx_good;
 	u64 lsc_int;
 	u64 hw_tso_ctxt;
 	u64 hw_tso6_ctxt;
@@ -249,12 +234,10 @@
 
 	/* RX */
 	struct ixgbe_ring *rx_ring;	/* One per active queue */
-	u64 hw_csum_tx_good;
+	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_csum_rx_good;
 	u64 non_eop_descs;
-	int num_tx_queues;
-	int num_rx_queues;
 	int num_msix_vectors;
 	struct ixgbe_ring_feature ring_feature[3];
 	struct msix_entry *msix_entries;
@@ -267,15 +250,28 @@
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1 << 0)
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 6)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 7)
-#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
+#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
+#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
+
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -288,14 +284,21 @@
 	struct ixgbe_hw_stats stats;
 
 	/* Interrupt Throttle Rate */
-	u32 rx_eitr;
-	u32 tx_eitr;
+	u32 eitr_param;
 
 	unsigned long state;
 	u64 tx_busy;
 	u64 lro_aggregated;
 	u64 lro_flushed;
 	u64 lro_no_desc;
+	unsigned int tx_ring_count;
+	unsigned int rx_ring_count;
+
+	u32 link_speed;
+	bool link_up;
+	unsigned long link_check_timeout;
+
+	struct work_struct watchdog_task;
 };
 
 enum ixbge_state_t {
@@ -317,11 +320,11 @@
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *rxdr);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *txdr);
+extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index f96358b..7cddcfb 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -36,67 +35,62 @@
 #define IXGBE_82598_MAX_TX_QUEUES 32
 #define IXGBE_82598_MAX_RX_QUEUES 64
 #define IXGBE_82598_RAR_ENTRIES   16
+#define IXGBE_82598_MC_TBL_SIZE  128
+#define IXGBE_82598_VFT_TBL_SIZE 128
 
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
-					 bool *autoneg);
-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
-						u32 *speed, bool *autoneg);
-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
-				      bool *link_up);
-static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					    bool autoneg,
-					    bool autoneg_wait_to_complete);
+static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					       bool autoneg,
-					       bool autoneg_wait_to_complete);
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete);
 
-
+/**
+ *  ixgbe_get_invariants_82598 - Get invariant data for the 82598 MAC
+ *  @hw: pointer to hardware structure
+ **/
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
-	hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
-	hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-	hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
 
-	/* PHY ops are filled in by default properly for Fiber only */
-	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
-		hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598;
-		hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598;
-		hw->mac.ops.get_link_settings =
-				&ixgbe_get_copper_link_settings_82598;
+	/* Call PHY identify routine to get the phy type */
+	ixgbe_identify_phy_generic(hw);
 
-		/* Call PHY identify routine to get the phy type */
-		ixgbe_identify_phy(hw);
-
-		switch (hw->phy.type) {
-		case ixgbe_phy_tn:
-			hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
-			hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
-			hw->phy.ops.setup_link_speed =
-					&ixgbe_setup_tnx_phy_link_speed;
-			break;
-		default:
-			break;
-		}
+	/* PHY Init */
+	switch (phy->type) {
+	default:
+		break;
 	}
 
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+		mac->ops.setup_link_speed =
+		                     &ixgbe_setup_copper_link_speed_82598;
+		mac->ops.get_link_capabilities =
+		                     &ixgbe_get_copper_link_capabilities_82598;
+	}
+
+	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+
 	return 0;
 }
 
 /**
- *  ixgbe_get_link_settings_82598 - Determines default link settings
+ *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
  *  @autoneg: boolean auto-negotiation value
  *
- *  Determines the default link settings by reading the AUTOC register.
+ *  Determines the link capabilities by reading the AUTOC register.
  **/
-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
-					 bool *autoneg)
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
 {
 	s32 status = 0;
 	s32 autoc_reg;
@@ -145,15 +139,16 @@
 }
 
 /**
- *  ixgbe_get_copper_link_settings_82598 - Determines default link settings
+ *  ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
  *  @autoneg: boolean auto-negotiation value
  *
- *  Determines the default link settings by reading the AUTOC register.
+ *  Determines the link capabilities by reading the AUTOC register.
  **/
-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
-						u32 *speed, bool *autoneg)
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
 {
 	s32 status = IXGBE_ERR_LINK_SETUP;
 	u16 speed_ability;
@@ -161,9 +156,9 @@
 	*speed = 0;
 	*autoneg = true;
 
-	status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
-				    IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				    &speed_ability);
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+	                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+	                              &speed_ability);
 
 	if (status == 0) {
 		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -191,11 +186,9 @@
 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
 	case IXGBE_DEV_ID_82598EB_CX4:
 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+	case IXGBE_DEV_ID_82598EB_XF_LR:
 		media_type = ixgbe_media_type_fiber;
 		break;
-	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
-		media_type = ixgbe_media_type_copper;
-		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -205,6 +198,122 @@
 }
 
 /**
+ *  ixgbe_setup_fc_82598 - Configure flow control settings
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Configures the flow control settings based on SW configuration.  This
+ *  function is used for 802.3x flow control configuration only.
+ **/
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+	u32 frctl_reg;
+	u32 rmcs_reg;
+
+	if (packetbuf_num < 0 || packetbuf_num > 7) {
+		hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
+		          " 0-7\n", packetbuf_num);
+	}
+
+	frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+	/*
+	 * 10 gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.type == ixgbe_fc_default)
+		hw->fc.type = ixgbe_fc_full;
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	/*
+	 * The possible values of the "flow_control" parameter are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames but not
+	 *    send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but we do not
+	 *    support receiving pause frames)
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.type) {
+	case ixgbe_fc_none:
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled,
+		 * and Tx Flow control is disabled.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	case ixgbe_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	default:
+		/* We should never get here.  The value should be 0-3. */
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		break;
+	}
+
+	/* Enable 802.3x based flow control settings. */
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+	/*
+	 * Check for invalid software configuration, zeros are completely
+	 * invalid for all parameters used past this point, and if we enable
+	 * flow control with zero water marks, we blast flow control packets.
+	 */
+	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+		hw_dbg(hw, "Flow control structure initialized incorrectly\n");
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
+	}
+
+	/*
+	 * We need to set up the Receive Threshold high and low water
+	 * marks as well as (optionally) enabling the transmission of
+	 * XON frames.
+	 */
+	if (hw->fc.type & ixgbe_fc_tx_pause) {
+		if (hw->fc.send_xon) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+			                hw->fc.low_water);
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
+		                (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+	return 0;
+}
+
+/**
  *  ixgbe_setup_mac_link_82598 - Configures MAC link settings
  *  @hw: pointer to hardware structure
  *
@@ -248,8 +357,7 @@
 			}
 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-				hw_dbg(hw,
-				       "Autonegotiation did not complete.\n");
+				hw_dbg(hw, "Autonegotiation did not complete.\n");
 			}
 		}
 	}
@@ -259,8 +367,8 @@
 	 * case we get disconnected and then reconnected into a different hub
 	 * or switch with different Flow Control capabilities.
 	 */
-	hw->fc.type = hw->fc.original_type;
-	ixgbe_setup_fc(hw, 0);
+	hw->fc.original_type = hw->fc.type;
+	ixgbe_setup_fc_82598(hw, 0);
 
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
@@ -273,20 +381,35 @@
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
  *  @link_up: true is link is up, false otherwise
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
  *
  *  Reads the links register to determine if link is up and the current speed
  **/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
-				      bool *link_up)
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed, bool *link_up,
+                                      bool link_up_wait_to_complete)
 {
 	u32 links_reg;
+	u32 i;
 
 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
-	if (links_reg & IXGBE_LINKS_UP)
-		*link_up = true;
-	else
-		*link_up = false;
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msleep(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
 
 	if (links_reg & IXGBE_LINKS_SPEED)
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
@@ -296,6 +419,7 @@
 	return 0;
 }
 
+
 /**
  *  ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
@@ -306,18 +430,18 @@
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
-					    u32 speed, bool autoneg,
-					    bool autoneg_wait_to_complete)
+                                            ixgbe_link_speed speed, bool autoneg,
+                                            bool autoneg_wait_to_complete)
 {
 	s32 status = 0;
 
 	/* If speed is 10G, then check for CX4 or XAUI. */
 	if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
-	    (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4)))
+	    (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
 		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-	else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg))
+	} else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
 		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
-	else if (autoneg) {
+	} else if (autoneg) {
 		/* BX mode - Autonegotiate 1G */
 		if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
 			hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
@@ -336,7 +460,7 @@
 		 * ixgbe_hw This will write the AUTOC register based on the new
 		 * stored values
 		 */
-		hw->mac.ops.setup_link(hw);
+		ixgbe_setup_mac_link_82598(hw);
 	}
 
 	return status;
@@ -354,18 +478,17 @@
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 
 	/* Restart autonegotiation on PHY */
-	if (hw->phy.ops.setup_link)
-		status = hw->phy.ops.setup_link(hw);
+	status = hw->phy.ops.setup_link(hw);
 
-	/* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */
+	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
 	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
 
 	/* Set up MAC */
-	hw->mac.ops.setup_link(hw);
+	ixgbe_setup_mac_link_82598(hw);
 
 	return status;
 }
@@ -379,23 +502,23 @@
  *
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					       bool autoneg,
-					       bool autoneg_wait_to_complete)
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete)
 {
-	s32 status = 0;
+	s32 status;
 
 	/* Setup the PHY according to input speed */
-	if (hw->phy.ops.setup_link_speed)
-		status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-						autoneg_wait_to_complete);
+	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+	                                      autoneg_wait_to_complete);
 
 	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
 	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
 
 	/* Set up MAC */
-	hw->mac.ops.setup_link(hw);
+	ixgbe_setup_mac_link_82598(hw);
 
 	return status;
 }
@@ -404,7 +527,7 @@
  *  ixgbe_reset_hw_82598 - Performs hardware reset
  *  @hw: pointer to hardware structure
  *
- *  Resets the hardware by reseting the transmit and receive units, masks and
+ *  Resets the hardware by resetting the transmit and receive units, masks and
  *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
  *  reset.
  **/
@@ -418,35 +541,44 @@
 	u8  analog_val;
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
-	ixgbe_stop_adapter(hw);
+	hw->mac.ops.stop_adapter(hw);
 
 	/*
-	 * Power up the Atlas TX lanes if they are currently powered down.
-	 * Atlas TX lanes are powered down for MAC loopback tests, but
+	 * Power up the Atlas Tx lanes if they are currently powered down.
+	 * Atlas Tx lanes are powered down for MAC loopback tests, but
 	 * they are not automatically restored on reset.
 	 */
-	ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
-		/* Enable TX Atlas so packets can be transmitted again */
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+		/* Enable Tx Atlas so packets can be transmitted again */
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+		                             &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+		                              analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+		                             &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+		                              analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+		                             &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+		                              analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+		                             &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+		                              analog_val);
 	}
 
 	/* Reset PHY */
-	ixgbe_reset_phy(hw);
+	if (hw->phy.reset_disable == false)
+		hw->phy.ops.reset(hw);
 
 	/*
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
@@ -499,29 +631,311 @@
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
 	} else {
 		hw->mac.link_attach_type =
-					 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
+		                         (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
 		hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
 		hw->mac.link_settings_loaded = true;
 	}
 
 	/* Store the permanent mac address */
-	ixgbe_get_mac_addr(hw, hw->mac.perm_addr);
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
 
 	return status;
 }
 
+/**
+ *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	rar_high &= ~IXGBE_RAH_VIND_MASK;
+	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	if (rar < rar_entries) {
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+		if (rar_high & IXGBE_RAH_VIND_MASK) {
+			rar_high &= ~IXGBE_RAH_VIND_MASK;
+			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+		}
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_set_vfta_82598 - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                         bool vlan_on)
+{
+	u32 regindex;
+	u32 bitindex;
+	u32 bits;
+	u32 vftabyte;
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/* Determine 32-bit word position in array */
+	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
+	/* Set the nibble for VMD queue index */
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+	bits &= (~(0x0F << bitindex));
+	bits |= (vind << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+	/* Determine the location of the bit for this VLAN id */
+	bitindex = vlan & 0x1F;   /* lower five bits */
+
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		bits |= (1 << bitindex);
+	else
+		/* Turn off this VLAN id */
+		bits &= ~(1 << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < hw->mac.vft_size; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+			                0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_blink_led_start_82598 - Blink LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ **/
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
+{
+	ixgbe_link_speed speed = 0;
+	bool link_up = 0;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/*
+	 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
+	 * force it if link is down.
+	 */
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+	if (!link_up) {
+		autoc_reg |= IXGBE_AUTOC_FLU;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+		msleep(10);
+	}
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ **/
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
+{
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	autoc_reg &= ~IXGBE_AUTOC_FLU;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg &= ~IXGBE_LED_BLINK(index);
+	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32  atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32  atlas_ctl;
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+	s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		break;
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		break;
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
+
+	default:
+		physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+		break;
+	}
+
+	return physical_layer;
+}
+
 static struct ixgbe_mac_operations mac_ops_82598 = {
-	.reset			= &ixgbe_reset_hw_82598,
+	.init_hw		= &ixgbe_init_hw_generic,
+	.reset_hw		= &ixgbe_reset_hw_82598,
+	.start_hw		= &ixgbe_start_hw_generic,
+	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type		= &ixgbe_get_media_type_82598,
+	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
+	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
+	.stop_adapter		= &ixgbe_stop_adapter_generic,
+	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
+	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
 	.setup_link		= &ixgbe_setup_mac_link_82598,
-	.check_link		= &ixgbe_check_mac_link_82598,
 	.setup_link_speed	= &ixgbe_setup_mac_link_speed_82598,
-	.get_link_settings	= &ixgbe_get_link_settings_82598,
+	.check_link		= &ixgbe_check_mac_link_82598,
+	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
+	.led_on			= &ixgbe_led_on_generic,
+	.led_off		= &ixgbe_led_off_generic,
+	.blink_led_start	= &ixgbe_blink_led_start_82598,
+	.blink_led_stop		= &ixgbe_blink_led_stop_82598,
+	.set_rar		= &ixgbe_set_rar_generic,
+	.clear_rar		= &ixgbe_clear_rar_generic,
+	.set_vmdq		= &ixgbe_set_vmdq_82598,
+	.clear_vmdq		= &ixgbe_clear_vmdq_82598,
+	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic,
+	.update_uc_addr_list	= &ixgbe_update_uc_addr_list_generic,
+	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
+	.enable_mc		= &ixgbe_enable_mc_generic,
+	.disable_mc		= &ixgbe_disable_mc_generic,
+	.clear_vfta		= &ixgbe_clear_vfta_82598,
+	.set_vfta		= &ixgbe_set_vfta_82598,
+	.setup_fc		= &ixgbe_setup_fc_82598,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+	.init_params		= &ixgbe_init_eeprom_params_generic,
+	.read			= &ixgbe_read_eeprom_generic,
+	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
+	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_82598 = {
+	.identify		= &ixgbe_identify_phy_generic,
+	/* .identify_sfp	= &ixgbe_identify_sfp_module_generic, */
+	.reset			= &ixgbe_reset_phy_generic,
+	.read_reg		= &ixgbe_read_phy_reg_generic,
+	.write_reg		= &ixgbe_write_phy_reg_generic,
+	.setup_link		= &ixgbe_setup_phy_link_generic,
+	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
 };
 
 struct ixgbe_info ixgbe_82598_info = {
 	.mac			= ixgbe_mac_82598EB,
 	.get_invariants		= &ixgbe_get_invariants_82598,
 	.mac_ops		= &mac_ops_82598,
+	.eeprom_ops		= &eeprom_ops_82598,
+	.phy_ops		= &phy_ops_82598,
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 7fd6aeb..f67c684 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -33,20 +32,28 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
-
 static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+                                        u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 
-static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
-static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 
 /**
- *  ixgbe_start_hw - Prepare hardware for TX/RX
+ *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
  *  @hw: pointer to hardware structure
  *
  *  Starts the hardware by filling the bus info structure and media type, clears
@@ -54,7 +61,7 @@
  *  table, VLAN filter table, calls routine to set up link and flow control
  *  settings, and leaves transmit and receive units disabled and uninitialized
  **/
-s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
 	u32 ctrl_ext;
 
@@ -62,22 +69,22 @@
 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
 
 	/* Identify the PHY */
-	ixgbe_identify_phy(hw);
+	hw->phy.ops.identify(hw);
 
 	/*
 	 * Store MAC address from RAR0, clear receive address registers, and
 	 * clear the multicast table
 	 */
-	ixgbe_init_rx_addrs(hw);
+	hw->mac.ops.init_rx_addrs(hw);
 
 	/* Clear the VLAN filter table */
-	ixgbe_clear_vfta(hw);
+	hw->mac.ops.clear_vfta(hw);
 
 	/* Set up link */
 	hw->mac.ops.setup_link(hw);
 
 	/* Clear statistics registers */
-	ixgbe_clear_hw_cntrs(hw);
+	hw->mac.ops.clear_hw_cntrs(hw);
 
 	/* Set No Snoop Disable */
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -92,34 +99,34 @@
 }
 
 /**
- *  ixgbe_init_hw - Generic hardware initialization
+ *  ixgbe_init_hw_generic - Generic hardware initialization
  *  @hw: pointer to hardware structure
  *
- *  Initialize the hardware by reseting the hardware, filling the bus info
+ *  Initialize the hardware by resetting the hardware, filling the bus info
  *  structure and media type, clears all on chip counters, initializes receive
  *  address registers, multicast table, VLAN filter table, calls routine to set
  *  up link and flow control settings, and leaves transmit and receive units
  *  disabled and uninitialized
  **/
-s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
 {
 	/* Reset the hardware */
-	hw->mac.ops.reset(hw);
+	hw->mac.ops.reset_hw(hw);
 
 	/* Start the HW */
-	ixgbe_start_hw(hw);
+	hw->mac.ops.start_hw(hw);
 
 	return 0;
 }
 
 /**
- *  ixgbe_clear_hw_cntrs - Generic clear hardware counters
+ *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
  *  @hw: pointer to hardware structure
  *
  *  Clears all hardware statistics counters by reading them from the hardware
  *  Statistics counters are clear on read.
  **/
-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
 {
 	u16 i = 0;
 
@@ -191,7 +198,36 @@
 }
 
 /**
- *  ixgbe_get_mac_addr - Generic get MAC address
+ *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number from the EEPROM
+ *
+ *  Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*pba_num = (u32)(data << 16);
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*pba_num |= data;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_mac_addr_generic - Generic get MAC address
  *  @hw: pointer to hardware structure
  *  @mac_addr: Adapter MAC address
  *
@@ -199,7 +235,7 @@
  *  A reset of the adapter must be performed prior to calling this function
  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
  **/
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
 {
 	u32 rar_high;
 	u32 rar_low;
@@ -217,30 +253,8 @@
 	return 0;
 }
 
-s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
-{
-	s32 ret_val;
-	u16 data;
-
-	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
-	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
-		return ret_val;
-	}
-	*part_num = (u32)(data << 16);
-
-	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
-	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
-		return ret_val;
-	}
-	*part_num |= data;
-
-	return 0;
-}
-
 /**
- *  ixgbe_stop_adapter - Generic stop TX/RX units
+ *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
  *  @hw: pointer to hardware structure
  *
  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
@@ -248,7 +262,7 @@
  *  the shared code and drivers to determine if the adapter is in a stopped
  *  state and should not touch the hardware.
  **/
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 {
 	u32 number_of_queues;
 	u32 reg_val;
@@ -264,6 +278,7 @@
 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_val &= ~(IXGBE_RXCTRL_RXEN);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
+	IXGBE_WRITE_FLUSH(hw);
 	msleep(2);
 
 	/* Clear interrupt mask to stop from interrupts being generated */
@@ -273,7 +288,7 @@
 	IXGBE_READ_REG(hw, IXGBE_EICR);
 
 	/* Disable the transmit unit.  Each queue must be disabled. */
-	number_of_queues = hw->mac.num_tx_queues;
+	number_of_queues = hw->mac.max_tx_queues;
 	for (i = 0; i < number_of_queues; i++) {
 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
@@ -282,15 +297,22 @@
 		}
 	}
 
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests
+	 */
+	if (ixgbe_disable_pcie_master(hw) != 0)
+		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+
 	return 0;
 }
 
 /**
- *  ixgbe_led_on - Turns on the software controllable LEDs.
+ *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
  *  @hw: pointer to hardware structure
  *  @index: led number to turn on
  **/
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
 {
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
@@ -304,11 +326,11 @@
 }
 
 /**
- *  ixgbe_led_off - Turns off the software controllable LEDs.
+ *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
  *  @hw: pointer to hardware structure
  *  @index: led number to turn off
  **/
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
 {
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
@@ -321,15 +343,14 @@
 	return 0;
 }
 
-
 /**
- *  ixgbe_init_eeprom - Initialize EEPROM params
+ *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
  *  @hw: pointer to hardware structure
  *
  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
  *  ixgbe_hw struct in order to set up EEPROM access.
  **/
-s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 	u32 eec;
@@ -337,6 +358,9 @@
 
 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
 		eeprom->type = ixgbe_eeprom_none;
+		/* Set default semaphore delay to 10ms which is a well
+		 * tested value */
+		eeprom->semaphore_delay = 10;
 
 		/*
 		 * Check for EEPROM present first.
@@ -369,18 +393,85 @@
 }
 
 /**
- *  ixgbe_read_eeprom - Read EEPROM word using EERD
+ *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit value from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                       u16 *data)
+{
+	s32 status;
+	u16 word_in;
+	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/* Prepare the EEPROM for reading  */
+	status = ixgbe_acquire_eeprom(hw);
+
+	if (status == 0) {
+		if (ixgbe_ready_eeprom(hw) != 0) {
+			ixgbe_release_eeprom(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	if (status == 0) {
+		ixgbe_standby_eeprom(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
+			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+		/* Send the READ command (opcode + addr) */
+		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+		                            IXGBE_EEPROM_OPCODE_BITS);
+		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
+		                            hw->eeprom.address_bits);
+
+		/* Read the data. */
+		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+		*data = (word_in >> 8) | (word_in << 8);
+
+		/* End this read operation */
+		ixgbe_release_eeprom(hw);
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_generic - Read EEPROM word using EERD
  *  @hw: pointer to hardware structure
  *  @offset: offset of  word in the EEPROM to read
  *  @data: word read from the EEPROM
  *
  *  Reads a 16 bit word from the EEPROM using the EERD register.
  **/
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
 	u32 eerd;
 	s32 status;
 
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
 	eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
 	       IXGBE_EEPROM_READ_REG_START;
 
@@ -389,10 +480,11 @@
 
 	if (status == 0)
 		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
-			IXGBE_EEPROM_READ_REG_DATA);
+		         IXGBE_EEPROM_READ_REG_DATA);
 	else
 		hw_dbg(hw, "Eeprom read timed out\n");
 
+out:
 	return status;
 }
 
@@ -420,6 +512,58 @@
 }
 
 /**
+ *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *
+ *  Prepares EEPROM for access using bit-bang method. This function should
+ *  be called before issuing a command to the EEPROM.
+ **/
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u32 eec;
+	u32 i;
+
+	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	if (status == 0) {
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		/* Request EEPROM Access */
+		eec |= IXGBE_EEC_REQ;
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+			if (eec & IXGBE_EEC_GNT)
+				break;
+			udelay(5);
+		}
+
+		/* Release if grant not acquired */
+		if (!(eec & IXGBE_EEC_GNT)) {
+			eec &= ~IXGBE_EEC_REQ;
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+			hw_dbg(hw, "Could not acquire EEPROM grant\n");
+
+			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	/* Setup EEPROM for Read/Write */
+	if (status == 0) {
+		/* Clear CS and SK */
+		eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_FLUSH(hw);
+		udelay(1);
+	}
+	return status;
+}
+
+/**
  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
  *  @hw: pointer to hardware structure
  *
@@ -475,7 +619,7 @@
 		 */
 		if (i >= timeout) {
 			hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
-				 "not granted.\n");
+			       "not granted.\n");
 			ixgbe_release_eeprom_semaphore(hw);
 			status = IXGBE_ERR_EEPROM;
 		}
@@ -503,6 +647,217 @@
 }
 
 /**
+ *  ixgbe_ready_eeprom - Polls for EEPROM ready
+ *  @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u16 i;
+	u8 spi_stat_reg;
+
+	/*
+	 * Read "Status Register" repeatedly until the LSB is cleared.  The
+	 * EEPROM will signal that the command has been completed by clearing
+	 * bit 0 of the internal status register.  If it's not cleared within
+	 * 5 milliseconds, then error out.
+	 */
+	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+		                            IXGBE_EEPROM_OPCODE_BITS);
+		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+			break;
+
+		udelay(5);
+		ixgbe_standby_eeprom(hw);
+	}
+
+	/*
+	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+	 * devices (and only 0-5mSec on 5V devices)
+	 */
+	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+		hw_dbg(hw, "SPI EEPROM Status error\n");
+		status = IXGBE_ERR_EEPROM;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ *  @hw: pointer to hardware structure
+ **/
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/* Toggle CS to flush commands */
+	eec |= IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(1);
+	eec &= ~IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(1);
+}
+
+/**
+ *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ *  @hw: pointer to hardware structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ **/
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+                                        u16 count)
+{
+	u32 eec;
+	u32 mask;
+	u32 i;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/*
+	 * Mask is used to shift "count" bits of "data" out to the EEPROM
+	 * one bit at a time.  Determine the starting bit based on count
+	 */
+	mask = 0x01 << (count - 1);
+
+	for (i = 0; i < count; i++) {
+		/*
+		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+		 * "1", and then raising and then lowering the clock (the SK
+		 * bit controls the clock input to the EEPROM).  A "0" is
+		 * shifted out to the EEPROM by setting "DI" to "0" and then
+		 * raising and then lowering the clock.
+		 */
+		if (data & mask)
+			eec |= IXGBE_EEC_DI;
+		else
+			eec &= ~IXGBE_EEC_DI;
+
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_FLUSH(hw);
+
+		udelay(1);
+
+		ixgbe_raise_eeprom_clk(hw, &eec);
+		ixgbe_lower_eeprom_clk(hw, &eec);
+
+		/*
+		 * Shift mask to signify next bit of data to shift in to the
+		 * EEPROM
+		 */
+		mask = mask >> 1;
+	}
+
+	/* We leave the "DI" bit set to "0" when we leave this routine. */
+	eec &= ~IXGBE_EEC_DI;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+}
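
The mask arithmetic in the shift-out routine walks the word most-significant bit first: the mask starts at 1 << (count - 1) and moves right one position per clock. A small host-side model of that walk, with no hardware access:

#include <stdint.h>
#include <stdio.h>

/* Host-side model of the MSB-first walk used by the shift-out routine:
 * print each bit of 'data' in the order it would be clocked out. */
static void shift_out_model(uint16_t data, uint16_t count)
{
	uint32_t mask = 1u << (count - 1);
	uint32_t i;

	for (i = 0; i < count; i++) {
		printf("%d", (data & mask) ? 1 : 0);
		mask >>= 1;			/* next lower bit */
	}
	printf("\n");
}

int main(void)
{
	shift_out_model(0x03, 8);		/* prints 00000011 */
	return 0;
}
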
+
+/**
+ *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @count: number of bits to shift in
+ **/
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+	u32 eec;
+	u32 i;
+	u16 data = 0;
+
+	/*
+	 * In order to read a register from the EEPROM, we need to shift
+	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+	 * the clock input to the EEPROM (setting the SK bit), and then reading
+	 * the value of the "DO" bit.  During this "shifting in" process the
+	 * "DI" bit should always be clear.
+	 */
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+	for (i = 0; i < count; i++) {
+		data = data << 1;
+		ixgbe_raise_eeprom_clk(hw, &eec);
+
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		eec &= ~(IXGBE_EEC_DI);
+		if (eec & IXGBE_EEC_DO)
+			data |= 1;
+
+		ixgbe_lower_eeprom_clk(hw, &eec);
+	}
+
+	return data;
+}
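
The shift-in direction mirrors it: the accumulator is shifted left each cycle and the sampled DO bit is ORed into the low position, so the first bit sampled ends up most significant. A host-side sketch of the same accumulation:

#include <stdint.h>
#include <stdio.h>

/* Host-side model: rebuild a word from bits sampled MSB-first, mirroring
 * the 'data = data << 1; if (DO) data |= 1;' accumulation above. */
static uint16_t shift_in_model(const int *bits, unsigned int count)
{
	uint16_t data = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		data <<= 1;
		if (bits[i])
			data |= 1;
	}
	return data;
}

int main(void)
{
	int bits[8] = { 1, 0, 1, 0, 0, 1, 0, 1 };	/* 0xA5 */

	printf("0x%02x\n", shift_in_model(bits, 8));
	return 0;
}
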
+
+/**
+ *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eec: EEC register's current value
+ **/
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	/*
+	 * Raise the clock input to the EEPROM
+	 * (setting the SK bit), then delay
+	 */
+	*eec = *eec | IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(1);
+}
+
+/**
+ *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eec: EEC register's current value
+ **/
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	/*
+	 * Lower the clock input to the EEPROM (clearing the SK bit), then
+	 * delay
+	 */
+	*eec = *eec & ~IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(1);
+}
+
+/**
+ *  ixgbe_release_eeprom - Release EEPROM, release semaphores
+ *  @hw: pointer to hardware structure
+ **/
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec |= IXGBE_EEC_CS;  /* Pull CS high */
+	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+
+	udelay(1);
+
+	/* Stop requesting EEPROM access */
+	eec &= ~IXGBE_EEC_REQ;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+}
+
+/**
  *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
@@ -517,7 +872,7 @@
 
 	/* Include 0x0-0x3F in the checksum */
 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
-		if (ixgbe_read_eeprom(hw, i, &word) != 0) {
+		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
 			hw_dbg(hw, "EEPROM read failed\n");
 			break;
 		}
@@ -526,15 +881,15 @@
 
 	/* Include all data from pointers except for the fw pointer */
 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
-		ixgbe_read_eeprom(hw, i, &pointer);
+		hw->eeprom.ops.read(hw, i, &pointer);
 
 		/* Make sure the pointer seems valid */
 		if (pointer != 0xFFFF && pointer != 0) {
-			ixgbe_read_eeprom(hw, pointer, &length);
+			hw->eeprom.ops.read(hw, pointer, &length);
 
 			if (length != 0xFFFF && length != 0) {
 				for (j = pointer+1; j <= pointer+length; j++) {
-					ixgbe_read_eeprom(hw, j, &word);
+					hw->eeprom.ops.read(hw, j, &word);
 					checksum += word;
 				}
 			}
@@ -547,14 +902,15 @@
 }
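
The checksum helpers above and below rely on plain 16-bit wraparound addition: the fixed low words are summed first, then every word of each pointed-to section; how the stored checksum is finally derived from that running sum is outside this hunk. A host-side model of the summation step only, with made-up buffer contents:

#include <stdint.h>
#include <stdio.h>

/* Host-side model of the summation used by ixgbe_calc_eeprom_checksum:
 * add 16-bit words with natural wraparound (no carries kept). */
static uint16_t sum_words(const uint16_t *words, unsigned int count)
{
	uint16_t sum = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		sum = (uint16_t)(sum + words[i]);
	return sum;
}

int main(void)
{
	/* Illustrative EEPROM image fragment, not real contents. */
	uint16_t image[4] = { 0x1122, 0x3344, 0x5566, 0x0001 };

	printf("running sum = 0x%04x\n", sum_words(image, 4));
	return 0;
}
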
 
 /**
- *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
  *  @hw: pointer to hardware structure
  *  @checksum_val: calculated checksum
  *
  *  Performs checksum calculation and validates the EEPROM checksum.  If the
  *  caller does not need checksum_val, the value can be NULL.
  **/
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+                                           u16 *checksum_val)
 {
 	s32 status;
 	u16 checksum;
@@ -565,12 +921,12 @@
 	 * not continue or we could be in for a very long wait while every
 	 * EEPROM read fails
 	 */
-	status = ixgbe_read_eeprom(hw, 0, &checksum);
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
 
 	if (status == 0) {
 		checksum = ixgbe_calc_eeprom_checksum(hw);
 
-		ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
 
 		/*
 		 * Verify read checksum from EEPROM is the same as
@@ -590,6 +946,33 @@
 }
 
 /**
+ *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 checksum;
+
+	/*
+	 * Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+	if (status == 0) {
+		checksum = ixgbe_calc_eeprom_checksum(hw);
+		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
+		                            checksum);
+	} else {
+		hw_dbg(hw, "EEPROM read failed\n");
+	}
+
+	return status;
+}
+
+/**
  *  ixgbe_validate_mac_addr - Validate MAC address
  *  @mac_addr: pointer to MAC address.
  *
@@ -607,61 +990,140 @@
 		status = IXGBE_ERR_INVALID_MAC_ADDR;
 	/* Reject the zero address */
 	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-		 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+	         mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
 		status = IXGBE_ERR_INVALID_MAC_ADDR;
 
 	return status;
 }
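
The tail of ixgbe_validate_mac_addr visible here rejects the all-zero address; the multicast and broadcast rejections sit just above this hunk. A host-side sketch of the usual Ethernet sanity checks, following the standard conventions rather than the driver's exact error codes:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Standard Ethernet conventions: bit 0 of the first byte marks a group
 * (multicast/broadcast) address; all-zero is never a valid station MAC. */
static bool mac_is_valid(const uint8_t a[6])
{
	bool all_zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
	bool multicast = a[0] & 0x01;

	return !all_zero && !multicast;
}

int main(void)
{
	uint8_t good[6] = { 0x00, 0x1B, 0x21, 0x01, 0x02, 0x03 };
	uint8_t bad[6]  = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };

	printf("%d %d\n", mac_is_valid(good), mac_is_valid(bad));
	return 0;
}
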
 
 /**
- *  ixgbe_set_rar - Set RX address register
+ *  ixgbe_set_rar_generic - Set Rx address register
  *  @hw: pointer to hardware structure
- *  @addr: Address to put into receive address register
  *  @index: Receive address register to write
- *  @vind: Vind to set RAR to
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set" or "pool" index
  *  @enable_addr: set flag that address is active
  *
  *  Puts an ethernet address into a receive address register.
  **/
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
-		  u32 enable_addr)
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                          u32 enable_addr)
 {
 	u32 rar_low, rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
 
-	/*
-	 * HW expects these in little endian so we reverse the byte order from
-	 * network order (big endian) to little endian
-	 */
-	rar_low = ((u32)addr[0] |
-		   ((u32)addr[1] << 8) |
-		   ((u32)addr[2] << 16) |
-		   ((u32)addr[3] << 24));
+	/* setup VMDq pool selection before this RAR gets enabled */
+	hw->mac.ops.set_vmdq(hw, index, vmdq);
 
-	rar_high = ((u32)addr[4] |
-		    ((u32)addr[5] << 8) |
-		    ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK));
+	/* Make sure we are using a valid rar index range */
+	if (index < rar_entries) {
+		/*
+		 * HW expects these in little endian so we reverse the byte
+		 * order from network order (big endian) to little endian
+		 */
+		rar_low = ((u32)addr[0] |
+		           ((u32)addr[1] << 8) |
+		           ((u32)addr[2] << 16) |
+		           ((u32)addr[3] << 24));
+		/*
+		 * Some parts put the VMDq setting in the extra RAH bits,
+		 * so save everything except the lower 16 bits that hold part
+		 * of the address and the address valid bit.
+		 */
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+		rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
 
-	if (enable_addr != 0)
-		rar_high |= IXGBE_RAH_AV;
+		if (enable_addr != 0)
+			rar_high |= IXGBE_RAH_AV;
 
-	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+	}
 
 	return 0;
 }
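
The little-endian packing described in the comment is easiest to see with a concrete address: the first four bytes of the network-order MAC fill RAL, the last two fill the low 16 bits of RAH (the upper RAH bits carry VMDq state and the valid flag, which is why they are preserved above). A host-side sketch of the packing, with no register access:

#include <stdint.h>
#include <stdio.h>

/* Host-side model of how a network-order MAC address is split across the
 * little-endian RAL/RAH register pair (low 16 bits of 'high16' only). */
static void pack_rar(const uint8_t addr[6], uint32_t *low, uint32_t *high16)
{
	*low = (uint32_t)addr[0] |
	       ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[3] << 24);
	*high16 = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t low, high16;

	pack_rar(mac, &low, &high16);
	printf("RAL = 0x%08x, RAH[15:0] = 0x%04x\n", low, high16);
	return 0;
}
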
 
 /**
- *  ixgbe_init_rx_addrs - Initializes receive address filters.
+ *  ixgbe_clear_rar_generic - Remove Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index < rar_entries) {
+		/*
+		 * Some parts put the VMDq setting in the extra RAH bits,
+		 * so save everything except the lower 16 bits that hold part
+		 * of the address and the address valid bit.
+		 */
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+	}
+
+	/* clear VMDq pool/queue selection for this RAR */
+	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_enable_rar - Enable Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: index into the RAR table
+ *
+ *  Enables the select receive address register.
+ **/
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rar_high;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high |= IXGBE_RAH_AV;
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+}
+
+/**
+ *  ixgbe_disable_rar - Disable Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: index into the RAR table
+ *
+ *  Disables the select receive address register.
+ **/
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rar_high;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high &= (~IXGBE_RAH_AV);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+}
+
+/**
+ *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
  *  @hw: pointer to hardware structure
  *
  *  Places the MAC address in receive address register 0 and clears the rest
- *  of the receive addresss registers. Clears the multicast table. Assumes
+ *  of the receive address registers. Clears the multicast table. Assumes
  *  the receiver is in reset when the routine is called.
  **/
-static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
 {
 	u32 i;
-	u32 rar_entries = hw->mac.num_rx_addrs;
+	u32 rar_entries = hw->mac.num_rar_entries;
 
 	/*
 	 * If the current mac address is valid, assume it is a software override
@@ -671,29 +1133,30 @@
 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
 	    IXGBE_ERR_INVALID_MAC_ADDR) {
 		/* Get the MAC address from the RAR0 for later reference */
-		ixgbe_get_mac_addr(hw, hw->mac.addr);
+		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 
 		hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
-			  hw->mac.addr[0], hw->mac.addr[1],
-			  hw->mac.addr[2]);
+		       hw->mac.addr[0], hw->mac.addr[1],
+		       hw->mac.addr[2]);
 		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-			  hw->mac.addr[4], hw->mac.addr[5]);
+		       hw->mac.addr[4], hw->mac.addr[5]);
 	} else {
 		/* Setup the receive address. */
 		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
 		hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
-			  hw->mac.addr[0], hw->mac.addr[1],
-			  hw->mac.addr[2]);
+		       hw->mac.addr[0], hw->mac.addr[1],
+		       hw->mac.addr[2]);
 		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-			  hw->mac.addr[4], hw->mac.addr[5]);
+		       hw->mac.addr[4], hw->mac.addr[5]);
 
-		ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 	}
+	hw->addr_ctrl.overflow_promisc = 0;
 
 	hw->addr_ctrl.rar_used_count = 1;
 
 	/* Zero out the other receive addresses. */
-	hw_dbg(hw, "Clearing RAR[1-15]\n");
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
 	for (i = 1; i < rar_entries; i++) {
 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -705,9 +1168,113 @@
 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
 	hw_dbg(hw, " Clearing MTA\n");
-	for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
+	for (i = 0; i < hw->mac.mcft_size; i++)
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
 
+	if (hw->mac.ops.init_uta_tables)
+		hw->mac.ops.init_uta_tables(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_add_uc_addr - Adds a secondary unicast address.
+ *  @hw: pointer to hardware structure
+ *  @addr: new address
+ *
+ *  Adds it to unused receive address register or goes into promiscuous mode.
+ **/
+static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 rar;
+
+	hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+	          addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+	/*
+	 * Place this address in the RAR if there is room,
+	 * else put the controller into promiscuous mode
+	 */
+	if (hw->addr_ctrl.rar_used_count < rar_entries) {
+		rar = hw->addr_ctrl.rar_used_count -
+		      hw->addr_ctrl.mc_addr_in_rar_count;
+		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+		hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
+		hw->addr_ctrl.rar_used_count++;
+	} else {
+		hw->addr_ctrl.overflow_promisc++;
+	}
+
+	hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new addresses
+ *  @addr_count: number of addresses
+ *  @next: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list.  Clears the secondary addrs from
+ *  receive address registers.  Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ *  Drivers using secondary unicast addresses must set user_set_promisc when
+ *  manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                      u32 addr_count, ixgbe_mc_addr_itr next)
+{
+	u8 *addr;
+	u32 i;
+	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+	u32 uc_addr_in_use;
+	u32 fctrl;
+	u32 vmdq;
+
+	/*
+	 * Clear accounting of old secondary address list,
+	 * don't count RAR[0]
+	 */
+	uc_addr_in_use = hw->addr_ctrl.rar_used_count -
+	                 hw->addr_ctrl.mc_addr_in_rar_count - 1;
+	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	/* Zero out the other receive addresses */
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
+	for (i = 1; i <= uc_addr_in_use; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Add the new addresses */
+	for (i = 0; i < addr_count; i++) {
+		hw_dbg(hw, " Adding the secondary addresses:\n");
+		addr = next(hw, &addr_list, &vmdq);
+		ixgbe_add_uc_addr(hw, addr, vmdq);
+	}
+
+	if (hw->addr_ctrl.overflow_promisc) {
+		/* enable promisc if not already in overflow or set by user */
+		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			hw_dbg(hw, " Entering address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl |= IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	} else {
+		/* only disable if set by overflow, not by user */
+		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			hw_dbg(hw, " Leaving address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl &= ~IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	}
+
+	hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
 	return 0;
 }
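
The next argument is the list-walking callback (the ixgbe_mc_addr_itr typedef); from the call above it is expected to return a pointer to the current 6-byte address, advance the caller's cursor, and report the VMDq pool for that entry. A hypothetical callback for a packed array of addresses, written to match that inferred prototype rather than the driver's real netdev-list walkers:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical iterator matching the usage above: *addr_list points at a
 * packed array of 6-byte addresses; return the current one, advance the
 * cursor, and report no VMDq pool (0).
 */
static uint8_t *packed_addr_itr(void *hw, uint8_t **addr_list, uint32_t *vmdq)
{
	uint8_t *addr = *addr_list;

	(void)hw;			/* unused in this host-side model */
	*addr_list += 6;		/* ETH_ALEN */
	*vmdq = 0;
	return addr;
}

int main(void)
{
	uint8_t list[12] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
	                     0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB };
	uint8_t *cursor = list;
	uint32_t vmdq;
	int i;

	for (i = 0; i < 2; i++) {
		uint8_t *a = packed_addr_itr(NULL, &cursor, &vmdq);

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       a[0], a[1], a[2], a[3], a[4], a[5]);
	}
	return 0;
}
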
 
@@ -720,7 +1287,7 @@
  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
  *  incoming rx multicast addresses, to determine the bit-vector to check in
  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
- *  by the MO field of the MCSTCTRL. The MO field is set during initalization
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
  *  to mc_filter_type.
  **/
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
@@ -728,19 +1295,19 @@
 	u32 vector = 0;
 
 	switch (hw->mac.mc_filter_type) {
-	case 0:	  /* use bits [47:36] of the address */
+	case 0:   /* use bits [47:36] of the address */
 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 		break;
-	case 1:	  /* use bits [46:35] of the address */
+	case 1:   /* use bits [46:35] of the address */
 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 		break;
-	case 2:	  /* use bits [45:34] of the address */
+	case 2:   /* use bits [45:34] of the address */
 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 		break;
-	case 3:	  /* use bits [43:32] of the address */
+	case 3:   /* use bits [43:32] of the address */
 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 		break;
-	default:	 /* Invalid mc_filter_type */
+	default:  /* Invalid mc_filter_type */
 		hw_dbg(hw, "MC filter type param set incorrectly\n");
 		break;
 	}
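
A worked value makes the filter types concrete: with mc_filter_type 0 and the address 01:00:5E:00:00:01, the vector is (mc_addr[4] >> 4) | (mc_addr[5] << 4) = 0x010. A host-side copy of the selection logic for experimenting with the four MO settings; the final mask keeps the 12 significant bits described in the comment above (the driver's own post-switch handling is outside this hunk):

#include <stdint.h>
#include <stdio.h>

/* Host-side copy of the vector selection above; mc_addr is the 6-byte
 * multicast address, filter_type the MO field value (0-3). */
static uint16_t mta_vector(const uint8_t *mc_addr, int filter_type)
{
	uint16_t vector = 0;

	switch (filter_type) {
	case 0:		/* bits [47:36] */
		vector = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);
		break;
	case 1:		/* bits [46:35] */
		vector = (mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5);
		break;
	case 2:		/* bits [45:34] */
		vector = (mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6);
		break;
	case 3:		/* bits [43:32] */
		vector = mc_addr[4] | ((uint16_t)mc_addr[5] << 8);
		break;
	}
	return vector & 0xFFF;		/* 12 significant bits */
}

int main(void)
{
	uint8_t mc[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };

	printf("vector = 0x%03x\n", mta_vector(mc, 0));
	return 0;
}
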
@@ -794,21 +1361,22 @@
  **/
 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
 {
-	u32 rar_entries = hw->mac.num_rx_addrs;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 rar;
 
 	hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
-		  mc_addr[0], mc_addr[1], mc_addr[2],
-		  mc_addr[3], mc_addr[4], mc_addr[5]);
+	       mc_addr[0], mc_addr[1], mc_addr[2],
+	       mc_addr[3], mc_addr[4], mc_addr[5]);
 
 	/*
 	 * Place this multicast address in the RAR if there is room,
 	 * else put it in the MTA
 	 */
 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
-		ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count,
-			      mc_addr, 0, IXGBE_RAH_AV);
-		hw_dbg(hw, "Added a multicast address to RAR[%d]\n",
-			  hw->addr_ctrl.rar_used_count);
+		/* use RAR from the end up for multicast */
+		rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
+		hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
+		hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
 		hw->addr_ctrl.rar_used_count++;
 		hw->addr_ctrl.mc_addr_in_rar_count++;
 	} else {
@@ -819,22 +1387,23 @@
 }
 
 /**
- *  ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses
+ *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
  *  @mc_addr_list: the list of new multicast addresses
  *  @mc_addr_count: number of addresses
- *  @pad: number of bytes between addresses in the list
+ *  @next: iterator function to walk the multicast address list
  *
  *  The given list replaces any existing list. Clears the MC addrs from receive
- *  address registers and the multicast table. Uses unsed receive address
+ *  address registers and the multicast table. Uses unused receive address
  *  registers for the first multicast addresses, and hashes the rest into the
  *  multicast table.
  **/
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
-			      u32 mc_addr_count, u32 pad)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                      u32 mc_addr_count, ixgbe_mc_addr_itr next)
 {
 	u32 i;
-	u32 rar_entries = hw->mac.num_rx_addrs;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 vmdq;
 
 	/*
 	 * Set the new number of MC addresses that we are being requested to
@@ -846,7 +1415,8 @@
 	hw->addr_ctrl.mta_in_use = 0;
 
 	/* Zero out the other receive addresses. */
-	hw_dbg(hw, "Clearing RAR[1-15]\n");
+	hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
+	          rar_entries - 1);
 	for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -854,186 +1424,67 @@
 
 	/* Clear the MTA */
 	hw_dbg(hw, " Clearing MTA\n");
-	for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
+	for (i = 0; i < hw->mac.mcft_size; i++)
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
 
 	/* Add the new addresses */
 	for (i = 0; i < mc_addr_count; i++) {
 		hw_dbg(hw, " Adding the multicast addresses:\n");
-		ixgbe_add_mc_addr(hw, mc_addr_list +
-				  (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
+		ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
 	}
 
 	/* Enable mta */
 	if (hw->addr_ctrl.mta_in_use > 0)
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
-				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 
-	hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n");
+	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
 	return 0;
 }
 
 /**
- *  ixgbe_clear_vfta - Clear VLAN filter table
+ *  ixgbe_enable_mc_generic - Enable multicast address in RAR
  *  @hw: pointer to hardware structure
  *
- *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ *  Enables multicast address in RAR and the use of the multicast hash table.
  **/
-static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 {
-	u32 offset;
-	u32 vlanbyte;
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-	for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+	if (a->mc_addr_in_rar_count > 0)
+		for (i = (rar_entries - a->mc_addr_in_rar_count);
+		     i < rar_entries; i++)
+			ixgbe_enable_rar(hw, i);
 
-	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
-		for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
-			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-					0);
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+		                hw->mac.mc_filter_type);
 
 	return 0;
 }
 
 /**
- *  ixgbe_set_vfta - Set VLAN filter table
+ *  ixgbe_disable_mc_generic - Disable multicast address in RAR
  *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VFTA
- *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
  *
- *  Turn on/off specified VLAN in the VLAN filter table.
+ *  Disables multicast address in RAR and the use of the multicast hash table.
  **/
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-		   bool vlan_on)
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 {
-	u32 VftaIndex;
-	u32 BitOffset;
-	u32 VftaReg;
-	u32 VftaByte;
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-	/* Determine 32-bit word position in array */
-	VftaIndex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+	if (a->mc_addr_in_rar_count > 0)
+		for (i = (rar_entries - a->mc_addr_in_rar_count);
+		     i < rar_entries; i++)
+			ixgbe_disable_rar(hw, i);
 
-	/* Determine the location of the (VMD) queue index */
-	VftaByte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
-	BitOffset = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
-
-	/* Set the nibble for VMD queue index */
-	VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
-	VftaReg &= (~(0x0F << BitOffset));
-	VftaReg |= (vind << BitOffset);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
-
-	/* Determine the location of the bit for this VLAN id */
-	BitOffset = vlan & 0x1F;	   /* lower five bits */
-
-	VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
-	if (vlan_on)
-		/* Turn on this VLAN id */
-		VftaReg |= (1 << BitOffset);
-	else
-		/* Turn off this VLAN id */
-		VftaReg &= ~(1 << BitOffset);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
-
-	return 0;
-}
-
-/**
- *  ixgbe_setup_fc - Configure flow control settings
- *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
- *
- *  Configures the flow control settings based on SW configuration.
- *  This function is used for 802.3x flow control configuration only.
- **/
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
-{
-	u32 frctl_reg;
-	u32 rmcs_reg;
-
-	if (packetbuf_num < 0 || packetbuf_num > 7)
-		hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
-		       "is 0-7\n", packetbuf_num);
-
-	frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
-
-	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
-
-	/*
-	 * We want to save off the original Flow Control configuration just in
-	 * case we get disconnected and then reconnected into a different hub
-	 * or switch with different Flow Control capabilities.
-	 */
-	hw->fc.type = hw->fc.original_type;
-
-	/*
-	 * The possible values of the "flow_control" parameter are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames but not
-	 *    send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but we do not
-	 *    support receiving pause frames)
-	 * 3: Both Rx and TX flow control (symmetric) are enabled.
-	 * other: Invalid.
-	 */
-	switch (hw->fc.type) {
-	case ixgbe_fc_none:
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * RX Flow control is enabled,
-		 * and TX Flow control is disabled.
-		 */
-		frctl_reg |= IXGBE_FCTRL_RFCE;
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * TX Flow control is enabled, and RX Flow control is disabled,
-		 * by a software over-ride.
-		 */
-		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
-		break;
-	case ixgbe_fc_full:
-		/*
-		 * Flow control (both RX and TX) is enabled by a software
-		 * over-ride.
-		 */
-		frctl_reg |= IXGBE_FCTRL_RFCE;
-		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
-		break;
-	default:
-		/* We should never get here.  The value should be 0-3. */
-		hw_dbg(hw, "Flow control param set incorrectly\n");
-		break;
-	}
-
-	/* Enable 802.3x based flow control settings. */
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
-	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
-
-	/*
-	 * We need to set up the Receive Threshold high and low water
-	 * marks as well as (optionally) enabling the transmission of
-	 * XON frames.
-	 */
-	if (hw->fc.type & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-					(hw->fc.low_water | IXGBE_FCRTL_XONE));
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-					hw->fc.low_water);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-				(hw->fc.high_water)|IXGBE_FCRTH_FCEN);
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
 	return 0;
 }
@@ -1049,13 +1500,24 @@
  **/
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
-	u32 ctrl;
-	s32 i;
+	u32 i;
+	u32 reg_val;
+	u32 number_of_queues;
 	s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
 
-	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	ctrl |= IXGBE_CTRL_GIO_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	/* Disable the receive unit by stopping each queue */
+	number_of_queues = hw->mac.max_rx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+		if (reg_val & IXGBE_RXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_RXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+		}
+	}
+
+	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	reg_val |= IXGBE_CTRL_GIO_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
 
 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
@@ -1070,11 +1532,11 @@
 
 
 /**
- *  ixgbe_acquire_swfw_sync - Aquire SWFW semaphore
+ *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
  *  @hw: pointer to hardware structure
- *  @mask: Mask to specify wich semaphore to acquire
+ *  @mask: Mask to specify which semaphore to acquire
  *
- *  Aquires the SWFW semaphore throught the GSSR register for the specified
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1116,9 +1578,9 @@
 /**
  *  ixgbe_release_swfw_sync - Release SWFW semaphore
  *  @hw: pointer to hardware structure
- *  @mask: Mask to specify wich semaphore to release
+ *  @mask: Mask to specify which semaphore to release
  *
- *  Releases the SWFW semaphore throught the GSSR register for the specified
+ *  Releases the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1135,45 +1597,3 @@
 	ixgbe_release_eeprom_semaphore(hw);
 }
 
-/**
- *  ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to read
- *  @val: read value
- *
- *  Performs write operation to analog register specified.
- **/
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
-	u32  atlas_ctl;
-
-	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
-	IXGBE_WRITE_FLUSH(hw);
-	udelay(10);
-	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
-	*val = (u8)atlas_ctl;
-
-	return 0;
-}
-
-/**
- *  ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
- *  @hw: pointer to hardware structure
- *  @reg: atlas register to write
- *  @val: value to write
- *
- *  Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
-	u32  atlas_ctl;
-
-	atlas_ctl = (reg << 8) | val;
-	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
-	IXGBE_WRITE_FLUSH(hw);
-	udelay(10);
-
-	return 0;
-}
-
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index de6ddd5..192f8d0 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -31,34 +30,45 @@
 
 #include "ixgbe_type.h"
 
-s32 ixgbe_init_hw(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
-s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
 
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
 
-s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                       u16 *data);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+                                           u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
 
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
-		  u32 enable_addr);
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
-			      u32 mc_addr_count, u32 pad);
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                          u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                      u32 mc_addr_count,
+                                      ixgbe_mc_addr_itr func);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                      u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
-
-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
-
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
 
 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
 
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3efe5dd..81a9c4b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -48,7 +47,7 @@
 };
 
 #define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
-		      offsetof(struct ixgbe_adapter, m)
+                             offsetof(struct ixgbe_adapter, m)
 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
 	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
@@ -95,14 +94,15 @@
 };
 
 #define IXGBE_QUEUE_STATS_LEN \
-		((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
-		 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
-		 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+                ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
+                 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
+                 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
 
 static int ixgbe_get_settings(struct net_device *netdev,
-			      struct ethtool_cmd *ecmd)
+                              struct ethtool_cmd *ecmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -114,7 +114,7 @@
 	ecmd->transceiver = XCVR_EXTERNAL;
 	if (hw->phy.media_type == ixgbe_media_type_copper) {
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
-				    SUPPORTED_TP | SUPPORTED_Autoneg);
+		                    SUPPORTED_TP | SUPPORTED_Autoneg);
 
 		ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -126,14 +126,15 @@
 	} else {
 		ecmd->supported |= SUPPORTED_FIBRE;
 		ecmd->advertising = (ADVERTISED_10000baseT_Full |
-				     ADVERTISED_FIBRE);
+		                     ADVERTISED_FIBRE);
 		ecmd->port = PORT_FIBRE;
+		ecmd->autoneg = AUTONEG_DISABLE;
 	}
 
-	adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 	if (link_up) {
 		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-				SPEED_10000 : SPEED_1000;
+		               SPEED_10000 : SPEED_1000;
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
 		ecmd->speed = -1;
@@ -144,7 +145,7 @@
 }
 
 static int ixgbe_set_settings(struct net_device *netdev,
-			      struct ethtool_cmd *ecmd)
+                              struct ethtool_cmd *ecmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -164,7 +165,7 @@
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
-				 struct ethtool_pauseparam *pause)
+                                 struct ethtool_pauseparam *pause)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -182,7 +183,7 @@
 }
 
 static int ixgbe_set_pauseparam(struct net_device *netdev,
-				struct ethtool_pauseparam *pause)
+                                struct ethtool_pauseparam *pause)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -233,15 +234,15 @@
 
 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 {
-	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+	return (netdev->features & NETIF_F_IP_CSUM) != 0;
 }
 
 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	if (data)
-		netdev->features |= NETIF_F_HW_CSUM;
+		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 	else
-		netdev->features &= ~NETIF_F_HW_CSUM;
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 
 	return 0;
 }
@@ -281,7 +282,7 @@
 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
 
 static void ixgbe_get_regs(struct net_device *netdev,
-			   struct ethtool_regs *regs, void *p)
+                           struct ethtool_regs *regs, void *p)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -315,7 +316,9 @@
 	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
 
 	/* Interrupt */
-	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
+	/* don't read EICR because it can clear interrupt causes, instead
+	 * read EICS which is a shadow but doesn't clear EICR */
+	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
 	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
 	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
 	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
@@ -325,7 +328,7 @@
 	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
 	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
 	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
-	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
+	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
 	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
 
 	/* Flow Control */
@@ -371,7 +374,7 @@
 		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
 	for (i = 0; i < 16; i++)
 		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
-	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
+	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
 	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
@@ -419,7 +422,6 @@
 	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
 
-	/* DCE */
 	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
 	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -539,21 +541,17 @@
 	/* Diagnostic */
 	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
 	for (i = 0; i < 8; i++)
-		regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
+		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
 	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
-	regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
-	regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
-	regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
-	regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
+	for (i = 0; i < 4; i++)
+		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
 	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
 	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
 	for (i = 0; i < 8; i++)
-		regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
+		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
 	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
-	regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
-	regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
-	regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
-	regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
+	for (i = 0; i < 4; i++)
+		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
 	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
 	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
 	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
@@ -566,7 +564,7 @@
 	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
 	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
 	for (i = 0; i < 8; i++)
-		regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
+		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
 	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
 	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
 	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
@@ -585,7 +583,7 @@
 }
 
 static int ixgbe_get_eeprom(struct net_device *netdev,
-			    struct ethtool_eeprom *eeprom, u8 *bytes)
+                            struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -608,8 +606,8 @@
 		return -ENOMEM;
 
 	for (i = 0; i < eeprom_len; i++) {
-		if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
-						 &eeprom_buff[i])))
+		if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
+		    &eeprom_buff[i])))
 			break;
 	}
 
@@ -624,7 +622,7 @@
 }
 
 static void ixgbe_get_drvinfo(struct net_device *netdev,
-			      struct ethtool_drvinfo *drvinfo)
+                              struct ethtool_drvinfo *drvinfo)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -637,7 +635,7 @@
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
-				struct ethtool_ringparam *ring)
+                                struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring = adapter->tx_ring;
@@ -654,15 +652,12 @@
 }
 
 static int ixgbe_set_ringparam(struct net_device *netdev,
-			       struct ethtool_ringparam *ring)
+                               struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_tx_buffer *old_buf;
-	struct ixgbe_rx_buffer *old_rx_buf;
-	void *old_desc;
+	struct ixgbe_ring *temp_ring;
 	int i, err;
-	u32 new_rx_count, new_tx_count, old_size;
-	dma_addr_t old_dma;
+	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -681,6 +676,15 @@
 		return 0;
 	}
 
+	if (adapter->num_tx_queues > adapter->num_rx_queues)
+		temp_ring = vmalloc(adapter->num_tx_queues *
+		                    sizeof(struct ixgbe_ring));
+	else
+		temp_ring = vmalloc(adapter->num_rx_queues *
+		                    sizeof(struct ixgbe_ring));
+	if (!temp_ring)
+		return -ENOMEM;
+
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 
@@ -693,66 +697,61 @@
 	 * to the tx and rx ring structs.
 	 */
 	if (new_tx_count != adapter->tx_ring->count) {
+		memcpy(temp_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			/* Save existing descriptor ring */
-			old_buf = adapter->tx_ring[i].tx_buffer_info;
-			old_desc = adapter->tx_ring[i].desc;
-			old_size = adapter->tx_ring[i].size;
-			old_dma = adapter->tx_ring[i].dma;
-			/* Try to allocate a new one */
-			adapter->tx_ring[i].tx_buffer_info = NULL;
-			adapter->tx_ring[i].desc = NULL;
-			adapter->tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter,
-						       &adapter->tx_ring[i]);
+			temp_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
 			if (err) {
-				/* Restore the old one so at least
-				   the adapter still works, even if
-				   we failed the request */
-				adapter->tx_ring[i].tx_buffer_info = old_buf;
-				adapter->tx_ring[i].desc = old_desc;
-				adapter->tx_ring[i].size = old_size;
-				adapter->tx_ring[i].dma = old_dma;
+				while (i) {
+					i--;
+					ixgbe_free_tx_resources(adapter,
+					                        &temp_ring[i]);
+				}
 				goto err_setup;
 			}
-			/* Free the old buffer manually */
-			vfree(old_buf);
-			pci_free_consistent(adapter->pdev, old_size,
-					    old_desc, old_dma);
 		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+
+		memcpy(adapter->tx_ring, temp_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+
+		adapter->tx_ring_count = new_tx_count;
 	}
 
 	if (new_rx_count != adapter->rx_ring->count) {
+		memcpy(temp_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-
-			old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
-			old_desc = adapter->rx_ring[i].desc;
-			old_size = adapter->rx_ring[i].size;
-			old_dma = adapter->rx_ring[i].dma;
-
-			adapter->rx_ring[i].rx_buffer_info = NULL;
-			adapter->rx_ring[i].desc = NULL;
-			adapter->rx_ring[i].dma = 0;
-			adapter->rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter,
-						       &adapter->rx_ring[i]);
+			temp_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
 			if (err) {
-				adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
-				adapter->rx_ring[i].desc = old_desc;
-				adapter->rx_ring[i].size = old_size;
-				adapter->rx_ring[i].dma = old_dma;
+				while (i) {
+					i--;
+					ixgbe_free_rx_resources(adapter,
+					                        &temp_ring[i]);
+				}
 				goto err_setup;
 			}
-
-			vfree(old_rx_buf);
-			pci_free_consistent(adapter->pdev, old_size, old_desc,
-					    old_dma);
 		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+
+		memcpy(adapter->rx_ring, temp_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+
+		adapter->rx_ring_count = new_rx_count;
 	}
 
+	/* success! */
 	err = 0;
 err_setup:
-	if (netif_running(adapter->netdev))
+	if (netif_running(netdev))
 		ixgbe_up(adapter);
 
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
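
The rework above stages the resized rings in a scratch array and only frees and replaces the live rings once every allocation has succeeded, so a failure part-way through leaves the adapter's existing rings untouched. A generic userspace sketch of that all-or-nothing resize pattern, using a hypothetical buffer type rather than ixgbe_ring:

#include <stdlib.h>
#include <string.h>

struct buf {
	void *data;
	size_t count;
};

static int buf_alloc(struct buf *b, size_t count)
{
	b->data = calloc(count, 1);
	if (!b->data)
		return -1;
	b->count = count;
	return 0;
}

static void buf_free(struct buf *b)
{
	free(b->data);
	b->data = NULL;
	b->count = 0;
}

/*
 * Resize 'n' live buffers to 'new_count': copy the descriptors into a
 * staging array (carrying over the non-resource fields, as the driver
 * does with its ring structs), allocate everything there, and swap only
 * after every allocation succeeded.  On failure the live set is untouched.
 */
static int resize_all(struct buf *live, int n, size_t new_count)
{
	struct buf *temp = malloc(n * sizeof(*temp));
	int i;

	if (!temp)
		return -1;
	memcpy(temp, live, n * sizeof(*temp));

	for (i = 0; i < n; i++) {
		if (buf_alloc(&temp[i], new_count)) {
			while (i--)
				buf_free(&temp[i]);	/* roll back staging */
			free(temp);
			return -1;
		}
	}

	for (i = 0; i < n; i++)
		buf_free(&live[i]);			/* drop old resources */
	memcpy(live, temp, n * sizeof(*temp));
	free(temp);
	return 0;
}

int main(void)
{
	struct buf rings[2] = { { 0 } };
	int err = resize_all(rings, 2, 4096);

	buf_free(&rings[0]);
	buf_free(&rings[1]);
	return err ? 1 : 0;
}
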
@@ -770,7 +769,7 @@
 }
 
 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
-				    struct ethtool_stats *stats, u64 *data)
+                                    struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u64 *queue_stat;
@@ -778,12 +777,20 @@
 	int j, k;
 	int i;
 	u64 aggregated = 0, flushed = 0, no_desc = 0;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
+		flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
+		no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
+	}
+	adapter->lro_aggregated = aggregated;
+	adapter->lro_flushed = flushed;
+	adapter->lro_no_desc = no_desc;
 
 	ixgbe_update_stats(adapter);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
 		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
-			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
 		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
@@ -792,24 +799,18 @@
 		i += k;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
-		flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
-		no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
 		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
-	adapter->lro_aggregated = aggregated;
-	adapter->lro_flushed = flushed;
-	adapter->lro_no_desc = no_desc;
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
-			      u8 *data)
+                              u8 *data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u8 *p = data;
+	char *p = (char *)data;
 	int i;
 
 	switch (stringset) {
@@ -831,14 +832,14 @@
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-/*		BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
 }
 
 
 static void ixgbe_get_wol(struct net_device *netdev,
-			  struct ethtool_wolinfo *wol)
+                          struct ethtool_wolinfo *wol)
 {
 	wol->supported = 0;
 	wol->wolopts = 0;
@@ -859,16 +860,17 @@
 static int ixgbe_phys_id(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 	u32 i;
 
 	if (!data || data > 300)
 		data = 300;
 
 	for (i = 0; i < (data * 1000); i += 400) {
-		ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
+		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
 		msleep_interruptible(200);
-		ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
+		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
 		msleep_interruptible(200);
 	}
 
@@ -879,67 +881,75 @@
 }
 
 static int ixgbe_get_coalesce(struct net_device *netdev,
-			      struct ethtool_coalesce *ec)
+                              struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
-		ec->rx_coalesce_usecs = adapter->rx_eitr;
-	else
-		ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
-
-	if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
-		ec->tx_coalesce_usecs = adapter->tx_eitr;
-	else
-		ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
-
 	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+
+	/* only valid if in constant ITR mode */
+	switch (adapter->itr_setting) {
+	case 0:
+		/* throttling disabled */
+		ec->rx_coalesce_usecs = 0;
+		break;
+	case 1:
+		/* dynamic ITR mode */
+		ec->rx_coalesce_usecs = 1;
+		break;
+	default:
+		/* fixed interrupt rate mode */
+		ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
+		break;
+	}
 	return 0;
 }
 
 static int ixgbe_set_coalesce(struct net_device *netdev,
-			      struct ethtool_coalesce *ec)
+                              struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-	if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
-	    ((ec->rx_coalesce_usecs != 0) &&
-	     (ec->rx_coalesce_usecs != 1) &&
-	     (ec->rx_coalesce_usecs != 3) &&
-	     (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
-		return -EINVAL;
-	if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
-	    ((ec->tx_coalesce_usecs != 0) &&
-	     (ec->tx_coalesce_usecs != 1) &&
-	     (ec->tx_coalesce_usecs != 3) &&
-	     (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
-		return -EINVAL;
-
-	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
-		adapter->rx_eitr = ec->rx_coalesce_usecs;
-	else
-		adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
-
-	if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
-		adapter->tx_eitr = ec->rx_coalesce_usecs;
-	else
-		adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0].work_limit =
-					ec->tx_max_coalesced_frames_irq;
+		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
 
-	if (netif_running(netdev)) {
-		ixgbe_down(adapter);
-		ixgbe_up(adapter);
+	if (ec->rx_coalesce_usecs > 1) {
+		/* store the value in ints/second */
+		adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+
+		/* static value of interrupt rate */
+		adapter->itr_setting = adapter->eitr_param;
+		/* clear the low bit so the static value is never mistaken for dynamic mode */
+		adapter->itr_setting &= ~1;
+	} else if (ec->rx_coalesce_usecs == 1) {
+		/* 1 means dynamic mode */
+		adapter->eitr_param = 20000;
+		adapter->itr_setting = 1;
+	} else {
+		/* any other value means disable eitr, which is best
+		 * served by setting the interrupt rate very high */
+		adapter->eitr_param = 3000000;
+		adapter->itr_setting = 0;
+	}
+
+	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = (adapter->eitr_param >> 1);
+		else
+			/* rx only or mixed */
+			q_vector->eitr = adapter->eitr_param;
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
+		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
 	}
 
 	return 0;
 }
 
 
-static struct ethtool_ops ixgbe_ethtool_ops = {
+static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.get_settings           = ixgbe_get_settings,
 	.set_settings           = ixgbe_set_settings,
 	.get_drvinfo            = ixgbe_get_drvinfo,
@@ -966,7 +976,7 @@
 	.set_tso                = ixgbe_set_tso,
 	.get_strings            = ixgbe_get_strings,
 	.phys_id                = ixgbe_phys_id,
-	.get_sset_count		= ixgbe_get_sset_count,
+	.get_sset_count         = ixgbe_get_sset_count,
 	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
 	.get_coalesce           = ixgbe_get_coalesce,
 	.set_coalesce           = ixgbe_set_coalesce,
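
The reworked coalescing hooks above fold three modes into ethtool's rx_coalesce_usecs: 0 disables throttling (realized as a very high interrupt rate), 1 selects dynamic ITR, and anything larger is a fixed interval converted to interrupts per second. A minimal sketch of that mapping, with a hypothetical helper name; the driver itself open-codes this inside ixgbe_set_coalesce.

/* Hypothetical helper, for illustration only: translate the ethtool value
 * into the itr_setting/eitr_param pair programmed by ixgbe_set_coalesce. */
static void sketch_itr_from_usecs(u32 usecs, u32 *itr_setting, u32 *eitr_param)
{
	if (usecs > 1) {
		*eitr_param = 1000000 / usecs;   /* fixed rate in ints/sec */
		*itr_setting = *eitr_param & ~1; /* low bit cleared: static */
	} else if (usecs == 1) {
		*eitr_param = 20000;             /* starting rate */
		*itr_setting = 1;                /* dynamic ITR */
	} else {
		*eitr_param = 3000000;           /* effectively unthrottled */
		*itr_setting = 0;
	}
}

ixgbe_get_coalesce reverses the same encoding, which is why it reports 0, 1, or 1000000/eitr_param rather than a stored microsecond value.
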
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a417be7..ca17af4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -46,15 +45,14 @@
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
-	"Intel(R) 10 Gigabit PCI Express Network Driver";
+                              "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "1.3.18-k4"
+#define DRV_VERSION "1.3.30-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
-	 "Copyright (c) 1999-2007 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
-	[board_82598]			= &ixgbe_82598_info,
+	[board_82598] = &ixgbe_82598_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -74,15 +72,17 @@
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
 	 board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
+	 board_82598 },
 
 	/* required last entry */
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
-			    void *p);
+                            void *p);
 static struct notifier_block dca_notifier = {
 	.notifier_call = ixgbe_notify_dca,
 	.next          = NULL,
@@ -104,7 +104,7 @@
 	/* Let firmware take over control of h/w */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -114,24 +114,11 @@
 	/* Let firmware know the driver has taken over */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
-#ifdef DEBUG
-/**
- * ixgbe_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
-{
-	struct ixgbe_adapter *adapter = hw->back;
-	struct net_device *netdev = adapter->netdev;
-	return netdev->name;
-}
-#endif
-
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
-			   u8 msix_vector)
+                           u8 msix_vector)
 {
 	u32 ivar, index;
 
@@ -144,13 +131,12 @@
 }
 
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-					     struct ixgbe_tx_buffer
-					     *tx_buffer_info)
+                                             struct ixgbe_tx_buffer
+                                             *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
-		pci_unmap_page(adapter->pdev,
-			       tx_buffer_info->dma,
-			       tx_buffer_info->length, PCI_DMA_TODEVICE);
+		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
+		               tx_buffer_info->length, PCI_DMA_TODEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -161,107 +147,120 @@
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-				       struct ixgbe_ring *tx_ring,
-				       unsigned int eop,
-				       union ixgbe_adv_tx_desc *eop_desc)
+                                       struct ixgbe_ring *tx_ring,
+                                       unsigned int eop)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 head, tail;
+
 	/* Detect a transmit hang in hardware, this serializes the
-	 * check with the clearing of time_stamp and movement of i */
+	 * check with the clearing of time_stamp and movement of eop */
+	head = IXGBE_READ_REG(hw, tx_ring->head);
+	tail = IXGBE_READ_REG(hw, tx_ring->tail);
 	adapter->detect_tx_hung = false;
-	if (tx_ring->tx_buffer_info[eop].dma &&
+	if ((head != tail) &&
+	    tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
 		/* detected Tx unit hang */
+		union ixgbe_adv_tx_desc *tx_desc;
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-			"  TDH                  <%x>\n"
-			"  TDT                  <%x>\n"
+			"  Tx Queue             <%d>\n"
+			"  TDH, TDT             <%x>, <%x>\n"
 			"  next_to_use          <%x>\n"
 			"  next_to_clean        <%x>\n"
 			"tx_buffer_info[next_to_clean]\n"
 			"  time_stamp           <%lx>\n"
-			"  next_to_watch        <%x>\n"
-			"  jiffies              <%lx>\n"
-			"  next_to_watch.status <%x>\n",
-			readl(adapter->hw.hw_addr + tx_ring->head),
-			readl(adapter->hw.hw_addr + tx_ring->tail),
-			tx_ring->next_to_use,
-			tx_ring->next_to_clean,
-			tx_ring->tx_buffer_info[eop].time_stamp,
-			eop, jiffies, eop_desc->wb.status);
+			"  jiffies              <%lx>\n",
+			tx_ring->queue_index,
+			head, tail,
+			tx_ring->next_to_use, eop,
+			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 		return true;
 	}
 
 	return false;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_TXD_PWR       14
+#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
 			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
+#define GET_TX_HEAD_FROM_RING(ring) (\
+	*(volatile u32 *) \
+	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
+static void ixgbe_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
+ * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *tx_ring)
+                               struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = adapter->netdev;
-	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int i, eop;
-	bool cleaned = false;
-	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+	struct net_device *netdev = adapter->netdev;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 head, oldhead;
+	unsigned int count = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
 
+	rmb();
+	head = GET_TX_HEAD_FROM_RING(tx_ring);
+	head = le32_to_cpu(head);
 	i = tx_ring->next_to_clean;
-	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-		cleaned = false;
-		while (!cleaned) {
+	while (1) {
+		while (i != head) {
 			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			cleaned = (i == eop);
+			skb = tx_buffer_info->skb;
 
-			tx_ring->stats.bytes += tx_buffer_info->length;
-			if (cleaned) {
-				struct sk_buff *skb = tx_buffer_info->skb;
+			if (skb) {
 				unsigned int segs, bytecount;
+
+				/* gso_segs is currently only valid for tcp */
 				segs = skb_shinfo(skb)->gso_segs ?: 1;
 				/* multiply data chunks by size of headers */
 				bytecount = ((segs - 1) * skb_headlen(skb)) +
-					    skb->len;
-				total_tx_packets += segs;
-				total_tx_bytes += bytecount;
+				            skb->len;
+				total_packets += segs;
+				total_bytes += bytecount;
 			}
+
 			ixgbe_unmap_and_free_tx_resource(adapter,
-							 tx_buffer_info);
-			tx_desc->wb.status = 0;
+			                                 tx_buffer_info);
 
 			i++;
 			if (i == tx_ring->count)
 				i = 0;
+
+			count++;
+			if (count == tx_ring->count)
+				goto done_cleaning;
 		}
+		oldhead = head;
+		rmb();
+		head = GET_TX_HEAD_FROM_RING(tx_ring);
+		head = le32_to_cpu(head);
+		if (head == oldhead)
+			goto done_cleaning;
+	} /* while (1) */
 
-		tx_ring->stats.packets++;
-
-		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-
-		/* weight of a sort for tx, avoid endless transmit cleanup */
-		if (total_tx_packets >= tx_ring->work_limit)
-			break;
-	}
-
+done_cleaning:
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-	if (total_tx_packets && netif_carrier_ok(netdev) &&
-	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+	if (unlikely(count && netif_carrier_ok(netdev) &&
+	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
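
The TXD_USE_COUNT()/DESC_NEEDED macros earlier in this hunk size the per-skb descriptor budget that TX_WAKE_THRESHOLD in this hunk is derived from. A worked example, where the page size and MAX_SKB_FRAGS values are assumptions (both are configuration dependent):

/* Worked example, assuming PAGE_SIZE = 4096 and MAX_SKB_FRAGS = 18:
 *   TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) = (16384 >> 14) + 0 = 1
 *   TXD_USE_COUNT(PAGE_SIZE)              = (4096 >> 14)  + 1 = 1
 *   DESC_NEEDED = 1 + 18 * 1 + 1 (context descriptor)        = 20
 * so TX_WAKE_THRESHOLD (2 * DESC_NEEDED) re-wakes the queue once roughly
 * 40 descriptors are free again. */
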
@@ -269,59 +268,68 @@
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			adapter->restart_queue++;
+			++adapter->restart_queue;
 		}
 	}
 
-	if (adapter->detect_tx_hung)
-		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
-			netif_stop_subqueue(netdev, tx_ring->queue_index);
+	if (adapter->detect_tx_hung) {
+		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+			/* schedule immediate reset if we believe we hung */
+			DPRINTK(PROBE, INFO,
+			        "tx hang %d detected, resetting adapter\n",
+			        adapter->tx_timeout_count + 1);
+			ixgbe_tx_timeout(adapter->netdev);
+		}
+	}
 
-	if (total_tx_packets >= tx_ring->work_limit)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+	/* re-arm the interrupt */
+	if ((total_packets >= tx_ring->work_limit) ||
+	    (count == tx_ring->count))
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
 
-	tx_ring->total_bytes += total_tx_bytes;
-	tx_ring->total_packets += total_tx_packets;
-	adapter->net_stats.tx_bytes += total_tx_bytes;
-	adapter->net_stats.tx_packets += total_tx_packets;
-	cleaned = total_tx_packets ? true : false;
-	return cleaned;
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	adapter->net_stats.tx_bytes += total_bytes;
+	adapter->net_stats.tx_packets += total_packets;
+	return (total_packets ? true : false);
 }
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rxr)
+                                struct ixgbe_ring *rx_ring)
 {
 	u32 rxctrl;
 	int cpu = get_cpu();
-	int q = rxr - adapter->rx_ring;
+	int q = rx_ring - adapter->rx_ring;
 
-	if (rxr->cpu != cpu) {
+	if (rx_ring->cpu != cpu) {
 		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
 		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-		rxctrl |= dca_get_tag(cpu);
+		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
-		rxr->cpu = cpu;
+		rx_ring->cpu = cpu;
 	}
 	put_cpu();
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *txr)
+                                struct ixgbe_ring *tx_ring)
 {
 	u32 txctrl;
 	int cpu = get_cpu();
-	int q = txr - adapter->tx_ring;
+	int q = tx_ring - adapter->tx_ring;
 
-	if (txr->cpu != cpu) {
+	if (tx_ring->cpu != cpu) {
 		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
 		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-		txctrl |= dca_get_tag(cpu);
+		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
-		txr->cpu = cpu;
+		tx_ring->cpu = cpu;
 	}
 	put_cpu();
 }
@@ -351,11 +359,14 @@
 
 	switch (event) {
 	case DCA_PROVIDER_ADD:
-		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+		/* if we're already enabled, don't do it again */
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			break;
 		/* Always use CB2 mode, difference is masked
 		 * in the CB driver. */
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 		if (dca_add_requester(dev) == 0) {
+			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 			ixgbe_setup_dca(adapter);
 			break;
 		}
@@ -372,7 +383,7 @@
 	return 0;
 }
 
-#endif /* CONFIG_DCA */
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
@@ -382,8 +393,8 @@
  * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
-			      struct sk_buff *skb, u8 status,
-			      struct ixgbe_ring *ring,
+                              struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
                               union ixgbe_adv_rx_desc *rx_desc)
 {
 	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -420,14 +431,12 @@
  * @skb: skb currently being received and modified
  **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
-					 u32 status_err,
-					 struct sk_buff *skb)
+                                     u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
-	/* Ignore Checksum bit is set, or rx csum disabled */
-	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
-	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+	/* Rx csum disabled */
+	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
 		return;
 
 	/* if IP and error */
@@ -455,37 +464,44 @@
  * @adapter: address of board private structure
  **/
 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-				       struct ixgbe_ring *rx_ring,
-				       int cleaned_count)
+                                   struct ixgbe_ring *rx_ring,
+                                   int cleaned_count)
 {
-	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
-	struct ixgbe_rx_buffer *rx_buffer_info;
-	struct sk_buff *skb;
+	struct ixgbe_rx_buffer *bi;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
+	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
 	i = rx_ring->next_to_use;
-	rx_buffer_info = &rx_ring->rx_buffer_info[i];
+	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
 		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
-		if (!rx_buffer_info->page &&
-				(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
-			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
-			if (!rx_buffer_info->page) {
-				adapter->alloc_rx_page_failed++;
-				goto no_buffers;
+		if (!bi->page_dma &&
+		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+			if (!bi->page) {
+				bi->page = alloc_page(GFP_ATOMIC);
+				if (!bi->page) {
+					adapter->alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+				bi->page_offset = 0;
+			} else {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= (PAGE_SIZE / 2);
 			}
-			rx_buffer_info->page_dma =
-			    pci_map_page(pdev, rx_buffer_info->page,
-					 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			bi->page_dma = pci_map_page(pdev, bi->page,
+			                            bi->page_offset,
+			                            (PAGE_SIZE / 2),
+			                            PCI_DMA_FROMDEVICE);
 		}
 
-		if (!rx_buffer_info->skb) {
-			skb = netdev_alloc_skb(netdev, bufsz);
+		if (!bi->skb) {
+			struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
+			                                       bufsz);
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
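
The refill path above now maps only half a page per buffer and flips bi->page_offset between the two halves; the matching receive path (in the ixgbe_clean_rx_irq hunk further down) keeps the page for reuse only when the other half can still be handed out. A compressed sketch of the two ends of that lifecycle, using the field names visible in this patch:

/* Refill side: reuse the idle half of an existing page, otherwise allocate. */
if (!bi->page) {
	bi->page = alloc_page(GFP_ATOMIC);
	bi->page_offset = 0;
} else {
	bi->page_offset ^= PAGE_SIZE / 2;        /* flip 0 <-> PAGE_SIZE/2 */
}
bi->page_dma = pci_map_page(pdev, bi->page, bi->page_offset,
                            PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);

/* Receive side: keep a reference only if the whole buffer fits in half a
 * page and the driver is the sole owner; otherwise let the stack free it. */
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
    (page_count(rx_buffer_info->page) != 1))
	rx_buffer_info->page = NULL;
else
	get_page(rx_buffer_info->page);
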
@@ -499,28 +515,25 @@
 			 */
 			skb_reserve(skb, NET_IP_ALIGN);
 
-			rx_buffer_info->skb = skb;
-			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
-							  bufsz,
-							  PCI_DMA_FROMDEVICE);
+			bi->skb = skb;
+			bi->dma = pci_map_single(pdev, skb->data, bufsz,
+			                         PCI_DMA_FROMDEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-			rx_desc->read.pkt_addr =
-			    cpu_to_le64(rx_buffer_info->page_dma);
-			rx_desc->read.hdr_addr =
-					cpu_to_le64(rx_buffer_info->dma);
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
-			rx_desc->read.pkt_addr =
-					cpu_to_le64(rx_buffer_info->dma);
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 		}
 
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+		bi = &rx_ring->rx_buffer_info[i];
 	}
+
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
@@ -538,46 +551,54 @@
 	}
 }
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
-			       struct ixgbe_ring *rx_ring,
-			       int *work_done, int work_to_do)
+static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
 {
-	struct net_device *netdev = adapter->netdev;
+	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+                               struct ixgbe_ring *rx_ring,
+                               int *work_done, int work_to_do)
+{
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
 	unsigned int i;
-	u32 upper_len, len, staterr;
+	u32 len, staterr;
 	u16 hdr_info;
 	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	upper_len = 0;
 	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 	while (staterr & IXGBE_RXD_STAT_DD) {
+		u32 upper_len = 0;
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-			hdr_info =
-			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
-			len =
-			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
+			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
+			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 			if (hdr_info & IXGBE_RXDADV_SPH)
 				adapter->rx_hdr_split++;
 			if (len > IXGBE_RX_HDR_SIZE)
 				len = IXGBE_RX_HDR_SIZE;
 			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-		} else
+		} else {
 			len = le16_to_cpu(rx_desc->wb.upper.length);
+		}
 
 		cleaned = true;
 		skb = rx_buffer_info->skb;
@@ -586,18 +607,25 @@
 
 		if (len && !skb_shinfo(skb)->nr_frags) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
-					 adapter->rx_buf_len + NET_IP_ALIGN,
-					 PCI_DMA_FROMDEVICE);
+			                 rx_ring->rx_buf_len + NET_IP_ALIGN,
+			                 PCI_DMA_FROMDEVICE);
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
 			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-					   rx_buffer_info->page, 0, upper_len);
-			rx_buffer_info->page = NULL;
+			                   rx_buffer_info->page,
+			                   rx_buffer_info->page_offset,
+			                   upper_len);
+
+			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+			    (page_count(rx_buffer_info->page) != 1))
+				rx_buffer_info->page = NULL;
+			else
+				get_page(rx_buffer_info->page);
 
 			skb->len += upper_len;
 			skb->data_len += upper_len;
@@ -620,6 +648,7 @@
 			rx_buffer_info->skb = next_buffer->skb;
 			rx_buffer_info->dma = next_buffer->dma;
 			next_buffer->skb = skb;
+			next_buffer->dma = 0;
 			adapter->non_eop_descs++;
 			goto next_desc;
 		}
@@ -635,9 +664,9 @@
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
-		skb->protocol = eth_type_trans(skb, netdev);
+		skb->protocol = eth_type_trans(skb, adapter->netdev);
 		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
-		netdev->last_rx = jiffies;
+		adapter->netdev->last_rx = jiffies;
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -666,9 +695,6 @@
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
-
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
 	adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -700,43 +726,43 @@
 		q_vector = &adapter->q_vector[v_idx];
 		/* XXX for_each_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
-				      adapter->num_rx_queues);
+		                       adapter->num_rx_queues);
 
 		for (i = 0; i < q_vector->rxr_count; i++) {
 			j = adapter->rx_ring[r_idx].reg_idx;
 			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
 			r_idx = find_next_bit(q_vector->rxr_idx,
-					      adapter->num_rx_queues,
-					      r_idx + 1);
+			                      adapter->num_rx_queues,
+			                      r_idx + 1);
 		}
 		r_idx = find_first_bit(q_vector->txr_idx,
-				       adapter->num_tx_queues);
+		                       adapter->num_tx_queues);
 
 		for (i = 0; i < q_vector->txr_count; i++) {
 			j = adapter->tx_ring[r_idx].reg_idx;
 			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
 			r_idx = find_next_bit(q_vector->txr_idx,
-					      adapter->num_tx_queues,
-					      r_idx + 1);
+			                      adapter->num_tx_queues,
+			                      r_idx + 1);
 		}
 
-		/* if this is a tx only vector use half the irq (tx) rate */
+		/* if this is a tx only vector halve the interrupt rate */
 		if (q_vector->txr_count && !q_vector->rxr_count)
-			q_vector->eitr = adapter->tx_eitr;
+			q_vector->eitr = (adapter->eitr_param >> 1);
 		else
-			/* rx only or mixed */
-			q_vector->eitr = adapter->rx_eitr;
+			/* rx only */
+			q_vector->eitr = adapter->eitr_param;
 
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
 	}
 
 	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
-	/* set up to autoclear timer, lsc, and the vectors */
+	/* set up to autoclear timer, and the vectors */
 	mask = IXGBE_EIMS_ENABLE_MASK;
-	mask &= ~IXGBE_EIMS_OTHER;
+	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -766,8 +792,8 @@
  *      parameter (see ixgbe_param.c)
  **/
 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-			   u32 eitr, u8 itr_setting,
-			   int packets, int bytes)
+                           u32 eitr, u8 itr_setting,
+                           int packets, int bytes)
 {
 	unsigned int retval = itr_setting;
 	u32 timepassed_us;
@@ -814,40 +840,40 @@
 	u32 new_itr;
 	u8 current_itr, ret_itr;
 	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-			      sizeof(struct ixgbe_q_vector);
+	                       sizeof(struct ixgbe_q_vector);
 	struct ixgbe_ring *rx_ring, *tx_ring;
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = &(adapter->tx_ring[r_idx]);
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->tx_eitr,
-					   tx_ring->total_packets,
-					   tx_ring->total_bytes);
+		                           q_vector->tx_itr,
+		                           tx_ring->total_packets,
+		                           tx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
-				    q_vector->tx_eitr - 1 : ret_itr);
+		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+		                    q_vector->tx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+		                      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
 		rx_ring = &(adapter->rx_ring[r_idx]);
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->rx_eitr,
-					   rx_ring->total_packets,
-					   rx_ring->total_bytes);
+		                           q_vector->rx_itr,
+		                           rx_ring->total_packets,
+		                           rx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
-				    q_vector->rx_eitr - 1 : ret_itr);
+		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+		                    q_vector->rx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+		                      r_idx + 1);
 	}
 
-	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
@@ -871,13 +897,27 @@
 		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
 		/* must write high and low 16 bits to reset counter */
 		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
-			itr_reg);
+		        itr_reg);
 		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
 	}
 
 	return;
 }
 
+
+static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	adapter->lsc_int++;
+	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+		schedule_work(&adapter->watchdog_task);
+	}
+}
+
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 {
 	struct net_device *netdev = data;
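
ixgbe_check_lsc() above masks further link-status interrupts and defers the actual link handling to the watchdog work item instead of poking the watchdog timer directly from interrupt context. The real ixgbe_watchdog_task is not part of this hunk; the sketch below only illustrates the typical shape of the deferred side of that handshake and is hypothetical.

/* Hypothetical sketch: the deferred handler re-evaluates the link, clears
 * the NEED_LINK_UPDATE flag, and re-arms the LSC interrupt it masked. */
static void sketch_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter =
		container_of(work, struct ixgbe_adapter, watchdog_task);

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		/* re-read link state here ... */
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
}
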
@@ -885,11 +925,8 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 
-	if (eicr & IXGBE_EICR_LSC) {
-		adapter->lsc_int++;
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			mod_timer(&adapter->watchdog_timer, jiffies);
-	}
+	if (eicr & IXGBE_EICR_LSC)
+		ixgbe_check_lsc(adapter);
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -901,7 +938,7 @@
 {
 	struct ixgbe_q_vector *q_vector = data;
 	struct ixgbe_adapter  *adapter = q_vector->adapter;
-	struct ixgbe_ring     *txr;
+	struct ixgbe_ring     *tx_ring;
 	int i, r_idx;
 
 	if (!q_vector->txr_count)
@@ -909,16 +946,16 @@
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		txr = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_DCA
+		tx_ring = &(adapter->tx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-			ixgbe_update_tx_dca(adapter, txr);
+			ixgbe_update_tx_dca(adapter, tx_ring);
 #endif
-		txr->total_bytes = 0;
-		txr->total_packets = 0;
-		ixgbe_clean_tx_irq(adapter, txr);
+		tx_ring->total_bytes = 0;
+		tx_ring->total_packets = 0;
+		ixgbe_clean_tx_irq(adapter, tx_ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+		                      r_idx + 1);
 	}
 
 	return IRQ_HANDLED;
@@ -933,18 +970,26 @@
 {
 	struct ixgbe_q_vector *q_vector = data;
 	struct ixgbe_adapter  *adapter = q_vector->adapter;
-	struct ixgbe_ring  *rxr;
+	struct ixgbe_ring  *rx_ring;
 	int r_idx;
+	int i;
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0;  i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		rx_ring->total_bytes = 0;
+		rx_ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		                      r_idx + 1);
+	}
+
 	if (!q_vector->rxr_count)
 		return IRQ_HANDLED;
 
-	rxr = &(adapter->rx_ring[r_idx]);
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
-	rxr->total_bytes = 0;
-	rxr->total_packets = 0;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
 	netif_rx_schedule(adapter->netdev, &q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -963,39 +1008,90 @@
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector.
  **/
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
+	                       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *rxr;
+	struct ixgbe_ring *rx_ring = NULL;
 	int work_done = 0;
 	long r_idx;
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rxr = &(adapter->rx_ring[r_idx]);
-#ifdef CONFIG_DCA
+	rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_rx_dca(adapter, rxr);
+		ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
 
-	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
+	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		netif_rx_complete(adapter->netdev, napi);
-		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+		if (adapter->itr_setting & 3)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
 	}
 
 	return work_done;
 }
 
+/**
+ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+	struct ixgbe_q_vector *q_vector =
+	                       container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *rx_ring = NULL;
+	int work_done = 0, i;
+	long r_idx;
+	u16 enable_mask = 0;
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling */
+	budget /= (q_vector->rxr_count ?: 1);
+	budget = max(budget, 1);
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
+		ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+		enable_mask |= rx_ring->v_idx;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		                      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		netif_rx_complete(adapter->netdev, napi);
+		if (adapter->itr_setting & 3)
+			ixgbe_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+		return 0;
+	}
+
+	return work_done;
+}
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
-				     int r_idx)
+                                     int r_idx)
 {
 	a->q_vector[v_idx].adapter = a;
 	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
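
ixgbe_clean_rxonly_many() above splits the NAPI budget across all rings owned by the vector and clamps the per-ring share at one. The arithmetic, with illustrative numbers only:

/* Illustrative arithmetic for the budget split:
 *   budget = 64, rxr_count = 3  ->  64 / 3 = 21 packets per ring this pass
 *   budget = 2,  rxr_count = 4  ->  2 / 4 = 0, clamped to 1 by max(budget, 1)
 * Without the clamp a zero per-ring budget would stop cleaning immediately
 * while packets are still pending, which is why the per-ring budget is
 * never allowed to drop below one. */
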
@@ -1004,7 +1100,7 @@
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-				     int r_idx)
+                                     int r_idx)
 {
 	a->q_vector[v_idx].adapter = a;
 	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1024,7 +1120,7 @@
  * mapping configurations in here.
  **/
 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-				      int vectors)
+                                      int vectors)
 {
 	int v_start = 0;
 	int rxr_idx = 0, txr_idx = 0;
@@ -1101,28 +1197,28 @@
 		goto out;
 
 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-			 &ixgbe_msix_clean_many)
+                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+                         &ixgbe_msix_clean_many)
 	for (vector = 0; vector < q_vectors; vector++) {
 		handler = SET_HANDLER(&adapter->q_vector[vector]);
 		sprintf(adapter->name[vector], "%s:v%d-%s",
-			netdev->name, vector,
-			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
-			 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+		        netdev->name, vector,
+		        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
+		         ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  handler, 0, adapter->name[vector],
-				  &(adapter->q_vector[vector]));
+		                  handler, 0, adapter->name[vector],
+		                  &(adapter->q_vector[vector]));
 		if (err) {
 			DPRINTK(PROBE, ERR,
-				"request_irq failed for MSIX interrupt "
-				"Error: %d\n", err);
+			        "request_irq failed for MSIX interrupt "
+			        "Error: %d\n", err);
 			goto free_queue_irqs;
 		}
 	}
 
 	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 		DPRINTK(PROBE, ERR,
 			"request_irq for msix_lsc failed: %d\n", err);
@@ -1134,7 +1230,7 @@
 free_queue_irqs:
 	for (i = vector - 1; i >= 0; i--)
 		free_irq(adapter->msix_entries[--vector].vector,
-			 &(adapter->q_vector[i]));
+		         &(adapter->q_vector[i]));
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
@@ -1152,16 +1248,16 @@
 	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
 	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
 
-	q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
-					     q_vector->tx_eitr,
-					     tx_ring->total_packets,
-					     tx_ring->total_bytes);
-	q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
-					     q_vector->rx_eitr,
-					     rx_ring->total_packets,
-					     rx_ring->total_bytes);
+	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
+	                                    q_vector->tx_itr,
+	                                    tx_ring->total_packets,
+	                                    tx_ring->total_bytes);
+	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
+	                                    q_vector->rx_itr,
+	                                    rx_ring->total_packets,
+	                                    rx_ring->total_bytes);
 
-	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
@@ -1206,19 +1302,19 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
 
-
 	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
 	 * therefore no explict interrupt disable is necessary */
 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-	if (!eicr)
+	if (!eicr) {
+		/* shared interrupt alert!
+		 * make sure interrupts are enabled because the read will
+		 * have disabled interrupts due to EIAM */
+		ixgbe_irq_enable(adapter);
 		return IRQ_NONE;	/* Not our interrupt */
-
-	if (eicr & IXGBE_EICR_LSC) {
-		adapter->lsc_int++;
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			mod_timer(&adapter->watchdog_timer, jiffies);
 	}
 
+	if (eicr & IXGBE_EICR_LSC)
+		ixgbe_check_lsc(adapter);
 
 	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
 		adapter->tx_ring[0].total_packets = 0;
@@ -1261,10 +1357,10 @@
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
-				  netdev->name, netdev);
+		                  netdev->name, netdev);
 	} else {
 		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+		                  netdev->name, netdev);
 	}
 
 	if (err)
@@ -1288,7 +1384,7 @@
 		i--;
 		for (; i >= 0; i--) {
 			free_irq(adapter->msix_entries[i].vector,
-				 &(adapter->q_vector[i]));
+			         &(adapter->q_vector[i]));
 		}
 
 		ixgbe_reset_q_vectors(adapter);
@@ -1335,7 +1431,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
+	                EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
 
 	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
 	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
@@ -1347,26 +1443,31 @@
 }
 
 /**
- * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
-	u64 tdba;
+	u64 tdba, tdwba;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 i, j, tdlen, txctrl;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		tdba = adapter->tx_ring[i].dma;
-		tdlen = adapter->tx_ring[i].count *
-			sizeof(union ixgbe_adv_tx_desc);
+		struct ixgbe_ring *ring = &adapter->tx_ring[i];
+		j = ring->reg_idx;
+		tdba = ring->dma;
+		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
-				(tdba & DMA_32BIT_MASK));
+		                (tdba & DMA_32BIT_MASK));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		tdwba = ring->dma +
+		        (ring->count * sizeof(union ixgbe_adv_tx_desc));
+		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
+		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
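
ixgbe_configure_tx() now points TDWBAL/TDWBAH one descriptor past the end of each ring and sets the enable bit, so the hardware writes its consumed-head index straight into host memory; GET_TX_HEAD_FROM_RING() in the earlier ixgbe_clean_tx_irq hunk reads that same dword back instead of polling DD bits. A compressed sketch of both ends; the helper names are hypothetical.

/* Sketch only: program head write-back for one ring, mirroring the
 * TDWBAL/TDWBAH writes above. */
static void sketch_enable_head_wb(struct ixgbe_hw *hw,
                                  struct ixgbe_ring *ring, int j)
{
	u64 tdwba = ring->dma +
	            ring->count * sizeof(union ixgbe_adv_tx_desc);

	IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j),
	                (tdwba & DMA_32BIT_MASK) | IXGBE_TDWBAL_HEAD_WB_ENABLE);
	IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), tdwba >> 32);
}

/* Cleanup side: the consumed-head index is the dword just past the ring,
 * exactly what GET_TX_HEAD_FROM_RING() dereferences. */
static u32 sketch_read_tx_head(struct ixgbe_ring *ring)
{
	return le32_to_cpu(*(volatile u32 *)
	       ((union ixgbe_adv_tx_desc *)ring->desc + ring->count));
}
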
@@ -1375,20 +1476,66 @@
 		/* Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
 		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
 	}
 }
 
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
-			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT			2
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+{
+	struct ixgbe_ring *rx_ring;
+	u32 srrctl;
+	int queue0;
+	unsigned long mask;
+
+	/* program one srrctl register per VMDq index */
+	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+		long shift, len;
+		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+		len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
+		shift = find_first_bit(&mask, len);
+		queue0 = index & mask;
+		index = (index & mask) >> shift;
+	/* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
+	} else {
+		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+		queue0 = index & mask;
+		index = index & mask;
+	}
+
+	rx_ring = &adapter->rx_ring[queue0];
+
+	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+
+	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		srrctl |= ((IXGBE_RX_HDR_SIZE <<
+		            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+		           IXGBE_SRRCTL_BSIZEHDR_MASK);
+	} else {
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+			srrctl |= IXGBE_RXBUFFER_2048 >>
+			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		else
+			srrctl |= rx_ring->rx_buf_len >>
+			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	}
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+}
+
 /**
  * ixgbe_get_skb_hdr - helper function for LRO header processing
  * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to tcp header structure
+ * @iphdr: pointer to ip header structure
  * @tcph: pointer to tcp header structure
  * @hdr_flags: pointer to header flags
  * @priv: private data
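
ixgbe_configure_srrctl() above moves the split-receive setup from a single SRRCTL(0) write to one register per queue (or per VMDq pool). A worked example of the sizing fields; treating the packet-buffer field as 1 KB granular (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10) is an assumption here.

/* Worked example of the SRRCTL sizing (1 KB packet-buffer units assumed):
 *   packet split:  BSIZEPKT = IXGBE_RXBUFFER_2048 >> 10 = 2  (2 KB data)
 *                  BSIZEHDR = IXGBE_RX_HDR_SIZE << 2, descriptor type
 *                  HDR_SPLIT_ALWAYS
 *   single buffer: BSIZEPKT = rx_buf_len >> 10, descriptor type ADV_ONEBUF
 * The index/queue0 masking at the top of the function picks which ring's
 * rx_buf_len governs a given SRRCTL register. */
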
@@ -1399,8 +1546,8 @@
 	union ixgbe_adv_rx_desc *rx_desc = priv;
 
 	/* Verify that this is a valid IPv4 TCP packet */
-	if (!(rx_desc->wb.lower.lo_dword.pkt_info &
-	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
+	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
+	     (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
 		return -1;
 
 	/* Set network headers */
@@ -1412,8 +1559,11 @@
 	return 0;
 }
 
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
 /**
- * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Rx unit of the MAC after a reset.
@@ -1426,25 +1576,26 @@
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i, j;
 	u32 rdlen, rxctrl, rxcsum;
-	u32 random[10];
+	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+	                  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+	                  0x6A3E67EA, 0x14364D17, 0x3BED200D};
 	u32 fctrl, hlreg0;
 	u32 pages;
-	u32 reta = 0, mrqc, srrctl;
+	u32 reta = 0, mrqc;
+	u32 rdrxctl;
+	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
-	if (netdev->mtu > ETH_DATA_LEN)
-		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-	else
-		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
+		rx_buf_len = IXGBE_RX_HDR_SIZE;
 	} else {
 		if (netdev->mtu <= ETH_DATA_LEN)
-			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		else
-			adapter->rx_buf_len = ALIGN(max_frame, 1024);
+			rx_buf_len = ALIGN(max_frame, 1024);
 	}
 
 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
@@ -1461,28 +1612,6 @@
 
 	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
 
-	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
-	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-
-	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		srrctl |= ((IXGBE_RX_HDR_SIZE <<
-			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-			   IXGBE_SRRCTL_BSIZEHDR_MASK);
-	} else {
-		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-			srrctl |=
-			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-		else
-			srrctl |=
-			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-	}
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
-
 	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 	/* disable receives while setting up the descriptors */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1492,25 +1621,43 @@
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rdba = adapter->rx_ring[i].dma;
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
-		adapter->rx_ring[i].head = IXGBE_RDH(i);
-		adapter->rx_ring[i].tail = IXGBE_RDT(i);
+		j = adapter->rx_ring[i].reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
+		adapter->rx_ring[i].head = IXGBE_RDH(j);
+		adapter->rx_ring[i].tail = IXGBE_RDT(j);
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		/* Initial LRO settings */
+		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
+		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
+		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
+		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
+		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+		ixgbe_configure_srrctl(adapter, j);
 	}
 
-	/* Intitial LRO Settings */
-	adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
-	adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
-	adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
-	adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
-	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
-		adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
-	adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
-	adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
-	adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	/*
+	 * For VMDq support of different descriptor types or
+	 * buffer sizes through the use of multiple SRRCTL
+	 * registers, RDRXCTL.MVMEN must be set to 1
+	 *
+	 * Also, although the manual doesn't state it clearly, DCA hints
+	 * will only use queue 0's tags unless this bit is set.  The only
+	 * side effect of setting this bit is that SRRCTL must be fully
+	 * programmed [0..15].
+	 */
+	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
@@ -1525,22 +1672,20 @@
 		}
 
 		/* Fill out hash function seeds */
-		/* XXX use a random constant here to glue certain flows */
-		get_random_bytes(&random[0], 40);
 		for (i = 0; i < 10; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
+			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
 		mrqc = IXGBE_MRQC_RSSEN
 		    /* Perform hash on these packet types */
-		    | IXGBE_MRQC_RSS_FIELD_IPV4
-		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
-		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
-		    | IXGBE_MRQC_RSS_FIELD_IPV6
-		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
-		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+		       | IXGBE_MRQC_RSS_FIELD_IPV4
+		       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+		       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+		       | IXGBE_MRQC_RSS_FIELD_IPV6
+		       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+		       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 	}
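
Replacing get_random_bytes() with a fixed seed makes the RSS hash, and therefore the flow-to-queue mapping, stable across driver loads. The redirection table itself is filled in unchanged code just outside this hunk; the sketch below is a simplified illustration of that packing, assuming four byte-sized entries per 32-bit RETA register and a plain round-robin spread, and is not the driver's exact encoding.

/* Simplified illustration only; not the driver's actual RETA fill. */
static void sketch_fill_reta(struct ixgbe_hw *hw, u32 num_rx_queues)
{
	u32 reta = 0;
	int i;

	if (!num_rx_queues)
		return;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % num_rx_queues);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}
}
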
 
@@ -1562,7 +1707,7 @@
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
-				   struct vlan_group *grp)
+                                   struct vlan_group *grp)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u32 ctrl;
@@ -1586,14 +1731,16 @@
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 
 	/* add VID to filter table */
-	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_disable(adapter);
@@ -1604,7 +1751,7 @@
 		ixgbe_irq_enable(adapter);
 
 	/* remove VID from filter table */
-	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
 }
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -1621,23 +1768,37 @@
 	}
 }
 
+static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+	struct dev_mc_list *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+	*vmdq = 0;
+
+	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+	if (mc_ptr->next)
+		*mc_addr_ptr = mc_ptr->next->dmi_addr;
+	else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
+
 /**
- * ixgbe_set_multi - Multicast and Promiscuous mode set
+ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
+ * The set_rx_mode entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
  **/
-static void ixgbe_set_multi(struct net_device *netdev)
+static void ixgbe_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
-	u8 *mta_list;
 	u32 fctrl, vlnctrl;
-	int i;
+	u8 *addr_list = NULL;
+	int addr_count = 0;
 
 	/* Check for Promiscuous and All Multicast modes */
 
@@ -1645,6 +1806,7 @@
 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
 	if (netdev->flags & IFF_PROMISC) {
+		hw->addr_ctrl.user_set_promisc = 1;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
 	} else {
@@ -1655,33 +1817,25 @@
 			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		}
 		vlnctrl |= IXGBE_VLNCTRL_VFE;
+		hw->addr_ctrl.user_set_promisc = 0;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
-	if (netdev->mc_count) {
-		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
-		if (!mta_list)
-			return;
+	/* reprogram secondary unicast list */
+	addr_count = netdev->uc_count;
+	if (addr_count)
+		addr_list = netdev->uc_list->dmi_addr;
+	hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
+	                                  ixgbe_addr_list_itr);
 
-		/* Shared function expects packed array of only addresses. */
-		mc_ptr = netdev->mc_list;
-
-		for (i = 0; i < netdev->mc_count; i++) {
-			if (!mc_ptr)
-				break;
-			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
-			       ETH_ALEN);
-			mc_ptr = mc_ptr->next;
-		}
-
-		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
-		kfree(mta_list);
-	} else {
-		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
-	}
-
+	/* reprogram multicast list */
+	addr_count = netdev->mc_count;
+	if (addr_count)
+		addr_list = netdev->mc_list->dmi_addr;
+	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+	                                ixgbe_addr_list_itr);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
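
ixgbe_set_rx_mode() above now hands the shared MAC code the head of the kernel's address list plus ixgbe_addr_list_itr(), and the helper returns one dmi_addr per call by chasing the list's next pointers. The sketch below shows how a consumer such as update_mc_addr_list() would drive that iterator; the consumer is illustrative, not the shared code's actual implementation.

/* Hypothetical consumer of the iterator callback: pull 'count' addresses,
 * one per call, and program each into the receive filters. */
typedef u8 *(*sketch_addr_itr)(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
                               u32 *vmdq);

static void sketch_walk_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
                                  u32 count, sketch_addr_itr next)
{
	u32 i, vmdq;

	for (i = 0; i < count; i++) {
		u8 *addr = next(hw, &addr_list, &vmdq);
		/* program 'addr' (ETH_ALEN bytes) into the MTA/RAR here */
		(void)addr;
	}
}
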
@@ -1695,10 +1849,16 @@
 		q_vectors = 1;
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
 		q_vector = &adapter->q_vector[q_idx];
 		if (!q_vector->rxr_count)
 			continue;
-		napi_enable(&q_vector->napi);
+		napi = &q_vector->napi;
+		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
+		    (q_vector->rxr_count > 1))
+			napi->poll = &ixgbe_clean_rxonly_many;
+
+		napi_enable(napi);
 	}
 }
 
@@ -1725,7 +1885,7 @@
 	struct net_device *netdev = adapter->netdev;
 	int i;
 
-	ixgbe_set_multi(netdev);
+	ixgbe_set_rx_mode(netdev);
 
 	ixgbe_restore_vlan(adapter);
 
@@ -1733,7 +1893,7 @@
 	ixgbe_configure_rx(adapter);
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-					   (adapter->rx_ring[i].count - 1));
+		                       (adapter->rx_ring[i].count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -1751,7 +1911,7 @@
 	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
-				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
+			        IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
 		} else {
 			/* MSI only */
 			gpie = 0;
@@ -1778,6 +1938,8 @@
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
@@ -1812,6 +1974,8 @@
 
 	/* bring the link up in the watchdog, this could race with our first
 	 * link up interrupt but shouldn't be a problem */
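+	/* the watchdog task polls for link until it is up or the check times out */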
+	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
 	mod_timer(&adapter->watchdog_timer, jiffies);
 	return 0;
 }
@@ -1836,58 +2000,22 @@
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
-	if (ixgbe_init_hw(&adapter->hw))
-		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	struct ixgbe_hw *hw = &adapter->hw;
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&adapter->pdev->dev, "Hardware Error\n");
 
 	/* reprogram the RAR[0] in case user changed it. */
-	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
 }
 
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 err;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	err = pci_enable_device(pdev);
-	if (err) {
-		printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
-				"suspend\n");
-		return err;
-	}
-	pci_set_master(pdev);
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
-	if (netif_running(netdev)) {
-		err = ixgbe_request_irq(adapter);
-		if (err)
-			return err;
-	}
-
-	ixgbe_reset(adapter);
-
-	if (netif_running(netdev))
-		ixgbe_up(adapter);
-
-	netif_device_attach(netdev);
-
-	return 0;
-}
-#endif
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+                                struct ixgbe_ring *rx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
@@ -1901,8 +2029,8 @@
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
-					 adapter->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+			                 rx_ring->rx_buf_len,
+			                 PCI_DMA_FROMDEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -1911,12 +2039,12 @@
 		}
 		if (!rx_buffer_info->page)
 			continue;
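+		/* only half of each page is mapped for rx buffers, so unmap half a page */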
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
+		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+		               PCI_DMA_FROMDEVICE);
 		rx_buffer_info->page_dma = 0;
-
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
+		rx_buffer_info->page_offset = 0;
 	}
 
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -1938,7 +2066,7 @@
  * @tx_ring: ring to be cleaned
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+                                struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned long size;
@@ -1991,75 +2119,64 @@
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rxctrl;
+	u32 txdctl;
+	int i, j;
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	/* disable receives */
-	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
-			rxctrl & ~IXGBE_RXCTRL_RXEN);
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
 	netif_tx_disable(netdev);
 
-	/* disable transmits in the hardware */
-
-	/* flush both disables */
-	IXGBE_WRITE_FLUSH(&adapter->hw);
+	IXGBE_WRITE_FLUSH(hw);
 	msleep(10);
 
+	netif_tx_stop_all_queues(netdev);
+
 	ixgbe_irq_disable(adapter);
 
 	ixgbe_napi_disable_all(adapter);
+
 	del_timer_sync(&adapter->watchdog_timer);
+	cancel_work_sync(&adapter->watchdog_task);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
+	}
 
 	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
 
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+		dca_remove_requester(&adapter->pdev->dev);
+	}
+
+#endif
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
 
-}
-
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
-
-	netif_device_detach(netdev);
-
-	if (netif_running(netdev)) {
-		ixgbe_down(adapter);
-		ixgbe_free_irq(adapter);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	/* since we reset the hardware DCA settings were cleared */
+	if (dca_add_requester(&adapter->pdev->dev) == 0) {
+		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+		/* always use CB2 mode, difference is masked
+		 * in the CB driver */
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+		ixgbe_setup_dca(adapter);
 	}
-
-#ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
 #endif
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
-	ixgbe_release_hw_control(adapter);
-
-	pci_disable_device(pdev);
-
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-	return 0;
-}
-
-static void ixgbe_shutdown(struct pci_dev *pdev)
-{
-	ixgbe_suspend(pdev, PMSG_SUSPEND);
 }
 
 /**
@@ -2072,11 +2189,11 @@
 static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector = container_of(napi,
-					  struct ixgbe_q_vector, napi);
+	                                          struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int tx_cleaned = 0, work_done = 0;
+	int tx_cleaned, work_done = 0;
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
 		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
@@ -2092,12 +2209,11 @@
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
 		netif_rx_complete(adapter->netdev, napi);
-		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+		if (adapter->itr_setting & 3)
 			ixgbe_set_itr(adapter);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable(adapter);
 	}
-
 	return work_done;
 }
 
@@ -2123,56 +2239,9 @@
 	ixgbe_reinit_locked(adapter);
 }
 
-static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
-				       int vectors)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
-	int err, vector_threshold;
-
-	/* We'll want at least 3 (vector_threshold):
-	 * 1) TxQ[0] Cleanup
-	 * 2) RxQ[0] Cleanup
-	 * 3) Other (Link Status Change, etc.)
-	 * 4) TCP Timer (optional)
-	 */
-	vector_threshold = MIN_MSIX_COUNT;
-
-	/* The more we get, the more we will assign to Tx/Rx Cleanup
-	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
-	 * Right now, we simply care about how many we'll get; we'll
-	 * set them up later while requesting irq's.
-	 */
-	while (vectors >= vector_threshold) {
-		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				      vectors);
-		if (!err) /* Success in acquiring all requested vectors. */
-			break;
-		else if (err < 0)
-			vectors = 0; /* Nasty failure, quit now */
-		else /* err == number of vectors we should try again with */
-			vectors = err;
-	}
-
-	if (vectors < vector_threshold) {
-		/* Can't allocate enough MSI-X interrupts?  Oh well.
-		 * This just means we'll go with either a single MSI
-		 * vector or fall back to legacy interrupts.
-		 */
-		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
-		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-		adapter->num_tx_queues = 1;
-		adapter->num_rx_queues = 1;
-	} else {
-		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
-		adapter->num_msix_vectors = vectors;
-	}
-}
-
-static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
-	int nrq, ntq;
+	int nrq = 1, ntq = 1;
 	int feature_mask = 0, rss_i, rss_m;
 
 	/* Number of supported queues */
@@ -2210,6 +2279,52 @@
 	adapter->num_tx_queues = ntq;
 }
 
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+                                       int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 * 3) Other (Link Status Change, etc.)
+	 * 4) TCP Timer (optional)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+		                      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		/* Can't allocate enough MSI-X interrupts?  Oh well.
+		 * This just means we'll go with either a single MSI
+		 * vector or fall back to legacy interrupts.
+		 */
+		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		ixgbe_set_num_queues(adapter);
+	} else {
+		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+		adapter->num_msix_vectors = vectors;
+	}
+}
+
 /**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
@@ -2219,9 +2334,6 @@
  **/
 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
-	/* TODO: Remove all uses of the indices in the cases where multiple
-	 *       features are OR'd together, if the feature set makes sense.
-	 */
 	int feature_mask = 0, rss_i;
 	int i, txr_idx, rxr_idx;
 
@@ -2262,21 +2374,22 @@
 	int i;
 
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbe_ring), GFP_KERNEL);
+	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
 		goto err_tx_ring_allocation;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbe_ring), GFP_KERNEL);
+	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!adapter->rx_ring)
 		goto err_rx_ring_allocation;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+		adapter->tx_ring[i].count = adapter->tx_ring_count;
 		adapter->tx_ring[i].queue_index = i;
 	}
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+		adapter->rx_ring[i].count = adapter->rx_ring_count;
 		adapter->rx_ring[i].queue_index = i;
 	}
 
@@ -2298,25 +2411,19 @@
  * capabilities of the hardware and the kernel.
  **/
 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
-						    *adapter)
+                                                    *adapter)
 {
 	int err = 0;
 	int vector, v_budget;
 
 	/*
-	 * Set the default interrupt throttle rate.
-	 */
-	adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
-	adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
-
-	/*
 	 * It's easy to be greedy for MSI-X vectors, but it really
 	 * doesn't do us much good if we have a lot more vectors
 	 * than CPU's.  So let's be conservative and only ask for
 	 * (roughly) twice the number of vectors as there are CPU's.
 	 */
 	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
 
 	/*
 	 * At the same time, hardware can only support a maximum of
@@ -2330,7 +2437,7 @@
 	/* A failure in MSI-X entry allocation isn't fatal, but it does
 	 * mean we disable MSI-X capabilities of the adapter. */
 	adapter->msix_entries = kcalloc(v_budget,
-					sizeof(struct msix_entry), GFP_KERNEL);
+	                                sizeof(struct msix_entry), GFP_KERNEL);
 	if (!adapter->msix_entries) {
 		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 		ixgbe_set_num_queues(adapter);
@@ -2339,7 +2446,7 @@
 		err = ixgbe_alloc_queues(adapter);
 		if (err) {
 			DPRINTK(PROBE, ERR, "Unable to allocate memory "
-					    "for queues\n");
+			        "for queues\n");
 			goto out;
 		}
 
@@ -2360,7 +2467,7 @@
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 	} else {
 		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
-				   "falling back to legacy.  Error: %d\n", err);
+		        "falling back to legacy.  Error: %d\n", err);
 		/* reset err */
 		err = 0;
 	}
@@ -2416,9 +2523,9 @@
 	}
 
 	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
-			   "Tx Queue count = %u\n",
-		(adapter->num_rx_queues > 1) ? "Enabled" :
-		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+	        "Tx Queue count = %u\n",
+	        (adapter->num_rx_queues > 1) ? "Enabled" :
+	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
 
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
@@ -2445,33 +2552,44 @@
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int rss;
 
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->revision_id = pdev->revision;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
 	/* Set capability flags */
 	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 
-	/* Enable Dynamic interrupt throttling by default */
-	adapter->rx_eitr = 1;
-	adapter->tx_eitr = 1;
-
 	/* default flow control settings */
-	hw->fc.original_type = ixgbe_fc_full;
-	hw->fc.type = ixgbe_fc_full;
+	hw->fc.original_type = ixgbe_fc_none;
+	hw->fc.type = ixgbe_fc_none;
+	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
+	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+	hw->fc.send_xon = true;
 
 	/* select 10G link by default */
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-	if (hw->mac.ops.reset(hw)) {
-		dev_err(&pdev->dev, "HW Init failed\n");
-		return -EIO;
-	}
-	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
-					 false)) {
-		dev_err(&pdev->dev, "Link Speed setup failed\n");
-		return -EIO;
-	}
+
+	/* enable itr by default in dynamic mode */
+	adapter->itr_setting = 1;
+	adapter->eitr_param = 20000;
+
+	/* set defaults for eitr in MegaBytes */
+	adapter->eitr_low = 10;
+	adapter->eitr_high = 20;
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
 	/* initialize eeprom parameters */
-	if (ixgbe_init_eeprom(hw)) {
+	if (ixgbe_init_eeprom_params_generic(hw)) {
 		dev_err(&pdev->dev, "EEPROM initialization failed\n");
 		return -EIO;
 	}
@@ -2487,105 +2605,157 @@
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
- * @txdr:    tx descriptor ring (for a specific queue) to setup
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *txdr)
+                             struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
-	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
-	txdr->tx_buffer_info = vmalloc(size);
-	if (!txdr->tx_buffer_info) {
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the transmit descriptor ring\n");
-		return -ENOMEM;
-	}
-	memset(txdr->tx_buffer_info, 0, size);
+	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+	tx_ring->tx_buffer_info = vmalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+	memset(tx_ring->tx_buffer_info, 0, size);
 
 	/* round up to nearest 4K */
-	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
-	txdr->size = ALIGN(txdr->size, 4096);
+	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
+	                sizeof(u32);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-	if (!txdr->desc) {
-		vfree(txdr->tx_buffer_info);
-		DPRINTK(PROBE, ERR,
-			"Memory allocation failed for the tx desc ring\n");
-		return -ENOMEM;
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	                                     &tx_ring->dma);
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->work_limit = tx_ring->count;
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
+	                    "descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+		break;
 	}
 
-	txdr->next_to_use = 0;
-	txdr->next_to_clean = 0;
-	txdr->work_limit = txdr->count;
-
-	return 0;
+	return err;
 }
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
- * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *rxdr)
+                             struct ixgbe_ring *rx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
 	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
-	rxdr->lro_mgr.lro_arr = vmalloc(size);
-	if (!rxdr->lro_mgr.lro_arr)
+	rx_ring->lro_mgr.lro_arr = vmalloc(size);
+	if (!rx_ring->lro_mgr.lro_arr)
 		return -ENOMEM;
-	memset(rxdr->lro_mgr.lro_arr, 0, size);
+	memset(rx_ring->lro_mgr.lro_arr, 0, size);
 
-	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
-	rxdr->rx_buffer_info = vmalloc(size);
-	if (!rxdr->rx_buffer_info) {
+	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+	rx_ring->rx_buffer_info = vmalloc(size);
+	if (!rx_ring->rx_buffer_info) {
 		DPRINTK(PROBE, ERR,
-			"vmalloc allocation failed for the rx desc ring\n");
+		        "vmalloc allocation failed for the rx desc ring\n");
 		goto alloc_failed;
 	}
-	memset(rxdr->rx_buffer_info, 0, size);
+	memset(rx_ring->rx_buffer_info, 0, size);
 
 	/* Round up to nearest 4K */
-	rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
-	rxdr->size = ALIGN(rxdr->size, 4096);
+	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
 
-	if (!rxdr->desc) {
+	if (!rx_ring->desc) {
 		DPRINTK(PROBE, ERR,
-			"Memory allocation failed for the rx desc ring\n");
-		vfree(rxdr->rx_buffer_info);
+		        "Memory allocation failed for the rx desc ring\n");
+		vfree(rx_ring->rx_buffer_info);
 		goto alloc_failed;
 	}
 
-	rxdr->next_to_clean = 0;
-	rxdr->next_to_use = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
 
 	return 0;
 
 alloc_failed:
-	vfree(rxdr->lro_mgr.lro_arr);
-	rxdr->lro_mgr.lro_arr = NULL;
+	vfree(rx_ring->lro_mgr.lro_arr);
+	rx_ring->lro_mgr.lro_arr = NULL;
 	return -ENOMEM;
 }
 
 /**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
@@ -2620,8 +2790,8 @@
  *
  * Free all receive software resources
  **/
-static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *rx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
@@ -2653,59 +2823,6 @@
 }
 
 /**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Tx Queue %u failed\n", i);
-			break;
-		}
-	}
-
-	return err;
-}
-
-/**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Rx Queue %u failed\n", i);
-			break;
-		}
-	}
-
-	return err;
-}
-
-/**
  * ixgbe_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
  * @new_mtu: new value for maximum frame size
@@ -2717,12 +2834,12 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
-	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 		return -EINVAL;
 
 	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
-		netdev->mtu, new_mtu);
+	        netdev->mtu, new_mtu);
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
@@ -2817,6 +2934,135 @@
 }
 
 /**
+ * ixgbe_napi_add_all - prep napi structs for use
+ * @adapter: private struct
+ * helper function to napi_add each possible q_vector->napi
+ */
+static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx, q_vectors;
+	int (*poll)(struct napi_struct *, int);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		poll = &ixgbe_clean_rxonly;
+		/* Only enable as many vectors as we have rx queues. */
+		q_vectors = adapter->num_rx_queues;
+	} else {
+		poll = &ixgbe_poll;
+		/* only one q_vector for legacy modes */
+		q_vectors = 1;
+	}
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+	}
+}
+
+static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* legacy and MSI only use one vector */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		q_vectors = 1;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		netif_napi_del(&q_vector->napi);
+	}
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
+				"suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
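+	/* suspend tears down the interrupt scheme and rings, so rebuild them here */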
+	err = ixgbe_init_interrupt_scheme(adapter);
+	if (err) {
+		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
+		                "device\n");
+		return err;
+	}
+
+	ixgbe_napi_add_all(adapter);
+	ixgbe_reset(adapter);
+
+	if (netif_running(netdev)) {
+		err = ixgbe_open(adapter->netdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_free_irq(adapter);
+		ixgbe_free_all_tx_resources(adapter);
+		ixgbe_free_all_rx_resources(adapter);
+	}
+	ixgbe_reset_interrupt_capability(adapter);
+	ixgbe_napi_del_all(adapter);
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	ixgbe_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+	ixgbe_suspend(pdev, PMSG_SUSPEND);
+}
+
+/**
  * ixgbe_update_stats - Update the board statistics counters.
  * @adapter: board private structure
  **/
@@ -2889,7 +3135,7 @@
 
 	/* Rx Errors */
 	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
-						adapter->stats.rlec;
+	                               adapter->stats.rlec;
 	adapter->net_stats.rx_dropped = 0;
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -2903,27 +3149,74 @@
 static void ixgbe_watchdog(unsigned long data)
 {
 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
-	struct net_device *netdev = adapter->netdev;
-	bool link_up;
-	u32 link_speed = 0;
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
+	/* Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires */
+	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+		/* Cause software interrupt to ensure rx rings are cleaned */
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
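+			/* set a bit in EICS for every queue vector */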
+			u32 eics =
+			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+			IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
+		} else {
+			/* For legacy and MSI interrupts don't set any bits that
+			 * are enabled for EIAM, because this operation would
+			 * set *both* EIMS and EICS for any bit in EIAM */
+			IXGBE_WRITE_REG(hw, IXGBE_EICS,
+			                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+		}
+		/* Reset the timer */
+		mod_timer(&adapter->watchdog_timer,
+		          round_jiffies(jiffies + 2 * HZ));
+	}
+
+	schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbe_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_watchdog_task(struct work_struct *work)
+{
+	struct ixgbe_adapter *adapter = container_of(work,
+	                                             struct ixgbe_adapter,
+	                                             watchdog_task);
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+
+	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (link_up ||
+		    time_after(jiffies, (adapter->link_check_timeout +
+		                         IXGBE_TRY_LINK_TIMEOUT))) {
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+		}
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+	}
 
 	if (link_up) {
 		if (!netif_carrier_ok(netdev)) {
-			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
+			u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
 			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
-				"Flow Control: %s\n",
-				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-				 "10 Gbps" :
-				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-				  "1 Gbps" : "unknown speed")),
-				((FLOW_RX && FLOW_TX) ? "RX/TX" :
-				 (FLOW_RX ? "RX" :
-				 (FLOW_TX ? "TX" : "None"))));
+			        "Flow Control: %s\n",
+			        (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+			         "10 Gbps" :
+			         (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+			          "1 Gbps" : "unknown speed")),
+			        ((FLOW_RX && FLOW_TX) ? "RX/TX" :
+			         (FLOW_RX ? "RX" :
+			         (FLOW_TX ? "TX" : "None"))));
 
 			netif_carrier_on(netdev);
 			netif_tx_wake_all_queues(netdev);
@@ -2932,6 +3225,8 @@
 			adapter->detect_tx_hung = true;
 		}
 	} else {
+		adapter->link_up = false;
+		adapter->link_speed = 0;
 		if (netif_carrier_ok(netdev)) {
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
@@ -2940,36 +3235,19 @@
 	}
 
 	ixgbe_update_stats(adapter);
-
-	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-		/* Cause software interrupt to ensure rx rings are cleaned */
-		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-			u32 eics =
-			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
-		} else {
-			/* for legacy and MSI interrupts don't set any bits that
-			 * are enabled for EIAM, because this operation would
-			 * set *both* EIMS and EICS for any bit in EIAM */
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-		}
-		/* Reset the timer */
-		mod_timer(&adapter->watchdog_timer,
-			  round_jiffies(jiffies + 2 * HZ));
-	}
+	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
-			 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-			 u32 tx_flags, u8 *hdr_len)
+                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+                     u32 tx_flags, u8 *hdr_len)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
 	int err;
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-	u32 mss_l4len_idx = 0, l4len;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+	u32 mss_l4len_idx, l4len;
 
 	if (skb_is_gso(skb)) {
 		if (skb_header_cloned(skb)) {
@@ -2985,16 +3263,16 @@
 			iph->tot_len = 0;
 			iph->check = 0;
 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
+			                                         iph->daddr, 0,
+			                                         IPPROTO_TCP,
+			                                         0);
 			adapter->hw_tso_ctxt++;
 		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
 			ipv6_hdr(skb)->payload_len = 0;
 			tcp_hdr(skb)->check =
 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					     &ipv6_hdr(skb)->daddr,
-					     0, IPPROTO_TCP, 0);
+			                     &ipv6_hdr(skb)->daddr,
+			                     0, IPPROTO_TCP, 0);
 			adapter->hw_tso6_ctxt++;
 		}
 
@@ -3008,7 +3286,7 @@
 			vlan_macip_lens |=
 			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
 		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		                    IXGBE_ADVTXD_MACLEN_SHIFT);
 		*hdr_len += skb_network_offset(skb);
 		vlan_macip_lens |=
 		    (skb_transport_header(skb) - skb_network_header(skb));
@@ -3018,8 +3296,8 @@
 		context_desc->seqnum_seed = 0;
 
 		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
+		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+		                   IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->protocol == htons(ETH_P_IP))
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3027,9 +3305,11 @@
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 
 		/* MSS L4LEN IDX */
-		mss_l4len_idx |=
+		mss_l4len_idx =
 		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
 		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+		/* use index 1 for TSO */
+		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 
 		tx_buffer_info->time_stamp = jiffies;
@@ -3046,8 +3326,8 @@
 }
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-				   struct ixgbe_ring *tx_ring,
-				   struct sk_buff *skb, u32 tx_flags)
+                          struct ixgbe_ring *tx_ring,
+                          struct sk_buff *skb, u32 tx_flags)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -3064,16 +3344,16 @@
 			vlan_macip_lens |=
 			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
 		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		                    IXGBE_ADVTXD_MACLEN_SHIFT);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
+			                    skb_network_header(skb));
 
 		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 		context_desc->seqnum_seed = 0;
 
 		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
+		                    IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			switch (skb->protocol) {
@@ -3081,16 +3361,14 @@
 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
 				break;
-
 			case __constant_htons(ETH_P_IPV6):
 				/* XXX what about other V6 headers?? */
 				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
 				break;
-
 			default:
 				if (unlikely(net_ratelimit())) {
 					DPRINTK(PROBE, WARNING,
@@ -3102,10 +3380,12 @@
 		}
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+		/* use index zero for tx checksum offload */
 		context_desc->mss_l4len_idx = 0;
 
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
+
 		adapter->hw_csum_tx_good++;
 		i++;
 		if (i == tx_ring->count)
@@ -3114,12 +3394,13 @@
 
 		return true;
 	}
+
 	return false;
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-			struct ixgbe_ring *tx_ring,
-			struct sk_buff *skb, unsigned int first)
+                        struct ixgbe_ring *tx_ring,
+                        struct sk_buff *skb, unsigned int first)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len = skb->len;
@@ -3137,8 +3418,8 @@
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->dma = pci_map_single(adapter->pdev,
-						  skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
+		                                     skb->data + offset,
+		                                     size, PCI_DMA_TODEVICE);
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
@@ -3163,9 +3444,10 @@
 
 			tx_buffer_info->length = size;
 			tx_buffer_info->dma = pci_map_page(adapter->pdev,
-							frag->page,
-							offset,
-							size, PCI_DMA_TODEVICE);
+			                                   frag->page,
+			                                   offset,
+			                                   size,
+			                                   PCI_DMA_TODEVICE);
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
@@ -3188,8 +3470,8 @@
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-			       struct ixgbe_ring *tx_ring,
-			       int tx_flags, int count, u32 paylen, u8 hdr_len)
+                           struct ixgbe_ring *tx_ring,
+                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
 	union ixgbe_adv_tx_desc *tx_desc = NULL;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -3208,15 +3490,17 @@
 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 
 		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-						IXGBE_ADVTXD_POPTS_SHIFT;
+		                 IXGBE_ADVTXD_POPTS_SHIFT;
 
+		/* use index 1 context for tso */
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-						IXGBE_ADVTXD_POPTS_SHIFT;
+			                 IXGBE_ADVTXD_POPTS_SHIFT;
 
 	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-						IXGBE_ADVTXD_POPTS_SHIFT;
+		                 IXGBE_ADVTXD_POPTS_SHIFT;
 
 	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
@@ -3226,9 +3510,8 @@
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
 		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
@@ -3249,7 +3532,7 @@
 }
 
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
-				 struct ixgbe_ring *tx_ring, int size)
+                                 struct ixgbe_ring *tx_ring, int size)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -3265,61 +3548,52 @@
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_wake_subqueue(netdev, tx_ring->queue_index);
+	netif_start_subqueue(netdev, tx_ring->queue_index);
 	++adapter->restart_queue;
 	return 0;
 }
 
 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
-			       struct ixgbe_ring *tx_ring, int size)
+                              struct ixgbe_ring *tx_ring, int size)
 {
 	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
 }
 
-
 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring;
-	unsigned int len = skb->len;
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
 	int r_idx = 0, tso;
-	unsigned int mss = 0;
 	int count = 0;
 	unsigned int f;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	len -= skb->data_len;
+
 	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
 	tx_ring = &adapter->tx_ring[r_idx];
 
-
-	if (skb->len <= 0) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		tx_flags |= vlan_tx_tag_get(skb);
+		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
-	mss = skb_shinfo(skb)->gso_size;
-
-	if (mss)
-		count++;
-	else if (skb->ip_summed == CHECKSUM_PARTIAL)
+	/* three things can cause us to need a context descriptor */
+	if (skb_is_gso(skb) ||
+	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
 		count++;
 
-	count += TXD_USE_COUNT(len);
-	for (f = 0; f < nr_frags; f++)
+	count += TXD_USE_COUNT(skb_headlen(skb));
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
 		adapter->tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
-	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
-		tx_flags |= IXGBE_TX_FLAGS_VLAN;
-		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
-	}
 
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= IXGBE_TX_FLAGS_IPV4;
@@ -3333,12 +3607,12 @@
 	if (tso)
 		tx_flags |= IXGBE_TX_FLAGS_TSO;
 	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-		 (skb->ip_summed == CHECKSUM_PARTIAL))
+	         (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
 	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-			   ixgbe_tx_map(adapter, tx_ring, skb, first),
-			   skb->len, hdr_len);
+	               ixgbe_tx_map(adapter, tx_ring, skb, first),
+	               skb->len, hdr_len);
 
 	netdev->trans_start = jiffies;
 
@@ -3372,15 +3646,16 @@
 static int ixgbe_set_mac(struct net_device *netdev, void *p)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
 	return 0;
 }
@@ -3404,28 +3679,19 @@
 #endif
 
 /**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- * helper function to napi_add each possible q_vector->napi
- */
-static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+ * ixgbe_link_config - set up initial link with default speed and duplex
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_link_config(struct ixgbe_hw *hw)
 {
-	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	int (*poll)(struct napi_struct *, int);
+	u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		poll = &ixgbe_clean_rxonly;
-	} else {
-		poll = &ixgbe_poll;
-		/* only one q_vector for legacy modes */
-		q_vectors = 1;
-	}
+	/* must always autoneg for both 1G and 10G link */
+	hw->mac.autoneg = true;
 
-	for (i = 0; i < q_vectors; i++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
-		netif_napi_add(adapter->netdev, &q_vector->napi,
-			       (*poll), 64);
-	}
+	return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
 }
 
 /**
@@ -3440,17 +3706,16 @@
  * and a hardware reset occur.
  **/
 static int __devinit ixgbe_probe(struct pci_dev *pdev,
-				 const struct pci_device_id *ent)
+                                 const struct pci_device_id *ent)
 {
 	struct net_device *netdev;
 	struct ixgbe_adapter *adapter = NULL;
 	struct ixgbe_hw *hw;
 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
-	unsigned long mmio_start, mmio_len;
 	static int cards_found;
 	int i, err, pci_using_dac;
 	u16 link_status, link_speed, link_width;
-	u32 part_num;
+	u32 part_num, eec;
 
 	err = pci_enable_device(pdev);
 	if (err)
@@ -3465,7 +3730,7 @@
 			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
-					"configuration, aborting\n");
+				        "configuration, aborting\n");
 				goto err_dma;
 			}
 		}
@@ -3498,10 +3763,8 @@
 	hw->back = adapter;
 	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
-	mmio_start = pci_resource_start(pdev, 0);
-	mmio_len = pci_resource_len(pdev, 0);
-
-	hw->hw_addr = ioremap(mmio_start, mmio_len);
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+	                      pci_resource_len(pdev, 0));
 	if (!hw->hw_addr) {
 		err = -EIO;
 		goto err_ioremap;
@@ -3516,7 +3779,8 @@
 	netdev->stop = &ixgbe_close;
 	netdev->hard_start_xmit = &ixgbe_xmit_frame;
 	netdev->get_stats = &ixgbe_get_stats;
-	netdev->set_multicast_list = &ixgbe_set_multi;
+	netdev->set_rx_mode = &ixgbe_set_rx_mode;
+	netdev->set_multicast_list = &ixgbe_set_rx_mode;
 	netdev->set_mac_address = &ixgbe_set_mac;
 	netdev->change_mtu = &ixgbe_change_mtu;
 	ixgbe_set_ethtool_ops(netdev);
@@ -3530,22 +3794,23 @@
 #endif
 	strcpy(netdev->name, pci_name(pdev));
 
-	netdev->mem_start = mmio_start;
-	netdev->mem_end = mmio_start + mmio_len;
-
 	adapter->bd_number = cards_found;
 
-	/* PCI config space info */
-	hw->vendor_id = pdev->vendor;
-	hw->device_id = pdev->device;
-	hw->revision_id = pdev->revision;
-	hw->subsystem_vendor_id = pdev->subsystem_vendor;
-	hw->subsystem_device_id = pdev->subsystem_device;
-
 	/* Setup hw api */
 	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
 	hw->mac.type  = ii->mac;
 
+	/* EEPROM */
+	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	/* If EEPROM is valid (bit 8 = 1), use the default read; otherwise use bit-bang */
+	if (!(eec & (1 << 8)))
+		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+
+	/* PHY */
+	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+	/* phy->sfp_type = ixgbe_sfp_type_unknown; */
+
 	err = ii->get_invariants(hw);
 	if (err)
 		goto err_hw_init;
@@ -3555,26 +3820,34 @@
 	if (err)
 		goto err_sw_init;
 
-	netdev->features = NETIF_F_SG |
-			   NETIF_F_HW_CSUM |
-			   NETIF_F_HW_VLAN_TX |
-			   NETIF_F_HW_VLAN_RX |
-			   NETIF_F_HW_VLAN_FILTER;
+	/* reset_hw fills in the perm_addr as well */
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+		goto err_sw_init;
+	}
 
-	netdev->features |= NETIF_F_LRO;
+	netdev->features = NETIF_F_SG |
+	                   NETIF_F_IP_CSUM |
+	                   NETIF_F_HW_VLAN_TX |
+	                   NETIF_F_HW_VLAN_RX |
+	                   NETIF_F_HW_VLAN_FILTER;
+
+	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
+	netdev->features |= NETIF_F_LRO;
 
 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_TSO6;
-	netdev->vlan_features |= NETIF_F_HW_CSUM;
+	netdev->vlan_features |= NETIF_F_IP_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	/* make sure the EEPROM is good */
-	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
+	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
 		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -3583,7 +3856,8 @@
 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
-	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+		dev_err(&pdev->dev, "invalid MAC address\n");
 		err = -EIO;
 		goto err_eeprom;
 	}
@@ -3593,13 +3867,7 @@
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
 	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
-
-	/* initialize default flow control settings */
-	hw->fc.original_type = ixgbe_fc_full;
-	hw->fc.type = ixgbe_fc_full;
-	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
-	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
 
 	err = ixgbe_init_interrupt_scheme(adapter);
 	if (err)
@@ -3610,32 +3878,39 @@
 	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
 	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
 	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
-		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
-		((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
-		 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
-		 "Unknown"),
-		((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
-		 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
-		 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
-		 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
-		 "Unknown"),
-		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
-	ixgbe_read_part_num(hw, &part_num);
+	         "%02x:%02x:%02x:%02x:%02x:%02x\n",
+	        ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
+	         (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
+	         "Unknown"),
+	        ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
+	         (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
+	         (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
+	         (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
+	         "Unknown"),
+	        netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+	        netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+	ixgbe_read_pba_num_generic(hw, &part_num);
 	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-		 hw->mac.type, hw->phy.type,
-		 (part_num >> 8), (part_num & 0xff));
+	         hw->mac.type, hw->phy.type,
+	         (part_num >> 8), (part_num & 0xff));
 
 	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
 		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
-			 "this card is not sufficient for optimal "
-			 "performance.\n");
+		         "this card is not sufficient for optimal "
+		         "performance.\n");
 		dev_warn(&pdev->dev, "For optimal performance a x8 "
-			 "PCI-Express slot is required.\n");
+		         "PCI-Express slot is required.\n");
 	}
 
 	/* reset the hardware with the new settings */
-	ixgbe_start_hw(hw);
+	hw->mac.ops.start_hw(hw);
+
+	/* link_config depends on start_hw being called at least once */
+	err = ixgbe_link_config(hw);
+	if (err) {
+		dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
+		goto err_register;
+	}
 
 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(netdev);
@@ -3647,7 +3922,7 @@
 	if (err)
 		goto err_register;
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	if (dca_add_requester(&pdev->dev) == 0) {
 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 		/* always use CB2 mode, difference is masked
@@ -3697,7 +3972,7 @@
 
 	flush_scheduled_work();
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 		dca_remove_requester(&pdev->dev);
@@ -3715,6 +3990,7 @@
 	pci_release_regions(pdev);
 
 	DPRINTK(PROBE, INFO, "complete\n");
+	ixgbe_napi_del_all(adapter);
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 
@@ -3732,7 +4008,7 @@
  * this device has been detected.
  */
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
-						pci_channel_state_t state)
+                                                pci_channel_state_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev->priv;
@@ -3743,7 +4019,7 @@
 		ixgbe_down(adapter);
 	pci_disable_device(pdev);
 
-	/* Request a slot slot reset. */
+	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -3760,7 +4036,7 @@
 
 	if (pci_enable_device(pdev)) {
 		DPRINTK(PROBE, ERR,
-			"Cannot re-enable PCI device after reset.\n");
+		        "Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
@@ -3794,7 +4070,6 @@
 	}
 
 	netif_device_attach(netdev);
-
 }
 
 static struct pci_error_handlers ixgbe_err_handler = {
@@ -3830,13 +4105,14 @@
 
 	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	dca_register_notify(&dca_notifier);
 
 #endif
 	ret = pci_register_driver(&ixgbe_driver);
 	return ret;
 }
+
 module_init(ixgbe_init_module);
 
 /**
@@ -3847,24 +4123,24 @@
  **/
 static void __exit ixgbe_exit_module(void)
 {
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 	dca_unregister_notify(&dca_notifier);
 #endif
 	pci_unregister_driver(&ixgbe_driver);
 }
 
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
-			    void *p)
+                            void *p)
 {
 	int ret_val;
 
 	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
-					 __ixgbe_notify_dca);
+	                                 __ixgbe_notify_dca);
 
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
-#endif /* CONFIG_DCA */
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
 
 module_exit(ixgbe_exit_module);
 
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931..764035a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -33,32 +32,36 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
+static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 phy_data);
 
 /**
- *  ixgbe_identify_phy - Get physical layer module
+ *  ixgbe_identify_phy_generic - Get physical layer module
  *  @hw: pointer to hardware structure
  *
  *  Determines the physical layer module found on the current adapter.
  **/
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 phy_addr;
 
-	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-		if (ixgbe_validate_phy_addr(hw, phy_addr)) {
-			hw->phy.addr = phy_addr;
-			ixgbe_get_phy_id(hw);
-			hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
-			status = 0;
-			break;
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+				hw->phy.addr = phy_addr;
+				ixgbe_get_phy_id(hw);
+				hw->phy.type =
+				        ixgbe_get_phy_type_from_id(hw->phy.id);
+				status = 0;
+				break;
+			}
 		}
+	} else {
+		status = 0;
 	}
+
 	return status;
 }
 
@@ -73,10 +76,8 @@
 	bool valid = false;
 
 	hw->phy.addr = phy_addr;
-	ixgbe_read_phy_reg(hw,
-			   IXGBE_MDIO_PHY_ID_HIGH,
-			   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-			   &phy_id);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
 
 	if (phy_id != 0xFFFF && phy_id != 0x0)
 		valid = true;
@@ -95,21 +96,18 @@
 	u16 phy_id_high = 0;
 	u16 phy_id_low = 0;
 
-	status = ixgbe_read_phy_reg(hw,
-				   IXGBE_MDIO_PHY_ID_HIGH,
-				   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				   &phy_id_high);
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+	                              &phy_id_high);
 
 	if (status == 0) {
 		hw->phy.id = (u32)(phy_id_high << 16);
-		status = ixgbe_read_phy_reg(hw,
-					   IXGBE_MDIO_PHY_ID_LOW,
-					   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					   &phy_id_low);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+		                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+		                              &phy_id_low);
 		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
 		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
 	}
-
 	return status;
 }
 
@@ -123,9 +121,6 @@
 	enum ixgbe_phy_type phy_type;
 
 	switch (phy_id) {
-	case TN1010_PHY_ID:
-		phy_type = ixgbe_phy_tn;
-		break;
 	case QT2022_PHY_ID:
 		phy_type = ixgbe_phy_qt;
 		break;
@@ -138,32 +133,31 @@
 }
 
 /**
- *  ixgbe_reset_phy - Performs a PHY reset
+ *  ixgbe_reset_phy_generic - Performs a PHY reset
  *  @hw: pointer to hardware structure
  **/
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
 	 */
-	return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				   IXGBE_MDIO_PHY_XS_DEV_TYPE,
-				   IXGBE_MDIO_PHY_XS_RESET);
+	return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+	                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+	                             IXGBE_MDIO_PHY_XS_RESET);
 }
 
 /**
- *  ixgbe_read_phy_reg - Reads a value from a specified PHY register
+ *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit address of PHY register to read
  *  @phy_data: Pointer to read data from PHY register
  **/
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-		       u32 device_type, u16 *phy_data)
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	u32 data;
 	s32 status = 0;
 	u16 gssr;
@@ -179,9 +173,9 @@
 	if (status == 0) {
 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -190,7 +184,7 @@
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +204,9 @@
 			 * command
 			 */
 			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			           (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
 
 			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -221,7 +215,7 @@
 			 * completed. The MDI Command bit will clear when the
 			 * operation is complete
 			 */
-			for (i = 0; i < timeout; i++) {
+			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 				udelay(10);
 
 				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +225,7 @@
 			}
 
 			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-				hw_dbg(hw,
-				       "PHY read command didn't complete\n");
+				hw_dbg(hw, "PHY read command didn't complete\n");
 				status = IXGBE_ERR_PHY;
 			} else {
 				/*
@@ -247,22 +240,22 @@
 
 		ixgbe_release_swfw_sync(hw, gssr);
 	}
+
 	return status;
 }
 
 /**
- *  ixgbe_write_phy_reg - Writes a value to specified PHY register
+ *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit PHY register to write
  *  @device_type: 5 bit device type
  *  @phy_data: Data to write to the PHY register
  **/
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 phy_data)
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	s32 status = 0;
 	u16 gssr;
 
@@ -280,9 +273,9 @@
 
 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -291,19 +284,19 @@
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-				hw_dbg(hw, "PHY address cmd didn't complete\n");
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 				break;
-			}
 		}
 
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address cmd didn't complete\n");
 			status = IXGBE_ERR_PHY;
+		}
 
 		if (status == 0) {
 			/*
@@ -311,9 +304,9 @@
 			 * command
 			 */
 			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			           (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
 
 			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -322,20 +315,19 @@
 			 * completed. The MDI Command bit will clear when the
 			 * operation is complete
 			 */
-			for (i = 0; i < timeout; i++) {
+			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 				udelay(10);
 
 				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
 
-				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-					hw_dbg(hw, "PHY write command did not "
-						  "complete.\n");
+				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 					break;
-				}
 			}
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+				hw_dbg(hw, "PHY write cmd didn't complete\n");
 				status = IXGBE_ERR_PHY;
+			}
 		}
 
 		ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +337,54 @@
 }
 
 /**
- *  ixgbe_setup_tnx_phy_link - Set and restart autoneg
+ *  ixgbe_setup_phy_link_generic - Set and restart autoneg
  *  @hw: pointer to hardware structure
  *
  *  Restart autonegotiation and PHY and waits for completion.
  **/
-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_NOT_IMPLEMENTED;
 	u32 time_out;
 	u32 max_time_out = 10;
-	u16 autoneg_speed_selection_register = 0x10;
-	u16 autoneg_restart_mask = 0x0200;
-	u16 autoneg_complete_mask = 0x0020;
-	u16 autoneg_reg = 0;
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
 
 	/*
 	 * Set advertisement settings in PHY based on autoneg_advertised
 	 * settings. If autoneg_advertised = 0, then advertise default values
-	 * txn devices cannot be "forced" to a autoneg 10G and fail.  But can
+	 * tnx devices cannot be "forced" to autoneg 10G and will fail, but can
 	 * for a 1G.
 	 */
-	ixgbe_read_phy_reg(hw,
-		  autoneg_speed_selection_register,
-		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		  &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
 	if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
 		autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
 	else
 		autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
 
-	ixgbe_write_phy_reg(hw,
-		  autoneg_speed_selection_register,
-		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		  autoneg_reg);
-
+	hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
 	/* Restart PHY autonegotiation and wait for completion */
-	ixgbe_read_phy_reg(hw,
-		  IXGBE_MDIO_AUTO_NEG_CONTROL,
-		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		  &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
-	autoneg_reg |= autoneg_restart_mask;
+	autoneg_reg |= IXGBE_MII_RESTART;
 
-	ixgbe_write_phy_reg(hw,
-		  IXGBE_MDIO_AUTO_NEG_CONTROL,
-		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		  autoneg_reg);
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
 	/* Wait for autonegotiation to finish */
 	for (time_out = 0; time_out < max_time_out; time_out++) {
 		udelay(10);
 		/* Restart PHY autonegotiation and wait for completion */
-		status = ixgbe_read_phy_reg(hw,
-					    IXGBE_MDIO_AUTO_NEG_STATUS,
-					    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-					    &autoneg_reg);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+		                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		                              &autoneg_reg);
 
-		autoneg_reg &= autoneg_complete_mask;
-		if (autoneg_reg == autoneg_complete_mask) {
+		autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+		if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
 			status = 0;
 			break;
 		}
@@ -418,64 +397,17 @@
 }
 
 /**
- *  ixgbe_check_tnx_phy_link - Determine link and speed status
- *  @hw: pointer to hardware structure
- *
- *  Reads the VS1 register to determine if link is up and the current speed for
- *  the PHY.
- **/
-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
-			     bool *link_up)
-{
-	s32 status = 0;
-	u32 time_out;
-	u32 max_time_out = 10;
-	u16 phy_link = 0;
-	u16 phy_speed = 0;
-	u16 phy_data = 0;
-
-	/* Initialize speed and link to default case */
-	*link_up = false;
-	*speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-	/*
-	 * Check current speed and link status of the PHY register.
-	 * This is a vendor specific register and may have to
-	 * be changed for other copper PHYs.
-	 */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		udelay(10);
-		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
-			*link_up = true;
-			if (phy_speed ==
-			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
-				*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			break;
-		} else {
-			status = ixgbe_read_phy_reg(hw,
-				     IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-				     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				     &phy_data);
-			phy_link = phy_data &
-				IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
-			phy_speed = phy_data &
-				IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
-		}
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
+ *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
  **/
-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
-				   bool autoneg,
-				   bool autoneg_wait_to_complete)
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete)
 {
+
 	/*
 	 * Clear autoneg_advertised and set new values based on input link
 	 * speed.
@@ -484,11 +416,13 @@
 
 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
 	/* Setup link based on the new speed settings */
-	ixgbe_setup_tnx_phy_link(hw);
+	hw->phy.ops.setup_link(hw);
 
 	return 0;
 }
+
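The changes to ixgbe_phy.c above route every MDIO access through the hw->phy.ops function-pointer table instead of calling ixgbe_read_phy_reg()/ixgbe_write_phy_reg() directly, and the renamed *_generic helpers (prototyped in ixgbe_phy.h below) become the default implementations behind those pointers. A minimal sketch of how such a table could be populated, assuming the real wiring lives in the per-MAC setup code (e.g. ixgbe_82598.c, not part of this hunk); the table name is purely illustrative:

static struct ixgbe_phy_operations phy_ops_generic_sketch = {
	.identify         = &ixgbe_identify_phy_generic,
	.reset            = &ixgbe_reset_phy_generic,
	.read_reg         = &ixgbe_read_phy_reg_generic,
	.write_reg        = &ixgbe_write_phy_reg_generic,
	.setup_link       = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
};

Callers then go through the table, e.g. hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id), which lets a MAC-specific variant override individual operations without touching the call sites.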
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index aa3ea72..9bfe3f2 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -30,20 +29,52 @@
 #define _IXGBE_PHY_H_
 
 #include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0
 
-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
-			       bool autoneg_wait_to_complete);
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 *phy_data);
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER         0x0
+#define IXGBE_SFF_IDENTIFIER_SFP     0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0   0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1   0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
+#define IXGBE_SFF_1GBE_COMP_CODES    0x6
+#define IXGBE_SFF_10GBE_COMP_CODES   0x3
+#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
 
-/* PHY specific */
-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw);
-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
-				  bool autoneg_wait_to_complete);
+/* Bitmasks */
+#define IXGBE_SFF_TWIN_AX_CAPABLE            0x80
+#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
+#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
+#define IXGBE_I2C_EEPROM_READ_MASK           0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    12
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    8
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    4
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO     0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL      0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO    0x00176A00
+
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete);
 
 #endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c0282a2..c6f8fa1 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -37,9 +36,9 @@
 /* Device IDs */
 #define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598AT_DUAL_PORT   0x10C8
 #define IXGBE_DEV_ID_82598EB_CX4         0x10DD
 #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
@@ -70,11 +69,11 @@
 #define IXGBE_EIMC      0x00888
 #define IXGBE_EIAC      0x00810
 #define IXGBE_EIAM      0x00890
-#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
+#define IXGBE_IVAR(_i)  (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
 #define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */
 #define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL     0x11068
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
 #define IXGBE_GPIE      0x00898
 
 /* Flow Control Registers */
@@ -86,20 +85,33 @@
 #define IXGBE_TFCS      0x0CE00
 
 /* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/
-#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40))
-#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40))
-#define IXGBE_RDH(_i)   (0x01010 + ((_i) * 0x40))
-#define IXGBE_RDT(_i)   (0x01018 + ((_i) * 0x40))
-#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40))
-#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40))
-#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4))
-					     /* array of 16 (0x02100-0x0213C) */
-#define IXGBE_DCA_RXCTRL(_i)    (0x02200 + ((_i) * 4))
-					     /* array of 16 (0x02200-0x0223C) */
-#define IXGBE_RDRXCTL    0x02F00
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+                          (0x0D014 + ((_i - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+                                 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+                                 (0x0D00C + ((_i - 64) * 0x40))))
+#define IXGBE_RDRXCTL           0x02F00
 #define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
-					     /* 8 of these 0x03C00 - 0x03C1C */
+                                             /* 8 of these 0x03C00 - 0x03C1C */
 #define IXGBE_RXCTRL    0x03000
 #define IXGBE_DROPEN    0x03D04
 #define IXGBE_RXPBSIZE_SHIFT 10
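The per-queue register macros in the hunk above become piecewise because indices 0-15, 16-63 and 64-127 sit in different register windows, as the comments spell out. A compile-time spot check of the SRRCTL mapping, purely as an illustration (BUILD_BUG_ON is the stock helper from linux/kernel.h; this function is not part of the patch):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include "ixgbe_type.h"

static inline void ixgbe_srrctl_map_check(void)
{
	BUILD_BUG_ON(IXGBE_SRRCTL(0)  != 0x02100);	/* 0-15:   0x02100 + n*4 */
	BUILD_BUG_ON(IXGBE_SRRCTL(16) != 0x01414);	/* 16-63:  0x01014 + n*0x40 */
	BUILD_BUG_ON(IXGBE_SRRCTL(64) != 0x0D014);	/* 64-127: 0x0D014 + (n-64)*0x40 */
}

The DCA_RXCTRL macro follows the same three-window layout with different base offsets.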
@@ -107,29 +119,32 @@
 /* Receive Registers */
 #define IXGBE_RXCSUM    0x05000
 #define IXGBE_RFCTL     0x05008
+#define IXGBE_DRECCCTL  0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+/* Multicast Table Array - 128 entries */
 #define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4))
-				   /* Multicast Table Array - 128 entries */
-#define IXGBE_RAL(_i)   (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */
-#define IXGBE_RAH(_i)   (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */
-#define IXGBE_PSRTYPE   0x05480
-				   /* 0x5480-0x54BC Packet split receive type */
+#define IXGBE_RAL(_i)   (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
 #define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4))
-					 /* array of 4096 1-bit vlan filters */
+/* array of 4096 4-bit vlan vmdq indices */
 #define IXGBE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-				     /*array of 4096 4-bit vlan vmdq indicies */
 #define IXGBE_FCTRL     0x05080
 #define IXGBE_VLNCTRL   0x05088
 #define IXGBE_MCSTCTRL  0x05090
 #define IXGBE_MRQC      0x05818
-#define IXGBE_VMD_CTL   0x0581C
 #define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
 #define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
 #define IXGBE_IMIRVP    0x05AC0
+#define IXGBE_VMD_CTL   0x0581C
 #define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
+
 /* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
 #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
 #define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40))
@@ -138,11 +153,10 @@
 #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
 #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
 #define IXGBE_DTXCTL    0x07E00
-#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4))
-					      /* there are 16 of these (0-15) */
+
+#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
 #define IXGBE_TIPG      0x0CB00
-#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) *0x04))
-						      /* there are 8 of these */
+#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_MNGTXMAP  0x0CD10
 #define IXGBE_TIPG_FIBER_DEFAULT 3
 #define IXGBE_TXPBSIZE_SHIFT    10
@@ -154,6 +168,7 @@
 #define IXGBE_IPAV      0x05838
 #define IXGBE_IP4AT     0x05840 /* IPv4 table 0x5840-0x5858 */
 #define IXGBE_IP6AT     0x05880 /* IPv6 table 0x5880-0x588F */
+
 #define IXGBE_WUPL      0x05900
 #define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
 #define IXGBE_FHFT      0x09000 /* Flex host filter table 9000-93FC */
@@ -170,6 +185,8 @@
 #define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
+
+
 /* Stats registers */
 #define IXGBE_CRCERRS   0x04000
 #define IXGBE_ILLERRC   0x04004
@@ -224,7 +241,7 @@
 #define IXGBE_XEC       0x04120
 
 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
-#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
 
 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
@@ -275,23 +292,17 @@
 #define IXGBE_DCA_CTRL  0x11074
 
 /* Diagnostic Registers */
-#define IXGBE_RDSTATCTL 0x02C20
-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN    0x02F08
-#define IXGBE_RIC_DW0   0x02F10
-#define IXGBE_RIC_DW1   0x02F14
-#define IXGBE_RIC_DW2   0x02F18
-#define IXGBE_RIC_DW3   0x02F1C
-#define IXGBE_RDPROBE   0x02F20
-#define IXGBE_TDSTATCTL 0x07C20
-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN    0x07F08
-#define IXGBE_TIC_DW0   0x07F10
-#define IXGBE_TIC_DW1   0x07F14
-#define IXGBE_TIC_DW2   0x07F18
-#define IXGBE_TIC_DW3   0x07F1C
-#define IXGBE_TDPROBE   0x07F20
-#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_RDSTATCTL   0x02C20
+#define IXGBE_RDSTAT(_i)  (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN      0x02F08
+#define IXGBE_RIC_DW(_i)  (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE     0x02F20
+#define IXGBE_TDSTATCTL   0x07C20
+#define IXGBE_TDSTAT(_i)  (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN      0x07F08
+#define IXGBE_TIC_DW(_i)  (0x07F10 + ((_i) * 4))
+#define IXGBE_TDPROBE     0x07F20
+#define IXGBE_TXBUFCTRL   0x0C600
 #define IXGBE_TXBUFDATA0  0x0C610
 #define IXGBE_TXBUFDATA1  0x0C614
 #define IXGBE_TXBUFDATA2  0x0C618
@@ -356,12 +367,10 @@
 #define IXGBE_ANLP2     0x042B4
 #define IXGBE_ATLASCTL  0x04800
 
-/* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN          0x01
-#define IXGBE_RSCCTL_MAXDESC_1      0x00
-#define IXGBE_RSCCTL_MAXDESC_4      0x04
-#define IXGBE_RSCCTL_MAXDESC_8      0x08
-#define IXGBE_RSCCTL_MAXDESC_16     0x0C
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
+#define IXGBE_RDRXCTL_MVMEN         0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
 
 /* CTRL Bit Masks */
 #define IXGBE_CTRL_GIO_DIS      0x00000004 /* Global IO Master Disable bit */
@@ -394,7 +403,7 @@
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
 #define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
@@ -418,10 +427,10 @@
 #define IXGBE_MSCA_MDI_IN_PROG_EN    0x80000000 /* MDI in progress enable */
 
 /* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK  0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGBE_MSRWD_READ_DATA_MASK   0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT  16
+#define IXGBE_MSRWD_WRITE_DATA_MASK     0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT    0
+#define IXGBE_MSRWD_READ_DATA_MASK      0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT     16
 
 /* Atlas registers */
 #define IXGBE_ATLAS_PDN_LPBK    0x24
@@ -436,6 +445,7 @@
 #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0
 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0
 
+
 /* Device Type definitions for new protocol MDIO commands */
 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE               0x1
 #define IXGBE_MDIO_PCS_DEV_TYPE                   0x3
@@ -443,6 +453,8 @@
 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE              0x7
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE     0x1E   /* Device 30 */
 
+#define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */
+
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS       0x1    /* VS1 Status Reg */
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS  0x0008 /* 1 = Link Up */
@@ -456,23 +468,39 @@
 #define IXGBE_MDIO_PHY_XS_RESET        0x8000 /* PHY_XS Reset */
 #define IXGBE_MDIO_PHY_ID_HIGH         0x2 /* PHY ID High Reg*/
 #define IXGBE_MDIO_PHY_ID_LOW          0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Abilty Reg */
+#define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Ability Reg */
 #define IXGBE_MDIO_PHY_SPEED_10G       0x0001 /* 10G capable */
 #define IXGBE_MDIO_PHY_SPEED_1G        0x0010 /* 1G capable */
 
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Address Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE  0x0800
+
+#define IXGBE_MII_SPEED_SELECTION_REG  0x10
+#define IXGBE_MII_RESTART              0x200
+#define IXGBE_MII_AUTONEG_COMPLETE     0x20
+#define IXGBE_MII_AUTONEG_REG          0x0
+
 #define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0
 #define IXGBE_MAX_PHY_ADDR             32
 
 /* PHY IDs*/
-#define TN1010_PHY_ID    0x00A19410
 #define QT2022_PHY_ID    0x0043A400
 
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
+
 /* General purpose Interrupt Enable */
-#define IXGBE_GPIE_MSIX_MODE      0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD            0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN          0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME          0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT    0x80000000
+#define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */
+#define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME         0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT   0x80000000
 
 /* Transmit Flow Control status */
 #define IXGBE_TFCS_TXOFF         0x00000001
@@ -533,7 +561,7 @@
 #define IXGBE_PAP_TXPAUSECNT_MASK   0x0000FFFF /* Pause counter mask */
 
 /* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recylce Mode enable */
+#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recycle Mode enable */
 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
 #define IXGBE_RMCS_RAC          0x00000004
 #define IXGBE_RMCS_DFP          IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
@@ -541,12 +569,15 @@
 #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
 #define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */
 
+
 /* Interrupt register bitmasks */
 
 /* Extended Interrupt Cause Read */
 #define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
 #define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */
-#define IXGBE_EICR_MNG          0x00400000 /* Managability Event Interrupt */
+#define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
 #define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
 #define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
 #define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
@@ -554,11 +585,12 @@
 
 /* Extended Interrupt Cause Set */
 #define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_LSC          IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICS_MNG          IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
-#define IXGBE_EICS_DHER         IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
 #define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
 
@@ -566,7 +598,9 @@
 #define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
-#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Error */
+#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
 #define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
@@ -575,18 +609,20 @@
 #define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
-#define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Error */
-#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
+#define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */
 #define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
 
-#define IXGBE_EIMS_ENABLE_MASK (\
-				IXGBE_EIMS_RTX_QUEUE       | \
-				IXGBE_EIMS_LSC             | \
-				IXGBE_EIMS_TCP_TIMER       | \
-				IXGBE_EIMS_OTHER)
+#define IXGBE_EIMS_ENABLE_MASK ( \
+                                IXGBE_EIMS_RTX_QUEUE       | \
+                                IXGBE_EIMS_LSC             | \
+                                IXGBE_EIMS_TCP_TIMER       | \
+                                IXGBE_EIMS_OTHER)
 
-/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
 #define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
 #define IXGBE_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
 #define IXGBE_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
@@ -623,6 +659,7 @@
 #define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */
 #define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */
 
+
 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
 
 /* STATUS Bit Masks */
@@ -670,16 +707,16 @@
 #define IXGBE_AUTOC_AN_RESTART  0x00001000
 #define IXGBE_AUTOC_FLU         0x00000001
 #define IXGBE_AUTOC_LMS_SHIFT   13
-#define IXGBE_AUTOC_LMS_MASK   (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN  (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN  (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN   (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE    (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK            (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN   (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN  (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN           (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN          (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN    (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE     (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
 
-#define IXGBE_AUTOC_1G_PMA_PMD      0x00000200
-#define IXGBE_AUTOC_10G_PMA_PMD     0x00000180
+#define IXGBE_AUTOC_1G_PMA_PMD         0x00000200
+#define IXGBE_AUTOC_10G_PMA_PMD        0x00000180
 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
 #define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
@@ -705,6 +742,7 @@
 #define IXGBE_LINKS_TL_FAULT    0x00001000
 #define IXGBE_LINKS_SIGNAL      0x00000F00
 
+#define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
 
 /* SW Semaphore Register bitmasks */
@@ -759,6 +797,11 @@
 #define IXGBE_PBANUM0_PTR       0x15
 #define IXGBE_PBANUM1_PTR       0x16
 
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS           0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0        0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1        0x0034
+
 /* EEPROM Commands - SPI */
 #define IXGBE_EEPROM_MAX_RETRY_SPI      5000 /* Max wait 5ms for RDY signal */
 #define IXGBE_EEPROM_STATUS_RDY_SPI     0x01
@@ -766,7 +809,7 @@
 #define IXGBE_EEPROM_WRITE_OPCODE_SPI   0x02  /* EEPROM write opcode */
 #define IXGBE_EEPROM_A8_OPCODE_SPI      0x08  /* opcode bit-3 = addr bit-8 */
 #define IXGBE_EEPROM_WREN_OPCODE_SPI    0x06  /* EEPROM set Write Ena latch */
-/* EEPROM reset Write Enbale latch */
+/* EEPROM reset Write Enable latch */
 #define IXGBE_EEPROM_WRDI_OPCODE_SPI    0x04
 #define IXGBE_EEPROM_RDSR_OPCODE_SPI    0x05  /* EEPROM read Status reg */
 #define IXGBE_EEPROM_WRSR_OPCODE_SPI    0x01  /* EEPROM write Status reg */
@@ -805,26 +848,20 @@
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
 
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
-
 /* Check whether address is multicast.  This is little-endian specific check.*/
 #define IXGBE_IS_MULTICAST(Address) \
-		(bool)(((u8 *)(Address))[0] & ((u8)0x01))
+                (bool)(((u8 *)(Address))[0] & ((u8)0x01))
 
 /* Check whether an address is broadcast. */
 #define IXGBE_IS_BROADCAST(Address)                      \
-		((((u8 *)(Address))[0] == ((u8)0xff)) && \
-		(((u8 *)(Address))[1] == ((u8)0xff)))
+                ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+                (((u8 *)(Address))[1] == ((u8)0xff)))
 
 /* RAH */
 #define IXGBE_RAH_VIND_MASK     0x003C0000
 #define IXGBE_RAH_VIND_SHIFT    18
 #define IXGBE_RAH_AV            0x80000000
-
-/* Filters */
-#define IXGBE_MC_TBL_SIZE       128  /* Multicast Filter Table (4096 bits) */
-#define IXGBE_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+#define IXGBE_CLEAR_VMDQ_ALL    0xFFFFFFFF
 
 /* Header split receive */
 #define IXGBE_RFCTL_ISCSI_DIS       0x00000001
@@ -853,7 +890,7 @@
 #define IXGBE_MAX_FRAME_SZ      0x40040000
 
 #define IXGBE_TDWBAL_HEAD_WB_ENABLE   0x1      /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq. # write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq# write-back enable */
 
 /* Receive Config masks */
 #define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
@@ -866,7 +903,7 @@
 #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
 #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
 #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
-/* Receive Priority Flow Control Enbale */
+/* Receive Priority Flow Control Enable */
 #define IXGBE_FCTRL_RPFCE 0x00004000
 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
 
@@ -896,9 +933,8 @@
 /* Receive Descriptor bit definitions */
 #define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */
 #define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */
-#define IXGBE_RXD_STAT_IXSM     0x04    /* Ignore checksum */
 #define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum caculated */
+#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
 #define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */
 #define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
 #define IXGBE_RXD_STAT_PIF      0x80    /* passed in-exact filter */
@@ -914,7 +950,7 @@
 #define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */
 #define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */
 #define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
-#define IXGBE_RXDADV_HBO        0x00800000
+#define IXGBE_RXDADV_ERR_HBO    0x00800000 /* Header Buffer Overflow */
 #define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
 #define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
 #define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */
@@ -928,15 +964,17 @@
 #define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */
 #define IXGBE_RXD_CFI_SHIFT     12
 
+
 /* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10     /* so many KBs */
-#define IXGBE_SRRCTL_BSIZEPKT_MASK  0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK  0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
+#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000
 
 #define IXGBE_RXDPS_HDRSTAT_HDRSP       0x00008000
 #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
@@ -970,21 +1008,20 @@
 #define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
-
 /* Masks to determine if packets should be dropped due to frame errors */
-#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\
-				      IXGBE_RXD_ERR_CE | \
-				      IXGBE_RXD_ERR_LE | \
-				      IXGBE_RXD_ERR_PE | \
-				      IXGBE_RXD_ERR_OSE | \
-				      IXGBE_RXD_ERR_USE)
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+                                      IXGBE_RXD_ERR_CE | \
+                                      IXGBE_RXD_ERR_LE | \
+                                      IXGBE_RXD_ERR_PE | \
+                                      IXGBE_RXD_ERR_OSE | \
+                                      IXGBE_RXD_ERR_USE)
 
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\
-				      IXGBE_RXDADV_ERR_CE | \
-				      IXGBE_RXDADV_ERR_LE | \
-				      IXGBE_RXDADV_ERR_PE | \
-				      IXGBE_RXDADV_ERR_OSE | \
-				      IXGBE_RXDADV_ERR_USE)
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+                                      IXGBE_RXDADV_ERR_CE | \
+                                      IXGBE_RXDADV_ERR_LE | \
+                                      IXGBE_RXDADV_ERR_PE | \
+                                      IXGBE_RXDADV_ERR_OSE | \
+                                      IXGBE_RXDADV_ERR_USE)
 
 /* Multicast bit mask */
 #define IXGBE_MCSTCTRL_MFE      0x4
@@ -1000,6 +1037,7 @@
 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
 
+
 /* Transmit Descriptor - Legacy */
 struct ixgbe_legacy_tx_desc {
 	u64 buffer_addr;       /* Address of the descriptor's data buffer */
@@ -1007,15 +1045,15 @@
 		__le32 data;
 		struct {
 			__le16 length;    /* Data buffer length */
-			u8 cso; /* Checksum offset */
-			u8 cmd; /* Descriptor control */
+			u8 cso;           /* Checksum offset */
+			u8 cmd;           /* Descriptor control */
 		} flags;
 	} lower;
 	union {
 		__le32 data;
 		struct {
-			u8 status;     /* Descriptor status */
-			u8 css; /* Checksum start */
+			u8 status;        /* Descriptor status */
+			u8 css;           /* Checksum start */
 			__le16 vlan;
 		} fields;
 	} upper;
@@ -1024,7 +1062,7 @@
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
 	struct {
-		__le64 buffer_addr;       /* Address of descriptor's data buf */
+		__le64 buffer_addr;      /* Address of descriptor's data buf */
 		__le32 cmd_type_len;
 		__le32 olinfo_status;
 	} read;
@@ -1039,9 +1077,9 @@
 struct ixgbe_legacy_rx_desc {
 	__le64 buffer_addr; /* Address of the descriptor's data buffer */
 	__le16 length;      /* Length of data DMAed into data buffer */
-	u16 csum;        /* Packet checksum */
-	u8 status;       /* Descriptor status */
-	u8 errors;       /* Descriptor Errors */
+	__le16 csum;        /* Packet checksum */
+	u8 status;          /* Descriptor status */
+	u8 errors;          /* Descriptor Errors */
 	__le16 vlan;
 };
 
@@ -1053,15 +1091,18 @@
 	} read;
 	struct {
 		struct {
-			struct {
-				__le16 pkt_info; /* RSS type, Packet type */
-				__le16 hdr_info; /* Split Header, header len */
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS, Pkt type */
+					__le16 hdr_info; /* Splithdr, hdrlen */
+				} hs_rss;
 			} lo_dword;
 			union {
 				__le32 rss; /* RSS Hash */
 				struct {
 					__le16 ip_id; /* IP id */
-					u16 csum; /* Packet Checksum */
+					__le16 csum; /* Packet Checksum */
 				} csum_ip;
 			} hi_dword;
 		} lower;
@@ -1082,49 +1123,69 @@
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buffer length(bytes) */
+#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */
 #define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
 #define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
 #define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
 #define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
 #define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RDMA  0x04000000 /* RDMA */
 #define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000     /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000    /* DDP hdr type or iSCSI */
 #define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
 #define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
 #define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
 #define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED present in WB */
+#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED pres in WB */
 #define IXGBE_ADVTXD_STAT_RSV   0x0000000C /* STA Reserved */
 #define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC         0x00000080 /* Check Context */
 #define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
-				IXGBE_ADVTXD_POPTS_SHIFT)
+                                 IXGBE_ADVTXD_POPTS_SHIFT)
 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
-				IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_EOM  0x00000400 /* Enable L bit-RDMA DDP hdr */
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST   0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL   0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST  0x00001000 /* Last TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
-#define IXGBE_ADVTXD_POPTS_RSV  0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT  14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT  9  /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT    16  /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
-#define IXGBE_ADVTXD_L4LEN_SHIFT   8  /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT     16  /* Adv ctxt MSS shift */
+                                 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_RSV       0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /* Req requires Markers and CRC */
+#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
 
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
 /* Link speed */
+typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_UNKNOWN   0
 #define IXGBE_LINK_SPEED_100_FULL  0x0008
 #define IXGBE_LINK_SPEED_1GB_FULL  0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_T    0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
 
 
 enum ixgbe_eeprom_type {
@@ -1141,16 +1202,38 @@
 
 enum ixgbe_phy_type {
 	ixgbe_phy_unknown = 0,
-	ixgbe_phy_tn,
 	ixgbe_phy_qt,
-	ixgbe_phy_xaui
+	ixgbe_phy_xaui,
+	ixgbe_phy_tw_tyco,
+	ixgbe_phy_tw_unknown,
+	ixgbe_phy_sfp_avago,
+	ixgbe_phy_sfp_ftl,
+	ixgbe_phy_sfp_unknown,
+	ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID	Module Type
+ * =============
+ * 0	SFP_DA_CU
+ * 1	SFP_SR
+ * 2	SFP_LR
+ */
+enum ixgbe_sfp_type {
+	ixgbe_sfp_type_da_cu = 0,
+	ixgbe_sfp_type_sr = 1,
+	ixgbe_sfp_type_lr = 2,
+	ixgbe_sfp_type_unknown = 0xFFFF
 };
 
 enum ixgbe_media_type {
 	ixgbe_media_type_unknown = 0,
 	ixgbe_media_type_fiber,
 	ixgbe_media_type_copper,
-	ixgbe_media_type_backplane
+	ixgbe_media_type_backplane,
+	ixgbe_media_type_virtual
 };
 
 /* Flow Control Settings */
@@ -1167,6 +1250,8 @@
 	u32 rar_used_count;
 	u32 mc_addr_in_rar_count;
 	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool user_set_promisc;
 };
 
 /* Flow control parameters */
@@ -1242,57 +1327,118 @@
 /* forward declaration */
 struct ixgbe_hw;
 
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+                                  u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+	s32 (*init_params)(struct ixgbe_hw *);
+	s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+	s32 (*write)(struct ixgbe_hw *, u16, u16);
+	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+	s32 (*update_checksum)(struct ixgbe_hw *);
+};
+
 struct ixgbe_mac_operations {
-	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
 	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);
+	s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+	s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+
+	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
-	s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+	                        bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+	                             bool *);
+
+	/* LED */
+	s32 (*led_on)(struct ixgbe_hw *, u32);
+	s32 (*led_off)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+	s32 (*clear_rar)(struct ixgbe_hw *, u32);
+	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+	                           ixgbe_mc_addr_itr);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+	                           ixgbe_mc_addr_itr);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+	s32 (*init_uta_tables)(struct ixgbe_hw *);
+
+	/* Flow Control */
+	s32 (*setup_fc)(struct ixgbe_hw *, s32);
 };
 
 struct ixgbe_phy_operations {
+	s32 (*identify)(struct ixgbe_hw *);
+	s32 (*identify_sfp)(struct ixgbe_hw *);
+	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
 	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
-};
-
-struct ixgbe_mac_info {
-	struct ixgbe_mac_operations	ops;
-	enum ixgbe_mac_type		type;
-	u8				addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8				perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	s32				mc_filter_type;
-	u32				num_rx_queues;
-	u32				num_tx_queues;
-	u32				num_rx_addrs;
-	u32				link_attach_type;
-	u32				link_mode_select;
-	bool				link_settings_loaded;
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+	                        bool);
+	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
 };
 
 struct ixgbe_eeprom_info {
-	enum ixgbe_eeprom_type		type;
-	u16				word_size;
-	u16				address_bits;
+	struct ixgbe_eeprom_operations  ops;
+	enum ixgbe_eeprom_type          type;
+	u32				semaphore_delay;
+	u16                             word_size;
+	u16                             address_bits;
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations     ops;
+	enum ixgbe_mac_type             type;
+	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	s32                             mc_filter_type;
+	u32                             mcft_size;
+	u32                             vft_size;
+	u32                             num_rar_entries;
+	u32                             max_tx_queues;
+	u32                             max_rx_queues;
+	u32                             link_attach_type;
+	u32                             link_mode_select;
+	bool                            link_settings_loaded;
+	bool                            autoneg;
+	bool                            autoneg_failed;
 };
 
 struct ixgbe_phy_info {
-	struct ixgbe_phy_operations	ops;
-
-	enum ixgbe_phy_type		type;
-	u32				addr;
-	u32				id;
-	u32				revision;
-	enum ixgbe_media_type		media_type;
-	u32				autoneg_advertised;
-	bool				autoneg_wait_to_complete;
-};
-
-struct ixgbe_info {
-	enum ixgbe_mac_type		mac;
-	s32 				(*get_invariants)(struct ixgbe_hw *);
-	struct ixgbe_mac_operations	*mac_ops;
+	struct ixgbe_phy_operations     ops;
+	enum ixgbe_phy_type             type;
+	u32                             addr;
+	u32                             id;
+	enum ixgbe_sfp_type             sfp_type;
+	u32                             revision;
+	enum ixgbe_media_type           media_type;
+	bool                            reset_disable;
+	ixgbe_autoneg_advertised        autoneg_advertised;
+	bool                            autoneg_wait_to_complete;
 };
 
 struct ixgbe_hw {
@@ -1311,6 +1457,15 @@
 	bool				adapter_stopped;
 };
 
+struct ixgbe_info {
+	enum ixgbe_mac_type		mac;
+	s32 				(*get_invariants)(struct ixgbe_hw *);
+	struct ixgbe_mac_operations	*mac_ops;
+	struct ixgbe_eeprom_operations	*eeprom_ops;
+	struct ixgbe_phy_operations	*phy_ops;
+};
+
+
 /* Error Codes */
 #define IXGBE_ERR_EEPROM                        -1
 #define IXGBE_ERR_EEPROM_CHECKSUM               -2
@@ -1329,6 +1484,8 @@
 #define IXGBE_ERR_RESET_FAILED                  -15
 #define IXGBE_ERR_SWFW_SYNC                     -16
 #define IXGBE_ERR_PHY_ADDR_INVALID              -17
+#define IXGBE_ERR_I2C                           -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 #endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
new file mode 100644
index 0000000..f292df5
--- /dev/null
+++ b/drivers/net/jme.c
@@ -0,0 +1,3019 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include "jme.h"
+
+static int force_pseudohp = -1;
+static int no_pseudohp = -1;
+static int no_extplug = -1;
+module_param(force_pseudohp, int, 0);
+MODULE_PARM_DESC(force_pseudohp,
+	"Manually enable the pseudo hot-plug feature from the driver instead of the BIOS.");
+module_param(no_pseudohp, int, 0);
+MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
+module_param(no_extplug, int, 0);
+MODULE_PARM_DESC(no_extplug,
+	"Do not use external plug signal for pseudo hot-plug.");
+
+static int
+jme_mdio_read(struct net_device *netdev, int phy, int reg)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, val, again = (reg == MII_BMSR) ? 1 : 0;
+
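+	/* The BMSR link-status bit is latched; read the register twice so the
+	 * second read reflects the current link state. */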
+read_again:
+	jwrite32(jme, JME_SMI, SMI_OP_REQ |
+				smi_phy_addr(phy) |
+				smi_reg_addr(reg));
+
+	wmb();
+	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+		udelay(20);
+		val = jread32(jme, JME_SMI);
+		if ((val & SMI_OP_REQ) == 0)
+			break;
+	}
+
+	if (i == 0) {
+		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
+		return 0;
+	}
+
+	if (again--)
+		goto read_again;
+
+	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
+}
+
+static void
+jme_mdio_write(struct net_device *netdev,
+				int phy, int reg, int val)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i;
+
+	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
+		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
+		smi_phy_addr(phy) | smi_reg_addr(reg));
+
+	wmb();
+	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+		udelay(20);
+		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
+			break;
+	}
+
+	if (i == 0)
+		jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
+
+	return;
+}
+
+static inline void
+jme_reset_phy_processor(struct jme_adapter *jme)
+{
+	u32 val;
+
+	jme_mdio_write(jme->dev,
+			jme->mii_if.phy_id,
+			MII_ADVERTISE, ADVERTISE_ALL |
+			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+		jme_mdio_write(jme->dev,
+				jme->mii_if.phy_id,
+				MII_CTRL1000,
+				ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+	val = jme_mdio_read(jme->dev,
+				jme->mii_if.phy_id,
+				MII_BMCR);
+
+	jme_mdio_write(jme->dev,
+			jme->mii_if.phy_id,
+			MII_BMCR, val | BMCR_RESET);
+
+	return;
+}
+
+static void
+jme_setup_wakeup_frame(struct jme_adapter *jme,
+		u32 *mask, u32 crc, int fnr)
+{
+	int i;
+
+	/*
+	 * Setup CRC pattern
+	 */
+	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
+	wmb();
+	jwrite32(jme, JME_WFODP, crc);
+	wmb();
+
+	/*
+	 * Setup Mask
+	 */
+	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
+		jwrite32(jme, JME_WFOI,
+				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
+				(fnr & WFOI_FRAME_SEL));
+		wmb();
+		jwrite32(jme, JME_WFODP, mask[i]);
+		wmb();
+	}
+}
+
+static inline void
+jme_reset_mac_processor(struct jme_adapter *jme)
+{
+	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
+	u32 crc = 0xCDCDCDCD;
+	u32 gpreg0;
+	int i;
+
+	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
+	udelay(2);
+	jwrite32(jme, JME_GHC, jme->reg_ghc);
+
+	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
+	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
+	jwrite32(jme, JME_RXQDC, 0x00000000);
+	jwrite32(jme, JME_RXNDA, 0x00000000);
+	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
+	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
+	jwrite32(jme, JME_TXQDC, 0x00000000);
+	jwrite32(jme, JME_TXNDA, 0x00000000);
+
+	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
+	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
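+	/* Reset all wake-up frame filters to harmless defaults (all-zero masks). */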
+	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
+		jme_setup_wakeup_frame(jme, mask, crc, i);
+	if (jme->fpgaver)
+		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
+	else
+		gpreg0 = GPREG0_DEFAULT;
+	jwrite32(jme, JME_GPREG0, gpreg0);
+	jwrite32(jme, JME_GPREG1, 0);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
+	jwrite32(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_pm(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
+	pci_set_power_state(jme->pdev, PCI_D0);
+	pci_enable_wake(jme->pdev, PCI_D0, false);
+}
+
+static int
+jme_reload_eeprom(struct jme_adapter *jme)
+{
+	u32 val;
+	int i;
+
+	val = jread32(jme, JME_SMBCSR);
+
+	if (val & SMBCSR_EEPROMD) {
+		val |= SMBCSR_CNACK;
+		jwrite32(jme, JME_SMBCSR, val);
+		val |= SMBCSR_RELOAD;
+		jwrite32(jme, JME_SMBCSR, val);
+		mdelay(12);
+
+		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
+			mdelay(1);
+			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
+				break;
+		}
+
+		if (i == 0) {
+			jeprintk(jme->pdev, "eeprom reload timeout\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static void
+jme_load_macaddr(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	unsigned char macaddr[6];
+	u32 val;
+
+	spin_lock_bh(&jme->macaddr_lock);
+	val = jread32(jme, JME_RXUMA_LO);
+	macaddr[0] = (val >>  0) & 0xFF;
+	macaddr[1] = (val >>  8) & 0xFF;
+	macaddr[2] = (val >> 16) & 0xFF;
+	macaddr[3] = (val >> 24) & 0xFF;
+	val = jread32(jme, JME_RXUMA_HI);
+	macaddr[4] = (val >>  0) & 0xFF;
+	macaddr[5] = (val >>  8) & 0xFF;
+	memcpy(netdev->dev_addr, macaddr, 6);
+	spin_unlock_bh(&jme->macaddr_lock);
+}
+
+static inline void
+jme_set_rx_pcc(struct jme_adapter *jme, int p)
+{
+	switch (p) {
+	case PCC_OFF:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P1:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P2:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	case PCC_P3:
+		jwrite32(jme, JME_PCCRX0,
+			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+		break;
+	default:
+		break;
+	}
+	wmb();
+
+	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+		msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+}
+
+static void
+jme_start_irq(struct jme_adapter *jme)
+{
+	register struct dynpcc_info *dpi = &(jme->dpi);
+
+	jme_set_rx_pcc(jme, PCC_P1);
+	dpi->cur		= PCC_P1;
+	dpi->attempt		= PCC_P1;
+	dpi->cnt		= 0;
+
+	jwrite32(jme, JME_PCCTX,
+			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
+			PCCTXQ0_EN
+		);
+
+	/*
+	 * Enable Interrupts
+	 */
+	jwrite32(jme, JME_IENS, INTR_ENABLE);
+}
+
+static inline void
+jme_stop_irq(struct jme_adapter *jme)
+{
+	/*
+	 * Disable Interrupts
+	 */
+	jwrite32f(jme, JME_IENC, INTR_ENABLE);
+}
+
+static inline void
+jme_enable_shadow(struct jme_adapter *jme)
+{
+	jwrite32(jme,
+		 JME_SHBA_LO,
+		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
+}
+
+static inline void
+jme_disable_shadow(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_SHBA_LO, 0x0);
+}
+
+static u32
+jme_linkstat_from_phy(struct jme_adapter *jme)
+{
+	u32 phylink, bmsr;
+
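+	/* On FPGA-based boards the link state is read from the PHY: register 17
+	 * is a vendor-specific status register, and BMSR supplies the
+	 * autonegotiation-complete bit. */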
+	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
+	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
+	if (bmsr & BMSR_ANCOMP)
+		phylink |= PHY_LINK_AUTONEG_COMPLETE;
+
+	return phylink;
+}
+
+static inline void
+jme_set_phyfifoa(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
+}
+
+static inline void
+jme_set_phyfifob(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
+}
+
+static int
+jme_check_link(struct net_device *netdev, int testonly)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
+	char linkmsg[64];
+	int rc = 0;
+
+	linkmsg[0] = '\0';
+
+	if (jme->fpgaver)
+		phylink = jme_linkstat_from_phy(jme);
+	else
+		phylink = jread32(jme, JME_PHY_LINK);
+
+	if (phylink & PHY_LINK_UP) {
+		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
+			/*
+			 * If autonegotiation was not enabled, the speed/duplex
+			 * info must be obtained from the SMI (MII) registers.
+			 */
+			phylink = PHY_LINK_UP;
+
+			bmcr = jme_mdio_read(jme->dev,
+						jme->mii_if.phy_id,
+						MII_BMCR);
+
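+			/* Decode the forced speed from the BMCR speed bits (1000/100/10). */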
+			phylink |= ((bmcr & BMCR_SPEED1000) &&
+					(bmcr & BMCR_SPEED100) == 0) ?
+					PHY_LINK_SPEED_1000M :
+					(bmcr & BMCR_SPEED100) ?
+					PHY_LINK_SPEED_100M :
+					PHY_LINK_SPEED_10M;
+
+			phylink |= (bmcr & BMCR_FULLDPLX) ?
+					 PHY_LINK_DUPLEX : 0;
+
+			strcat(linkmsg, "Forced: ");
+		} else {
+			/*
+			 * Keep polling until speed/duplex resolution completes
+			 */
+			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
+				--cnt) {
+
+				udelay(1);
+
+				if (jme->fpgaver)
+					phylink = jme_linkstat_from_phy(jme);
+				else
+					phylink = jread32(jme, JME_PHY_LINK);
+			}
+			if (!cnt)
+				jeprintk(jme->pdev,
+					"Timed out waiting for speed/duplex resolution.\n");
+
+			strcat(linkmsg, "ANed: ");
+		}
+
+		if (jme->phylink == phylink) {
+			rc = 1;
+			goto out;
+		}
+		if (testonly)
+			goto out;
+
+		jme->phylink = phylink;
+
+		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
+					GHC_SPEED_100M |
+					GHC_SPEED_1000M |
+					GHC_DPX);
+		switch (phylink & PHY_LINK_SPEED_MASK) {
+		case PHY_LINK_SPEED_10M:
+			ghc |= GHC_SPEED_10M;
+			strcat(linkmsg, "10 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifoa(jme);
+			break;
+		case PHY_LINK_SPEED_100M:
+			ghc |= GHC_SPEED_100M;
+			strcat(linkmsg, "100 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifob(jme);
+			break;
+		case PHY_LINK_SPEED_1000M:
+			ghc |= GHC_SPEED_1000M;
+			strcat(linkmsg, "1000 Mbps, ");
+			if (is_buggy250(jme->pdev->device, jme->chiprev))
+				jme_set_phyfifoa(jme);
+			break;
+		default:
+			break;
+		}
+		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
+
+		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
+					"Full-Duplex, " :
+					"Half-Duplex, ");
+
+		if (phylink & PHY_LINK_MDI_STAT)
+			strcat(linkmsg, "MDI-X");
+		else
+			strcat(linkmsg, "MDI");
+
+		if (phylink & PHY_LINK_DUPLEX) {
+			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
+		} else {
+			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
+						TXMCS_BACKOFF |
+						TXMCS_CARRIERSENSE |
+						TXMCS_COLLISION);
+			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
+				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+				TXTRHD_TXREN |
+				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+		}
+
+		jme->reg_ghc = ghc;
+		jwrite32(jme, JME_GHC, ghc);
+
+		msg_link(jme, "Link is up at %s.\n", linkmsg);
+		netif_carrier_on(netdev);
+	} else {
+		if (testonly)
+			goto out;
+
+		msg_link(jme, "Link is down.\n");
+		jme->phylink = 0;
+		netif_carrier_off(netdev);
+	}
+
+out:
+	return rc;
+}
+
+static int
+jme_setup_tx_resources(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+
+	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+				   &(txring->dmaalloc),
+				   GFP_ATOMIC);
+
+	if (!txring->alloc) {
+		txring->desc = NULL;
+		txring->dmaalloc = 0;
+		txring->dma = 0;
+		return -ENOMEM;
+	}
+
+	/*
+	 * Align descriptors to a 16-byte boundary
+	 */
+	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
+						RING_DESC_ALIGN);
+	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
+	txring->next_to_use	= 0;
+	atomic_set(&txring->next_to_clean, 0);
+	atomic_set(&txring->nr_free, jme->tx_ring_size);
+
+	/*
+	 * Initialize Transmit Descriptors
+	 */
+	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
+	memset(txring->bufinf, 0,
+		sizeof(struct jme_buffer_info) * jme->tx_ring_size);
+
+	return 0;
+}
+
+static void
+jme_free_tx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf;
+
+	if (txring->alloc) {
+		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+			txbi = txring->bufinf + i;
+			if (txbi->skb) {
+				dev_kfree_skb(txbi->skb);
+				txbi->skb = NULL;
+			}
+			txbi->mapping		= 0;
+			txbi->len		= 0;
+			txbi->nr_desc		= 0;
+			txbi->start_xmit	= 0;
+		}
+
+		dma_free_coherent(&(jme->pdev->dev),
+				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+				  txring->alloc,
+				  txring->dmaalloc);
+
+		txring->alloc		= NULL;
+		txring->desc		= NULL;
+		txring->dmaalloc	= 0;
+		txring->dma		= 0;
+	}
+	txring->next_to_use	= 0;
+	atomic_set(&txring->next_to_clean, 0);
+	atomic_set(&txring->nr_free, 0);
+
+}
+
+static inline void
+jme_enable_tx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Select Queue 0
+	 */
+	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
+	wmb();
+
+	/*
+	 * Setup TX Queue 0 DMA Base Address
+	 */
+	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
+	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+
+	/*
+	 * Setup TX Descriptor Count
+	 */
+	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
+
+	/*
+	 * Enable TX Engine
+	 */
+	wmb();
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_ENABLE);
+
+}
+
+static inline void
+jme_restart_tx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Restart TX Engine
+	 */
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_ENABLE);
+}
+
+static inline void
+jme_disable_tx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable TX Engine
+	 */
+	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
+	wmb();
+
+	val = jread32(jme, JME_TXCS);
+	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_TXCS);
+		rmb();
+	}
+
+	if (!i)
+		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
+}
+
+static void
+jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = jme->rxring;
+	register struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxdesc += i;
+	rxbi += i;
+
+	rxdesc->dw[0] = 0;
+	rxdesc->dw[1] = 0;
+	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
+	rxdesc->desc1.bufaddrl	= cpu_to_le32(
+					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
+	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
+	if (jme->dev->features & NETIF_F_HIGHDMA)
+		rxdesc->desc1.flags = RXFLAG_64BIT;
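+	/* Make the buffer address and length visible before handing the
+	 * descriptor back to the hardware. */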
+	wmb();
+	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
+}
+
+static int
+jme_make_new_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf + i;
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(jme->dev,
+		jme->dev->mtu + RX_EXTRA_LEN);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	rxbi->skb = skb;
+	rxbi->len = skb_tailroom(skb);
+	rxbi->mapping = pci_map_page(jme->pdev,
+					virt_to_page(skb->data),
+					offset_in_page(skb->data),
+					rxbi->len,
+					PCI_DMA_FROMDEVICE);
+
+	return 0;
+}
+
+static void
+jme_free_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxbi += i;
+
+	if (rxbi->skb) {
+		pci_unmap_page(jme->pdev,
+				 rxbi->mapping,
+				 rxbi->len,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(rxbi->skb);
+		rxbi->skb = NULL;
+		rxbi->mapping = 0;
+		rxbi->len = 0;
+	}
+}
+
+static void
+jme_free_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	if (rxring->alloc) {
+		for (i = 0 ; i < jme->rx_ring_size ; ++i)
+			jme_free_rx_buf(jme, i);
+
+		dma_free_coherent(&(jme->pdev->dev),
+				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+				  rxring->alloc,
+				  rxring->dmaalloc);
+		rxring->alloc    = NULL;
+		rxring->desc     = NULL;
+		rxring->dmaalloc = 0;
+		rxring->dma      = 0;
+	}
+	rxring->next_to_use   = 0;
+	atomic_set(&rxring->next_to_clean, 0);
+}
+
+static int
+jme_setup_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+				   &(rxring->dmaalloc),
+				   GFP_ATOMIC);
+	if (!rxring->alloc) {
+		rxring->desc = NULL;
+		rxring->dmaalloc = 0;
+		rxring->dma = 0;
+		return -ENOMEM;
+	}
+
+	/*
+	 * Align descriptors to a 16-byte boundary
+	 */
+	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
+						RING_DESC_ALIGN);
+	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
+	rxring->next_to_use	= 0;
+	atomic_set(&rxring->next_to_clean, 0);
+
+	/*
+	 * Initialize Receive Descriptors
+	 */
+	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
+		if (unlikely(jme_make_new_rx_buf(jme, i))) {
+			jme_free_rx_resources(jme);
+			return -ENOMEM;
+		}
+
+		jme_set_clean_rxdesc(jme, i);
+	}
+
+	return 0;
+}
+
+static inline void
+jme_enable_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Select Queue 0
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0);
+	wmb();
+
+	/*
+	 * Setup RX DMA Base Address
+	 */
+	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
+	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+
+	/*
+	 * Setup RX Descriptor Count
+	 */
+	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
+
+	/*
+	 * Setup Unicast Filter
+	 */
+	jme_set_multi(jme->dev);
+
+	/*
+	 * Enable RX Engine
+	 */
+	wmb();
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0 |
+				RXCS_ENABLE |
+				RXCS_QST);
+}
+
+static inline void
+jme_restart_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Start RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0 |
+				RXCS_ENABLE |
+				RXCS_QST);
+}
+
+static inline void
+jme_disable_rx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
+	wmb();
+
+	val = jread32(jme, JME_RXCS);
+	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_RXCS);
+		rmb();
+	}
+
+	if (!i)
+		jeprintk(jme->pdev, "Disable RX engine timeout.\n");
+
+}
+
+static int
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+{
+	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
+		return false;
+
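+	/* TCP/UDP checksum failures are only reported for non-fragmented
+	 * packets (MF flag clear). */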
+	if (unlikely(!(flags & RXWBFLAG_MF) &&
+	(flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
+		msg_rx_err(jme, "TCP Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	if (unlikely(!(flags & RXWBFLAG_MF) &&
+	(flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
+		msg_rx_err(jme, "UDP Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
+		msg_rx_err(jme, "IPv4 Checksum error.\n");
+		goto out_sumerr;
+	}
+
+	return true;
+
+out_sumerr:
+	return false;
+}
+
+static void
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	struct sk_buff *skb;
+	int framesize;
+
+	rxdesc += idx;
+	rxbi += idx;
+
+	skb = rxbi->skb;
+	pci_dma_sync_single_for_cpu(jme->pdev,
+					rxbi->mapping,
+					rxbi->len,
+					PCI_DMA_FROMDEVICE);
+
+	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
+		pci_dma_sync_single_for_device(jme->pdev,
+						rxbi->mapping,
+						rxbi->len,
+						PCI_DMA_FROMDEVICE);
+
+		++(NET_STAT(jme).rx_dropped);
+	} else {
+		framesize = le16_to_cpu(rxdesc->descwb.framesize)
+				- RX_PREPAD_SIZE;
+
+		skb_reserve(skb, RX_PREPAD_SIZE);
+		skb_put(skb, framesize);
+		skb->protocol = eth_type_trans(skb, jme->dev);
+
+		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
+			if (jme->vlgrp) {
+				jme->jme_vlan_rx(skb, jme->vlgrp,
+					le32_to_cpu(rxdesc->descwb.vlan));
+				NET_STAT(jme).rx_bytes += 4;
+			}
+		} else {
+			jme->jme_rx(skb);
+		}
+
+		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
+				RXWBFLAG_DEST_MUL)
+			++(NET_STAT(jme).multicast);
+
+		jme->dev->last_rx = jiffies;
+		NET_STAT(jme).rx_bytes += framesize;
+		++(NET_STAT(jme).rx_packets);
+	}
+
+	jme_set_clean_rxdesc(jme, idx);
+
+}
+
+static int
+jme_process_receive(struct jme_adapter *jme, int limit)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct rxdesc *rxdesc = rxring->desc;
+	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
+
+	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
+		goto out_inc;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		goto out_inc;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		goto out_inc;
+
+	i = atomic_read(&rxring->next_to_clean);
+	while (limit-- > 0) {
+		rxdesc = rxring->desc;
+		rxdesc += i;
+
+		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
+		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
+			goto out;
+
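+		/* Number of descriptors the hardware used for this frame,
+		 * taken from the write-back descriptor. */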
+		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
+
+		if (unlikely(desccnt > 1 ||
+		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
+
+			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
+				++(NET_STAT(jme).rx_crc_errors);
+			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
+				++(NET_STAT(jme).rx_fifo_errors);
+			else
+				++(NET_STAT(jme).rx_errors);
+
+			if (desccnt > 1)
+				limit -= desccnt - 1;
+
+			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
+				jme_set_clean_rxdesc(jme, j);
+				j = (j + 1) & (mask);
+			}
+
+		} else {
+			jme_alloc_and_feed_skb(jme, i);
+		}
+
+		i = (i + desccnt) & (mask);
+	}
+
+out:
+	atomic_set(&rxring->next_to_clean, i);
+
+out_inc:
+	atomic_inc(&jme->rx_cleaning);
+
+	return limit > 0 ? limit : 0;
+
+}
+
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
+{
+	if (likely(atmp == dpi->cur)) {
+		dpi->cnt = 0;
+		return;
+	}
+
+	if (dpi->attempt == atmp) {
+		++(dpi->cnt);
+	} else {
+		dpi->attempt = atmp;
+		dpi->cnt = 0;
+	}
+
+}
+
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+	register struct dynpcc_info *dpi = &(jme->dpi);
+
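+	/* Pick an RX coalescing level from recent load: a high byte rate
+	 * selects P3, a high packet or interrupt rate selects P2,
+	 * otherwise fall back to P1. */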
+	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
+		jme_attempt_pcc(dpi, PCC_P3);
+	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
+	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
+		jme_attempt_pcc(dpi, PCC_P2);
+	else
+		jme_attempt_pcc(dpi, PCC_P1);
+
+	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
+		if (dpi->attempt < dpi->cur)
+			tasklet_schedule(&jme->rxclean_task);
+		jme_set_rx_pcc(jme, dpi->attempt);
+		dpi->cur = dpi->attempt;
+		dpi->cnt = 0;
+	}
+}
+
+static void
+jme_start_pcc_timer(struct jme_adapter *jme)
+{
+	struct dynpcc_info *dpi = &(jme->dpi);
+	dpi->last_bytes		= NET_STAT(jme).rx_bytes;
+	dpi->last_pkts		= NET_STAT(jme).rx_packets;
+	dpi->intr_cnt		= 0;
+	jwrite32(jme, JME_TMCSR,
+		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
+}
+
+static inline void
+jme_stop_pcc_timer(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_TMCSR, 0);
+}
+
+static void
+jme_shutdown_nic(struct jme_adapter *jme)
+{
+	u32 phylink;
+
+	phylink = jme_linkstat_from_phy(jme);
+
+	if (!(phylink & PHY_LINK_UP)) {
+		/*
+		 * Disable all interrupts before arming the timer
+		 */
+		jme_stop_irq(jme);
+		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
+	}
+}
+
+static void
+jme_pcc_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct net_device *netdev = jme->dev;
+
+	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
+		jme_shutdown_nic(jme);
+		return;
+	}
+
+	if (unlikely(!netif_carrier_ok(netdev) ||
+		(atomic_read(&jme->link_changing) != 1)
+	)) {
+		jme_stop_pcc_timer(jme);
+		return;
+	}
+
+	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+		jme_dynamic_pcc(jme);
+
+	jme_start_pcc_timer(jme);
+}
+
+static inline void
+jme_polling_mode(struct jme_adapter *jme)
+{
+	jme_set_rx_pcc(jme, PCC_OFF);
+}
+
+static inline void
+jme_interrupt_mode(struct jme_adapter *jme)
+{
+	jme_set_rx_pcc(jme, PCC_P1);
+}
+
+static inline int
+jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
+{
+	u32 apmc;
+	apmc = jread32(jme, JME_APMC);
+	return apmc & JME_APMC_PSEUDO_HP_EN;
+}
+
+static void
+jme_start_shutdown_timer(struct jme_adapter *jme)
+{
+	u32 apmc;
+
+	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
+	apmc &= ~JME_APMC_EPIEN_CTRL;
+	if (!no_extplug) {
+		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
+		wmb();
+	}
+	jwrite32f(jme, JME_APMC, apmc);
+
+	jwrite32f(jme, JME_TIMER2, 0);
+	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+	jwrite32(jme, JME_TMCSR,
+		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
+}
+
+static void
+jme_stop_shutdown_timer(struct jme_adapter *jme)
+{
+	u32 apmc;
+
+	jwrite32f(jme, JME_TMCSR, 0);
+	jwrite32f(jme, JME_TIMER2, 0);
+	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+
+	apmc = jread32(jme, JME_APMC);
+	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
+	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
+	wmb();
+	jwrite32f(jme, JME_APMC, apmc);
+}
+
+static void
+jme_link_change_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct net_device *netdev = jme->dev;
+	int rc;
+
+	while (!atomic_dec_and_test(&jme->link_changing)) {
+		atomic_inc(&jme->link_changing);
+		msg_intr(jme, "Failed to get link change lock.\n");
+		while (atomic_read(&jme->link_changing) != 1)
+			msg_intr(jme, "Waiting for link change lock.\n");
+	}
+
+	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
+		goto out;
+
+	jme->old_mtu = netdev->mtu;
+	netif_stop_queue(netdev);
+	if (jme_pseudo_hotplug_enabled(jme))
+		jme_stop_shutdown_timer(jme);
+
+	jme_stop_pcc_timer(jme);
+	tasklet_disable(&jme->txclean_task);
+	tasklet_disable(&jme->rxclean_task);
+	tasklet_disable(&jme->rxempty_task);
+
+	if (netif_carrier_ok(netdev)) {
+		jme_reset_ghc_speed(jme);
+		jme_disable_rx_engine(jme);
+		jme_disable_tx_engine(jme);
+		jme_reset_mac_processor(jme);
+		jme_free_rx_resources(jme);
+		jme_free_tx_resources(jme);
+
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_polling_mode(jme);
+
+		netif_carrier_off(netdev);
+	}
+
+	jme_check_link(netdev, 0);
+	if (netif_carrier_ok(netdev)) {
+		rc = jme_setup_rx_resources(jme);
+		if (rc) {
+			jeprintk(jme->pdev, "Error allocating RX resources"
+				", device STOPPED!\n");
+			goto out_enable_tasklet;
+		}
+
+		rc = jme_setup_tx_resources(jme);
+		if (rc) {
+			jeprintk(jme->pdev, "Error allocating TX resources"
+				", device STOPPED!\n");
+			goto err_out_free_rx_resources;
+		}
+
+		jme_enable_rx_engine(jme);
+		jme_enable_tx_engine(jme);
+
+		netif_start_queue(netdev);
+
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_interrupt_mode(jme);
+
+		jme_start_pcc_timer(jme);
+	} else if (jme_pseudo_hotplug_enabled(jme)) {
+		jme_start_shutdown_timer(jme);
+	}
+
+	goto out_enable_tasklet;
+
+err_out_free_rx_resources:
+	jme_free_rx_resources(jme);
+out_enable_tasklet:
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+out:
+	atomic_inc(&jme->link_changing);
+}
+
+static void
+jme_rx_clean_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	jme_process_receive(jme, jme->rx_ring_size);
+	++(dpi->intr_cnt);
+
+}
+
+static int
+jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
+{
+	struct jme_adapter *jme = jme_napi_priv(holder);
+	struct net_device *netdev = jme->dev;
+	int rest;
+
+	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
+
+	while (atomic_read(&jme->rx_empty) > 0) {
+		atomic_dec(&jme->rx_empty);
+		++(NET_STAT(jme).rx_dropped);
+		jme_restart_rx_engine(jme);
+	}
+	atomic_inc(&jme->rx_empty);
+
+	if (rest) {
+		JME_RX_COMPLETE(netdev, holder);
+		jme_interrupt_mode(jme);
+	}
+
+	JME_NAPI_WEIGHT_SET(budget, rest);
+	return JME_NAPI_WEIGHT_VAL(budget) - rest;
+}
+
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		return;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		return;
+
+	msg_rx_status(jme, "RX Queue Full!\n");
+
+	jme_rx_clean_tasklet(arg);
+
+	while (atomic_read(&jme->rx_empty) > 0) {
+		atomic_dec(&jme->rx_empty);
+		++(NET_STAT(jme).rx_dropped);
+		jme_restart_rx_engine(jme);
+	}
+	atomic_inc(&jme->rx_empty);
+}
+
+static void
+jme_wake_queue_if_stopped(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = jme->txring;
+
+	smp_wmb();
+	if (unlikely(netif_queue_stopped(jme->dev) &&
+	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
+		msg_tx_done(jme, "TX Queue Woken.\n");
+		netif_wake_queue(jme->dev);
+	}
+
+}
+
+static void
+jme_tx_clean_tasklet(unsigned long arg)
+{
+	struct jme_adapter *jme = (struct jme_adapter *)arg;
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct txdesc *txdesc = txring->desc;
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
+	int i, j, cnt = 0, max, err, mask;
+
+	tx_dbg(jme, "Into txclean.\n");
+
+	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
+		goto out;
+
+	if (unlikely(atomic_read(&jme->link_changing) != 1))
+		goto out;
+
+	if (unlikely(!netif_carrier_ok(jme->dev)))
+		goto out;
+
+	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
+	mask = jme->tx_ring_mask;
+
+	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
+
+		ctxbi = txbi + i;
+
+		if (likely(ctxbi->skb &&
+		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
+
+			tx_dbg(jme, "txclean: %d+%d@%lu\n",
+					i, ctxbi->nr_desc, jiffies);
+
+			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
+
+			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
+				ttxbi = txbi + ((i + j) & (mask));
+				txdesc[(i + j) & (mask)].dw[0] = 0;
+
+				pci_unmap_page(jme->pdev,
+						 ttxbi->mapping,
+						 ttxbi->len,
+						 PCI_DMA_TODEVICE);
+
+				ttxbi->mapping = 0;
+				ttxbi->len = 0;
+			}
+
+			dev_kfree_skb(ctxbi->skb);
+
+			cnt += ctxbi->nr_desc;
+
+			if (unlikely(err)) {
+				++(NET_STAT(jme).tx_carrier_errors);
+			} else {
+				++(NET_STAT(jme).tx_packets);
+				NET_STAT(jme).tx_bytes += ctxbi->len;
+			}
+
+			ctxbi->skb = NULL;
+			ctxbi->len = 0;
+			ctxbi->start_xmit = 0;
+
+		} else {
+			break;
+		}
+
+		i = (i + ctxbi->nr_desc) & mask;
+
+		ctxbi->nr_desc = 0;
+	}
+
+	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
+	atomic_set(&txring->next_to_clean, i);
+	atomic_add(cnt, &txring->nr_free);
+
+	jme_wake_queue_if_stopped(jme);
+
+out:
+	atomic_inc(&jme->tx_cleaning);
+}
+
+static void
+jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
+{
+	/*
+	 * Disable interrupt
+	 */
+	jwrite32f(jme, JME_IENC, INTR_ENABLE);
+
+	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+		/*
+		 * A link change event is critical;
+		 * all other events are ignored.
+		 */
+		jwrite32(jme, JME_IEVE, intrstat);
+		tasklet_schedule(&jme->linkch_task);
+		goto out_reenable;
+	}
+
+	if (intrstat & INTR_TMINTR) {
+		jwrite32(jme, JME_IEVE, INTR_TMINTR);
+		tasklet_schedule(&jme->pcc_task);
+	}
+
+	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
+		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
+		tasklet_schedule(&jme->txclean_task);
+	}
+
+	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
+						     INTR_PCCRX0 |
+						     INTR_RX0EMP)) |
+					INTR_RX0);
+	}
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		if (intrstat & INTR_RX0EMP)
+			atomic_inc(&jme->rx_empty);
+
+		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
+				jme_polling_mode(jme);
+				JME_RX_SCHEDULE(jme);
+			}
+		}
+	} else {
+		if (intrstat & INTR_RX0EMP) {
+			atomic_inc(&jme->rx_empty);
+			tasklet_hi_schedule(&jme->rxempty_task);
+		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
+			tasklet_hi_schedule(&jme->rxclean_task);
+		}
+	}
+
+out_reenable:
+	/*
+	 * Re-enable interrupt
+	 */
+	jwrite32f(jme, JME_IENS, INTR_ENABLE);
+}
+
+static irqreturn_t
+jme_intr(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 intrstat;
+
+	intrstat = jread32(jme, JME_IEVE);
+
+	/*
+	 * Check if it's really an interrupt for us
+	 */
+	if (unlikely(intrstat == 0))
+		return IRQ_NONE;
+
+	/*
+	 * Check if the device still exists (an all-ones read means the PCI device is gone)
+	 */
+	if (unlikely(intrstat == ~((typeof(intrstat))0)))
+		return IRQ_NONE;
+
+	jme_intr_msi(jme, intrstat);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msi(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 intrstat;
+
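+	/* In MSI mode the interrupt status is posted by the NIC into a DMA'd
+	 * shadow register area, so read it from there instead of MMIO. */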
+	pci_dma_sync_single_for_cpu(jme->pdev,
+				    jme->shadow_dma,
+				    sizeof(u32) * SHADOW_REG_NR,
+				    PCI_DMA_FROMDEVICE);
+	intrstat = jme->shadow_regs[SHADOW_IEVE];
+	jme->shadow_regs[SHADOW_IEVE] = 0;
+
+	jme_intr_msi(jme, intrstat);
+
+	return IRQ_HANDLED;
+}
+
+static void
+jme_reset_link(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
+}
+
+static void
+jme_restart_an(struct jme_adapter *jme)
+{
+	u32 bmcr;
+
+	spin_lock_bh(&jme->phy_lock);
+	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+	spin_unlock_bh(&jme->phy_lock);
+}
+
+static int
+jme_request_irq(struct jme_adapter *jme)
+{
+	int rc;
+	struct net_device *netdev = jme->dev;
+	irq_handler_t handler = jme_intr;
+	int irq_flags = IRQF_SHARED;
+
+	if (!pci_enable_msi(jme->pdev)) {
+		set_bit(JME_FLAG_MSI, &jme->flags);
+		handler = jme_msi;
+		irq_flags = 0;
+	}
+
+	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
+			  netdev);
+	if (rc) {
+		jeprintk(jme->pdev,
+			"Unable to request %s interrupt (return: %d)\n",
+			test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
+			rc);
+
+		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+			pci_disable_msi(jme->pdev);
+			clear_bit(JME_FLAG_MSI, &jme->flags);
+		}
+	} else {
+		netdev->irq = jme->pdev->irq;
+	}
+
+	return rc;
+}
+
+static void
+jme_free_irq(struct jme_adapter *jme)
+{
+	free_irq(jme->pdev->irq, jme->dev);
+	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+		pci_disable_msi(jme->pdev);
+		clear_bit(JME_FLAG_MSI, &jme->flags);
+		jme->dev->irq = jme->pdev->irq;
+	}
+}
+
+static int
+jme_open(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc;
+
+	jme_clear_pm(jme);
+	JME_NAPI_ENABLE(jme);
+
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+
+	rc = jme_request_irq(jme);
+	if (rc)
+		goto err_out;
+
+	jme_enable_shadow(jme);
+	jme_start_irq(jme);
+
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+	else
+		jme_reset_phy_processor(jme);
+
+	jme_reset_link(jme);
+
+	return 0;
+
+err_out:
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	return rc;
+}
+
+static void
+jme_set_100m_half(struct jme_adapter *jme)
+{
+	u32 bmcr, tmp;
+
+	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+		       BMCR_SPEED1000 | BMCR_FULLDPLX);
+	tmp |= BMCR_SPEED100;
+
+	if (bmcr != tmp)
+		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
+
+	if (jme->fpgaver)
+		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
+	else
+		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+}
+
+#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
+static void
+jme_wait_link(struct jme_adapter *jme)
+{
+	u32 phylink, to = JME_WAIT_LINK_TIME;
+
+	mdelay(1000);
+	phylink = jme_linkstat_from_phy(jme);
+	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
+		mdelay(10);
+		phylink = jme_linkstat_from_phy(jme);
+	}
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
+}
+
+static int
+jme_close(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+
+	jme_stop_irq(jme);
+	jme_disable_shadow(jme);
+	jme_free_irq(jme);
+
+	JME_NAPI_DISABLE(jme);
+
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);
+
+	jme_reset_ghc_speed(jme);
+	jme_disable_rx_engine(jme);
+	jme_disable_tx_engine(jme);
+	jme_reset_mac_processor(jme);
+	jme_free_rx_resources(jme);
+	jme_free_tx_resources(jme);
+	jme->phylink = 0;
+	jme_phy_off(jme);
+
+	return 0;
+}
+
+static int
+jme_alloc_txdesc(struct jme_adapter *jme,
+			struct sk_buff *skb)
+{
+	struct jme_ring *txring = jme->txring;
+	int idx, nr_alloc, mask = jme->tx_ring_mask;
+
+	idx = txring->next_to_use;
+	nr_alloc = skb_shinfo(skb)->nr_frags + 2;
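+	/* Reserve the command descriptor, one descriptor for the linear data,
+	 * and one per page fragment. */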
+
+	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
+		return -1;
+
+	atomic_sub(nr_alloc, &txring->nr_free);
+
+	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
+
+	return idx;
+}
+
+static void
+jme_fill_tx_map(struct pci_dev *pdev,
+		struct txdesc *txdesc,
+		struct jme_buffer_info *txbi,
+		struct page *page,
+		u32 page_offset,
+		u32 len,
+		u8 hidma)
+{
+	dma_addr_t dmaaddr;
+
+	dmaaddr = pci_map_page(pdev,
+				page,
+				page_offset,
+				len,
+				PCI_DMA_TODEVICE);
+
+	pci_dma_sync_single_for_device(pdev,
+				       dmaaddr,
+				       len,
+				       PCI_DMA_TODEVICE);
+
+	txdesc->dw[0] = 0;
+	txdesc->dw[1] = 0;
+	txdesc->desc2.flags	= TXFLAG_OWN;
+	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
+	txdesc->desc2.datalen	= cpu_to_le16(len);
+	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
+	txdesc->desc2.bufaddrl	= cpu_to_le32(
+					(__u64)dmaaddr & 0xFFFFFFFFUL);
+
+	txbi->mapping = dmaaddr;
+	txbi->len = len;
+}
+
+static void
+jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = jme->txring;
+	struct txdesc *txdesc = txring->desc, *ctxdesc;
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+	int mask = jme->tx_ring_mask;
+	struct skb_frag_struct *frag;
+	u32 len;
+
+	for (i = 0 ; i < nr_frags ; ++i) {
+		frag = &skb_shinfo(skb)->frags[i];
+		ctxdesc = txdesc + ((idx + i + 2) & (mask));
+		ctxbi = txbi + ((idx + i + 2) & (mask));
+
+		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
+				 frag->page_offset, frag->size, hidma);
+	}
+
+	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+	ctxdesc = txdesc + ((idx + 1) & (mask));
+	ctxbi = txbi + ((idx + 1) & (mask));
+	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+			offset_in_page(skb->data), len, hidma);
+
+}
+
+static int
+jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
+{
+	if (unlikely(skb_shinfo(skb)->gso_size &&
+			skb_header_cloned(skb) &&
+			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+		dev_kfree_skb(skb);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+jme_tx_tso(struct sk_buff *skb,
+		u16 *mss, u8 *flags)
+{
+	*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
+	if (*mss) {
+		*flags |= TXFLAG_LSEN;
+
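+		/* Prime the TCP checksum with the pseudo-header checksum so the
+		 * hardware can complete it while segmenting. */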
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								iph->daddr, 0,
+								IPPROTO_TCP,
+								0);
+		} else {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+								&ip6h->daddr, 0,
+								IPPROTO_TCP,
+								0);
+		}
+
+		return 0;
+	}
+
+	return 1;
+}
+
+static void
+jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 ip_proto;
+
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			ip_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			ip_proto = 0;
+			break;
+		}
+
+		switch (ip_proto) {
+		case IPPROTO_TCP:
+			*flags |= TXFLAG_TCPCS;
+			break;
+		case IPPROTO_UDP:
+			*flags |= TXFLAG_UDPCS;
+			break;
+		default:
+			msg_tx_err(jme, "Unsupported upper-layer protocol.\n");
+			break;
+		}
+	}
+}
+
+static inline void
+jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
+{
+	if (vlan_tx_tag_present(skb)) {
+		*flags |= TXFLAG_TAGON;
+		*vlan = vlan_tx_tag_get(skb);
+	}
+}
+
+static int
+jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = jme->txring;
+	struct txdesc *txdesc;
+	struct jme_buffer_info *txbi;
+	u8 flags;
+
+	txdesc = (struct txdesc *)txring->desc + idx;
+	txbi = txring->bufinf + idx;
+
+	txdesc->dw[0] = 0;
+	txdesc->dw[1] = 0;
+	txdesc->dw[2] = 0;
+	txdesc->dw[3] = 0;
+	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+	/*
+	 * Set the OWN bit last, in case the kernel queues packets faster
+	 * than the NIC processes them and the NIC picks up this descriptor
+	 * before we tell it to start sending this TX queue.
+	 * All other fields are already filled in correctly.
+	 */
+	wmb();
+	flags = TXFLAG_OWN | TXFLAG_INT;
+	/*
+	 * Set checksum flags only when not doing TSO
+	 */
+	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+		jme_tx_csum(jme, skb, &flags);
+	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+	txdesc->desc1.flags = flags;
+	/*
+	 * Set the TX buffer info after telling the NIC to send,
+	 * for better tx_clean timing
+	 */
+	wmb();
+	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+	txbi->skb = skb;
+	txbi->len = skb->len;
+	txbi->start_xmit = jiffies;
+	if (!txbi->start_xmit)
+		txbi->start_xmit = (0UL-1);
+
+	return 0;
+}
+
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = jme->txring;
+	struct jme_buffer_info *txbi = txring->bufinf;
+	int idx = atomic_read(&txring->next_to_clean);
+
+	txbi += idx;
+
+	smp_wmb();
+	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+		netif_stop_queue(jme->dev);
+		msg_tx_queued(jme, "TX Queue Paused.\n");
+		smp_wmb();
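+		/* Re-check after stopping the queue: the TX cleaner may have
+		 * freed enough descriptors in the meantime. */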
+		if (atomic_read(&txring->nr_free)
+			>= (jme->tx_wake_threshold)) {
+			netif_wake_queue(jme->dev);
+			msg_tx_queued(jme, "TX Queue Fast Woken.\n");
+		}
+	}
+
+	if (unlikely(txbi->start_xmit &&
+			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+			txbi->skb)) {
+		netif_stop_queue(jme->dev);
+		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+	}
+}
+
+/*
+ * This function is already protected by netif_tx_lock()
+ */
+
+static int
+jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int idx;
+
+	if (unlikely(jme_expand_header(jme, skb))) {
+		++(NET_STAT(jme).tx_dropped);
+		return NETDEV_TX_OK;
+	}
+
+	idx = jme_alloc_txdesc(jme, skb);
+
+	if (unlikely(idx < 0)) {
+		netif_stop_queue(netdev);
+		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+
+		return NETDEV_TX_BUSY;
+	}
+
+	jme_map_tx_skb(jme, skb, idx);
+	jme_fill_first_tx_desc(jme, skb, idx);
+
+	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+				TXCS_SELECT_QUEUE0 |
+				TXCS_QUEUE0S |
+				TXCS_ENABLE);
+	netdev->trans_start = jiffies;
+
+	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
+			skb_shinfo(skb)->nr_frags + 2,
+			jiffies);
+	jme_stop_queue_if_full(jme);
+
+	return NETDEV_TX_OK;
+}
+
+static int
+jme_set_macaddr(struct net_device *netdev, void *p)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	u32 val;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	spin_lock_bh(&jme->macaddr_lock);
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	val = (addr->sa_data[3] & 0xff) << 24 |
+	      (addr->sa_data[2] & 0xff) << 16 |
+	      (addr->sa_data[1] & 0xff) <<  8 |
+	      (addr->sa_data[0] & 0xff);
+	jwrite32(jme, JME_RXUMA_LO, val);
+	val = (addr->sa_data[5] & 0xff) << 8 |
+	      (addr->sa_data[4] & 0xff);
+	jwrite32(jme, JME_RXUMA_HI, val);
+	spin_unlock_bh(&jme->macaddr_lock);
+
+	return 0;
+}
+
+static void
+jme_set_multi(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 mc_hash[2] = {};
+	int i;
+
+	spin_lock_bh(&jme->rxmcs_lock);
+
+	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
+
+	if (netdev->flags & IFF_PROMISC) {
+		jme->reg_rxmcs |= RXMCS_ALLFRAME;
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
+	} else if (netdev->flags & IFF_MULTICAST) {
+		struct dev_mc_list *mclist;
+		int bit_nr;
+
+		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
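+		/* Hash each multicast address into a 64-bit filter split
+		 * across two 32-bit registers. */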
+		for (i = 0, mclist = netdev->mc_list;
+			mclist && i < netdev->mc_count;
+			++i, mclist = mclist->next) {
+
+			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
+		}
+
+		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
+		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
+	}
+
+	wmb();
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+
+	spin_unlock_bh(&jme->rxmcs_lock);
+}
+
+static int
+jme_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (new_mtu == jme->old_mtu)
+		return 0;
+
+	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+		((new_mtu) < IPV6_MIN_MTU))
+		return -EINVAL;
+
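+	/* Adjust the RX FIFO threshold for jumbo MTUs; checksum/TSO offload is
+	 * only kept for MTUs up to 1900 bytes (handled below). */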
+	if (new_mtu > 4000) {
+		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+		jme_restart_rx_engine(jme);
+	} else {
+		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+		jme_restart_rx_engine(jme);
+	}
+
+	if (new_mtu > 1900) {
+		netdev->features &= ~(NETIF_F_HW_CSUM |
+				NETIF_F_TSO |
+				NETIF_F_TSO6);
+	} else {
+		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
+			netdev->features |= NETIF_F_HW_CSUM;
+		if (test_bit(JME_FLAG_TSO, &jme->flags))
+			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	}
+
+	netdev->mtu = new_mtu;
+	jme_reset_link(jme);
+
+	return 0;
+}
+
+static void
+jme_tx_timeout(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme->phylink = 0;
+	jme_reset_phy_processor(jme);
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+
+	/*
+	 * Force to Reset the link again
+	 */
+	jme_reset_link(jme);
+}
+
+static void
+jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme->vlgrp = grp;
+}
+
+static void
+jme_get_drvinfo(struct net_device *netdev,
+		     struct ethtool_drvinfo *info)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(jme->pdev));
+}
+
+static int
+jme_get_regs_len(struct net_device *netdev)
+{
+	return JME_REG_LEN;
+}
+
+static void
+mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
+{
+	int i;
+
+	for (i = 0 ; i < len ; i += 4)
+		p[i >> 2] = jread32(jme, reg + i);
+}
+
+static void
+mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
+{
+	int i;
+	u16 *p16 = (u16 *)p;
+
+	for (i = 0 ; i < reg_nr ; ++i)
+		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
+}
+
+static void
+jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 *p32 = (u32 *)p;
+
+	memset(p, 0xFF, JME_REG_LEN);
+
+	regs->version = 1;
+	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+
+	p32 += 0x100 >> 2;
+	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
+}
+
+static int
+jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	ecmd->tx_coalesce_usecs = PCC_TX_TO;
+	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		ecmd->use_adaptive_rx_coalesce = false;
+		ecmd->rx_coalesce_usecs = 0;
+		ecmd->rx_max_coalesced_frames = 0;
+		return 0;
+	}
+
+	ecmd->use_adaptive_rx_coalesce = true;
+
+	switch (jme->dpi.cur) {
+	case PCC_P1:
+		ecmd->rx_coalesce_usecs = PCC_P1_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
+		break;
+	case PCC_P2:
+		ecmd->rx_coalesce_usecs = PCC_P2_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
+		break;
+	case PCC_P3:
+		ecmd->rx_coalesce_usecs = PCC_P3_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (ecmd->use_adaptive_rx_coalesce
+	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
+		clear_bit(JME_FLAG_POLL, &jme->flags);
+		jme->jme_rx = netif_rx;
+		jme->jme_vlan_rx = vlan_hwaccel_rx;
+		dpi->cur		= PCC_P1;
+		dpi->attempt		= PCC_P1;
+		dpi->cnt		= 0;
+		jme_set_rx_pcc(jme, PCC_P1);
+		jme_interrupt_mode(jme);
+	} else if (!(ecmd->use_adaptive_rx_coalesce)
+	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
+		set_bit(JME_FLAG_POLL, &jme->flags);
+		jme->jme_rx = netif_receive_skb;
+		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
+		jme_interrupt_mode(jme);
+	}
+
+	return 0;
+}
+
+static void
+jme_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+
+	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
+	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
+
+	spin_lock_bh(&jme->phy_lock);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	spin_unlock_bh(&jme->phy_lock);
+
+	ecmd->autoneg =
+		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+static int
+jme_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+
+	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
+		(ecmd->tx_pause != 0)) {
+
+		if (ecmd->tx_pause)
+			jme->reg_txpfc |= TXPFC_PF_EN;
+		else
+			jme->reg_txpfc &= ~TXPFC_PF_EN;
+
+		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
+	}
+
+	spin_lock_bh(&jme->rxmcs_lock);
+	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
+		(ecmd->rx_pause != 0)) {
+
+		if (ecmd->rx_pause)
+			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
+		else
+			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
+
+		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	}
+	spin_unlock_bh(&jme->rxmcs_lock);
+
+	spin_lock_bh(&jme->phy_lock);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
+		(ecmd->autoneg != 0)) {
+
+		if (ecmd->autoneg)
+			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		else
+			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+				MII_ADVERTISE, val);
+	}
+	spin_unlock_bh(&jme->phy_lock);
+
+	return 0;
+}
+
+static void
+jme_get_wol(struct net_device *netdev,
+		struct ethtool_wolinfo *wol)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	wol->supported = WAKE_MAGIC | WAKE_PHY;
+
+	wol->wolopts = 0;
+
+	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+		wol->wolopts |= WAKE_PHY;
+
+	if (jme->reg_pmcs & PMCS_MFEN)
+		wol->wolopts |= WAKE_MAGIC;
+
+}
+
+static int
+jme_set_wol(struct net_device *netdev,
+		struct ethtool_wolinfo *wol)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_MAGICSECURE |
+				WAKE_UCAST |
+				WAKE_MCAST |
+				WAKE_BCAST |
+				WAKE_ARP))
+		return -EOPNOTSUPP;
+
+	jme->reg_pmcs = 0;
+
+	if (wol->wolopts & WAKE_PHY)
+		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		jme->reg_pmcs |= PMCS_MFEN;
+
+	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+
+	return 0;
+}
+
+static int
+jme_get_settings(struct net_device *netdev,
+		     struct ethtool_cmd *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc;
+
+	spin_lock_bh(&jme->phy_lock);
+	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+	spin_unlock_bh(&jme->phy_lock);
+	return rc;
+}
+
+static int
+jme_set_settings(struct net_device *netdev,
+		     struct ethtool_cmd *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int rc, fdc = 0;
+
+	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	if (jme->mii_if.force_media &&
+	ecmd->autoneg != AUTONEG_ENABLE &&
+	(jme->mii_if.full_duplex != ecmd->duplex))
+		fdc = 1;
+
+	spin_lock_bh(&jme->phy_lock);
+	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+	spin_unlock_bh(&jme->phy_lock);
+
+	if (!rc && fdc)
+		jme_reset_link(jme);
+
+	if (!rc) {
+		set_bit(JME_FLAG_SSET, &jme->flags);
+		jme->old_ecmd = *ecmd;
+	}
+
+	return rc;
+}
+
+static u32
+jme_get_link(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
+}
+
+static u32
+jme_get_msglevel(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jme->msg_enable;
+}
+
+static void
+jme_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	jme->msg_enable = value;
+}
+
+static u32
+jme_get_rx_csum(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	return jme->reg_rxmcs & RXMCS_CHECKSUM;
+}
+
+static int
+jme_set_rx_csum(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	spin_lock_bh(&jme->rxmcs_lock);
+	if (on)
+		jme->reg_rxmcs |= RXMCS_CHECKSUM;
+	else
+		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	spin_unlock_bh(&jme->rxmcs_lock);
+
+	return 0;
+}
+
+static int
+jme_set_tx_csum(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (on) {
+		set_bit(JME_FLAG_TXCSUM, &jme->flags);
+		if (netdev->mtu <= 1900)
+			netdev->features |= NETIF_F_HW_CSUM;
+	} else {
+		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
+		netdev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	return 0;
+}
+
+static int
+jme_set_tso(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	if (on) {
+		set_bit(JME_FLAG_TSO, &jme->flags);
+		if (netdev->mtu <= 1900)
+			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	} else {
+		clear_bit(JME_FLAG_TSO, &jme->flags);
+		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	}
+
+	return 0;
+}
+
+static int
+jme_nway_reset(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	jme_restart_an(jme);
+	return 0;
+}
+
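+/*
+ * Read one byte from the attached EEPROM via the SMB interface,
+ * polling the busy/command bits with a timeout.
+ * Returns 0xFF if the bus stays busy.
+ */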
+static u8
+jme_smb_read(struct jme_adapter *jme, unsigned int addr)
+{
+	u32 val;
+	int to;
+
+	val = jread32(jme, JME_SMBCSR);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBCSR_BUSY) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBCSR);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return 0xFF;
+	}
+
+	jwrite32(jme, JME_SMBINTF,
+		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+		SMBINTF_HWRWN_READ |
+		SMBINTF_HWCMD);
+
+	val = jread32(jme, JME_SMBINTF);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBINTF_HWCMD) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBINTF);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return 0xFF;
+	}
+
+	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
+}
+
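+/*
+ * Write one byte to the attached EEPROM via the SMB interface, using
+ * the same busy/command polling as jme_smb_read().
+ * Silently gives up on timeout.
+ */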
+static void
+jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
+{
+	u32 val;
+	int to;
+
+	val = jread32(jme, JME_SMBCSR);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBCSR_BUSY) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBCSR);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return;
+	}
+
+	jwrite32(jme, JME_SMBINTF,
+		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
+		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+		SMBINTF_HWRWN_WRITE |
+		SMBINTF_HWCMD);
+
+	val = jread32(jme, JME_SMBINTF);
+	to = JME_SMB_BUSY_TIMEOUT;
+	while ((val & SMBINTF_HWCMD) && --to) {
+		msleep(1);
+		val = jread32(jme, JME_SMBINTF);
+	}
+	if (!to) {
+		msg_hw(jme, "SMB Bus Busy.\n");
+		return;
+	}
+
+	mdelay(2);
+}
+
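+/*
+ * Report the EEPROM size only when SMBCSR indicates an EEPROM is present.
+ */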
+static int
+jme_get_eeprom_len(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+	val = jread32(jme, JME_SMBCSR);
+	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
+}
+
+static int
+jme_get_eeprom(struct net_device *netdev,
+		struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, offset = eeprom->offset, len = eeprom->len;
+
+	/*
+	 * ethtool will check the boundary for us
+	 */
+	eeprom->magic = JME_EEPROM_MAGIC;
+	for (i = 0 ; i < len ; ++i)
+		data[i] = jme_smb_read(jme, i + offset);
+
+	return 0;
+}
+
+static int
+jme_set_eeprom(struct net_device *netdev,
+		struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int i, offset = eeprom->offset, len = eeprom->len;
+
+	if (eeprom->magic != JME_EEPROM_MAGIC)
+		return -EINVAL;
+
+	/*
+	 * ethtool will check the boundary for us
+	 */
+	for (i = 0 ; i < len ; ++i)
+		jme_smb_write(jme, i + offset, data[i]);
+
+	return 0;
+}
+
+static const struct ethtool_ops jme_ethtool_ops = {
+	.get_drvinfo            = jme_get_drvinfo,
+	.get_regs_len		= jme_get_regs_len,
+	.get_regs		= jme_get_regs,
+	.get_coalesce		= jme_get_coalesce,
+	.set_coalesce		= jme_set_coalesce,
+	.get_pauseparam		= jme_get_pauseparam,
+	.set_pauseparam		= jme_set_pauseparam,
+	.get_wol		= jme_get_wol,
+	.set_wol		= jme_set_wol,
+	.get_settings		= jme_get_settings,
+	.set_settings		= jme_set_settings,
+	.get_link		= jme_get_link,
+	.get_msglevel           = jme_get_msglevel,
+	.set_msglevel           = jme_set_msglevel,
+	.get_rx_csum		= jme_get_rx_csum,
+	.set_rx_csum		= jme_set_rx_csum,
+	.set_tx_csum		= jme_set_tx_csum,
+	.set_tso		= jme_set_tso,
+	.set_sg			= ethtool_op_set_sg,
+	.nway_reset             = jme_nway_reset,
+	.get_eeprom_len		= jme_get_eeprom_len,
+	.get_eeprom		= jme_get_eeprom,
+	.set_eeprom		= jme_set_eeprom,
+};
+
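+/*
+ * Pick the widest DMA mask the platform accepts (64/40/32 bit).
+ * Returns 1 when addressing above 32 bits is usable (the caller then
+ * sets NETIF_F_HIGHDMA), 0 for 32-bit only, or -1 on failure.
+ */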
+static int
+jme_pci_dma64(struct pci_dev *pdev)
+{
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+			return 1;
+
+	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+			return 1;
+
+	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+			return 0;
+
+	return -1;
+}
+
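+/*
+ * Set a vendor-specific bit in PHY register 26; presumably a hardware
+ * initialization quirk needed on non-FPGA chips (see jme_init_one()).
+ */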
+static inline void
+jme_phy_init(struct jme_adapter *jme)
+{
+	u16 reg26;
+
+	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
+}
+
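+/*
+ * Latch the FPGA version and chip revision from the CHIPMODE register.
+ */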
+static inline void
+jme_check_hw_ver(struct jme_adapter *jme)
+{
+	u32 chipmode;
+
+	chipmode = jread32(jme, JME_CHIPMODE);
+
+	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
+	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+}
+
+static int __devinit
+jme_init_one(struct pci_dev *pdev,
+	     const struct pci_device_id *ent)
+{
+	int rc = 0, using_dac, i;
+	struct net_device *netdev;
+	struct jme_adapter *jme;
+	u16 bmcr, bmsr;
+	u32 apmc;
+
+	/*
+	 * set up PCI device basics
+	 */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		jeprintk(pdev, "Cannot enable PCI device.\n");
+		goto err_out;
+	}
+
+	using_dac = jme_pci_dma64(pdev);
+	if (using_dac < 0) {
+		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
+		rc = -EIO;
+		goto err_out_disable_pdev;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		jeprintk(pdev, "No PCI resource region found.\n");
+		rc = -ENOMEM;
+		goto err_out_disable_pdev;
+	}
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	/*
+	 * alloc and init net device
+	 */
+	netdev = alloc_etherdev(sizeof(*jme));
+	if (!netdev) {
+		jeprintk(pdev, "Cannot allocate netdev structure.\n");
+		rc = -ENOMEM;
+		goto err_out_release_regions;
+	}
+	netdev->open			= jme_open;
+	netdev->stop			= jme_close;
+	netdev->hard_start_xmit		= jme_start_xmit;
+	netdev->set_mac_address		= jme_set_macaddr;
+	netdev->set_multicast_list	= jme_set_multi;
+	netdev->change_mtu		= jme_change_mtu;
+	netdev->ethtool_ops		= &jme_ethtool_ops;
+	netdev->tx_timeout		= jme_tx_timeout;
+	netdev->watchdog_timeo		= TX_TIMEOUT;
+	netdev->vlan_rx_register	= jme_vlan_rx_register;
+	NETDEV_GET_STATS(netdev, &jme_get_stats);
+	netdev->features		=	NETIF_F_HW_CSUM |
+						NETIF_F_SG |
+						NETIF_F_TSO |
+						NETIF_F_TSO6 |
+						NETIF_F_HW_VLAN_TX |
+						NETIF_F_HW_VLAN_RX;
+	if (using_dac)
+		netdev->features	|=	NETIF_F_HIGHDMA;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	/*
+	 * init adapter info
+	 */
+	jme = netdev_priv(netdev);
+	jme->pdev = pdev;
+	jme->dev = netdev;
+	jme->jme_rx = netif_rx;
+	jme->jme_vlan_rx = vlan_hwaccel_rx;
+	jme->old_mtu = netdev->mtu = 1500;
+	jme->phylink = 0;
+	jme->tx_ring_size = 1 << 10;
+	jme->tx_ring_mask = jme->tx_ring_size - 1;
+	jme->tx_wake_threshold = 1 << 9;
+	jme->rx_ring_size = 1 << 9;
+	jme->rx_ring_mask = jme->rx_ring_size - 1;
+	jme->msg_enable = JME_DEF_MSG_ENABLE;
+	jme->regs = ioremap(pci_resource_start(pdev, 0),
+			     pci_resource_len(pdev, 0));
+	if (!(jme->regs)) {
+		jeprintk(pdev, "Mapping PCI resource region error.\n");
+		rc = -ENOMEM;
+		goto err_out_free_netdev;
+	}
+	jme->shadow_regs = pci_alloc_consistent(pdev,
+						sizeof(u32) * SHADOW_REG_NR,
+						&(jme->shadow_dma));
+	if (!(jme->shadow_regs)) {
+		jeprintk(pdev, "Allocating shadow register mapping error.\n");
+		rc = -ENOMEM;
+		goto err_out_unmap;
+	}
+
+	if (no_pseudohp) {
+		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
+		jwrite32(jme, JME_APMC, apmc);
+	} else if (force_pseudohp) {
+		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
+		jwrite32(jme, JME_APMC, apmc);
+	}
+
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+
+	spin_lock_init(&jme->phy_lock);
+	spin_lock_init(&jme->macaddr_lock);
+	spin_lock_init(&jme->rxmcs_lock);
+
+	atomic_set(&jme->link_changing, 1);
+	atomic_set(&jme->rx_cleaning, 1);
+	atomic_set(&jme->tx_cleaning, 1);
+	atomic_set(&jme->rx_empty, 1);
+
+	tasklet_init(&jme->pcc_task,
+		     &jme_pcc_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->linkch_task,
+		     &jme_link_change_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->txclean_task,
+		     &jme_tx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxclean_task,
+		     &jme_rx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxempty_task,
+		     &jme_rx_empty_tasklet,
+		     (unsigned long) jme);
+	tasklet_disable_nosync(&jme->txclean_task);
+	tasklet_disable_nosync(&jme->rxclean_task);
+	tasklet_disable_nosync(&jme->rxempty_task);
+	jme->dpi.cur = PCC_P1;
+
+	jme->reg_ghc = 0;
+	jme->reg_rxcs = RXCS_DEFAULT;
+	jme->reg_rxmcs = RXMCS_DEFAULT;
+	jme->reg_txpfc = 0;
+	jme->reg_pmcs = PMCS_MFEN;
+	set_bit(JME_FLAG_TXCSUM, &jme->flags);
+	set_bit(JME_FLAG_TSO, &jme->flags);
+
+	/*
+	 * Get Max Read Req Size from PCI Config Space
+	 */
+	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
+	jme->mrrs &= PCI_DCSR_MRRS_MASK;
+	switch (jme->mrrs) {
+	case MRRS_128B:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
+		break;
+	case MRRS_256B:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
+		break;
+	default:
+		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
+		break;
+	}
+
+	/*
+	 * Must check before reset_mac_processor
+	 */
+	jme_check_hw_ver(jme);
+	jme->mii_if.dev = netdev;
+	if (jme->fpgaver) {
+		jme->mii_if.phy_id = 0;
+		for (i = 1 ; i < 32 ; ++i) {
+			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
+			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
+			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
+				jme->mii_if.phy_id = i;
+				break;
+			}
+		}
+
+		if (!jme->mii_if.phy_id) {
+			rc = -EIO;
+			jeprintk(pdev, "Can not find phy_id.\n");
+			goto err_out_free_shadow;
+		}
+
+		jme->reg_ghc |= GHC_LINK_POLL;
+	} else {
+		jme->mii_if.phy_id = 1;
+	}
+	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+		jme->mii_if.supports_gmii = true;
+	else
+		jme->mii_if.supports_gmii = false;
+	jme->mii_if.mdio_read = jme_mdio_read;
+	jme->mii_if.mdio_write = jme_mdio_write;
+
+	jme_clear_pm(jme);
+	jme_set_phyfifoa(jme);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+	if (!jme->fpgaver)
+		jme_phy_init(jme);
+	jme_phy_off(jme);
+
+	/*
+	 * Reset MAC processor and reload EEPROM for MAC Address
+	 */
+	jme_reset_mac_processor(jme);
+	rc = jme_reload_eeprom(jme);
+	if (rc) {
+		jeprintk(pdev,
+			"Reload eeprom for reading MAC Address error.\n");
+		goto err_out_free_shadow;
+	}
+	jme_load_macaddr(netdev);
+
+	/*
+	 * Tell stack that we are not ready to work until open()
+	 */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	/*
+	 * Register netdev
+	 */
+	rc = register_netdev(netdev);
+	if (rc) {
+		jeprintk(pdev, "Cannot register net device.\n");
+		goto err_out_free_shadow;
+	}
+
+	msg_probe(jme,
+		"JMC250 gigabit%s ver:%x rev:%x "
+		"macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+		(jme->fpgaver != 0) ? " (FPGA)" : "",
+		(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+		jme->rev,
+		netdev->dev_addr[0],
+		netdev->dev_addr[1],
+		netdev->dev_addr[2],
+		netdev->dev_addr[3],
+		netdev->dev_addr[4],
+		netdev->dev_addr[5]);
+
+	return 0;
+
+err_out_free_shadow:
+	pci_free_consistent(pdev,
+			    sizeof(u32) * SHADOW_REG_NR,
+			    jme->shadow_regs,
+			    jme->shadow_dma);
+err_out_unmap:
+	iounmap(jme->regs);
+err_out_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+err_out:
+	return rc;
+}
+
+static void __devexit
+jme_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+	pci_free_consistent(pdev,
+			    sizeof(u32) * SHADOW_REG_NR,
+			    jme->shadow_regs,
+			    jme->shadow_dma);
+	iounmap(jme->regs);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
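+/*
+ * Suspend: detach the netdev, quiesce tasklets and DMA engines, then
+ * either arm Wake-on-LAN at 100M half duplex (if any PMCS bits are set)
+ * or power the PHY down before entering D3cold.
+ */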
+static int
+jme_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	atomic_dec(&jme->link_changing);
+
+	netif_device_detach(netdev);
+	netif_stop_queue(netdev);
+	jme_stop_irq(jme);
+
+	tasklet_disable(&jme->txclean_task);
+	tasklet_disable(&jme->rxclean_task);
+	tasklet_disable(&jme->rxempty_task);
+
+	jme_disable_shadow(jme);
+
+	if (netif_carrier_ok(netdev)) {
+		if (test_bit(JME_FLAG_POLL, &jme->flags))
+			jme_polling_mode(jme);
+
+		jme_stop_pcc_timer(jme);
+		jme_reset_ghc_speed(jme);
+		jme_disable_rx_engine(jme);
+		jme_disable_tx_engine(jme);
+		jme_reset_mac_processor(jme);
+		jme_free_rx_resources(jme);
+		jme_free_tx_resources(jme);
+		netif_carrier_off(netdev);
+		jme->phylink = 0;
+	}
+
+	tasklet_enable(&jme->txclean_task);
+	tasklet_hi_enable(&jme->rxclean_task);
+	tasklet_hi_enable(&jme->rxempty_task);
+
+	pci_save_state(pdev);
+	if (jme->reg_pmcs) {
+		jme_set_100m_half(jme);
+
+		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+			jme_wait_link(jme);
+
+		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+
+		pci_enable_wake(pdev, PCI_D3cold, true);
+	} else {
+		jme_phy_off(jme);
+	}
+	pci_set_power_state(pdev, PCI_D3cold);
+
+	return 0;
+}
+
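+/*
+ * Resume: restore PCI state, reapply any saved ethtool settings (or
+ * reset the PHY), re-enable the shadow registers and interrupts, and
+ * schedule a link re-check.
+ */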
+static int
+jme_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme_clear_pm(jme);
+	pci_restore_state(pdev);
+
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
+		jme_set_settings(netdev, &jme->old_ecmd);
+	else
+		jme_reset_phy_processor(jme);
+
+	jme_enable_shadow(jme);
+	jme_start_irq(jme);
+	netif_device_attach(netdev);
+
+	atomic_inc(&jme->link_changing);
+
+	jme_reset_link(jme);
+
+	return 0;
+}
+
+static struct pci_device_id jme_pci_tbl[] = {
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
+	{ }
+};
+
+static struct pci_driver jme_driver = {
+	.name           = DRV_NAME,
+	.id_table       = jme_pci_tbl,
+	.probe          = jme_init_one,
+	.remove         = __devexit_p(jme_remove_one),
+#ifdef CONFIG_PM
+	.suspend        = jme_suspend,
+	.resume         = jme_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init
+jme_init_module(void)
+{
+	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
+	       "driver version %s\n", DRV_VERSION);
+	return pci_register_driver(&jme_driver);
+}
+
+static void __exit
+jme_cleanup_module(void)
+{
+	pci_unregister_driver(&jme_driver);
+}
+
+module_init(jme_init_module);
+module_exit(jme_cleanup_module);
+
+MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
+MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
+
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
new file mode 100644
index 0000000..b296884
--- /dev/null
+++ b/drivers/net/jme.h
@@ -0,0 +1,1199 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JME_H_INCLUDED__
+#define __JME_H_INCLUDED__
+
+#define DRV_NAME	"jme"
+#define DRV_VERSION	"1.0.2"
+#define PFX		DRV_NAME ": "
+
+#define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
+#define PCI_DEVICE_ID_JMICRON_JMC260	0x0260
+
+/*
+ * Message related definitions
+ */
+#define JME_DEF_MSG_ENABLE \
+	(NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | \
+	NETIF_MSG_RX_ERR | \
+	NETIF_MSG_TX_ERR | \
+	NETIF_MSG_HW)
+
+#define jeprintk(pdev, fmt, args...) \
+	printk(KERN_ERR PFX fmt, ## args)
+
+#ifdef TX_DEBUG
+#define tx_dbg(priv, fmt, args...) \
+	printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#else
+#define tx_dbg(priv, fmt, args...)
+#endif
+
+#define jme_msg(msglvl, type, priv, fmt, args...) \
+	if (netif_msg_##type(priv)) \
+		printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
+
+#define msg_probe(priv, fmt, args...) \
+	jme_msg(KERN_INFO, probe, priv, fmt, ## args)
+
+#define msg_link(priv, fmt, args...) \
+	jme_msg(KERN_INFO, link, priv, fmt, ## args)
+
+#define msg_intr(priv, fmt, args...) \
+	jme_msg(KERN_INFO, intr, priv, fmt, ## args)
+
+#define msg_rx_err(priv, fmt, args...) \
+	jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
+
+#define msg_rx_status(priv, fmt, args...) \
+	jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
+
+#define msg_tx_err(priv, fmt, args...) \
+	jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
+
+#define msg_tx_done(priv, fmt, args...) \
+	jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
+
+#define msg_tx_queued(priv, fmt, args...) \
+	jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
+
+#define msg_hw(priv, fmt, args...) \
+	jme_msg(KERN_ERR, hw, priv, fmt, ## args)
+
+/*
+ * Extra PCI Configuration space interface
+ */
+#define PCI_DCSR_MRRS		0x59
+#define PCI_DCSR_MRRS_MASK	0x70
+
+enum pci_dcsr_mrrs_vals {
+	MRRS_128B	= 0x00,
+	MRRS_256B	= 0x10,
+	MRRS_512B	= 0x20,
+	MRRS_1024B	= 0x30,
+	MRRS_2048B	= 0x40,
+	MRRS_4096B	= 0x50,
+};
+
+#define PCI_SPI			0xB0
+
+enum pci_spi_bits {
+	SPI_EN		= 0x10,
+	SPI_MISO	= 0x08,
+	SPI_MOSI	= 0x04,
+	SPI_SCLK	= 0x02,
+	SPI_CS		= 0x01,
+};
+
+struct jme_spi_op {
+	void __user *uwbuf;
+	void __user *urbuf;
+	__u8	wn;	/* Number of write actions */
+	__u8	rn;	/* Number of read actions */
+	__u8	bitn;	/* Number of bits per action */
+	__u8	spd;	/* The maximum acceptable controller speed, in MHz. */
+	__u8	mode;	/* CPOL, CPHA, and Duplex mode of SPI */
+
+	/* Internal use only */
+	u8	*kwbuf;
+	u8	*krbuf;
+	u8	sr;
+	u16	halfclk; /* Half of clock cycle calculated from spd, in ns */
+};
+
+enum jme_spi_op_bits {
+	SPI_MODE_CPHA	= 0x01,
+	SPI_MODE_CPOL	= 0x02,
+	SPI_MODE_DUP	= 0x80,
+};
+
+#define HALF_US 500	/* 500 ns */
+#define JMESPIIOCTL	SIOCDEVPRIVATE
+
+/*
+ * Dynamic (adaptive) / static PCC values
+ */
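+/*
+ * "PCC" here refers to the chip's packet/interrupt coalescing engine
+ * (cf. the JME_PCCRX0/JME_PCCTX registers below).
+ */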
+enum dynamic_pcc_values {
+	PCC_OFF		= 0,
+	PCC_P1		= 1,
+	PCC_P2		= 2,
+	PCC_P3		= 3,
+
+	PCC_OFF_TO	= 0,
+	PCC_P1_TO	= 1,
+	PCC_P2_TO	= 64,
+	PCC_P3_TO	= 128,
+
+	PCC_OFF_CNT	= 0,
+	PCC_P1_CNT	= 1,
+	PCC_P2_CNT	= 16,
+	PCC_P3_CNT	= 32,
+};
+struct dynpcc_info {
+	unsigned long	last_bytes;
+	unsigned long	last_pkts;
+	unsigned long	intr_cnt;
+	unsigned char	cur;
+	unsigned char	attempt;
+	unsigned char	cnt;
+};
+#define PCC_INTERVAL_US	100000
+#define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US))
+#define PCC_P3_THRESHOLD (2 * 1024 * 1024)
+#define PCC_P2_THRESHOLD 800
+#define PCC_INTR_THRESHOLD 800
+#define PCC_TX_TO 1000
+#define PCC_TX_CNT 8
+
+/*
+ * TX/RX Descriptors
+ *
+ * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024
+ */
+#define RING_DESC_ALIGN		16	/* Descriptor alignment */
+#define TX_DESC_SIZE		16
+#define TX_RING_NR		8
+#define TX_RING_ALLOC_SIZE(s)	(((s) * TX_DESC_SIZE) + RING_DESC_ALIGN)
+
+struct txdesc {
+	union {
+		__u8	all[16];
+		__le32	dw[4];
+		struct {
+			/* DW0 */
+			__le16	vlan;
+			__u8	rsv1;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	mss;
+
+			/* DW2 */
+			__le16	pktsize;
+			__le16	rsv2;
+
+			/* DW3 */
+			__le32	bufaddr;
+		} desc1;
+		struct {
+			/* DW0 */
+			__le16	rsv1;
+			__u8	rsv2;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	rsv3;
+
+			/* DW2 */
+			__le32	bufaddrh;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} desc2;
+		struct {
+			/* DW0 */
+			__u8	ehdrsz;
+			__u8	rsv1;
+			__u8	rsv2;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	trycnt;
+			__le16	segcnt;
+
+			/* DW2 */
+			__le16	pktsz;
+			__le16	rsv3;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} descwb;
+	};
+};
+
+enum jme_txdesc_flags_bits {
+	TXFLAG_OWN	= 0x80,
+	TXFLAG_INT	= 0x40,
+	TXFLAG_64BIT	= 0x20,
+	TXFLAG_TCPCS	= 0x10,
+	TXFLAG_UDPCS	= 0x08,
+	TXFLAG_IPCS	= 0x04,
+	TXFLAG_LSEN	= 0x02,
+	TXFLAG_TAGON	= 0x01,
+};
+
+#define TXDESC_MSS_SHIFT	2
+enum jme_rxdescwb_flags_bits {
+	TXWBFLAG_OWN	= 0x80,
+	TXWBFLAG_INT	= 0x40,
+	TXWBFLAG_TMOUT	= 0x20,
+	TXWBFLAG_TRYOUT	= 0x10,
+	TXWBFLAG_COL	= 0x08,
+
+	TXWBFLAG_ALLERR	= TXWBFLAG_TMOUT |
+			  TXWBFLAG_TRYOUT |
+			  TXWBFLAG_COL,
+};
+
+#define RX_DESC_SIZE		16
+#define RX_RING_NR		4
+#define RX_RING_ALLOC_SIZE(s)	(((s) * RX_DESC_SIZE) + RING_DESC_ALIGN)
+#define RX_BUF_DMA_ALIGN	8
+#define RX_PREPAD_SIZE		10
+#define ETH_CRC_LEN		2
+#define RX_VLANHDR_LEN		2
+#define RX_EXTRA_LEN		(RX_PREPAD_SIZE + \
+				ETH_HLEN + \
+				ETH_CRC_LEN + \
+				RX_VLANHDR_LEN + \
+				RX_BUF_DMA_ALIGN)
+
+struct rxdesc {
+	union {
+		__u8	all[16];
+		__le32	dw[4];
+		struct {
+			/* DW0 */
+			__le16	rsv2;
+			__u8	rsv1;
+			__u8	flags;
+
+			/* DW1 */
+			__le16	datalen;
+			__le16	wbcpl;
+
+			/* DW2 */
+			__le32	bufaddrh;
+
+			/* DW3 */
+			__le32	bufaddrl;
+		} desc1;
+		struct {
+			/* DW0 */
+			__le16	vlan;
+			__le16	flags;
+
+			/* DW1 */
+			__le16	framesize;
+			__u8	errstat;
+			__u8	desccnt;
+
+			/* DW2 */
+			__le32	rsshash;
+
+			/* DW3 */
+			__u8	hashfun;
+			__u8	hashtype;
+			__le16	resrv;
+		} descwb;
+	};
+};
+
+enum jme_rxdesc_flags_bits {
+	RXFLAG_OWN	= 0x80,
+	RXFLAG_INT	= 0x40,
+	RXFLAG_64BIT	= 0x20,
+};
+
+enum jme_rxwbdesc_flags_bits {
+	RXWBFLAG_OWN		= 0x8000,
+	RXWBFLAG_INT		= 0x4000,
+	RXWBFLAG_MF		= 0x2000,
+	RXWBFLAG_64BIT		= 0x2000,
+	RXWBFLAG_TCPON		= 0x1000,
+	RXWBFLAG_UDPON		= 0x0800,
+	RXWBFLAG_IPCS		= 0x0400,
+	RXWBFLAG_TCPCS		= 0x0200,
+	RXWBFLAG_UDPCS		= 0x0100,
+	RXWBFLAG_TAGON		= 0x0080,
+	RXWBFLAG_IPV4		= 0x0040,
+	RXWBFLAG_IPV6		= 0x0020,
+	RXWBFLAG_PAUSE		= 0x0010,
+	RXWBFLAG_MAGIC		= 0x0008,
+	RXWBFLAG_WAKEUP		= 0x0004,
+	RXWBFLAG_DEST		= 0x0003,
+	RXWBFLAG_DEST_UNI	= 0x0001,
+	RXWBFLAG_DEST_MUL	= 0x0002,
+	RXWBFLAG_DEST_BRO	= 0x0003,
+};
+
+enum jme_rxwbdesc_desccnt_mask {
+	RXWBDCNT_WBCPL	= 0x80,
+	RXWBDCNT_DCNT	= 0x7F,
+};
+
+enum jme_rxwbdesc_errstat_bits {
+	RXWBERR_LIMIT	= 0x80,
+	RXWBERR_MIIER	= 0x40,
+	RXWBERR_NIBON	= 0x20,
+	RXWBERR_COLON	= 0x10,
+	RXWBERR_ABORT	= 0x08,
+	RXWBERR_SHORT	= 0x04,
+	RXWBERR_OVERUN	= 0x02,
+	RXWBERR_CRCERR	= 0x01,
+	RXWBERR_ALLERR	= 0xFF,
+};
+
+/*
+ * Buffer information corresponding to ring descriptors.
+ */
+struct jme_buffer_info {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int len;
+	int nr_desc;
+	unsigned long start_xmit;
+};
+
+/*
+ * The structure holding buffer information and ring descriptors all together.
+ */
+#define MAX_RING_DESC_NR	1024
+struct jme_ring {
+	void *alloc;		/* pointer to allocated memory */
+	void *desc;		/* pointer to ring memory  */
+	dma_addr_t dmaalloc;	/* phys address of ring alloc */
+	dma_addr_t dma;		/* phys address for ring dma */
+
+	/* Buffer information corresponding to each descriptor */
+	struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
+
+	int next_to_use;
+	atomic_t next_to_clean;
+	atomic_t nr_free;
+};
+
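+/*
+ * Thin wrappers around the kernel's net_device statistics and NAPI
+ * interfaces, presumably kept as macros to ease building the driver
+ * against differing kernel APIs.
+ */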
+#define NET_STAT(priv) (priv->dev->stats)
+#define NETDEV_GET_STATS(netdev, fun_ptr)
+#define DECLARE_NET_DEVICE_STATS
+
+#define DECLARE_NAPI_STRUCT struct napi_struct napi;
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+	netif_napi_add(dev, napis, pollfn, q);
+#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
+#define JME_NAPI_WEIGHT(w) int w
+#define JME_NAPI_WEIGHT_VAL(w) w
+#define JME_NAPI_WEIGHT_SET(w, r)
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
+#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
+#define JME_NAPI_DISABLE(priv) \
+	if (!napi_disable_pending(&priv->napi)) \
+		napi_disable(&priv->napi);
+#define JME_RX_SCHEDULE_PREP(priv) \
+	netif_rx_schedule_prep(priv->dev, &priv->napi)
+#define JME_RX_SCHEDULE(priv) \
+	__netif_rx_schedule(priv->dev, &priv->napi);
+
+/*
+ * Jmac Adapter Private data
+ */
+#define SHADOW_REG_NR 8
+struct jme_adapter {
+	struct pci_dev          *pdev;
+	struct net_device       *dev;
+	void __iomem            *regs;
+	dma_addr_t		shadow_dma;
+	u32			*shadow_regs;
+	struct mii_if_info	mii_if;
+	struct jme_ring		rxring[RX_RING_NR];
+	struct jme_ring		txring[TX_RING_NR];
+	spinlock_t		phy_lock;
+	spinlock_t		macaddr_lock;
+	spinlock_t		rxmcs_lock;
+	struct tasklet_struct	rxempty_task;
+	struct tasklet_struct	rxclean_task;
+	struct tasklet_struct	txclean_task;
+	struct tasklet_struct	linkch_task;
+	struct tasklet_struct	pcc_task;
+	unsigned long		flags;
+	u32			reg_txcs;
+	u32			reg_txpfc;
+	u32			reg_rxcs;
+	u32			reg_rxmcs;
+	u32			reg_ghc;
+	u32			reg_pmcs;
+	u32			phylink;
+	u32			tx_ring_size;
+	u32			tx_ring_mask;
+	u32			tx_wake_threshold;
+	u32			rx_ring_size;
+	u32			rx_ring_mask;
+	u8			mrrs;
+	unsigned int		fpgaver;
+	unsigned int		chiprev;
+	u8			rev;
+	u32			msg_enable;
+	struct ethtool_cmd	old_ecmd;
+	unsigned int		old_mtu;
+	struct vlan_group	*vlgrp;
+	struct dynpcc_info	dpi;
+	atomic_t		intr_sem;
+	atomic_t		link_changing;
+	atomic_t		tx_cleaning;
+	atomic_t		rx_cleaning;
+	atomic_t		rx_empty;
+	int			(*jme_rx)(struct sk_buff *skb);
+	int			(*jme_vlan_rx)(struct sk_buff *skb,
+					  struct vlan_group *grp,
+					  unsigned short vlan_tag);
+	DECLARE_NAPI_STRUCT
+	DECLARE_NET_DEVICE_STATS
+};
+
+enum shadow_reg_val {
+	SHADOW_IEVE = 0,
+};
+
+enum jme_flags_bits {
+	JME_FLAG_MSI		= 1,
+	JME_FLAG_SSET		= 2,
+	JME_FLAG_TXCSUM		= 3,
+	JME_FLAG_TSO		= 4,
+	JME_FLAG_POLL		= 5,
+	JME_FLAG_SHUTDOWN	= 6,
+};
+
+#define TX_TIMEOUT		(5 * HZ)
+#define JME_REG_LEN		0x500
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
+
+static inline struct jme_adapter*
+jme_napi_priv(struct napi_struct *napi)
+{
+	struct jme_adapter *jme;
+	jme = container_of(napi, struct jme_adapter, napi);
+	return jme;
+}
+
+/*
+ * Memory-mapped I/O registers
+ */
+enum jme_iomap_offsets {
+	JME_MAC		= 0x0000,
+	JME_PHY		= 0x0400,
+	JME_MISC	= 0x0800,
+	JME_RSS		= 0x0C00,
+};
+
+enum jme_iomap_lens {
+	JME_MAC_LEN	= 0x80,
+	JME_PHY_LEN	= 0x58,
+	JME_MISC_LEN	= 0x98,
+	JME_RSS_LEN	= 0xFF,
+};
+
+enum jme_iomap_regs {
+	JME_TXCS	= JME_MAC | 0x00, /* Transmit Control and Status */
+	JME_TXDBA_LO	= JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
+	JME_TXDBA_HI	= JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */
+	JME_TXQDC	= JME_MAC | 0x0C, /* Transmit Queue Desc Count */
+	JME_TXNDA	= JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */
+	JME_TXMCS	= JME_MAC | 0x14, /* Transmit MAC Control Status */
+	JME_TXPFC	= JME_MAC | 0x18, /* Transmit Pause Frame Control */
+	JME_TXTRHD	= JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */
+
+	JME_RXCS	= JME_MAC | 0x20, /* Receive Control and Status */
+	JME_RXDBA_LO	= JME_MAC | 0x24, /* Receive Queue Desc Base Addr */
+	JME_RXDBA_HI	= JME_MAC | 0x28, /* Receive Queue Desc Base Addr */
+	JME_RXQDC	= JME_MAC | 0x2C, /* Receive Queue Desc Count */
+	JME_RXNDA	= JME_MAC | 0x30, /* Receive Queue Next Desc Addr */
+	JME_RXMCS	= JME_MAC | 0x34, /* Receive MAC Control Status */
+	JME_RXUMA_LO	= JME_MAC | 0x38, /* Receive Unicast MAC Address */
+	JME_RXUMA_HI	= JME_MAC | 0x3C, /* Receive Unicast MAC Address */
+	JME_RXMCHT_LO	= JME_MAC | 0x40, /* Recv Multicast Addr HashTable */
+	JME_RXMCHT_HI	= JME_MAC | 0x44, /* Recv Multicast Addr HashTable */
+	JME_WFODP	= JME_MAC | 0x48, /* Wakeup Frame Output Data Port */
+	JME_WFOI	= JME_MAC | 0x4C, /* Wakeup Frame Output Interface */
+
+	JME_SMI		= JME_MAC | 0x50, /* Station Management Interface */
+	JME_GHC		= JME_MAC | 0x54, /* Global Host Control */
+	JME_PMCS	= JME_MAC | 0x60, /* Power Management Control/Stat */
+
+
+	JME_PHY_CS	= JME_PHY | 0x28, /* PHY Ctrl and Status Register */
+	JME_PHY_LINK	= JME_PHY | 0x30, /* PHY Link Status Register */
+	JME_SMBCSR	= JME_PHY | 0x40, /* SMB Control and Status */
+	JME_SMBINTF	= JME_PHY | 0x44, /* SMB Interface */
+
+
+	JME_TMCSR	= JME_MISC | 0x00, /* Timer Control/Status Register */
+	JME_GPREG0	= JME_MISC | 0x08, /* General purpose REG-0 */
+	JME_GPREG1	= JME_MISC | 0x0C, /* General purpose REG-1 */
+	JME_IEVE	= JME_MISC | 0x20, /* Interrupt Event Status */
+	JME_IREQ	= JME_MISC | 0x24, /* Intr Req Status(For Debug) */
+	JME_IENS	= JME_MISC | 0x28, /* Intr Enable - Setting Port */
+	JME_IENC	= JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */
+	JME_PCCRX0	= JME_MISC | 0x30, /* PCC Control for RX Queue 0 */
+	JME_PCCTX	= JME_MISC | 0x40, /* PCC Control for TX Queues */
+	JME_CHIPMODE	= JME_MISC | 0x44, /* Identify FPGA Version */
+	JME_SHBA_HI	= JME_MISC | 0x48, /* Shadow Register Base HI */
+	JME_SHBA_LO	= JME_MISC | 0x4C, /* Shadow Register Base LO */
+	JME_TIMER1	= JME_MISC | 0x70, /* Timer1 */
+	JME_TIMER2	= JME_MISC | 0x74, /* Timer2 */
+	JME_APMC	= JME_MISC | 0x7C, /* Aggressive Power Mode Control */
+	JME_PCCSRX0	= JME_MISC | 0x80, /* PCC Status of RX0 */
+};
+
+/*
+ * TX Control/Status Bits
+ */
+enum jme_txcs_bits {
+	TXCS_QUEUE7S	= 0x00008000,
+	TXCS_QUEUE6S	= 0x00004000,
+	TXCS_QUEUE5S	= 0x00002000,
+	TXCS_QUEUE4S	= 0x00001000,
+	TXCS_QUEUE3S	= 0x00000800,
+	TXCS_QUEUE2S	= 0x00000400,
+	TXCS_QUEUE1S	= 0x00000200,
+	TXCS_QUEUE0S	= 0x00000100,
+	TXCS_FIFOTH	= 0x000000C0,
+	TXCS_DMASIZE	= 0x00000030,
+	TXCS_BURST	= 0x00000004,
+	TXCS_ENABLE	= 0x00000001,
+};
+
+enum jme_txcs_value {
+	TXCS_FIFOTH_16QW	= 0x000000C0,
+	TXCS_FIFOTH_12QW	= 0x00000080,
+	TXCS_FIFOTH_8QW		= 0x00000040,
+	TXCS_FIFOTH_4QW		= 0x00000000,
+
+	TXCS_DMASIZE_64B	= 0x00000000,
+	TXCS_DMASIZE_128B	= 0x00000010,
+	TXCS_DMASIZE_256B	= 0x00000020,
+	TXCS_DMASIZE_512B	= 0x00000030,
+
+	TXCS_SELECT_QUEUE0	= 0x00000000,
+	TXCS_SELECT_QUEUE1	= 0x00010000,
+	TXCS_SELECT_QUEUE2	= 0x00020000,
+	TXCS_SELECT_QUEUE3	= 0x00030000,
+	TXCS_SELECT_QUEUE4	= 0x00040000,
+	TXCS_SELECT_QUEUE5	= 0x00050000,
+	TXCS_SELECT_QUEUE6	= 0x00060000,
+	TXCS_SELECT_QUEUE7	= 0x00070000,
+
+	TXCS_DEFAULT		= TXCS_FIFOTH_4QW |
+				  TXCS_BURST,
+};
+
+#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * TX MAC Control/Status Bits
+ */
+enum jme_txmcs_bit_masks {
+	TXMCS_IFG2		= 0xC0000000,
+	TXMCS_IFG1		= 0x30000000,
+	TXMCS_TTHOLD		= 0x00000300,
+	TXMCS_FBURST		= 0x00000080,
+	TXMCS_CARRIEREXT	= 0x00000040,
+	TXMCS_DEFER		= 0x00000020,
+	TXMCS_BACKOFF		= 0x00000010,
+	TXMCS_CARRIERSENSE	= 0x00000008,
+	TXMCS_COLLISION		= 0x00000004,
+	TXMCS_CRC		= 0x00000002,
+	TXMCS_PADDING		= 0x00000001,
+};
+
+enum jme_txmcs_values {
+	TXMCS_IFG2_6_4		= 0x00000000,
+	TXMCS_IFG2_8_5		= 0x40000000,
+	TXMCS_IFG2_10_6		= 0x80000000,
+	TXMCS_IFG2_12_7		= 0xC0000000,
+
+	TXMCS_IFG1_8_4		= 0x00000000,
+	TXMCS_IFG1_12_6		= 0x10000000,
+	TXMCS_IFG1_16_8		= 0x20000000,
+	TXMCS_IFG1_20_10	= 0x30000000,
+
+	TXMCS_TTHOLD_1_8	= 0x00000000,
+	TXMCS_TTHOLD_1_4	= 0x00000100,
+	TXMCS_TTHOLD_1_2	= 0x00000200,
+	TXMCS_TTHOLD_FULL	= 0x00000300,
+
+	TXMCS_DEFAULT		= TXMCS_IFG2_8_5 |
+				  TXMCS_IFG1_16_8 |
+				  TXMCS_TTHOLD_FULL |
+				  TXMCS_DEFER |
+				  TXMCS_CRC |
+				  TXMCS_PADDING,
+};
+
+enum jme_txpfc_bits_masks {
+	TXPFC_VLAN_TAG		= 0xFFFF0000,
+	TXPFC_VLAN_EN		= 0x00008000,
+	TXPFC_PF_EN		= 0x00000001,
+};
+
+enum jme_txtrhd_bits_masks {
+	TXTRHD_TXPEN		= 0x80000000,
+	TXTRHD_TXP		= 0x7FFFFF00,
+	TXTRHD_TXREN		= 0x00000080,
+	TXTRHD_TXRL		= 0x0000007F,
+};
+
+enum jme_txtrhd_shifts {
+	TXTRHD_TXP_SHIFT	= 8,
+	TXTRHD_TXRL_SHIFT	= 0,
+};
+
+/*
+ * RX Control/Status Bits
+ */
+enum jme_rxcs_bit_masks {
+	/* FIFO full threshold for transmitting Tx Pause Packet */
+	RXCS_FIFOTHTP	= 0x30000000,
+	/* FIFO threshold for processing next packet */
+	RXCS_FIFOTHNP	= 0x0C000000,
+	RXCS_DMAREQSZ	= 0x03000000, /* DMA Request Size */
+	RXCS_QUEUESEL	= 0x00030000, /* Queue selection */
+	RXCS_RETRYGAP	= 0x0000F000, /* RX Desc full retry gap */
+	RXCS_RETRYCNT	= 0x00000F00, /* RX Desc full retry counter */
+	RXCS_WAKEUP	= 0x00000040, /* Enable receive wakeup packet */
+	RXCS_MAGIC	= 0x00000020, /* Enable receive magic packet */
+	RXCS_SHORT	= 0x00000010, /* Enable receive short packet */
+	RXCS_ABORT	= 0x00000008, /* Enable receive error packet */
+	RXCS_QST	= 0x00000004, /* Receive queue start */
+	RXCS_SUSPEND	= 0x00000002,
+	RXCS_ENABLE	= 0x00000001,
+};
+
+enum jme_rxcs_values {
+	RXCS_FIFOTHTP_16T	= 0x00000000,
+	RXCS_FIFOTHTP_32T	= 0x10000000,
+	RXCS_FIFOTHTP_64T	= 0x20000000,
+	RXCS_FIFOTHTP_128T	= 0x30000000,
+
+	RXCS_FIFOTHNP_16QW	= 0x00000000,
+	RXCS_FIFOTHNP_32QW	= 0x04000000,
+	RXCS_FIFOTHNP_64QW	= 0x08000000,
+	RXCS_FIFOTHNP_128QW	= 0x0C000000,
+
+	RXCS_DMAREQSZ_16B	= 0x00000000,
+	RXCS_DMAREQSZ_32B	= 0x01000000,
+	RXCS_DMAREQSZ_64B	= 0x02000000,
+	RXCS_DMAREQSZ_128B	= 0x03000000,
+
+	RXCS_QUEUESEL_Q0	= 0x00000000,
+	RXCS_QUEUESEL_Q1	= 0x00010000,
+	RXCS_QUEUESEL_Q2	= 0x00020000,
+	RXCS_QUEUESEL_Q3	= 0x00030000,
+
+	RXCS_RETRYGAP_256ns	= 0x00000000,
+	RXCS_RETRYGAP_512ns	= 0x00001000,
+	RXCS_RETRYGAP_1024ns	= 0x00002000,
+	RXCS_RETRYGAP_2048ns	= 0x00003000,
+	RXCS_RETRYGAP_4096ns	= 0x00004000,
+	RXCS_RETRYGAP_8192ns	= 0x00005000,
+	RXCS_RETRYGAP_16384ns	= 0x00006000,
+	RXCS_RETRYGAP_32768ns	= 0x00007000,
+
+	RXCS_RETRYCNT_0		= 0x00000000,
+	RXCS_RETRYCNT_4		= 0x00000100,
+	RXCS_RETRYCNT_8		= 0x00000200,
+	RXCS_RETRYCNT_12	= 0x00000300,
+	RXCS_RETRYCNT_16	= 0x00000400,
+	RXCS_RETRYCNT_20	= 0x00000500,
+	RXCS_RETRYCNT_24	= 0x00000600,
+	RXCS_RETRYCNT_28	= 0x00000700,
+	RXCS_RETRYCNT_32	= 0x00000800,
+	RXCS_RETRYCNT_36	= 0x00000900,
+	RXCS_RETRYCNT_40	= 0x00000A00,
+	RXCS_RETRYCNT_44	= 0x00000B00,
+	RXCS_RETRYCNT_48	= 0x00000C00,
+	RXCS_RETRYCNT_52	= 0x00000D00,
+	RXCS_RETRYCNT_56	= 0x00000E00,
+	RXCS_RETRYCNT_60	= 0x00000F00,
+
+	RXCS_DEFAULT		= RXCS_FIFOTHTP_128T |
+				  RXCS_FIFOTHNP_128QW |
+				  RXCS_DMAREQSZ_128B |
+				  RXCS_RETRYGAP_256ns |
+				  RXCS_RETRYCNT_32,
+};
+
+#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * RX MAC Control/Status Bits
+ */
+enum jme_rxmcs_bits {
+	RXMCS_ALLFRAME		= 0x00000800,
+	RXMCS_BRDFRAME		= 0x00000400,
+	RXMCS_MULFRAME		= 0x00000200,
+	RXMCS_UNIFRAME		= 0x00000100,
+	RXMCS_ALLMULFRAME	= 0x00000080,
+	RXMCS_MULFILTERED	= 0x00000040,
+	RXMCS_RXCOLLDEC		= 0x00000020,
+	RXMCS_FLOWCTRL		= 0x00000008,
+	RXMCS_VTAGRM		= 0x00000004,
+	RXMCS_PREPAD		= 0x00000002,
+	RXMCS_CHECKSUM		= 0x00000001,
+
+	RXMCS_DEFAULT		= RXMCS_VTAGRM |
+				  RXMCS_PREPAD |
+				  RXMCS_FLOWCTRL |
+				  RXMCS_CHECKSUM,
+};
+
+/*
+ * Wakeup Frame setup interface registers
+ */
+#define WAKEUP_FRAME_NR	8
+#define WAKEUP_FRAME_MASK_DWNR	4
+
+enum jme_wfoi_bit_masks {
+	WFOI_MASK_SEL		= 0x00000070,
+	WFOI_CRC_SEL		= 0x00000008,
+	WFOI_FRAME_SEL		= 0x00000007,
+};
+
+enum jme_wfoi_shifts {
+	WFOI_MASK_SHIFT		= 4,
+};
+
+/*
+ * SMI Related definitions
+ */
+enum jme_smi_bit_mask {
+	SMI_DATA_MASK		= 0xFFFF0000,
+	SMI_REG_ADDR_MASK	= 0x0000F800,
+	SMI_PHY_ADDR_MASK	= 0x000007C0,
+	SMI_OP_WRITE		= 0x00000020,
+	/* Set to 1 to issue a request; cleared to 0 by hardware when done */
+	SMI_OP_REQ		= 0x00000010,
+	SMI_OP_MDIO		= 0x00000008, /* Software access In/Out */
+	SMI_OP_MDOE		= 0x00000004, /* Software Output Enable */
+	SMI_OP_MDC		= 0x00000002, /* Software CLK Control */
+	SMI_OP_MDEN		= 0x00000001, /* Software access Enable */
+};
+
+enum jme_smi_bit_shift {
+	SMI_DATA_SHIFT		= 16,
+	SMI_REG_ADDR_SHIFT	= 11,
+	SMI_PHY_ADDR_SHIFT	= 6,
+};
+
+static inline u32 smi_reg_addr(int x)
+{
+	return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK;
+}
+
+static inline u32 smi_phy_addr(int x)
+{
+	return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK;
+}
+
+#define JME_PHY_TIMEOUT 100 /* 100 msec */
+#define JME_PHY_REG_NR 32
+
+/*
+ * Global Host Control
+ */
+enum jme_ghc_bit_mask {
+	GHC_SWRST	= 0x40000000,
+	GHC_DPX		= 0x00000040,
+	GHC_SPEED	= 0x00000030,
+	GHC_LINK_POLL	= 0x00000001,
+};
+
+enum jme_ghc_speed_val {
+	GHC_SPEED_10M	= 0x00000010,
+	GHC_SPEED_100M	= 0x00000020,
+	GHC_SPEED_1000M	= 0x00000030,
+};
+
+/*
+ * Power management control and status register
+ */
+enum jme_pmcs_bit_masks {
+	PMCS_WF7DET	= 0x80000000,
+	PMCS_WF6DET	= 0x40000000,
+	PMCS_WF5DET	= 0x20000000,
+	PMCS_WF4DET	= 0x10000000,
+	PMCS_WF3DET	= 0x08000000,
+	PMCS_WF2DET	= 0x04000000,
+	PMCS_WF1DET	= 0x02000000,
+	PMCS_WF0DET	= 0x01000000,
+	PMCS_LFDET	= 0x00040000,
+	PMCS_LRDET	= 0x00020000,
+	PMCS_MFDET	= 0x00010000,
+	PMCS_WF7EN	= 0x00008000,
+	PMCS_WF6EN	= 0x00004000,
+	PMCS_WF5EN	= 0x00002000,
+	PMCS_WF4EN	= 0x00001000,
+	PMCS_WF3EN	= 0x00000800,
+	PMCS_WF2EN	= 0x00000400,
+	PMCS_WF1EN	= 0x00000200,
+	PMCS_WF0EN	= 0x00000100,
+	PMCS_LFEN	= 0x00000004,
+	PMCS_LREN	= 0x00000002,
+	PMCS_MFEN	= 0x00000001,
+};
+
+/*
+ * Giga PHY Status Registers
+ */
+enum jme_phy_link_bit_mask {
+	PHY_LINK_SPEED_MASK		= 0x0000C000,
+	PHY_LINK_DUPLEX			= 0x00002000,
+	PHY_LINK_SPEEDDPU_RESOLVED	= 0x00000800,
+	PHY_LINK_UP			= 0x00000400,
+	PHY_LINK_AUTONEG_COMPLETE	= 0x00000200,
+	PHY_LINK_MDI_STAT		= 0x00000040,
+};
+
+enum jme_phy_link_speed_val {
+	PHY_LINK_SPEED_10M		= 0x00000000,
+	PHY_LINK_SPEED_100M		= 0x00004000,
+	PHY_LINK_SPEED_1000M		= 0x00008000,
+};
+
+#define JME_SPDRSV_TIMEOUT	500	/* 500 us */
+
+/*
+ * SMB Control and Status
+ */
+enum jme_smbcsr_bit_mask {
+	SMBCSR_CNACK	= 0x00020000,
+	SMBCSR_RELOAD	= 0x00010000,
+	SMBCSR_EEPROMD	= 0x00000020,
+	SMBCSR_INITDONE	= 0x00000010,
+	SMBCSR_BUSY	= 0x0000000F,
+};
+
+enum jme_smbintf_bit_mask {
+	SMBINTF_HWDATR	= 0xFF000000,
+	SMBINTF_HWDATW	= 0x00FF0000,
+	SMBINTF_HWADDR	= 0x0000FF00,
+	SMBINTF_HWRWN	= 0x00000020,
+	SMBINTF_HWCMD	= 0x00000010,
+	SMBINTF_FASTM	= 0x00000008,
+	SMBINTF_GPIOSCL	= 0x00000004,
+	SMBINTF_GPIOSDA	= 0x00000002,
+	SMBINTF_GPIOEN	= 0x00000001,
+};
+
+enum jme_smbintf_vals {
+	SMBINTF_HWRWN_READ	= 0x00000020,
+	SMBINTF_HWRWN_WRITE	= 0x00000000,
+};
+
+enum jme_smbintf_shifts {
+	SMBINTF_HWDATR_SHIFT	= 24,
+	SMBINTF_HWDATW_SHIFT	= 16,
+	SMBINTF_HWADDR_SHIFT	= 8,
+};
+
+#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */
+#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */
+#define JME_SMB_LEN 256
+#define JME_EEPROM_MAGIC 0x250
+
+/*
+ * Timer Control/Status Register
+ */
+enum jme_tmcsr_bit_masks {
+	TMCSR_SWIT	= 0x80000000,
+	TMCSR_EN	= 0x01000000,
+	TMCSR_CNT	= 0x00FFFFFF,
+};
+
+/*
+ * General Purpose REG-0
+ */
+enum jme_gpreg0_masks {
+	GPREG0_DISSH		= 0xFF000000,
+	GPREG0_PCIRLMT		= 0x00300000,
+	GPREG0_PCCNOMUTCLR	= 0x00040000,
+	GPREG0_LNKINTPOLL	= 0x00001000,
+	GPREG0_PCCTMR		= 0x00000300,
+	GPREG0_PHYADDR		= 0x0000001F,
+};
+
+enum jme_gpreg0_vals {
+	GPREG0_DISSH_DW7	= 0x80000000,
+	GPREG0_DISSH_DW6	= 0x40000000,
+	GPREG0_DISSH_DW5	= 0x20000000,
+	GPREG0_DISSH_DW4	= 0x10000000,
+	GPREG0_DISSH_DW3	= 0x08000000,
+	GPREG0_DISSH_DW2	= 0x04000000,
+	GPREG0_DISSH_DW1	= 0x02000000,
+	GPREG0_DISSH_DW0	= 0x01000000,
+	GPREG0_DISSH_ALL	= 0xFF000000,
+
+	GPREG0_PCIRLMT_8	= 0x00000000,
+	GPREG0_PCIRLMT_6	= 0x00100000,
+	GPREG0_PCIRLMT_5	= 0x00200000,
+	GPREG0_PCIRLMT_4	= 0x00300000,
+
+	GPREG0_PCCTMR_16ns	= 0x00000000,
+	GPREG0_PCCTMR_256ns	= 0x00000100,
+	GPREG0_PCCTMR_1us	= 0x00000200,
+	GPREG0_PCCTMR_1ms	= 0x00000300,
+
+	GPREG0_PHYADDR_1	= 0x00000001,
+
+	GPREG0_DEFAULT		= GPREG0_PCIRLMT_4 |
+				  GPREG0_PCCTMR_1us |
+				  GPREG0_PHYADDR_1,
+};
+
+/*
+ * Interrupt Status Bits
+ */
+enum jme_interrupt_bits {
+	INTR_SWINTR	= 0x80000000,
+	INTR_TMINTR	= 0x40000000,
+	INTR_LINKCH	= 0x20000000,
+	INTR_PAUSERCV	= 0x10000000,
+	INTR_MAGICRCV	= 0x08000000,
+	INTR_WAKERCV	= 0x04000000,
+	INTR_PCCRX0TO	= 0x02000000,
+	INTR_PCCRX1TO	= 0x01000000,
+	INTR_PCCRX2TO	= 0x00800000,
+	INTR_PCCRX3TO	= 0x00400000,
+	INTR_PCCTXTO	= 0x00200000,
+	INTR_PCCRX0	= 0x00100000,
+	INTR_PCCRX1	= 0x00080000,
+	INTR_PCCRX2	= 0x00040000,
+	INTR_PCCRX3	= 0x00020000,
+	INTR_PCCTX	= 0x00010000,
+	INTR_RX3EMP	= 0x00008000,
+	INTR_RX2EMP	= 0x00004000,
+	INTR_RX1EMP	= 0x00002000,
+	INTR_RX0EMP	= 0x00001000,
+	INTR_RX3	= 0x00000800,
+	INTR_RX2	= 0x00000400,
+	INTR_RX1	= 0x00000200,
+	INTR_RX0	= 0x00000100,
+	INTR_TX7	= 0x00000080,
+	INTR_TX6	= 0x00000040,
+	INTR_TX5	= 0x00000020,
+	INTR_TX4	= 0x00000010,
+	INTR_TX3	= 0x00000008,
+	INTR_TX2	= 0x00000004,
+	INTR_TX1	= 0x00000002,
+	INTR_TX0	= 0x00000001,
+};
+
+static const u32 INTR_ENABLE = INTR_SWINTR |
+				 INTR_TMINTR |
+				 INTR_LINKCH |
+				 INTR_PCCRX0TO |
+				 INTR_PCCRX0 |
+				 INTR_PCCTXTO |
+				 INTR_PCCTX |
+				 INTR_RX0EMP;
+
+/*
+ * PCC Control Registers
+ */
+enum jme_pccrx_masks {
+	PCCRXTO_MASK	= 0xFFFF0000,
+	PCCRX_MASK	= 0x0000FF00,
+};
+
+enum jme_pcctx_masks {
+	PCCTXTO_MASK	= 0xFFFF0000,
+	PCCTX_MASK	= 0x0000FF00,
+	PCCTX_QS_MASK	= 0x000000FF,
+};
+
+enum jme_pccrx_shifts {
+	PCCRXTO_SHIFT	= 16,
+	PCCRX_SHIFT	= 8,
+};
+
+enum jme_pcctx_shifts {
+	PCCTXTO_SHIFT	= 16,
+	PCCTX_SHIFT	= 8,
+};
+
+enum jme_pcctx_bits {
+	PCCTXQ0_EN	= 0x00000001,
+	PCCTXQ1_EN	= 0x00000002,
+	PCCTXQ2_EN	= 0x00000004,
+	PCCTXQ3_EN	= 0x00000008,
+	PCCTXQ4_EN	= 0x00000010,
+	PCCTXQ5_EN	= 0x00000020,
+	PCCTXQ6_EN	= 0x00000040,
+	PCCTXQ7_EN	= 0x00000080,
+};
+
+/*
+ * Chip Mode Register
+ */
+enum jme_chipmode_bit_masks {
+	CM_FPGAVER_MASK		= 0xFFFF0000,
+	CM_CHIPREV_MASK		= 0x0000FF00,
+	CM_CHIPMODE_MASK	= 0x0000000F,
+};
+
+enum jme_chipmode_shifts {
+	CM_FPGAVER_SHIFT	= 16,
+	CM_CHIPREV_SHIFT	= 8,
+};
+
+/*
+ * Shadow base address register bits
+ */
+enum jme_shadow_base_address_bits {
+	SHBA_POSTEN	= 0x1,
+};
+
+/*
+ * Aggressive Power Mode Control
+ */
+enum jme_apmc_bits {
+	JME_APMC_PCIE_SD_EN	= 0x40000000,
+	JME_APMC_PSEUDO_HP_EN	= 0x20000000,
+	JME_APMC_EPIEN		= 0x04000000,
+	JME_APMC_EPIEN_CTRL	= 0x03000000,
+};
+
+enum jme_apmc_values {
+	JME_APMC_EPIEN_CTRL_EN	= 0x02000000,
+	JME_APMC_EPIEN_CTRL_DIS	= 0x01000000,
+};
+
+#define APMC_PHP_SHUTDOWN_DELAY	(10 * 1000 * 1000)
+
+#ifdef REG_DEBUG
+static char *MAC_REG_NAME[] = {
+	"JME_TXCS",      "JME_TXDBA_LO",  "JME_TXDBA_HI", "JME_TXQDC",
+	"JME_TXNDA",     "JME_TXMCS",     "JME_TXPFC",    "JME_TXTRHD",
+	"JME_RXCS",      "JME_RXDBA_LO",  "JME_RXDBA_HI", "JME_RXQDC",
+	"JME_RXNDA",     "JME_RXMCS",     "JME_RXUMA_LO", "JME_RXUMA_HI",
+	"JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP",    "JME_WFOI",
+	"JME_SMI",       "JME_GHC",       "UNKNOWN",      "UNKNOWN",
+	"JME_PMCS"};
+
+static char *PE_REG_NAME[] = {
+	"UNKNOWN",      "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"UNKNOWN",      "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"UNKNOWN",      "UNKNOWN",     "JME_PHY_CS", "UNKNOWN",
+	"JME_PHY_LINK", "UNKNOWN",     "UNKNOWN",    "UNKNOWN",
+	"JME_SMBCSR",   "JME_SMBINTF"};
+
+static char *MISC_REG_NAME[] = {
+	"JME_TMCSR",  "JME_GPIO",     "JME_GPREG0",  "JME_GPREG1",
+	"JME_IEVE",   "JME_IREQ",     "JME_IENS",    "JME_IENC",
+	"JME_PCCRX0", "JME_PCCRX1",   "JME_PCCRX2",  "JME_PCCRX3",
+	"JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"UNKNOWN",    "UNKNOWN",      "UNKNOWN",     "UNKNOWN",
+	"JME_TIMER1", "JME_TIMER2",   "UNKNOWN",     "JME_APMC",
+	"JME_PCCSRX0"};
+
+static inline void reg_dbg(const struct jme_adapter *jme,
+		const char *msg, u32 val, u32 reg)
+{
+	const char *regname;
+	switch (reg & 0xF00) {
+	case 0x000:
+		regname = MAC_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	case 0x400:
+		regname = PE_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	case 0x800:
+		regname = MISC_REG_NAME[(reg & 0xFF) >> 2];
+		break;
+	default:
+		regname = PE_REG_NAME[0];
+	}
+	printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name,
+			msg, val, regname);
+}
+#else
+static inline void reg_dbg(const struct jme_adapter *jme,
+		const char *msg, u32 val, u32 reg) {}
+#endif
+
+/*
+ * Read/write memory-mapped I/O registers
+ */
+static inline u32 jread32(struct jme_adapter *jme, u32 reg)
+{
+	return readl(jme->regs + reg);
+}
+
+static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val)
+{
+	reg_dbg(jme, "REG WRITE", val, reg);
+	writel(val, jme->regs + reg);
+	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
+
+static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val)
+{
+	/*
+	 * Read after write should cause flush
+	 */
+	reg_dbg(jme, "REG WRITE FLUSH", val, reg);
+	writel(val, jme->regs + reg);
+	readl(jme->regs + reg);
+	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
+
+/*
+ * PHY Regs
+ */
+enum jme_phy_reg17_bit_masks {
+	PREG17_SPEED		= 0xC000,
+	PREG17_DUPLEX		= 0x2000,
+	PREG17_SPDRSV		= 0x0800,
+	PREG17_LNKUP		= 0x0400,
+	PREG17_MDI		= 0x0040,
+};
+
+enum jme_phy_reg17_vals {
+	PREG17_SPEED_10M	= 0x0000,
+	PREG17_SPEED_100M	= 0x4000,
+	PREG17_SPEED_1000M	= 0x8000,
+};
+
+#define BMSR_ANCOMP               0x0020
+
+/*
+ * Workaround
+ */
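+/*
+ * Identify JMC250 chip revision 0x11, which presumably needs the
+ * workarounds applied elsewhere in the driver.
+ */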
+static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+{
+	return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
+}
+
+/*
+ * Function prototypes
+ */
+static int jme_set_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd);
+static void jme_set_multi(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0a97c26..a1e22ed 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -41,7 +41,7 @@
 #endif
 
 #if MFE_DEBUG>=1
-#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args)
+#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
 #define MFE_RX_DEBUG 2
 #else
 #define DPRINTK(str,args...)
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 6d343ef..4e7a5fa 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -203,7 +203,7 @@
 
 out_badirq:
 	printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
-	       dev->name, __FUNCTION__, irq);
+	       dev->name, __func__, irq);
 	return ret;
 }
 
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 096bca5..b411b79 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -33,6 +33,7 @@
 
 #include <linux/errno.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/bitmap.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0a18b9e..372811a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -48,30 +48,28 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/mv643xx_eth.h>
 #include <asm/io.h>
 #include <asm/types.h>
 #include <asm/system.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
-static char mv643xx_eth_driver_version[] = "1.3";
+static char mv643xx_eth_driver_version[] = "1.4";
 
-#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
-#define MV643XX_ETH_NAPI
-#define MV643XX_ETH_TX_FAST_REFILL
-
-#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
-#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
-#else
-#define MAX_DESCS_PER_SKB	1
-#endif
 
 /*
  * Registers shared between all ports.
  */
 #define PHY_ADDR			0x0000
 #define SMI_REG				0x0004
+#define  SMI_BUSY			0x10000000
+#define  SMI_READ_VALID			0x08000000
+#define  SMI_OPCODE_READ		0x04000000
+#define  SMI_OPCODE_WRITE		0x00000000
+#define ERR_INT_CAUSE			0x0080
+#define  ERR_INT_SMI_DONE		0x00000010
+#define ERR_INT_MASK			0x0084
 #define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
 #define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
 #define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
@@ -104,16 +102,12 @@
 #define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
 #define TX_BW_BURST(p)			(0x045c + ((p) << 10))
 #define INT_CAUSE(p)			(0x0460 + ((p) << 10))
-#define  INT_TX_END_0			0x00080000
 #define  INT_TX_END			0x07f80000
-#define  INT_RX				0x0007fbfc
+#define  INT_RX				0x000003fc
 #define  INT_EXT			0x00000002
 #define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
-#define  INT_EXT_LINK			0x00100000
-#define  INT_EXT_PHY			0x00010000
-#define  INT_EXT_TX_ERROR_0		0x00000100
-#define  INT_EXT_TX_0			0x00000001
-#define  INT_EXT_TX			0x0000ffff
+#define  INT_EXT_LINK_PHY		0x00110000
+#define  INT_EXT_TX			0x000000ff
 #define INT_MASK(p)			(0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
@@ -171,8 +165,8 @@
 #define FORCE_LINK_PASS				(1 << 1)
 #define SERIAL_PORT_ENABLE			(1 << 0)
 
-#define DEFAULT_RX_QUEUE_SIZE		400
-#define DEFAULT_TX_QUEUE_SIZE		800
+#define DEFAULT_RX_QUEUE_SIZE		128
+#define DEFAULT_TX_QUEUE_SIZE		256
 
 
 /*
@@ -249,9 +243,23 @@
 	void __iomem *base;
 
 	/*
-	 * Protects access to SMI_REG, which is shared between ports.
+	 * Points at the right SMI instance to use.
 	 */
-	spinlock_t phy_lock;
+	struct mv643xx_eth_shared_private *smi;
+
+	/*
+	 * Provides access to local SMI interface.
+	 */
+	struct mii_bus smi_bus;
+
+	/*
+	 * If we have access to the error interrupt pin (which is
+	 * somewhat misnamed as it not only reflects internal errors
+	 * but also reflects SMI completion), use that to wait for
+	 * SMI access completion instead of polling the SMI busy bit.
+	 */
+	int err_interrupt;
+	wait_queue_head_t smi_busy_wait;
 
 	/*
 	 * Per-port MBUS window access register value.
@@ -263,9 +271,13 @@
 	 */
 	unsigned int t_clk;
 	int extended_rx_coal_limit;
-	int tx_bw_control_moved;
+	int tx_bw_control;
 };
 
+#define TX_BW_CONTROL_ABSENT		0
+#define TX_BW_CONTROL_OLD_LAYOUT	1
+#define TX_BW_CONTROL_NEW_LAYOUT	2
+
 
 /* per-port *****************************************************************/
 struct mib_counters {
@@ -314,8 +326,6 @@
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
-
-	struct timer_list rx_oom;
 };
 
 struct tx_queue {
@@ -330,7 +340,12 @@
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
-	struct sk_buff **tx_skb;
+
+	struct sk_buff_head tx_skb;
+
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
 };
 
 struct mv643xx_eth_private {
@@ -339,14 +354,24 @@
 
 	struct net_device *dev;
 
-	struct mv643xx_eth_shared_private *shared_smi;
-	int phy_addr;
+	struct phy_device *phy;
 
-	spinlock_t lock;
-
+	struct timer_list mib_counters_timer;
+	spinlock_t mib_counters_lock;
 	struct mib_counters mib_counters;
+
 	struct work_struct tx_timeout_task;
-	struct mii_if_info mii;
+
+	struct napi_struct napi;
+	u8 work_link;
+	u8 work_tx;
+	u8 work_tx_end;
+	u8 work_rx;
+	u8 work_rx_refill;
+	u8 work_rx_oom;
+
+	int skb_size;
+	struct sk_buff_head rx_recycle;
 
 	/*
 	 * RX state.
@@ -354,9 +379,8 @@
 	int default_rx_ring_size;
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
-	u8 rxq_mask;
-	int rxq_primary;
-	struct napi_struct napi;
+	int rxq_count;
+	struct timer_list rx_oom;
 	struct rx_queue rxq[8];
 
 	/*
@@ -365,12 +389,8 @@
 	int default_tx_ring_size;
 	unsigned long tx_desc_sram_addr;
 	int tx_desc_sram_size;
-	u8 txq_mask;
-	int txq_primary;
+	int txq_count;
 	struct tx_queue txq[8];
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	int tx_clean_threshold;
-#endif
 };
 
 
@@ -440,94 +460,21 @@
 		udelay(10);
 }
 
-static void __txq_maybe_wake(struct tx_queue *txq)
+static void txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != mp->txq_primary);
-
-	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
-		netif_wake_queue(mp->dev);
-}
-
-
-/* rx ***********************************************************************/
-static void txq_reclaim(struct tx_queue *txq, int force);
-
-static void rxq_refill(struct rx_queue *rxq)
-{
-	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&mp->lock, flags);
-
-	while (rxq->rx_desc_count < rxq->rx_ring_size) {
-		int skb_size;
-		struct sk_buff *skb;
-		int unaligned;
-		int rx;
-
-		/*
-		 * Reserve 2+14 bytes for an ethernet header (the
-		 * hardware automatically prepends 2 bytes of dummy
-		 * data to each received packet), 16 bytes for up to
-		 * four VLAN tags, and 4 bytes for the trailing FCS
-		 * -- 36 bytes total.
-		 */
-		skb_size = mp->dev->mtu + 36;
-
-		/*
-		 * Make sure that the skb size is a multiple of 8
-		 * bytes, as the lower three bits of the receive
-		 * descriptor's buffer size field are ignored by
-		 * the hardware.
-		 */
-		skb_size = (skb_size + 7) & ~7;
-
-		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
-		if (skb == NULL)
-			break;
-
-		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-		if (unaligned)
-			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
-
-		rxq->rx_desc_count++;
-		rx = rxq->rx_used_desc;
-		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
-
-		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
-						skb_size, DMA_FROM_DEVICE);
-		rxq->rx_desc_area[rx].buf_size = skb_size;
-		rxq->rx_skb[rx] = skb;
-		wmb();
-		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
-						RX_ENABLE_INTERRUPT;
-		wmb();
-
-		/*
-		 * The hardware automatically prepends 2 bytes of
-		 * dummy data to each received packet, so that the
-		 * IP header ends up 16-byte aligned.
-		 */
-		skb_reserve(skb, 2);
+	if (netif_tx_queue_stopped(nq)) {
+		__netif_tx_lock(nq, smp_processor_id());
+		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+		__netif_tx_unlock(nq);
 	}
-
-	if (rxq->rx_desc_count != rxq->rx_ring_size)
-		mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
-
-	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static inline void rxq_refill_timer_wrapper(unsigned long data)
-{
-	rxq_refill((struct rx_queue *)data);
-}
 
+/* rx napi ******************************************************************/
 static int rxq_process(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
@@ -539,31 +486,31 @@
 		struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
 		struct sk_buff *skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&mp->lock, flags);
+		u16 byte_cnt;
 
 		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 
 		cmd_sts = rx_desc->cmd_sts;
-		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
-			spin_unlock_irqrestore(&mp->lock, flags);
+		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 			break;
-		}
 		rmb();
 
 		skb = rxq->rx_skb[rxq->rx_curr_desc];
 		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 
-		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
+		rxq->rx_curr_desc++;
+		if (rxq->rx_curr_desc == rxq->rx_ring_size)
+			rxq->rx_curr_desc = 0;
 
-		spin_unlock_irqrestore(&mp->lock, flags);
-
-		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
+		dma_unmap_single(NULL, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
 
+		mp->work_rx_refill |= 1 << rxq->index;
+
+		byte_cnt = rx_desc->byte_cnt;
+
 		/*
 		 * Update statistics.
 		 *
@@ -573,7 +520,7 @@
 		 * byte CRC at the end of the packet (which we do count).
 		 */
 		stats->rx_packets++;
-		stats->rx_bytes += rx_desc->byte_cnt - 2;
+		stats->rx_bytes += byte_cnt - 2;
 
 		/*
 		 * In case we received a packet without first / last bits
@@ -596,72 +543,84 @@
 			if (cmd_sts & ERROR_SUMMARY)
 				stats->rx_errors++;
 
-			dev_kfree_skb_irq(skb);
+			dev_kfree_skb(skb);
 		} else {
 			/*
 			 * The -4 is for the CRC in the trailer of the
 			 * received packet
 			 */
-			skb_put(skb, rx_desc->byte_cnt - 2 - 4);
+			skb_put(skb, byte_cnt - 2 - 4);
 
-			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
+			if (cmd_sts & LAYER_4_CHECKSUM_OK)
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				skb->csum = htons(
-					(cmd_sts & 0x0007fff8) >> 3);
-			}
 			skb->protocol = eth_type_trans(skb, mp->dev);
-#ifdef MV643XX_ETH_NAPI
 			netif_receive_skb(skb);
-#else
-			netif_rx(skb);
-#endif
 		}
 
 		mp->dev->last_rx = jiffies;
 	}
 
-	rxq_refill(rxq);
+	if (rx < budget)
+		mp->work_rx &= ~(1 << rxq->index);
 
 	return rx;
 }
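
The descriptor index advance in rxq_process() above (and throughout the tx path below) replaces the old modulo arithmetic with a compare-and-reset. A minimal standalone sketch, assuming a hypothetical 8-entry ring, showing that both forms wrap identically while the new one avoids an integer division per descriptor:

	#include <stdio.h>

	#define RING_SIZE 8	/* hypothetical ring size, for illustration only */

	int main(void)
	{
		int step, old_idx = 0, new_idx = 0;

		for (step = 0; step < 20; step++) {
			old_idx = (old_idx + 1) % RING_SIZE;	/* old form: modulo */

			new_idx++;				/* new form: compare and reset */
			if (new_idx == RING_SIZE)
				new_idx = 0;

			if (old_idx != new_idx)
				printf("mismatch at step %d\n", step);
		}
		printf("both forms wrap identically\n");
		return 0;
	}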
 
-#ifdef MV643XX_ETH_NAPI
-static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+static int rxq_refill(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp;
-	int rx;
-	int i;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int refilled;
 
-	mp = container_of(napi, struct mv643xx_eth_private, napi);
+	refilled = 0;
+	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+		struct sk_buff *skb;
+		int unaligned;
+		int rx;
 
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	if (++mp->tx_clean_threshold > 5) {
-		mp->tx_clean_threshold = 0;
-		for (i = 0; i < 8; i++)
-			if (mp->txq_mask & (1 << i))
-				txq_reclaim(mp->txq + i, 0);
+		skb = __skb_dequeue(&mp->rx_recycle);
+		if (skb == NULL)
+			skb = dev_alloc_skb(mp->skb_size +
+					    dma_get_cache_alignment() - 1);
 
-		if (netif_carrier_ok(mp->dev)) {
-			spin_lock_irq(&mp->lock);
-			__txq_maybe_wake(mp->txq + mp->txq_primary);
-			spin_unlock_irq(&mp->lock);
+		if (skb == NULL) {
+			mp->work_rx_oom |= 1 << rxq->index;
+			goto oom;
 		}
-	}
-#endif
 
-	rx = 0;
-	for (i = 7; rx < budget && i >= 0; i--)
-		if (mp->rxq_mask & (1 << i))
-			rx += rxq_process(mp->rxq + i, budget - rx);
+		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
+		if (unaligned)
+			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
-	if (rx < budget) {
-		netif_rx_complete(mp->dev, napi);
-		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+		refilled++;
+		rxq->rx_desc_count++;
+
+		rx = rxq->rx_used_desc++;
+		if (rxq->rx_used_desc == rxq->rx_ring_size)
+			rxq->rx_used_desc = 0;
+
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						mp->skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = mp->skb_size;
+		rxq->rx_skb[rx] = skb;
+		wmb();
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+						RX_ENABLE_INTERRUPT;
+		wmb();
+
+		/*
+		 * The hardware automatically prepends 2 bytes of
+		 * dummy data to each received packet, so that the
+		 * IP header ends up 16-byte aligned.
+		 */
+		skb_reserve(skb, 2);
 	}
 
-	return rx;
+	if (refilled < budget)
+		mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+	return refilled;
 }
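
The refill loop above aligns each freshly allocated receive buffer to a cache-line boundary by reserving the shortfall at the head of the skb. A minimal userspace sketch of the same align-up arithmetic, assuming a 64-byte cache line (the driver queries dma_get_cache_alignment() instead of hard-coding it):

	#include <stdint.h>
	#include <stdio.h>

	#define CACHE_LINE 64	/* assumed; the driver uses dma_get_cache_alignment() */

	static void *align_to_cache_line(void *p)
	{
		uintptr_t addr = (uintptr_t)p;
		uintptr_t unaligned = addr & (CACHE_LINE - 1);

		if (unaligned)
			addr += CACHE_LINE - unaligned;	/* the skb_reserve() step */
		return (void *)addr;
	}

	int main(void)
	{
		char buf[256];

		printf("%p -> %p\n", (void *)(buf + 3), align_to_cache_line(buf + 3));
		return 0;
	}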
-#endif
 
 
 /* tx ***********************************************************************/
@@ -684,8 +643,9 @@
 
 	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
 
-	tx_desc_curr = txq->tx_curr_desc;
-	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
+	tx_desc_curr = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
 
 	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
 
@@ -714,10 +674,8 @@
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 					ZERO_PADDING | TX_LAST_DESC |
 					TX_ENABLE_INTERRUPT;
-			txq->tx_skb[tx_index] = skb;
 		} else {
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
-			txq->tx_skb[tx_index] = NULL;
 		}
 
 		desc->l4i_chk = 0;
@@ -734,144 +692,228 @@
 	return (__force __be16)sum;
 }
 
-static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
 	u32 cmd_sts;
+	u16 l4i_chk;
 	int length;
 
 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+	l4i_chk = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int tag_bytes;
+
+		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+		       skb->protocol != htons(ETH_P_8021Q));
+
+		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
+		if (unlikely(tag_bytes & ~12)) {
+			if (skb_checksum_help(skb) == 0)
+				goto no_csum;
+			kfree_skb(skb);
+			return 1;
+		}
+
+		if (tag_bytes & 4)
+			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+		if (tag_bytes & 8)
+			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
+
+		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+			   GEN_IP_V4_CHECKSUM   |
+			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+		switch (ip_hdr(skb)->protocol) {
+		case IPPROTO_UDP:
+			cmd_sts |= UDP_FRAME;
+			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
+			break;
+		case IPPROTO_TCP:
+			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
+			break;
+		default:
+			BUG();
+		}
+	} else {
+no_csum:
+		/* Errata BTS #50, IHL must be 5 if no HW checksum */
+		cmd_sts |= 5 << TX_IHL_SHIFT;
+	}
 
 	tx_index = txq_alloc_desc_index(txq);
 	desc = &txq->tx_desc_area[tx_index];
 
 	if (nr_frags) {
 		txq_submit_frag_skb(txq, skb);
-
 		length = skb_headlen(skb);
-		txq->tx_skb[tx_index] = NULL;
 	} else {
 		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 		length = skb->len;
-		txq->tx_skb[tx_index] = skb;
 	}
 
+	desc->l4i_chk = l4i_chk;
 	desc->byte_cnt = length;
 	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		int mac_hdr_len;
-
-		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
-		       skb->protocol != htons(ETH_P_8021Q));
-
-		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-			   GEN_IP_V4_CHECKSUM   |
-			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
-		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-		switch (mac_hdr_len - ETH_HLEN) {
-		case 0:
-			break;
-		case 4:
-			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			break;
-		case 8:
-			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		case 12:
-			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		default:
-			if (net_ratelimit())
-				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
-				   "mac header length is %d?!\n", mac_hdr_len);
-			break;
-		}
-
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_UDP:
-			cmd_sts |= UDP_FRAME;
-			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
-			break;
-		case IPPROTO_TCP:
-			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
-			break;
-		default:
-			BUG();
-		}
-	} else {
-		/* Errata BTS #50, IHL must be 5 if no HW checksum */
-		cmd_sts |= 5 << TX_IHL_SHIFT;
-		desc->l4i_chk = 0;
-	}
+	__skb_queue_tail(&txq->tx_skb, skb);
 
 	/* ensure all other descriptors are written before first cmd_sts */
 	wmb();
 	desc->cmd_sts = cmd_sts;
 
-	/* clear TX_END interrupt status */
-	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
-	rdl(mp, INT_CAUSE(mp->port_num));
+	/* clear TX_END status */
+	mp->work_tx_end &= ~(1 << txq->index);
 
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
 	txq_enable(txq);
 
 	txq->tx_desc_count += nr_frags + 1;
+
+	return 0;
 }
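
The checksum-offload path in txq_submit_skb() above only hands a packet to the hardware when the gap between the 14-byte ethernet header and the IP header is 0, 4, 8 or 12 bytes (for example one to three 4-byte VLAN tags); anything else falls back to skb_checksum_help(). A small standalone sketch of the "tag_bytes & ~12" test, which is zero for exactly those four values:

	#include <stdio.h>

	int main(void)
	{
		int tag_bytes;

		for (tag_bytes = 0; tag_bytes <= 16; tag_bytes += 2)
			printf("tag_bytes=%2d -> %s\n", tag_bytes,
			       (tag_bytes & ~12) ? "software checksum fallback"
						 : "hardware offload");
		return 0;
	}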
 
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
-	unsigned long flags;
+	struct netdev_queue *nq;
+
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-		stats->tx_dropped++;
+		txq->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
 			   "failed to linearize skb with tiny "
 			   "unaligned fragment\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
-
-	txq = mp->txq + mp->txq_primary;
-
-	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
-		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == mp->txq_primary && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	txq_submit_skb(txq, skb);
-	stats->tx_bytes += skb->len;
-	stats->tx_packets++;
-	dev->trans_start = jiffies;
-
-	if (txq->index == mp->txq_primary) {
+	if (!txq_submit_skb(txq, skb)) {
 		int entries_left;
 
+		txq->tx_bytes += skb->len;
+		txq->tx_packets++;
+		dev->trans_start = jiffies;
+
 		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_DESCS_PER_SKB)
-			netif_stop_queue(dev);
+		if (entries_left < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
 	}
 
-	spin_unlock_irqrestore(&mp->lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
 
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	u32 hw_desc_ptr;
+	u32 expected_ptr;
+
+	__netif_tx_lock(nq, smp_processor_id());
+
+	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
+		goto out;
+
+	hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
+	expected_ptr = (u32)txq->tx_desc_dma +
+				txq->tx_curr_desc * sizeof(struct tx_desc);
+
+	if (hw_desc_ptr != expected_ptr)
+		txq_enable(txq);
+
+out:
+	__netif_tx_unlock(nq);
+
+	mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	int reclaimed;
+
+	__netif_tx_lock(nq, smp_processor_id());
+
+	reclaimed = 0;
+	while (reclaimed < budget && txq->tx_desc_count > 0) {
+		int tx_index;
+		struct tx_desc *desc;
+		u32 cmd_sts;
+		struct sk_buff *skb;
+
+		tx_index = txq->tx_used_desc;
+		desc = &txq->tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+
+		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+			if (!force)
+				break;
+			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+		}
+
+		txq->tx_used_desc = tx_index + 1;
+		if (txq->tx_used_desc == txq->tx_ring_size)
+			txq->tx_used_desc = 0;
+
+		reclaimed++;
+		txq->tx_desc_count--;
+
+		skb = NULL;
+		if (cmd_sts & TX_LAST_DESC)
+			skb = __skb_dequeue(&txq->tx_skb);
+
+		if (cmd_sts & ERROR_SUMMARY) {
+			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+			mp->dev->stats.tx_errors++;
+		}
+
+		if (cmd_sts & TX_FIRST_DESC) {
+			dma_unmap_single(NULL, desc->buf_ptr,
+					 desc->byte_cnt, DMA_TO_DEVICE);
+		} else {
+			dma_unmap_page(NULL, desc->buf_ptr,
+				       desc->byte_cnt, DMA_TO_DEVICE);
+		}
+
+		if (skb != NULL) {
+			if (skb_queue_len(&mp->rx_recycle) <
+					mp->default_rx_ring_size &&
+			    skb_recycle_check(skb, mp->skb_size))
+				__skb_queue_head(&mp->rx_recycle, skb);
+			else
+				dev_kfree_skb(skb);
+		}
+	}
+
+	__netif_tx_unlock(nq);
+
+	if (reclaimed < budget)
+		mp->work_tx &= ~(1 << txq->index);
+
+	return reclaimed;
+}
+
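
txq_reclaim() above feeds fully transmitted buffers into a bounded recycle list so that the rx refill path can reuse them instead of allocating fresh skbs. A rough userspace sketch of that decision, with made-up limits standing in for mp->rx_recycle and skb_recycle_check():

	#include <stdio.h>
	#include <stdlib.h>

	#define RECYCLE_MAX	4	/* made up; the driver bounds the list by the rx ring size */
	#define RX_BUF_SIZE	1536	/* made up; the driver checks against mp->skb_size */

	struct buf { size_t size; };

	static struct buf *recycle_list[RECYCLE_MAX];
	static int recycle_count;

	static void reclaim(struct buf *b)
	{
		if (recycle_count < RECYCLE_MAX && b->size >= RX_BUF_SIZE)
			recycle_list[recycle_count++] = b;	/* keep it for rx refill */
		else
			free(b);				/* too small or list full */
	}

	int main(void)
	{
		struct buf *b = malloc(sizeof(*b));

		if (b == NULL)
			return 1;
		b->size = 2048;
		reclaim(b);
		printf("%d buffer(s) on the recycle list\n", recycle_count);
		return 0;
	}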
+
 /* tx rate control **********************************************************/
 /*
  * Set total maximum TX rate (shared by all TX queues for this port)
@@ -895,14 +937,17 @@
 	if (bucket_size > 65535)
 		bucket_size = 65535;
 
-	if (mp->shared->tx_bw_control_moved) {
-		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
-		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
-		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
-	} else {
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
 		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
 		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
+		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
+		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
+		break;
 	}
 }
 
@@ -934,14 +979,21 @@
 	/*
 	 * Turn on fixed priority mode.
 	 */
-	if (mp->shared->tx_bw_control_moved)
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
-	else
+	off = 0;
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		break;
+	}
 
-	val = rdl(mp, off);
-	val |= 1 << txq->index;
-	wrl(mp, off, val);
+	if (off) {
+		val = rdl(mp, off);
+		val |= 1 << txq->index;
+		wrl(mp, off, val);
+	}
 }
 
 static void txq_set_wrr(struct tx_queue *txq, int weight)
@@ -953,95 +1005,147 @@
 	/*
 	 * Turn off fixed priority mode.
 	 */
-	if (mp->shared->tx_bw_control_moved)
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
-	else
+	off = 0;
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		break;
+	}
 
-	val = rdl(mp, off);
-	val &= ~(1 << txq->index);
-	wrl(mp, off, val);
+	if (off) {
+		val = rdl(mp, off);
+		val &= ~(1 << txq->index);
+		wrl(mp, off, val);
 
-	/*
-	 * Configure WRR weight for this queue.
-	 */
-	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
+		/*
+		 * Configure WRR weight for this queue.
+		 */
+		off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
 
-	val = rdl(mp, off);
-	val = (val & ~0xff) | (weight & 0xff);
-	wrl(mp, off, val);
+		val = rdl(mp, off);
+		val = (val & ~0xff) | (weight & 0xff);
+		wrl(mp, off, val);
+	}
 }
 
 
 /* mii management interface *************************************************/
-#define SMI_BUSY		0x10000000
-#define SMI_READ_VALID		0x08000000
-#define SMI_OPCODE_READ		0x04000000
-#define SMI_OPCODE_WRITE	0x00000000
-
-static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
-			 unsigned int reg, unsigned int *value)
+static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
 {
-	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	unsigned long flags;
-	int i;
+	struct mv643xx_eth_shared_private *msp = dev_id;
 
-	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
+	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
+		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
+		wake_up(&msp->smi_busy_wait);
+		return IRQ_HANDLED;
+	}
 
-	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
-		if (i == 1000) {
-			printk("%s: PHY busy timeout\n", mp->dev->name);
-			goto out;
+	return IRQ_NONE;
+}
+
+static int smi_is_done(struct mv643xx_eth_shared_private *msp)
+{
+	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
+}
+
+static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
+{
+	if (msp->err_interrupt == NO_IRQ) {
+		int i;
+
+		for (i = 0; !smi_is_done(msp); i++) {
+			if (i == 10)
+				return -ETIMEDOUT;
+			msleep(10);
 		}
-		udelay(10);
+
+		return 0;
+	}
+
+	if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
+				msecs_to_jiffies(100)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
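
smi_wait_ready() above uses the SMI-done error interrupt when it is wired up and otherwise falls back to polling the busy bit at most ten times with a 10 ms sleep, roughly the same 100 ms budget as the interrupt-driven path. A standalone sketch of that bounded-poll fallback, with is_done() standing in for the SMI_BUSY register test:

	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	static int is_done(void)
	{
		static int polls_left = 3;	/* pretend the bus finishes on the 3rd poll */

		return --polls_left <= 0;
	}

	static int wait_ready(void)
	{
		struct timespec ten_ms = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
		int i;

		for (i = 0; !is_done(); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			nanosleep(&ten_ms, NULL);
		}
		return 0;
	}

	int main(void)
	{
		printf("wait_ready() = %d\n", wait_ready());
		return 0;
	}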
+static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
+{
+	struct mv643xx_eth_shared_private *msp = bus->priv;
+	void __iomem *smi_reg = msp->base + SMI_REG;
+	int ret;
+
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
 	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
 
-	/* now wait for the data to be valid */
-	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
-		if (i == 1000) {
-			printk("%s: PHY read timeout\n", mp->dev->name);
-			goto out;
-		}
-		udelay(10);
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
-	*value = readl(smi_reg) & 0xffff;
-out:
-	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
+	ret = readl(smi_reg);
+	if (!(ret & SMI_READ_VALID)) {
+		printk("mv643xx_eth: SMI bus read not valid\n");
+		return -ENODEV;
+	}
+
+	return ret & 0xffff;
 }
 
-static void smi_reg_write(struct mv643xx_eth_private *mp,
-			  unsigned int addr,
-			  unsigned int reg, unsigned int value)
+static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
 {
-	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	unsigned long flags;
-	int i;
+	struct mv643xx_eth_shared_private *msp = bus->priv;
+	void __iomem *smi_reg = msp->base + SMI_REG;
 
-	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
-
-	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
-		if (i == 1000) {
-			printk("%s: PHY busy timeout\n", mp->dev->name);
-			goto out;
-		}
-		udelay(10);
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
 	writel(SMI_OPCODE_WRITE | (reg << 21) |
-		(addr << 16) | (value & 0xffff), smi_reg);
-out:
-	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
+		(addr << 16) | (val & 0xffff), smi_reg);
+
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
 }
 
 
-/* mib counters *************************************************************/
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned long tx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_dropped = 0;
+	int i;
+
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		tx_packets += txq->tx_packets;
+		tx_bytes += txq->tx_bytes;
+		tx_dropped += txq->tx_dropped;
+	}
+
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->tx_dropped = tx_dropped;
+
+	return stats;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1059,6 +1163,7 @@
 {
 	struct mib_counters *p = &mp->mib_counters;
 
+	spin_lock(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
 	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
@@ -1091,6 +1196,16 @@
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
+	spin_unlock(&mp->mib_counters_lock);
+
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
+}
+
+static void mib_counters_timer_wrapper(unsigned long _mp)
+{
+	struct mv643xx_eth_private *mp = (void *)_mp;
+
+	mib_counters_update(mp);
 }
 
 
@@ -1156,9 +1271,9 @@
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
 
-	spin_lock_irq(&mp->lock);
-	err = mii_ethtool_gset(&mp->mii, cmd);
-	spin_unlock_irq(&mp->lock);
+	err = phy_read_status(mp->phy);
+	if (err == 0)
+		err = phy_ethtool_gset(mp->phy, cmd);
 
 	/*
 	 * The MAC does not support 1000baseT_Half.
@@ -1206,18 +1321,13 @@
 static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int err;
 
 	/*
 	 * The MAC does not support 1000baseT_Half.
 	 */
 	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
 
-	spin_lock_irq(&mp->lock);
-	err = mii_ethtool_sset(&mp->mii, cmd);
-	spin_unlock_irq(&mp->lock);
-
-	return err;
+	return phy_ethtool_sset(mp->phy, cmd);
 }
 
 static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1239,7 +1349,7 @@
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	return mii_nway_restart(&mp->mii);
+	return genphy_restart_aneg(mp->phy);
 }
 
 static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
@@ -1249,14 +1359,7 @@
 
 static u32 mv643xx_eth_get_link(struct net_device *dev)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-
-	return mii_link_ok(&mp->mii);
-}
-
-static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
-{
-	return 1;
+	return !!netif_carrier_ok(dev);
 }
 
 static void mv643xx_eth_get_strings(struct net_device *dev,
@@ -1277,9 +1380,10 @@
 					  struct ethtool_stats *stats,
 					  uint64_t *data)
 {
-	struct mv643xx_eth_private *mp = dev->priv;
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int i;
 
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
@@ -1323,7 +1427,7 @@
 	.set_settings		= mv643xx_eth_set_settings_phyless,
 	.get_drvinfo		= mv643xx_eth_get_drvinfo,
 	.nway_reset		= mv643xx_eth_nway_reset_phyless,
-	.get_link		= mv643xx_eth_get_link_phyless,
+	.get_link		= mv643xx_eth_get_link,
 	.set_sg			= ethtool_op_set_sg,
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
@@ -1487,7 +1591,7 @@
 
 	size = rxq->rx_ring_size * sizeof(struct rx_desc);
 
-	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
+	if (index == 0 && size <= mp->rx_desc_sram_size) {
 		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
 						mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1515,20 +1619,21 @@
 
 	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
 	for (i = 0; i < rxq->rx_ring_size; i++) {
-		int nexti = (i + 1) % rxq->rx_ring_size;
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == rxq->rx_ring_size)
+			nexti = 0;
+
 		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
 					nexti * sizeof(struct rx_desc);
 	}
 
-	init_timer(&rxq->rx_oom);
-	rxq->rx_oom.data = (unsigned long)rxq;
-	rxq->rx_oom.function = rxq_refill_timer_wrapper;
-
 	return 0;
 
 
 out_free:
-	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
+	if (index == 0 && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1546,8 +1651,6 @@
 
 	rxq_disable(rxq);
 
-	del_timer_sync(&rxq->rx_oom);
-
 	for (i = 0; i < rxq->rx_ring_size; i++) {
 		if (rxq->rx_skb[i]) {
 			dev_kfree_skb(rxq->rx_skb[i]);
@@ -1561,7 +1664,7 @@
 			   rxq->rx_desc_count);
 	}
 
-	if (rxq->index == mp->rxq_primary &&
+	if (rxq->index == 0 &&
 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
@@ -1588,7 +1691,7 @@
 
 	size = txq->tx_ring_size * sizeof(struct tx_desc);
 
-	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
+	if (index == 0 && size <= mp->tx_desc_sram_size) {
 		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
 						mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1601,97 +1704,29 @@
 	if (txq->tx_desc_area == NULL) {
 		dev_printk(KERN_ERR, &mp->dev->dev,
 			   "can't allocate tx ring (%d bytes)\n", size);
-		goto out;
+		return -ENOMEM;
 	}
 	memset(txq->tx_desc_area, 0, size);
 
 	txq->tx_desc_area_size = size;
-	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
-								GFP_KERNEL);
-	if (txq->tx_skb == NULL) {
-		dev_printk(KERN_ERR, &mp->dev->dev,
-			   "can't allocate tx skb ring\n");
-		goto out_free;
-	}
 
 	tx_desc = (struct tx_desc *)txq->tx_desc_area;
 	for (i = 0; i < txq->tx_ring_size; i++) {
 		struct tx_desc *txd = tx_desc + i;
-		int nexti = (i + 1) % txq->tx_ring_size;
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == txq->tx_ring_size)
+			nexti = 0;
 
 		txd->cmd_sts = 0;
 		txd->next_desc_ptr = txq->tx_desc_dma +
 					nexti * sizeof(struct tx_desc);
 	}
 
+	skb_queue_head_init(&txq->tx_skb);
+
 	return 0;
-
-
-out_free:
-	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
-		iounmap(txq->tx_desc_area);
-	else
-		dma_free_coherent(NULL, size,
-				  txq->tx_desc_area,
-				  txq->tx_desc_dma);
-
-out:
-	return -ENOMEM;
-}
-
-static void txq_reclaim(struct tx_queue *txq, int force)
-{
-	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&mp->lock, flags);
-	while (txq->tx_desc_count > 0) {
-		int tx_index;
-		struct tx_desc *desc;
-		u32 cmd_sts;
-		struct sk_buff *skb;
-		dma_addr_t addr;
-		int count;
-
-		tx_index = txq->tx_used_desc;
-		desc = &txq->tx_desc_area[tx_index];
-		cmd_sts = desc->cmd_sts;
-
-		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
-			if (!force)
-				break;
-			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
-		}
-
-		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
-		txq->tx_desc_count--;
-
-		addr = desc->buf_ptr;
-		count = desc->byte_cnt;
-		skb = txq->tx_skb[tx_index];
-		txq->tx_skb[tx_index] = NULL;
-
-		if (cmd_sts & ERROR_SUMMARY) {
-			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
-			mp->dev->stats.tx_errors++;
-		}
-
-		/*
-		 * Drop mp->lock while we free the skb.
-		 */
-		spin_unlock_irqrestore(&mp->lock, flags);
-
-		if (cmd_sts & TX_FIRST_DESC)
-			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
-		else
-			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
-
-		if (skb)
-			dev_kfree_skb_irq(skb);
-
-		spin_lock_irqsave(&mp->lock, flags);
-	}
-	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -1699,22 +1734,67 @@
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 
 	txq_disable(txq);
-	txq_reclaim(txq, 1);
+	txq_reclaim(txq, txq->tx_ring_size, 1);
 
 	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-	if (txq->index == mp->txq_primary &&
+	if (txq->index == 0 &&
 	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
-
-	kfree(txq->tx_skb);
 }
 
 
 /* netdev ops and related ***************************************************/
+static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
+{
+	u32 int_cause;
+	u32 int_cause_ext;
+
+	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
+			(INT_TX_END | INT_RX | INT_EXT);
+	if (int_cause == 0)
+		return 0;
+
+	int_cause_ext = 0;
+	if (int_cause & INT_EXT)
+		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));
+
+	int_cause &= INT_TX_END | INT_RX;
+	if (int_cause) {
+		wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
+		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
+				~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
+		mp->work_rx |= (int_cause & INT_RX) >> 2;
+	}
+
+	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
+	if (int_cause_ext) {
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
+		if (int_cause_ext & INT_EXT_LINK_PHY)
+			mp->work_link = 1;
+		mp->work_tx |= int_cause_ext & INT_EXT_TX;
+	}
+
+	return 1;
+}
+
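
mv643xx_eth_collect_events() above converts the hardware interrupt cause bits into the per-queue work bitmaps that the poll loop consumes: the RX cause bits sit two positions above the queue number, so masking and shifting right by 2 yields a plain queue bitmap. A tiny sketch with a made-up cause value and an assumed mask:

	#include <stdio.h>

	#define INT_RX_MASK 0x000003fc	/* assumed: RX cause bits for queues 0..7 */

	int main(void)
	{
		unsigned int int_cause = 0x00000014;	/* pretend queues 0 and 2 fired */
		unsigned int work_rx = (int_cause & INT_RX_MASK) >> 2;

		printf("work_rx bitmap = 0x%02x\n", work_rx);	/* prints 0x05 */
		return 0;
	}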
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (unlikely(!mv643xx_eth_collect_events(mp)))
+		return IRQ_NONE;
+
+	wrl(mp, INT_MASK(mp->port_num), 0);
+	napi_schedule(&mp->napi);
+
+	return IRQ_HANDLED;
+}
+
 static void handle_link_event(struct mv643xx_eth_private *mp)
 {
 	struct net_device *dev = mp->dev;
@@ -1731,15 +1811,12 @@
 			printk(KERN_INFO "%s: link down\n", dev->name);
 
 			netif_carrier_off(dev);
-			netif_stop_queue(dev);
 
-			for (i = 0; i < 8; i++) {
+			for (i = 0; i < mp->txq_count; i++) {
 				struct tx_queue *txq = mp->txq + i;
 
-				if (mp->txq_mask & (1 << i)) {
-					txq_reclaim(txq, 1);
-					txq_reset_hw_ptr(txq);
-				}
+				txq_reclaim(txq, txq->tx_ring_size, 1);
+				txq_reset_hw_ptr(txq);
 			}
 		}
 		return;
@@ -1767,119 +1844,93 @@
 			 speed, duplex ? "full" : "half",
 			 fc ? "en" : "dis");
 
-	if (!netif_carrier_ok(dev)) {
+	if (!netif_carrier_ok(dev))
 		netif_carrier_on(dev);
-		netif_wake_queue(dev);
-	}
 }
 
-static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
-	struct net_device *dev = (struct net_device *)dev_id;
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	u32 int_cause;
-	u32 int_cause_ext;
+	struct mv643xx_eth_private *mp;
+	int work_done;
 
-	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
-			(INT_TX_END | INT_RX | INT_EXT);
-	if (int_cause == 0)
-		return IRQ_NONE;
+	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
-	int_cause_ext = 0;
-	if (int_cause & INT_EXT) {
-		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
-				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
-		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
-	}
+	mp->work_rx_refill |= mp->work_rx_oom;
+	mp->work_rx_oom = 0;
 
-	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
-		handle_link_event(mp);
+	work_done = 0;
+	while (work_done < budget) {
+		u8 queue_mask;
+		int queue;
+		int work_tbd;
 
-	/*
-	 * RxBuffer or RxError set for any of the 8 queues?
-	 */
-#ifdef MV643XX_ETH_NAPI
-	if (int_cause & INT_RX) {
-		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
-		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
-		rdl(mp, INT_MASK(mp->port_num));
-
-		netif_rx_schedule(dev, &mp->napi);
-	}
-#else
-	if (int_cause & INT_RX) {
-		int i;
-
-		for (i = 7; i >= 0; i--)
-			if (mp->rxq_mask & (1 << i))
-				rxq_process(mp->rxq + i, INT_MAX);
-	}
-#endif
-
-	/*
-	 * TxBuffer or TxError set for any of the 8 queues?
-	 */
-	if (int_cause_ext & INT_EXT_TX) {
-		int i;
-
-		for (i = 0; i < 8; i++)
-			if (mp->txq_mask & (1 << i))
-				txq_reclaim(mp->txq + i, 0);
-
-		/*
-		 * Enough space again in the primary TX queue for a
-		 * full packet?
-		 */
-		if (netif_carrier_ok(dev)) {
-			spin_lock(&mp->lock);
-			__txq_maybe_wake(mp->txq + mp->txq_primary);
-			spin_unlock(&mp->lock);
+		if (mp->work_link) {
+			mp->work_link = 0;
+			handle_link_event(mp);
+			continue;
 		}
-	}
 
-	/*
-	 * Any TxEnd interrupts?
-	 */
-	if (int_cause & INT_TX_END) {
-		int i;
-
-		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
-
-		spin_lock(&mp->lock);
-		for (i = 0; i < 8; i++) {
-			struct tx_queue *txq = mp->txq + i;
-			u32 hw_desc_ptr;
-			u32 expected_ptr;
-
-			if ((int_cause & (INT_TX_END_0 << i)) == 0)
+		queue_mask = mp->work_tx | mp->work_tx_end |
+				mp->work_rx | mp->work_rx_refill;
+		if (!queue_mask) {
+			if (mv643xx_eth_collect_events(mp))
 				continue;
-
-			hw_desc_ptr =
-				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
-			expected_ptr = (u32)txq->tx_desc_dma +
-				txq->tx_curr_desc * sizeof(struct tx_desc);
-
-			if (hw_desc_ptr != expected_ptr)
-				txq_enable(txq);
+			break;
 		}
-		spin_unlock(&mp->lock);
+
+		queue = fls(queue_mask) - 1;
+		queue_mask = 1 << queue;
+
+		work_tbd = budget - work_done;
+		if (work_tbd > 16)
+			work_tbd = 16;
+
+		if (mp->work_tx_end & queue_mask) {
+			txq_kick(mp->txq + queue);
+		} else if (mp->work_tx & queue_mask) {
+			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
+			txq_maybe_wake(mp->txq + queue);
+		} else if (mp->work_rx & queue_mask) {
+			work_done += rxq_process(mp->rxq + queue, work_tbd);
+		} else if (mp->work_rx_refill & queue_mask) {
+			work_done += rxq_refill(mp->rxq + queue, work_tbd);
+		} else {
+			BUG();
+		}
 	}
 
-	return IRQ_HANDLED;
+	if (work_done < budget) {
+		if (mp->work_rx_oom)
+			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+		napi_complete(napi);
+		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+	}
+
+	return work_done;
+}
+
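
The poll loop above multiplexes link, tx-end, tx-reclaim, rx and rx-refill work by OR-ing the per-queue bitmaps and letting fls() pick the highest-numbered queue with something pending. A userspace sketch of that selection step (fls() comes from <linux/bitops.h> in the kernel; a simple replacement is used here):

	#include <stdio.h>

	static int fls32(unsigned int x)	/* stand-in for the kernel's fls() */
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int work_rx = 0x05;	/* pretend queues 0 and 2 have work */

		while (work_rx) {
			int queue = fls32(work_rx) - 1;

			printf("servicing rx queue %d\n", queue);
			work_rx &= ~(1u << queue);	/* that queue is now done */
		}
		return 0;
	}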
+static inline void oom_timer_wrapper(unsigned long data)
+{
+	struct mv643xx_eth_private *mp = (void *)data;
+
+	napi_schedule(&mp->napi);
 }
 
 static void phy_reset(struct mv643xx_eth_private *mp)
 {
-	unsigned int data;
+	int data;
 
-	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
+	data = phy_read(mp->phy, MII_BMCR);
+	if (data < 0)
+		return;
+
 	data |= BMCR_RESET;
-	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
+	if (phy_write(mp->phy, MII_BMCR, data) < 0)
+		return;
 
 	do {
-		udelay(1);
-		smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
-	} while (data & BMCR_RESET);
+		data = phy_read(mp->phy, MII_BMCR);
+	} while (data >= 0 && data & BMCR_RESET);
 }
 
 static void port_start(struct mv643xx_eth_private *mp)
@@ -1890,7 +1941,7 @@
 	/*
 	 * Perform PHY reset, if there is a PHY.
 	 */
-	if (mp->phy_addr != -1) {
+	if (mp->phy != NULL) {
 		struct ethtool_cmd cmd;
 
 		mv643xx_eth_get_settings(mp->dev, &cmd);
@@ -1907,7 +1958,7 @@
 	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
 
 	pscr |= DO_NOT_FORCE_LINK_FAIL;
-	if (mp->phy_addr == -1)
+	if (mp->phy == NULL)
 		pscr |= FORCE_LINK_PASS;
 	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
 
@@ -1917,12 +1968,9 @@
 	 * Configure TX path and queues.
 	 */
 	tx_set_rate(mp, 1000000000, 16777216);
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < mp->txq_count; i++) {
 		struct tx_queue *txq = mp->txq + i;
 
-		if ((mp->txq_mask & (1 << i)) == 0)
-			continue;
-
 		txq_reset_hw_ptr(txq);
 		txq_set_rate(txq, 1000000000, 16777216);
 		txq_set_fixed_prio_mode(txq);
@@ -1935,9 +1983,10 @@
 
 	/*
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
-	 * frames to RX queue #0.
+	 * frames to RX queue #0, and include the pseudo-header when
+	 * calculating receive checksums.
 	 */
-	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
+	wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000);
 
 	/*
 	 * Treat BPDUs as normal multicasts, and disable partition mode.
@@ -1947,14 +1996,11 @@
 	/*
 	 * Enable the receive queues.
 	 */
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < mp->rxq_count; i++) {
 		struct rx_queue *rxq = mp->rxq + i;
 		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
-		if ((mp->rxq_mask & (1 << i)) == 0)
-			continue;
-
 		addr = (u32)rxq->rx_desc_dma;
 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
 		wrl(mp, off, addr);
@@ -1993,6 +2039,26 @@
 	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
 }
 
+static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
+{
+	int skb_size;
+
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = mp->dev->mtu + 36;
+
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	mp->skb_size = (skb_size + 7) & ~7;
+}
+
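
A worked example of the computation above: with the standard ethernet MTU of 1500 the raw size is 1500 + 36 = 1536, which is already a multiple of 8, while an MTU of 1522 gives 1558 and is rounded up to 1560. A standalone sketch of the same arithmetic:

	#include <stdio.h>

	static int recalc_skb_size(int mtu)
	{
		int skb_size = mtu + 36;	/* 2+14 header, 16 VLAN, 4 FCS */

		return (skb_size + 7) & ~7;	/* round up to a multiple of 8 */
	}

	int main(void)
	{
		printf("mtu 1500 -> %d\n", recalc_skb_size(1500));	/* 1536 */
		printf("mtu 1522 -> %d\n", recalc_skb_size(1522));	/* 1560 */
		return 0;
	}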
 static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -2004,8 +2070,7 @@
 	rdl(mp, INT_CAUSE_EXT(mp->port_num));
 
 	err = request_irq(dev->irq, mv643xx_eth_irq,
-			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
-			  dev->name, dev);
+			  IRQF_SHARED, dev->name, dev);
 	if (err) {
 		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
 		return -EAGAIN;
@@ -2013,58 +2078,53 @@
 
 	init_mac_tables(mp);
 
-	for (i = 0; i < 8; i++) {
-		if ((mp->rxq_mask & (1 << i)) == 0)
-			continue;
+	mv643xx_eth_recalc_skb_size(mp);
 
+	napi_enable(&mp->napi);
+
+	skb_queue_head_init(&mp->rx_recycle);
+
+	for (i = 0; i < mp->rxq_count; i++) {
 		err = rxq_init(mp, i);
 		if (err) {
 			while (--i >= 0)
-				if (mp->rxq_mask & (1 << i))
-					rxq_deinit(mp->rxq + i);
+				rxq_deinit(mp->rxq + i);
 			goto out;
 		}
 
-		rxq_refill(mp->rxq + i);
+		rxq_refill(mp->rxq + i, INT_MAX);
 	}
 
-	for (i = 0; i < 8; i++) {
-		if ((mp->txq_mask & (1 << i)) == 0)
-			continue;
+	if (mp->work_rx_oom) {
+		mp->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&mp->rx_oom);
+	}
 
+	for (i = 0; i < mp->txq_count; i++) {
 		err = txq_init(mp, i);
 		if (err) {
 			while (--i >= 0)
-				if (mp->txq_mask & (1 << i))
-					txq_deinit(mp->txq + i);
+				txq_deinit(mp->txq + i);
 			goto out_free;
 		}
 	}
 
-#ifdef MV643XX_ETH_NAPI
-	napi_enable(&mp->napi);
-#endif
-
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	port_start(mp);
 
 	set_rx_coal(mp, 0);
 	set_tx_coal(mp, 0);
 
-	wrl(mp, INT_MASK_EXT(mp->port_num),
-	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
-
+	wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX);
 	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
 
 	return 0;
 
 
 out_free:
-	for (i = 0; i < 8; i++)
-		if (mp->rxq_mask & (1 << i))
-			rxq_deinit(mp->rxq + i);
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
 out:
 	free_irq(dev->irq, dev);
 
@@ -2076,12 +2136,10 @@
 	unsigned int data;
 	int i;
 
-	for (i = 0; i < 8; i++) {
-		if (mp->rxq_mask & (1 << i))
-			rxq_disable(mp->rxq + i);
-		if (mp->txq_mask & (1 << i))
-			txq_disable(mp->txq + i);
-	}
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_disable(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_disable(mp->txq + i);
 
 	while (1) {
 		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
@@ -2107,23 +2165,26 @@
 	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 	rdl(mp, INT_MASK(mp->port_num));
 
-#ifdef MV643XX_ETH_NAPI
+	del_timer_sync(&mp->mib_counters_timer);
+
 	napi_disable(&mp->napi);
-#endif
+
+	del_timer_sync(&mp->rx_oom);
+
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	free_irq(dev->irq, dev);
 
 	port_reset(mp);
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
-	for (i = 0; i < 8; i++) {
-		if (mp->rxq_mask & (1 << i))
-			rxq_deinit(mp->rxq + i);
-		if (mp->txq_mask & (1 << i))
-			txq_deinit(mp->txq + i);
-	}
+	skb_queue_purge(&mp->rx_recycle);
+
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_deinit(mp->txq + i);
 
 	return 0;
 }
@@ -2132,8 +2193,8 @@
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	if (mp->phy_addr != -1)
-		return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
+	if (mp->phy != NULL)
+		return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);
 
 	return -EOPNOTSUPP;
 }
@@ -2146,6 +2207,7 @@
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
+	mv643xx_eth_recalc_skb_size(mp);
 	tx_set_rate(mp, 1000000000, 16777216);
 
 	if (!netif_running(dev))
@@ -2173,12 +2235,10 @@
 
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 	if (netif_running(mp->dev)) {
-		netif_stop_queue(mp->dev);
-
+		netif_tx_stop_all_queues(mp->dev);
 		port_reset(mp);
 		port_start(mp);
-
-		__txq_maybe_wake(mp->txq + mp->txq_primary);
+		netif_tx_wake_all_queues(mp->dev);
 	}
 }
 
@@ -2205,22 +2265,6 @@
 }
 #endif
 
-static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
-{
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int val;
-
-	smi_reg_read(mp, addr, reg, &val);
-
-	return val;
-}
-
-static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
-{
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	smi_reg_write(mp, addr, reg, val);
-}
-
 
 /* platform glue ************************************************************/
 static void
@@ -2272,14 +2316,20 @@
 		msp->extended_rx_coal_limit = 0;
 
 	/*
-	 * Check whether the TX rate control registers are in the
-	 * old or the new place.
+	 * Check whether the MAC supports TX rate control, and if
+	 * yes, whether its associated registers are in the old or
+	 * the new place.
 	 */
 	writel(1, msp->base + TX_BW_MTU_MOVED(0));
-	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1)
-		msp->tx_bw_control_moved = 1;
-	else
-		msp->tx_bw_control_moved = 0;
+	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) {
+		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
+	} else {
+		writel(7, msp->base + TX_BW_RATE(0));
+		if (readl(msp->base + TX_BW_RATE(0)) & 7)
+			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
+		else
+			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
+	}
 }
 
 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
@@ -2309,7 +2359,41 @@
 	if (msp->base == NULL)
 		goto out_free;
 
-	spin_lock_init(&msp->phy_lock);
+	/*
+	 * Set up and register SMI bus.
+	 */
+	if (pd == NULL || pd->shared_smi == NULL) {
+		msp->smi_bus.priv = msp;
+		msp->smi_bus.name = "mv643xx_eth smi";
+		msp->smi_bus.read = smi_bus_read;
+		msp->smi_bus.write = smi_bus_write;
+		snprintf(msp->smi_bus.id, MII_BUS_ID_SIZE, "%d", pdev->id);
+		msp->smi_bus.dev = &pdev->dev;
+		msp->smi_bus.phy_mask = 0xffffffff;
+		if (mdiobus_register(&msp->smi_bus) < 0)
+			goto out_unmap;
+		msp->smi = msp;
+	} else {
+		msp->smi = platform_get_drvdata(pd->shared_smi);
+	}
+
+	msp->err_interrupt = NO_IRQ;
+	init_waitqueue_head(&msp->smi_busy_wait);
+
+	/*
+	 * Check whether the error interrupt is hooked up.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res != NULL) {
+		int err;
+
+		err = request_irq(res->start, mv643xx_eth_err_irq,
+				  IRQF_SHARED, "mv643xx_eth", msp);
+		if (!err) {
+			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
+			msp->err_interrupt = res->start;
+		}
+	}
 
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
@@ -2327,6 +2411,8 @@
 
 	return 0;
 
+out_unmap:
+	iounmap(msp->base);
 out_free:
 	kfree(msp);
 out:
@@ -2336,7 +2422,12 @@
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 {
 	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
+	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
 
+	if (pd == NULL || pd->shared_smi == NULL)
+		mdiobus_unregister(&msp->smi_bus);
+	if (msp->err_interrupt != NO_IRQ)
+		free_irq(msp->err_interrupt, msp);
 	iounmap(msp->base);
 	kfree(msp);
 
@@ -2382,33 +2473,13 @@
 	else
 		uc_addr_get(mp, dev->dev_addr);
 
-	if (pd->phy_addr == -1) {
-		mp->shared_smi = NULL;
-		mp->phy_addr = -1;
-	} else {
-		mp->shared_smi = mp->shared;
-		if (pd->shared_smi != NULL)
-			mp->shared_smi = platform_get_drvdata(pd->shared_smi);
-
-		if (pd->force_phy_addr || pd->phy_addr) {
-			mp->phy_addr = pd->phy_addr & 0x3f;
-			phy_addr_set(mp, mp->phy_addr);
-		} else {
-			mp->phy_addr = phy_addr_get(mp);
-		}
-	}
-
 	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	if (pd->rx_queue_size)
 		mp->default_rx_ring_size = pd->rx_queue_size;
 	mp->rx_desc_sram_addr = pd->rx_sram_addr;
 	mp->rx_desc_sram_size = pd->rx_sram_size;
 
-	if (pd->rx_queue_mask)
-		mp->rxq_mask = pd->rx_queue_mask;
-	else
-		mp->rxq_mask = 0x01;
-	mp->rxq_primary = fls(mp->rxq_mask) - 1;
+	mp->rxq_count = pd->rx_queue_count ? : 1;
 
 	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
@@ -2416,76 +2487,63 @@
 	mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	mp->tx_desc_sram_size = pd->tx_sram_size;
 
-	if (pd->tx_queue_mask)
-		mp->txq_mask = pd->tx_queue_mask;
-	else
-		mp->txq_mask = 0x01;
-	mp->txq_primary = fls(mp->txq_mask) - 1;
+	mp->txq_count = pd->tx_queue_count ? : 1;
 }
 
-static int phy_detect(struct mv643xx_eth_private *mp)
+static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
+				   int phy_addr)
 {
-	unsigned int data;
-	unsigned int data2;
+	struct mii_bus *bus = &mp->shared->smi->smi_bus;
+	struct phy_device *phydev;
+	int start;
+	int num;
+	int i;
 
-	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
-	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);
-
-	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
-	if (((data ^ data2) & BMCR_ANENABLE) == 0)
-		return -ENODEV;
-
-	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
-
-	return 0;
-}
-
-static int phy_init(struct mv643xx_eth_private *mp,
-		    struct mv643xx_eth_platform_data *pd)
-{
-	struct ethtool_cmd cmd;
-	int err;
-
-	err = phy_detect(mp);
-	if (err) {
-		dev_printk(KERN_INFO, &mp->dev->dev,
-			   "no PHY detected at addr %d\n", mp->phy_addr);
-		return err;
+	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
+		start = phy_addr_get(mp) & 0x1f;
+		num = 32;
+	} else {
+		start = phy_addr & 0x1f;
+		num = 1;
 	}
+
+	phydev = NULL;
+	for (i = 0; i < num; i++) {
+		int addr = (start + i) & 0x1f;
+
+		if (bus->phy_map[addr] == NULL)
+			mdiobus_scan(bus, addr);
+
+		if (phydev == NULL) {
+			phydev = bus->phy_map[addr];
+			if (phydev != NULL)
+				phy_addr_set(mp, addr);
+		}
+	}
+
+	return phydev;
+}
+
+static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+	struct phy_device *phy = mp->phy;
+
 	phy_reset(mp);
 
-	mp->mii.phy_id = mp->phy_addr;
-	mp->mii.phy_id_mask = 0x3f;
-	mp->mii.reg_num_mask = 0x1f;
-	mp->mii.dev = mp->dev;
-	mp->mii.mdio_read = mv643xx_eth_mdio_read;
-	mp->mii.mdio_write = mv643xx_eth_mdio_write;
+	phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);
 
-	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.port = PORT_MII;
-	cmd.transceiver = XCVR_INTERNAL;
-	cmd.phy_address = mp->phy_addr;
-	if (pd->speed == 0) {
-		cmd.autoneg = AUTONEG_ENABLE;
-		cmd.speed = SPEED_100;
-		cmd.advertising = ADVERTISED_10baseT_Half  |
-				  ADVERTISED_10baseT_Full  |
-				  ADVERTISED_100baseT_Half |
-				  ADVERTISED_100baseT_Full;
-		if (mp->mii.supports_gmii)
-			cmd.advertising |= ADVERTISED_1000baseT_Full;
+	if (speed == 0) {
+		phy->autoneg = AUTONEG_ENABLE;
+		phy->speed = 0;
+		phy->duplex = 0;
+		phy->advertising = phy->supported | ADVERTISED_Autoneg;
 	} else {
-		cmd.autoneg = AUTONEG_DISABLE;
-		cmd.speed = pd->speed;
-		cmd.duplex = pd->duplex;
+		phy->autoneg = AUTONEG_DISABLE;
+		phy->advertising = 0;
+		phy->speed = speed;
+		phy->duplex = duplex;
 	}
-
-	mv643xx_eth_set_settings(mp->dev, &cmd);
-
-	return 0;
+	phy_start_aneg(phy);
 }
 
 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
@@ -2499,7 +2557,7 @@
 	}
 
 	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
-	if (mp->phy_addr == -1) {
+	if (mp->phy == NULL) {
 		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
 		if (speed == SPEED_1000)
 			pscr |= SET_GMII_SPEED_TO_1000;
@@ -2538,7 +2596,7 @@
 		return -ENODEV;
 	}
 
-	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
 	if (!dev)
 		return -ENOMEM;
 
@@ -2549,33 +2607,47 @@
 	mp->port_num = pd->port_number;
 
 	mp->dev = dev;
-#ifdef MV643XX_ETH_NAPI
-	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
-#endif
 
 	set_params(mp, pd);
+	dev->real_num_tx_queues = mp->txq_count;
 
-	spin_lock_init(&mp->lock);
+	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
+		mp->phy = phy_scan(mp, pd->phy_addr);
 
-	mib_counters_clear(mp);
-	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
-
-	if (mp->phy_addr != -1) {
-		err = phy_init(mp, pd);
-		if (err)
-			goto out;
-
+	if (mp->phy != NULL) {
+		phy_init(mp, pd->speed, pd->duplex);
 		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
 	} else {
 		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
 	}
+
 	init_pscr(mp, pd->speed, pd->duplex);
 
 
+	mib_counters_clear(mp);
+
+	init_timer(&mp->mib_counters_timer);
+	mp->mib_counters_timer.data = (unsigned long)mp;
+	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
+	add_timer(&mp->mib_counters_timer);
+
+	spin_lock_init(&mp->mib_counters_lock);
+
+	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
+
+	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+
+	init_timer(&mp->rx_oom);
+	mp->rx_oom.data = (unsigned long)mp;
+	mp->rx_oom.function = oom_timer_wrapper;
+
+
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	BUG_ON(!res);
 	dev->irq = res->start;
 
+	dev->get_stats = mv643xx_eth_get_stats;
 	dev->hard_start_xmit = mv643xx_eth_xmit;
 	dev->open = mv643xx_eth_open;
 	dev->stop = mv643xx_eth_stop;
@@ -2590,14 +2662,8 @@
 	dev->watchdog_timeo = 2 * HZ;
 	dev->base_addr = 0;
 
-#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
-	/*
-	 * Zero copy can only work if we use Discovery II memory. Else, we will
-	 * have to map the buffers to ISA memory which is only 16 MB
-	 */
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
-#endif
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -2611,16 +2677,6 @@
 	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
 		   mp->port_num, print_mac(mac, dev->dev_addr));
 
-	if (dev->features & NETIF_F_SG)
-		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
-
-	if (dev->features & NETIF_F_IP_CSUM)
-		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
-
-#ifdef MV643XX_ETH_NAPI
-	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
-#endif
-
 	if (mp->tx_desc_sram_size > 0)
 		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
 
@@ -2637,6 +2693,8 @@
 	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
 
 	unregister_netdev(mp->dev);
+	if (mp->phy != NULL)
+		phy_detach(mp->phy);
 	flush_scheduled_work();
 	free_netdev(mp->dev);
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d6524db..005f2aa 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -183,7 +183,7 @@
 	dma_addr_t fw_stats_bus;
 	int watchdog_tx_done;
 	int watchdog_tx_req;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	int cached_dca_tag;
 	int cpu;
 	__be32 __iomem *dca_tag;
@@ -215,7 +215,7 @@
 	int msi_enabled;
 	int msix_enabled;
 	struct msix_entry *msix_vectors;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	int dca_enabled;
 #endif
 	u32 link_state;
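
The guard is widened throughout myri10ge because a tristate Kconfig option built as a module defines only CONFIG_DCA_MODULE, so a plain #ifdef CONFIG_DCA would silently compile the DCA support out. A minimal sketch of the difference, using a made-up CONFIG_FOO symbol rather than a real kernel config option:

	#include <stdio.h>

	#define CONFIG_FOO_MODULE 1	/* pretend FOO was built as =m (made up) */

	int main(void)
	{
	#ifdef CONFIG_FOO
		puts("plain ifdef: support enabled");
	#else
		puts("plain ifdef: support compiled out");
	#endif

	#if (defined CONFIG_FOO) || (defined CONFIG_FOO_MODULE)
		puts("combined test: support enabled");
	#endif
		return 0;
	}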
@@ -891,7 +891,7 @@
 	struct myri10ge_slice_state *ss;
 	int i, status;
 	size_t bytes;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	unsigned long dca_tag_off;
 #endif
 
@@ -986,7 +986,7 @@
 	}
 	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
 	dca_tag_off = cmd.data0;
 	for (i = 0; i < mgp->num_slices; i++) {
@@ -1025,7 +1025,7 @@
 	return status;
 }
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 static void
 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
 {
@@ -1060,8 +1060,9 @@
 	}
 	err = dca_add_requester(&pdev->dev);
 	if (err) {
-		dev_err(&pdev->dev,
-			"dca_add_requester() failed, err=%d\n", err);
+		if (err != -ENODEV)
+			dev_err(&pdev->dev,
+				"dca_add_requester() failed, err=%d\n", err);
 		return;
 	}
 	mgp->dca_enabled = 1;
@@ -1457,7 +1458,7 @@
 	struct net_device *netdev = ss->mgp->dev;
 	int work_done;
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	if (ss->mgp->dca_enabled)
 		myri10ge_update_dca(ss);
 #endif
@@ -1686,8 +1687,8 @@
 	"tx_boundary", "WC", "irq", "MSI", "MSIX",
 	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
 	"serial_number", "watchdog_resets",
-#ifdef CONFIG_DCA
-	"dca_capable", "dca_enabled",
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
+	"dca_capable_firmware", "dca_device_present",
 #endif
 	"link_changes", "link_up", "dropped_link_overflow",
 	"dropped_link_error_or_filtered",
@@ -1765,7 +1766,7 @@
 	data[i++] = (unsigned int)mgp->read_write_dma;
 	data[i++] = (unsigned int)mgp->serial_number;
 	data[i++] = (unsigned int)mgp->watchdog_resets;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
 	data[i++] = (unsigned int)(mgp->dca_enabled);
 #endif
@@ -3763,7 +3764,7 @@
 		dev_err(&pdev->dev, "failed reset\n");
 		goto abort_with_slices;
 	}
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	myri10ge_setup_dca(mgp);
 #endif
 	pci_set_drvdata(pdev, mgp);
@@ -3866,7 +3867,7 @@
 	netdev = mgp->dev;
 	unregister_netdev(netdev);
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	myri10ge_teardown_dca(mgp);
 #endif
 	myri10ge_dummy_rdma(mgp, 0);
@@ -3911,7 +3912,7 @@
 #endif
 };
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 static int
 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
 {
@@ -3943,7 +3944,7 @@
 		       myri10ge_driver.name, myri10ge_rss_hash);
 		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
 	}
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	dca_register_notify(&myri10ge_dca_notifier);
 #endif
 
@@ -3954,7 +3955,7 @@
 
 static __exit void myri10ge_cleanup_module(void)
 {
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	dca_unregister_notify(&myri10ge_dca_notifier);
 #endif
 	pci_unregister_driver(&myri10ge_driver);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index fa3ceca..eb681c0 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -64,6 +64,25 @@
 
 /* Do we support clones that don't adhere to 14,15 of the SAprom ? */
 #define SUPPORT_NE_BAD_CLONES
+/* 0xbad = bad sig or no reset ack */
+#define BAD 0xbad
+
+#define MAX_NE_CARDS	4	/* Max number of NE cards per module */
+static struct platform_device *pdev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS];
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es), required");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
+MODULE_LICENSE("GPL");
+#endif /* MODULE */
 
 /* Do we perform extra sanity checks on stuff ? */
 /* #define NE_SANITY_CHECK */
@@ -74,6 +93,10 @@
 /* Do we have a non std. amount of memory? (in units of 256 byte pages) */
 /* #define PACKETBUF_MEMSIZE	0x40 */
 
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390-based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
 #if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R))
 /* Do we need a portlist for the ISA auto-probe ? */
 #define NEEDS_PORTLIST
@@ -192,8 +215,13 @@
 #endif
 
 	/* First check any supplied i/o locations. User knows best. <cough> */
-	if (base_addr > 0x1ff)	/* Check a single specified location. */
-		return ne_probe1(dev, base_addr);
+	if (base_addr > 0x1ff) {	/* Check a single specified location. */
+		int ret = ne_probe1(dev, base_addr);
+		if (ret)
+			printk(KERN_WARNING "ne.c: No NE*000 card found at "
+				"i/o = %#lx\n", base_addr);
+		return ret;
+	}
 	else if (base_addr != 0)	/* Don't probe at all. */
 		return -ENXIO;
 
@@ -214,28 +242,6 @@
 	return -ENODEV;
 }
 
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
-	struct net_device *dev = alloc_eip_netdev();
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	sprintf(dev->name, "eth%d", unit);
-	netdev_boot_setup_check(dev);
-
-	err = do_ne_probe(dev);
-	if (err)
-		goto out;
-	return dev;
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-#endif
-
 static int __init ne_probe_isapnp(struct net_device *dev)
 {
 	int i;
@@ -329,7 +335,7 @@
 	   with an otherwise unused dev->mem_end value of "0xBAD" will
 	   cause the driver to skip these parts of the probe. */
 
-	bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
+	bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD));
 
 	/* Reset card. Who knows what dain-bramaged state it was left in. */
 
@@ -806,46 +812,95 @@
 static int __init ne_drv_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
+	int err, this_dev = pdev->id;
 	struct resource *res;
-	int err, irq;
-
-	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (!res || irq < 0)
-		return -ENODEV;
 
 	dev = alloc_eip_netdev();
 	if (!dev)
 		return -ENOMEM;
-	dev->irq = irq;
-	dev->base_addr = res->start;
+
+	/* ne.c doesn't populate resources in platform_device, but
+	 * rbtx4927_ne_init and rbtx4938_ne_init do register devices
+	 * with resources.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (res) {
+		dev->base_addr = res->start;
+		dev->irq = platform_get_irq(pdev, 0);
+	} else {
+		if (this_dev < 0 || this_dev >= MAX_NE_CARDS)
+			return -EINVAL;
+		dev->base_addr = io[this_dev];
+		dev->irq = irq[this_dev];
+		dev->mem_end = bad[this_dev];
+	}
 	err = do_ne_probe(dev);
 	if (err) {
 		free_netdev(dev);
 		return err;
 	}
 	platform_set_drvdata(pdev, dev);
+
+	/* Update with any values found by probing; don't update if
+	 * resources were specified.
+	 */
+	if (!res) {
+		io[this_dev] = dev->base_addr;
+		irq[this_dev] = dev->irq;
+	}
 	return 0;
 }
 
-static int __exit ne_drv_remove(struct platform_device *pdev)
+static int ne_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 
-	unregister_netdev(dev);
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, NE_IO_EXTENT);
-	free_netdev(dev);
+	if (dev) {
+		struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+		netif_device_detach(dev);
+		unregister_netdev(dev);
+		if (idev)
+			pnp_device_detach(idev);
+		/* Careful: ne_drv_remove can be called twice, once from
+		 * the platform_driver.remove and again when the
+		 * platform_device is being removed.
+		 */
+		ei_status.priv = 0;
+		free_irq(dev->irq, dev);
+		release_region(dev->base_addr, NE_IO_EXTENT);
+		free_netdev(dev);
+		platform_set_drvdata(pdev, NULL);
+	}
 	return 0;
 }
 
+/* Remove unused devices, or all of them if 'all' is true. */
+static void ne_loop_rm_unreg(int all)
+{
+	int this_dev;
+	struct platform_device *pdev;
+	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+		pdev = pdev_ne[this_dev];
+		/* No network device == unused */
+		if (pdev && (!platform_get_drvdata(pdev) || all)) {
+			ne_drv_remove(pdev);
+			platform_device_unregister(pdev);
+			pdev_ne[this_dev] = NULL;
+		}
+	}
+}
+
 #ifdef CONFIG_PM
 static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
+		struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
 		netif_device_detach(dev);
+		if (idev)
+			pnp_stop_dev(idev);
+	}
 	return 0;
 }
 
@@ -854,6 +909,9 @@
 	struct net_device *dev = platform_get_drvdata(pdev);
 
 	if (netif_running(dev)) {
+		struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+		if (idev)
+			pnp_start_dev(idev);
 		ne_reset_8390(dev);
 		NS8390p_init(dev, 1);
 		netif_device_attach(dev);
@@ -866,7 +924,7 @@
 #endif
 
 static struct platform_driver ne_driver = {
-	.remove		= __exit_p(ne_drv_remove),
+	.remove		= ne_drv_remove,
 	.suspend	= ne_drv_suspend,
 	.resume		= ne_drv_resume,
 	.driver		= {
@@ -875,91 +933,96 @@
 	},
 };
 
+static void __init ne_add_devices(void)
+{
+	int this_dev;
+	struct platform_device *pdev;
+
+	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+		if (pdev_ne[this_dev])
+			continue;
+		pdev = platform_device_register_simple(
+			DRV_NAME, this_dev, NULL, 0);
+		if (IS_ERR(pdev))
+			continue;
+		pdev_ne[this_dev] = pdev;
+	}
+}
+
+#ifdef MODULE
+int __init init_module(void)
+{
+	int retval;
+	ne_add_devices();
+	retval = platform_driver_probe(&ne_driver, ne_drv_probe);
+	if (retval) {
+		if (io[0] == 0)
+			printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
+				" value(s) for ISA cards.\n");
+		ne_loop_rm_unreg(1);
+		return retval;
+	}
+
+	/* Unregister unused platform_devices. */
+	ne_loop_rm_unreg(0);
+	return retval;
+}
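+
+/* Illustrative module usage (addresses and IRQs below are examples only,
+ * not values taken from this patch):
+ *
+ *	modprobe ne io=0x300,0x280 irq=3,5
+ *
+ * One io/irq entry is given per ISA card.  Without io= the probe above
+ * fails and the KERN_NOTICE reminder is printed.
+ */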
+#else /* MODULE */
 static int __init ne_init(void)
 {
-	return platform_driver_probe(&ne_driver, ne_drv_probe);
+	int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
+
+	/* Unregister unused platform_devices. */
+	ne_loop_rm_unreg(0);
+	return retval;
 }
+module_init(ne_init);
+
+struct net_device * __init ne_probe(int unit)
+{
+	int this_dev;
+	struct net_device *dev;
+
+	/* Find an empty slot: one with no net_device and a zero io port. */
+	this_dev = 0;
+	while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) ||
+		io[this_dev]) {
+		if (++this_dev == MAX_NE_CARDS)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	/* Get irq, io from kernel command line */
+	dev = alloc_eip_netdev();
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	sprintf(dev->name, "eth%d", unit);
+	netdev_boot_setup_check(dev);
+
+	io[this_dev] = dev->base_addr;
+	irq[this_dev] = dev->irq;
+	bad[this_dev] = dev->mem_end;
+
+	free_netdev(dev);
+
+	ne_add_devices();
+
+	/* return the first device found */
+	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+		if (pdev_ne[this_dev]) {
+			dev = platform_get_drvdata(pdev_ne[this_dev]);
+			if (dev)
+				return dev;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+#endif /* MODULE */
 
 static void __exit ne_exit(void)
 {
 	platform_driver_unregister(&ne_driver);
+	ne_loop_rm_unreg(1);
 }
-
-#ifdef MODULE
-#define MAX_NE_CARDS	4	/* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS];	/* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es),required");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
-MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int __init init_module(void)
-{
-	int this_dev, found = 0;
-	int plat_found = !ne_init();
-
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = alloc_eip_netdev();
-		if (!dev)
-			break;
-		dev->irq = irq[this_dev];
-		dev->mem_end = bad[this_dev];
-		dev->base_addr = io[this_dev];
-		if (do_ne_probe(dev) == 0) {
-			dev_ne[found++] = dev;
-			continue;
-		}
-		free_netdev(dev);
-		if (found || plat_found)
-			break;
-		if (io[this_dev] != 0)
-			printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
-		else
-			printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
-		return -ENXIO;
-	}
-	if (found || plat_found)
-		return 0;
-	return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-	struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
-	if (idev)
-		pnp_device_detach(idev);
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-void __exit cleanup_module(void)
-{
-	int this_dev;
-
-	ne_exit();
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = dev_ne[this_dev];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-#else /* MODULE */
-module_init(ne_init);
 module_exit(ne_exit);
-#endif /* MODULE */
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 3f9af75..b9bed82 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -189,7 +189,7 @@
 
 		if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
 			printk("%s: unexpected status: 0x%08x\n",
-			    __FUNCTION__, status);
+			    __func__, status);
 
 		fill_level =
 		    readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 244ab49..f8e601c 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -742,7 +742,7 @@
 	} while (0)
 #else
 #define DPRINTK(klevel, fmt, args...)	do { \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\
+	printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
 		(adapter != NULL && adapter->netdev != NULL) ? \
 		adapter->netdev->name : NULL, \
 		## args); } while(0)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 008fd66..6ef3f0d 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -77,18 +77,18 @@
 
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
-	{PCI_DEVICE(0x4040, (device)), \
+	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
 	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
 
 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
-	ENTRY(0x0001),
-	ENTRY(0x0002),
-	ENTRY(0x0003),
-	ENTRY(0x0004),
-	ENTRY(0x0005),
-	ENTRY(0x0024),
-	ENTRY(0x0025),
-	ENTRY(0x0100),
+	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+	ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+	ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+	ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
+	ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
+	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
+	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
+	ENTRY(PCI_DEVICE_ID_NX3031),
 	{0,}
 };
 
@@ -241,7 +241,7 @@
 	case NETXEN_BRDTYPE_P3_REF_QG:
 	case NETXEN_BRDTYPE_P3_4_GB:
 	case NETXEN_BRDTYPE_P3_4_GB_MM:
-		adapter->msix_supported = 0;
+		adapter->msix_supported = !!use_msi_x;
 		adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
 		break;
 
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 53451c3..0a575fe 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -119,7 +119,7 @@
 
 #ifdef NETDRV_DEBUG
 /* note: prints function name for you */
-#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
 #else
 #  define DPRINTK(fmt, args...)
 #endif
@@ -130,7 +130,7 @@
 #  define assert(expr) \
         if(!(expr)) {					\
         printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
-        #expr,__FILE__,__FUNCTION__,__LINE__);		\
+        #expr,__FILE__,__func__,__LINE__);		\
         }
 #endif
 
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 94e0b7e..e7508c1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,49 +60,14 @@
 		bus->reset(bus);
 
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		struct phy_device *phydev;
+		bus->phy_map[i] = NULL;
+		if ((bus->phy_mask & (1 << i)) == 0) {
+			struct phy_device *phydev;
 
-		if (bus->phy_mask & (1 << i)) {
-			bus->phy_map[i] = NULL;
-			continue;
+			phydev = mdiobus_scan(bus, i);
+			if (IS_ERR(phydev))
+				err = PTR_ERR(phydev);
 		}
-
-		phydev = get_phy_device(bus, i);
-
-		if (IS_ERR(phydev))
-			return PTR_ERR(phydev);
-
-		/* There's a PHY at this address
-		 * We need to set:
-		 * 1) IRQ
-		 * 2) bus_id
-		 * 3) parent
-		 * 4) bus
-		 * 5) mii_bus
-		 * And, we need to register it */
-		if (phydev) {
-			phydev->irq = bus->irq[i];
-
-			phydev->dev.parent = bus->dev;
-			phydev->dev.bus = &mdio_bus_type;
-			snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
-
-			phydev->bus = bus;
-
-			/* Run all of the fixups for this PHY */
-			phy_scan_fixups(phydev);
-
-			err = device_register(&phydev->dev);
-
-			if (err) {
-				printk(KERN_ERR "phy %d failed to register\n",
-						i);
-				phy_device_free(phydev);
-				phydev = NULL;
-			}
-		}
-
-		bus->phy_map[i] = phydev;
 	}
 
 	pr_info("%s: probed\n", bus->name);
@@ -122,6 +87,48 @@
 }
 EXPORT_SYMBOL(mdiobus_unregister);
 
+struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
+{
+	struct phy_device *phydev;
+	int err;
+
+	phydev = get_phy_device(bus, addr);
+	if (IS_ERR(phydev) || phydev == NULL)
+		return phydev;
+
+	/* There's a PHY at this address
+	 * We need to set:
+	 * 1) IRQ
+	 * 2) bus_id
+	 * 3) parent
+	 * 4) bus
+	 * 5) mii_bus
+	 * And, we need to register it */
+
+	phydev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
+
+	phydev->dev.parent = bus->dev;
+	phydev->dev.bus = &mdio_bus_type;
+	snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, addr);
+
+	phydev->bus = bus;
+
+	/* Run all of the fixups for this PHY */
+	phy_scan_fixups(phydev);
+
+	err = device_register(&phydev->dev);
+	if (err) {
+		printk(KERN_ERR "phy %d failed to register\n", addr);
+		phy_device_free(phydev);
+		phydev = NULL;
+	}
+
+	bus->phy_map[addr] = phydev;
+
+	return phydev;
+}
+EXPORT_SYMBOL(mdiobus_scan);
+
 /**
  * mdio_bus_match - determine if given PHY driver supports the given PHY device
  * @dev: target PHY device
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index ddccc07..5d4d215 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1833,9 +1833,11 @@
 
 	/* If the queue is getting long, don't wait any longer for packets
 	   before the start of the queue. */
-	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN
-	    && seq_before(ppp->minseq, ppp->mrq.next->sequence))
-		ppp->minseq = ppp->mrq.next->sequence;
+	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
+		struct sk_buff *skb = skb_peek(&ppp->mrq);
+		if (seq_before(ppp->minseq, skb->sequence))
+			ppp->minseq = skb->sequence;
+	}
 
 	/* Pull completed packets off the queue and receive them. */
 	while ((skb = ppp_mp_reconstruct(ppp)))
@@ -1864,7 +1866,7 @@
 	for (p = list->next; p != (struct sk_buff *)list; p = p->next)
 		if (seq_before(seq, p->sequence))
 			break;
-	__skb_insert(skb, p->prev, p, list);
+	__skb_queue_before(list, p, skb);
 }
 
 /*
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index ff175e8..185b1df 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -353,7 +353,7 @@
 	spin_lock_bh(&session->reorder_q.lock);
 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
 		if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
-			__skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
+			__skb_queue_before(&session->reorder_q, skbp, skb);
 			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
 			       "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
 			       session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 0000000..8a19765
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Qlogic 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 0000000..c37ea43
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1593 @@
+/*
+ * QLogic QLA41xx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qlge for copyright and licensing details.
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME  	"qlge"
+#define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION	"v1.00.00-b3"
+
+#define PFX "qlge: "
+#define QPRINTK(qdev, nlevel, klevel, fmt, args...)     \
+       do {       \
+	if (!((qdev)->msg_enable & NETIF_MSG_##nlevel))		\
+		;						\
+	else							\
+		dev_printk(KERN_##klevel, &((qdev)->pdev->dev),	\
+			   "%s: " fmt, __func__, ##args);  \
+       } while (0)
+
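+/* Illustrative QPRINTK usage (the message text is an example only):
+ *
+ *	QPRINTK(qdev, IFUP, ERR, "Failed to allocate tx ring memory.\n");
+ *
+ * This emits a dev_printk() at KERN_ERR level, but only when
+ * NETIF_MSG_IFUP is set in qdev->msg_enable.
+ */
+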
+#define QLGE_VENDOR_ID    0x1077
+#define QLGE_DEVICE_ID1    0x8012
+#define QLGE_DEVICE_ID   0x8000
+
+#define MAX_RX_RINGS 128
+#define MAX_TX_RINGS 128
+
+#define NUM_TX_RING_ENTRIES	256
+#define NUM_RX_RING_ENTRIES	256
+
+#define NUM_SMALL_BUFFERS   512
+#define NUM_LARGE_BUFFERS   512
+
+#define SMALL_BUFFER_SIZE 256
+#define LARGE_BUFFER_SIZE	PAGE_SIZE
+#define MAX_SPLIT_SIZE 1023
+#define QLGE_SB_PAD 32
+
+#define DFLT_COALESCE_WAIT 100	/* 100 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 10
+
+
+#define TX_DESC_PER_IOCB 8
+/* The maximum number of frags we handle is based
+ * on PAGE_SIZE...
+ */
+#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)	/* 4k & 8k pages */
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#elif (PAGE_SHIFT == 16)	/* 64k pages */
+#define TX_DESC_PER_OAL 0
+#endif
+
+#define DB_PAGE_SIZE 4096
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+	/* Misc. stuff */
+	MAILBOX_COUNT = 16,
+
+	PROC_ADDR_RDY = (1 << 31),
+	PROC_ADDR_R = (1 << 30),
+	PROC_ADDR_ERR = (1 << 29),
+	PROC_ADDR_DA = (1 << 28),
+	PROC_ADDR_FUNC0_MBI = 0x00001180,
+	PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC0_CTL = 0x000011a1,
+	PROC_ADDR_FUNC2_MBI = 0x00001280,
+	PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC2_CTL = 0x000012a1,
+	PROC_ADDR_MPI_RISC = 0x00000000,
+	PROC_ADDR_MDE = 0x00010000,
+	PROC_ADDR_REGBLOCK = 0x00020000,
+	PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+	SYS_EFE = (1 << 0),
+	SYS_FAE = (1 << 1),
+	SYS_MDC = (1 << 2),
+	SYS_DST = (1 << 3),
+	SYS_DWC = (1 << 4),
+	SYS_EVW = (1 << 5),
+	SYS_OMP_DLY_MASK = 0x3f000000,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	SYS_ODI = (1 << 14),
+};
+
+/*
+ *  Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+	RST_FO_TFO = (1 << 0),
+	RST_FO_RR_MASK = 0x00060000,
+	RST_FO_RR_CQ_CAM = 0x00000000,
+	RST_FO_RR_DROP = 0x00000001,
+	RST_FO_RR_DQ = 0x00000002,
+	RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
+	RST_FO_FRB = (1 << 12),
+	RST_FO_MOP = (1 << 13),
+	RST_FO_REG = (1 << 14),
+	RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+	FSC_DBRST_MASK = 0x00070000,
+	FSC_DBRST_256 = 0x00000000,
+	FSC_DBRST_512 = 0x00000001,
+	FSC_DBRST_768 = 0x00000002,
+	FSC_DBRST_1024 = 0x00000003,
+	FSC_DBL_MASK = 0x00180000,
+	FSC_DBL_DBRST = 0x00000000,
+	FSC_DBL_MAX_PLD = 0x00000008,
+	FSC_DBL_MAX_BRST = 0x00000010,
+	FSC_DBL_128_BYTES = 0x00000018,
+	FSC_EC = (1 << 5),
+	FSC_EPC_MASK = 0x00c00000,
+	FSC_EPC_INBOUND = (1 << 6),
+	FSC_EPC_OUTBOUND = (1 << 7),
+	FSC_VM_PAGESIZE_MASK = 0x07000000,
+	FSC_VM_PAGE_2K = 0x00000100,
+	FSC_VM_PAGE_4K = 0x00000200,
+	FSC_VM_PAGE_8K = 0x00000300,
+	FSC_VM_PAGE_64K = 0x00000600,
+	FSC_SH = (1 << 11),
+	FSC_DSB = (1 << 12),
+	FSC_STE = (1 << 13),
+	FSC_FE = (1 << 15),
+};
+
+/*
+ *  Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+	CSR_ERR_STS_MASK = 0x0000003f,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	CSR_RR = (1 << 8),
+	CSR_HRI = (1 << 9),
+	CSR_RP = (1 << 10),
+	CSR_CMD_PARM_SHIFT = 22,
+	CSR_CMD_NOP = 0x00000000,
+	CSR_CMD_SET_RST = 0x10000000,
+	CSR_CMD_CLR_RST = 0x20000000,
+	CSR_CMD_SET_PAUSE = 0x30000000,
+	CSR_CMD_CLR_PAUSE = 0x40000000,
+	CSR_CMD_SET_H2R_INT = 0x50000000,
+	CSR_CMD_CLR_H2R_INT = 0x60000000,
+	CSR_CMD_PAR_EN = 0x70000000,
+	CSR_CMD_SET_BAD_PAR = 0x80000000,
+	CSR_CMD_CLR_BAD_PAR = 0x90000000,
+	CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
+
+/*
+ *  Configuration Register (CFG) bit definitions.
+ */
+enum {
+	CFG_LRQ = (1 << 0),
+	CFG_DRQ = (1 << 1),
+	CFG_LR = (1 << 2),
+	CFG_DR = (1 << 3),
+	CFG_LE = (1 << 5),
+	CFG_LCQ = (1 << 6),
+	CFG_DCQ = (1 << 7),
+	CFG_Q_SHIFT = 8,
+	CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ *  Status Register (STS) bit definitions.
+ */
+enum {
+	STS_FE = (1 << 0),
+	STS_PI = (1 << 1),
+	STS_PL0 = (1 << 2),
+	STS_PL1 = (1 << 3),
+	STS_PI0 = (1 << 4),
+	STS_PI1 = (1 << 5),
+	STS_FUNC_ID_MASK = 0x000000c0,
+	STS_FUNC_ID_SHIFT = 6,
+	STS_F0E = (1 << 8),
+	STS_F1E = (1 << 9),
+	STS_F2E = (1 << 10),
+	STS_F3E = (1 << 11),
+	STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+	INTR_EN_INTR_MASK = 0x007f0000,
+	INTR_EN_TYPE_MASK = 0x03000000,
+	INTR_EN_TYPE_ENABLE = 0x00000100,
+	INTR_EN_TYPE_DISABLE = 0x00000200,
+	INTR_EN_TYPE_READ = 0x00000300,
+	INTR_EN_IHD = (1 << 13),
+	INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+	INTR_EN_EI = (1 << 14),
+	INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+	INTR_MASK_PI = (1 << 0),
+	INTR_MASK_HL0 = (1 << 1),
+	INTR_MASK_LH0 = (1 << 2),
+	INTR_MASK_HL1 = (1 << 3),
+	INTR_MASK_LH1 = (1 << 4),
+	INTR_MASK_SE = (1 << 5),
+	INTR_MASK_LSC = (1 << 6),
+	INTR_MASK_MC = (1 << 7),
+	INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ *  Register (REV_ID) bit definitions.
+ */
+enum {
+	REV_ID_MASK = 0x0000000f,
+	REV_ID_NICROLL_SHIFT = 0,
+	REV_ID_NICREV_SHIFT = 4,
+	REV_ID_XGROLL_SHIFT = 8,
+	REV_ID_XGREV_SHIFT = 12,
+	REV_ID_CHIPREV_SHIFT = 28,
+};
+
+/*
+ *  Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+	FRC_ECC_ERR_VW = (1 << 12),
+	FRC_ECC_ERR_VB = (1 << 13),
+	FRC_ECC_ERR_NI = (1 << 14),
+	FRC_ECC_ERR_NO = (1 << 15),
+	FRC_ECC_PFE_SHIFT = 16,
+	FRC_ECC_ERR_DO = (1 << 18),
+	FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ *  Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+	ERR_STS_NOF = (1 << 0),
+	ERR_STS_NIF = (1 << 1),
+	ERR_STS_DRP = (1 << 2),
+	ERR_STS_XGP = (1 << 3),
+	ERR_STS_FOU = (1 << 4),
+	ERR_STS_FOC = (1 << 5),
+	ERR_STS_FOF = (1 << 6),
+	ERR_STS_FIU = (1 << 7),
+	ERR_STS_FIC = (1 << 8),
+	ERR_STS_FIF = (1 << 9),
+	ERR_STS_MOF = (1 << 10),
+	ERR_STS_TA = (1 << 11),
+	ERR_STS_MA = (1 << 12),
+	ERR_STS_MPE = (1 << 13),
+	ERR_STS_SCE = (1 << 14),
+	ERR_STS_STE = (1 << 15),
+	ERR_STS_FOW = (1 << 16),
+	ERR_STS_UE = (1 << 17),
+	ERR_STS_MCH = (1 << 26),
+	ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ *  RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+	RAM_DBG_ADDR_FW = (1 << 30),
+	RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+	/*
+	 * Example:
+	 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+	 */
+	SEM_CLEAR = 0,
+	SEM_SET = 1,
+	SEM_FORCE = 3,
+	SEM_XGMAC0_SHIFT = 0,
+	SEM_XGMAC1_SHIFT = 2,
+	SEM_ICB_SHIFT = 4,
+	SEM_MAC_ADDR_SHIFT = 6,
+	SEM_FLASH_SHIFT = 8,
+	SEM_PROBE_SHIFT = 10,
+	SEM_RT_IDX_SHIFT = 12,
+	SEM_PROC_REG_SHIFT = 14,
+	SEM_XGMAC0_MASK = 0x00030000,
+	SEM_XGMAC1_MASK = 0x000c0000,
+	SEM_ICB_MASK = 0x00300000,
+	SEM_MAC_ADDR_MASK = 0x00c00000,
+	SEM_FLASH_MASK = 0x03000000,
+	SEM_PROBE_MASK = 0x0c000000,
+	SEM_RT_IDX_MASK = 0x30000000,
+	SEM_PROC_REG_MASK = 0xc0000000,
+};
+
+/*
+ *  10G MAC Address  Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+	XGMAC_ADDR_RDY = (1 << 31),
+	XGMAC_ADDR_R = (1 << 30),
+	XGMAC_ADDR_XME = (1 << 29),
+
+	/* XGMAC control registers */
+	PAUSE_SRC_LO = 0x00000100,
+	PAUSE_SRC_HI = 0x00000104,
+	GLOBAL_CFG = 0x00000108,
+	GLOBAL_CFG_RESET = (1 << 0),
+	GLOBAL_CFG_JUMBO = (1 << 6),
+	GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+	GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+	TX_CFG = 0x0000010c,
+	TX_CFG_RESET = (1 << 0),
+	TX_CFG_EN = (1 << 1),
+	TX_CFG_PREAM = (1 << 2),
+	RX_CFG = 0x00000110,
+	RX_CFG_RESET = (1 << 0),
+	RX_CFG_EN = (1 << 1),
+	RX_CFG_PREAM = (1 << 2),
+	FLOW_CTL = 0x0000011c,
+	PAUSE_OPCODE = 0x00000120,
+	PAUSE_TIMER = 0x00000124,
+	PAUSE_FRM_DEST_LO = 0x00000128,
+	PAUSE_FRM_DEST_HI = 0x0000012c,
+	MAC_TX_PARAMS = 0x00000134,
+	MAC_TX_PARAMS_JUMBO = (1 << 31),
+	MAC_TX_PARAMS_SIZE_SHIFT = 16,
+	MAC_RX_PARAMS = 0x00000138,
+	MAC_SYS_INT = 0x00000144,
+	MAC_SYS_INT_MASK = 0x00000148,
+	MAC_MGMT_INT = 0x0000014c,
+	MAC_MGMT_IN_MASK = 0x00000150,
+	EXT_ARB_MODE = 0x000001fc,
+
+	/* XGMAC TX statistics  registers */
+	TX_PKTS = 0x00000200,
+	TX_BYTES = 0x00000208,
+	TX_MCAST_PKTS = 0x00000210,
+	TX_BCAST_PKTS = 0x00000218,
+	TX_UCAST_PKTS = 0x00000220,
+	TX_CTL_PKTS = 0x00000228,
+	TX_PAUSE_PKTS = 0x00000230,
+	TX_64_PKT = 0x00000238,
+	TX_65_TO_127_PKT = 0x00000240,
+	TX_128_TO_255_PKT = 0x00000248,
+	TX_256_511_PKT = 0x00000250,
+	TX_512_TO_1023_PKT = 0x00000258,
+	TX_1024_TO_1518_PKT = 0x00000260,
+	TX_1519_TO_MAX_PKT = 0x00000268,
+	TX_UNDERSIZE_PKT = 0x00000270,
+	TX_OVERSIZE_PKT = 0x00000278,
+
+	/* XGMAC statistics control registers */
+	RX_HALF_FULL_DET = 0x000002a0,
+	TX_HALF_FULL_DET = 0x000002a4,
+	RX_OVERFLOW_DET = 0x000002a8,
+	TX_OVERFLOW_DET = 0x000002ac,
+	RX_HALF_FULL_MASK = 0x000002b0,
+	TX_HALF_FULL_MASK = 0x000002b4,
+	RX_OVERFLOW_MASK = 0x000002b8,
+	TX_OVERFLOW_MASK = 0x000002bc,
+	STAT_CNT_CTL = 0x000002c0,
+	STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+	STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+	AUX_RX_HALF_FULL_DET = 0x000002d0,
+	AUX_TX_HALF_FULL_DET = 0x000002d4,
+	AUX_RX_OVERFLOW_DET = 0x000002d8,
+	AUX_TX_OVERFLOW_DET = 0x000002dc,
+	AUX_RX_HALF_FULL_MASK = 0x000002f0,
+	AUX_TX_HALF_FULL_MASK = 0x000002f4,
+	AUX_RX_OVERFLOW_MASK = 0x000002f8,
+	AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+	/* XGMAC RX statistics  registers */
+	RX_BYTES = 0x00000300,
+	RX_BYTES_OK = 0x00000308,
+	RX_PKTS = 0x00000310,
+	RX_PKTS_OK = 0x00000318,
+	RX_BCAST_PKTS = 0x00000320,
+	RX_MCAST_PKTS = 0x00000328,
+	RX_UCAST_PKTS = 0x00000330,
+	RX_UNDERSIZE_PKTS = 0x00000338,
+	RX_OVERSIZE_PKTS = 0x00000340,
+	RX_JABBER_PKTS = 0x00000348,
+	RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+	RX_DROP_EVENTS = 0x00000358,
+	RX_FCERR_PKTS = 0x00000360,
+	RX_ALIGN_ERR = 0x00000368,
+	RX_SYMBOL_ERR = 0x00000370,
+	RX_MAC_ERR = 0x00000378,
+	RX_CTL_PKTS = 0x00000380,
+	RX_PAUSE_PKTS = 0x00000384,
+	RX_64_PKTS = 0x00000390,
+	RX_65_TO_127_PKTS = 0x00000398,
+	RX_128_255_PKTS = 0x000003a0,
+	RX_256_511_PKTS = 0x000003a8,
+	RX_512_TO_1023_PKTS = 0x000003b0,
+	RX_1024_TO_1518_PKTS = 0x000003b8,
+	RX_1519_TO_MAX_PKTS = 0x000003c0,
+	RX_LEN_ERR_PKTS = 0x000003c8,
+
+	/* XGMAC MDIO control registers */
+	MDIO_TX_DATA = 0x00000400,
+	MDIO_RX_DATA = 0x00000410,
+	MDIO_CMD = 0x00000420,
+	MDIO_PHY_ADDR = 0x00000430,
+	MDIO_PORT = 0x00000440,
+	MDIO_STATUS = 0x00000450,
+
+	/* XGMAC AUX statistics  registers */
+};
+
+/*
+ *  Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
+ */
+enum {
+	ETS_QUEUE_SHIFT = 29,
+	ETS_REF = (1 << 26),
+	ETS_RS = (1 << 27),
+	ETS_P = (1 << 28),
+	ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ *  Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+	FLASH_ADDR_RDY = (1 << 31),
+	FLASH_ADDR_R = (1 << 30),
+	FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ *  Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+	CQ_STOP_QUEUE_MASK = (0x007f0000),
+	CQ_STOP_TYPE_MASK = (0x03000000),
+	CQ_STOP_TYPE_START = 0x00000100,
+	CQ_STOP_TYPE_STOP = 0x00000200,
+	CQ_STOP_TYPE_READ = 0x00000300,
+	CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ *  MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+	MAC_ADDR_IDX_SHIFT = 4,
+	MAC_ADDR_TYPE_SHIFT = 16,
+	MAC_ADDR_TYPE_MASK = 0x000f0000,
+	MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+	MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+	MAC_ADDR_TYPE_VLAN = 0x00020000,
+	MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+	MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+	MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+	MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+	MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+	MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+	MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+	MAC_ADDR_ADR = (1 << 25),
+	MAC_ADDR_RS = (1 << 26),
+	MAC_ADDR_E = (1 << 27),
+	MAC_ADDR_MR = (1 << 30),
+	MAC_ADDR_MW = (1 << 31),
+	MAX_MULTICAST_ENTRIES = 32,
+};
+
+/*
+ *  MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+	SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ *  FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+	FC_RCV_CFG_ECT = (1 << 15),
+	FC_RCV_CFG_DFH = (1 << 20),
+	FC_RCV_CFG_DVF = (1 << 21),
+	FC_RCV_CFG_RCE = (1 << 27),
+	FC_RCV_CFG_RFE = (1 << 28),
+	FC_RCV_CFG_TEE = (1 << 29),
+	FC_RCV_CFG_TCE = (1 << 30),
+	FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ *  NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+	NIC_RCV_CFG_PPE = (1 << 0),
+	NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+	NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+	NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+	NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+	NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+	NIC_RCV_CFG_RV = (1 << 3),
+	NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+	NIC_RCV_CFG_DFQ_SHIFT = 8,
+	NIC_RCV_CFG_DFQ = 0,	/* HARDCODE default queue to 0. */
+};
+
+/*
+ *   Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+	MGMT_RCV_CFG_ARP = (1 << 0),
+	MGMT_RCV_CFG_DHC = (1 << 1),
+	MGMT_RCV_CFG_DHS = (1 << 2),
+	MGMT_RCV_CFG_NP = (1 << 3),
+	MGMT_RCV_CFG_I6N = (1 << 4),
+	MGMT_RCV_CFG_I6R = (1 << 5),
+	MGMT_RCV_CFG_DH6 = (1 << 6),
+	MGMT_RCV_CFG_UD1 = (1 << 7),
+	MGMT_RCV_CFG_UD0 = (1 << 8),
+	MGMT_RCV_CFG_BCT = (1 << 9),
+	MGMT_RCV_CFG_MCT = (1 << 10),
+	MGMT_RCV_CFG_DM = (1 << 11),
+	MGMT_RCV_CFG_RM = (1 << 12),
+	MGMT_RCV_CFG_STL = (1 << 13),
+	MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+	MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+	MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+	MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+	MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ *  Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+	RT_IDX_IDX_SHIFT = 8,
+	RT_IDX_TYPE_MASK = 0x000f0000,
+	RT_IDX_TYPE_RT = 0x00000000,
+	RT_IDX_TYPE_RT_INV = 0x00010000,
+	RT_IDX_TYPE_NICQ = 0x00020000,
+	RT_IDX_TYPE_NICQ_INV = 0x00030000,
+	RT_IDX_DST_MASK = 0x00700000,
+	RT_IDX_DST_RSS = 0x00000000,
+	RT_IDX_DST_CAM_Q = 0x00100000,
+	RT_IDX_DST_COS_Q = 0x00200000,
+	RT_IDX_DST_DFLT_Q = 0x00300000,
+	RT_IDX_DST_DEST_Q = 0x00400000,
+	RT_IDX_RS = (1 << 26),
+	RT_IDX_E = (1 << 27),
+	RT_IDX_MR = (1 << 30),
+	RT_IDX_MW = (1 << 31),
+
+	/* Nic Queue format - type 2 bits */
+	RT_IDX_BCAST = (1 << 0),
+	RT_IDX_MCAST = (1 << 1),
+	RT_IDX_MCAST_MATCH = (1 << 2),
+	RT_IDX_MCAST_REG_MATCH = (1 << 3),
+	RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+	RT_IDX_FC_MACH = (1 << 5),
+	RT_IDX_ETH_FCOE = (1 << 6),
+	RT_IDX_CAM_HIT = (1 << 7),
+	RT_IDX_CAM_BIT0 = (1 << 8),
+	RT_IDX_CAM_BIT1 = (1 << 9),
+	RT_IDX_VLAN_TAG = (1 << 10),
+	RT_IDX_VLAN_MATCH = (1 << 11),
+	RT_IDX_VLAN_FILTER = (1 << 12),
+	RT_IDX_ETH_SKIP1 = (1 << 13),
+	RT_IDX_ETH_SKIP2 = (1 << 14),
+	RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+	RT_IDX_802_3 = (1 << 16),
+	RT_IDX_LLDP = (1 << 17),
+	RT_IDX_UNUSED018 = (1 << 18),
+	RT_IDX_UNUSED019 = (1 << 19),
+	RT_IDX_UNUSED20 = (1 << 20),
+	RT_IDX_UNUSED21 = (1 << 21),
+	RT_IDX_ERR = (1 << 22),
+	RT_IDX_VALID = (1 << 23),
+	RT_IDX_TU_CSUM_ERR = (1 << 24),
+	RT_IDX_IP_CSUM_ERR = (1 << 25),
+	RT_IDX_MAC_ERR = (1 << 26),
+	RT_IDX_RSS_TCP6 = (1 << 27),
+	RT_IDX_RSS_TCP4 = (1 << 28),
+	RT_IDX_RSS_IPV6 = (1 << 29),
+	RT_IDX_RSS_IPV4 = (1 << 30),
+	RT_IDX_RSS_MATCH = (1 << 31),
+
+	/* Hierarchy for the NIC Queue Mask */
+	RT_IDX_ALL_ERR_SLOT = 0,
+	RT_IDX_MAC_ERR_SLOT = 0,
+	RT_IDX_IP_CSUM_ERR_SLOT = 1,
+	RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+	RT_IDX_BCAST_SLOT = 3,
+	RT_IDX_MCAST_MATCH_SLOT = 4,
+	RT_IDX_ALLMULTI_SLOT = 5,
+	RT_IDX_UNUSED6_SLOT = 6,
+	RT_IDX_UNUSED7_SLOT = 7,
+	RT_IDX_RSS_MATCH_SLOT = 8,
+	RT_IDX_RSS_IPV4_SLOT = 8,
+	RT_IDX_RSS_IPV6_SLOT = 9,
+	RT_IDX_RSS_TCP4_SLOT = 10,
+	RT_IDX_RSS_TCP6_SLOT = 11,
+	RT_IDX_CAM_HIT_SLOT = 12,
+	RT_IDX_UNUSED013 = 13,
+	RT_IDX_UNUSED014 = 14,
+	RT_IDX_PROMISCUOUS_SLOT = 15,
+	RT_IDX_MAX_SLOTS = 16,
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+	PROC_ADDR = 0,		/* Use semaphore */
+	PROC_DATA = 0x04,	/* Use semaphore */
+	SYS = 0x08,
+	RST_FO = 0x0c,
+	FSC = 0x10,
+	CSR = 0x14,
+	LED = 0x18,
+	ICB_RID = 0x1c,		/* Use semaphore */
+	ICB_L = 0x20,		/* Use semaphore */
+	ICB_H = 0x24,		/* Use semaphore */
+	CFG = 0x28,
+	BIOS_ADDR = 0x2c,
+	STS = 0x30,
+	INTR_EN = 0x34,
+	INTR_MASK = 0x38,
+	ISR1 = 0x3c,
+	ISR2 = 0x40,
+	ISR3 = 0x44,
+	ISR4 = 0x48,
+	REV_ID = 0x4c,
+	FRC_ECC_ERR = 0x50,
+	ERR_STS = 0x54,
+	RAM_DBG_ADDR = 0x58,
+	RAM_DBG_DATA = 0x5c,
+	ECC_ERR_CNT = 0x60,
+	SEM = 0x64,
+	GPIO_1 = 0x68,		/* Use semaphore */
+	GPIO_2 = 0x6c,		/* Use semaphore */
+	GPIO_3 = 0x70,		/* Use semaphore */
+	RSVD2 = 0x74,
+	XGMAC_ADDR = 0x78,	/* Use semaphore */
+	XGMAC_DATA = 0x7c,	/* Use semaphore */
+	NIC_ETS = 0x80,
+	CNA_ETS = 0x84,
+	FLASH_ADDR = 0x88,	/* Use semaphore */
+	FLASH_DATA = 0x8c,	/* Use semaphore */
+	CQ_STOP = 0x90,
+	PAGE_TBL_RID = 0x94,
+	WQ_PAGE_TBL_LO = 0x98,
+	WQ_PAGE_TBL_HI = 0x9c,
+	CQ_PAGE_TBL_LO = 0xa0,
+	CQ_PAGE_TBL_HI = 0xa4,
+	MAC_ADDR_IDX = 0xa8,	/* Use semaphore */
+	MAC_ADDR_DATA = 0xac,	/* Use semaphore */
+	COS_DFLT_CQ1 = 0xb0,
+	COS_DFLT_CQ2 = 0xb4,
+	ETYPE_SKIP1 = 0xb8,
+	ETYPE_SKIP2 = 0xbc,
+	SPLT_HDR = 0xc0,
+	FC_PAUSE_THRES = 0xc4,
+	NIC_PAUSE_THRES = 0xc8,
+	FC_ETHERTYPE = 0xcc,
+	FC_RCV_CFG = 0xd0,
+	NIC_RCV_CFG = 0xd4,
+	FC_COS_TAGS = 0xd8,
+	NIC_COS_TAGS = 0xdc,
+	MGMT_RCV_CFG = 0xe0,
+	RT_IDX = 0xe4,
+	RT_DATA = 0xe8,
+	RSVD7 = 0xec,
+	XG_SERDES_ADDR = 0xf0,
+	XG_SERDES_DATA = 0xf4,
+	PRB_MX_ADDR = 0xf8,	/* Use semaphore */
+	PRB_MX_DATA = 0xfc,	/* Use semaphore */
+};
+
+/*
+ * CAM output format.
+ */
+enum {
+	CAM_OUT_ROUTE_FC = 0,
+	CAM_OUT_ROUTE_NIC = 1,
+	CAM_OUT_FUNC_SHIFT = 2,
+	CAM_OUT_RV = (1 << 4),
+	CAM_OUT_SH = (1 << 15),
+	CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox  definitions
+ */
+enum {
+	/* Asynchronous Event Notifications */
+	AEN_SYS_ERR = 0x00008002,
+	AEN_LINK_UP = 0x00008011,
+	AEN_LINK_DOWN = 0x00008012,
+	AEN_IDC_CMPLT = 0x00008100,
+	AEN_IDC_REQ = 0x00008101,
+	AEN_FW_INIT_DONE = 0x00008400,
+	AEN_FW_INIT_FAIL = 0x00008401,
+
+	/* Mailbox Command Opcodes. */
+	MB_CMD_NOP = 0x00000000,
+	MB_CMD_EX_FW = 0x00000002,
+	MB_CMD_MB_TEST = 0x00000006,
+	MB_CMD_CSUM_TEST = 0x00000007,	/* Verify Checksum */
+	MB_CMD_ABOUT_FW = 0x00000008,
+	MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+	MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+	MB_CMD_WRITE_RAM = 0x0000000d,
+	MB_CMD_READ_RAM = 0x0000000f,
+	MB_CMD_STOP_FW = 0x00000014,
+	MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+	MB_CMD_INIT_FW = 0x00000060,
+	MB_CMD_GET_INIT_CB = 0x00000061,
+	MB_CMD_GET_FW_STATE = 0x00000069,
+	MB_CMD_IDC_REQ = 0x00000100,	/* Inter-Driver Communication */
+	MB_CMD_IDC_ACK = 0x00000101,	/* Inter-Driver Communication */
+	MB_CMD_SET_WOL_MODE = 0x00000110,	/* Wake On Lan */
+	MB_WOL_DISABLE = 0x00000000,
+	MB_WOL_MAGIC_PKT = 0x00000001,
+	MB_WOL_FLTR = 0x00000002,
+	MB_WOL_UCAST = 0x00000004,
+	MB_WOL_MCAST = 0x00000008,
+	MB_WOL_BCAST = 0x00000010,
+	MB_WOL_LINK_UP = 0x00000020,
+	MB_WOL_LINK_DOWN = 0x00000040,
+	MB_CMD_SET_WOL_FLTR = 0x00000111,	/* Wake On Lan Filter */
+	MB_CMD_CLEAR_WOL_FLTR = 0x00000112,	/* Wake On Lan Filter */
+	MB_CMD_SET_WOL_MAGIC = 0x00000113,	/* Wake On Lan Magic Packet */
+	MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,	/* Wake On Lan Magic Packet */
+	MB_CMD_PORT_RESET = 0x00000120,
+	MB_CMD_SET_PORT_CFG = 0x00000122,
+	MB_CMD_GET_PORT_CFG = 0x00000123,
+	MB_CMD_SET_ASIC_VOLTS = 0x00000130,
+	MB_CMD_GET_SNS_DATA = 0x00000131,	/* Temp and Volt Sense data. */
+
+	/* Mailbox Command Status. */
+	MB_CMD_STS_GOOD = 0x00004000,	/* Success. */
+	MB_CMD_STS_INTRMDT = 0x00001000,	/* Intermediate Complete. */
+	MB_CMD_STS_ERR = 0x00004005,	/* Error. */
+};
+
+struct mbox_params {
+	u32 mbox_in[MAILBOX_COUNT];
+	u32 mbox_out[MAILBOX_COUNT];
+	int in_count;
+	int out_count;
+};
+
+struct flash_params {
+	u8 dev_id_str[4];
+	u16 size;
+	u16 csum;
+	u16 ver;
+	u16 sub_dev_id;
+	u8 mac_addr[6];
+	u16 res;
+};
+
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+	u32 cnsmr_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+	u32 prod_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+
+struct bq_element {
+	u32 addr_lo;
+#define BQ_END	0x00000001
+#define BQ_CONT	0x00000002
+#define BQ_MASK	0x00000003
+	u32 addr_hi;
+} __attribute((packed));
+
+struct tx_buf_desc {
+	__le64 addr;
+	__le32 len;
+#define TX_DESC_LEN_MASK	0x000fffff
+#define TX_DESC_C	0x40000000
+#define TX_DESC_E	0x80000000
+} __attribute((packed));
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB 			0x01
+#define OPCODE_OB_MAC_TSO_IOCB		0x02
+#define OPCODE_IB_MAC_IOCB			0x20
+#define OPCODE_IB_MPI_IOCB			0x21
+#define OPCODE_IB_AE_IOCB			0x3f
+
+struct ob_mac_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_IOCB_REQ_OI	0x01
+#define OB_MAC_IOCB_REQ_I	0x02
+#define OB_MAC_IOCB_REQ_D	0x08
+#define OB_MAC_IOCB_REQ_F	0x10
+	u8 flags2;
+	u8 flags3;
+#define OB_MAC_IOCB_DFP	0x02
+#define OB_MAC_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+	__le16 reserved2;
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved3;
+	__le16 vlan_tci;
+	__le16 reserved4;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_iocb_rsp {
+	u8 opcode;		/* */
+	u8 flags1;		/* */
+#define OB_MAC_IOCB_RSP_OI	0x01	/* */
+#define OB_MAC_IOCB_RSP_I	0x02	/* */
+#define OB_MAC_IOCB_RSP_E	0x08	/* */
+#define OB_MAC_IOCB_RSP_S	0x10	/* too Short */
+#define OB_MAC_IOCB_RSP_L	0x20	/* too Large */
+#define OB_MAC_IOCB_RSP_P	0x40	/* Padded */
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_IOCB_RSP_B	0x80	/* */
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved[13];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_OI	0x01
+#define OB_MAC_TSO_IOCB_I	0x02
+#define OB_MAC_TSO_IOCB_D	0x08
+#define OB_MAC_TSO_IOCB_IP4	0x40
+#define OB_MAC_TSO_IOCB_IP6	0x80
+	u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO	0x20
+#define OB_MAC_TSO_IOCB_UC	0x40
+#define OB_MAC_TSO_IOCB_TC	0x80
+	u8 flags3;
+#define OB_MAC_TSO_IOCB_IC	0x01
+#define OB_MAC_TSO_IOCB_DFP	0x02
+#define OB_MAC_TSO_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le32 frame_len;
+	__le32 tid;
+	__le32 txq_idx;
+	__le16 total_hdrs_len;
+	__le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+	__le16 vlan_tci;
+	__le16 mss;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI	0x01
+#define OB_MAC_TSO_IOCB_RSP_I	0x02
+#define OB_MAC_TSO_IOCB_RSP_E	0x08
+#define OB_MAC_TSO_IOCB_RSP_S	0x10
+#define OB_MAC_TSO_IOCB_RSP_L	0x20
+#define OB_MAC_TSO_IOCB_RSP_P	0x40
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_TSO_IOCB_RSP_B	0x8000
+	__le32 tid;
+	__le32 txq_idx;
+	__le32 reserved2[13];
+} __attribute((packed));
+
+struct ib_mac_iocb_rsp {
+	u8 opcode;		/* 0x20 */
+	u8 flags1;
+#define IB_MAC_IOCB_RSP_OI	0x01	/* Override intr delay */
+#define IB_MAC_IOCB_RSP_I	0x02	/* Disable Intr Generation */
+#define IB_MAC_IOCB_RSP_TE	0x04	/* Checksum error */
+#define IB_MAC_IOCB_RSP_NU	0x08	/* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE	0x10	/* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK	0x60	/* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH	0x20	/* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 	0x40	/* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 	0x60	/* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B	0x80	/* Broadcast frame */
+	u8 flags2;
+#define IB_MAC_IOCB_RSP_P	0x01	/* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V	0x02	/* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK	0x1c	/*  */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR	0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE	0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE	0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE	0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN	0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC		0x1c
+#define IB_MAC_IOCB_RSP_U	0x20	/* UDP packet */
+#define IB_MAC_IOCB_RSP_T	0x40	/* TCP packet */
+#define IB_MAC_IOCB_RSP_FO	0x80	/* Failover port */
+	u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK	0x07	/* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4	0x04	/* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6	0x02	/* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 	0x05	/* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 	0x03	/* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4	0x08	/* IPV4 */
+#define IB_MAC_IOCB_RSP_V6	0x10	/* IPV6 */
+#define IB_MAC_IOCB_RSP_IH	0x20	/* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
+	__le32 data_len;	/* */
+	__le32 data_addr_lo;	/* */
+	__le32 data_addr_hi;	/* */
+	__le32 rss;		/* */
+	__le16 vlan_id;		/* 12 bits */
+#define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT	12	/* class of service value */
+
+	__le16 reserved1;
+	__le32 reserved2[6];
+	__le32 flags4;
+#define IB_MAC_IOCB_RSP_HV	0x20000000	/* */
+#define IB_MAC_IOCB_RSP_HS	0x40000000	/* */
+#define IB_MAC_IOCB_RSP_HL	0x80000000	/* */
+	__le32 hdr_len;		/* */
+	__le32 hdr_addr_lo;	/* */
+	__le32 hdr_addr_hi;	/* */
+} __attribute((packed));
+
+struct ib_ae_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define IB_AE_IOCB_RSP_OI		0x01
+#define IB_AE_IOCB_RSP_I		0x02
+	u8 event;
+#define LINK_UP_EVENT              0x00
+#define LINK_DOWN_EVENT            0x01
+#define CAM_LOOKUP_ERR_EVENT       0x06
+#define SOFT_ECC_ERROR_EVENT       0x07
+#define MGMT_ERR_EVENT             0x08
+#define TEN_GIG_MAC_EVENT          0x09
+#define GPI0_H2L_EVENT       	0x10
+#define GPI0_L2H_EVENT       	0x20
+#define GPI1_H2L_EVENT       	0x11
+#define GPI1_L2H_EVENT       	0x21
+#define PCI_ERR_ANON_BUF_RD        0x40
+	u8 q_id;
+	__le32 reserved[15];
+} __attribute((packed));
+
+/*
+ * These structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 length;
+	__le32 tid;
+	__le32 reserved[14];
+} __attribute((packed));
+
+struct net_req_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 flags1;
+	__le32 tid;
+	__le32 reserved1[30];
+} __attribute((packed));
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+	__le16 len;
+#define Q_LEN_V		(1 << 4)
+#define Q_LEN_CPP_CONT	0x0000
+#define Q_LEN_CPP_16	0x0001
+#define Q_LEN_CPP_32	0x0002
+#define Q_LEN_CPP_64	0x0003
+	__le16 flags;
+#define Q_PRI_SHIFT	1
+#define Q_FLAGS_LC	0x1000
+#define Q_FLAGS_LB	0x2000
+#define Q_FLAGS_LI	0x4000
+#define Q_FLAGS_LO	0x8000
+	__le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+	__le16 rid;
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 cnsmr_idx_addr_lo;
+	__le32 cnsmr_idx_addr_hi;
+} __attribute((packed));
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+	u8 msix_vect;
+	u8 reserved1;
+	u8 reserved2;
+	u8 flags;
+#define FLAGS_LV	0x08
+#define FLAGS_LS	0x10
+#define FLAGS_LL	0x20
+#define FLAGS_LI	0x40
+#define FLAGS_LC	0x80
+	__le16 len;
+#define LEN_V		(1 << 4)
+#define LEN_CPP_CONT	0x0000
+#define LEN_CPP_32	0x0001
+#define LEN_CPP_64	0x0002
+#define LEN_CPP_128	0x0003
+	__le16 rid;
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 prod_idx_addr_lo;
+	__le32 prod_idx_addr_hi;
+	__le16 pkt_delay;
+	__le16 irq_delay;
+	__le32 lbq_addr_lo;
+	__le32 lbq_addr_hi;
+	__le16 lbq_buf_size;
+	__le16 lbq_len;		/* entry count */
+	__le32 sbq_addr_lo;
+	__le32 sbq_addr_hi;
+	__le16 sbq_buf_size;
+	__le16 sbq_len;		/* entry count */
+} __attribute((packed));
+
+struct ricb {
+	u8 base_cq;
+#define RSS_L4K 0x80
+	u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI  0x02
+#define RSS_LB  0x04
+#define RSS_LM  0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+	__le16 mask;
+	__le32 hash_cq_id[256];
+	__le32 ipv6_hash_key[10];
+	__le32 ipv4_hash_key[4];
+} __attribute((packed));
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+	struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+	struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry;
+	int index;
+	struct oal oal;
+	struct map_list map[MAX_SKB_FRAGS + 1];
+	int map_cnt;
+	struct tx_ring_desc *next;
+};
+
+struct bq_desc {
+	union {
+		struct page *lbq_page;
+		struct sk_buff *skb;
+	} p;
+	struct bq_element *bq;
+	int index;
+	 DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	 DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
+
+struct tx_ring {
+	/*
+	 * queue info.
+	 */
+	struct wqicb wqicb;	/* structure used to inform chip of new queue */
+	void *wq_base;		/* pci_alloc:virtual addr for tx */
+	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
+	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
+	u32 wq_size;		/* size in bytes of queue area */
+	u32 wq_len;		/* number of entries in queue */
+	void __iomem *prod_idx_db_reg;	/* doorbell area index reg at offset 0x00 */
+	void __iomem *valid_db_reg;	/* doorbell area valid reg at offset 0x04 */
+	u16 prod_idx;		/* current value for prod idx */
+	u16 cq_id;		/* completion (rx) queue for tx completions */
+	u8 wq_id;		/* queue id for this entry */
+	u8 reserved1[3];
+	struct tx_ring_desc *q;	/* descriptor list for the queue */
+	spinlock_t lock;
+	atomic_t tx_count;	/* counts down for every outstanding IO */
+	atomic_t queue_stopped;	/* Turns queue off when full. */
+	struct delayed_work tx_work;
+	struct ql_adapter *qdev;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+	DEFAULT_Q = 2,		/* Handles slow queue and chip/MPI events. */
+	TX_Q = 3,		/* Handles outbound completions. */
+	RX_Q = 4,		/* Handles inbound completions. */
+};
+
+struct rx_ring {
+	struct cqicb cqicb;	/* The chip's completion queue init control block. */
+
+	/* Completion queue elements. */
+	void *cq_base;
+	dma_addr_t cq_base_dma;
+	u32 cq_size;
+	u32 cq_len;
+	u16 cq_id;
+	u32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	dma_addr_t prod_idx_sh_reg_dma;
+	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
+	u32 cnsmr_idx;		/* current sw idx */
+	struct ql_net_rsp_iocb *curr_entry;	/* next entry on queue */
+	void __iomem *valid_db_reg;	/* PCI doorbell mem area + 0x04 */
+
+	/* Large buffer queue elements. */
+	u32 lbq_len;		/* entry count */
+	u32 lbq_size;		/* size in bytes of queue */
+	u32 lbq_buf_size;
+	void *lbq_base;
+	dma_addr_t lbq_base_dma;
+	void *lbq_base_indirect;
+	dma_addr_t lbq_base_indirect_dma;
+	struct bq_desc *lbq;	/* array of control blocks */
+	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
+	u32 lbq_prod_idx;	/* current sw prod idx */
+	u32 lbq_curr_idx;	/* next entry we expect */
+	u32 lbq_clean_idx;	/* beginning of new descs */
+	u32 lbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Small buffer queue elements. */
+	u32 sbq_len;		/* entry count */
+	u32 sbq_size;		/* size in bytes of queue */
+	u32 sbq_buf_size;
+	void *sbq_base;
+	dma_addr_t sbq_base_dma;
+	void *sbq_base_indirect;
+	dma_addr_t sbq_base_indirect_dma;
+	struct bq_desc *sbq;	/* array of control blocks */
+	void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+	u32 sbq_prod_idx;	/* current sw prod idx */
+	u32 sbq_curr_idx;	/* next entry we expect */
+	u32 sbq_clean_idx;	/* beginning of new descs */
+	u32 sbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Misc. handler elements. */
+	u32 type;		/* Type of queue, tx, rx, or default. */
+	u32 irq;		/* Which vector this ring is assigned. */
+	u32 cpu;		/* Which CPU this should run on. */
+	char name[IFNAMSIZ + 5];
+	struct napi_struct napi;
+	struct delayed_work rx_work;
+	u8 reserved;
+	struct ql_adapter *qdev;
+};
+
+/*
+ * RSS Initialization Control Block
+ */
+struct hash_id {
+	u8 value[4];
+};
+
+struct nic_stats {
+	/*
+	 * These stats come from offset 200h to 278h
+	 * in the XGMAC register.
+	 */
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u64 tx_mcast_pkts;
+	u64 tx_bcast_pkts;
+	u64 tx_ucast_pkts;
+	u64 tx_ctl_pkts;
+	u64 tx_pause_pkts;
+	u64 tx_64_pkt;
+	u64 tx_65_to_127_pkt;
+	u64 tx_128_to_255_pkt;
+	u64 tx_256_511_pkt;
+	u64 tx_512_to_1023_pkt;
+	u64 tx_1024_to_1518_pkt;
+	u64 tx_1519_to_max_pkt;
+	u64 tx_undersize_pkt;
+	u64 tx_oversize_pkt;
+
+	/*
+	 * These stats come from offset 300h to 3C8h
+	 * in the XGMAC register.
+	 */
+	u64 rx_bytes;
+	u64 rx_bytes_ok;
+	u64 rx_pkts;
+	u64 rx_pkts_ok;
+	u64 rx_bcast_pkts;
+	u64 rx_mcast_pkts;
+	u64 rx_ucast_pkts;
+	u64 rx_undersize_pkts;
+	u64 rx_oversize_pkts;
+	u64 rx_jabber_pkts;
+	u64 rx_undersize_fcerr_pkts;
+	u64 rx_drop_events;
+	u64 rx_fcerr_pkts;
+	u64 rx_align_err;
+	u64 rx_symbol_err;
+	u64 rx_mac_err;
+	u64 rx_ctl_pkts;
+	u64 rx_pause_pkts;
+	u64 rx_64_pkts;
+	u64 rx_65_to_127_pkts;
+	u64 rx_128_255_pkts;
+	u64 rx_256_511_pkts;
+	u64 rx_512_to_1023_pkts;
+	u64 rx_1024_to_1518_pkts;
+	u64 rx_1519_to_max_pkts;
+	u64 rx_len_err_pkts;
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts.  It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+	struct ql_adapter *qdev;
+	u32 intr;
+	u32 hooked;
+	u32 intr_en_mask;	/* value/mask used to enable this intr */
+	u32 intr_dis_mask;	/* value/mask used to disable this intr */
+	u32 intr_read_mask;	/* value/mask used to read this intr */
+	char name[IFNAMSIZ * 2];
+	atomic_t irq_cnt;	/* irq_cnt is used in single vector
+				 * environment.  It's incremented for each
+				 * irq handler that is scheduled.  When each
+				 * handler finishes it decrements irq_cnt and
+				 * enables interrupts if it's zero. */
+	irq_handler_t handler;
+};
+
+/* adapter flags definitions. */
+enum {
+	QL_ADAPTER_UP = (1 << 0),	/* Adapter has been brought up. */
+	QL_LEGACY_ENABLED = (1 << 3),
+	QL_MSI_ENABLED = (1 << 3),
+	QL_MSIX_ENABLED = (1 << 4),
+	QL_DMA64 = (1 << 5),
+	QL_PROMISCUOUS = (1 << 6),
+	QL_ALLMULTI = (1 << 7),
+};
+
+/* link_status bit definitions */
+enum {
+	LOOPBACK_MASK = 0x00000700,
+	LOOPBACK_PCS = 0x00000100,
+	LOOPBACK_HSS = 0x00000200,
+	LOOPBACK_EXT = 0x00000300,
+	PAUSE_MASK = 0x000000c0,
+	PAUSE_STD = 0x00000040,
+	PAUSE_PRI = 0x00000080,
+	SPEED_MASK = 0x00000038,
+	SPEED_100Mb = 0x00000000,
+	SPEED_1Gb = 0x00000008,
+	SPEED_10Gb = 0x00000010,
+	LINK_TYPE_MASK = 0x00000007,
+	LINK_TYPE_XFI = 0x00000001,
+	LINK_TYPE_XAUI = 0x00000002,
+	LINK_TYPE_XFI_BP = 0x00000003,
+	LINK_TYPE_XAUI_BP = 0x00000004,
+	LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+	struct ricb ricb;
+	unsigned long flags;
+	u32 wol;
+
+	struct nic_stats nic_stats;
+
+	struct vlan_group *vlgrp;
+
+	/* PCI Configuration information for this device */
+	struct pci_dev *pdev;
+	struct net_device *ndev;	/* Parent NET device */
+
+	/* Hardware information */
+	u32 chip_rev_id;
+	u32 func;		/* PCI function for this adapter */
+
+	spinlock_t adapter_lock;
+	spinlock_t hw_lock;
+	spinlock_t stats_lock;
+	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */
+
+	/* PCI Bus Relative Register Addresses */
+	void __iomem *reg_base;
+	void __iomem *doorbell_area;
+	u32 doorbell_area_size;
+
+	u32 msg_enable;
+
+	/* Page for Shadow Registers */
+	void *rx_ring_shadow_reg_area;
+	dma_addr_t rx_ring_shadow_reg_dma;
+	void *tx_ring_shadow_reg_area;
+	dma_addr_t tx_ring_shadow_reg_dma;
+
+	u32 mailbox_in;
+	u32 mailbox_out;
+
+	int tx_ring_size;
+	int rx_ring_size;
+	u32 intr_count;
+	struct msix_entry *msi_x_entry;
+	struct intr_context intr_context[MAX_RX_RINGS];
+
+	int (*legacy_check) (struct ql_adapter *);
+
+	int tx_ring_count;	/* One per online CPU. */
+	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
+	u32 rss_ring_count;	/* One per online CPU.  */
+	/*
+	 * rx_ring_count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 */
+	int rx_ring_count;
+	int ring_mem_size;
+	void *ring_mem;
+	struct rx_ring *rx_ring;
+	int rx_csum;
+	struct tx_ring *tx_ring;
+	u32 default_rx_queue;
+
+	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+	u16 tx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+
+	u32 xg_sem_mask;
+	u32 port_link_up;
+	u32 port_init;
+	u32 link_status;
+
+	struct flash_params flash;
+
+	struct net_device_stats stats;
+	struct workqueue_struct *q_workqueue;
+	struct workqueue_struct *workqueue;
+	struct delayed_work asic_reset_work;
+	struct delayed_work mpi_reset_work;
+	struct delayed_work mpi_work;
+};
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+	return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+	writel(val, qdev->reg_base + reg);
+}
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization.  The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues. For
+ * example, the producer index for an outbound queue.  Each queue uses
+ * 1 4k chunk of memory.  The lower half of the space is for outbound
+ * queues. The upper half is for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+	mmiowb();
+}
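+
+/* Sketch of a typical doorbell update for a transmit work queue, using
+ * the tx_ring fields declared above (illustrative only; a write barrier
+ * orders the descriptor writes ahead of the doorbell write):
+ *
+ *	tx_ring->prod_idx++;
+ *	if (tx_ring->prod_idx == tx_ring->wq_len)
+ *		tx_ring->prod_idx = 0;
+ *	wmb();
+ *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ */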
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline unsigned int ql_read_sh_reg(const volatile void  *addr)
+{
+	return *(volatile unsigned int __force *)addr;
+}
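+
+/* Sketch of how the shadow copy avoids a PCI read on the completion
+ * path, using the rx_ring fields declared above (illustrative only):
+ *
+ *	while (rx_ring->cnsmr_idx !=
+ *	       ql_read_sh_reg(rx_ring->prod_idx_sh_reg)) {
+ *		... handle rx_ring->curr_entry, then advance cnsmr_idx ...
+ *	}
+ */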
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern const struct ethtool_ops qlge_ethtool_ops;
+
+extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			       u32 *value);
+extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+			u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+
+#if 1
+#define QL_ALL_DUMP
+#define QL_REG_DUMP
+#define QL_DEV_DUMP
+#define QL_CB_DUMP
+/* #define QL_IB_DUMP */
+/* #define QL_OB_DUMP */
+#endif
+
+#ifdef QL_REG_DUMP
+extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+extern void ql_dump_routing_entries(struct ql_adapter *qdev);
+extern void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+extern void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+extern void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+extern void ql_dump_wqicb(struct wqicb *wqicb);
+extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
+extern void ql_dump_ricb(struct ricb *ricb);
+extern void ql_dump_cqicb(struct cqicb *cqicb);
+extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
+extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+		ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef	QL_ALL_DUMP
+extern void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 0000000..47df304
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,858 @@
+#include "qlge.h"
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < qdev->intr_count; i++) {
+		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+		value = ql_read32(qdev, INTR_EN);
+		printk(KERN_ERR PFX
+		       "%s: Interrupt %d is %s.\n",
+		       qdev->ndev->name, i,
+		       (value & INTR_EN_EN ? "enabled" : "disabled"));
+	}
+}
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+	u32 data;
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
+		return;
+	}
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
+	printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
+	       qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
+	printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
+	       data);
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value[3];
+	for (i = 0; i < 4; i++) {
+		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of mac index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+				       "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
+				       qdev->ndev->name, i, value[1], value[0],
+				       value[2]);
+		}
+	}
+	for (i = 0; i < 32; i++) {
+		if (ql_get_mac_addr_reg
+		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of mac index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+				       "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
+				       qdev->ndev->name, i, value[1], value[0]);
+		}
+	}
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < 16; i++) {
+		value = 0;
+		if (ql_get_routing_reg(qdev, i, &value)) {
+			printk(KERN_ERR PFX
+			       "%s: Failed read of routing index register.\n",
+			       __func__);
+			return;
+		} else {
+			if (value)
+				printk(KERN_ERR PFX
+				       "%s: Routing Mask %d = 0x%.08x.\n",
+				       qdev->ndev->name, i, value);
+		}
+	}
+}
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
+	printk(KERN_ERR PFX "SYS	 			= 0x%x.\n",
+	       ql_read32(qdev, SYS));
+	printk(KERN_ERR PFX "RST_FO 			= 0x%x.\n",
+	       ql_read32(qdev, RST_FO));
+	printk(KERN_ERR PFX "FSC 				= 0x%x.\n",
+	       ql_read32(qdev, FSC));
+	printk(KERN_ERR PFX "CSR 				= 0x%x.\n",
+	       ql_read32(qdev, CSR));
+	printk(KERN_ERR PFX "ICB_RID 			= 0x%x.\n",
+	       ql_read32(qdev, ICB_RID));
+	printk(KERN_ERR PFX "ICB_L 				= 0x%x.\n",
+	       ql_read32(qdev, ICB_L));
+	printk(KERN_ERR PFX "ICB_H 				= 0x%x.\n",
+	       ql_read32(qdev, ICB_H));
+	printk(KERN_ERR PFX "CFG 				= 0x%x.\n",
+	       ql_read32(qdev, CFG));
+	printk(KERN_ERR PFX "BIOS_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, BIOS_ADDR));
+	printk(KERN_ERR PFX "STS 				= 0x%x.\n",
+	       ql_read32(qdev, STS));
+	printk(KERN_ERR PFX "INTR_EN			= 0x%x.\n",
+	       ql_read32(qdev, INTR_EN));
+	printk(KERN_ERR PFX "INTR_MASK 			= 0x%x.\n",
+	       ql_read32(qdev, INTR_MASK));
+	printk(KERN_ERR PFX "ISR1 				= 0x%x.\n",
+	       ql_read32(qdev, ISR1));
+	printk(KERN_ERR PFX "ISR2 				= 0x%x.\n",
+	       ql_read32(qdev, ISR2));
+	printk(KERN_ERR PFX "ISR3 				= 0x%x.\n",
+	       ql_read32(qdev, ISR3));
+	printk(KERN_ERR PFX "ISR4 				= 0x%x.\n",
+	       ql_read32(qdev, ISR4));
+	printk(KERN_ERR PFX "REV_ID 			= 0x%x.\n",
+	       ql_read32(qdev, REV_ID));
+	printk(KERN_ERR PFX "FRC_ECC_ERR 			= 0x%x.\n",
+	       ql_read32(qdev, FRC_ECC_ERR));
+	printk(KERN_ERR PFX "ERR_STS 			= 0x%x.\n",
+	       ql_read32(qdev, ERR_STS));
+	printk(KERN_ERR PFX "RAM_DBG_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, RAM_DBG_ADDR));
+	printk(KERN_ERR PFX "RAM_DBG_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, RAM_DBG_DATA));
+	printk(KERN_ERR PFX "ECC_ERR_CNT 			= 0x%x.\n",
+	       ql_read32(qdev, ECC_ERR_CNT));
+	printk(KERN_ERR PFX "SEM 				= 0x%x.\n",
+	       ql_read32(qdev, SEM));
+	printk(KERN_ERR PFX "GPIO_1 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_1));
+	printk(KERN_ERR PFX "GPIO_2 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_2));
+	printk(KERN_ERR PFX "GPIO_3 			= 0x%x.\n",
+	       ql_read32(qdev, GPIO_3));
+	printk(KERN_ERR PFX "XGMAC_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, XGMAC_ADDR));
+	printk(KERN_ERR PFX "XGMAC_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, XGMAC_DATA));
+	printk(KERN_ERR PFX "NIC_ETS 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_ETS));
+	printk(KERN_ERR PFX "CNA_ETS 			= 0x%x.\n",
+	       ql_read32(qdev, CNA_ETS));
+	printk(KERN_ERR PFX "FLASH_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, FLASH_ADDR));
+	printk(KERN_ERR PFX "FLASH_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, FLASH_DATA));
+	printk(KERN_ERR PFX "CQ_STOP 			= 0x%x.\n",
+	       ql_read32(qdev, CQ_STOP));
+	printk(KERN_ERR PFX "PAGE_TBL_RID 			= 0x%x.\n",
+	       ql_read32(qdev, PAGE_TBL_RID));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_LO 		= 0x%x.\n",
+	       ql_read32(qdev, WQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_HI 		= 0x%x.\n",
+	       ql_read32(qdev, WQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_LO 		= 0x%x.\n",
+	       ql_read32(qdev, CQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_HI 		= 0x%x.\n",
+	       ql_read32(qdev, CQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "COS_DFLT_CQ1 			= 0x%x.\n",
+	       ql_read32(qdev, COS_DFLT_CQ1));
+	printk(KERN_ERR PFX "COS_DFLT_CQ2 			= 0x%x.\n",
+	       ql_read32(qdev, COS_DFLT_CQ2));
+	printk(KERN_ERR PFX "SPLT_HDR 			= 0x%x.\n",
+	       ql_read32(qdev, SPLT_HDR));
+	printk(KERN_ERR PFX "FC_PAUSE_THRES 		= 0x%x.\n",
+	       ql_read32(qdev, FC_PAUSE_THRES));
+	printk(KERN_ERR PFX "NIC_PAUSE_THRES 		= 0x%x.\n",
+	       ql_read32(qdev, NIC_PAUSE_THRES));
+	printk(KERN_ERR PFX "FC_ETHERTYPE 			= 0x%x.\n",
+	       ql_read32(qdev, FC_ETHERTYPE));
+	printk(KERN_ERR PFX "FC_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, FC_RCV_CFG));
+	printk(KERN_ERR PFX "NIC_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_RCV_CFG));
+	printk(KERN_ERR PFX "FC_COS_TAGS 			= 0x%x.\n",
+	       ql_read32(qdev, FC_COS_TAGS));
+	printk(KERN_ERR PFX "NIC_COS_TAGS 			= 0x%x.\n",
+	       ql_read32(qdev, NIC_COS_TAGS));
+	printk(KERN_ERR PFX "MGMT_RCV_CFG 			= 0x%x.\n",
+	       ql_read32(qdev, MGMT_RCV_CFG));
+	printk(KERN_ERR PFX "XG_SERDES_ADDR 		= 0x%x.\n",
+	       ql_read32(qdev, XG_SERDES_ADDR));
+	printk(KERN_ERR PFX "XG_SERDES_DATA 		= 0x%x.\n",
+	       ql_read32(qdev, XG_SERDES_DATA));
+	printk(KERN_ERR PFX "PRB_MX_ADDR 			= 0x%x.\n",
+	       ql_read32(qdev, PRB_MX_ADDR));
+	printk(KERN_ERR PFX "PRB_MX_DATA 			= 0x%x.\n",
+	       ql_read32(qdev, PRB_MX_DATA));
+	ql_dump_intr_states(qdev);
+	ql_dump_xgmac_control_regs(qdev);
+	ql_dump_ets_regs(qdev);
+	ql_dump_cam_entries(qdev);
+	ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR "%s: Enter.\n", __func__);
+	printk(KERN_ERR "tx_pkts = %ld\n",
+	       (unsigned long)qdev->nic_stats.tx_pkts);
+	printk(KERN_ERR "tx_bytes = %ld\n",
+	       (unsigned long)qdev->nic_stats.tx_bytes);
+	printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_mcast_pkts);
+	printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_bcast_pkts);
+	printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_ucast_pkts);
+	printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_ctl_pkts);
+	printk(KERN_ERR "tx_pause_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_pause_pkts);
+	printk(KERN_ERR "tx_64_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_64_pkt);
+	printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
+	printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
+	printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_256_511_pkt);
+	printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
+	printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
+	printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
+	printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_undersize_pkt);
+	printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
+	       (unsigned long)qdev->nic_stats.tx_oversize_pkt);
+	printk(KERN_ERR "rx_bytes = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bytes);
+	printk(KERN_ERR "rx_bytes_ok = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bytes_ok);
+	printk(KERN_ERR "rx_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pkts);
+	printk(KERN_ERR "rx_pkts_ok = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pkts_ok);
+	printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_bcast_pkts);
+	printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_mcast_pkts);
+	printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_ucast_pkts);
+	printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_undersize_pkts);
+	printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_oversize_pkts);
+	printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_jabber_pkts);
+	printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
+	printk(KERN_ERR "rx_drop_events = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_drop_events);
+	printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
+	printk(KERN_ERR "rx_align_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_align_err);
+	printk(KERN_ERR "rx_symbol_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_symbol_err);
+	printk(KERN_ERR "rx_mac_err = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_mac_err);
+	printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_ctl_pkts);
+	printk(KERN_ERR "rx_pause_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_pause_pkts);
+	printk(KERN_ERR "rx_64_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_64_pkts);
+	printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
+	printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_128_255_pkts);
+	printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_256_511_pkts);
+	printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
+	printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
+	printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
+	printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
+	       (unsigned long)qdev->nic_stats.rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+	int i;
+	printk(KERN_ERR PFX "qdev->flags 			= %lx.\n",
+	       qdev->flags);
+	printk(KERN_ERR PFX "qdev->vlgrp 			= %p.\n",
+	       qdev->vlgrp);
+	printk(KERN_ERR PFX "qdev->pdev 			= %p.\n",
+	       qdev->pdev);
+	printk(KERN_ERR PFX "qdev->ndev 			= %p.\n",
+	       qdev->ndev);
+	printk(KERN_ERR PFX "qdev->chip_rev_id 		= %d.\n",
+	       qdev->chip_rev_id);
+	printk(KERN_ERR PFX "qdev->reg_base 		= %p.\n",
+	       qdev->reg_base);
+	printk(KERN_ERR PFX "qdev->doorbell_area 	= %p.\n",
+	       qdev->doorbell_area);
+	printk(KERN_ERR PFX "qdev->doorbell_area_size 	= %d.\n",
+	       qdev->doorbell_area_size);
+	printk(KERN_ERR PFX "msg_enable 		= %x.\n",
+	       qdev->msg_enable);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area	= %p.\n",
+	       qdev->rx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma 	= %llx.\n",
+	       (unsigned long long) qdev->rx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area	= %p.\n",
+	       qdev->tx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma	= %llx.\n",
+	       (unsigned long long) qdev->tx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->intr_count 		= %d.\n",
+	       qdev->intr_count);
+	if (qdev->msi_x_entry)
+		for (i = 0; i < qdev->intr_count; i++) {
+			printk(KERN_ERR PFX
+			       "msi_x_entry.[%d]vector	= %d.\n", i,
+			       qdev->msi_x_entry[i].vector);
+			printk(KERN_ERR PFX
+			       "msi_x_entry.[%d]entry	= %d.\n", i,
+			       qdev->msi_x_entry[i].entry);
+		}
+	for (i = 0; i < qdev->intr_count; i++) {
+		printk(KERN_ERR PFX
+		       "intr_context[%d].qdev		= %p.\n", i,
+		       qdev->intr_context[i].qdev);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr		= %d.\n", i,
+		       qdev->intr_context[i].intr);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].hooked		= %d.\n", i,
+		       qdev->intr_context[i].hooked);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_en_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_en_mask);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_dis_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_dis_mask);
+		printk(KERN_ERR PFX
+		       "intr_context[%d].intr_read_mask	= 0x%08x.\n", i,
+		       qdev->intr_context[i].intr_read_mask);
+	}
+	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
+	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
+	printk(KERN_ERR PFX "qdev->ring_mem 	= %p.\n", qdev->ring_mem);
+	printk(KERN_ERR PFX "qdev->intr_count 	= %d.\n", qdev->intr_count);
+	printk(KERN_ERR PFX "qdev->tx_ring		= %p.\n",
+	       qdev->tx_ring);
+	printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id 	= %d.\n",
+	       qdev->rss_ring_first_cq_id);
+	printk(KERN_ERR PFX "qdev->rss_ring_count 	= %d.\n",
+	       qdev->rss_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring	= %p.\n", qdev->rx_ring);
+	printk(KERN_ERR PFX "qdev->default_rx_queue	= %d.\n",
+	       qdev->default_rx_queue);
+	printk(KERN_ERR PFX "qdev->xg_sem_mask		= 0x%08x.\n",
+	       qdev->xg_sem_mask);
+	printk(KERN_ERR PFX "qdev->port_link_up		= 0x%08x.\n",
+	       qdev->port_link_up);
+	printk(KERN_ERR PFX "qdev->port_init		= 0x%08x.\n",
+	       qdev->port_init);
+
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
+	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
+	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
+	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
+	       le16_to_cpu(wqicb->cq_id_rss));
+	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
+	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->addr_lo));
+	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->addr_hi));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
+	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+	if (tx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+	       "===================== Dumping tx_ring %d ===============.\n",
+	       tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
+	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
+	       (unsigned long long) tx_ring->wq_base_dma);
+	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
+	       tx_ring->cnsmr_idx_sh_reg);
+	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
+	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
+	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
+	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
+	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
+	       tx_ring->prod_idx_db_reg);
+	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
+	       tx_ring->valid_db_reg);
+	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
+	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
+	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
+	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
+	       atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+	int i;
+	printk(KERN_ERR PFX
+	       "===================== Dumping ricb ===============.\n");
+	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
+
+	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
+	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
+	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+	       ricb->flags & RSS_LI ? "RSS_LI " : "",
+	       ricb->flags & RSS_LB ? "RSS_LB " : "",
+	       ricb->flags & RSS_LM ? "RSS_LM " : "",
+	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
+	for (i = 0; i < 16; i++)
+		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->hash_cq_id[i]));
+	for (i = 0; i < 10; i++)
+		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->ipv6_hash_key[i]));
+	for (i = 0; i < 4; i++)
+		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
+		       le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
+
+	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
+	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
+	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
+	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->addr_lo));
+	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->addr_hi));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->prod_idx_addr_lo));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->prod_idx_addr_hi));
+	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->pkt_delay));
+	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->irq_delay));
+	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->lbq_addr_lo));
+	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->lbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->lbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->lbq_len));
+	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
+	       le32_to_cpu(cqicb->sbq_addr_lo));
+	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
+	       le32_to_cpu(cqicb->sbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->sbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
+	       le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+	if (rx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+	       "===================== Dumping rx_ring %d ===============.\n",
+	       rx_ring->cq_id);
+	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
+	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
+	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
+	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
+	       (unsigned long long) rx_ring->cq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
+	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
+	printk(KERN_ERR PFX
+	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
+	       rx_ring->prod_idx_sh_reg,
+	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
+	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
+	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
+	       rx_ring->cnsmr_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
+	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
+	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
+	       rx_ring->valid_db_reg);
+
+	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
+	       (unsigned long long) rx_ring->lbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
+	       rx_ring->lbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
+	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
+	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
+	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
+	       rx_ring->lbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
+	       rx_ring->lbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
+	       rx_ring->lbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
+	       rx_ring->lbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
+	       rx_ring->lbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
+	       rx_ring->lbq_buf_size);
+
+	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
+	       (unsigned long long) rx_ring->sbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
+	       rx_ring->sbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
+	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
+	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
+	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
+	       rx_ring->sbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
+	       rx_ring->sbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
+	       rx_ring->sbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
+	       rx_ring->sbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
+	       rx_ring->sbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
+	       rx_ring->sbq_buf_size);
+	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
+	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
+	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
+	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
+}
+
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+	void *ptr;
+
+	printk(KERN_ERR PFX "%s: Enter.\n", __func__);
+
+	ptr = kmalloc(size, GFP_ATOMIC);
+	if (ptr == NULL) {
+		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
+		       __func__);
+		return;
+	}
+
+	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+		printk(KERN_ERR "%s: Failed to upload control block!\n",
+		       __func__);
+		goto fail_it;
+	}
+	switch (bit) {
+	case CFG_DRQ:
+		ql_dump_wqicb((struct wqicb *)ptr);
+		break;
+	case CFG_DCQ:
+		ql_dump_cqicb((struct cqicb *)ptr);
+		break;
+	case CFG_DR:
+		ql_dump_ricb((struct ricb *)ptr);
+		break;
+	default:
+		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
+		       __func__, bit);
+		break;
+	}
+fail_it:
+	kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	       le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	       tbd->len & TX_DESC_C ? "C" : ".",
+	       tbd->len & TX_DESC_E ? "E" : ".");
+	tbd++;
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	       le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	       tbd->len & TX_DESC_C ? "C" : ".",
+	       tbd->len & TX_DESC_E ? "E" : ".");
+	tbd++;
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	       le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	       tbd->len & TX_DESC_C ? "C" : ".",
+	       tbd->len & TX_DESC_E ? "E" : ".");
+
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+	struct tx_buf_desc *tbd;
+	u16 frame_len;
+
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = %s\n",
+	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+	printk(KERN_ERR PFX "flags1          = %s %s %s %s %s\n",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+	printk(KERN_ERR PFX "flags2          = %s %s %s\n",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+	printk(KERN_ERR PFX "flags3          = %s %s %s \n",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
+	printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
+	printk(KERN_ERR PFX "vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
+	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+		printk(KERN_ERR PFX "frame_len      = %d\n",
+		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
+		printk(KERN_ERR PFX "mss      = %d\n",
+		       le16_to_cpu(ob_mac_tso_iocb->mss));
+		printk(KERN_ERR PFX "prot_hdr_len   = %d\n",
+		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+		printk(KERN_ERR PFX "hdr_offset     = 0x%.04x\n",
+		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+	} else {
+		printk(KERN_ERR PFX "frame_len      = %d\n",
+		       le16_to_cpu(ob_mac_iocb->frame_len));
+		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+	}
+	tbd = &ob_mac_iocb->tbd[0];
+	ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = %d\n", ob_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags          = %s %s %s %s %s %s %s\n",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode         = 0x%x\n", ib_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+	printk(KERN_ERR PFX "flags3 = %s%s.\n",
+	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+	printk(KERN_ERR PFX "data_len	= %d\n",
+	       le32_to_cpu(ib_mac_rsp->data_len));
+	printk(KERN_ERR PFX "data_addr_hi    = 0x%x\n",
+	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
+	printk(KERN_ERR PFX "data_addr_lo    = 0x%x\n",
+	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "rss    = %x\n",
+		       le32_to_cpu(ib_mac_rsp->rss));
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+		printk(KERN_ERR PFX "vlan_id    = %x\n",
+		       le16_to_cpu(ib_mac_rsp->vlan_id));
+
+	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+	       le32_to_cpu(ib_mac_rsp->
+			   flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+	if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
+		printk(KERN_ERR PFX "hdr length	= %d.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_len));
+		printk(KERN_ERR PFX "hdr addr_hi    = 0x%x.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
+		printk(KERN_ERR PFX "hdr addr_lo    = 0x%x.\n",
+		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
+	}
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+	int i;
+
+	QL_DUMP_REGS(qdev);
+	QL_DUMP_QDEV(qdev);
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+	}
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+	}
+}
+#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 0000000..6457f8c
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,415 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <linux/version.h>
+
+#include "qlge.h"
+
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+	struct cqicb *cqicb;
+
+	if (!netif_running(qdev->ndev))
+		return status;
+
+	spin_lock(&qdev->hw_lock);
+	/* Skip the default queue, and update the outbound handler
+	 * queues if they changed.
+	 */
+	cqicb = (struct cqicb *)&qdev->rx_ring[1];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
+		for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = le16_to_cpu(qdev->tx_coalesce_usecs);
+			cqicb->pkt_delay =
+			    le16_to_cpu(qdev->tx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+
+	/* Update the inbound (RSS) handler queues if they changed. */
+	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
+		for (i = qdev->rss_ring_first_cq_id;
+		     i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
+		     i++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = le16_to_cpu(qdev->rx_coalesce_usecs);
+			cqicb->pkt_delay =
+			    le16_to_cpu(qdev->rx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+exit:
+	spin_unlock(&qdev->hw_lock);
+	return status;
+}
+
+void ql_update_stats(struct ql_adapter *qdev)
+{
+	u32 i;
+	u64 data;
+	u64 *iter = &qdev->nic_stats.tx_pkts;
+
+	spin_lock(&qdev->stats_lock);
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Couldn't get xgmac sem.\n");
+		goto quit;
+	}
+	/*
+	 * Get TX statistics.
+	 */
+	for (i = 0x200; i < 0x280; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX statistics.
+	 */
+	for (i = 0x300; i < 0x3d0; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+	spin_unlock(&qdev->stats_lock);
+
+	QL_DUMP_STAT(qdev);
+
+	return;
+}
+
+static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
+	{"tx_pkts"},
+	{"tx_bytes"},
+	{"tx_mcast_pkts"},
+	{"tx_bcast_pkts"},
+	{"tx_ucast_pkts"},
+	{"tx_ctl_pkts"},
+	{"tx_pause_pkts"},
+	{"tx_64_pkts"},
+	{"tx_65_to_127_pkts"},
+	{"tx_128_to_255_pkts"},
+	{"tx_256_511_pkts"},
+	{"tx_512_to_1023_pkts"},
+	{"tx_1024_to_1518_pkts"},
+	{"tx_1519_to_max_pkts"},
+	{"tx_undersize_pkts"},
+	{"tx_oversize_pkts"},
+	{"rx_bytes"},
+	{"rx_bytes_ok"},
+	{"rx_pkts"},
+	{"rx_pkts_ok"},
+	{"rx_bcast_pkts"},
+	{"rx_mcast_pkts"},
+	{"rx_ucast_pkts"},
+	{"rx_undersize_pkts"},
+	{"rx_oversize_pkts"},
+	{"rx_jabber_pkts"},
+	{"rx_undersize_fcerr_pkts"},
+	{"rx_drop_events"},
+	{"rx_fcerr_pkts"},
+	{"rx_align_err"},
+	{"rx_symbol_err"},
+	{"rx_mac_err"},
+	{"rx_ctl_pkts"},
+	{"rx_pause_pkts"},
+	{"rx_64_pkts"},
+	{"rx_65_to_127_pkts"},
+	{"rx_128_255_pkts"},
+	{"rx_256_511_pkts"},
+	{"rx_512_to_1023_pkts"},
+	{"rx_1024_to_1518_pkts"},
+	{"rx_1519_to_max_pkts"},
+	{"rx_len_err_pkts"},
+};
+
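ql_update_stats() above reads two windows of 64-bit XGMAC counters (TX at 0x200..0x278, RX at 0x300..0x3c8, one register every 8 bytes) and stores them into consecutive u64 fields of qdev->nic_stats, so the register layout, the nic_stats field order, the string table above and ql_get_ethtool_stats() below all have to stay in step. A quick stand-alone check of that counting (the program itself is illustrative; the constants come from the loops in ql_update_stats()):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* TX counters: 0x200..0x278, RX counters: 0x300..0x3c8,
	 * one 64-bit register every 8 bytes (see ql_update_stats()).
	 */
	unsigned int tx_regs = (0x280 - 0x200) / 8;	/* 16 */
	unsigned int rx_regs = (0x3d0 - 0x300) / 8;	/* 26 */

	printf("tx = %u, rx = %u, total = %u\n",
	       tx_regs, rx_regs, tx_regs + rx_regs);

	/* Must match the 42 names in ql_stats_str_arr[] and the 42 fields
	 * copied out by ql_get_ethtool_stats().
	 */
	assert(tx_regs + rx_regs == 42);
	return 0;
}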
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+		break;
+	}
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(ql_stats_str_arr);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+		     struct ethtool_stats *stats, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct nic_stats *s = &qdev->nic_stats;
+
+	ql_update_stats(qdev);
+
+	*data++ = s->tx_pkts;
+	*data++ = s->tx_bytes;
+	*data++ = s->tx_mcast_pkts;
+	*data++ = s->tx_bcast_pkts;
+	*data++ = s->tx_ucast_pkts;
+	*data++ = s->tx_ctl_pkts;
+	*data++ = s->tx_pause_pkts;
+	*data++ = s->tx_64_pkt;
+	*data++ = s->tx_65_to_127_pkt;
+	*data++ = s->tx_128_to_255_pkt;
+	*data++ = s->tx_256_511_pkt;
+	*data++ = s->tx_512_to_1023_pkt;
+	*data++ = s->tx_1024_to_1518_pkt;
+	*data++ = s->tx_1519_to_max_pkt;
+	*data++ = s->tx_undersize_pkt;
+	*data++ = s->tx_oversize_pkt;
+	*data++ = s->rx_bytes;
+	*data++ = s->rx_bytes_ok;
+	*data++ = s->rx_pkts;
+	*data++ = s->rx_pkts_ok;
+	*data++ = s->rx_bcast_pkts;
+	*data++ = s->rx_mcast_pkts;
+	*data++ = s->rx_ucast_pkts;
+	*data++ = s->rx_undersize_pkts;
+	*data++ = s->rx_oversize_pkts;
+	*data++ = s->rx_jabber_pkts;
+	*data++ = s->rx_undersize_fcerr_pkts;
+	*data++ = s->rx_drop_events;
+	*data++ = s->rx_fcerr_pkts;
+	*data++ = s->rx_align_err;
+	*data++ = s->rx_symbol_err;
+	*data++ = s->rx_mac_err;
+	*data++ = s->rx_ctl_pkts;
+	*data++ = s->rx_pause_pkts;
+	*data++ = s->rx_64_pkts;
+	*data++ = s->rx_65_to_127_pkts;
+	*data++ = s->rx_128_255_pkts;
+	*data++ = s->rx_256_511_pkts;
+	*data++ = s->rx_512_to_1023_pkts;
+	*data++ = s->rx_1024_to_1518_pkts;
+	*data++ = s->rx_1519_to_max_pkts;
+	*data++ = s->rx_len_err_pkts;
+}
+
+static int ql_get_settings(struct net_device *ndev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->advertising = ADVERTISED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_ENABLE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+	if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
+		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+		ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+		ecmd->port = PORT_TP;
+	} else {
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+	}
+
+	ecmd->speed = SPEED_10000;
+	ecmd->duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+			   struct ethtool_drvinfo *drvinfo)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	strncpy(drvinfo->driver, qlge_driver_name, 32);
+	strncpy(drvinfo->version, qlge_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(dev);
+
+	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->irq_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_delay.  Ethtool, however, doesn't use
+	 * a timer to coalesce on a frame basis, so we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in
+	 * microseconds.  We do this by assuming a basic throughput of
+	 * 1,000,000 frames per second at 1024 bytes each, i.e. one frame
+	 * per microsecond, which makes the conversion a simple one-to-one
+	 * mapping.
+	 */
+	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+	return 0;
+}
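As the comment in ql_get_coalesce() notes, the driver assumes roughly one 1024-byte frame per microsecond, so an ethtool max_coalesced_frames value maps one-to-one onto a delay in microseconds. A stand-alone sketch of that conversion under the same assumption (the frames_to_usec() helper and the sample value are illustrative, not driver API):

#include <stdio.h>

/* Assumption from the driver comment: ~1,000,000 frames per second at
 * 1024 bytes each, i.e. roughly one frame per microsecond.
 */
#define FRAMES_PER_USEC 1

/* Convert an ethtool max_coalesced_frames count into a delay in
 * microseconds; with one frame per usec this is a one-to-one mapping.
 */
static unsigned int frames_to_usec(unsigned int max_coalesced_frames)
{
	return max_coalesced_frames / FRAMES_PER_USEC;
}

int main(void)
{
	printf("%u frames -> %u usec\n", 5u, frames_to_usec(5));
	return 0;
}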
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/* Validate user parameters. */
+	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+		return -EINVAL;
+	/* Don't wait more than 10 usec. */
+	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+		return -EINVAL;
+	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+
+	/* Verify a change took place before updating the hardware. */
+	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+	    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+	    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+	    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+		return 0;
+
+	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+	return ql_update_ring_coalescing(qdev);
+}
+
+static u32 ql_get_rx_csum(struct net_device *netdev)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	return qdev->rx_csum;
+}
+
+static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	qdev->rx_csum = data;
+	return 0;
+}
+
+static int ql_set_tso(struct net_device *ndev, uint32_t data)
+{
+
+	if (data) {
+		ndev->features |= NETIF_F_TSO;
+		ndev->features |= NETIF_F_TSO6;
+	} else {
+		ndev->features &= ~NETIF_F_TSO;
+		ndev->features &= ~NETIF_F_TSO6;
+	}
+	return 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	qdev->msg_enable = value;
+}
+
+const struct ethtool_ops qlge_ethtool_ops = {
+	.get_settings = ql_get_settings,
+	.get_drvinfo = ql_get_drvinfo,
+	.get_msglevel = ql_get_msglevel,
+	.set_msglevel = ql_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_rx_csum = ql_get_rx_csum,
+	.set_rx_csum = ql_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ql_set_tso,
+	.get_coalesce = ql_get_coalesce,
+	.set_coalesce = ql_set_coalesce,
+	.get_sset_count = ql_get_sset_count,
+	.get_strings = ql_get_strings,
+	.get_ethtool_stats = ql_get_ethtool_stats,
+};
+
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 0000000..3af822b
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,3956 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:     Linux qlge network device driver by
+ *                      Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg =
+    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+/* NETIF_MSG_TIMER |	*/
+    NETIF_MSG_IFDOWN |
+    NETIF_MSG_IFUP |
+    NETIF_MSG_RX_ERR |
+    NETIF_MSG_TX_ERR |
+    NETIF_MSG_TX_QUEUED |
+    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
+/* NETIF_MSG_PKTDATA | */
+    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = 0x00007fff;	/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int irq_type = MSIX_IRQ;
+module_param(irq_type, int, MSIX_IRQ);
+MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+/* This hardware semaphore provides exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	u32 sem_bits = 0;
+
+	switch (sem_mask) {
+	case SEM_XGMAC0_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+		break;
+	case SEM_XGMAC1_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+		break;
+	case SEM_ICB_MASK:
+		sem_bits = SEM_SET << SEM_ICB_SHIFT;
+		break;
+	case SEM_MAC_ADDR_MASK:
+		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+		break;
+	case SEM_FLASH_MASK:
+		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+		break;
+	case SEM_PROBE_MASK:
+		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+		break;
+	case SEM_RT_IDX_MASK:
+		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+		break;
+	case SEM_PROC_REG_MASK:
+		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+		break;
+	default:
+		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
+		return -EINVAL;
+	}
+
+	ql_write32(qdev, SEM, sem_bits | sem_mask);
+	return !(ql_read32(qdev, SEM) & sem_bits);
+}
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	unsigned int seconds = 3;
+	do {
+		if (!ql_sem_trylock(qdev, sem_mask))
+			return 0;
+		ssleep(1);
+	} while (--seconds);
+	return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	ql_write32(qdev, SEM, sem_mask);
+	ql_read32(qdev, SEM);	/* flush */
+}
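The helpers above implement a write-then-read-back handshake: ql_sem_trylock() writes the SEM_SET pattern for the requested resource together with its mask, reads SEM back, and owns the resource only if its bits stuck; ql_sem_spinlock() retries that once a second for a few seconds. A stand-alone model of the same idea against a fake register (all names here are illustrative, and the in-memory "register" only approximates the hardware arbitration):

#include <stdio.h>

/* Fake SEM register; on real hardware the device itself rejects the
 * write when another owner already holds the resource bits.
 */
static unsigned int fake_sem_reg;

static int model_sem_trylock(unsigned int sem_bits)
{
	if (fake_sem_reg & sem_bits)		/* models the hw rejecting us */
		return -1;
	fake_sem_reg |= sem_bits;		/* "write" the SEM_SET pattern */
	return (fake_sem_reg & sem_bits) ? 0 : -1; /* read back to confirm */
}

static void model_sem_unlock(unsigned int sem_bits)
{
	fake_sem_reg &= ~sem_bits;
}

int main(void)
{
	unsigned int bits = 1u << 4;

	if (model_sem_trylock(bits) == 0) {
		/* ... touch the shared resource here ... */
		model_sem_unlock(bits);
	}
	printf("final sem reg = 0x%x\n", fake_sem_reg);
	return 0;
}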
+
+/* This function waits for a specific bit to come ready
+ * in a given register.  It is used mostly by the initialization
+ * process, but is also called from netdev callbacks such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+	u32 temp;
+	int count = UDELAY_COUNT;
+
+	while (count) {
+		temp = ql_read32(qdev, reg);
+
+		/* check for errors */
+		if (temp & err_bit) {
+			QPRINTK(qdev, PROBE, ALERT,
+				"register 0x%.08x access error, value = 0x%.08x!.\n",
+				reg, temp);
+			return -EIO;
+		} else if (temp & bit)
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	QPRINTK(qdev, PROBE, ALERT,
+		"Timed out waiting for reg %x to come ready.\n", reg);
+	return -ETIMEDOUT;
+}
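ql_wait_reg_rdy() above is the usual bounded-polling shape: read the register, fail fast if the error bit is set, succeed when the ready bit appears, otherwise delay and retry up to a fixed count. A stand-alone model of that loop against a simulated register (the names and the fake register behaviour are illustrative, not driver code):

#include <stdio.h>

#define POLL_COUNT 1000

/* Simulated register that reports "ready" (bit 0) after a few reads. */
static unsigned int read_fake_reg(void)
{
	static int reads;
	return (++reads > 3) ? 0x1u : 0x0u;
}

static int wait_reg_rdy(unsigned int ready_bit, unsigned int err_bit)
{
	int count = POLL_COUNT;
	unsigned int val;

	while (count--) {
		val = read_fake_reg();
		if (val & err_bit)
			return -1;	/* hardware reported an access error */
		if (val & ready_bit)
			return 0;	/* bit came ready */
		/* the driver udelay()s between polls; omitted in this model */
	}
	return -2;			/* timed out */
}

int main(void)
{
	printf("wait_reg_rdy() = %d\n", wait_reg_rdy(0x1u, 0x2u));
	return 0;
}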
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+	int count = UDELAY_COUNT;
+	u32 temp;
+
+	while (count) {
+		temp = ql_read32(qdev, CFG);
+		if (temp & CFG_LE)
+			return -EIO;
+		if (!(temp & bit))
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	return -ETIMEDOUT;
+}
+
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+		 u16 q_id)
+{
+	u64 map;
+	int status = 0;
+	int direction;
+	u32 mask;
+	u32 value;
+
+	direction =
+	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+	    PCI_DMA_FROMDEVICE;
+
+	map = pci_map_single(qdev->pdev, ptr, size, direction);
+	if (pci_dma_mapping_error(qdev->pdev, map)) {
+		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+		return -ENOMEM;
+	}
+
+	status = ql_wait_cfg(qdev, bit);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Timed out waiting for CFG to come ready.\n");
+		goto exit;
+	}
+
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		goto exit;
+	ql_write32(qdev, ICB_L, (u32) map);
+	ql_write32(qdev, ICB_H, (u32) (map >> 32));
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
+
+	mask = CFG_Q_MASK | (bit << 16);
+	value = bit | (q_id << CFG_Q_SHIFT);
+	ql_write32(qdev, CFG, (mask | value));
+
+	/*
+	 * Wait for the bit to clear after signaling hw.
+	 */
+	status = ql_wait_cfg(qdev, bit);
+exit:
+	pci_unmap_single(qdev->pdev, map, size, direction);
+	return status;
+}
+
+/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			u32 *value)
+{
+	u32 offset = 0;
+	int status;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				status =
+				    ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+				if (status)
+					goto exit;
+				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+				status =
+				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+						    MAC_ADDR_MR, MAC_ADDR_E);
+				if (status)
+					goto exit;
+				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+			       u16 index)
+{
+	u32 offset = 0;
+	int status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			u32 cam_output;
+			u32 upper = (addr[0] << 8) | addr[1];
+			u32 lower =
+			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+			    (addr[5]);
+
+			QPRINTK(qdev, IFUP, INFO,
+				"Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
+				" at index %d in the CAM.\n",
+				((type ==
+				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
+				 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
+				addr[4], addr[5], index);
+
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, lower);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
+				   type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, upper);
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				   type);	/* type */
+			/* This field should also include the queue id
+			   and possibly the function id.  Right now we hardcode
+			   the route field to NIC core.
+			 */
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				cam_output = (CAM_OUT_ROUTE_NIC |
+					      (qdev->
+					       func << CAM_OUT_FUNC_SHIFT) |
+					      (qdev->
+					       rss_ring_first_cq_id <<
+					       CAM_OUT_CQ_ID_SHIFT));
+				if (qdev->vlgrp)
+					cam_output |= CAM_OUT_RV;
+				/* route to NIC core */
+				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+		{
+			u32 enable_bit = *((u32 *) &addr[0]);
+			/* For VLAN, the addr actually holds a bit that
+			 * either enables or disables the vlan id we are
+			 * addressing. It's either MAC_ADDR_E on or off.
+			 * That's bit-27 we're talking about.
+			 */
+			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
+				(enable_bit ? "Adding" : "Removing"),
+				index, (enable_bit ? "to" : "from"));
+
+			status =
+			    ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
+				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				   type |	/* type */
+				   enable_bit);	/* enable/disable */
+			break;
+		}
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
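+
+/*
+ * Illustrative sketch, not compiled into the driver: how a station
+ * address is split into the two 32-bit CAM data words written above.
+ * The address and helper name are hypothetical.
+ */
+#if 0
+static void example_cam_packing(void)
+{
+	u8 addr[6] = { 0x00, 0xc0, 0xdd, 0x11, 0x22, 0x33 };
+	u32 upper = (addr[0] << 8) | addr[1];	/* 0x000000c0 */
+	u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+		    (addr[4] << 8) | addr[5];	/* 0xdd112233 */
+
+	/* ql_set_mac_addr_reg() writes 'lower' at CAM offset 0,
+	 * 'upper' at CAM offset 1 and the CAM_OUT_* routing word
+	 * at offset 2 for the given index.
+	 */
+	(void)upper;
+	(void)lower;
+}
+#endif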
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+	int status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		goto exit;
+
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
+	if (status)
+		goto exit;
+
+	ql_write32(qdev, RT_IDX,
+		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
+	if (status)
+		goto exit;
+	*value = ql_read32(qdev, RT_DATA);
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes.  Each one can be used
+ * to route different frame types to various inbound queues.  We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+			      int enable)
+{
+	int status;
+	u32 value = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	QPRINTK(qdev, IFUP, DEBUG,
+		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
+		(enable ? "Adding" : "Removing"),
+		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
+		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
+		((index ==
+		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
+		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
+		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
+		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
+		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
+		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
+		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
+		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
+		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
+		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
+		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
+		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
+		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
+		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
+		(enable ? "to" : "from"));
+
+	switch (mask) {
+	case RT_IDX_CAM_HIT:
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
+		{
+			value = RT_IDX_DST_RSS |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case 0:		/* Clear the E-bit on an entry. */
+		{
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
+			    RT_IDX_TYPE_NICQ |	/* type */
+			    (index << RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	default:
+		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
+			mask);
+		status = -EPERM;
+		goto exit;
+	}
+
+	if (value) {
+		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+		if (status)
+			goto exit;
+		value |= (enable ? RT_IDX_E : 0);
+		ql_write32(qdev, RT_IDX, value);
+		ql_write32(qdev, RT_DATA, enable ? mask : 0);
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
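+
+/*
+ * Illustrative sketch, not compiled into the driver: every value
+ * written to RT_IDX above is just destination | type | slot index,
+ * with RT_IDX_E or'd in when the entry is being enabled.  The helper
+ * name is hypothetical; the constants come from the qlge header.
+ */
+#if 0
+static u32 example_bcast_routing_value(int enable)
+{
+	u32 value = RT_IDX_DST_DFLT_Q |		/* default (slow) queue */
+		    RT_IDX_TYPE_NICQ |		/* NIC core queue type */
+		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* slot */
+
+	return value | (enable ? RT_IDX_E : 0);
+}
+#endif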
+
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes.  Once it hits zero we enable the interrupt.
+ */
+void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+		ql_write32(qdev, INTR_EN,
+			   qdev->intr_context[intr].intr_en_mask);
+	else {
+		if (qdev->legacy_check)
+			spin_lock(&qdev->legacy_lock);
+		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
+			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
+				intr);
+			ql_write32(qdev, INTR_EN,
+				   qdev->intr_context[intr].intr_en_mask);
+		} else {
+			QPRINTK(qdev, INTR, ERR,
+				"Skip enable, other queue(s) are active.\n");
+		}
+		if (qdev->legacy_check)
+			spin_unlock(&qdev->legacy_lock);
+	}
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	u32 var = 0;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+		goto exit;
+	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+			   qdev->intr_context[intr].intr_dis_mask);
+		var = ql_read32(qdev, STS);
+	}
+	atomic_inc(&qdev->intr_context[intr].irq_cnt);
+exit:
+	return var;
+}
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+	int i;
+	for (i = 0; i < qdev->intr_count; i++) {
+		/* The enable call does an atomic_dec_and_test
+		 * and enables only if the result is zero.
+		 * So we precharge it here.
+		 */
+		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+		ql_enable_completion_interrupt(qdev, i);
+	}
+
+}
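+
+/*
+ * Illustrative sketch, not compiled into the driver: the irq_cnt
+ * handshake used by the helpers above when MSI-X is not available.
+ * Each outstanding worker holds one reference; only the last worker
+ * to finish actually re-enables the completion interrupt.
+ */
+#if 0
+static void example_irq_cnt_handshake(struct ql_adapter *qdev, u32 intr)
+{
+	/* ISR side: take a reference and mask the interrupt. */
+	ql_disable_completion_interrupt(qdev, intr);
+
+	/* ... worker runs and drains the completion queue ... */
+
+	/* Worker side: drop the reference.  The hardware enable is
+	 * only written when this was the last outstanding reference.
+	 */
+	ql_enable_completion_interrupt(qdev, intr);
+}
+#endif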
+
+int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, FLASH_DATA);
+exit:
+	return status;
+}
+
+static int ql_get_flash_params(struct ql_adapter *qdev)
+{
+	int i;
+	int status;
+	u32 *p = (u32 *)&qdev->flash;
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
+		status = ql_read_flash_word(qdev, i, p);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			goto exit;
+		}
+
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		return status;
+	/* write the data to the data reg */
+	ql_write32(qdev, XGMAC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, XGMAC_ADDR, reg);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XGMAC_DATA);
+exit:
+	return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+	int status = 0;
+	u32 hi = 0;
+	u32 lo = 0;
+
+	status = ql_read_xgmac_reg(qdev, reg, &lo);
+	if (status)
+		goto exit;
+
+	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+	if (status)
+		goto exit;
+
+	*data = (u64) lo | ((u64) hi << 32);
+
+exit:
+	return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_port_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 data;
+
+	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+		/* Another function has the semaphore, so
+		 * wait for the port init bit to come ready.
+		 */
+		QPRINTK(qdev, LINK, INFO,
+			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
+		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+		if (status) {
+			QPRINTK(qdev, LINK, CRIT,
+				"Port initialize timed out.\n");
+		}
+		return status;
+	}
+
+	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore.\n");
+	/* Set the core reset. */
+	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	if (status)
+		goto end;
+	data |= GLOBAL_CFG_RESET;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Clear the core reset and turn on jumbo for receiver. */
+	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
+	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
+	data |= GLOBAL_CFG_TX_STAT_EN;
+	data |= GLOBAL_CFG_RX_STAT_EN;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable the transmitter and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
+	data |= TX_CFG_EN;	/* Enable the transmitter. */
+	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable the receiver and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
+	data |= RX_CFG_EN;	/* Enable the receiver. */
+	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Turn on jumbo. */
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+	if (status)
+		goto end;
+	status =
+	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+	if (status)
+		goto end;
+
+	/* Signal to the world that the port is enabled.        */
+	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+	return status;
+}
+
+/* Get the next large buffer. */
+struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+	rx_ring->lbq_curr_idx++;
+	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+		rx_ring->lbq_curr_idx = 0;
+	rx_ring->lbq_free_cnt++;
+	return lbq_desc;
+}
+
+/* Get the next small buffer. */
+struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+	rx_ring->sbq_curr_idx++;
+	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+		rx_ring->sbq_curr_idx = 0;
+	rx_ring->sbq_free_cnt++;
+	return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+	rx_ring->cnsmr_idx++;
+	rx_ring->curr_entry++;
+	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+		rx_ring->cnsmr_idx = 0;
+		rx_ring->curr_entry = rx_ring->cq_base;
+	}
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int clean_idx = rx_ring->lbq_clean_idx;
+	struct bq_desc *lbq_desc;
+	struct bq_element *bq;
+	u64 map;
+	int i;
+
+	while (rx_ring->lbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"lbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			lbq_desc = &rx_ring->lbq[clean_idx];
+			bq = lbq_desc->bq;
+			if (lbq_desc->p.lbq_page == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"lbq: getting new page for index %d.\n",
+					lbq_desc->index);
+				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
+				if (lbq_desc->p.lbq_page == NULL) {
+					QPRINTK(qdev, RX_STATUS, ERR,
+						"Couldn't get a page.\n");
+					return;
+				}
+				map = pci_map_page(qdev->pdev,
+						   lbq_desc->p.lbq_page,
+						   0, PAGE_SIZE,
+						   PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(qdev->pdev, map)) {
+					QPRINTK(qdev, RX_STATUS, ERR,
+						"PCI mapping failed.\n");
+					return;
+				}
+				pci_unmap_addr_set(lbq_desc, mapaddr, map);
+				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+				bq->addr_lo =	/*lbq_desc->addr_lo = */
+				    cpu_to_le32(map);
+				bq->addr_hi =	/*lbq_desc->addr_hi = */
+				    cpu_to_le32(map >> 32);
+			}
+			clean_idx++;
+			if (clean_idx == rx_ring->lbq_len)
+				clean_idx = 0;
+		}
+
+		rx_ring->lbq_clean_idx = clean_idx;
+		rx_ring->lbq_prod_idx += 16;
+		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+			rx_ring->lbq_prod_idx = 0;
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"lbq: updating prod idx = %d.\n",
+			rx_ring->lbq_prod_idx);
+		ql_write_db_reg(rx_ring->lbq_prod_idx,
+				rx_ring->lbq_prod_idx_db_reg);
+		rx_ring->lbq_free_cnt -= 16;
+	}
+}
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int clean_idx = rx_ring->sbq_clean_idx;
+	struct bq_desc *sbq_desc;
+	struct bq_element *bq;
+	u64 map;
+	int i;
+
+	while (rx_ring->sbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			sbq_desc = &rx_ring->sbq[clean_idx];
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"sbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			bq = sbq_desc->bq;
+			if (sbq_desc->p.skb == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"sbq: getting new skb for index %d.\n",
+					sbq_desc->index);
+				sbq_desc->p.skb =
+				    netdev_alloc_skb(qdev->ndev,
+						     rx_ring->sbq_buf_size);
+				if (sbq_desc->p.skb == NULL) {
+					QPRINTK(qdev, PROBE, ERR,
+						"Couldn't get an skb.\n");
+					rx_ring->sbq_clean_idx = clean_idx;
+					return;
+				}
+				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+				map = pci_map_single(qdev->pdev,
+						     sbq_desc->p.skb->data,
+						     rx_ring->sbq_buf_size /
+						     2, PCI_DMA_FROMDEVICE);
+				pci_unmap_addr_set(sbq_desc, mapaddr, map);
+				pci_unmap_len_set(sbq_desc, maplen,
+						  rx_ring->sbq_buf_size / 2);
+				bq->addr_lo = cpu_to_le32(map);
+				bq->addr_hi = cpu_to_le32(map >> 32);
+			}
+
+			clean_idx++;
+			if (clean_idx == rx_ring->sbq_len)
+				clean_idx = 0;
+		}
+		rx_ring->sbq_clean_idx = clean_idx;
+		rx_ring->sbq_prod_idx += 16;
+		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+			rx_ring->sbq_prod_idx = 0;
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"sbq: updating prod idx = %d.\n",
+			rx_ring->sbq_prod_idx);
+		ql_write_db_reg(rx_ring->sbq_prod_idx,
+				rx_ring->sbq_prod_idx_db_reg);
+
+		rx_ring->sbq_free_cnt -= 16;
+	}
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+				    struct rx_ring *rx_ring)
+{
+	ql_update_sbq(qdev, rx_ring);
+	ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers.  Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+			  struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+	int i;
+	for (i = 0; i < mapped; i++) {
+		if (i == 0 || (i == 7 && mapped > 7)) {
+			/*
+			 * Unmap the skb->data area, or the
+			 * external sglist (AKA the Outbound
+			 * Address List (OAL)).
+			 * If it's the zeroth element, then it's
+			 * the skb->data area.  If it's the 7th
+			 * element and there are more than 6 frags,
+			 * then it's an OAL.
+			 */
+			if (i == 7) {
+				QPRINTK(qdev, TX_DONE, DEBUG,
+					"unmapping OAL area.\n");
+			}
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(&tx_ring_desc->map[i],
+							mapaddr),
+					 pci_unmap_len(&tx_ring_desc->map[i],
+						       maplen),
+					 PCI_DMA_TODEVICE);
+		} else {
+			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
+				i);
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(&tx_ring_desc->map[i],
+						      mapaddr),
+				       pci_unmap_len(&tx_ring_desc->map[i],
+						     maplen), PCI_DMA_TODEVICE);
+		}
+	}
+
+}
+
+/* Map the buffers for this transmit.  This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+		       struct ob_mac_iocb_req *mac_iocb_ptr,
+		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+	int len = skb_headlen(skb);
+	dma_addr_t map;
+	int frag_idx, err, map_idx = 0;
+	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+	int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+	if (frag_cnt) {
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+	}
+	/*
+	 * Map the skb buffer first.
+	 */
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	err = pci_dma_mapping_error(qdev->pdev, map);
+	if (err) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"PCI mapping failed with error: %d\n", err);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	tbd->len = cpu_to_le32(len);
+	tbd->addr = cpu_to_le64(map);
+	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+	map_idx++;
+
+	/*
+	 * This loop fills the remainder of the 8 address descriptors
+	 * in the IOCB.  If there are more than 7 fragments, then the
+	 * eighth address desc will point to an external list (OAL).
+	 * When this happens, the remainder of the frags will be stored
+	 * in this list.
+	 */
+	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+		tbd++;
+		if (frag_idx == 6 && frag_cnt > 7) {
+			/* Let's tack on an sglist.
+			 * Our control block will now
+			 * look like this:
+			 * iocb->seg[0] = skb->data
+			 * iocb->seg[1] = frag[0]
+			 * iocb->seg[2] = frag[1]
+			 * iocb->seg[3] = frag[2]
+			 * iocb->seg[4] = frag[3]
+			 * iocb->seg[5] = frag[4]
+			 * iocb->seg[6] = frag[5]
+			 * iocb->seg[7] = ptr to OAL (external sglist)
+			 * oal->seg[0] = frag[6]
+			 * oal->seg[1] = frag[7]
+			 * oal->seg[2] = frag[8]
+			 * oal->seg[3] = frag[9]
+			 * oal->seg[4] = frag[10]
+			 *      etc...
+			 */
+			/* Tack on the OAL in the eighth segment of IOCB. */
+			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+					     sizeof(struct oal),
+					     PCI_DMA_TODEVICE);
+			err = pci_dma_mapping_error(qdev->pdev, map);
+			if (err) {
+				QPRINTK(qdev, TX_QUEUED, ERR,
+					"PCI mapping outbound address list with error: %d\n",
+					err);
+				goto map_error;
+			}
+
+			tbd->addr = cpu_to_le64(map);
+			/*
+			 * The length is the number of fragments
+			 * that remain to be mapped times the size
+			 * of a tx_buf_desc entry in our sglist (OAL).
+			 */
+			tbd->len =
+			    cpu_to_le32((sizeof(struct tx_buf_desc) *
+					 (frag_cnt - frag_idx)) | TX_DESC_C);
+			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+					   map);
+			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+					  sizeof(struct oal));
+			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+			map_idx++;
+		}
+
+		map =
+		    pci_map_page(qdev->pdev, frag->page,
+				 frag->page_offset, frag->size,
+				 PCI_DMA_TODEVICE);
+
+		err = pci_dma_mapping_error(qdev->pdev, map);
+		if (err) {
+			QPRINTK(qdev, TX_QUEUED, ERR,
+				"PCI mapping frags failed with error: %d.\n",
+				err);
+			goto map_error;
+		}
+
+		tbd->addr = cpu_to_le64(map);
+		tbd->len = cpu_to_le32(frag->size);
+		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+				  frag->size);
+
+	}
+	/* Save the number of segments we've mapped. */
+	tx_ring_desc->map_cnt = map_idx;
+	/* Terminate the last segment. */
+	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+	return NETDEV_TX_OK;
+
+map_error:
+	/*
+	 * map_idx holds the number of segments that mapped
+	 * successfully (the skb->data area plus any frags that
+	 * followed), so ql_unmap_send() can unmap exactly those.
+	 */
+	ql_unmap_send(qdev, tx_ring_desc, map_idx);
+	return NETDEV_TX_BUSY;
+}
+
+void ql_realign_skb(struct sk_buff *skb, int len)
+{
+	void *temp_addr = skb->data;
+
+	/* Undo the skb_reserve(skb,32) we did before
+	 * giving to hardware, and realign data on
+	 * a 2-byte boundary.
+	 */
+	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb_copy_to_linear_data(skb, temp_addr,
+		(unsigned int)len);
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion.  It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+				       struct rx_ring *rx_ring,
+				       struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct bq_desc *lbq_desc;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb = NULL;
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+	/*
+	 * Handle the header buffer if present.
+	 */
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+		/*
+		 * Headers fit nicely into a small buffer.
+		 */
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				pci_unmap_addr(sbq_desc, mapaddr),
+				pci_unmap_len(sbq_desc, maplen),
+				PCI_DMA_FROMDEVICE);
+		skb = sbq_desc->p.skb;
+		ql_realign_skb(skb, hdr_len);
+		skb_put(skb, hdr_len);
+		sbq_desc->p.skb = NULL;
+	}
+
+	/*
+	 * Handle the data buffer(s).
+	 */
+	if (unlikely(!length)) {	/* Is there data too? */
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No Data buffer in this packet.\n");
+		return skb;
+	}
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, data of %d bytes in small, combine them.\n", length);
+			/*
+			 * Data is less than small buffer size so it's
+			 * stuffed in a small buffer.
+			 * For this case we append the data
+			 * from the "data" small buffer to the "header" small
+			 * buffer.
+			 */
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			pci_dma_sync_single_for_cpu(qdev->pdev,
+						    pci_unmap_addr
+						    (sbq_desc, mapaddr),
+						    pci_unmap_len
+						    (sbq_desc, maplen),
+						    PCI_DMA_FROMDEVICE);
+			memcpy(skb_put(skb, length),
+			       sbq_desc->p.skb->data, length);
+			pci_dma_sync_single_for_device(qdev->pdev,
+						       pci_unmap_addr
+						       (sbq_desc,
+							mapaddr),
+						       pci_unmap_len
+						       (sbq_desc,
+							maplen),
+						       PCI_DMA_FROMDEVICE);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes in a single small buffer.\n", length);
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			skb = sbq_desc->p.skb;
+			ql_realign_skb(skb, length);
+			skb_put(skb, length);
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(sbq_desc,
+							mapaddr),
+					 pci_unmap_len(sbq_desc,
+						       maplen),
+					 PCI_DMA_FROMDEVICE);
+			sbq_desc->p.skb = NULL;
+		}
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Header in small, %d bytes in large. Chain large to small!\n", length);
+			/*
+			 * The data is in a single large buffer.  We
+			 * chain it to the header buffer's skb and let
+			 * it rip.
+			 */
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc,
+						      mapaddr),
+				       pci_unmap_len(lbq_desc, maplen),
+				       PCI_DMA_FROMDEVICE);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Chaining page to skb.\n");
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+					   0, length);
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			lbq_desc->p.lbq_page = NULL;
+		} else {
+			/*
+			 * The headers and data are in a single large buffer. We
+			 * copy it to a new skb and let it go. This can happen with
+			 * jumbo mtu on a non-TCP/UDP frame.
+			 */
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			skb = netdev_alloc_skb(qdev->ndev, length);
+			if (skb == NULL) {
+				QPRINTK(qdev, PROBE, DEBUG,
+					"No skb available, drop the packet.\n");
+				return NULL;
+			}
+			skb_reserve(skb, NET_IP_ALIGN);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+					   0, length);
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			length -= length;
+			lbq_desc->p.lbq_page = NULL;
+			__pskb_pull_tail(skb,
+				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+				VLAN_ETH_HLEN : ETH_HLEN);
+		}
+	} else {
+		/*
+		 * The data is in a chain of large buffers
+		 * pointed to by a small buffer.  We loop
+		 * through and chain them to our small header
+		 * buffer's skb.
+		 * frags:  There are 18 max frags and our small
+		 *         buffer will hold 32 of them. The thing is,
+		 *         we'll use 3 max for our 9000 byte jumbo
+		 *         frames.  If the MTU goes up we could
+		 *          eventually be in trouble.
+		 */
+		int size, offset, i = 0;
+		struct bq_element *bq, bq_array[8];
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				 pci_unmap_addr(sbq_desc, mapaddr),
+				 pci_unmap_len(sbq_desc, maplen),
+				 PCI_DMA_FROMDEVICE);
+		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+			/*
+			 * This is a non-TCP/UDP IP frame, so
+			 * the headers aren't split into a small
+			 * buffer.  We have to use the small buffer
+			 * that contains our sg list as our skb to
+			 * send upstairs. Copy the sg list here to
+			 * a local buffer and use it to find the
+			 * pages to chain.
+			 */
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers & data in chain of large.\n", length);
+			skb = sbq_desc->p.skb;
+			bq = &bq_array[0];
+			memcpy(bq, skb->data, sizeof(bq_array));
+			sbq_desc->p.skb = NULL;
+			skb_reserve(skb, NET_IP_ALIGN);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, %d bytes of data in chain of large.\n", length);
+			bq = (struct bq_element *)sbq_desc->p.skb->data;
+		}
+		while (length > 0) {
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
+				QPRINTK(qdev, RX_STATUS, ERR,
+					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
+					lbq_desc->bq->addr_lo, bq->addr_lo);
+				return NULL;
+			}
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc,
+						      mapaddr),
+				       pci_unmap_len(lbq_desc,
+						     maplen),
+				       PCI_DMA_FROMDEVICE);
+			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
+			offset = 0;
+
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Adding page %d to skb for %d bytes.\n",
+				i, size);
+			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
+					   offset, size);
+			skb->len += size;
+			skb->data_len += size;
+			skb->truesize += size;
+			length -= size;
+			lbq_desc->p.lbq_page = NULL;
+			bq++;
+			i++;
+		}
+		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+				VLAN_ETH_HLEN : ETH_HLEN);
+	}
+	return skb;
+}
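+
+/*
+ * Illustrative sketch, not compiled into the driver: the header/data
+ * placements ql_build_rx_skb() distinguishes, keyed off the IOCB
+ * response flags.  The enum and helper are hypothetical; only the
+ * flag tests mirror the code above.
+ */
+#if 0
+enum example_rx_layout {
+	EXAMPLE_HDR_AND_DATA_IN_SMALL,	/* HS set, DS set */
+	EXAMPLE_DATA_ONLY_IN_SMALL,	/* HS clear, DS set */
+	EXAMPLE_HDR_SMALL_DATA_LARGE,	/* HS set, DL set */
+	EXAMPLE_ALL_IN_ONE_LARGE,	/* HS clear, DL set */
+	EXAMPLE_CHAIN_OF_LARGE,		/* neither DS nor DL: sbuf holds sglist */
+};
+
+static enum example_rx_layout
+example_classify_rx(struct ib_mac_iocb_rsp *rsp)
+{
+	int hdr_split = rsp->flags4 & IB_MAC_IOCB_RSP_HS;
+
+	if (rsp->flags3 & IB_MAC_IOCB_RSP_DS)
+		return hdr_split ? EXAMPLE_HDR_AND_DATA_IN_SMALL :
+				   EXAMPLE_DATA_ONLY_IN_SMALL;
+	if (rsp->flags3 & IB_MAC_IOCB_RSP_DL)
+		return hdr_split ? EXAMPLE_HDR_SMALL_DATA_LARGE :
+				   EXAMPLE_ALL_IN_ONE_LARGE;
+	return EXAMPLE_CHAIN_OF_LARGE;
+}
+#endif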
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+				   struct rx_ring *rx_ring,
+				   struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+	if (unlikely(!skb)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No skb available, drop packet.\n");
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+	}
+	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
+		QPRINTK(qdev, RX_STATUS, ERR,
+			"Bad checksum for this %s packet.\n",
+			((ib_mac_rsp->
+			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
+		skb->ip_summed = CHECKSUM_NONE;
+	} else if (qdev->rx_csum &&
+		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
+		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, ndev);
+	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a VLAN packet upstream.\n");
+		vlan_hwaccel_rx(skb, qdev->vlgrp,
+				le16_to_cpu(ib_mac_rsp->vlan_id));
+	} else {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a normal packet upstream.\n");
+		netif_rx(skb);
+	}
+	ndev->last_rx = jiffies;
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+				   struct ob_mac_iocb_rsp *mac_rsp)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+
+	QL_DUMP_OB_MAC_RSP(mac_rsp);
+	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
+	qdev->stats.tx_packets++;
+	dev_kfree_skb(tx_ring_desc->skb);
+	tx_ring_desc->skb = NULL;
+
+	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
+					OB_MAC_IOCB_RSP_S |
+					OB_MAC_IOCB_RSP_L |
+					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Total descriptor length did not match transfer length.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too short to be legal, not sent.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too long, but sent anyway.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"PCI backplane error. Frame not sent.\n");
+		}
+	}
+	atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+	netif_stop_queue(qdev->ndev);
+	netif_carrier_off(qdev->ndev);
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+	netif_stop_queue(qdev->ndev);
+	netif_carrier_off(qdev->ndev);
+	ql_disable_interrupts(qdev);
+	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+				    struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+	switch (ib_ae_rsp->event) {
+	case MGMT_ERR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"Management Processor Fatal Error.\n");
+		ql_queue_fw_error(qdev);
+		return;
+
+	case CAM_LOOKUP_ERR_EVENT:
+		QPRINTK(qdev, LINK, ERR,
+			"Multiple CAM hits lookup occurred.\n");
+			"Multiple CAM hits occurred during lookup.\n");
+		ql_queue_asic_error(qdev);
+		return;
+
+	case SOFT_ECC_ERROR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+		ql_queue_asic_error(qdev);
+		break;
+
+	case PCI_ERR_ANON_BUF_RD:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+			ib_ae_rsp->q_id);
+		ql_queue_asic_error(qdev);
+		break;
+
+	default:
+		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
+			ib_ae_rsp->event);
+		ql_queue_asic_error(qdev);
+		break;
+	}
+}
+
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ob_mac_iocb_rsp *net_rsp = NULL;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+
+		case OPCODE_OB_MAC_TSO_IOCB:
+		case OPCODE_OB_MAC_IOCB:
+			ql_process_mac_tx_intr(qdev, net_rsp);
+			break;
+		default:
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
+				net_rsp->opcode);
+		}
+		count++;
+		ql_update_cq(rx_ring);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	}
+	ql_write_cq_idx(rx_ring);
+	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
+		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+		if (atomic_read(&tx_ring->queue_stopped) &&
+		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+			/*
+			 * The queue got stopped because the tx_ring was full.
+			 * Wake it up, because it's now at least 25% empty.
+			 */
+			netif_wake_queue(qdev->ndev);
+	}
+
+	return count;
+}
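+
+/*
+ * Illustrative sketch, not compiled into the driver: the wake
+ * threshold used above.  With a hypothetical wq_len of 256 the queue
+ * is restarted once more than 64 descriptors (25%) are free again.
+ */
+#if 0
+static int example_should_wake(struct tx_ring *tx_ring)
+{
+	return atomic_read(&tx_ring->queue_stopped) &&
+	       atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4);
+}
+#endif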
+
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ql_net_rsp_iocb *net_rsp;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+		case OPCODE_IB_MAC_IOCB:
+			ql_process_mac_rx_intr(qdev, rx_ring,
+					       (struct ib_mac_iocb_rsp *)
+					       net_rsp);
+			break;
+
+		case OPCODE_IB_AE_IOCB:
+			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+						net_rsp);
+			break;
+		default:
+			{
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"Hit default case, not handled! dropping the packet, opcode = %x.\n",
+					net_rsp->opcode);
+			}
+		}
+		count++;
+		ql_update_cq(rx_ring);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+		if (count == budget)
+			break;
+	}
+	ql_update_buffer_queues(qdev, rx_ring);
+	ql_write_cq_idx(rx_ring);
+	return count;
+}
+
+static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
+	struct ql_adapter *qdev = rx_ring->qdev;
+	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+
+	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
+		rx_ring->cq_id);
+
+	if (work_done < budget) {
+		__netif_rx_complete(qdev->ndev, napi);
+		ql_enable_completion_interrupt(qdev, rx_ring->irq);
+	}
+	return work_done;
+}
+
+static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	qdev->vlgrp = grp;
+	if (grp) {
+		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+	} else {
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Turning off VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+	}
+}
+
+static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = MAC_ADDR_E;
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg
+	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+	}
+	spin_unlock(&qdev->hw_lock);
+}
+
+static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = 0;
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg
+	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+	}
+	spin_unlock(&qdev->hw_lock);
+
+}
+
+/* Worker thread to process a given rx_ring that is dedicated
+ * to outbound completions.
+ */
+static void ql_tx_clean(struct work_struct *work)
+{
+	struct rx_ring *rx_ring =
+	    container_of(work, struct rx_ring, rx_work.work);
+	ql_clean_outbound_rx_ring(rx_ring);
+	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
+
+}
+
+/* Worker thread to process a given rx_ring that is dedicated
+ * to inbound completions.
+ */
+static void ql_rx_clean(struct work_struct *work)
+{
+	struct rx_ring *rx_ring =
+	    container_of(work, struct rx_ring, rx_work.work);
+	ql_clean_inbound_rx_ring(rx_ring, 64);
+	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
+static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
+			      &rx_ring->rx_work, 0);
+	return IRQ_HANDLED;
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	netif_rx_schedule(qdev->ndev, &rx_ring->napi);
+	return IRQ_HANDLED;
+}
+
+/* We check here to see if we're already handling a legacy
+ * interrupt.  If we are, then it must belong to another
+ * chip with which we're sharing the interrupt line.
+ */
+int ql_legacy_check(struct ql_adapter *qdev)
+{
+	int err;
+	spin_lock(&qdev->legacy_lock);
+	err = atomic_read(&qdev->intr_context[0].irq_cnt);
+	spin_unlock(&qdev->legacy_lock);
+	return err;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/Legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+	u32 var;
+	int i;
+	int work_done = 0;
+
+	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
+		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
+		return IRQ_NONE;	/* Not our interrupt */
+	}
+
+	var = ql_read32(qdev, STS);
+
+	/*
+	 * Check for fatal error.
+	 */
+	if (var & STS_FE) {
+		ql_queue_asic_error(qdev);
+		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+		var = ql_read32(qdev, ERR_STS);
+		QPRINTK(qdev, INTR, ERR,
+			"Resetting chip. Error Status Register = 0x%x\n", var);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Check MPI processor activity.
+	 */
+	if (var & STS_PI) {
+		/*
+		 * We've got an async event or mailbox completion.
+		 * Handle it and clear the source of the interrupt.
+		 */
+		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
+				      &qdev->mpi_work, 0);
+		work_done++;
+	}
+
+	/*
+	 * Check the default queue and wake handler if active.
+	 */
+	rx_ring = &qdev->rx_ring[0];
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
+				      &rx_ring->rx_work, 0);
+		work_done++;
+	}
+
+	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		/*
+		 * Start the DPC for each active queue.
+		 */
+		for (i = 1; i < qdev->rx_ring_count; i++) {
+			rx_ring = &qdev->rx_ring[i];
+			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+			    rx_ring->cnsmr_idx) {
+				QPRINTK(qdev, INTR, INFO,
+					"Waking handler for rx_ring[%d].\n", i);
+				ql_disable_completion_interrupt(qdev,
+								intr_context->
+								intr);
+				if (i < qdev->rss_ring_first_cq_id)
+					queue_delayed_work_on(rx_ring->cpu,
+							      qdev->q_workqueue,
+							      &rx_ring->rx_work,
+							      0);
+				else
+					netif_rx_schedule(qdev->ndev,
+							  &rx_ring->napi);
+				work_done++;
+			}
+		}
+	}
+	return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+	if (skb_is_gso(skb)) {
+		int err;
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+		mac_iocb_ptr->net_trans_offset =
+		    cpu_to_le16(skb_network_offset(skb) |
+				skb_transport_offset(skb)
+				<< OB_MAC_TRANSPORT_HDR_SHIFT);
+		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+		if (likely(skb->protocol == htons(ETH_P_IP))) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->check = 0;
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+	int len;
+	struct iphdr *iph = ip_hdr(skb);
+	u16 *check;
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+	mac_iocb_ptr->net_trans_offset =
+		cpu_to_le16(skb_network_offset(skb) |
+		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		check = &(tcp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				(tcp_hdr(skb)->doff << 2));
+	} else {
+		check = &(udp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				sizeof(struct udphdr));
+	}
+	*check = ~csum_tcpudp_magic(iph->saddr,
+				    iph->daddr, len, iph->protocol, 0);
+}
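+
+/*
+ * Illustrative sketch, not compiled into the driver: how the offsets
+ * are packed for a plain TCP/IPv4 frame.  The byte counts assume a
+ * standard Ethernet header with no VLAN tag and no IP options; the
+ * helper name is hypothetical.
+ */
+#if 0
+static void example_offset_packing(struct sk_buff *skb,
+				   struct ob_mac_tso_iocb_req *iocb)
+{
+	/* skb_network_offset()   == 14 (Ethernet header)
+	 * skb_transport_offset() == 34 (Ethernet + 20-byte IP header)
+	 */
+	iocb->net_trans_offset =
+	    cpu_to_le16(skb_network_offset(skb) |
+			skb_transport_offset(skb)
+			<< OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	/* total_hdrs_len covers everything up to the TCP payload:
+	 * 34 + (tcp_hdr(skb)->doff << 2), i.e. 54 when there are no
+	 * TCP options.
+	 */
+	iocb->total_hdrs_len =
+	    cpu_to_le16(skb_transport_offset(skb) +
+			(tcp_hdr(skb)->doff << 2));
+}
+#endif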
+
+static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int tso;
+	struct tx_ring *tx_ring;
+	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+
+	tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+		QPRINTK(qdev, TX_QUEUED, INFO,
+			"%s: shutting down tx queue %d due to lack of resources.\n",
+			__func__, tx_ring_idx);
+		netif_stop_queue(ndev);
+		atomic_inc(&tx_ring->queue_stopped);
+		return NETDEV_TX_BUSY;
+	}
+	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+	mac_iocb_ptr = tx_ring_desc->queue_entry;
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+	mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We use the upper 32-bits to store the tx queue for this IO.
+	 * When we get the completion we can use it to establish the context.
+	 */
+	mac_iocb_ptr->txq_idx = tx_ring_idx;
+	tx_ring_desc->skb = skb;
+
+	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
+			vlan_tx_tag_get(skb));
+		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+	}
+	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		ql_hw_csum_setup(skb,
+				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	}
+	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+	tx_ring->prod_idx++;
+	if (tx_ring->prod_idx == tx_ring->wq_len)
+		tx_ring->prod_idx = 0;
+	wmb();
+
+	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+	ndev->trans_start = jiffies;
+	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
+		tx_ring->prod_idx, skb->len);
+
+	atomic_dec(&tx_ring->tx_count);
+	return NETDEV_TX_OK;
+}
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+	if (qdev->rx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+				    PAGE_SIZE,
+				    qdev->rx_ring_shadow_reg_area,
+				    qdev->rx_ring_shadow_reg_dma);
+		qdev->rx_ring_shadow_reg_area = NULL;
+	}
+	if (qdev->tx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+				    PAGE_SIZE,
+				    qdev->tx_ring_shadow_reg_area,
+				    qdev->tx_ring_shadow_reg_dma);
+		qdev->tx_ring_shadow_reg_area = NULL;
+	}
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+	qdev->rx_ring_shadow_reg_area =
+	    pci_alloc_consistent(qdev->pdev,
+				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
+	if (qdev->rx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of RX shadow space failed.\n");
+		return -ENOMEM;
+	}
+	qdev->tx_ring_shadow_reg_area =
+	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
+				 &qdev->tx_ring_shadow_reg_dma);
+	if (qdev->tx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of TX shadow space failed.\n");
+		goto err_wqp_sh_area;
+	}
+	return 0;
+
+err_wqp_sh_area:
+	pci_free_consistent(qdev->pdev,
+			    PAGE_SIZE,
+			    qdev->rx_ring_shadow_reg_area,
+			    qdev->rx_ring_shadow_reg_dma);
+	return -ENOMEM;
+}
+
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	int i;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+
+	mac_iocb_ptr = tx_ring->wq_base;
+	tx_ring_desc = tx_ring->q;
+	for (i = 0; i < tx_ring->wq_len; i++) {
+		tx_ring_desc->index = i;
+		tx_ring_desc->skb = NULL;
+		tx_ring_desc->queue_entry = mac_iocb_ptr;
+		mac_iocb_ptr++;
+		tx_ring_desc++;
+	}
+	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+	atomic_set(&tx_ring->queue_stopped, 0);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	if (tx_ring->wq_base) {
+		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+				    tx_ring->wq_base, tx_ring->wq_base_dma);
+		tx_ring->wq_base = NULL;
+	}
+	kfree(tx_ring->q);
+	tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	tx_ring->wq_base =
+	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+				 &tx_ring->wq_base_dma);
+
+	if ((tx_ring->wq_base == NULL)
+	    || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
+		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+	tx_ring->q =
+	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+	if (tx_ring->q == NULL)
+		goto err;
+
+	return 0;
+err:
+	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+			    tx_ring->wq_base, tx_ring->wq_base_dma);
+	return -ENOMEM;
+}
+
+void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		if (lbq_desc->p.lbq_page) {
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(lbq_desc, mapaddr),
+				       pci_unmap_len(lbq_desc, maplen),
+				       PCI_DMA_FROMDEVICE);
+
+			put_page(lbq_desc->p.lbq_page);
+			lbq_desc->p.lbq_page = NULL;
+		}
+		lbq_desc->bq->addr_lo = 0;
+		lbq_desc->bq->addr_hi = 0;
+	}
+}
+
+/*
+ * Allocate and map a page for each element of the lbq.
+ */
+static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+	u64 map;
+	struct bq_element *bq = rx_ring->lbq_base;
+
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		memset(lbq_desc, 0, sizeof(*lbq_desc));
+		lbq_desc->bq = bq;
+		lbq_desc->index = i;
+		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
+		if (unlikely(!lbq_desc->p.lbq_page)) {
+			QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
+			goto mem_error;
+		} else {
+			map = pci_map_page(qdev->pdev,
+					   lbq_desc->p.lbq_page,
+					   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"PCI mapping failed.\n");
+				goto mem_error;
+			}
+			pci_unmap_addr_set(lbq_desc, mapaddr, map);
+			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+			bq->addr_lo = cpu_to_le32(map);
+			bq->addr_hi = cpu_to_le32(map >> 32);
+		}
+		bq++;
+	}
+	return 0;
+mem_error:
+	ql_free_lbq_buffers(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		if (sbq_desc == NULL) {
+			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+			return;
+		}
+		if (sbq_desc->p.skb) {
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(sbq_desc, mapaddr),
+					 pci_unmap_len(sbq_desc, maplen),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(sbq_desc->p.skb);
+			sbq_desc->p.skb = NULL;
+		}
+		if (sbq_desc->bq == NULL) {
+			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
+				i);
+			return;
+		}
+		sbq_desc->bq->addr_lo = 0;
+		sbq_desc->bq->addr_hi = 0;
+	}
+}
+
+/* Allocate and map an skb for each element of the sbq. */
+static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb;
+	u64 map;
+	struct bq_element *bq = rx_ring->sbq_base;
+
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		memset(sbq_desc, 0, sizeof(*sbq_desc));
+		sbq_desc->index = i;
+		sbq_desc->bq = bq;
+		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			QPRINTK(qdev, IFUP, ERR,
+				"small buff alloc failed for %d bytes at index %d.\n",
+				rx_ring->sbq_buf_size, i);
+			goto mem_err;
+		}
+		skb_reserve(skb, QLGE_SB_PAD);
+		sbq_desc->p.skb = skb;
+		/*
+		 * Map only half the buffer, because the
+		 * other half may get some data copied to it
+		 * when the completion arrives.
+		 */
+		map = pci_map_single(qdev->pdev,
+				     skb->data,
+				     rx_ring->sbq_buf_size / 2,
+				     PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+			goto mem_err;
+		}
+		pci_unmap_addr_set(sbq_desc, mapaddr, map);
+		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
+		bq->addr_lo =	/*sbq_desc->addr_lo = */
+		    cpu_to_le32(map);
+		bq->addr_hi =	/*sbq_desc->addr_hi = */
+		    cpu_to_le32(map >> 32);
+		bq++;
+	}
+	return 0;
+mem_err:
+	ql_free_sbq_buffers(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+	if (rx_ring->sbq_len)
+		ql_free_sbq_buffers(qdev, rx_ring);
+	if (rx_ring->lbq_len)
+		ql_free_lbq_buffers(qdev, rx_ring);
+
+	/* Free the small buffer queue. */
+	if (rx_ring->sbq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->sbq_size,
+				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
+		rx_ring->sbq_base = NULL;
+	}
+
+	/* Free the small buffer queue control blocks. */
+	kfree(rx_ring->sbq);
+	rx_ring->sbq = NULL;
+
+	/* Free the large buffer queue. */
+	if (rx_ring->lbq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->lbq_size,
+				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
+		rx_ring->lbq_base = NULL;
+	}
+
+	/* Free the large buffer queue control blocks. */
+	kfree(rx_ring->lbq);
+	rx_ring->lbq = NULL;
+
+	/* Free the rx queue. */
+	if (rx_ring->cq_base) {
+		pci_free_consistent(qdev->pdev,
+				    rx_ring->cq_size,
+				    rx_ring->cq_base, rx_ring->cq_base_dma);
+		rx_ring->cq_base = NULL;
+	}
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+
+	/*
+	 * Allocate the completion queue for this rx_ring.
+	 */
+	rx_ring->cq_base =
+	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+				 &rx_ring->cq_base_dma);
+
+	if (rx_ring->cq_base == NULL) {
+		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	if (rx_ring->sbq_len) {
+		/*
+		 * Allocate small buffer queue.
+		 */
+		rx_ring->sbq_base =
+		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+					 &rx_ring->sbq_base_dma);
+
+		if (rx_ring->sbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+
+		/*
+		 * Allocate small buffer queue control blocks.
+		 */
+		rx_ring->sbq =
+		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
+			    GFP_KERNEL);
+		if (rx_ring->sbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue control block allocation failed.\n");
+			goto err_mem;
+		}
+
+		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer allocation failed.\n");
+			goto err_mem;
+		}
+	}
+
+	if (rx_ring->lbq_len) {
+		/*
+		 * Allocate large buffer queue.
+		 */
+		rx_ring->lbq_base =
+		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+					 &rx_ring->lbq_base_dma);
+
+		if (rx_ring->lbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+		/*
+		 * Allocate large buffer queue control blocks.
+		 */
+		rx_ring->lbq =
+		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
+			    GFP_KERNEL);
+		if (rx_ring->lbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue control block allocation failed.\n");
+			goto err_mem;
+		}
+
+		/*
+		 * Allocate the buffers.
+		 */
+		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer allocation failed.\n");
+			goto err_mem;
+		}
+	}
+
+	return 0;
+
+err_mem:
+	ql_free_rx_resources(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+	int i, j;
+
+	/*
+	 * Loop through all queues and free
+	 * any resources.
+	 */
+	for (j = 0; j < qdev->tx_ring_count; j++) {
+		tx_ring = &qdev->tx_ring[j];
+		for (i = 0; i < tx_ring->wq_len; i++) {
+			tx_ring_desc = &tx_ring->q[i];
+			if (tx_ring_desc && tx_ring_desc->skb) {
+				QPRINTK(qdev, IFDOWN, ERR,
+				"Freeing lost SKB %p, from queue %d, index %d.\n",
+					tx_ring_desc->skb, j,
+					tx_ring_desc->index);
+				ql_unmap_send(qdev, tx_ring_desc,
+					      tx_ring_desc->map_cnt);
+				dev_kfree_skb(tx_ring_desc->skb);
+				tx_ring_desc->skb = NULL;
+			}
+		}
+	}
+}
+
+static void ql_free_ring_cb(struct ql_adapter *qdev)
+{
+	kfree(qdev->ring_mem);
+}
+
+static int ql_alloc_ring_cb(struct ql_adapter *qdev)
+{
+	/* Allocate space for tx/rx ring control blocks. */
+	qdev->ring_mem_size =
+	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
+	    (qdev->rx_ring_count * sizeof(struct rx_ring));
+	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
+	if (qdev->ring_mem == NULL) {
+		return -ENOMEM;
+	} else {
+		qdev->rx_ring = qdev->ring_mem;
+		qdev->tx_ring = qdev->ring_mem +
+		    (qdev->rx_ring_count * sizeof(struct rx_ring));
+	}
+	return 0;
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	for (i = 0; i < qdev->tx_ring_count; i++)
+		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+	for (i = 0; i < qdev->rx_ring_count; i++)
+		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+	ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	/* Allocate space for our shadow registers and such. */
+	if (ql_alloc_shadow_space(qdev))
+		return -ENOMEM;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"RX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	/* Allocate tx queue resources */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"TX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	return 0;
+
+err_mem:
+	ql_free_mem_resources(qdev);
+	return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	struct cqicb *cqicb = &rx_ring->cqicb;
+	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+	    (rx_ring->cq_id * sizeof(u64) * 4);
+	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+	    (rx_ring->cq_id * sizeof(u64) * 4);
+	void __iomem *doorbell_area =
+	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+	int err = 0;
+	u16 bq_len;
+
+	/* Set up the shadow registers for this ring. */
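+	/* Each completion queue reserves four u64 shadow slots (hence the
+	 * sizeof(u64) * 4 stride above); the producer index and the lbq/sbq
+	 * indirect pointers use the first three of them.
+	 */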
+	rx_ring->prod_idx_sh_reg = shadow_reg;
+	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	shadow_reg += sizeof(u64);
+	shadow_reg_dma += sizeof(u64);
+	rx_ring->lbq_base_indirect = shadow_reg;
+	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+	shadow_reg += sizeof(u64);
+	shadow_reg_dma += sizeof(u64);
+	rx_ring->sbq_base_indirect = shadow_reg;
+	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+
+	/* PCI doorbell mem area + 0x00 for consumer index register */
+	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
+	rx_ring->cnsmr_idx = 0;
+	rx_ring->curr_entry = rx_ring->cq_base;
+
+	/* PCI doorbell mem area + 0x04 for valid register */
+	rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/* PCI doorbell mem area + 0x18 for large buffer consumer */
+	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
+
+	/* PCI doorbell mem area + 0x1c */
+	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
+
+	memset((void *)cqicb, 0, sizeof(struct cqicb));
+	cqicb->msix_vect = rx_ring->irq;
+
+	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
+
+	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
+	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
+
+	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
+	cqicb->prod_idx_addr_hi =
+	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
+
+	/*
+	 * Set up the control block load flags.
+	 */
+	cqicb->flags = FLAGS_LC |	/* Load queue base address */
+	    FLAGS_LV |		/* Load MSI-X vector */
+	    FLAGS_LI;		/* Load irq delay values */
+	if (rx_ring->lbq_len) {
+		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
+		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
+		cqicb->lbq_addr_lo =
+		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
+		cqicb->lbq_addr_hi =
+		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
+		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
+		bq_len = (u16) rx_ring->lbq_len;
+		cqicb->lbq_len = cpu_to_le16(bq_len);
+		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
+		rx_ring->lbq_curr_idx = 0;
+		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
+		rx_ring->lbq_free_cnt = 16;
+	}
+	if (rx_ring->sbq_len) {
+		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
+		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
+		cqicb->sbq_addr_lo =
+		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
+		cqicb->sbq_addr_hi =
+		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
+		cqicb->sbq_buf_size =
+		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
+		bq_len = (u16) rx_ring->sbq_len;
+		cqicb->sbq_len = cpu_to_le16(bq_len);
+		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
+		rx_ring->sbq_curr_idx = 0;
+		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
+		rx_ring->sbq_free_cnt = 16;
+	}
+	switch (rx_ring->type) {
+	case TX_Q:
+		/* If there's only one interrupt, then we use
+		 * worker threads to process the outbound
+		 * completion handling rx_rings so they can be
+		 * run on multiple CPUs. There is room to improve
+		 * this: only run in a worker when the queue has
+		 * more than some threshold of outbound completions
+		 * and more than one queue is active, i.e. when the
+		 * benefit outweighs the cost of a context switch.
+		 * If there's more than one interrupt, then the
+		 * outbound completions are processed in the ISR.
+		 */
+		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
+			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
+		else {
+			/* With all debug warnings on we see a WARN_ON message
+			 * when we free the skb in the interrupt context.
+			 */
+			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
+		}
+		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+		break;
+	case DEFAULT_Q:
+		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
+		cqicb->irq_delay = 0;
+		cqicb->pkt_delay = 0;
+		break;
+	case RX_Q:
+		/* Inbound completion handling rx_rings run in
+		 * separate NAPI contexts.
+		 */
+		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+			       64);
+		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+		break;
+	default:
+		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
+			rx_ring->type);
+	}
+	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
+	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+			   CFG_LCQ, rx_ring->cq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+		return err;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
+	/*
+	 * Advance the producer index for the buffer queues.
+	 */
+	wmb();
+	if (rx_ring->lbq_len)
+		ql_write_db_reg(rx_ring->lbq_prod_idx,
+				rx_ring->lbq_prod_idx_db_reg);
+	if (rx_ring->sbq_len)
+		ql_write_db_reg(rx_ring->sbq_prod_idx,
+				rx_ring->sbq_prod_idx_db_reg);
+	return err;
+}
+
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+	struct wqicb *wqicb = (struct wqicb *)tx_ring;
+	void __iomem *doorbell_area =
+	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+	    (tx_ring->wq_id * sizeof(u64));
+	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+	    (tx_ring->wq_id * sizeof(u64));
+	int err = 0;
+
+	/*
+	 * Assign doorbell registers for this tx_ring.
+	 */
+	/* TX PCI doorbell mem area for tx producer index */
+	tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
+	tx_ring->prod_idx = 0;
+	/* TX PCI doorbell mem area + 0x04 */
+	tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/*
+	 * Assign shadow registers for this tx_ring.
+	 */
+	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+	wqicb->rid = 0;
+	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
+	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
+
+	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
+	wqicb->cnsmr_idx_addr_hi =
+	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
+
+	ql_init_tx_ring(qdev, tx_ring);
+
+	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+			   (u16) tx_ring->wq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+		return err;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
+	return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		pci_disable_msix(qdev->pdev);
+		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+		kfree(qdev->msi_x_entry);
+		qdev->msi_x_entry = NULL;
+	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		pci_disable_msi(qdev->pdev);
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
+	}
+}
+
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+	int i;
+
+	qdev->intr_count = 1;
+	/* Get the MSIX vectors. */
+	if (irq_type == MSIX_IRQ) {
+		/* Try to alloc space for the msix struct,
+		 * if it fails then go to MSI/legacy.
+		 */
+		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
+					    sizeof(struct msix_entry),
+					    GFP_KERNEL);
+		if (!qdev->msi_x_entry) {
+			irq_type = MSI_IRQ;
+			goto msi;
+		}
+
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->msi_x_entry[i].entry = i;
+
+		if (!pci_enable_msix
+		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
+			set_bit(QL_MSIX_ENABLED, &qdev->flags);
+			qdev->intr_count = qdev->rx_ring_count;
+			QPRINTK(qdev, IFUP, INFO,
+				"MSI-X Enabled, got %d vectors.\n",
+				qdev->intr_count);
+			return;
+		} else {
+			kfree(qdev->msi_x_entry);
+			qdev->msi_x_entry = NULL;
+			QPRINTK(qdev, IFUP, WARNING,
+				"MSI-X Enable failed, trying MSI.\n");
+			irq_type = MSI_IRQ;
+		}
+	}
+msi:
+	if (irq_type == MSI_IRQ) {
+		if (!pci_enable_msi(qdev->pdev)) {
+			set_bit(QL_MSI_ENABLED, &qdev->flags);
+			QPRINTK(qdev, IFUP, INFO,
+				"Running with MSI interrupts.\n");
+			return;
+		}
+	}
+	irq_type = LEG_IRQ;
+	spin_lock_init(&qdev->legacy_lock);
+	qdev->legacy_check = ql_legacy_check;
+	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+}
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
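+ * With MSI-X enabled, vector 0 serves the default queue, the next
+ * tx_ring_count vectors serve the outbound (TX) completion queues,
+ * and the remaining vectors serve the inbound (RSS) queues.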
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+	int i = 0;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_enable_msix(qdev);
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+		/* Each rx_ring has its
+		 * own intr_context since we have separate
+		 * vectors for each queue.
+		 * This is only true when MSI-X is enabled.
+		 */
+		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+			qdev->rx_ring[i].irq = i;
+			intr_context->intr = i;
+			intr_context->qdev = qdev;
+			/*
+			 * We set up each vector's enable/disable/read bits so
+			 * there are no bit/mask calculations in the critical path.
+			 */
+			intr_context->intr_en_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+			    | i;
+			intr_context->intr_dis_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+			    INTR_EN_IHD | i;
+			intr_context->intr_read_mask =
+			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+			    i;
+
+			if (i == 0) {
+				/*
+				 * Default queue handles bcast/mcast plus
+				 * async events.  Needs buffers.
+				 */
+				intr_context->handler = qlge_isr;
+				sprintf(intr_context->name, "%s-default-queue",
+					qdev->ndev->name);
+			} else if (i < qdev->rss_ring_first_cq_id) {
+				/*
+				 * Outbound queue is for outbound completions only.
+				 */
+				intr_context->handler = qlge_msix_tx_isr;
+				sprintf(intr_context->name, "%s-txq-%d",
+					qdev->ndev->name, i);
+			} else {
+				/*
+				 * Inbound queues handle unicast frames only.
+				 */
+				intr_context->handler = qlge_msix_rx_isr;
+				sprintf(intr_context->name, "%s-rxq-%d",
+					qdev->ndev->name, i);
+			}
+		}
+	} else {
+		/*
+		 * All rx_rings use the same intr_context since
+		 * there is only one vector.
+		 */
+		intr_context->intr = 0;
+		intr_context->qdev = qdev;
+		/*
+		 * We set up each vector's enable/disable/read bits so
+		 * there are no bit/mask calculations in the critical path.
+		 */
+		intr_context->intr_en_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+		intr_context->intr_dis_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+		    INTR_EN_TYPE_DISABLE;
+		intr_context->intr_read_mask =
+		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+		/*
+		 * Single interrupt means one handler for all rings.
+		 */
+		intr_context->handler = qlge_isr;
+		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->rx_ring[i].irq = 0;
+	}
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+	int i;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		if (intr_context->hooked) {
+			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+				free_irq(qdev->msi_x_entry[i].vector,
+					 &qdev->rx_ring[i]);
+				QPRINTK(qdev, IFDOWN, ERR,
+					"freeing msix interrupt %d.\n", i);
+			} else {
+				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+				QPRINTK(qdev, IFDOWN, ERR,
+					"freeing msi interrupt %d.\n", i);
+			}
+		}
+	}
+	ql_disable_msix(qdev);
+}
+
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+	int i;
+	int status = 0;
+	struct pci_dev *pdev = qdev->pdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_resolve_queues_to_irqs(qdev);
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		atomic_set(&intr_context->irq_cnt, 0);
+		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+			status = request_irq(qdev->msi_x_entry[i].vector,
+					     intr_context->handler,
+					     0,
+					     intr_context->name,
+					     &qdev->rx_ring[i]);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed request for MSIX interrupt %d.\n",
+					i);
+				goto err_irq;
+			} else {
+				QPRINTK(qdev, IFUP, INFO,
+					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
+					i,
+					qdev->rx_ring[i].type ==
+					DEFAULT_Q ? "DEFAULT_Q" : "",
+					qdev->rx_ring[i].type ==
+					TX_Q ? "TX_Q" : "",
+					qdev->rx_ring[i].type ==
+					RX_Q ? "RX_Q" : "", intr_context->name);
+			}
+		} else {
+			QPRINTK(qdev, IFUP, DEBUG,
+				"trying msi or legacy interrupts.\n");
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: irq = %d.\n", __func__, pdev->irq);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: context->name = %s.\n", __func__,
+			       intr_context->name);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: dev_id = 0x%p.\n", __func__,
+			       &qdev->rx_ring[0]);
+			status =
+			    request_irq(pdev->irq, qlge_isr,
+					test_bit(QL_MSI_ENABLED,
+						 &qdev->
+						 flags) ? 0 : IRQF_SHARED,
+					intr_context->name, &qdev->rx_ring[0]);
+			if (status)
+				goto err_irq;
+
+			QPRINTK(qdev, IFUP, ERR,
+				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
+				i,
+				qdev->rx_ring[0].type ==
+				DEFAULT_Q ? "DEFAULT_Q" : "",
+				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
+				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+				intr_context->name);
+		}
+		intr_context->hooked = 1;
+	}
+	return status;
+err_irq:
+	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
+	ql_free_irq(qdev);
+	return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+	struct ricb *ricb = &qdev->ricb;
+	int status = 0;
+	int i;
+	u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+	memset((void *)ricb, 0, sizeof(*ricb));
+
+	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
+	ricb->flags =
+	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
+	     RSS_RT6);
+	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
+
+	/*
+	 * Fill out the Indirection Table.
+	 */
+	for (i = 0; i < 32; i++)
+		hash_id[i] = i & 1;
+
+	/*
+	 * Random values for the IPv6 and IPv4 Hash Keys.
+	 */
+	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
+	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
+
+	QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
+
+	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+		return status;
+	}
+	QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
+	return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	int i;
+
+	/* Clear all the entries in the routing table. */
+	for (i = 0; i < 16; i++) {
+		status = ql_set_routing_reg(qdev, i, 0, 0);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for CAM packets.\n");
+			return status;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for error packets.\n");
+		return status;
+	}
+	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for broadcast packets.\n");
+		return status;
+	}
+	/* If we have more than one inbound queue, then turn on RSS in the
+	 * routing block.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+					RT_IDX_RSS_MATCH, 1);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for MATCH RSS packets.\n");
+			return status;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+				    RT_IDX_CAM_HIT, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for CAM packets.\n");
+		return status;
+	}
+	return status;
+}
+
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+	u32 value, mask;
+	int i;
+	int status = 0;
+
+	/*
+	 * Set up the System register to halt on errors.
+	 */
+	value = SYS_EFE | SYS_FAE;
+	mask = value << 16;
+	ql_write32(qdev, SYS, mask | value);
+
+	/* Set the default queue. */
+	value = NIC_RCV_CFG_DFQ;
+	mask = NIC_RCV_CFG_DFQ_MASK;
+	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+	/* Set the MPI interrupt to enabled. */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+	/* Enable the function, set pagesize, enable error checking. */
+	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+
+	/* Set/clear header splitting. */
+	mask = FSC_VM_PAGESIZE_MASK |
+	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+	ql_write32(qdev, FSC, mask | value);
+
+	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
+		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+
+	/* Start up the rx queues. */
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start rx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* If there is more than one inbound completion queue
+	 * then download a RICB to configure RSS.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_start_rss(qdev);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+			return status;
+		}
+	}
+
+	/* Start up the tx queues. */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start tx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	status = ql_port_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+		return status;
+	}
+
+	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
+				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+		return status;
+	}
+
+	status = ql_route_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+		return status;
+	}
+
+	/* Start NAPI for the RSS queues. */
+	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
+		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
+			i);
+		napi_enable(&qdev->rx_ring[i].napi);
+	}
+
+	return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+	u32 value;
+	int max_wait_time;
+	int status = 0;
+	int resetCnt = 0;
+
+#define MAX_RESET_CNT   1
+issueReset:
+	resetCnt++;
+	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
+	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+	/* Wait for reset to complete. */
+	max_wait_time = 3;
+	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
+		max_wait_time);
+	do {
+		value = ql_read32(qdev, RST_FO);
+		if ((value & RST_FO_FR) == 0)
+			break;
+
+		ssleep(1);
+	} while ((--max_wait_time));
+	if (value & RST_FO_FR) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
+		if (resetCnt < MAX_RESET_CNT)
+			goto issueReset;
+	}
+	if (max_wait_time == 0) {
+		status = -ETIMEDOUT;
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Timed out waiting for the chip reset to complete!\n");
+	}
+
+	return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+
+	QPRINTK(qdev, PROBE, INFO,
+		"Function #%d, NIC Roll %d, NIC Rev = %d, "
+		"XG Roll = %d, XG Rev = %d.\n",
+		qdev->func,
+		qdev->chip_rev_id & 0x0000000f,
+		qdev->chip_rev_id >> 4 & 0x0000000f,
+		qdev->chip_rev_id >> 8 & 0x0000000f,
+		qdev->chip_rev_id >> 12 & 0x0000000f);
+	QPRINTK(qdev, PROBE, INFO,
+		"MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+		ndev->dev_addr[0], ndev->dev_addr[1],
+		ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
+		ndev->dev_addr[5]);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+	struct net_device *ndev = qdev->ndev;
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+
+	netif_stop_queue(ndev);
+	netif_carrier_off(ndev);
+
+	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+
+	/* The default queue at index 0 is always processed in
+	 * a workqueue.
+	 */
+	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
+
+	/* The rest of the rx_rings are processed in
+	 * a workqueue only if it's a single interrupt
+	 * environment (MSI/Legacy).
+	 */
+	for (i = 1; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Only the RSS rings use NAPI in a multi-irq
+		 * environment.  Outbound completion processing
+		 * is done in interrupt context.
+		 */
+		if (i >= qdev->rss_ring_first_cq_id) {
+			napi_disable(&rx_ring->napi);
+		} else {
+			cancel_delayed_work_sync(&rx_ring->rx_work);
+		}
+	}
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+	ql_disable_interrupts(qdev);
+
+	ql_tx_ring_clean(qdev);
+
+	spin_lock(&qdev->hw_lock);
+	status = ql_adapter_reset(qdev);
+	if (status)
+		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
+			qdev->func);
+	spin_unlock(&qdev->hw_lock);
+	return status;
+}
+
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+	int err = 0;
+
+	spin_lock(&qdev->hw_lock);
+	err = ql_adapter_initialize(qdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+		spin_unlock(&qdev->hw_lock);
+		goto err_init;
+	}
+	spin_unlock(&qdev->hw_lock);
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_enable_interrupts(qdev);
+	ql_enable_all_completion_interrupts(qdev);
+	if ((ql_read32(qdev, STS) & qdev->port_init)) {
+		netif_carrier_on(qdev->ndev);
+		netif_start_queue(qdev->ndev);
+	}
+
+	return 0;
+err_init:
+	ql_adapter_reset(qdev);
+	return err;
+}
+
+static int ql_cycle_adapter(struct ql_adapter *qdev)
+{
+	int status;
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device\n");
+	rtnl_lock();
+	dev_close(qdev->ndev);
+	rtnl_unlock();
+	return status;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+	ql_free_mem_resources(qdev);
+	ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+	int status = 0;
+
+	if (ql_alloc_mem_resources(qdev)) {
+		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+		return -ENOMEM;
+	}
+	status = ql_request_irq(qdev);
+	if (status)
+		goto err_irq;
+	return status;
+err_irq:
+	ql_free_mem_resources(qdev);
+	return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/*
+	 * Wait for device to recover from a reset.
+	 * (Rarely happens, but possible.)
+	 */
+	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+		msleep(1);
+	ql_adapter_down(qdev);
+	ql_release_adapter_resources(qdev);
+	ql_free_ring_cb(qdev);
+	return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+	struct tx_ring *tx_ring;
+	int cpu_cnt = num_online_cpus();
+
+	/*
+	 * For each processor present we allocate one
+	 * rx_ring for outbound completions, and one
+	 * rx_ring for inbound completions.  Plus there is
+	 * always the one default queue.  For the CPU
+	 * counts we end up with the following rx_rings:
+	 * rx_ring count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 * To keep it simple we limit the total number of
+	 * queues to < 32, so we truncate CPU to 8.
+	 * This limitation can be removed when requested.
+	 */
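+	/* For example, with four online CPUs this works out to
+	 * 1 default + 4 outbound + 4 inbound (RSS) = 9 rx_rings
+	 * (plus 4 tx_rings).
+	 */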
+
+	if (cpu_cnt > 8)
+		cpu_cnt = 8;
+
+	/*
+	 * rx_ring[0] is always the default queue.
+	 */
+	/* Allocate outbound completion ring for each CPU. */
+	qdev->tx_ring_count = cpu_cnt;
+	/* Allocate inbound completion (RSS) ring for each CPU. */
+	qdev->rss_ring_count = cpu_cnt;
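+	/* cq 0 is the default queue and cqs 1..cpu_cnt serve the tx
+	 * completions, so the first inbound (RSS) cq follows at cpu_cnt + 1.
+	 */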
+	/* cq_id for the first inbound ring handler. */
+	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
+	/*
+	 * qdev->rx_ring_count:
+	 * Total number of rx_rings.  This includes the one
+	 * default queue, a number of outbound completion
+	 * handler rx_rings, and the number of inbound
+	 * completion handler rx_rings.
+	 */
+	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+
+	if (ql_alloc_ring_cb(qdev))
+		return -ENOMEM;
+
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		tx_ring = &qdev->tx_ring[i];
+		memset((void *)tx_ring, 0, sizeof(*tx_ring));
+		tx_ring->qdev = qdev;
+		tx_ring->wq_id = i;
+		tx_ring->wq_len = qdev->tx_ring_size;
+		tx_ring->wq_size =
+		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+		/*
+		 * The completion queue IDs for the tx rings start
+		 * immediately after the default Q ID, which is zero.
+		 */
+		tx_ring->cq_id = i + 1;
+	}
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		memset((void *)rx_ring, 0, sizeof(*rx_ring));
+		rx_ring->qdev = qdev;
+		rx_ring->cq_id = i;
+		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
+		if (i == 0) {	/* Default queue at index 0. */
+			/*
+			 * Default queue handles bcast/mcast plus
+			 * async events.  Needs buffers.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+			    rx_ring->lbq_len * sizeof(struct bq_element);
+			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+			    rx_ring->sbq_len * sizeof(struct bq_element);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+			rx_ring->type = DEFAULT_Q;
+		} else if (i < qdev->rss_ring_first_cq_id) {
+			/*
+			 * Outbound queue handles outbound completions only.
+			 */
+			/* outbound cq is same size as tx_ring it services. */
+			rx_ring->cq_len = qdev->tx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = 0;
+			rx_ring->lbq_size = 0;
+			rx_ring->lbq_buf_size = 0;
+			rx_ring->sbq_len = 0;
+			rx_ring->sbq_size = 0;
+			rx_ring->sbq_buf_size = 0;
+			rx_ring->type = TX_Q;
+		} else {	/* Inbound completions (RSS) queues */
+			/*
+			 * Inbound queues handle unicast frames only.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size =
+			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+			    rx_ring->lbq_len * sizeof(struct bq_element);
+			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+			    rx_ring->sbq_len * sizeof(struct bq_element);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+			rx_ring->type = RX_Q;
+		}
+	}
+	return 0;
+}
+
+static int qlge_open(struct net_device *ndev)
+{
+	int err = 0;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	err = ql_configure_rings(qdev);
+	if (err)
+		return err;
+
+	err = ql_get_adapter_resources(qdev);
+	if (err)
+		goto error_up;
+
+	err = ql_adapter_up(qdev);
+	if (err)
+		goto error_up;
+
+	return err;
+
+error_up:
+	ql_release_adapter_resources(qdev);
+	ql_free_ring_cb(qdev);
+	return err;
+}
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (ndev->mtu == 1500 && new_mtu == 9000) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
+	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
+		   (ndev->mtu == 9000 && new_mtu == 9000)) {
+		return 0;
+	} else
+		return -EINVAL;
+	ndev->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device
+					       *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return &qdev->stats;
+}
+
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct dev_mc_list *mc_ptr;
+	int i;
+
+	spin_lock(&qdev->hw_lock);
+	/*
+	 * Set or clear promiscuous mode if a
+	 * transition is taking place.
+	 */
+	if (ndev->flags & IFF_PROMISC) {
+		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set promiscuous mode.\n");
+			} else {
+				set_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear promiscuous mode.\n");
+			} else {
+				clear_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	}
+
+	/*
+	 * Set or clear all multicast mode if a
+	 * transition is taking place.
+	 */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set all-multi mode.\n");
+			} else {
+				set_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear all-multi mode.\n");
+			} else {
+				clear_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	}
+
+	if (ndev->mc_count) {
+		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
+		     i++, mc_ptr = mc_ptr->next)
+			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+						MAC_ADDR_TYPE_MULTI_MAC, i)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to load multicast address.\n");
+				goto exit;
+			}
+		if (ql_set_routing_reg
+		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+			QPRINTK(qdev, HW, ERR,
+				"Failed to set multicast match mode.\n");
+		} else {
+			set_bit(QL_ALLMULTI, &qdev->flags);
+		}
+	}
+exit:
+	spin_unlock(&qdev->hw_lock);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct sockaddr *addr = p;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+	spin_lock(&qdev->hw_lock);
+	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
+		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+		spin_unlock(&qdev->hw_lock);
+		return -1;
+	}
+	spin_unlock(&qdev->hw_lock);
+
+	return 0;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, asic_reset_work.work);
+	ql_cycle_adapter(qdev);
+}
+
+static void ql_get_board_info(struct ql_adapter *qdev)
+{
+	qdev->func =
+	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+	if (qdev->func) {
+		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+		qdev->port_link_up = STS_PL1;
+		qdev->port_init = STS_PI1;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+	} else {
+		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+		qdev->port_link_up = STS_PL0;
+		qdev->port_init = STS_PI0;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+	}
+	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+}
+
+static void ql_release_all(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (qdev->workqueue) {
+		destroy_workqueue(qdev->workqueue);
+		qdev->workqueue = NULL;
+	}
+	if (qdev->q_workqueue) {
+		destroy_workqueue(qdev->q_workqueue);
+		qdev->q_workqueue = NULL;
+	}
+	if (qdev->reg_base)
+		iounmap((void *)qdev->reg_base);
+	if (qdev->doorbell_area)
+		iounmap(qdev->doorbell_area);
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int __devinit ql_init_device(struct pci_dev *pdev,
+				    struct net_device *ndev, int cards_found)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int pos, err = 0;
+	u16 val16;
+
+	memset((void *)qdev, 0, sizeof(*qdev));
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "PCI device enable failed.\n");
+		return err;
+	}
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pos <= 0) {
+		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
+			"aborting.\n");
+		goto err_out;
+	} else {
+		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+		val16 |= (PCI_EXP_DEVCTL_CERE |
+			  PCI_EXP_DEVCTL_NFERE |
+			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
+		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "PCI region request failed.\n");
+		goto err_out;
+	}
+
+	pci_set_master(pdev);
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		set_bit(QL_DMA64, &qdev->flags);
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (!err)
+		       err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+
+	if (err) {
+		dev_err(&pdev->dev, "No usable DMA configuration.\n");
+		goto err_out;
+	}
+
+	pci_set_drvdata(pdev, ndev);
+	qdev->reg_base =
+	    ioremap_nocache(pci_resource_start(pdev, 1),
+			    pci_resource_len(pdev, 1));
+	if (!qdev->reg_base) {
+		dev_err(&pdev->dev, "Register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+	qdev->doorbell_area =
+	    ioremap_nocache(pci_resource_start(pdev, 3),
+			    pci_resource_len(pdev, 3));
+	if (!qdev->doorbell_area) {
+		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	ql_get_board_info(qdev);
+	qdev->ndev = ndev;
+	qdev->pdev = pdev;
+	qdev->msg_enable = netif_msg_init(debug, default_msg);
+	spin_lock_init(&qdev->hw_lock);
+	spin_lock_init(&qdev->stats_lock);
+
+	/* make sure the EEPROM is good */
+	err = ql_get_flash_params(qdev);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid FLASH.\n");
+		goto err_out;
+	}
+
+	if (!is_valid_ether_addr(qdev->flash.mac_addr))
+		goto err_out;
+
+	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	/* Set up the default ring sizes. */
+	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+	/* Set up the coalescing parameters. */
+	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+	/*
+	 * Set up the operating parameters.
+	 */
+	qdev->rx_csum = 1;
+
+	qdev->q_workqueue = create_workqueue(ndev->name);
+	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+
+	if (!cards_found) {
+		dev_info(&pdev->dev, "%s\n", DRV_STRING);
+		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+			 DRV_NAME, DRV_VERSION);
+	}
+	return 0;
+err_out:
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	return err;
+}
+
+static int __devinit qlge_probe(struct pci_dev *pdev,
+				const struct pci_device_id *pci_entry)
+{
+	struct net_device *ndev = NULL;
+	struct ql_adapter *qdev = NULL;
+	static int cards_found = 0;
+	int err = 0;
+
+	ndev = alloc_etherdev(sizeof(struct ql_adapter));
+	if (!ndev)
+		return -ENOMEM;
+
+	err = ql_init_device(pdev, ndev, cards_found);
+	if (err < 0) {
+		free_netdev(ndev);
+		return err;
+	}
+
+	qdev = netdev_priv(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ndev->features = (0
+			  | NETIF_F_IP_CSUM
+			  | NETIF_F_SG
+			  | NETIF_F_TSO
+			  | NETIF_F_TSO6
+			  | NETIF_F_TSO_ECN
+			  | NETIF_F_HW_VLAN_TX
+			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+
+	if (test_bit(QL_DMA64, &qdev->flags))
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	/*
+	 * Set up net_device structure.
+	 */
+	ndev->tx_queue_len = qdev->tx_ring_size;
+	ndev->irq = pdev->irq;
+	ndev->open = qlge_open;
+	ndev->stop = qlge_close;
+	ndev->hard_start_xmit = qlge_send;
+	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+	ndev->change_mtu = qlge_change_mtu;
+	ndev->get_stats = qlge_get_stats;
+	ndev->set_multicast_list = qlge_set_multicast_list;
+	ndev->set_mac_address = qlge_set_mac_address;
+	ndev->tx_timeout = qlge_tx_timeout;
+	ndev->watchdog_timeo = 10 * HZ;
+	ndev->vlan_rx_register = ql_vlan_rx_register;
+	ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
+	ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
+	err = register_netdev(ndev);
+	if (err) {
+		dev_err(&pdev->dev, "net device registration failed.\n");
+		ql_release_all(pdev);
+		pci_disable_device(pdev);
+		return err;
+	}
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	ql_display_dev_info(ndev);
+	cards_found++;
+	return 0;
+}
+
+static void __devexit qlge_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	unregister_netdev(ndev);
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	free_netdev(ndev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+					       enum pci_channel_state state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev))
+		ql_adapter_down(qdev);
+
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the probe routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (pci_enable_device(pdev)) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	ql_adapter_reset(qdev);
+
+	/* Make sure the EEPROM is good */
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	if (!is_valid_ether_addr(ndev->perm_addr)) {
+		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	pci_set_master(pdev);
+
+	if (netif_running(ndev)) {
+		if (ql_adapter_up(qdev)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Device initialization failed after reset.\n");
+			return;
+		}
+	}
+
+	netif_device_attach(ndev);
+}
+
+static struct pci_error_handlers qlge_err_handler = {
+	.error_detected = qlge_io_error_detected,
+	.slot_reset = qlge_io_slot_reset,
+	.resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	netif_device_detach(ndev);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_down(qdev);
+		if (err)
+			return err;
+	}
+
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_up(qdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(ndev);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+	qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+	.name = DRV_NAME,
+	.id_table = qlge_pci_tbl,
+	.probe = qlge_probe,
+	.remove = __devexit_p(qlge_remove),
+#ifdef CONFIG_PM
+	.suspend = qlge_suspend,
+	.resume = qlge_resume,
+#endif
+	.shutdown = qlge_shutdown,
+	.err_handler = &qlge_err_handler
+};
+
+static int __init qlge_init_module(void)
+{
+	return pci_register_driver(&qlge_driver);
+}
+
+static void __exit qlge_exit(void)
+{
+	pci_unregister_driver(&qlge_driver);
+}
+
+module_init(qlge_init_module);
+module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 0000000..24fe344
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,150 @@
+#include "qlge.h"
+
+static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, PROC_DATA);
+exit:
+	return status;
+}
+
+int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return -EBUSY;
+	for (i = 0; i < mbcp->out_count; i++) {
+		status =
+		    ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
+				     &mbcp->mbox_out[i]);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
+	return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 2;
+
+	if (ql_get_mb_sts(qdev, mbcp))
+		goto exit;
+
+	qdev->link_status = mbcp->mbox_out[1];
+	QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+	QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
+	if (!netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
+		netif_carrier_on(qdev->ndev);
+		netif_wake_queue(qdev->ndev);
+	}
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 3;
+
+	if (ql_get_mb_sts(qdev, mbcp)) {
+		QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+		goto exit;
+	}
+
+	if (netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
+		netif_carrier_off(qdev->ndev);
+		netif_stop_queue(qdev->ndev);
+	}
+	QPRINTK(qdev, DRV, ERR, "Link Down.\n");
+	QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	mbcp->out_count = 2;
+
+	if (ql_get_mb_sts(qdev, mbcp)) {
+		QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+		goto exit;
+	}
+	QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
+	QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
+		mbcp->mbox_out[0]);
+	QPRINTK(qdev, DRV, ERR, "Firmware Revision  = 0x%.08x.\n",
+		mbcp->mbox_out[1]);
+exit:
+	/* Clear the MPI firmware status. */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+}
+
+void ql_mpi_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, mpi_work.work);
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	mbcp->out_count = 1;
+
+	while (ql_read32(qdev, STS) & STS_PI) {
+		if (ql_get_mb_sts(qdev, mbcp)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Could not read MPI, resetting ASIC!\n");
+			ql_queue_asic_error(qdev);
+		}
+
+		switch (mbcp->mbox_out[0]) {
+		case AEN_LINK_UP:
+			ql_link_up(qdev, mbcp);
+			break;
+		case AEN_LINK_DOWN:
+			ql_link_down(qdev, mbcp);
+			break;
+		case AEN_FW_INIT_DONE:
+			ql_init_fw_done(qdev, mbcp);
+			break;
+		case MB_CMD_STS_GOOD:
+			break;
+		case AEN_FW_INIT_FAIL:
+		case AEN_SYS_ERR:
+		case MB_CMD_STS_ERR:
+			ql_queue_fw_error(qdev);
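+			/* Fall through to clear the MPI firmware status. */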
+		default:
+			/* Clear the MPI firmware status. */
+			ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+			break;
+		}
+	}
+	ql_enable_completion_interrupt(qdev, 0);
+}
+
+void ql_mpi_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+	    container_of(work, struct ql_adapter, mpi_reset_work.work);
+	QPRINTK(qdev, DRV, ERR,
+		"Enter, qdev = %p.\n", qdev);
+	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+	msleep(50);
+	ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281..34fe7ef 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -265,7 +265,7 @@
 				le32_to_cpu(lp->tx_insert_ptr->buf),
 				MAX_BUF_SIZE, PCI_DMA_TODEVICE);
 			dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
-			lp->rx_insert_ptr->skb_ptr = NULL;
+			lp->tx_insert_ptr->skb_ptr = NULL;
 		}
 		lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
 	}
@@ -370,7 +370,7 @@
 	/* Reset internal state machine */
 	iowrite16(2, ioaddr + MAC_SM);
 	iowrite16(0, ioaddr + MAC_SM);
-	udelay(5000);
+	mdelay(5);
 
 	/* MAC Bus Control Register */
 	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -806,7 +806,7 @@
 	iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
 	iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
 	iowrite16(0, ioaddr + MAC_SM);
-	udelay(5000);
+	mdelay(5);
 
 	/* Restore MAC Address */
 	adrp = (u16 *) dev->dev_addr;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0f6f974..fb899c6 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -36,7 +36,7 @@
 #define assert(expr) \
 	if (!(expr)) {					\
 		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
-		#expr,__FILE__,__FUNCTION__,__LINE__);		\
+		#expr,__FILE__,__func__,__LINE__);		\
 	}
 #define dprintk(fmt, args...) \
 	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
@@ -61,6 +61,7 @@
 /* MAC address length */
 #define MAC_ADDR_LEN	6
 
+#define MAX_READ_REQUEST_SHIFT	12
 #define RX_FIFO_THRESH	7	/* 7 means NO threshold, Rx buffer level before first PCI xfer. */
 #define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
 #define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
@@ -95,6 +96,10 @@
 	RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
 	RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
 	RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
+	RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
+	RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
+	RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
+	RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
 	RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
 	RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
 	RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
@@ -121,6 +126,10 @@
 	_R("RTL8169sb/8110sb",	RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
 	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
 	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
+	_R("RTL8102e",		RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
+	_R("RTL8102e",		RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
+	_R("RTL8102e",		RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
+	_R("RTL8101e",		RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
 	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
 	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
 	_R("RTL8101e",		RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
@@ -196,9 +205,6 @@
 	Config5		= 0x56,
 	MultiIntr	= 0x5c,
 	PHYAR		= 0x60,
-	TBICSR		= 0x64,
-	TBI_ANAR	= 0x68,
-	TBI_LPAR	= 0x6a,
 	PHYstatus	= 0x6c,
 	RxMaxSize	= 0xda,
 	CPlusCmd	= 0xe0,
@@ -212,6 +218,32 @@
 	FuncForceEvent	= 0xfc,
 };
 
+enum rtl8110_registers {
+	TBICSR			= 0x64,
+	TBI_ANAR		= 0x68,
+	TBI_LPAR		= 0x6a,
+};
+
+enum rtl8168_8101_registers {
+	CSIDR			= 0x64,
+	CSIAR			= 0x68,
+#define	CSIAR_FLAG			0x80000000
+#define	CSIAR_WRITE_CMD			0x80000000
+#define	CSIAR_BYTE_ENABLE		0x0f
+#define	CSIAR_BYTE_ENABLE_SHIFT		12
+#define	CSIAR_ADDR_MASK			0x0fff
+
+	EPHYAR			= 0x80,
+#define	EPHYAR_FLAG			0x80000000
+#define	EPHYAR_WRITE_CMD		0x80000000
+#define	EPHYAR_REG_MASK			0x1f
+#define	EPHYAR_REG_SHIFT		16
+#define	EPHYAR_DATA_MASK		0xffff
+	DBG_REG			= 0xd1,
+#define	FIX_NAK_1			(1 << 4)
+#define	FIX_NAK_2			(1 << 3)
+};
+
 enum rtl_register_content {
 	/* InterruptStatusBits */
 	SYSErr		= 0x8000,
@@ -265,7 +297,13 @@
 	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */
 
 	/* Config1 register p.24 */
+	LEDS1		= (1 << 7),
+	LEDS0		= (1 << 6),
 	MSIEnable	= (1 << 5),	/* Enable Message Signaled Interrupt */
+	Speed_down	= (1 << 4),
+	MEMMAP		= (1 << 3),
+	IOMAP		= (1 << 2),
+	VPD		= (1 << 1),
 	PMEnable	= (1 << 0),	/* Power Management Enable */
 
 	/* Config2 register p. 25 */
@@ -275,6 +313,7 @@
 	/* Config3 register p.25 */
 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
+	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
 
 	/* Config5 register p.27 */
 	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
@@ -292,7 +331,16 @@
 	TBINwComplete	= 0x01000000,
 
 	/* CPlusCmd p.31 */
-	PktCntrDisable	= (1 << 7),	// 8168
+	EnableBist	= (1 << 15),	// 8168 8101
+	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
+	Normal_mode	= (1 << 13),	// unused
+	Force_half_dup	= (1 << 12),	// 8168 8101
+	Force_rxflow_en	= (1 << 11),	// 8168 8101
+	Force_txflow_en	= (1 << 10),	// 8168 8101
+	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
+	ASF		= (1 << 8),	// 8168 8101
+	PktCntrDisable	= (1 << 7),	// 8168 8101
+	Mac_dbgo_sel	= 0x001c,	// 8168
 	RxVlan		= (1 << 6),
 	RxChkSum	= (1 << 5),
 	PCIDAC		= (1 << 4),
@@ -370,8 +418,9 @@
 };
 
 enum features {
-	RTL_FEATURE_WOL	= (1 << 0),
-	RTL_FEATURE_MSI	= (1 << 1),
+	RTL_FEATURE_WOL		= (1 << 0),
+	RTL_FEATURE_MSI		= (1 << 1),
+	RTL_FEATURE_GMII	= (1 << 2),
 };
 
 struct rtl8169_private {
@@ -406,13 +455,16 @@
 	struct vlan_group *vlgrp;
 #endif
 	int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
-	void (*get_settings)(struct net_device *, struct ethtool_cmd *);
+	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
 	void (*phy_reset_enable)(void __iomem *);
 	void (*hw_start)(struct net_device *);
 	unsigned int (*phy_reset_pending)(void __iomem *);
 	unsigned int (*link_ok)(void __iomem *);
+	int pcie_cap;
 	struct delayed_work task;
 	unsigned features;
+
+	struct mii_if_info mii;
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -482,6 +534,94 @@
 	return value;
 }
 
+static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
+{
+	mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
+}
+
+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
+			   int val)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	mdio_write(ioaddr, location, val);
+}
+
+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	return mdio_read(ioaddr, location);
+}
+
+static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
+		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+{
+	u16 value = 0xffff;
+	unsigned int i;
+
+	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
+			value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
+static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(CSIDR, value);
+	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
+{
+	u32 value = ~0x00;
+	unsigned int i;
+
+	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+			value = RTL_R32(CSIDR);
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
 {
 	RTL_W16(IntrMask, 0x0000);
@@ -705,8 +845,12 @@
 		}
 	}
 
-	/* The 8100e/8101e do Fast Ethernet only. */
-	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
+	/* The 8100e/8101e/8102e do Fast Ethernet only. */
+	if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
@@ -850,7 +994,7 @@
 
 #endif
 
-static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -867,65 +1011,29 @@
 
 	cmd->speed = SPEED_1000;
 	cmd->duplex = DUPLEX_FULL; /* Always set */
+
+	return 0;
 }
 
-static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
-	u8 status;
 
-	cmd->supported = SUPPORTED_10baseT_Half |
-			 SUPPORTED_10baseT_Full |
-			 SUPPORTED_100baseT_Half |
-			 SUPPORTED_100baseT_Full |
-			 SUPPORTED_1000baseT_Full |
-			 SUPPORTED_Autoneg |
-			 SUPPORTED_TP;
-
-	cmd->autoneg = 1;
-	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
-
-	if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
-		cmd->advertising |= ADVERTISED_10baseT_Half;
-	if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
-		cmd->advertising |= ADVERTISED_10baseT_Full;
-	if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
-		cmd->advertising |= ADVERTISED_100baseT_Half;
-	if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
-		cmd->advertising |= ADVERTISED_100baseT_Full;
-	if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
-		cmd->advertising |= ADVERTISED_1000baseT_Full;
-
-	status = RTL_R8(PHYstatus);
-
-	if (status & _1000bpsF)
-		cmd->speed = SPEED_1000;
-	else if (status & _100bps)
-		cmd->speed = SPEED_100;
-	else if (status & _10bps)
-		cmd->speed = SPEED_10;
-
-	if (status & TxFlowCtrl)
-		cmd->advertising |= ADVERTISED_Asym_Pause;
-	if (status & RxFlowCtrl)
-		cmd->advertising |= ADVERTISED_Pause;
-
-	cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
-		      DUPLEX_FULL : DUPLEX_HALF;
+	return mii_ethtool_gset(&tp->mii, cmd);
 }
 
 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	unsigned long flags;
+	int rc;
 
 	spin_lock_irqsave(&tp->lock, flags);
 
-	tp->get_settings(dev, cmd);
+	rc = tp->get_settings(dev, cmd);
 
 	spin_unlock_irqrestore(&tp->lock, flags);
-	return 0;
+	return rc;
 }
 
 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1116,8 +1224,17 @@
 		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },
 
 		/* 8101 family. */
+		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
+		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
+		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
+		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
+		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
+		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
 		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
+		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
 		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
+		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
+		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
 		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
 		/* FIXME: where did these entries come from ? -- FR */
 		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
@@ -1279,6 +1396,22 @@
 	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
+static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
+{
+	struct phy_reg phy_reg_init[] = {
+		{ 0x1f, 0x0003 },
+		{ 0x08, 0x441d },
+		{ 0x01, 0x9100 },
+		{ 0x1f, 0x0000 }
+	};
+
+	mdio_write(ioaddr, 0x1f, 0x0000);
+	mdio_patch(ioaddr, 0x11, 1 << 12);
+	mdio_patch(ioaddr, 0x19, 1 << 13);
+
+	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
 static void rtl_hw_phy_config(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -1296,6 +1429,11 @@
 	case RTL_GIGA_MAC_VER_04:
 		rtl8169sb_hw_phy_config(ioaddr);
 		break;
+	case RTL_GIGA_MAC_VER_07:
+	case RTL_GIGA_MAC_VER_08:
+	case RTL_GIGA_MAC_VER_09:
+		rtl8102e_hw_phy_config(ioaddr);
+		break;
 	case RTL_GIGA_MAC_VER_18:
 		rtl8168cp_hw_phy_config(ioaddr);
 		break;
@@ -1513,7 +1651,7 @@
 	unsigned int align;
 	u16 intr_event;
 	u16 napi_event;
-	unsigned msi;
+	unsigned features;
 } rtl_cfg_infos [] = {
 	[RTL_CFG_0] = {
 		.hw_start	= rtl_hw_start_8169,
@@ -1522,7 +1660,7 @@
 		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
-		.msi		= 0
+		.features	= RTL_FEATURE_GMII
 	},
 	[RTL_CFG_1] = {
 		.hw_start	= rtl_hw_start_8168,
@@ -1531,7 +1669,7 @@
 		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
-		.msi		= RTL_FEATURE_MSI
+		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI
 	},
 	[RTL_CFG_2] = {
 		.hw_start	= rtl_hw_start_8101,
@@ -1540,7 +1678,7 @@
 		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
 				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
-		.msi		= RTL_FEATURE_MSI
+		.features	= RTL_FEATURE_MSI
 	}
 };
 
@@ -1552,7 +1690,7 @@
 	u8 cfg2;
 
 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
-	if (cfg->msi) {
+	if (cfg->features & RTL_FEATURE_MSI) {
 		if (pci_enable_msi(pdev)) {
 			dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
 		} else {
@@ -1578,6 +1716,7 @@
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 	const unsigned int region = cfg->region;
 	struct rtl8169_private *tp;
+	struct mii_if_info *mii;
 	struct net_device *dev;
 	void __iomem *ioaddr;
 	unsigned int i;
@@ -1602,6 +1741,14 @@
 	tp->pci_dev = pdev;
 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
+	mii = &tp->mii;
+	mii->dev = dev;
+	mii->mdio_read = rtl_mdio_read;
+	mii->mdio_write = rtl_mdio_write;
+	mii->phy_id_mask = 0x1f;
+	mii->reg_num_mask = 0x1f;
+	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	rc = pci_enable_device(pdev);
 	if (rc < 0) {
@@ -1670,6 +1817,10 @@
 		goto err_out_free_res_4;
 	}
 
+	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!tp->pcie_cap && netif_msg_probe(tp))
+		dev_info(&pdev->dev, "no PCI Express capability\n");
+
 	/* Unneeded ? Don't mess with Mrs. Murphy. */
 	rtl8169_irq_mask_and_ack(ioaddr);
 
@@ -2061,12 +2212,51 @@
 	RTL_W16(IntrMask, tp->intr_event);
 }
 
+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+	int cap = tp->pcie_cap;
+
+	if (cap) {
+		u16 ctl;
+
+		pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+		ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
+		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+	}
+}
+
+static void rtl_csi_access_enable(void __iomem *ioaddr)
+{
+	u32 csi;
+
+	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
+	rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
+}
+
+struct ephy_info {
+	unsigned int offset;
+	u16 mask;
+	u16 bits;
+};
+
+static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
+{
+	u16 w;
+
+	while (len-- > 0) {
+		w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
+		rtl_ephy_write(ioaddr, e->offset, w);
+		e++;
+	}
+}
+
 static void rtl_hw_start_8168(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
-	u8 ctl;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
@@ -2080,10 +2270,7 @@
 
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 
-	/* Tx performance tweak. */
-	pci_read_config_byte(pdev, 0x69, &ctl);
-	ctl = (ctl & ~0x70) | 0x50;
-	pci_write_config_byte(pdev, 0x69, ctl);
+	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W16(IntrMitigate, 0x5151);
 
@@ -2099,8 +2286,6 @@
 
 	RTL_R8(IntrMask);
 
-	RTL_W32(RxMissed, 0);
-
 	rtl_set_rx_mode(dev);
 
 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2110,6 +2295,70 @@
 	RTL_W16(IntrMask, tp->intr_event);
 }
 
+#define R810X_CPCMD_QUIRK_MASK (\
+	EnableBist | \
+	Mac_dbgo_oe | \
+	Force_half_dup | \
+	Force_rxflow_en | \
+	Force_txflow_en | \
+	Cxpl_dbg_sel | \
+	ASF | \
+	PktCntrDisable | \
+	PCIDAC | \
+	PCIMulRW)
+
+static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+	static struct ephy_info e_info_8102e_1[] = {
+		{ 0x01,	0, 0x6e65 },
+		{ 0x02,	0, 0x091f },
+		{ 0x03,	0, 0xc2f9 },
+		{ 0x06,	0, 0xafb5 },
+		{ 0x07,	0, 0x0e00 },
+		{ 0x19,	0, 0xec80 },
+		{ 0x01,	0, 0x2e65 },
+		{ 0x01,	0, 0x6e65 }
+	};
+	u8 cfg1;
+
+	rtl_csi_access_enable(ioaddr);
+
+	RTL_W8(DBG_REG, FIX_NAK_1);
+
+	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+	RTL_W8(Config1,
+	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
+	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+	cfg1 = RTL_R8(Config1);
+	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
+		RTL_W8(Config1, cfg1 & ~LEDS0);
+
+	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
+
+	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+}
+
+static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+	rtl_csi_access_enable(ioaddr);
+
+	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
+	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+	rtl_hw_start_8102e_2(ioaddr, pdev);
+
+	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
+}
+
 static void rtl_hw_start_8101(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -2118,8 +2367,26 @@
 
 	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
-		pci_write_config_word(pdev, 0x68, 0x00);
-		pci_write_config_word(pdev, 0x69, 0x08);
+		int cap = tp->pcie_cap;
+
+		if (cap) {
+			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
+					      PCI_EXP_DEVCTL_NOSNOOP_EN);
+		}
+	}
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_07:
+		rtl_hw_start_8102e_1(ioaddr, pdev);
+		break;
+
+	case RTL_GIGA_MAC_VER_08:
+		rtl_hw_start_8102e_3(ioaddr, pdev);
+		break;
+
+	case RTL_GIGA_MAC_VER_09:
+		rtl_hw_start_8102e_2(ioaddr, pdev);
+		break;
 	}
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -2143,8 +2410,6 @@
 
 	RTL_R8(IntrMask);
 
-	RTL_W32(RxMissed, 0);
-
 	rtl_set_rx_mode(dev);
 
 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2922,6 +3187,17 @@
 	return work_done;
 }
 
+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+		return;
+
+	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+	RTL_W32(RxMissed, 0);
+}
+
 static void rtl8169_down(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -2939,9 +3215,7 @@
 
 	rtl8169_asic_down(ioaddr);
 
-	/* Update the error counts. */
-	dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-	RTL_W32(RxMissed, 0);
+	rtl8169_rx_missed(dev, ioaddr);
 
 	spin_unlock_irq(&tp->lock);
 
@@ -3063,8 +3337,7 @@
 
 	if (netif_running(dev)) {
 		spin_lock_irqsave(&tp->lock, flags);
-		dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-		RTL_W32(RxMissed, 0);
+		rtl8169_rx_missed(dev, ioaddr);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 
@@ -3089,8 +3362,7 @@
 
 	rtl8169_asic_down(ioaddr);
 
-	dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-	RTL_W32(RxMissed, 0);
+	rtl8169_rx_missed(dev, ioaddr);
 
 	spin_unlock_irq(&tp->lock);
 
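The r8169 hunks above add indirect register accessors (rtl_ephy_read/write, rtl_csi_read/write) that all follow one idiom: write a command word to a MMIO register, then poll a completion flag in a bounded udelay() loop. A minimal sketch of that write-then-poll pattern, kept outside the driver (CMD_REG, CMD_BUSY and the 100 x 10us budget are assumptions for illustration, not r8169 definitions):

	/*
	 * Illustrative write-then-poll accessor, modelled on the EPHYAR/CSIAR
	 * helpers added above.  The register offset, busy bit and timeout
	 * budget below are made up for the sketch.
	 */
	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	#define CMD_REG		0x80		/* hypothetical command register */
	#define CMD_BUSY	0x80000000	/* set while the access is in flight */

	static int indirect_write(void __iomem *ioaddr, u32 cmd)
	{
		unsigned int i;

		writel(cmd | CMD_BUSY, ioaddr + CMD_REG);

		/* Poll until the device clears the busy flag, ~1ms worst case. */
		for (i = 0; i < 100; i++) {
			if (!(readl(ioaddr + CMD_REG) & CMD_BUSY))
				return 0;
			udelay(10);
		}
		return -ETIMEDOUT;
	}
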
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index a2b0730..6a1375f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -371,9 +371,6 @@
 				flags[i]);
 }
 
-/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
-static int vlan_strip_flag;
-
 /* Unregister the vlan */
 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
 {
@@ -2303,7 +2300,7 @@
 		val64 = readq(&bar0->rx_pa_cfg);
 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
 		writeq(val64, &bar0->rx_pa_cfg);
-		vlan_strip_flag = 0;
+		nic->vlan_strip_flag = 0;
 	}
 
 	/*
@@ -3136,7 +3133,7 @@
 		if (skb == NULL) {
 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
 			DBG_PRINT(ERR_DBG, "%s: Null skb ",
-			__FUNCTION__);
+			__func__);
 			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
 			return;
 		}
@@ -3496,7 +3493,7 @@
 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
 
 	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
-			__FUNCTION__, sp->dev->name);
+			__func__, sp->dev->name);
 
 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
@@ -3518,7 +3515,7 @@
 	}
 
 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
-		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
+		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
 	}
 
 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
@@ -3768,7 +3765,7 @@
 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
 		if (wait_for_msix_trans(nic, msix_index)) {
-			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
 			continue;
 		}
 	}
@@ -3789,7 +3786,7 @@
 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
 		if (wait_for_msix_trans(nic, msix_index)) {
-			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
 			continue;
 		}
 		addr = readq(&bar0->xmsi_address);
@@ -3812,7 +3809,7 @@
 			       GFP_KERNEL);
 	if (!nic->entries) {
 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
-			__FUNCTION__);
+			__func__);
 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 		return -ENOMEM;
 	}
@@ -3826,7 +3823,7 @@
 				   GFP_KERNEL);
 	if (!nic->s2io_entries) {
 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
-			__FUNCTION__);
+			__func__);
 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 		kfree(nic->entries);
 		nic->mac_control.stats_info->sw_stat.mem_freed
@@ -5010,7 +5007,7 @@
 			val64 = readq(&bar0->rx_pa_cfg);
 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
 			writeq(val64, &bar0->rx_pa_cfg);
-			vlan_strip_flag = 0;
+			sp->vlan_strip_flag = 0;
 		}
 
 		val64 = readq(&bar0->mac_cfg);
@@ -5032,7 +5029,7 @@
 			val64 = readq(&bar0->rx_pa_cfg);
 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
 			writeq(val64, &bar0->rx_pa_cfg);
-			vlan_strip_flag = 1;
+			sp->vlan_strip_flag = 1;
 		}
 
 		val64 = readq(&bar0->mac_cfg);
@@ -6746,7 +6743,7 @@
 		ret = s2io_card_up(sp);
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
-				  __FUNCTION__);
+				  __func__);
 			return ret;
 		}
 		s2io_wake_all_tx_queue(sp);
@@ -7530,7 +7527,7 @@
 					default:
 						DBG_PRINT(ERR_DBG,
 							"%s: Samadhana!!\n",
-							 __FUNCTION__);
+							 __func__);
 						BUG();
 				}
 			}
@@ -7781,7 +7778,7 @@
 		return -ENOMEM;
 	}
 	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
-		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
+		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
 		pci_disable_device(pdev);
 		return -ENODEV;
 	}
@@ -7998,7 +7995,7 @@
 	if (sp->device_type & XFRAME_II_DEVICE) {
 		mode = s2io_verify_pci_mode(sp);
 		if (mode < 0) {
-			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
+			DBG_PRINT(ERR_DBG, "%s: ", __func__);
 			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
 			ret = -EBADSLT;
 			goto set_swap_failed;
@@ -8175,8 +8172,8 @@
 		    break;
 	}
 	if (sp->config.multiq) {
-	for (i = 0; i < sp->config.tx_fifo_num; i++)
-		mac_control->fifos[i].multiq = config->multiq;
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			mac_control->fifos[i].multiq = config->multiq;
 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
 			dev->name);
 	} else
@@ -8206,6 +8203,11 @@
 	/* Initialize device name */
 	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
 
+	if (vlan_tag_strip)
+		sp->vlan_strip_flag = 1;
+	else
+		sp->vlan_strip_flag = 0;
+
 	/*
 	 * Make Link state as off at this point, when the Link change
 	 * interrupt comes the state will be automatically changed to
@@ -8299,7 +8301,7 @@
 
 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
 		DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
-			  __FUNCTION__);
+			  __func__);
 		return -1;
 	}
 
@@ -8311,7 +8313,7 @@
 		 * If vlan stripping is disabled and the frame is VLAN tagged,
 		 * shift the offset by the VLAN header size bytes.
 		 */
-		if ((!vlan_strip_flag) &&
+		if ((!sp->vlan_strip_flag) &&
 			(rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
 			ip_off += HEADER_VLAN_SIZE;
 	} else {
@@ -8330,7 +8332,7 @@
 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
 				  struct tcphdr *tcp)
 {
-	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
 	if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
 	   (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
 		return -1;
@@ -8345,7 +8347,7 @@
 static void initiate_new_session(struct lro *lro, u8 *l2h,
 	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
 {
-	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
 	lro->l2h = l2h;
 	lro->iph = ip;
 	lro->tcph = tcp;
@@ -8375,7 +8377,7 @@
 	struct tcphdr *tcp = lro->tcph;
 	__sum16 nchk;
 	struct stat_block *statinfo = sp->mac_control.stats_info;
-	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
 
 	/* Update L3 header */
 	ip->tot_len = htons(lro->total_len);
@@ -8403,7 +8405,7 @@
 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
 		struct tcphdr *tcp, u32 l4_pyld)
 {
-	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
 	lro->total_len += l4_pyld;
 	lro->frags_len += l4_pyld;
 	lro->tcp_next_seq += l4_pyld;
@@ -8427,7 +8429,7 @@
 {
 	u8 *ptr;
 
-	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
+	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
 
 	if (!tcp_pyld_len) {
 		/* Runt frame or a pure ack */
@@ -8509,7 +8511,7 @@
 
 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
 				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
-					  "0x%x, actual 0x%x\n", __FUNCTION__,
+					  "0x%x, actual 0x%x\n", __func__,
 					  (*lro)->tcp_next_seq,
 					  ntohl(tcph->seq));
 
@@ -8549,7 +8551,7 @@
 
 	if (ret == 0) { /* sessions exceeded */
 		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
-			  __FUNCTION__);
+			  __func__);
 		*lro = NULL;
 		return ret;
 	}
@@ -8571,7 +8573,7 @@
 			break;
 		default:
 			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
-				__FUNCTION__);
+				__func__);
 			break;
 	}
 
@@ -8592,7 +8594,7 @@
 
 	skb->protocol = eth_type_trans(skb, dev);
 	if (sp->vlgrp && vlan_tag
-		&& (vlan_strip_flag)) {
+		&& (sp->vlan_strip_flag)) {
 		/* Queueing the vlan frame to the upper layer */
 		if (sp->config.napi)
 			vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 6722a2f..55cb943 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -962,6 +962,7 @@
 	int task_flag;
 	unsigned long long start_time;
 	struct vlan_group *vlgrp;
+	int vlan_strip_flag;
 #define MSIX_FLG                0xA5
 	int num_entries;
 	struct msix_entry *entries;
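The s2io changes replace the file-scope vlan_strip_flag with a field of the per-adapter structure (see the s2io.h hunk just above), so two NICs configured differently no longer share one module-wide variable. A minimal sketch of the before/after, using the hypothetical names demo_nic and demo_set_strip:

	/* Before: one module-wide flag shared by every adapter, which is
	 * wrong as soon as two NICs want different VLAN stripping settings. */
	static int vlan_strip_flag;

	/* After: the flag lives in each adapter's private state, mirroring
	 * the field added to the s2io per-NIC structure above. */
	struct demo_nic {
		int vlan_strip_flag;
	};

	static void demo_set_strip(struct demo_nic *nic, int on)
	{
		nic->vlan_strip_flag = on;	/* affects only this adapter */
	}
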
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index fe41e4e..ce10cfa 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2069,9 +2069,10 @@
 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
 
 	/* lock eth irq */
-	spin_lock_irq (&sc->sbm_lock);
+	spin_lock_irqsave(&sc->sbm_lock, flags);
 
 	/*
 	 * Put the buffer on the transmit ring.  If we
@@ -2081,14 +2082,14 @@
 	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
 		/* XXX save skb that we could not send */
 		netif_stop_queue(dev);
-		spin_unlock_irq(&sc->sbm_lock);
+		spin_unlock_irqrestore(&sc->sbm_lock, flags);
 
 		return 1;
 	}
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq (&sc->sbm_lock);
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
 
 	return 0;
 }
@@ -2568,14 +2569,15 @@
 static void sbmac_tx_timeout (struct net_device *dev)
 {
 	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
 
-	spin_lock_irq (&sc->sbm_lock);
+	spin_lock_irqsave(&sc->sbm_lock, flags);
 
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_errors++;
 
-	spin_unlock_irq (&sc->sbm_lock);
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
 
 	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
 }
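The sb1250-mac hunks switch from spin_lock_irq()/spin_unlock_irq() to the irqsave variants, which record and restore the caller's interrupt state instead of unconditionally re-enabling interrupts on unlock; that makes the lock safe to take from paths where interrupts may already be disabled. The usual shape of the idiom, as a generic sketch rather than driver code:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static unsigned long demo_counter;

	static void demo_update(void)
	{
		unsigned long flags;

		/* Save the current IRQ state, disable IRQs, take the lock ... */
		spin_lock_irqsave(&demo_lock, flags);
		demo_counter++;
		/* ... then restore the previous IRQ state, whatever it was. */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
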
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2c79d27..d95c218 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -52,9 +52,9 @@
  *
  * The maximum width mask that can be generated is 64 bits.
  */
-#define EFX_MASK64(field)					\
-	(EFX_WIDTH(field) == 64 ? ~((u64) 0) :		\
-	 (((((u64) 1) << EFX_WIDTH(field))) - 1))
+#define EFX_MASK64(width)			\
+	((width) == 64 ? ~((u64) 0) :		\
+	 (((((u64) 1) << (width))) - 1))
 
 /* Mask equal in width to the specified field.
  *
@@ -63,9 +63,9 @@
  * The maximum width mask that can be generated is 32 bits.  Use
  * EFX_MASK64 for higher width fields.
  */
-#define EFX_MASK32(field)					\
-	(EFX_WIDTH(field) == 32 ? ~((u32) 0) :		\
-	 (((((u32) 1) << EFX_WIDTH(field))) - 1))
+#define EFX_MASK32(width)			\
+	((width) == 32 ? ~((u32) 0) :		\
+	 (((((u32) 1) << (width))) - 1))
 
 /* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
 typedef union efx_dword {
@@ -138,44 +138,49 @@
 	EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
 
 #define EFX_EXTRACT_OWORD64(oword, low, high)				\
-	(EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |		\
-	 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
+	((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |		\
+	  EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) &		\
+	 EFX_MASK64(high + 1 - low))
 
 #define EFX_EXTRACT_QWORD64(qword, low, high)				\
-	EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
+	(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) &		\
+	 EFX_MASK64(high + 1 - low))
 
 #define EFX_EXTRACT_OWORD32(oword, low, high)				\
-	(EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |		\
-	 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |		\
-	 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |		\
-	 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
+	((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) &		\
+	 EFX_MASK32(high + 1 - low))
 
 #define EFX_EXTRACT_QWORD32(qword, low, high)				\
-	(EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |		\
-	 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
+	((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |		\
+	  EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) &		\
+	 EFX_MASK32(high + 1 - low))
 
-#define EFX_EXTRACT_DWORD(dword, low, high)				\
-	EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
+#define EFX_EXTRACT_DWORD(dword, low, high)			\
+	(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) &	\
+	 EFX_MASK32(high + 1 - low))
 
-#define EFX_OWORD_FIELD64(oword, field)					\
-	(EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
-	 & EFX_MASK64(field))
+#define EFX_OWORD_FIELD64(oword, field)				\
+	EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
 
-#define EFX_QWORD_FIELD64(qword, field)					\
-	(EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
-	 & EFX_MASK64(field))
+#define EFX_QWORD_FIELD64(qword, field)				\
+	EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
 
-#define EFX_OWORD_FIELD32(oword, field)					\
-	(EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
-	 & EFX_MASK32(field))
+#define EFX_OWORD_FIELD32(oword, field)				\
+	EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
 
-#define EFX_QWORD_FIELD32(qword, field)					\
-	(EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
-	 & EFX_MASK32(field))
+#define EFX_QWORD_FIELD32(qword, field)				\
+	EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
 
-#define EFX_DWORD_FIELD(dword, field)					   \
-	(EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
-	 & EFX_MASK32(field))
+#define EFX_DWORD_FIELD(dword, field)				\
+	EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field),		\
+			  EFX_HIGH_BIT(field))
 
 #define EFX_OWORD_IS_ZERO64(oword)					\
 	(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
@@ -411,69 +416,102 @@
  * for read-modify-write operations.
  *
  */
-
 #define EFX_INVERT_OWORD(oword) do {		\
 	(oword).u64[0] = ~((oword).u64[0]);	\
 	(oword).u64[1] = ~((oword).u64[1]);	\
 	} while (0)
 
-#define EFX_INSERT_FIELD64(...)					\
-	cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+#define EFX_AND_OWORD(oword, from, mask)			\
+	do {							\
+		(oword).u64[0] = (from).u64[0] & (mask).u64[0];	\
+		(oword).u64[1] = (from).u64[1] & (mask).u64[1];	\
+	} while (0)
 
-#define EFX_INSERT_FIELD32(...)					\
-	cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+#define EFX_OR_OWORD(oword, from, mask)				\
+	do {							\
+		(oword).u64[0] = (from).u64[0] | (mask).u64[0];	\
+		(oword).u64[1] = (from).u64[1] | (mask).u64[1];	\
+	} while (0)
 
-#define EFX_INPLACE_MASK64(min, max, field)			\
-	EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
+#define EFX_INSERT64(min, max, low, high, value)			\
+	cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
 
-#define EFX_INPLACE_MASK32(min, max, field)			\
-	EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
+#define EFX_INSERT32(min, max, low, high, value)			\
+	cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
 
-#define EFX_SET_OWORD_FIELD64(oword, field, value) do {			\
+#define EFX_INPLACE_MASK64(min, max, low, high)				\
+	EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
+
+#define EFX_INPLACE_MASK32(min, max, low, high)				\
+	EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do {			\
 	(oword).u64[0] = (((oword).u64[0] 				\
-			   & ~EFX_INPLACE_MASK64(0,  63, field))	\
-			  | EFX_INSERT_FIELD64(0,  63, field, value));  \
+			   & ~EFX_INPLACE_MASK64(0,  63, low, high))	\
+			  | EFX_INSERT64(0,  63, low, high, value));	\
 	(oword).u64[1] = (((oword).u64[1] 				\
-			   & ~EFX_INPLACE_MASK64(64, 127, field))	\
-			  | EFX_INSERT_FIELD64(64, 127, field, value)); \
+			   & ~EFX_INPLACE_MASK64(64, 127, low, high))	\
+			  | EFX_INSERT64(64, 127, low, high, value));	\
 	} while (0)
 
-#define EFX_SET_QWORD_FIELD64(qword, field, value) do {			\
+#define EFX_SET_QWORD64(qword, low, high, value) do {			\
 	(qword).u64[0] = (((qword).u64[0] 				\
-			   & ~EFX_INPLACE_MASK64(0, 63, field))		\
-			  | EFX_INSERT_FIELD64(0, 63, field, value));	\
+			   & ~EFX_INPLACE_MASK64(0, 63, low, high))	\
+			  | EFX_INSERT64(0, 63, low, high, value));	\
 	} while (0)
 
-#define EFX_SET_OWORD_FIELD32(oword, field, value) do {			\
+#define EFX_SET_OWORD32(oword, low, high, value) do {			\
 	(oword).u32[0] = (((oword).u32[0] 				\
-			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
-			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
 	(oword).u32[1] = (((oword).u32[1] 				\
-			   & ~EFX_INPLACE_MASK32(32, 63, field))	\
-			  | EFX_INSERT_FIELD32(32, 63, field, value));	\
+			   & ~EFX_INPLACE_MASK32(32, 63, low, high))	\
+			  | EFX_INSERT32(32, 63, low, high, value));	\
 	(oword).u32[2] = (((oword).u32[2] 				\
-			   & ~EFX_INPLACE_MASK32(64, 95, field))	\
-			  | EFX_INSERT_FIELD32(64, 95, field, value));	\
+			   & ~EFX_INPLACE_MASK32(64, 95, low, high))	\
+			  | EFX_INSERT32(64, 95, low, high, value));	\
 	(oword).u32[3] = (((oword).u32[3] 				\
-			   & ~EFX_INPLACE_MASK32(96, 127, field))	\
-			  | EFX_INSERT_FIELD32(96, 127, field, value));	\
+			   & ~EFX_INPLACE_MASK32(96, 127, low, high))	\
+			  | EFX_INSERT32(96, 127, low, high, value));	\
 	} while (0)
 
-#define EFX_SET_QWORD_FIELD32(qword, field, value) do {			\
+#define EFX_SET_QWORD32(qword, low, high, value) do {			\
 	(qword).u32[0] = (((qword).u32[0] 				\
-			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
-			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
 	(qword).u32[1] = (((qword).u32[1] 				\
-			   & ~EFX_INPLACE_MASK32(32, 63, field))	\
-			  | EFX_INSERT_FIELD32(32, 63, field, value));	\
+			   & ~EFX_INPLACE_MASK32(32, 63, low, high))	\
+			  | EFX_INSERT32(32, 63, low, high, value));	\
 	} while (0)
 
-#define EFX_SET_DWORD_FIELD(dword, field, value) do {			\
-	(dword).u32[0] = (((dword).u32[0] 				\
-			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
-			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
+#define EFX_SET_DWORD32(dword, low, high, value) do {			\
+	(dword).u32[0] = (((dword).u32[0]				\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
 	} while (0)
 
+#define EFX_SET_OWORD_FIELD64(oword, field, value)			\
+	EFX_SET_OWORD64(oword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value)			\
+	EFX_SET_QWORD64(qword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value)			\
+	EFX_SET_OWORD32(oword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value)			\
+	EFX_SET_QWORD32(qword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value)			\
+	EFX_SET_DWORD32(dword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+
+
 #if BITS_PER_LONG == 64
 #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
 #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
@@ -502,4 +540,10 @@
 #define EFX_DMA_TYPE_WIDTH(width) \
 	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
 
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d)						\
+	{ .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \
+		   __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } }
+
 #endif /* EFX_BITFIELD_H */
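The bitfield.h rework changes EFX_MASK64()/EFX_MASK32() to take a bit width instead of a field name, keeping the special case for the full-width mask because shifting a 64-bit value by 64 is undefined behaviour in C. A standalone demonstration of the same construction (plain userspace C with a hypothetical MASK64 macro, not sfc code):

	#include <stdio.h>
	#include <stdint.h>

	/* Width-parameterised mask, special-casing width == 64 because
	 * (1 << 64) on a 64-bit type is undefined behaviour. */
	#define MASK64(width) \
		((width) == 64 ? ~(uint64_t)0 : (((uint64_t)1 << (width)) - 1))

	int main(void)
	{
		printf("%016llx\n", (unsigned long long)MASK64(12));	/* 0000000000000fff */
		printf("%016llx\n", (unsigned long long)MASK64(64));	/* ffffffffffffffff */
		return 0;
	}
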
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index d3d3dd0..99e6023 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -31,23 +31,23 @@
 		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
 }
 
-static void board_blink(struct efx_nic *efx, int blink)
+static void board_blink(struct efx_nic *efx, bool blink)
 {
 	struct efx_blinker *blinker = &efx->board_info.blinker;
 
 	/* The rtnl mutex serialises all ethtool ioctls, so
 	 * nothing special needs doing here. */
 	if (blink) {
-		blinker->resubmit = 1;
-		blinker->state = 0;
+		blinker->resubmit = true;
+		blinker->state = false;
 		setup_timer(&blinker->timer, blink_led_timer,
 			    (unsigned long)efx);
 		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
 	} else {
-		blinker->resubmit = 0;
+		blinker->resubmit = false;
 		if (blinker->timer.function)
 			del_timer_sync(&blinker->timer);
-		efx->board_info.set_fault_led(efx, 0);
+		efx->board_info.set_fault_led(efx, false);
 	}
 }
 
@@ -78,7 +78,7 @@
 	return 0;
 }
 
-static void sfe4002_fault_led(struct efx_nic *efx, int state)
+static void sfe4002_fault_led(struct efx_nic *efx, bool state)
 {
 	xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
 			QUAKE_LED_OFF);
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index e5e8443..c6e01b6 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -21,7 +21,5 @@
 
 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
 extern int sfe4001_init(struct efx_nic *efx);
-/* Are we putting the PHY into flash config mode */
-extern unsigned int sfe4001_phy_flash_cfg;
 
 #endif
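The efx.c hunks that follow replace net_dev->priv with netdev_priv(net_dev). The private area is allocated together with struct net_device by alloc_etherdev(), and netdev_priv() returns a pointer into that allocation, so the driver never needs to assign or trust a separate ->priv pointer. A minimal sketch of the pattern, using the hypothetical struct demo_priv:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct demo_priv {			/* hypothetical driver state */
		unsigned int n_rx_queues;
	};

	static struct net_device *demo_alloc(void)
	{
		/* alloc_etherdev() reserves sizeof(struct demo_priv) directly
		 * behind struct net_device; netdev_priv() hands back that area. */
		struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
		struct demo_priv *priv;

		if (!dev)
			return NULL;

		priv = netdev_priv(dev);
		priv->n_rx_queues = 1;
		return dev;
	}
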
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 45c72ee..06ea71c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -28,7 +28,6 @@
 #include "efx.h"
 #include "mdio_10g.h"
 #include "falcon.h"
-#include "workarounds.h"
 #include "mac.h"
 
 #define EFX_MAX_MTU (9 * 1024)
@@ -52,7 +51,7 @@
  * This sets the default for new devices.  It can be controlled later
  * using ethtool.
  */
-static int lro = 1;
+static int lro = true;
 module_param(lro, int, 0644);
 MODULE_PARM_DESC(lro, "Large receive offload acceleration");
 
@@ -65,7 +64,7 @@
  * This is forced to 0 for MSI interrupt mode as the interrupt vector
  * is not written
  */
-static unsigned int separate_tx_and_rx_channels = 1;
+static unsigned int separate_tx_and_rx_channels = true;
 
 /* This is the weight assigned to each of the (per-channel) virtual
  * NAPI devices.
@@ -81,7 +80,7 @@
 /* This controls whether or not the hardware monitor will trigger a
  * reset when it detects an error condition.
  */
-static unsigned int monitor_reset = 1;
+static unsigned int monitor_reset = true;
 
 /* This controls whether or not the driver will initialise devices
  * with invalid MAC addresses stored in the EEPROM or flash.  If true,
@@ -141,8 +140,7 @@
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if ((efx->state == STATE_RUNNING) ||	\
-		    (efx->state == STATE_RESETTING))	\
+		if (efx->state == STATE_RUNNING)	\
 			ASSERT_RTNL();			\
 	} while (0)
 
@@ -159,16 +157,18 @@
  * never be concurrently called more than once on the same channel,
  * though different channels may be being processed concurrently.
  */
-static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 {
-	int rxdmaqs;
-	struct efx_rx_queue *rx_queue;
+	struct efx_nic *efx = channel->efx;
+	int rx_packets;
 
-	if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
+	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
 		     !channel->enabled))
-		return rx_quota;
+		return 0;
 
-	rxdmaqs = falcon_process_eventq(channel, &rx_quota);
+	rx_packets = falcon_process_eventq(channel, rx_quota);
+	if (rx_packets == 0)
+		return 0;
 
 	/* Deliver last RX packet. */
 	if (channel->rx_pkt) {
@@ -180,16 +180,9 @@
 	efx_flush_lro(channel);
 	efx_rx_strategy(channel);
 
-	/* Refill descriptor rings as necessary */
-	rx_queue = &channel->efx->rx_queue[0];
-	while (rxdmaqs) {
-		if (rxdmaqs & 0x01)
-			efx_fast_push_rx_descriptors(rx_queue);
-		rx_queue++;
-		rxdmaqs >>= 1;
-	}
+	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
 
-	return rx_quota;
+	return rx_packets;
 }
 
 /* Mark channel as finished processing
@@ -203,7 +196,7 @@
 	/* The interrupt handler for this channel may set work_pending
 	 * as soon as we acknowledge the events we've seen.  Make sure
 	 * it's cleared before then. */
-	channel->work_pending = 0;
+	channel->work_pending = false;
 	smp_wmb();
 
 	falcon_eventq_read_ack(channel);
@@ -219,14 +212,12 @@
 	struct efx_channel *channel =
 		container_of(napi, struct efx_channel, napi_str);
 	struct net_device *napi_dev = channel->napi_dev;
-	int unused;
 	int rx_packets;
 
 	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
 		  channel->channel, raw_smp_processor_id());
 
-	unused = efx_process_channel(channel, budget);
-	rx_packets = (budget - unused);
+	rx_packets = efx_process_channel(channel, budget);
 
 	if (rx_packets < budget) {
 		/* There is no race here; although napi_disable() will
@@ -260,7 +251,7 @@
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	if (channel->has_interrupt && channel->irq)
+	if (channel->irq)
 		synchronize_irq(channel->irq);
 
 	/* Wait for any NAPI processing to complete */
@@ -290,13 +281,13 @@
 }
 
 /* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
+static void efx_init_eventq(struct efx_channel *channel)
 {
 	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
 
 	channel->eventq_read_ptr = 0;
 
-	return falcon_init_eventq(channel);
+	falcon_init_eventq(channel);
 }
 
 static void efx_fini_eventq(struct efx_channel *channel)
@@ -362,12 +353,11 @@
  * to propagate configuration changes (mtu, checksum offload), or
  * to clear hardware error conditions
  */
-static int efx_init_channels(struct efx_nic *efx)
+static void efx_init_channels(struct efx_nic *efx)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	struct efx_channel *channel;
-	int rc = 0;
 
 	/* Calculate the rx buffer allocation parameters required to
 	 * support the current MTU, including padding for header
@@ -382,36 +372,20 @@
 	efx_for_each_channel(channel, efx) {
 		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
 
-		rc = efx_init_eventq(channel);
-		if (rc)
-			goto err;
+		efx_init_eventq(channel);
 
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			rc = efx_init_tx_queue(tx_queue);
-			if (rc)
-				goto err;
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue(tx_queue);
 
 		/* The rx buffer allocation strategy is MTU dependent */
 		efx_rx_strategy(channel);
 
-		efx_for_each_channel_rx_queue(rx_queue, channel) {
-			rc = efx_init_rx_queue(rx_queue);
-			if (rc)
-				goto err;
-		}
+		efx_for_each_channel_rx_queue(rx_queue, channel)
+			efx_init_rx_queue(rx_queue);
 
 		WARN_ON(channel->rx_pkt != NULL);
 		efx_rx_strategy(channel);
 	}
-
-	return 0;
-
- err:
-	EFX_ERR(efx, "failed to initialise channel %d\n",
-		channel ? channel->channel : -1);
-	efx_fini_channels(efx);
-	return rc;
 }
 
 /* This enables event queue processing and packet transmission.
@@ -432,8 +406,8 @@
 	/* The interrupt handler for this channel may set work_pending
 	 * as soon as we enable it.  Make sure it's cleared before
 	 * then.  Similarly, make sure it sees the enabled flag set. */
-	channel->work_pending = 0;
-	channel->enabled = 1;
+	channel->work_pending = false;
+	channel->enabled = true;
 	smp_wmb();
 
 	napi_enable(&channel->napi_str);
@@ -456,7 +430,7 @@
 
 	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
 
-	channel->enabled = 0;
+	channel->enabled = false;
 	napi_disable(&channel->napi_str);
 
 	/* Ensure that any worker threads have exited or will be no-ops */
@@ -471,10 +445,17 @@
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	int rc;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
+	rc = falcon_flush_queues(efx);
+	if (rc)
+		EFX_ERR(efx, "failed to flush queues\n");
+	else
+		EFX_LOG(efx, "successfully flushed all queues\n");
+
 	efx_for_each_channel(channel, efx) {
 		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
 
@@ -482,13 +463,6 @@
 			efx_fini_rx_queue(rx_queue);
 		efx_for_each_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
-	}
-
-	/* Do the event queues last so that we can handle flush events
-	 * for all DMA queues. */
-	efx_for_each_channel(channel, efx) {
-		EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
-
 		efx_fini_eventq(channel);
 	}
 }
@@ -526,8 +500,6 @@
  */
 static void efx_link_status_changed(struct efx_nic *efx)
 {
-	int carrier_ok;
-
 	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 	 * that no events are triggered between unregister_netdev() and the
 	 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -535,8 +507,12 @@
 	if (!netif_running(efx->net_dev))
 		return;
 
-	carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
-	if (efx->link_up != carrier_ok) {
+	if (efx->port_inhibited) {
+		netif_carrier_off(efx->net_dev);
+		return;
+	}
+
+	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
 		efx->n_link_state_changes++;
 
 		if (efx->link_up)
@@ -577,13 +553,19 @@
 
 /* This call reinitialises the MAC to pick up new PHY settings. The
  * caller must hold the mac_lock */
-static void __efx_reconfigure_port(struct efx_nic *efx)
+void __efx_reconfigure_port(struct efx_nic *efx)
 {
 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 
 	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
 		raw_smp_processor_id());
 
+	/* Serialise the promiscuous flag with efx_set_multicast_list. */
+	if (efx_dev_registered(efx)) {
+		netif_addr_lock_bh(efx->net_dev);
+		netif_addr_unlock_bh(efx->net_dev);
+	}
+
 	falcon_reconfigure_xmac(efx);
 
 	/* Inform kernel of loss/gain of carrier */
@@ -661,7 +643,8 @@
 	if (rc)
 		return rc;
 
-	efx->port_initialized = 1;
+	efx->port_initialized = true;
+	efx->stats_enabled = true;
 
 	/* Reconfigure port to program MAC registers */
 	falcon_reconfigure_xmac(efx);
@@ -678,7 +661,7 @@
 	BUG_ON(efx->port_enabled);
 
 	mutex_lock(&efx->mac_lock);
-	efx->port_enabled = 1;
+	efx->port_enabled = true;
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
 }
@@ -692,7 +675,7 @@
 	EFX_LOG(efx, "stop port\n");
 
 	mutex_lock(&efx->mac_lock);
-	efx->port_enabled = 0;
+	efx->port_enabled = false;
 	mutex_unlock(&efx->mac_lock);
 
 	/* Serialise against efx_set_multicast_list() */
@@ -710,9 +693,9 @@
 		return;
 
 	falcon_fini_xmac(efx);
-	efx->port_initialized = 0;
+	efx->port_initialized = false;
 
-	efx->link_up = 0;
+	efx->link_up = false;
 	efx_link_status_changed(efx);
 }
 
@@ -797,7 +780,7 @@
 	return 0;
 
  fail4:
-	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
+	pci_release_region(efx->pci_dev, efx->type->mem_bar);
  fail3:
 	efx->membase_phys = 0;
  fail2:
@@ -823,53 +806,61 @@
 	pci_disable_device(efx->pci_dev);
 }
 
-/* Probe the number and type of interrupts we are able to obtain. */
+/* Get number of RX queues wanted.  Return number of online CPU
+ * packages in the expectation that an IRQ balancer will spread
+ * interrupts across them. */
+static int efx_wanted_rx_queues(void)
+{
+	cpumask_t core_mask;
+	int count;
+	int cpu;
+
+	cpus_clear(core_mask);
+	count = 0;
+	for_each_online_cpu(cpu) {
+		if (!cpu_isset(cpu, core_mask)) {
+			++count;
+			cpus_or(core_mask, core_mask,
+				topology_core_siblings(cpu));
+		}
+	}
+
+	return count;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
 static void efx_probe_interrupts(struct efx_nic *efx)
 {
-	int max_channel = efx->type->phys_addr_channels - 1;
-	struct msix_entry xentries[EFX_MAX_CHANNELS];
+	int max_channels =
+		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
 	int rc, i;
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
-		BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
+		struct msix_entry xentries[EFX_MAX_CHANNELS];
+		int wanted_ints;
 
-		if (rss_cpus == 0) {
-			cpumask_t core_mask;
-			int cpu;
+		/* We want one RX queue and interrupt per CPU package
+		 * (or as specified by the rss_cpus module parameter).
+		 * We will need one channel per interrupt.
+		 */
+		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
+		efx->n_rx_queues = min(wanted_ints, max_channels);
 
-			cpus_clear(core_mask);
-			efx->rss_queues = 0;
-			for_each_online_cpu(cpu) {
-				if (!cpu_isset(cpu, core_mask)) {
-					++efx->rss_queues;
-					cpus_or(core_mask, core_mask,
-						topology_core_siblings(cpu));
-				}
-			}
-		} else {
-			efx->rss_queues = rss_cpus;
-		}
-
-		efx->rss_queues = min(efx->rss_queues, max_channel + 1);
-		efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
-
-		/* Request maximum number of MSI interrupts, and fill out
-		 * the channel interrupt information the allowed allocation */
-		for (i = 0; i < efx->rss_queues; i++)
+		for (i = 0; i < efx->n_rx_queues; i++)
 			xentries[i].entry = i;
-		rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
+		rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
 		if (rc > 0) {
-			EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
-			efx->rss_queues = rc;
+			EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
+			efx->n_rx_queues = rc;
 			rc = pci_enable_msix(efx->pci_dev, xentries,
-					     efx->rss_queues);
+					     efx->n_rx_queues);
 		}
 
 		if (rc == 0) {
-			for (i = 0; i < efx->rss_queues; i++) {
-				efx->channel[i].has_interrupt = 1;
+			for (i = 0; i < efx->n_rx_queues; i++)
 				efx->channel[i].irq = xentries[i].vector;
-			}
 		} else {
 			/* Fall back to single channel MSI */
 			efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -879,11 +870,10 @@
 
 	/* Try single interrupt MSI */
 	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
-		efx->rss_queues = 1;
+		efx->n_rx_queues = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx->channel[0].irq = efx->pci_dev->irq;
-			efx->channel[0].has_interrupt = 1;
 		} else {
 			EFX_ERR(efx, "could not enable MSI\n");
 			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
@@ -892,10 +882,7 @@
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->rss_queues = 1;
-		/* Every channel is interruptible */
-		for (i = 0; i < EFX_MAX_CHANNELS; i++)
-			efx->channel[i].has_interrupt = 1;
+		efx->n_rx_queues = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 }
@@ -905,7 +892,7 @@
 	struct efx_channel *channel;
 
 	/* Remove MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel(channel, efx)
 		channel->irq = 0;
 	pci_disable_msi(efx->pci_dev);
 	pci_disable_msix(efx->pci_dev);
@@ -914,45 +901,22 @@
 	efx->legacy_irq = 0;
 }
 
-/* Select number of used resources
- * Should be called after probe_interrupts()
- */
-static void efx_select_used(struct efx_nic *efx)
+static void efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
-	int i;
 
-	/* TX queues.  One per port per channel with TX capability
-	 * (more than one per port won't work on Linux, due to out
-	 *  of order issues... but will be fine on Solaris)
-	 */
-	tx_queue = &efx->tx_queue[0];
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
+			tx_queue->channel = &efx->channel[1];
+		else
+			tx_queue->channel = &efx->channel[0];
+		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+	}
 
-	/* Perform this for each channel with TX capabilities.
-	 * At the moment, we only support a single TX queue
-	 */
-	tx_queue->used = 1;
-	if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
-		tx_queue->channel = &efx->channel[1];
-	else
-		tx_queue->channel = &efx->channel[0];
-	tx_queue->channel->used_flags |= EFX_USED_BY_TX;
-	tx_queue++;
-
-	/* RX queues.  Each has a dedicated channel. */
-	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
-		rx_queue = &efx->rx_queue[i];
-
-		if (i < efx->rss_queues) {
-			rx_queue->used = 1;
-			/* If we allow multiple RX queues per channel
-			 * we need to decide that here
-			 */
-			rx_queue->channel = &efx->channel[rx_queue->queue];
-			rx_queue->channel->used_flags |= EFX_USED_BY_RX;
-			rx_queue++;
-		}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		rx_queue->channel = &efx->channel[rx_queue->queue];
+		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
 	}
 }
 
@@ -971,8 +935,7 @@
 	 * in MSI-X interrupts. */
 	efx_probe_interrupts(efx);
 
-	/* Determine number of RX queues and TX queues */
-	efx_select_used(efx);
+	efx_set_channels(efx);
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
@@ -1058,7 +1021,8 @@
 	/* Mark the port as enabled so port reconfigurations can start, then
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-	efx_wake_queue(efx);
+	if (efx_dev_registered(efx))
+		efx_wake_queue(efx);
 
 	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
@@ -1109,7 +1073,7 @@
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	efx_for_each_channel_with_interrupt(channel, efx) {
+	efx_for_each_channel(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
 	}
@@ -1128,13 +1092,12 @@
 
 	/* Isolate the MAC from the TX and RX engines, so that queue
 	 * flushes will complete in a timely fashion. */
-	falcon_deconfigure_mac_wrapper(efx);
 	falcon_drain_tx_fifo(efx);
 
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
-	efx_stop_queue(efx);
 	if (efx_dev_registered(efx)) {
+		efx_stop_queue(efx);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1151,24 +1114,16 @@
 }
 
 /* A convinience function to safely flush all the queues */
-int efx_flush_queues(struct efx_nic *efx)
+void efx_flush_queues(struct efx_nic *efx)
 {
-	int rc;
-
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_stop_all(efx);
 
 	efx_fini_channels(efx);
-	rc = efx_init_channels(efx);
-	if (rc) {
-		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
-		return rc;
-	}
+	efx_init_channels(efx);
 
 	efx_start_all(efx);
-
-	return 0;
 }
 
 /**************************************************************************
@@ -1249,7 +1204,7 @@
  */
 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
@@ -1303,10 +1258,10 @@
  */
 static void efx_netpoll(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel(channel, efx)
 		efx_schedule_channel(channel);
 }
 
@@ -1321,12 +1276,15 @@
 /* Context: process, rtnl_lock() held. */
 static int efx_net_open(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
 		raw_smp_processor_id());
 
+	if (efx->phy_mode & PHY_MODE_SPECIAL)
+		return -EBUSY;
+
 	efx_start_all(efx);
 	return 0;
 }
@@ -1337,8 +1295,7 @@
  */
 static int efx_net_stop(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
-	int rc;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
 		raw_smp_processor_id());
@@ -1346,9 +1303,7 @@
 	/* Stop the device and flush all the channels */
 	efx_stop_all(efx);
 	efx_fini_channels(efx);
-	rc = efx_init_channels(efx);
-	if (rc)
-		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+	efx_init_channels(efx);
 
 	return 0;
 }
@@ -1356,7 +1311,7 @@
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct net_device_stats *stats = &net_dev->stats;
 
@@ -1366,7 +1321,7 @@
 	 */
 	if (!spin_trylock(&efx->stats_lock))
 		return stats;
-	if (efx->state == STATE_RUNNING) {
+	if (efx->stats_enabled) {
 		falcon_update_stats_xmac(efx);
 		falcon_update_nic_stats(efx);
 	}
@@ -1403,7 +1358,7 @@
 /* Context: netif_tx_lock held, BHs disabled. */
 static void efx_watchdog(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
 		atomic_read(&efx->netif_stop_count), efx->port_enabled,
@@ -1417,7 +1372,7 @@
 /* Context: process, rtnl_lock() held. */
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	int rc = 0;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1431,21 +1386,15 @@
 
 	efx_fini_channels(efx);
 	net_dev->mtu = new_mtu;
-	rc = efx_init_channels(efx);
-	if (rc)
-		goto fail;
+	efx_init_channels(efx);
 
 	efx_start_all(efx);
 	return rc;
-
- fail:
-	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
-	return rc;
 }
 
 static int efx_set_mac_address(struct net_device *net_dev, void *data)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct sockaddr *addr = data;
 	char *new_addr = addr->sa_data;
 
@@ -1466,26 +1415,19 @@
 	return 0;
 }
 
-/* Context: netif_tx_lock held, BHs disabled. */
+/* Context: netif_addr_lock held, BHs disabled. */
 static void efx_set_multicast_list(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct dev_mc_list *mc_list = net_dev->mc_list;
 	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
-	int promiscuous;
+	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
+	bool changed = (efx->promiscuous != promiscuous);
 	u32 crc;
 	int bit;
 	int i;
 
-	/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
-	promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
-	if (efx->promiscuous != promiscuous) {
-		efx->promiscuous = promiscuous;
-		/* Close the window between efx_stop_port() and efx_flush_all()
-		 * by only queuing work when the port is enabled. */
-		if (efx->port_enabled)
-			queue_work(efx->workqueue, &efx->reconfigure_work);
-	}
+	efx->promiscuous = promiscuous;
 
 	/* Build multicast hash table */
 	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
@@ -1500,6 +1442,13 @@
 		}
 	}
 
+	if (!efx->port_enabled)
+		/* Delay pushing settings until efx_start_port() */
+		return;
+
+	if (changed)
+		queue_work(efx->workqueue, &efx->reconfigure_work);
+
 	/* Create and activate new global multicast hash table */
 	falcon_set_multicast_hash(efx);
 }
@@ -1510,7 +1459,7 @@
 	struct net_device *net_dev = ptr;
 
 	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
-		struct efx_nic *efx = net_dev->priv;
+		struct efx_nic *efx = netdev_priv(net_dev);
 
 		strcpy(efx->name, net_dev->name);
 	}
@@ -1568,7 +1517,7 @@
 	if (!efx->net_dev)
 		return;
 
-	BUG_ON(efx->net_dev->priv != efx);
+	BUG_ON(netdev_priv(efx->net_dev) != efx);
 
 	/* Free up any skbs still remaining. This has to happen before
 	 * we try to unregister the netdev as running their destructors
@@ -1588,49 +1537,60 @@
  *
  **************************************************************************/
 
-/* The final hardware and software finalisation before reset. */
-static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+/* Tears down the entire software state and most of the hardware state
+ * before reset.  */
+void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	int rc;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
+	/* The net_dev->get_stats handler is quite slow, and will fail
+	 * if a fetch is pending over reset. Serialise against it. */
+	spin_lock(&efx->stats_lock);
+	efx->stats_enabled = false;
+	spin_unlock(&efx->stats_lock);
+
+	efx_stop_all(efx);
+	mutex_lock(&efx->mac_lock);
+
 	rc = falcon_xmac_get_settings(efx, ecmd);
-	if (rc) {
+	if (rc)
 		EFX_ERR(efx, "could not back up PHY settings\n");
-		goto fail;
-	}
 
 	efx_fini_channels(efx);
-	return 0;
-
- fail:
-	return rc;
 }
 
-/* The first part of software initialisation after a hardware reset
- * This function does not handle serialisation with the kernel, it
- * assumes the caller has done this */
-static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_TYPE_DISABLE. */
+int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
 {
 	int rc;
 
-	rc = efx_init_channels(efx);
-	if (rc)
-		goto fail1;
+	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	/* Restore MAC and PHY settings. */
-	rc = falcon_xmac_set_settings(efx, ecmd);
+	rc = falcon_init_nic(efx);
 	if (rc) {
-		EFX_ERR(efx, "could not restore PHY settings\n");
-		goto fail2;
+		EFX_ERR(efx, "failed to initialise NIC\n");
+		ok = false;
 	}
 
-	return 0;
+	if (ok) {
+		efx_init_channels(efx);
 
- fail2:
-	efx_fini_channels(efx);
- fail1:
+		if (falcon_xmac_set_settings(efx, ecmd))
+			EFX_ERR(efx, "could not restore PHY settings\n");
+	}
+
+	mutex_unlock(&efx->mac_lock);
+
+	if (ok) {
+		efx_start_all(efx);
+		efx->stats_enabled = true;
+	}
 	return rc;
 }
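
To make the new split clearer, here is a minimal sketch of how the two halves are intended to be paired (illustrative only: example_reset() is not part of this patch, and it assumes the driver-internal API visible elsewhere in this series):

/* Sketch under the above assumptions; resets must already be serialised. */
static int example_reset(struct efx_nic *efx, enum reset_type method)
{
	struct ethtool_cmd ecmd;
	int rc;

	/* Stops the datapath, disables stats and takes efx->mac_lock */
	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);

	/* Always drops mac_lock; ok == false leaves rx/tx stopped,
	 * pending a RESET_TYPE_DISABLE */
	if (efx_reset_up(efx, &ecmd, rc == 0) && !rc)
		rc = -EIO;

	return rc;
}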
 
@@ -1659,25 +1619,14 @@
 		goto unlock_rtnl;
 	}
 
-	efx->state = STATE_RESETTING;
 	EFX_INFO(efx, "resetting (%d)\n", method);
 
-	/* The net_dev->get_stats handler is quite slow, and will fail
-	 * if a fetch is pending over reset. Serialise against it. */
-	spin_lock(&efx->stats_lock);
-	spin_unlock(&efx->stats_lock);
-
-	efx_stop_all(efx);
-	mutex_lock(&efx->mac_lock);
-
-	rc = efx_reset_down(efx, &ecmd);
-	if (rc)
-		goto fail1;
+	efx_reset_down(efx, &ecmd);
 
 	rc = falcon_reset_hw(efx, method);
 	if (rc) {
 		EFX_ERR(efx, "failed to reset hardware\n");
-		goto fail2;
+		goto fail;
 	}
 
 	/* Allow resets to be rescheduled. */
@@ -1689,46 +1638,27 @@
 	 * can respond to requests. */
 	pci_set_master(efx->pci_dev);
 
-	/* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
-	 * case so the driver can talk to external SRAM */
-	rc = falcon_init_nic(efx);
-	if (rc) {
-		EFX_ERR(efx, "failed to initialise NIC\n");
-		goto fail3;
-	}
-
 	/* Leave device stopped if necessary */
 	if (method == RESET_TYPE_DISABLE) {
-		/* Reinitialise the device anyway so the driver unload sequence
-		 * can talk to the external SRAM */
-		falcon_init_nic(efx);
 		rc = -EIO;
-		goto fail4;
+		goto fail;
 	}
 
-	rc = efx_reset_up(efx, &ecmd);
+	rc = efx_reset_up(efx, &ecmd, true);
 	if (rc)
-		goto fail5;
+		goto disable;
 
-	mutex_unlock(&efx->mac_lock);
 	EFX_LOG(efx, "reset complete\n");
-
-	efx->state = STATE_RUNNING;
-	efx_start_all(efx);
-
  unlock_rtnl:
 	rtnl_unlock();
 	return 0;
 
- fail5:
- fail4:
- fail3:
- fail2:
- fail1:
+ fail:
+	efx_reset_up(efx, &ecmd, false);
+ disable:
 	EFX_ERR(efx, "has been disabled\n");
 	efx->state = STATE_DISABLED;
 
-	mutex_unlock(&efx->mac_lock);
 	rtnl_unlock();
 	efx_unregister_netdev(efx);
 	efx_fini_port(efx);
@@ -1801,7 +1731,7 @@
  *
  * Dummy PHY/MAC/Board operations
  *
- * Can be used where the MAC does not implement this operation
+ * Can be used for some unimplemented operations
  * Needed so all function pointers are valid and do not have to be tested
  * before use
  *
@@ -1811,7 +1741,7 @@
 	return 0;
 }
 void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
+void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
 
 static struct efx_phy_operations efx_dummy_phy_operations = {
 	.init		 = efx_port_dummy_op_int,
@@ -1819,20 +1749,14 @@
 	.check_hw        = efx_port_dummy_op_int,
 	.fini		 = efx_port_dummy_op_void,
 	.clear_interrupt = efx_port_dummy_op_void,
-	.reset_xaui      = efx_port_dummy_op_void,
 };
 
-/* Dummy board operations */
-static int efx_nic_dummy_op_int(struct efx_nic *nic)
-{
-	return 0;
-}
-
 static struct efx_board efx_dummy_board_info = {
-	.init    = efx_nic_dummy_op_int,
-	.init_leds = efx_port_dummy_op_int,
-	.set_fault_led = efx_port_dummy_op_blink,
-	.fini	= efx_port_dummy_op_void,
+	.init		= efx_port_dummy_op_int,
+	.init_leds	= efx_port_dummy_op_int,
+	.set_fault_led	= efx_port_dummy_op_blink,
+	.blink		= efx_port_dummy_op_blink,
+	.fini		= efx_port_dummy_op_void,
 };
 
 /**************************************************************************
@@ -1865,7 +1789,7 @@
 	efx->board_info = efx_dummy_board_info;
 
 	efx->net_dev = net_dev;
-	efx->rx_checksum_enabled = 1;
+	efx->rx_checksum_enabled = true;
 	spin_lock_init(&efx->netif_stop_lock);
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
@@ -1878,10 +1802,9 @@
 		channel = &efx->channel[i];
 		channel->efx = efx;
 		channel->channel = i;
-		channel->evqnum = i;
-		channel->work_pending = 0;
+		channel->work_pending = false;
 	}
-	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
+	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
 		tx_queue = &efx->tx_queue[i];
 		tx_queue->efx = efx;
 		tx_queue->queue = i;
@@ -2056,19 +1979,16 @@
 		goto fail5;
 	}
 
-	rc = efx_init_channels(efx);
-	if (rc)
-		goto fail6;
+	efx_init_channels(efx);
 
 	rc = falcon_init_interrupt(efx);
 	if (rc)
-		goto fail7;
+		goto fail6;
 
 	return 0;
 
- fail7:
-	efx_fini_channels(efx);
  fail6:
+	efx_fini_channels(efx);
 	efx_fini_port(efx);
  fail5:
  fail4:
@@ -2105,7 +2025,10 @@
 			      NETIF_F_HIGHDMA | NETIF_F_TSO);
 	if (lro)
 		net_dev->features |= NETIF_F_LRO;
-	efx = net_dev->priv;
+	/* Mask for features that also apply to VLAN devices */
+	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+				   NETIF_F_HIGHDMA | NETIF_F_TSO);
+	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
 	rc = efx_init_struct(efx, type, pci_dev, net_dev);
 	if (rc)
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 3b2f69f..d02937b 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -28,15 +28,21 @@
 /* RX */
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-			  unsigned int len, int checksummed, int discard);
+			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
-extern int efx_flush_queues(struct efx_nic *efx);
+extern void efx_flush_queues(struct efx_nic *efx);
 
 /* Ports */
 extern void efx_reconfigure_port(struct efx_nic *efx);
+extern void __efx_reconfigure_port(struct efx_nic *efx);
+
+/* Reset handling */
+extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
+			bool ok);
 
 /* Global */
 extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -50,7 +56,7 @@
 /* Dummy PHY ops for PHY drivers */
 extern int efx_port_dummy_op_int(struct efx_nic *efx);
 extern void efx_port_dummy_op_void(struct efx_nic *efx);
-extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
+extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
 
 
 extern unsigned int efx_monitor_interval;
@@ -59,7 +65,7 @@
 {
 	EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
 		  channel->channel, raw_smp_processor_id());
-	channel->work_pending = 1;
+	channel->work_pending = true;
 
 	netif_rx_schedule(channel->napi_dev, &channel->napi_str);
 }
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index c53290d..cec15db 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -52,12 +52,11 @@
 #define LOOPBACK_MASK(_efx)			\
 	(1 << (_efx)->loopback_mode)
 
-#define LOOPBACK_INTERNAL(_efx)						\
-	((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+#define LOOPBACK_INTERNAL(_efx)				\
+	(!!(LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)))
 
-#define LOOPBACK_OUT_OF(_from, _to, _mask)		\
-	(((LOOPBACK_MASK(_from) & (_mask)) &&		\
-	  ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+#define LOOPBACK_OUT_OF(_from, _to, _mask)				\
+	((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
 
 /*****************************************************************************/
 
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index e2c75d1..fa98af5 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -17,6 +17,7 @@
 #include "ethtool.h"
 #include "falcon.h"
 #include "gmii.h"
+#include "spi.h"
 #include "mac.h"
 
 const char *efx_loopback_mode_names[] = {
@@ -32,8 +33,6 @@
 	[LOOPBACK_NETWORK]	= "NETWORK",
 };
 
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
-
 struct ethtool_string {
 	char name[ETH_GSTRING_LEN];
 };
@@ -173,6 +172,11 @@
 /* Number of ethtool statistics */
 #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
 
+/* EEPROM range with gPXE configuration */
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+#define EFX_ETHTOOL_EEPROM_MIN 0x100U
+#define EFX_ETHTOOL_EEPROM_MAX 0x400U
+
 /**************************************************************************
  *
  * Ethtool operations
@@ -183,7 +187,7 @@
 /* Identify device by flashing LEDs */
 static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	efx->board_info.blink(efx, 1);
 	schedule_timeout_interruptible(seconds * HZ);
@@ -195,7 +199,7 @@
 int efx_ethtool_get_settings(struct net_device *net_dev,
 			     struct ethtool_cmd *ecmd)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	int rc;
 
 	mutex_lock(&efx->mac_lock);
@@ -209,7 +213,7 @@
 int efx_ethtool_set_settings(struct net_device *net_dev,
 			     struct ethtool_cmd *ecmd)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	int rc;
 
 	mutex_lock(&efx->mac_lock);
@@ -224,7 +228,7 @@
 static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 				    struct ethtool_drvinfo *info)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
 	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
@@ -329,7 +333,10 @@
 	unsigned int n = 0;
 	enum efx_loopback_mode mode;
 
-	/* Interrupt */
+	efx_fill_test(n++, strings, data, &tests->mii,
+		      "core", 0, "mii", NULL);
+	efx_fill_test(n++, strings, data, &tests->nvram,
+		      "core", 0, "nvram", NULL);
 	efx_fill_test(n++, strings, data, &tests->interrupt,
 		      "core", 0, "interrupt", NULL);
 
@@ -349,16 +356,17 @@
 			      "eventq.poll", NULL);
 	}
 
-	/* PHY presence */
-	efx_fill_test(n++, strings, data, &tests->phy_ok,
-		      EFX_PORT_NAME, "phy_ok", NULL);
+	efx_fill_test(n++, strings, data, &tests->registers,
+		      "core", 0, "registers", NULL);
+	efx_fill_test(n++, strings, data, &tests->phy,
+		      EFX_PORT_NAME, "phy", NULL);
 
 	/* Loopback tests */
 	efx_fill_test(n++, strings, data, &tests->loopback_speed,
 		      EFX_PORT_NAME, "loopback.speed", NULL);
 	efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
 		      EFX_PORT_NAME, "loopback.full_duplex", NULL);
-	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
 		if (!(efx->loopback_modes & (1 << mode)))
 			continue;
 		n = efx_fill_loopback_test(efx,
@@ -369,22 +377,24 @@
 	return n;
 }
 
-static int efx_ethtool_get_stats_count(struct net_device *net_dev)
+static int efx_ethtool_get_sset_count(struct net_device *net_dev,
+				      int string_set)
 {
-	return EFX_ETHTOOL_NUM_STATS;
-}
-
-static int efx_ethtool_self_test_count(struct net_device *net_dev)
-{
-	struct efx_nic *efx = net_dev->priv;
-
-	return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return EFX_ETHTOOL_NUM_STATS;
+	case ETH_SS_TEST:
+		return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
+						   NULL, NULL, NULL);
+	default:
+		return -EINVAL;
+	}
 }
 
 static void efx_ethtool_get_strings(struct net_device *net_dev,
 				    u32 string_set, u8 *strings)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct ethtool_string *ethtool_strings =
 		(struct ethtool_string *)strings;
 	int i;
@@ -410,7 +420,7 @@
 				  struct ethtool_stats *stats,
 				  u64 *data)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct efx_ethtool_stat *stat;
 	struct efx_channel *channel;
@@ -442,60 +452,21 @@
 	}
 }
 
-static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
-{
-	int rc;
-
-	/* Our TSO requires TX checksumming, so force TX checksumming
-	 * on when TSO is enabled.
-	 */
-	if (enable) {
-		rc = efx_ethtool_set_tx_csum(net_dev, 1);
-		if (rc)
-			return rc;
-	}
-
-	return ethtool_op_set_tso(net_dev, enable);
-}
-
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
-{
-	struct efx_nic *efx = net_dev->priv;
-	int rc;
-
-	rc = ethtool_op_set_tx_csum(net_dev, enable);
-	if (rc)
-		return rc;
-
-	efx_flush_queues(efx);
-
-	/* Our TSO requires TX checksumming, so disable TSO when
-	 * checksumming is disabled
-	 */
-	if (!enable) {
-		rc = efx_ethtool_set_tso(net_dev, 0);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
 static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	/* No way to stop the hardware doing the checks; we just
 	 * ignore the result.
 	 */
-	efx->rx_checksum_enabled = (enable ? 1 : 0);
+	efx->rx_checksum_enabled = !!enable;
 
 	return 0;
 }
 
 static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	return efx->rx_checksum_enabled;
 }
@@ -503,7 +474,7 @@
 static void efx_ethtool_self_test(struct net_device *net_dev,
 				  struct ethtool_test *test, u64 *data)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_self_tests efx_tests;
 	int offline, already_up;
 	int rc;
@@ -533,15 +504,9 @@
 		goto out;
 
 	/* Perform offline tests only if online tests passed */
-	if (offline) {
-		/* Stop the kernel from sending packets during the test. */
-		efx_stop_queue(efx);
-		rc = efx_flush_queues(efx);
-		if (!rc)
-			rc = efx_offline_test(efx, &efx_tests,
-					      efx->loopback_modes);
-		efx_wake_queue(efx);
-	}
+	if (offline)
+		rc = efx_offline_test(efx, &efx_tests,
+				      efx->loopback_modes);
 
  out:
 	if (!already_up)
@@ -561,22 +526,65 @@
 /* Restart autonegotiation */
 static int efx_ethtool_nway_reset(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	return mii_nway_restart(&efx->mii);
 }
 
 static u32 efx_ethtool_get_link(struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
 	return efx->link_up;
 }
 
+static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_spi_device *spi = efx->spi_eeprom;
+
+	if (!spi)
+		return 0;
+	return min(spi->size, EFX_ETHTOOL_EEPROM_MAX) -
+		min(spi->size, EFX_ETHTOOL_EEPROM_MIN);
+}
+
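
The exposed EEPROM range is the slice of the device between EFX_ETHTOOL_EEPROM_MIN and EFX_ETHTOOL_EEPROM_MAX, clamped to the actual device size, so small parts simply report a shorter (possibly zero) length. A self-contained worked example of that arithmetic (the 0x800-byte part size is only an illustrative value):

#include <stdio.h>

#define EFX_ETHTOOL_EEPROM_MIN 0x100U
#define EFX_ETHTOOL_EEPROM_MAX 0x400U

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int size = 0x800;	/* hypothetical EEPROM size */
	unsigned int len = min_u(size, EFX_ETHTOOL_EEPROM_MAX) -
			   min_u(size, EFX_ETHTOOL_EEPROM_MIN);

	printf("exposed length = 0x%x\n", len);	/* prints 0x300 */
	return 0;
}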
+static int efx_ethtool_get_eeprom(struct net_device *net_dev,
+				  struct ethtool_eeprom *eeprom, u8 *buf)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_spi_device *spi = efx->spi_eeprom;
+	size_t len;
+	int rc;
+
+	rc = falcon_spi_read(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
+			     eeprom->len, &len, buf);
+	eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
+	eeprom->len = len;
+	return rc;
+}
+
+static int efx_ethtool_set_eeprom(struct net_device *net_dev,
+				  struct ethtool_eeprom *eeprom, u8 *buf)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_spi_device *spi = efx->spi_eeprom;
+	size_t len;
+	int rc;
+
+	if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
+		return -EINVAL;
+
+	rc = falcon_spi_write(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
+			      eeprom->len, &len, buf);
+	eeprom->len = len;
+	return rc;
+}
+
 static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 				    struct ethtool_coalesce *coalesce)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	struct efx_channel *channel;
@@ -614,7 +622,7 @@
 static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 				    struct ethtool_coalesce *coalesce)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	unsigned tx_usecs, rx_usecs;
@@ -657,7 +665,7 @@
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 				      struct ethtool_pauseparam *pause)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	enum efx_fc_type flow_control = efx->flow_control;
 	int rc;
 
@@ -680,11 +688,11 @@
 static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
 				       struct ethtool_pauseparam *pause)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 
-	pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
-	pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
-	pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
+	pause->rx_pause = !!(efx->flow_control & EFX_FC_RX);
+	pause->tx_pause = !!(efx->flow_control & EFX_FC_TX);
+	pause->autoneg = !!(efx->flow_control & EFX_FC_AUTO);
 }
 
 
@@ -694,6 +702,9 @@
 	.get_drvinfo		= efx_ethtool_get_drvinfo,
 	.nway_reset		= efx_ethtool_nway_reset,
 	.get_link		= efx_ethtool_get_link,
+	.get_eeprom_len		= efx_ethtool_get_eeprom_len,
+	.get_eeprom		= efx_ethtool_get_eeprom,
+	.set_eeprom		= efx_ethtool_set_eeprom,
 	.get_coalesce		= efx_ethtool_get_coalesce,
 	.set_coalesce		= efx_ethtool_set_coalesce,
 	.get_pauseparam         = efx_ethtool_get_pauseparam,
@@ -701,17 +712,16 @@
 	.get_rx_csum		= efx_ethtool_get_rx_csum,
 	.set_rx_csum		= efx_ethtool_set_rx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= efx_ethtool_set_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= efx_ethtool_set_tso,
+	.set_tso		= ethtool_op_set_tso,
 	.get_flags		= ethtool_op_get_flags,
 	.set_flags		= ethtool_op_set_flags,
-	.self_test_count	= efx_ethtool_self_test_count,
+	.get_sset_count		= efx_ethtool_get_sset_count,
 	.self_test		= efx_ethtool_self_test,
 	.get_strings		= efx_ethtool_get_strings,
 	.phys_id		= efx_ethtool_phys_id,
-	.get_stats_count	= efx_ethtool_get_stats_count,
 	.get_ethtool_stats	= efx_ethtool_get_stats,
 };
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9138ee5..31ed1f4 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@
 /* Max number of internal errors. After this resets will not be performed */
 #define FALCON_MAX_INT_ERRORS 4
 
-/* Maximum period that we wait for flush events. If the flush event
- * doesn't arrive in this period of time then we check if the queue
- * was disabled anyway. */
-#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
+/* We poll for flush events every FALCON_FLUSH_INTERVAL ms, up to
+ * FALCON_FLUSH_POLL_COUNT times. */
+#define FALCON_FLUSH_INTERVAL 10
+#define FALCON_FLUSH_POLL_COUNT 100
 
 /**************************************************************************
  *
@@ -242,7 +242,7 @@
  * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
  * it to be used for event queues, descriptor rings etc.
  */
-static int
+static void
 falcon_init_special_buffer(struct efx_nic *efx,
 			   struct efx_special_buffer *buffer)
 {
@@ -266,8 +266,6 @@
 				     BUF_OWNER_ID_FBUF, 0);
 		falcon_write_sram(efx, &buf_desc, index);
 	}
-
-	return 0;
 }
 
 /* Unmaps a buffer from Falcon and clears the buffer table entries */
@@ -449,16 +447,15 @@
 					   sizeof(efx_qword_t));
 }
 
-int falcon_init_tx(struct efx_tx_queue *tx_queue)
+void falcon_init_tx(struct efx_tx_queue *tx_queue)
 {
 	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
-	int rc;
+
+	tx_queue->flushed = false;
 
 	/* Pin TX descriptor ring */
-	rc = falcon_init_special_buffer(efx, &tx_queue->txd);
-	if (rc)
-		return rc;
+	falcon_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(tx_desc_ptr,
@@ -466,7 +463,7 @@
 			      TX_ISCSI_DDIG_EN, 0,
 			      TX_ISCSI_HDIG_EN, 0,
 			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
-			      TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
+			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
 			      TX_DESCQ_OWNER_ID, 0,
 			      TX_DESCQ_LABEL, tx_queue->queue,
 			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
@@ -474,9 +471,9 @@
 			      TX_NON_IP_DROP_DIS_B0, 1);
 
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
+		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
 	}
 
 	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -485,73 +482,28 @@
 	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
 
-		BUG_ON(tx_queue->queue >= 128); /* HW limit */
+		/* Only 128 bits in this register */
+		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
 
 		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
-		if (efx->net_dev->features & NETIF_F_IP_CSUM)
+		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
 			clear_bit_le(tx_queue->queue, (void *)&reg);
 		else
 			set_bit_le(tx_queue->queue, (void *)&reg);
 		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
 	}
-
-	return 0;
 }
 
-static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
 	efx_oword_t tx_flush_descq;
-	unsigned int read_ptr, i;
 
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     TX_FLUSH_DESCQ_CMD, 1,
 			     TX_FLUSH_DESCQ, tx_queue->queue);
 	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
-		if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == tx_queue->queue)) {
-			EFX_LOG(efx, "tx queue %d flush command succesful\n",
-				tx_queue->queue);
-			return 0;
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		int enabled;
-
-		falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
-				  tx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "tx queue %d disabled without a "
-				"flush event seen\n", tx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -559,9 +511,8 @@
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_desc_ptr;
 
-	/* Stop the hardware using the queue */
-	if (falcon_flush_tx_queue(tx_queue))
-		EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
+	/* The queue should have been flushed */
+	WARN_ON(!tx_queue->flushed);
 
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
@@ -638,29 +589,28 @@
 					   sizeof(efx_qword_t));
 }
 
-int falcon_init_rx(struct efx_rx_queue *rx_queue)
+void falcon_init_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
-	int rc;
-	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
-	int iscsi_digest_en = is_b0;
+	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
+	bool iscsi_digest_en = is_b0;
 
 	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
 		rx_queue->queue, rx_queue->rxd.index,
 		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->flushed = false;
+
 	/* Pin RX descriptor ring */
-	rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
-	if (rc)
-		return rc;
+	falcon_init_special_buffer(efx, &rx_queue->rxd);
 
 	/* Push RX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(rx_desc_ptr,
 			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
 			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
 			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
-			      RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
+			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
 			      RX_DESCQ_OWNER_ID, 0,
 			      RX_DESCQ_LABEL, rx_queue->queue,
 			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
@@ -670,14 +620,11 @@
 			      RX_DESCQ_EN, 1);
 	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 			   rx_queue->queue);
-	return 0;
 }
 
-static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
-	unsigned int read_ptr, i;
 	efx_oword_t rx_flush_descq;
 
 	/* Post a flush command */
@@ -685,75 +632,15 @@
 			     RX_FLUSH_DESCQ_CMD, 1,
 			     RX_FLUSH_DESCQ, rx_queue->queue);
 	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue, ev_failed;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
-		ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
-
-		if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == rx_queue->queue)) {
-			if (ev_failed) {
-				EFX_INFO(efx, "rx queue %d flush command "
-					 "failed\n", rx_queue->queue);
-				return -EAGAIN;
-			} else {
-				EFX_LOG(efx, "rx queue %d flush command "
-					"succesful\n", rx_queue->queue);
-				return 0;
-			}
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		int enabled;
-
-		falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
-				  rx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "rx queue %d disabled without a "
-				"flush event seen\n", rx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
-	int i, rc;
 
-	/* Try and flush the rx queue. This may need to be repeated */
-	for (i = 0; i < 5; i++) {
-		rc = falcon_flush_rx_queue(rx_queue);
-		if (rc == -EAGAIN)
-			continue;
-		break;
-	}
-	if (rc) {
-		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
-		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
-	}
+	/* The queue should already have been flushed */
+	WARN_ON(!rx_queue->flushed);
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -793,7 +680,7 @@
 
 	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
 	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-			    channel->evqnum);
+			    channel->channel);
 }
 
 /* Use HW to insert a SW defined event */
@@ -802,7 +689,7 @@
 	efx_oword_t drv_ev_reg;
 
 	EFX_POPULATE_OWORD_2(drv_ev_reg,
-			     DRV_EV_QID, channel->evqnum,
+			     DRV_EV_QID, channel->channel,
 			     DRV_EV_DATA,
 			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
 	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
@@ -813,8 +700,8 @@
  * Falcon batches TX completion events; the message we receive is of
  * the form "complete all TX events up to this index".
  */
-static inline void falcon_handle_tx_event(struct efx_channel *channel,
-					  efx_qword_t *event)
+static void falcon_handle_tx_event(struct efx_channel *channel,
+				   efx_qword_t *event)
 {
 	unsigned int tx_ev_desc_ptr;
 	unsigned int tx_ev_q_label;
@@ -847,39 +734,19 @@
 	}
 }
 
-/* Check received packet's destination MAC address. */
-static int check_dest_mac(struct efx_rx_queue *rx_queue,
-			  const efx_qword_t *event)
-{
-	struct efx_rx_buffer *rx_buf;
-	struct efx_nic *efx = rx_queue->efx;
-	int rx_ev_desc_ptr;
-	struct ethhdr *eh;
-
-	if (efx->promiscuous)
-		return 1;
-
-	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
-	rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
-	eh = (struct ethhdr *)rx_buf->data;
-	if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
-		return 0;
-	return 1;
-}
-
 /* Detect errors included in the rx_evt_pkt_ok bit. */
 static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 				    const efx_qword_t *event,
-				    unsigned *rx_ev_pkt_ok,
-				    int *discard, int byte_count)
+				    bool *rx_ev_pkt_ok,
+				    bool *discard)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
-	unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
-	unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
-	unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
-	unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
-	int snap, non_ip;
+	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+	bool rx_ev_other_err, rx_ev_pause_frm;
+	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
+	unsigned rx_ev_pkt_type;
 
 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
 	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
@@ -903,41 +770,6 @@
 			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
 			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
 
-	snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
-		(rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
-	non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
-
-	/* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
-	 * length field of an LLC frame, which sets TOBE_DISC. We could set
-	 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
-	 * protect the RX block).
-	 *
-	 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
-	 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
-	 *                       LLC can't encapsulate IP, so by definition
-	 *                       these packets are NON_IP.
-	 *
-	 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
-	 * to check this.
-	 */
-	if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
-		/* If all the other flags are zero then we can state the
-		 * entire packet is ok, which will flag to the kernel not
-		 * to recalculate checksums.
-		 */
-		if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
-			*rx_ev_pkt_ok = 1;
-
-		rx_ev_tobe_disc = 0;
-
-		/* TOBE_DISC is set for unicast mismatch.  But given that
-		 * we can't trust TOBE_DISC here, we must validate the dest
-		 * MAC address ourselves.
-		 */
-		if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
-			rx_ev_tobe_disc = 1;
-	}
-
 	/* Count errors that are not in MAC stats. */
 	if (rx_ev_frm_trunc)
 		++rx_queue->channel->n_rx_frm_trunc;
@@ -961,7 +793,7 @@
 #ifdef EFX_ENABLE_DEBUG
 	if (rx_ev_other_err) {
 		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
-			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
+			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
 			    rx_queue->queue, EFX_QWORD_VAL(*event),
 			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
 			    rx_ev_ip_hdr_chksum_err ?
@@ -972,8 +804,7 @@
 			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
 			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
 			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
-			    rx_ev_pause_frm ? " [PAUSE]" : "",
-			    snap ? " [SNAP/LLC]" : "");
+			    rx_ev_pause_frm ? " [PAUSE]" : "");
 	}
 #endif
 
@@ -1006,13 +837,13 @@
  * Also "is multicast" and "matches multicast filter" flags can be used to
  * discard non-matching multicast packets.
  */
-static inline int falcon_handle_rx_event(struct efx_channel *channel,
-					 const efx_qword_t *event)
+static void falcon_handle_rx_event(struct efx_channel *channel,
+				   const efx_qword_t *event)
 {
-	unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
-	unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
+	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
-	int discard = 0, checksummed;
+	bool rx_ev_pkt_ok, discard = false, checksummed;
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
 
@@ -1022,16 +853,14 @@
 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
 	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
 	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
+	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
 
-	rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
-	rx_queue = &efx->rx_queue[rx_ev_q_label];
+	rx_queue = &efx->rx_queue[channel->channel];
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
 	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
+	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
-		return rx_ev_q_label;
-	}
 
 	if (likely(rx_ev_pkt_ok)) {
 		/* If packet is marked as OK and packet type is TCP/IPv4 or
@@ -1040,8 +869,8 @@
 		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
 	} else {
 		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
-					&discard, rx_ev_byte_cnt);
-		checksummed = 0;
+					&discard);
+		checksummed = false;
 	}
 
 	/* Detect multicast packets that didn't match the filter */
@@ -1051,14 +880,12 @@
 			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
 
 		if (unlikely(!rx_ev_mcast_hash_match))
-			discard = 1;
+			discard = true;
 	}
 
 	/* Handle received packet */
 	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
 		      checksummed, discard);
-
-	return rx_ev_q_label;
 }
 
 /* Global events are basically PHY events */
@@ -1066,23 +893,23 @@
 				       efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
-	int is_phy_event = 0, handled = 0;
+	bool is_phy_event = false, handled = false;
 
 	/* Check for interrupt on either port.  Some boards have a
 	 * single PHY wired to the interrupt line for port 1. */
 	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
 	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
 	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
-		is_phy_event = 1;
+		is_phy_event = true;
 
 	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
-	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
-		is_phy_event = 1;
+	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
+		is_phy_event = true;
 
 	if (is_phy_event) {
 		efx->phy_op->clear_interrupt(efx);
 		queue_work(efx->workqueue, &efx->reconfigure_work);
-		handled = 1;
+		handled = true;
 	}
 
 	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
@@ -1092,7 +919,7 @@
 		atomic_inc(&efx->rx_reset);
 		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
 				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
-		handled = 1;
+		handled = true;
 	}
 
 	if (!handled)
@@ -1163,13 +990,12 @@
 	}
 }
 
-int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
+int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
 {
 	unsigned int read_ptr;
 	efx_qword_t event, *p_event;
 	int ev_code;
-	int rxq;
-	int rxdmaqs = 0;
+	int rx_packets = 0;
 
 	read_ptr = channel->eventq_read_ptr;
 
@@ -1191,9 +1017,8 @@
 
 		switch (ev_code) {
 		case RX_IP_EV_DECODE:
-			rxq = falcon_handle_rx_event(channel, &event);
-			rxdmaqs |= (1 << rxq);
-			(*rx_quota)--;
+			falcon_handle_rx_event(channel, &event);
+			++rx_packets;
 			break;
 		case TX_IP_EV_DECODE:
 			falcon_handle_tx_event(channel, &event);
@@ -1220,10 +1045,10 @@
 		/* Increment read pointer */
 		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
 
-	} while (*rx_quota);
+	} while (rx_packets < rx_quota);
 
 	channel->eventq_read_ptr = read_ptr;
-	return rxdmaqs;
+	return rx_packets;
 }
 
 void falcon_set_int_moderation(struct efx_channel *channel)
@@ -1251,7 +1076,7 @@
 				     TIMER_VAL, 0);
 	}
 	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
-				  channel->evqnum);
+				  channel->channel);
 
 }
 
@@ -1265,20 +1090,17 @@
 	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
 }
 
-int falcon_init_eventq(struct efx_channel *channel)
+void falcon_init_eventq(struct efx_channel *channel)
 {
 	efx_oword_t evq_ptr;
 	struct efx_nic *efx = channel->efx;
-	int rc;
 
 	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
 		channel->channel, channel->eventq.index,
 		channel->eventq.index + channel->eventq.entries - 1);
 
 	/* Pin event queue buffer */
-	rc = falcon_init_special_buffer(efx, &channel->eventq);
-	if (rc)
-		return rc;
+	falcon_init_special_buffer(efx, &channel->eventq);
 
 	/* Fill event queue with all ones (i.e. empty events) */
 	memset(channel->eventq.addr, 0xff, channel->eventq.len);
@@ -1289,11 +1111,9 @@
 			     EVQ_SIZE, FALCON_EVQ_ORDER,
 			     EVQ_BUF_BASE_ID, channel->eventq.index);
 	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->evqnum);
+			   channel->channel);
 
 	falcon_set_int_moderation(channel);
-
-	return 0;
 }
 
 void falcon_fini_eventq(struct efx_channel *channel)
@@ -1304,7 +1124,7 @@
 	/* Remove event queue from card */
 	EFX_ZERO_OWORD(eventq_ptr);
 	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->evqnum);
+			   channel->channel);
 
 	/* Unpin event queue */
 	falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1331,6 +1151,121 @@
 	falcon_generate_event(channel, &test_event);
 }
 
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+
+static void falcon_poll_flush_events(struct efx_nic *efx)
+{
+	struct efx_channel *channel = &efx->channel[0];
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	unsigned int read_ptr, i;
+
+	read_ptr = channel->eventq_read_ptr;
+	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+		efx_qword_t *event = falcon_event(channel, read_ptr);
+		int ev_code, ev_sub_code, ev_queue;
+		bool ev_failed;
+		if (!falcon_event_present(event))
+			break;
+
+		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+		/* Advance past this event unconditionally; otherwise a
+		 * non-driver event ahead of the flush completions would
+		 * pin the loop on the same entry. */
+		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		if (ev_code != DRIVER_EV_DECODE)
+			continue;
+
+		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+		switch (ev_sub_code) {
+		case TX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_TX_DESCQ_ID);
+			if (ev_queue < EFX_TX_QUEUE_COUNT) {
+				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue->flushed = true;
+			}
+			break;
+		case RX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_RX_DESCQ_ID);
+			ev_failed = EFX_QWORD_FIELD(*event,
+						    DRIVER_EV_RX_FLUSH_FAIL);
+			if (ev_queue < efx->n_rx_queues) {
+				rx_queue = efx->rx_queue + ev_queue;
+
+				/* retry the rx flush */
+				if (ev_failed)
+					falcon_flush_rx_queue(rx_queue);
+				else
+					rx_queue->flushed = true;
+			}
+			break;
+		}
+	}
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int falcon_flush_queues(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int i;
+	bool outstanding;
+
+	/* Issue flush requests */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		tx_queue->flushed = false;
+		falcon_flush_tx_queue(tx_queue);
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		rx_queue->flushed = false;
+		falcon_flush_rx_queue(rx_queue);
+	}
+
+	/* Poll the evq looking for flush completions. Since we're not pushing
+	 * any more rx or tx descriptors at this point, we're in no danger of
+	 * overflowing the evq whilst we wait */
+	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
+		msleep(FALCON_FLUSH_INTERVAL);
+		falcon_poll_flush_events(efx);
+
+		/* Check if every queue has been successfully flushed */
+		outstanding = false;
+		efx_for_each_tx_queue(tx_queue, efx)
+			outstanding |= !tx_queue->flushed;
+		efx_for_each_rx_queue(rx_queue, efx)
+			outstanding |= !rx_queue->flushed;
+		if (!outstanding)
+			return 0;
+	}
+
+	/* Mark the queues as all flushed. We're going to return failure
+	 * leading to a reset, or fake up success anyway. "flushed" now
+	 * indicates that we tried to flush. */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (!tx_queue->flushed)
+			EFX_ERR(efx, "tx queue %d flush command timed out\n",
+				tx_queue->queue);
+		tx_queue->flushed = true;
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		if (!rx_queue->flushed)
+			EFX_ERR(efx, "rx queue %d flush command timed out\n",
+				rx_queue->queue);
+		rx_queue->flushed = true;
+	}
+
+	if (EFX_WORKAROUND_7803(efx))
+		return 0;
+
+	return -ETIMEDOUT;
+}
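
Since falcon_fini_tx()/falcon_fini_rx() no longer flush their queues and only WARN if the flush did not happen, callers are expected to flush everything in one pass first; the total wait is bounded by FALCON_FLUSH_INTERVAL * FALCON_FLUSH_POLL_COUNT = 1000 ms. A rough sketch of the expected calling pattern (example_fini_channels() is illustrative, not part of this patch, and assumes the driver's queue iterators and reset scheduling):

/* Sketch under the above assumptions. */
static void example_fini_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	/* Flush TX and RX queues in parallel; on timeout fall back to
	 * an invisible reset, as the old per-queue code did. */
	if (falcon_flush_queues(efx))
		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);

	efx_for_each_tx_queue(tx_queue, efx)
		falcon_fini_tx(tx_queue);
	efx_for_each_rx_queue(rx_queue, efx)
		falcon_fini_rx(rx_queue);
}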
 
 /**************************************************************************
  *
@@ -1371,7 +1306,7 @@
 
 	/* Force processing of all the channels to get the EVQ RPTRs up to
 	   date */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel(channel, efx)
 		efx_schedule_channel(channel);
 }
 
@@ -1439,10 +1374,11 @@
 			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
 	}
 
-	/* Disable DMA bus mastering on both devices */
+	/* Disable both devices */
 	pci_disable_device(efx->pci_dev);
 	if (FALCON_IS_DUAL_FUNC(efx))
 		pci_disable_device(nic_data->pci_dev2);
+	falcon_disable_interrupts(efx);
 
 	if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
 		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1589,7 +1525,7 @@
 	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
 	     offset += 0x10) {
 		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
-				     i % efx->rss_queues);
+				     i % efx->n_rx_queues);
 		falcon_writel(efx, &dword, offset);
 		i++;
 	}
@@ -1621,7 +1557,7 @@
 	}
 
 	/* Hook MSI or MSI-X interrupt */
-	efx_for_each_channel_with_interrupt(channel, efx) {
+	efx_for_each_channel(channel, efx) {
 		rc = request_irq(channel->irq, falcon_msi_interrupt,
 				 IRQF_PROBE_SHARED, /* Not shared */
 				 efx->name, channel);
@@ -1634,7 +1570,7 @@
 	return 0;
 
  fail2:
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel(channel, efx)
 		free_irq(channel->irq, channel);
  fail1:
 	return rc;
@@ -1646,7 +1582,7 @@
 	efx_oword_t reg;
 
 	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx) {
+	efx_for_each_channel(channel, efx) {
 		if (channel->irq)
 			free_irq(channel->irq, channel);
 	}
@@ -1669,69 +1605,200 @@
  **************************************************************************
  */
 
-#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
 
 /* Wait for SPI command completion */
 static int falcon_spi_wait(struct efx_nic *efx)
 {
+	unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10);
 	efx_oword_t reg;
-	int cmd_en, timer_active;
-	int count;
+	bool cmd_en, timer_active;
 
-	count = 0;
-	do {
+	for (;;) {
 		falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
 		cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
 		timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
 		if (!cmd_en && !timer_active)
 			return 0;
-		udelay(10);
-	} while (++count < 10000); /* wait upto 100msec */
-	EFX_ERR(efx, "timed out waiting for SPI\n");
-	return -ETIMEDOUT;
+		if (time_after_eq(jiffies, timeout)) {
+			EFX_ERR(efx, "timed out waiting for SPI\n");
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+	}
 }
 
-static int
-falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
-		unsigned int address, unsigned int addr_len,
-		void *data, unsigned int len)
+static int falcon_spi_cmd(const struct efx_spi_device *spi,
+			  unsigned int command, int address,
+			  const void *in, void *out, unsigned int len)
 {
+	struct efx_nic *efx = spi->efx;
+	bool addressed = (address >= 0);
+	bool reading = (out != NULL);
 	efx_oword_t reg;
 	int rc;
 
-	BUG_ON(len > FALCON_SPI_MAX_LEN);
+	/* Input validation */
+	if (len > FALCON_SPI_MAX_LEN)
+		return -EINVAL;
 
 	/* Check SPI not currently being accessed */
 	rc = falcon_spi_wait(efx);
 	if (rc)
 		return rc;
 
-	/* Program address register */
-	EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
-	falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+	/* Program address register, if we have an address */
+	if (addressed) {
+		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
+		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+	}
 
-	/* Issue read command */
+	/* Program data register, if we have data */
+	if (in != NULL) {
+		memcpy(&reg, in, len);
+		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
+	}
+
+	/* Issue read/write command */
 	EFX_POPULATE_OWORD_7(reg,
 			     EE_SPI_HCMD_CMD_EN, 1,
-			     EE_SPI_HCMD_SF_SEL, device_id,
+			     EE_SPI_HCMD_SF_SEL, spi->device_id,
 			     EE_SPI_HCMD_DABCNT, len,
-			     EE_SPI_HCMD_READ, EE_SPI_READ,
+			     EE_SPI_HCMD_READ, reading,
 			     EE_SPI_HCMD_DUBCNT, 0,
-			     EE_SPI_HCMD_ADBCNT, addr_len,
+			     EE_SPI_HCMD_ADBCNT,
+			     (addressed ? spi->addr_len : 0),
 			     EE_SPI_HCMD_ENC, command);
 	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
 
-	/* Wait for read to complete */
+	/* Wait for read/write to complete */
 	rc = falcon_spi_wait(efx);
 	if (rc)
 		return rc;
 
 	/* Read data */
-	falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
-	memcpy(data, &reg, len);
+	if (out != NULL) {
+		falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
+		memcpy(out, &reg, len);
+	}
+
 	return 0;
 }
 
+static unsigned int
+falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
+{
+	return min(FALCON_SPI_MAX_LEN,
+		   (spi->block_size - (start & (spi->block_size - 1))));
+}
+
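
falcon_spi_write_limit() keeps each write inside the controller's FALCON_SPI_MAX_LEN data register and inside one device write block, so a write never crosses a block (page) boundary. A standalone worked example (the 8-byte block size is only an illustrative value):

#include <stdio.h>

#define FALCON_SPI_MAX_LEN 16U		/* sizeof(efx_oword_t) */

static unsigned int write_limit(unsigned int block_size, unsigned int start)
{
	unsigned int to_block_end = block_size - (start & (block_size - 1));

	return to_block_end < FALCON_SPI_MAX_LEN ?
		to_block_end : FALCON_SPI_MAX_LEN;
}

int main(void)
{
	/* Hypothetical EEPROM with an 8-byte write page */
	printf("%u\n", write_limit(8, 0x1fe));	/* 2: only 2 bytes left in page */
	printf("%u\n", write_limit(8, 0x200));	/* 8: whole page fits in 16 */
	return 0;
}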
+static inline u8
+efx_spi_munge_command(const struct efx_spi_device *spi,
+		      const u8 command, const unsigned int address)
+{
+	return command | (((address >> 8) & spi->munge_address) << 3);
+}
+
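
efx_spi_munge_command() covers small EEPROMs that take a one-byte address: for those parts munge_address is 1, and address bit 8 is folded into bit 3 of the opcode. A standalone worked example (the 0x03 opcode and 0x1A3 offset are illustrative values):

#include <stdio.h>

/* Fold address bit 8 into opcode bit 3 when munge_address is set */
static unsigned char munge_command(unsigned char command,
				   unsigned int address,
				   unsigned int munge_address)
{
	return command | (((address >> 8) & munge_address) << 3);
}

int main(void)
{
	/* Hypothetical 512-byte EEPROM (one-byte address, munge_address = 1):
	 * offset 0x1A3 sends opcode 0x0B plus address byte 0xA3. */
	printf("0x%02x\n", munge_command(0x03, 0x1A3, 1));	/* 0x0b */
	printf("0x%02x\n", munge_command(0x03, 0x0A3, 1));	/* 0x03 */
	return 0;
}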
+
+static int falcon_spi_fast_wait(const struct efx_spi_device *spi)
+{
+	u8 status;
+	int i, rc;
+
+	/* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
+	for (i = 0; i < 50; i++) {
+		udelay(20);
+
+		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
+				    &status, sizeof(status));
+		if (rc)
+			return rc;
+		if (!(status & SPI_STATUS_NRDY))
+			return 0;
+	}
+	EFX_ERR(spi->efx,
+		"timed out waiting for device %d last status=0x%02x\n",
+		spi->device_id, status);
+	return -ETIMEDOUT;
+}
+
+int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
+		    size_t len, size_t *retlen, u8 *buffer)
+{
+	unsigned int command, block_len, pos = 0;
+	int rc = 0;
+
+	while (pos < len) {
+		block_len = min((unsigned int)len - pos,
+				FALCON_SPI_MAX_LEN);
+
+		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
+				    buffer + pos, block_len);
+		if (rc)
+			break;
+		pos += block_len;
+
+		/* Avoid locking up the system */
+		cond_resched();
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+	}
+
+	if (retlen)
+		*retlen = pos;
+	return rc;
+}
+
+int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
+		     size_t len, size_t *retlen, const u8 *buffer)
+{
+	u8 verify_buffer[FALCON_SPI_MAX_LEN];
+	unsigned int command, block_len, pos = 0;
+	int rc = 0;
+
+	while (pos < len) {
+		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
+		if (rc)
+			break;
+
+		block_len = min((unsigned int)len - pos,
+				falcon_spi_write_limit(spi, start + pos));
+		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+		rc = falcon_spi_cmd(spi, command, start + pos,
+				    buffer + pos, NULL, block_len);
+		if (rc)
+			break;
+
+		rc = falcon_spi_fast_wait(spi);
+		if (rc)
+			break;
+
+		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		rc = falcon_spi_cmd(spi, command, start + pos,
+				    NULL, verify_buffer, block_len);
+		if (memcmp(verify_buffer, buffer + pos, block_len)) {
+			rc = -EIO;
+			break;
+		}
+
+		pos += block_len;
+
+		/* Avoid locking up the system */
+		cond_resched();
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+	}
+
+	if (retlen)
+		*retlen = pos;
+	return rc;
+}
+
 /**************************************************************************
  *
  * MAC wrapper
@@ -1812,7 +1879,7 @@
 {
 	efx_oword_t reg;
 	int link_speed;
-	unsigned int tx_fc;
+	bool tx_fc;
 
 	if (efx->link_options & GM_LPA_10000)
 		link_speed = 0x3;
@@ -1847,7 +1914,7 @@
 	/* Transmission of pause frames when RX crosses the threshold is
 	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
 	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
-	tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
+	tx_fc = !!(efx->flow_control & EFX_FC_TX);
 	falcon_read(efx, &reg, RX_CFG_REG_KER);
 	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
@@ -1887,8 +1954,10 @@
 
 	/* Wait for transfer to complete */
 	for (i = 0; i < 400; i++) {
-		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
+		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
+			rmb(); /* Ensure the stats are valid. */
 			return 0;
+		}
 		udelay(10);
 	}
 
@@ -1951,7 +2020,7 @@
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
 			      int addr, int value)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 
@@ -2019,7 +2088,7 @@
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-	struct efx_nic *efx = net_dev->priv;
+	struct efx_nic *efx = netdev_priv(net_dev);
 	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 	int value = -1;
@@ -2120,7 +2189,7 @@
 		return rc;
 
 	/* Set up GMII structure for PHY */
-	efx->mii.supports_gmii = 1;
+	efx->mii.supports_gmii = true;
 	falcon_init_mdio(&efx->mii);
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
@@ -2168,6 +2237,170 @@
 	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
 }
 
+
+/**************************************************************************
+ *
+ * Falcon test code
+ *
+ **************************************************************************/
+
+int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+{
+	struct falcon_nvconfig *nvconfig;
+	struct efx_spi_device *spi;
+	void *region;
+	int rc, magic_num, struct_ver;
+	__le16 *word, *limit;
+	u32 csum;
+
+	region = kmalloc(NVCONFIG_END, GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+	nvconfig = region + NVCONFIG_OFFSET;
+
+	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
+	rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region);
+	if (rc) {
+		EFX_ERR(efx, "Failed to read %s\n",
+			efx->spi_flash ? "flash" : "EEPROM");
+		rc = -EIO;
+		goto out;
+	}
+
+	magic_num = le16_to_cpu(nvconfig->board_magic_num);
+	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
+
+	rc = -EINVAL;
+	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
+		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
+		goto out;
+	}
+	if (struct_ver < 2) {
+		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
+		goto out;
+	} else if (struct_ver < 4) {
+		word = &nvconfig->board_magic_num;
+		limit = (__le16 *) (nvconfig + 1);
+	} else {
+		word = region;
+		limit = region + NVCONFIG_END;
+	}
+	for (csum = 0; word < limit; ++word)
+		csum += le16_to_cpu(*word);
+
+	if (~csum & 0xffff) {
+		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
+		goto out;
+	}
+
+	rc = 0;
+	if (nvconfig_out)
+		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
+
+ out:
+	kfree(region);
+	return rc;
+}
+
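
falcon_read_nvram() accepts the region when the 16-bit word sum over the checksummed range, including the stored checksum word itself, ends with all of its low 16 bits set. A standalone example of the same rule (endian conversion omitted; the word values are made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool nvram_csum_ok(const uint16_t *words, size_t n)
{
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i < n; i++)
		csum += words[i];

	return !(~csum & 0xffff);	/* low 16 bits must all be 1 */
}

int main(void)
{
	/* Stored checksum chosen so 0x1234 + 0x0001 + 0xedca == 0xffff */
	uint16_t good[] = { 0x1234, 0x0001, 0xedca };
	uint16_t bad[]  = { 0x1234, 0x0001, 0x0000 };

	printf("%d %d\n", nvram_csum_ok(good, 3), nvram_csum_ok(bad, 3));
	return 0;
}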
+/* Registers tested in the falcon register test */
+static struct {
+	unsigned address;
+	efx_oword_t mask;
+} efx_test_registers[] = {
+	{ ADR_REGION_REG_KER,
+	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+	{ RX_CFG_REG_KER,
+	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
+	{ TX_CFG_REG_KER,
+	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
+	{ TX_CFG2_REG_KER,
+	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+	{ MAC0_CTRL_REG_KER,
+	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
+	{ SRM_TX_DC_CFG_REG_KER,
+	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ RX_DC_CFG_REG_KER,
+	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
+	{ RX_DC_PF_WM_REG_KER,
+	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+	{ DP_CTRL_REG,
+	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_GLB_CFG_REG,
+	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_TX_CFG_REG,
+	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_RX_CFG_REG,
+	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_RX_PARAM_REG,
+	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_FC_REG,
+	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
+	{ XM_ADR_LO_REG,
+	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ XX_SD_CTL_REG,
+	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+};
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+				     const efx_oword_t *mask)
+{
+	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
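
efx_masked_compare_oword() reports a mismatch only in bits covered by the register's mask, so read-only or self-clearing bits outside the mask can never fail the test. A minimal standalone illustration of the same masked comparison on a single 64-bit word:

#include <stdio.h>
#include <stdint.h>

/* Compare two values, ignoring bits outside the mask */
static int masked_differ(uint64_t a, uint64_t b, uint64_t mask)
{
	return ((a ^ b) & mask) != 0;
}

int main(void)
{
	/* Bit 0 masked out (e.g. a self-clearing bit): no mismatch reported */
	printf("%d\n", masked_differ(0x10, 0x11, ~1ULL));	/* 0 */
	printf("%d\n", masked_differ(0x10, 0x30, ~1ULL));	/* 1 */
	return 0;
}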
+int falcon_test_registers(struct efx_nic *efx)
+{
+	unsigned address = 0, i, j;
+	efx_oword_t mask, imask, original, reg, buf;
+
+	/* Falcon should be in loopback to isolate the XMAC from the PHY */
+	WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
+		address = efx_test_registers[i].address;
+		mask = imask = efx_test_registers[i].mask;
+		EFX_INVERT_OWORD(imask);
+
+		falcon_read(efx, &original, address);
+
+		/* bit sweep on and off */
+		for (j = 0; j < 128; j++) {
+			if (!EFX_EXTRACT_OWORD32(mask, j, j))
+				continue;
+
+			/* Test this testable bit can be set in isolation */
+			EFX_AND_OWORD(reg, original, mask);
+			EFX_SET_OWORD32(reg, j, j, 1);
+
+			falcon_write(efx, &reg, address);
+			falcon_read(efx, &buf, address);
+
+			if (efx_masked_compare_oword(&reg, &buf, &mask))
+				goto fail;
+
+			/* Test this testable bit can be cleared in isolation */
+			EFX_OR_OWORD(reg, original, mask);
+			EFX_SET_OWORD32(reg, j, j, 0);
+
+			falcon_write(efx, &reg, address);
+			falcon_read(efx, &buf, address);
+
+			if (efx_masked_compare_oword(&reg, &buf, &mask))
+				goto fail;
+		}
+
+		falcon_write(efx, &original, address);
+	}
+
+	return 0;
+
+fail:
+	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+	return -EIO;
+}
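falcon_test_registers() is a masked bit sweep: each bit selected by a register's mask is written as 1 over (original & mask) and then as 0 over (original | mask), and the value read back must match the written value wherever the mask is set. A stripped-down sketch of the per-bit step on a plain 64-bit value, with hypothetical rd()/wr() accessors standing in for the driver's oword helpers:

/* Sketch only - the real test works on 128-bit owords through
 * falcon_read()/falcon_write() and the EFX_*_OWORD macros. */
static bool test_one_bit(u64 original, u64 mask, unsigned int bit,
			 u64 (*rd)(void), void (*wr)(u64))
{
	u64 pattern;

	/* The bit can be set in isolation */
	pattern = (original & mask) | (1ULL << bit);
	wr(pattern);
	if ((rd() ^ pattern) & mask)
		return false;

	/* The bit can be cleared in isolation */
	pattern = (original | mask) & ~(1ULL << bit);
	wr(pattern);
	if ((rd() ^ pattern) & mask)
		return false;

	return true;
}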
+
 /**************************************************************************
  *
  * Device reset
@@ -2305,68 +2538,103 @@
 	return -ETIMEDOUT;
 }
 
+static int falcon_spi_device_init(struct efx_nic *efx,
+				  struct efx_spi_device **spi_device_ret,
+				  unsigned int device_id, u32 device_type)
+{
+	struct efx_spi_device *spi_device;
+
+	if (device_type != 0) {
+		spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
+		if (!spi_device)
+			return -ENOMEM;
+		spi_device->device_id = device_id;
+		spi_device->size =
+			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
+		spi_device->addr_len =
+			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
+		spi_device->munge_address = (spi_device->size == 1 << 9 &&
+					     spi_device->addr_len == 1);
+		spi_device->block_size =
+			1 << SPI_DEV_TYPE_FIELD(device_type,
+						SPI_DEV_TYPE_BLOCK_SIZE);
+
+		spi_device->efx = efx;
+	} else {
+		spi_device = NULL;
+	}
+
+	kfree(*spi_device_ret);
+	*spi_device_ret = spi_device;
+	return 0;
+}
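The device_type word is self-describing: SIZE and BLOCK_SIZE are stored as log2 values while ADDR_LEN is stored directly. As a worked example (reusing the SPI_DEV_TYPE_* definitions added to falcon_hwdefs.h), the small-EEPROM type built later in falcon_probe_spi_devices() decodes like this:

/* Illustrative decode, not part of the patch */
u32 type = (9 << SPI_DEV_TYPE_SIZE_LBN)
	 | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
	 | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);

unsigned size     = 1 << SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_SIZE);       /* 512 B */
unsigned addr_len = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ADDR_LEN);        /* 1 byte */
unsigned block    = 1 << SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_BLOCK_SIZE); /* 8 B */
/* size == 512 with addr_len == 1 is exactly the case that sets ->munge_address */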
+
+
+static void falcon_remove_spi_devices(struct efx_nic *efx)
+{
+	kfree(efx->spi_eeprom);
+	efx->spi_eeprom = NULL;
+	kfree(efx->spi_flash);
+	efx->spi_flash = NULL;
+}
+
 /* Extract non-volatile configuration */
 static int falcon_probe_nvconfig(struct efx_nic *efx)
 {
 	struct falcon_nvconfig *nvconfig;
-	efx_oword_t nic_stat;
-	int device_id;
-	unsigned addr_len;
-	size_t offset, len;
-	int magic_num, struct_ver, board_rev;
+	int board_rev;
 	int rc;
 
-	/* Find the boot device. */
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
-	if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
-		device_id = EE_SPI_FLASH;
-		addr_len = 3;
-	} else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
-		device_id = EE_SPI_EEPROM;
-		addr_len = 2;
-	} else {
-		return -ENODEV;
-	}
-
 	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
+	if (!nvconfig)
+		return -ENOMEM;
 
-	/* Read the whole configuration structure into memory. */
-	for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
-		len = min(sizeof(*nvconfig) - offset,
-			  (size_t) FALCON_SPI_MAX_LEN);
-		rc = falcon_spi_read(efx, device_id, SPI_READ,
-				     NVCONFIG_BASE + offset, addr_len,
-				     (char *)nvconfig + offset, len);
-		if (rc)
-			goto out;
+	rc = falcon_read_nvram(efx, nvconfig);
+	if (rc == -EINVAL) {
+		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+		efx->phy_type = PHY_TYPE_NONE;
+		efx->mii.phy_id = PHY_ADDR_INVALID;
+		board_rev = 0;
+		rc = 0;
+	} else if (rc) {
+		goto fail1;
+	} else {
+		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
+		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
+
+		efx->phy_type = v2->port0_phy_type;
+		efx->mii.phy_id = v2->port0_phy_addr;
+		board_rev = le16_to_cpu(v2->board_revision);
+
+		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
+			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
+			rc = falcon_spi_device_init(efx, &efx->spi_flash,
+						    EE_SPI_FLASH,
+						    le32_to_cpu(fl));
+			if (rc)
+				goto fail2;
+			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
+						    EE_SPI_EEPROM,
+						    le32_to_cpu(ee));
+			if (rc)
+				goto fail2;
+		}
 	}
 
 	/* Read the MAC addresses */
 	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
 
-	/* Read the board configuration. */
-	magic_num = le16_to_cpu(nvconfig->board_magic_num);
-	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
-
-	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
-		EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
-			"therefore using defaults\n", magic_num, struct_ver);
-		efx->phy_type = PHY_TYPE_NONE;
-		efx->mii.phy_id = PHY_ADDR_INVALID;
-		board_rev = 0;
-	} else {
-		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
-
-		efx->phy_type = v2->port0_phy_type;
-		efx->mii.phy_id = v2->port0_phy_addr;
-		board_rev = le16_to_cpu(v2->board_revision);
-	}
-
 	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
 
 	efx_set_board_info(efx, board_rev);
 
- out:
+	kfree(nvconfig);
+	return 0;
+
+ fail2:
+	falcon_remove_spi_devices(efx);
+ fail1:
 	kfree(nvconfig);
 	return rc;
 }
@@ -2417,6 +2685,86 @@
 	return 0;
 }
 
+/* Probe all SPI devices on the NIC */
+static void falcon_probe_spi_devices(struct efx_nic *efx)
+{
+	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
+	bool has_flash, has_eeprom, boot_is_external;
+
+	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
+	falcon_read(efx, &nic_stat, NIC_STAT_REG);
+	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+
+	has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
+	has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
+	boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
+
+	if (has_flash) {
+		/* Default flash SPI device: Atmel AT25F1024
+		 * 128 KB, 24-bit address, 32 KB erase block,
+		 * 256 B write block
+		 */
+		u32 flash_device_type =
+			(17 << SPI_DEV_TYPE_SIZE_LBN)
+			| (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+			| (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
+			| (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
+			| (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+
+		falcon_spi_device_init(efx, &efx->spi_flash,
+				       EE_SPI_FLASH, flash_device_type);
+
+		if (!boot_is_external) {
+			/* Disable VPD and set clock dividers to safe
+			 * values for initial programming.
+			 */
+			EFX_LOG(efx, "Booted from internal ASIC settings;"
+				" setting SPI config\n");
+			EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
+					     /* 125 MHz / 7 ~= 20 MHz */
+					     EE_SF_CLOCK_DIV, 7,
+					     /* 125 MHz / 63 ~= 2 MHz */
+					     EE_EE_CLOCK_DIV, 63);
+			falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+		}
+	}
+
+	if (has_eeprom) {
+		u32 eeprom_device_type;
+
+		/* If it has no flash, it must have a large EEPROM
+		 * for chip config; otherwise check whether 9-bit
+		 * addressing is used for VPD configuration
+		 */
+		if (has_flash &&
+		    (!boot_is_external ||
+		     EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
+			/* Default SPI device: Atmel AT25040 or similar
+			 * 512 B, 9-bit address, 8 B write block
+			 */
+			eeprom_device_type =
+				(9 << SPI_DEV_TYPE_SIZE_LBN)
+				| (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+				| (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+		} else {
+			/* "Large" SPI device: Atmel AT25640 or similar
+			 * 8 KB, 16-bit address, 32 B write block
+			 */
+			eeprom_device_type =
+				(13 << SPI_DEV_TYPE_SIZE_LBN)
+				| (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+				| (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+		}
+
+		falcon_spi_device_init(efx, &efx->spi_eeprom,
+				       EE_SPI_EEPROM, eeprom_device_type);
+	}
+
+	EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
+		(has_flash ? "present" : "absent"),
+		(has_eeprom ? "present" : "absent"));
+}
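For reference, the default flash_device_type above decodes to exactly the geometry promised in its comment:

/* Worked decode (illustrative):
 *   SIZE       = 17   -> 1 << 17 = 128 KB total
 *   ADDR_LEN   = 3    -> 24-bit (3-byte) addressing
 *   ERASE_CMD  = 0x52
 *   ERASE_SIZE = 15   -> 1 << 15 = 32 KB erase block
 *   BLOCK_SIZE = 8    -> 1 << 8  = 256 B write block
 */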
+
 int falcon_probe_nic(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data;
@@ -2424,6 +2772,8 @@
 
 	/* Allocate storage for hardware specific data */
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+	if (!nic_data)
+		return -ENOMEM;
 	efx->nic_data = nic_data;
 
 	/* Determine number of ports etc. */
@@ -2467,6 +2817,8 @@
 		(unsigned long long)efx->irq_status.dma_addr,
 		efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
 
+	falcon_probe_spi_devices(efx);
+
 	/* Read in the non-volatile configuration */
 	rc = falcon_probe_nvconfig(efx);
 	if (rc)
@@ -2486,6 +2838,7 @@
 	return 0;
 
  fail5:
+	falcon_remove_spi_devices(efx);
 	falcon_free_buffer(efx, &efx->irq_status);
  fail4:
  fail3:
@@ -2573,19 +2926,14 @@
 	EFX_INVERT_OWORD(temp);
 	falcon_write(efx, &temp, FATAL_INTR_REG_KER);
 
-	/* Set number of RSS queues for receive path. */
-	falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-	if (falcon_rev(efx) >= FALCON_REV_B0)
-		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
-	else
-		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
 	if (EFX_WORKAROUND_7244(efx)) {
+		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
 		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
 		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
 		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
 		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
+		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
 	}
-	falcon_write(efx, &temp, RX_FILTER_CTL_REG);
 
 	falcon_setup_rss_indir_table(efx);
 
@@ -2641,8 +2989,8 @@
 		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
 	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
 	/* RX control FIFO thresholds [32 entries] */
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
+	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
+	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
 	falcon_write(efx, &temp, RX_CFG_REG_KER);
 
 	/* Set destination of both TX and RX Flush events */
@@ -2662,6 +3010,7 @@
 	rc = i2c_del_adapter(&efx->i2c_adap);
 	BUG_ON(rc);
 
+	falcon_remove_spi_devices(efx);
 	falcon_free_buffer(efx, &efx->irq_status);
 
 	falcon_reset_hw(efx, RESET_TYPE_ALL);
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 492f9bc..be025ba 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -40,24 +40,24 @@
 
 /* TX data path */
 extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
-extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
+extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
 extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
 extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
 extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
 
 /* RX data path */
 extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
-extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
+extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
 extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
 extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
 extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
 
 /* Event data path */
 extern int falcon_probe_eventq(struct efx_channel *channel);
-extern int falcon_init_eventq(struct efx_channel *channel);
+extern void falcon_init_eventq(struct efx_channel *channel);
 extern void falcon_fini_eventq(struct efx_channel *channel);
 extern void falcon_remove_eventq(struct efx_channel *channel);
-extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
+extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void falcon_eventq_read_ack(struct efx_channel *channel);
 
 /* Ports */
@@ -65,7 +65,7 @@
 extern void falcon_remove_port(struct efx_nic *efx);
 
 /* MAC/PHY */
-extern int falcon_xaui_link_ok(struct efx_nic *efx);
+extern bool falcon_xaui_link_ok(struct efx_nic *efx);
 extern int falcon_dma_stats(struct efx_nic *efx,
 			    unsigned int done_offset);
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
@@ -86,6 +86,7 @@
 extern int falcon_probe_nic(struct efx_nic *efx);
 extern int falcon_probe_resources(struct efx_nic *efx);
 extern int falcon_init_nic(struct efx_nic *efx);
+extern int falcon_flush_queues(struct efx_nic *efx);
 extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
 extern void falcon_remove_resources(struct efx_nic *efx);
 extern void falcon_remove_nic(struct efx_nic *efx);
@@ -93,6 +94,12 @@
 extern void falcon_set_multicast_hash(struct efx_nic *efx);
 extern int falcon_reset_xaui(struct efx_nic *efx);
 
+/* Tests */
+struct falcon_nvconfig;
+extern int falcon_read_nvram(struct efx_nic *efx,
+			     struct falcon_nvconfig *nvconfig);
+extern int falcon_test_registers(struct efx_nic *efx);
+
 /**************************************************************************
  *
  * Falcon MAC stats
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 6d00311..5d584b0 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -92,6 +92,17 @@
 /* SPI host data register */
 #define EE_SPI_HDATA_REG_KER 0x0120
 
+/* SPI/VPD config register */
+#define EE_VPD_CFG_REG_KER 0x0140
+#define EE_VPD_EN_LBN 0
+#define EE_VPD_EN_WIDTH 1
+#define EE_VPD_EN_AD9_MODE_LBN 1
+#define EE_VPD_EN_AD9_MODE_WIDTH 1
+#define EE_EE_CLOCK_DIV_LBN 112
+#define EE_EE_CLOCK_DIV_WIDTH 7
+#define EE_SF_CLOCK_DIV_LBN 120
+#define EE_SF_CLOCK_DIV_WIDTH 7
+
 /* PCIE CORE ACCESS REG */
 #define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
 #define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
@@ -106,7 +117,6 @@
 #define SF_PRST_WIDTH 1
 #define EE_PRST_LBN 8
 #define EE_PRST_WIDTH 1
-/* See pic_mode_t for decoding of this field */
 /* These bit definitions are extrapolated from the list of numerical
  * values for STRAP_PINS.
  */
@@ -115,6 +125,9 @@
 #define STRAP_PCIE_LBN 0
 #define STRAP_PCIE_WIDTH 1
 
+#define BOOTED_USING_NVDEVICE_LBN 3
+#define BOOTED_USING_NVDEVICE_WIDTH 1
+
 /* GPIO control register */
 #define GPIO_CTL_REG_KER 0x0210
 #define GPIO_OUTPUTS_LBN   (16)
@@ -479,18 +492,8 @@
 #define MAC_MCAST_HASH_REG0_KER 0xca0
 #define MAC_MCAST_HASH_REG1_KER 0xcb0
 
-/* GMAC registers */
-#define FALCON_GMAC_REGBANK 0xe00
-#define FALCON_GMAC_REGBANK_SIZE 0x200
-#define FALCON_GMAC_REG_SIZE 0x10
-
-/* XMAC registers */
-#define FALCON_XMAC_REGBANK 0x1200
-#define FALCON_XMAC_REGBANK_SIZE 0x200
-#define FALCON_XMAC_REG_SIZE 0x10
-
 /* XGMAC address register low */
-#define XM_ADR_LO_REG_MAC 0x00
+#define XM_ADR_LO_REG 0x1200
 #define XM_ADR_3_LBN 24
 #define XM_ADR_3_WIDTH 8
 #define XM_ADR_2_LBN 16
@@ -501,14 +504,14 @@
 #define XM_ADR_0_WIDTH 8
 
 /* XGMAC address register high */
-#define XM_ADR_HI_REG_MAC 0x01
+#define XM_ADR_HI_REG 0x1210
 #define XM_ADR_5_LBN 8
 #define XM_ADR_5_WIDTH 8
 #define XM_ADR_4_LBN 0
 #define XM_ADR_4_WIDTH 8
 
 /* XGMAC global configuration */
-#define XM_GLB_CFG_REG_MAC 0x02
+#define XM_GLB_CFG_REG 0x1220
 #define XM_RX_STAT_EN_LBN 11
 #define XM_RX_STAT_EN_WIDTH 1
 #define XM_TX_STAT_EN_LBN 10
@@ -521,7 +524,7 @@
 #define XM_CORE_RST_WIDTH 1
 
 /* XGMAC transmit configuration */
-#define XM_TX_CFG_REG_MAC 0x03
+#define XM_TX_CFG_REG 0x1230
 #define XM_IPG_LBN 16
 #define XM_IPG_WIDTH 4
 #define XM_FCNTL_LBN 10
@@ -536,7 +539,7 @@
 #define XM_TXEN_WIDTH 1
 
 /* XGMAC receive configuration */
-#define XM_RX_CFG_REG_MAC 0x04
+#define XM_RX_CFG_REG 0x1240
 #define XM_PASS_CRC_ERR_LBN 25
 #define XM_PASS_CRC_ERR_WIDTH 1
 #define XM_ACPT_ALL_MCAST_LBN 11
@@ -549,7 +552,7 @@
 #define XM_RXEN_WIDTH 1
 
 /* XGMAC management interrupt mask register */
-#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
+#define XM_MGT_INT_MSK_REG_B0 0x1250
 #define XM_MSK_PRMBLE_ERR_LBN 2
 #define XM_MSK_PRMBLE_ERR_WIDTH 1
 #define XM_MSK_RMTFLT_LBN 1
@@ -558,29 +561,29 @@
 #define XM_MSK_LCLFLT_WIDTH 1
 
 /* XGMAC flow control register */
-#define XM_FC_REG_MAC 0x7
+#define XM_FC_REG 0x1270
 #define XM_PAUSE_TIME_LBN 16
 #define XM_PAUSE_TIME_WIDTH 16
 #define XM_DIS_FCNTL_LBN 0
 #define XM_DIS_FCNTL_WIDTH 1
 
 /* XGMAC pause time count register */
-#define XM_PAUSE_TIME_REG_MAC 0x9
+#define XM_PAUSE_TIME_REG 0x1290
 
 /* XGMAC transmit parameter register */
-#define XM_TX_PARAM_REG_MAC 0x0d
+#define XM_TX_PARAM_REG 0x12d0
 #define XM_TX_JUMBO_MODE_LBN 31
 #define XM_TX_JUMBO_MODE_WIDTH 1
 #define XM_MAX_TX_FRM_SIZE_LBN 16
 #define XM_MAX_TX_FRM_SIZE_WIDTH 14
 
 /* XGMAC receive parameter register */
-#define XM_RX_PARAM_REG_MAC 0x0e
+#define XM_RX_PARAM_REG 0x12e0
 #define XM_MAX_RX_FRM_SIZE_LBN 0
 #define XM_MAX_RX_FRM_SIZE_WIDTH 14
 
 /* XGMAC management interrupt status register */
-#define XM_MGT_INT_REG_MAC_B0 0x0f
+#define XM_MGT_INT_REG_B0 0x12f0
 #define XM_PRMBLE_ERR 2
 #define XM_PRMBLE_WIDTH 1
 #define XM_RMTFLT_LBN 1
@@ -589,7 +592,7 @@
 #define XM_LCLFLT_WIDTH 1
 
 /* XGXS/XAUI powerdown/reset register */
-#define XX_PWR_RST_REG_MAC 0x10
+#define XX_PWR_RST_REG 0x1300
 
 #define XX_PWRDND_EN_LBN 15
 #define XX_PWRDND_EN_WIDTH 1
@@ -619,7 +622,7 @@
 #define XX_RST_XX_EN_WIDTH 1
 
 /* XGXS/XAUI powerdown/reset control register */
-#define XX_SD_CTL_REG_MAC 0x11
+#define XX_SD_CTL_REG 0x1310
 #define XX_HIDRVD_LBN 15
 #define XX_HIDRVD_WIDTH 1
 #define XX_LODRVD_LBN 14
@@ -645,7 +648,7 @@
 #define XX_LPBKA_LBN 0
 #define XX_LPBKA_WIDTH 1
 
-#define XX_TXDRV_CTL_REG_MAC 0x12
+#define XX_TXDRV_CTL_REG 0x1320
 #define XX_DEQD_LBN 28
 #define XX_DEQD_WIDTH 4
 #define XX_DEQC_LBN 24
@@ -664,7 +667,7 @@
 #define XX_DTXA_WIDTH 4
 
 /* XAUI XGXS core status register */
-#define XX_CORE_STAT_REG_MAC 0x16
+#define XX_CORE_STAT_REG 0x1360
 #define XX_FORCE_SIG_LBN 24
 #define XX_FORCE_SIG_WIDTH 8
 #define XX_FORCE_SIG_DECODE_FORCED 0xff
@@ -1127,7 +1130,28 @@
 	__le16 board_revision;
 } __packed;
 
-#define NVCONFIG_BASE 0x300
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+	__le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field)					\
+	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+
+#define NVCONFIG_OFFSET 0x300
+#define NVCONFIG_END 0x400
+
 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
 struct falcon_nvconfig {
 	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
@@ -1144,6 +1168,8 @@
 	__le16 board_struct_ver;
 	__le16 board_checksum;
 	struct falcon_nvconfig_board_v2 board_v2;
+	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
+	struct falcon_nvconfig_board_v3 board_v3;
 } __packed;
 
 #endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index 6670cdf..c16da31 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -13,7 +13,6 @@
 
 #include <linux/io.h>
 #include <linux/spinlock.h>
-#include "net_driver.h"
 
 /**************************************************************************
  *
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 55c0d97..d401231 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -23,56 +23,24 @@
 
 /**************************************************************************
  *
- * MAC register access
- *
- **************************************************************************/
-
-/* Offset of an XMAC register within Falcon */
-#define FALCON_XMAC_REG(mac_reg)					\
-	(FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
-
-void falcon_xmac_writel(struct efx_nic *efx,
-			 efx_dword_t *value, unsigned int mac_reg)
-{
-	efx_oword_t temp;
-
-	EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
-	falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
-}
-
-void falcon_xmac_readl(struct efx_nic *efx,
-		       efx_dword_t *value, unsigned int mac_reg)
-{
-	efx_oword_t temp;
-
-	falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
-	EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
-}
-
-/**************************************************************************
- *
  * MAC operations
  *
  *************************************************************************/
 static int falcon_reset_xmac(struct efx_nic *efx)
 {
-	efx_dword_t reg;
+	efx_oword_t reg;
 	int count;
 
-	EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
-	falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+	EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
+	falcon_write(efx, &reg, XM_GLB_CFG_REG);
 
 	for (count = 0; count < 10000; count++) {	/* wait up to 100ms */
-		falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
-		if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
+		falcon_read(efx, &reg, XM_GLB_CFG_REG);
+		if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
 			return 0;
 		udelay(10);
 	}
 
-	/* This often fails when DSP is disabled, ignore it */
-	if (sfe4001_phy_flash_cfg != 0)
-		return 0;
-
 	EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
 	return -ETIMEDOUT;
 }
@@ -80,25 +48,25 @@
 /* Configure the XAUI driver that is an output from Falcon */
 static void falcon_setup_xaui(struct efx_nic *efx)
 {
-	efx_dword_t sdctl, txdrv;
+	efx_oword_t sdctl, txdrv;
 
 	/* Move the XAUI into low power, unless there is no PHY, in
 	 * which case the XAUI will have to drive a cable. */
 	if (efx->phy_type == PHY_TYPE_NONE)
 		return;
 
-	falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
-	EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
-	falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
+	falcon_read(efx, &sdctl, XX_SD_CTL_REG);
+	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
+	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
+	falcon_write(efx, &sdctl, XX_SD_CTL_REG);
 
-	EFX_POPULATE_DWORD_8(txdrv,
+	EFX_POPULATE_OWORD_8(txdrv,
 			     XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
 			     XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
 			     XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
@@ -107,93 +75,21 @@
 			     XX_DTXC, XX_TXDRV_DTX_DEFAULT,
 			     XX_DTXB, XX_TXDRV_DTX_DEFAULT,
 			     XX_DTXA, XX_TXDRV_DTX_DEFAULT);
-	falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
+	falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
 }
 
-static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
+int falcon_reset_xaui(struct efx_nic *efx)
 {
-	efx_dword_t reg;
-
-	EFX_ZERO_DWORD(reg);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
-	EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-}
-
-static int _falcon_reset_xaui_a(struct efx_nic *efx)
-{
-	efx_dword_t reg;
-
-	falcon_hold_xaui_in_rst(efx);
-	falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
-
-	/* Follow the RAMBUS XAUI data reset sequencing
-	 * Channels A and B first: power down, reset PLL, reset, clear
-	 */
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	/* Channels C and D: power down, reset PLL, reset, clear */
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
-	EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
-	EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	/* Setup XAUI */
-	falcon_setup_xaui(efx);
-	udelay(10);
-
-	/* Take XGXS out of reset */
-	EFX_ZERO_DWORD(reg);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	return 0;
-}
-
-static int _falcon_reset_xaui_b(struct efx_nic *efx)
-{
-	efx_dword_t reg;
+	efx_oword_t reg;
 	int count;
 
 	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+	falcon_write(efx, &reg, XX_PWR_RST_REG);
 
 	/* Give some time for the link to establish */
 	for (count = 0; count < 1000; count++) { /* wait up to 10ms */
-		falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
-		if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
+		falcon_read(efx, &reg, XX_PWR_RST_REG);
+		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
 			falcon_setup_xaui(efx);
 			return 0;
 		}
@@ -203,55 +99,41 @@
 	return -ETIMEDOUT;
 }
 
-int falcon_reset_xaui(struct efx_nic *efx)
+static bool falcon_xgmii_status(struct efx_nic *efx)
 {
-	int rc;
-
-	if (EFX_WORKAROUND_9388(efx)) {
-		falcon_hold_xaui_in_rst(efx);
-		efx->phy_op->reset_xaui(efx);
-		rc = _falcon_reset_xaui_a(efx);
-	} else {
-		rc = _falcon_reset_xaui_b(efx);
-	}
-	return rc;
-}
-
-static int falcon_xgmii_status(struct efx_nic *efx)
-{
-	efx_dword_t reg;
+	efx_oword_t reg;
 
 	if (falcon_rev(efx) < FALCON_REV_B0)
-		return 1;
+		return true;
 
 	/* The ISR latches, so clear it and re-read */
-	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
-	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+	falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
+	falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
 
-	if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
-	    EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
+	if (EFX_OWORD_FIELD(reg, XM_LCLFLT) ||
+	    EFX_OWORD_FIELD(reg, XM_RMTFLT)) {
 		EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
-		return 0;
+		return false;
 	}
 
-	return 1;
+	return true;
 }
 
-static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
+static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
 {
-	efx_dword_t reg;
+	efx_oword_t reg;
 
 	if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
 	/* Flush the ISR */
 	if (enable)
-		falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+		falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
 
-	EFX_POPULATE_DWORD_2(reg,
+	EFX_POPULATE_OWORD_2(reg,
 			     XM_MSK_RMTFLT, !enable,
 			     XM_MSK_LCLFLT, !enable);
-	falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
+	falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
 }
 
 int falcon_init_xmac(struct efx_nic *efx)
@@ -274,7 +156,7 @@
 	if (rc)
 		goto fail2;
 
-	falcon_mask_status_intr(efx, 1);
+	falcon_mask_status_intr(efx, true);
 	return 0;
 
  fail2:
@@ -283,34 +165,34 @@
 	return rc;
 }
 
-int falcon_xaui_link_ok(struct efx_nic *efx)
+bool falcon_xaui_link_ok(struct efx_nic *efx)
 {
-	efx_dword_t reg;
-	int align_done, sync_status, link_ok = 0;
+	efx_oword_t reg;
+	bool align_done, link_ok = false;
+	int sync_status;
 
 	if (LOOPBACK_INTERNAL(efx))
-		return 1;
+		return true;
 
 	/* Read link status */
-	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+	falcon_read(efx, &reg, XX_CORE_STAT_REG);
 
-	align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
-	sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
+	align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
+	sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
 	if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
-		link_ok = 1;
+		link_ok = true;
 
 	/* Clear link status ready for next read */
-	EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
-	EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
-	EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
-	falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+	EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
+	EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
+	EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
+	falcon_write(efx, &reg, XX_CORE_STAT_REG);
 
 	/* If the link is up, then check the phy side of the xaui link
 	 * (error conditions from the wire side propagate back through
 	 * the phy to the xaui side). */
 	if (efx->link_up && link_ok) {
-		int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
-		if (has_phyxs)
+		if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
 			link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
 	}
 
@@ -325,15 +207,15 @@
 static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
 {
 	unsigned int max_frame_len;
-	efx_dword_t reg;
-	int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
+	efx_oword_t reg;
+	bool rx_fc = !!(efx->flow_control & EFX_FC_RX);
 
 	/* Configure MAC  - cut-thru mode is hard wired on */
 	EFX_POPULATE_DWORD_3(reg,
 			     XM_RX_JUMBO_MODE, 1,
 			     XM_TX_STAT_EN, 1,
 			     XM_RX_STAT_EN, 1);
-	falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+	falcon_write(efx, &reg, XM_GLB_CFG_REG);
 
 	/* Configure TX */
 	EFX_POPULATE_DWORD_6(reg,
@@ -343,7 +225,7 @@
 			     XM_TXCRC, 1,
 			     XM_FCNTL, 1,
 			     XM_IPG, 0x3);
-	falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
+	falcon_write(efx, &reg, XM_TX_CFG_REG);
 
 	/* Configure RX */
 	EFX_POPULATE_DWORD_5(reg,
@@ -352,21 +234,21 @@
 			     XM_ACPT_ALL_MCAST, 1,
 			     XM_ACPT_ALL_UCAST, efx->promiscuous,
 			     XM_PASS_CRC_ERR, 1);
-	falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
+	falcon_write(efx, &reg, XM_RX_CFG_REG);
 
 	/* Set frame length */
 	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
 	EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
-	falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
+	falcon_write(efx, &reg, XM_RX_PARAM_REG);
 	EFX_POPULATE_DWORD_2(reg,
 			     XM_MAX_TX_FRM_SIZE, max_frame_len,
 			     XM_TX_JUMBO_MODE, 1);
-	falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
+	falcon_write(efx, &reg, XM_TX_PARAM_REG);
 
 	EFX_POPULATE_DWORD_2(reg,
 			     XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
-			     XM_DIS_FCNTL, rx_fc ? 0 : 1);
-	falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
+			     XM_DIS_FCNTL, !rx_fc);
+	falcon_write(efx, &reg, XM_FC_REG);
 
 	/* Set MAC address */
 	EFX_POPULATE_DWORD_4(reg,
@@ -374,83 +256,75 @@
 			     XM_ADR_1, efx->net_dev->dev_addr[1],
 			     XM_ADR_2, efx->net_dev->dev_addr[2],
 			     XM_ADR_3, efx->net_dev->dev_addr[3]);
-	falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
+	falcon_write(efx, &reg, XM_ADR_LO_REG);
 	EFX_POPULATE_DWORD_2(reg,
 			     XM_ADR_4, efx->net_dev->dev_addr[4],
 			     XM_ADR_5, efx->net_dev->dev_addr[5]);
-	falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
+	falcon_write(efx, &reg, XM_ADR_HI_REG);
 }
 
 static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
 {
-	efx_dword_t reg;
-	int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
-	int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
-	int xgmii_loopback =
-		(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
+	efx_oword_t reg;
+	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
 
 	/* XGXS block is flaky and will need to be reset if moving
 	 * into or out of XGMII, XGXS or XAUI loopbacks. */
 	if (EFX_WORKAROUND_5147(efx)) {
-		int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
-		int reset_xgxs;
+		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+		bool reset_xgxs;
 
-		falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
-		old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
-		old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
+		falcon_read(efx, &reg, XX_CORE_STAT_REG);
+		old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
+		old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
 
-		falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
-		old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
+		falcon_read(efx, &reg, XX_SD_CTL_REG);
+		old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
 
 		/* The PHY driver may have turned XAUI off */
 		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
 			      (xaui_loopback != old_xaui_loopback) ||
 			      (xgmii_loopback != old_xgmii_loopback));
-		if (reset_xgxs) {
-			falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
-			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
-			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
-			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-			udelay(1);
-			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
-			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
-			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-			udelay(1);
-		}
+
+		if (reset_xgxs)
+			falcon_reset_xaui(efx);
 	}
 
-	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
-	EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
+	falcon_read(efx, &reg, XX_CORE_STAT_REG);
+	EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
 			    (xgxs_loopback || xaui_loopback) ?
 			    XX_FORCE_SIG_DECODE_FORCED : 0);
-	EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
-	EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
-	falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+	EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
+	EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
+	falcon_write(efx, &reg, XX_CORE_STAT_REG);
 
-	falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
-	EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
-	EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
-	EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
-	EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
-	falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
+	falcon_read(efx, &reg, XX_SD_CTL_REG);
+	EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
+	falcon_write(efx, &reg, XX_SD_CTL_REG);
 }
 
 
 /* Try to bring the Falcon side of the Falcon-Phy XAUI link back up if it
  * fails to come up. Bash it until it comes back up */
-static int falcon_check_xaui_link_up(struct efx_nic *efx)
+static bool falcon_check_xaui_link_up(struct efx_nic *efx)
 {
 	int max_tries, tries;
 	tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
 	max_tries = tries;
 
 	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
-	    (efx->phy_type == PHY_TYPE_NONE))
-		return 0;
+	    (efx->phy_type == PHY_TYPE_NONE) ||
+	    efx_phy_mode_disabled(efx->phy_mode))
+		return false;
 
 	while (tries) {
 		if (falcon_xaui_link_ok(efx))
-			return 1;
+			return true;
 
 		EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
 			__func__, tries);
@@ -461,18 +335,22 @@
 
 	EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
 		max_tries);
-	return 0;
+	return false;
 }
 
 void falcon_reconfigure_xmac(struct efx_nic *efx)
 {
-	int xaui_link_ok;
+	bool xaui_link_ok;
 
-	falcon_mask_status_intr(efx, 0);
+	falcon_mask_status_intr(efx, false);
 
 	falcon_deconfigure_mac_wrapper(efx);
 
-	efx->tx_disabled = LOOPBACK_INTERNAL(efx);
+	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
+	if (LOOPBACK_INTERNAL(efx))
+		efx->phy_mode |= PHY_MODE_TX_DISABLED;
+	else
+		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
 	efx->phy_op->reconfigure(efx);
 
 	falcon_reconfigure_xgxs_core(efx);
@@ -484,7 +362,7 @@
 	xaui_link_ok = falcon_check_xaui_link_up(efx);
 
 	if (xaui_link_ok && efx->link_up)
-		falcon_mask_status_intr(efx, 1);
+		falcon_mask_status_intr(efx, true);
 }
 
 void falcon_fini_xmac(struct efx_nic *efx)
@@ -554,21 +432,23 @@
 
 	/* Update derived statistics */
 	mac_stats->tx_good_bytes =
-		(mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
+		(mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+		 mac_stats->tx_control * 64);
 	mac_stats->rx_bad_bytes =
-		(mac_stats->rx_bytes - mac_stats->rx_good_bytes);
+		(mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+		 mac_stats->rx_control * 64);
 }
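The derived-statistics change subtracts the control-frame counts scaled by 64, apparently treating each MAC control (pause) frame as a minimum-size 64-byte frame that is included in the raw byte counters but should not appear in the good/bad totals. For example:

/* Illustrative arithmetic: tx_bytes = 1000000, tx_bad_bytes = 1000,
 * tx_control = 10  ->  tx_good_bytes = 1000000 - 1000 - 10 * 64 = 998360 */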
 
 int falcon_check_xmac(struct efx_nic *efx)
 {
-	unsigned xaui_link_ok;
+	bool xaui_link_ok;
 	int rc;
 
 	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
-	    (efx->phy_type == PHY_TYPE_NONE))
+	    efx_phy_mode_disabled(efx->phy_mode))
 		return 0;
 
-	falcon_mask_status_intr(efx, 0);
+	falcon_mask_status_intr(efx, false);
 	xaui_link_ok = falcon_xaui_link_ok(efx);
 
 	if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
@@ -579,7 +459,7 @@
 
 	/* Unmask interrupt if everything was (and still is) ok */
 	if (xaui_link_ok && efx->link_up)
-		falcon_mask_status_intr(efx, 1);
+		falcon_mask_status_intr(efx, true);
 
 	return rc;
 }
@@ -620,7 +500,7 @@
 
 int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
 {
-	int reset;
+	bool reset;
 
 	if (flow_control & EFX_FC_AUTO) {
 		EFX_LOG(efx, "10G does not support flow control "
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index edd07d4..a31571c 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,6 @@
 
 #include "net_driver.h"
 
-extern void falcon_xmac_writel(struct efx_nic *efx,
-			       efx_dword_t *value, unsigned int mac_reg);
-extern void falcon_xmac_readl(struct efx_nic *efx,
-			      efx_dword_t *value, unsigned int mac_reg);
 extern int falcon_init_xmac(struct efx_nic *efx);
 extern void falcon_reconfigure_xmac(struct efx_nic *efx);
 extern void falcon_update_stats_xmac(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index c4f540e..003e48d 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -159,20 +159,21 @@
 	return 0;
 }
 
-int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
 {
 	int phy_id = efx->mii.phy_id;
 	int status;
-	int ok = 1;
+	bool ok = true;
 	int mmd = 0;
-	int good;
 
 	/* If the port is in loopback, then we should only consider a subset
 	 * of mmd's */
 	if (LOOPBACK_INTERNAL(efx))
-		return 1;
+		return true;
 	else if (efx->loopback_mode == LOOPBACK_NETWORK)
-		return 0;
+		return false;
+	else if (efx_phy_mode_disabled(efx->phy_mode))
+		return false;
 	else if (efx->loopback_mode == LOOPBACK_PHYXS)
 		mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
 			      MDIO_MMDREG_DEVS0_PCS |
@@ -192,8 +193,7 @@
 			status = mdio_clause45_read(efx, phy_id,
 						    mmd, MDIO_MMDREG_STAT1);
 
-			good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
-			ok = ok && good;
+			ok = ok && (status & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
 		}
 		mmd_mask = (mmd_mask >> 1);
 		mmd++;
@@ -208,7 +208,7 @@
 
 	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
 					   MDIO_MMDREG_TXDIS);
-	if (efx->tx_disabled)
+	if (efx->phy_mode & PHY_MODE_TX_DISABLED)
 		ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
 	else
 		ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index cb99f3f..19c42ea 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -199,18 +199,19 @@
 	return (id_hi << 16) | (id_low);
 }
 
-static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
+static inline bool mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
 {
-	int i, sync, lane_status;
+	int i, lane_status;
+	bool sync;
 
 	for (i = 0; i < 2; ++i)
 		lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
 						 MDIO_MMD_PHYXS,
 						 MDIO_PHYXS_LANE_STATE);
 
-	sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
+	sync = !!(lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN));
 	if (!sync)
-		EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
+		EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
 	return sync;
 }
 
@@ -230,8 +231,8 @@
 			     unsigned int mmd_mask, unsigned int fatal_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern int mdio_clause45_links_ok(struct efx_nic *efx,
-				  unsigned int mmd_mask);
+extern bool mdio_clause45_links_ok(struct efx_nic *efx,
+				   unsigned int mmd_mask);
 
 /* Generic transmit disable support though PMAPMD */
 extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 219c74a..cdb11fa 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -88,9 +88,12 @@
  **************************************************************************/
 
 #define EFX_MAX_CHANNELS 32
-#define EFX_MAX_TX_QUEUES 1
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 
+#define EFX_TX_QUEUE_OFFLOAD_CSUM	0
+#define EFX_TX_QUEUE_NO_CSUM		1
+#define EFX_TX_QUEUE_COUNT		2
+
 /**
  * struct efx_special_buffer - An Efx special buffer
  * @addr: CPU base address of the buffer
@@ -127,7 +130,6 @@
  *	This field is zero when the queue slot is empty.
  * @continuation: True if this fragment is not the end of a packet.
  * @unmap_single: True if pci_unmap_single should be used.
- * @unmap_addr: DMA address to unmap
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
@@ -135,9 +137,8 @@
 	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	unsigned short len;
-	unsigned char continuation;
-	unsigned char unmap_single;
-	dma_addr_t unmap_addr;
+	bool continuation;
+	bool unmap_single;
 	unsigned short unmap_len;
 };
 
@@ -156,13 +157,13 @@
  *
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
- * @used: Queue is used by net driver
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
+ * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
- * @stopped: Stopped flag.
+ * @stopped: Stopped count.
  *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
@@ -188,11 +189,11 @@
 	/* Members which don't change on the fast path */
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	int queue;
-	int used;
 	struct efx_channel *channel;
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
+	bool flushed;
 
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
@@ -232,7 +233,6 @@
  * struct efx_rx_queue - An Efx RX queue
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
- * @used: Queue is used by net driver
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
@@ -262,11 +262,11 @@
  *	the remaining space in the allocation.
  * @buf_dma_addr: Page's DMA address.
  * @buf_data: Page's host address.
+ * @flushed: Used when handling queue flushing
  */
 struct efx_rx_queue {
 	struct efx_nic *efx;
 	int queue;
-	int used;
 	struct efx_channel *channel;
 	struct efx_rx_buffer *buffer;
 	struct efx_special_buffer rxd;
@@ -288,6 +288,7 @@
 	struct page *buf_page;
 	dma_addr_t buf_dma_addr;
 	char *buf_data;
+	bool flushed;
 };
 
 /**
@@ -325,12 +326,10 @@
  * queue.
  *
  * @efx: Associated Efx NIC
- * @evqnum: Event queue number
  * @channel: Channel instance number
  * @used_flags: Channel is used by net driver
  * @enabled: Channel enabled indicator
  * @irq: IRQ number (MSI and MSI-X only)
- * @has_interrupt: Channel has an interrupt
  * @irq_moderation: IRQ moderation value (in us)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
@@ -357,17 +356,14 @@
  */
 struct efx_channel {
 	struct efx_nic *efx;
-	int evqnum;
 	int channel;
 	int used_flags;
-	int enabled;
+	bool enabled;
 	int irq;
-	unsigned int has_interrupt;
 	unsigned int irq_moderation;
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
-	struct work_struct reset_work;
-	int work_pending;
+	bool work_pending;
 	struct efx_special_buffer eventq;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
@@ -390,7 +386,7 @@
 	 * access with prefetches.
 	 */
 	struct efx_rx_buffer *rx_pkt;
-	int rx_pkt_csummed;
+	bool rx_pkt_csummed;
 
 };
 
@@ -403,8 +399,8 @@
  */
 struct efx_blinker {
 	int led_num;
-	int state;
-	int resubmit;
+	bool state;
+	bool resubmit;
 	struct timer_list timer;
 };
 
@@ -432,8 +428,8 @@
 	 * have a separate init callback that happens later than
 	 * board init. */
 	int (*init_leds)(struct efx_nic *efx);
-	void (*set_fault_led) (struct efx_nic *efx, int state);
-	void (*blink) (struct efx_nic *efx, int start);
+	void (*set_fault_led) (struct efx_nic *efx, bool state);
+	void (*blink) (struct efx_nic *efx, bool start);
 	void (*fini) (struct efx_nic *nic);
 	struct efx_blinker blinker;
 	struct i2c_client *hwmon_client, *ioexp_client;
@@ -467,8 +463,7 @@
 	STATE_INIT = 0,
 	STATE_RUNNING = 1,
 	STATE_FINI = 2,
-	STATE_RESETTING = 3, /* rtnl_lock always held */
-	STATE_DISABLED = 4,
+	STATE_DISABLED = 3,
 	STATE_MAX,
 };
 
@@ -479,7 +474,7 @@
  * This is the equivalent of NET_IP_ALIGN [which controls the alignment
  * of the skb->head for hardware DMA].
  */
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #define EFX_PAGE_IP_ALIGN 0
 #else
 #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
@@ -512,7 +507,6 @@
  * @clear_interrupt: Clear down interrupt
  * @blink: Blink LEDs
  * @check_hw: Check hardware
- * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
  * @mmds: MMD presence mask
  * @loopbacks: Supported loopback modes mask
  */
@@ -522,11 +516,28 @@
 	void (*reconfigure) (struct efx_nic *efx);
 	void (*clear_interrupt) (struct efx_nic *efx);
 	int (*check_hw) (struct efx_nic *efx);
-	void (*reset_xaui) (struct efx_nic *efx);
+	int (*test) (struct efx_nic *efx);
 	int mmds;
 	unsigned loopbacks;
 };
 
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+	PHY_MODE_NORMAL		= 0,
+	PHY_MODE_TX_DISABLED	= 1,
+	PHY_MODE_SPECIAL	= 8,
+};
+
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+	return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
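The helper treats PHY_MODE_TX_DISABLED as "still on"; only the other flags count as disabled:

/* efx_phy_mode_disabled() examples (illustrative):
 *   PHY_MODE_NORMAL                          -> false
 *   PHY_MODE_TX_DISABLED                     -> false (PHY on, TX gated)
 *   PHY_MODE_SPECIAL                         -> true
 *   PHY_MODE_SPECIAL | PHY_MODE_TX_DISABLED  -> true
 */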
+
 /*
  * Efx extended statistics
  *
@@ -632,7 +643,7 @@
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
- * @rss_queues: Number of RSS queues
+ * @n_rx_queues: Number of RX queues
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @irq_status: Interrupt status buffer
@@ -640,15 +651,20 @@
  *	This register is written with the SMP processor ID whenever an
  *	interrupt is handled.  It is used by falcon_test_interrupt()
  *	to verify that an interrupt has occurred.
+ * @spi_flash: SPI flash device
+ *	This field will be %NULL if no flash device is present.
+ * @spi_eeprom: SPI EEPROM device
+ *	This field will be %NULL if no EEPROM device is present.
  * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
  * @nic_data: Hardware dependent state
- * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
- *	efx_reconfigure_port()
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ *	@port_inhibited, efx_monitor() and efx_reconfigure_port()
  * @port_enabled: Port enabled indicator.
  *	Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
  *	efx_reconfigure_work with kernel interfaces. Safe to read under any
  *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
  *	be held to modify it.
+ * @port_inhibited: If set, the netif_carrier is always off. Serialised by @mac_lock
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @rx_checksum_enabled: RX checksumming enabled
@@ -658,14 +674,16 @@
  *	can provide.  Generic code converts these into a standard
  *	&struct net_device_stats.
  * @stats_buffer: DMA buffer for statistics
- * @stats_lock: Statistics update lock
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
+ * @stats_enabled: Temporarily disable statistics fetches.
+ *	Serialised by @stats_lock
  * @mac_address: Permanent MAC address
  * @phy_type: PHY type
  * @phy_lock: PHY access lock
  * @phy_op: PHY interface
  * @phy_data: PHY private data (including PHY-specific stats)
  * @mii: PHY interface
- * @tx_disabled: PHY transmitter turned off
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
  * @link_up: Link status
  * @link_options: Link options (MII/GMII format)
  * @n_link_state_changes: Number of times the link has changed state
@@ -700,27 +718,31 @@
 	enum nic_state state;
 	enum reset_type reset_pending;
 
-	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
+	struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
-	int rss_queues;
+	int n_rx_queues;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
 
 	struct efx_buffer irq_status;
 	volatile signed int last_irq_cpu;
 
+	struct efx_spi_device *spi_flash;
+	struct efx_spi_device *spi_eeprom;
+
 	unsigned n_rx_nodesc_drop_cnt;
 
 	struct falcon_nic_data *nic_data;
 
 	struct mutex mac_lock;
-	int port_enabled;
+	bool port_enabled;
+	bool port_inhibited;
 
-	int port_initialized;
+	bool port_initialized;
 	struct net_device *net_dev;
-	int rx_checksum_enabled;
+	bool rx_checksum_enabled;
 
 	atomic_t netif_stop_count;
 	spinlock_t netif_stop_lock;
@@ -728,6 +750,7 @@
 	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
 	spinlock_t stats_lock;
+	bool stats_enabled;
 
 	unsigned char mac_address[ETH_ALEN];
 
@@ -736,13 +759,13 @@
 	struct efx_phy_operations *phy_op;
 	void *phy_data;
 	struct mii_if_info mii;
-	unsigned tx_disabled;
+	enum efx_phy_mode phy_mode;
 
-	int link_up;
+	bool link_up;
 	unsigned int link_options;
 	unsigned int n_link_state_changes;
 
-	int promiscuous;
+	bool promiscuous;
 	union efx_multicast_hash multicast_hash;
 	enum efx_fc_type flow_control;
 	struct work_struct reconfigure_work;
@@ -829,50 +852,33 @@
 			continue;					\
 		else
 
-/* Iterate over all used channels with interrupts */
-#define efx_for_each_channel_with_interrupt(_channel, _efx)		\
-	for (_channel = &_efx->channel[0];				\
-	     _channel < &_efx->channel[EFX_MAX_CHANNELS];		\
-	     _channel++)						\
-		if (!(_channel->used_flags && _channel->has_interrupt))	\
-			continue;					\
-		else
-
 /* Iterate over all used TX queues */
 #define efx_for_each_tx_queue(_tx_queue, _efx)				\
 	for (_tx_queue = &_efx->tx_queue[0];				\
-	     _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES];		\
-	     _tx_queue++)						\
-		if (!_tx_queue->used)					\
-			continue;					\
-		else
+	     _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT];		\
+	     _tx_queue++)
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
 	for (_tx_queue = &_channel->efx->tx_queue[0];			\
-	     _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES];	\
+	     _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT];	\
 	     _tx_queue++)						\
-		if ((!_tx_queue->used) ||				\
-		    (_tx_queue->channel != _channel))			\
+		if (_tx_queue->channel != _channel)			\
 			continue;					\
 		else
 
 /* Iterate over all used RX queues */
 #define efx_for_each_rx_queue(_rx_queue, _efx)				\
 	for (_rx_queue = &_efx->rx_queue[0];				\
-	     _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES];		\
-	     _rx_queue++)						\
-		if (!_rx_queue->used)					\
-			continue;					\
-		else
+	     _rx_queue < &_efx->rx_queue[_efx->n_rx_queues];		\
+	     _rx_queue++)
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = &_channel->efx->rx_queue[0];			\
-	     _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES];	\
-	     _rx_queue++)						\
-		if ((!_rx_queue->used) ||				\
-		    (_rx_queue->channel != _channel))			\
+	for (_rx_queue = &_channel->efx->rx_queue[_channel->channel];	\
+	     _rx_queue;							\
+	     _rx_queue = NULL)						\
+		if (_rx_queue->channel != _channel)			\
 			continue;					\
 		else
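With one RX queue per channel, the rewritten macro starts at the single slot rx_queue[channel->channel], iterates at most once, and skips the slot if it belongs to another channel. A usage sketch (assuming a struct efx_channel *channel in scope):

struct efx_rx_queue *rx_queue;

efx_for_each_channel_rx_queue(rx_queue, channel)
	efx_init_rx_queue(rx_queue);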
 
@@ -886,13 +892,13 @@
 }
 
 /* Set bit in a little-endian bitfield */
-static inline void set_bit_le(int nr, unsigned char *addr)
+static inline void set_bit_le(unsigned nr, unsigned char *addr)
 {
 	addr[nr / 8] |= (1 << (nr % 8));
 }
 
 /* Clear bit in a little-endian bitfield */
-static inline void clear_bit_le(int nr, unsigned char *addr)
+static inline void clear_bit_le(unsigned nr, unsigned char *addr)
 {
 	addr[nr / 8] &= ~(1 << (nr % 8));
 }
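These helpers address bits of a byte array in little-endian bit order (bit 0 is the least significant bit of byte 0), as used for tables such as the multicast hash. For example:

/* Worked example (illustrative): bit 10 lives in byte 1, bit 2 */
unsigned char hash[32] = { 0 };

set_bit_le(10, hash);    /* hash[1] == 0x04 */
clear_bit_le(10, hash);  /* hash[1] == 0x00 again */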
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 9d02c84..f746536 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -15,15 +15,7 @@
  */
 extern struct efx_phy_operations falcon_tenxpress_phy_ops;
 
-enum tenxpress_state {
-	TENXPRESS_STATUS_OFF = 0,
-	TENXPRESS_STATUS_OTEMP = 1,
-	TENXPRESS_STATUS_NORMAL = 2,
-};
-
-extern void tenxpress_set_state(struct efx_nic *efx,
-				enum tenxpress_state state);
-extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
+extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
 extern void tenxpress_crc_err(struct efx_nic *efx);
 
 /****************************************************************************
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 0d27dd3..0f805da 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -212,8 +212,8 @@
  * and populates a struct efx_rx_buffer with the relevant
  * information.  Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-					 struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+				  struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
@@ -252,8 +252,8 @@
  * and populates a struct efx_rx_buffer with the relevant
  * information.  Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-					  struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+				   struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	int bytes, space, offset;
@@ -319,8 +319,8 @@
  * and populates a struct efx_rx_buffer with the relevant
  * information.
  */
-static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-				     struct efx_rx_buffer *new_rx_buf)
+static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+			      struct efx_rx_buffer *new_rx_buf)
 {
 	int rc = 0;
 
@@ -340,8 +340,8 @@
 	return rc;
 }
 
-static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
-				       struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+				struct efx_rx_buffer *rx_buf)
 {
 	if (rx_buf->page) {
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -357,8 +357,8 @@
 	}
 }
 
-static inline void efx_free_rx_buffer(struct efx_nic *efx,
-				      struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_nic *efx,
+			       struct efx_rx_buffer *rx_buf)
 {
 	if (rx_buf->page) {
 		__free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -369,8 +369,8 @@
 	}
 }
 
-static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
-				      struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+			       struct efx_rx_buffer *rx_buf)
 {
 	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -506,10 +506,10 @@
 		efx_schedule_slow_fill(rx_queue, 1);
 }
 
-static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
-					    struct efx_rx_buffer *rx_buf,
-					    int len, int *discard,
-					    int *leak_packet)
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+				     struct efx_rx_buffer *rx_buf,
+				     int len, bool *discard,
+				     bool *leak_packet)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -520,7 +520,7 @@
 	/* The packet must be discarded, but this is only a fatal error
 	 * if the caller indicated it was
 	 */
-	*discard = 1;
+	*discard = true;
 
 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
 		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
@@ -546,8 +546,8 @@
  * Handles driverlink veto, and passes the fragment up via
  * the appropriate LRO method
  */
-static inline void efx_rx_packet_lro(struct efx_channel *channel,
-				     struct efx_rx_buffer *rx_buf)
+static void efx_rx_packet_lro(struct efx_channel *channel,
+			      struct efx_rx_buffer *rx_buf)
 {
 	struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
 	void *priv = channel;
@@ -574,9 +574,9 @@
 }
 
 /* Allocate and construct an SKB around a struct page.*/
-static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
-					    struct efx_nic *efx,
-					    int hdr_len)
+static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
+				     struct efx_nic *efx,
+				     int hdr_len)
 {
 	struct sk_buff *skb;
 
@@ -621,11 +621,11 @@
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-		   unsigned int len, int checksummed, int discard)
+		   unsigned int len, bool checksummed, bool discard)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
-	int leak_packet = 0;
+	bool leak_packet = false;
 
 	rx_buf = efx_rx_buffer(rx_queue, index);
 	EFX_BUG_ON_PARANOID(!rx_buf->data);
@@ -683,11 +683,11 @@
 
 /* Handle a received packet.  Second half: Touches packet payload. */
 void __efx_rx_packet(struct efx_channel *channel,
-		     struct efx_rx_buffer *rx_buf, int checksummed)
+		     struct efx_rx_buffer *rx_buf, bool checksummed)
 {
 	struct efx_nic *efx = channel->efx;
 	struct sk_buff *skb;
-	int lro = efx->net_dev->features & NETIF_F_LRO;
+	bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
 
 	/* If we're in loopback test, then pass the packet directly to the
 	 * loopback layer, and free the rx_buf here
@@ -789,27 +789,18 @@
 	/* Allocate RX buffers */
 	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
 	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
-	if (!rx_queue->buffer) {
-		rc = -ENOMEM;
-		goto fail1;
-	}
+	if (!rx_queue->buffer)
+		return -ENOMEM;
 
 	rc = falcon_probe_rx(rx_queue);
-	if (rc)
-		goto fail2;
-
-	return 0;
-
- fail2:
-	kfree(rx_queue->buffer);
-	rx_queue->buffer = NULL;
- fail1:
-	rx_queue->used = 0;
-
+	if (rc) {
+		kfree(rx_queue->buffer);
+		rx_queue->buffer = NULL;
+	}
 	return rc;
 }
 
-int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
@@ -833,7 +824,7 @@
 	rx_queue->fast_fill_limit = limit;
 
 	/* Set up RX descriptor ring */
-	return falcon_init_rx(rx_queue);
+	falcon_init_rx(rx_queue);
 }
 
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -872,7 +863,6 @@
 
 	kfree(rx_queue->buffer);
 	rx_queue->buffer = NULL;
-	rx_queue->used = 0;
 }
 
 void efx_flush_lro(struct efx_channel *channel)
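
The rx.c hunks above convert the checksummed/discard/leak_packet flags from int to bool; note the double negation used to fold the NETIF_F_LRO feature bit into a bool. A minimal user-space sketch of that idiom (the flag value below is purely illustrative, not the real NETIF_F_LRO):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_LRO_FLAG 0x8000u		/* illustrative bit, not the real NETIF_F_LRO value */

int main(void)
{
	unsigned long features = 0xdead8000u;		/* feature mask with the bit set */
	bool lro = !!(features & DEMO_LRO_FLAG);	/* 0x8000 collapses to 1; 0 stays 0 */

	printf("masked value = %#lx, as bool = %d\n",
	       features & DEMO_LRO_FLAG, lro);
	return 0;
}

With C99 _Bool the !! is technically redundant on assignment, but it documents the intent and still matters when the destination is a plain int.
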
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index f35e377..0e88a9d 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -14,7 +14,7 @@
 
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 
 int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
@@ -24,6 +24,6 @@
 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
 void efx_rx_work(struct work_struct *data);
 void __efx_rx_packet(struct efx_channel *channel,
-		     struct efx_rx_buffer *rx_buf, int checksummed);
+		     struct efx_rx_buffer *rx_buf, bool checksummed);
 
 #endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3b2de9f..362956e 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -27,6 +27,9 @@
 #include "boards.h"
 #include "workarounds.h"
 #include "mac.h"
+#include "spi.h"
+#include "falcon_io.h"
+#include "mdio_10g.h"
 
 /*
  * Loopback test packet structure
@@ -51,7 +54,7 @@
 	"Hello world! This is an Efx loopback test in progress!";
 
 /**
- * efx_selftest_state - persistent state during a selftest
+ * efx_loopback_state - persistent state during a loopback selftest
  * @flush:		Drop all packets in efx_loopback_rx_packet
  * @packet_count:	Number of packets being used in this test
  * @skbs:		An array of skbs transmitted
@@ -59,10 +62,14 @@
  * @rx_bad:		RX bad packet count
  * @payload:		Payload used in tests
  */
-struct efx_selftest_state {
-	int flush;
+struct efx_loopback_state {
+	bool flush;
 	int packet_count;
 	struct sk_buff **skbs;
+
+	/* Checksums are being offloaded */
+	bool offload_csum;
+
 	atomic_t rx_good;
 	atomic_t rx_bad;
 	struct efx_loopback_payload payload;
@@ -70,21 +77,65 @@
 
 /**************************************************************************
  *
- * Configurable values
+ * MII, NVRAM and register tests
  *
  **************************************************************************/
 
-/* Level of loopback testing
- *
- * The maximum packet burst length is 16**(n-1), i.e.
- *
- * - Level 0 : no packets
- * - Level 1 : 1 packet
- * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
- * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets)
- *
- */
-static unsigned int loopback_test_level = 3;
+static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc = 0;
+	u16 physid1, physid2;
+	struct mii_if_info *mii = &efx->mii;
+	struct net_device *net_dev = efx->net_dev;
+
+	if (efx->phy_type == PHY_TYPE_NONE)
+		return 0;
+
+	mutex_lock(&efx->mac_lock);
+	tests->mii = -1;
+
+	physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
+	physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+
+	if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+	    (physid2 == 0x0000) || (physid2 == 0xffff)) {
+		EFX_ERR(efx, "no MII PHY present with ID %d\n",
+			mii->phy_id);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
+	if (rc)
+		goto out;
+
+out:
+	mutex_unlock(&efx->mac_lock);
+	tests->mii = rc ? -1 : 1;
+	return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc;
+
+	rc = falcon_read_nvram(efx, NULL);
+	tests->nvram = rc ? -1 : 1;
+	return rc;
+}
+
+static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc;
+
+	/* Not supported on A-series silicon */
+	if (falcon_rev(efx) < FALCON_REV_B0)
+		return 0;
+
+	rc = falcon_test_registers(efx);
+	tests->registers = rc ? -1 : 1;
+	return rc;
+}
 
 /**************************************************************************
  *
@@ -107,7 +158,7 @@
 
 	/* ACK each interrupting event queue. Receiving an interrupt due to
 	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel_with_interrupt(channel, efx) {
+	efx_for_each_channel(channel, efx) {
 		if (channel->work_pending)
 			efx_process_channel_now(channel);
 		if (efx->last_irq_cpu >= 0)
@@ -132,41 +183,6 @@
 	return 0;
 }
 
-/* Test generation and receipt of non-interrupting events */
-static int efx_test_eventq(struct efx_channel *channel,
-			   struct efx_self_tests *tests)
-{
-	unsigned int magic;
-
-	/* Channel specific code, limited to 20 bits */
-	magic = (0x00010150 + channel->channel);
-	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
-		channel->channel, magic);
-
-	tests->eventq_dma[channel->channel] = -1;
-	tests->eventq_int[channel->channel] = 1;	/* fake pass */
-	tests->eventq_poll[channel->channel] = 1;	/* fake pass */
-
-	/* Reset flag and zero magic word */
-	channel->efx->last_irq_cpu = -1;
-	channel->eventq_magic = 0;
-	smp_wmb();
-
-	falcon_generate_test_event(channel, magic);
-	udelay(1);
-
-	efx_process_channel_now(channel);
-	if (channel->eventq_magic != magic) {
-		EFX_ERR(channel->efx, "channel %d  failed to see test event\n",
-			channel->channel);
-		return -ETIMEDOUT;
-	} else {
-		tests->eventq_dma[channel->channel] = 1;
-	}
-
-	return 0;
-}
-
 /* Test generation and receipt of interrupting events */
 static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
@@ -230,39 +246,18 @@
 	return 0;
 }
 
-/**************************************************************************
- *
- * PHY testing
- *
- **************************************************************************/
-
-/* Check PHY presence by reading the PHY ID registers */
-static int efx_test_phy(struct efx_nic *efx,
-			struct efx_self_tests *tests)
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	u16 physid1, physid2;
-	struct mii_if_info *mii = &efx->mii;
-	struct net_device *net_dev = efx->net_dev;
+	int rc;
 
-	if (efx->phy_type == PHY_TYPE_NONE)
+	if (!efx->phy_op->test)
 		return 0;
 
-	EFX_LOG(efx, "testing PHY presence\n");
-	tests->phy_ok = -1;
-
-	physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
-	physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
-
-	if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
-	    (physid2 != 0x0000) && (physid2 != 0xffff)) {
-		EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
-			mii->phy_id, physid1, physid2);
-		tests->phy_ok = 1;
-		return 0;
-	}
-
-	EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
-	return -ENODEV;
+	mutex_lock(&efx->mac_lock);
+	rc = efx->phy_op->test(efx);
+	mutex_unlock(&efx->mac_lock);
+	tests->phy = rc ? -1 : 1;
+	return rc;
 }
 
 /**************************************************************************
@@ -278,7 +273,7 @@
 void efx_loopback_rx_packet(struct efx_nic *efx,
 			    const char *buf_ptr, int pkt_len)
 {
-	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_state *state = efx->loopback_selftest;
 	struct efx_loopback_payload *received;
 	struct efx_loopback_payload *payload;
 
@@ -289,11 +284,12 @@
 		return;
 
 	payload = &state->payload;
-	
+
 	received = (struct efx_loopback_payload *) buf_ptr;
 	received->ip.saddr = payload->ip.saddr;
-	received->ip.check = payload->ip.check;
-	
+	if (state->offload_csum)
+		received->ip.check = payload->ip.check;
+
 	/* Check that header exists */
 	if (pkt_len < sizeof(received->header)) {
 		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
@@ -362,7 +358,7 @@
 /* Initialise an efx_selftest_state for a new iteration */
 static void efx_iterate_state(struct efx_nic *efx)
 {
-	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_state *state = efx->loopback_selftest;
 	struct net_device *net_dev = efx->net_dev;
 	struct efx_loopback_payload *payload = &state->payload;
 
@@ -395,17 +391,17 @@
 	smp_wmb();
 }
 
-static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_state *state = efx->loopback_selftest;
 	struct efx_loopback_payload *payload;
 	struct sk_buff *skb;
 	int i, rc;
 
 	/* Transmit N copies of buffer */
 	for (i = 0; i < state->packet_count; i++) {
-		/* Allocate an skb, holding an extra reference for 
+		/* Allocate an skb, holding an extra reference for
 		 * transmit completion counting */
 		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
 		if (!skb)
@@ -444,11 +440,25 @@
 	return 0;
 }
 
-static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
-			   struct efx_loopback_self_tests *lb_tests)
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	struct efx_channel *channel;
+
+	/* NAPI polling is not enabled, so process channels
+	 * synchronously */
+	efx_for_each_channel(channel, efx) {
+		if (channel->work_pending)
+			efx_process_channel_now(channel);
+	}
+	return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+			    struct efx_loopback_self_tests *lb_tests)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_state *state = efx->loopback_selftest;
 	struct sk_buff *skb;
 	int tx_done = 0, rx_good, rx_bad;
 	int i, rc = 0;
@@ -507,11 +517,10 @@
 		  struct efx_loopback_self_tests *lb_tests)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_selftest_state *state = efx->loopback_selftest;
-	struct efx_channel *channel;
-	int i, rc = 0;
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	int i, begin_rc, end_rc;
 
-	for (i = 0; i < loopback_test_level; i++) {
+	for (i = 0; i < 3; i++) {
 		/* Determine how many packets to send */
 		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
@@ -519,30 +528,31 @@
 				      state->packet_count, GFP_KERNEL);
 		if (!state->skbs)
 			return -ENOMEM;
-		state->flush = 0;
+		state->flush = false;
 
 		EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
 			"packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
 			state->packet_count);
 
 		efx_iterate_state(efx);
-		rc = efx_tx_loopback(tx_queue);
-		
-		/* NAPI polling is not enabled, so process channels synchronously */
-		schedule_timeout_uninterruptible(HZ / 50);
-		efx_for_each_channel_with_interrupt(channel, efx) {
-			if (channel->work_pending)
-				efx_process_channel_now(channel);
+		begin_rc = efx_begin_loopback(tx_queue);
+
+		/* This will normally complete very quickly, but be
+		 * prepared to wait up to 100 ms. */
+		msleep(1);
+		if (!efx_poll_loopback(efx)) {
+			msleep(100);
+			efx_poll_loopback(efx);
 		}
 
-		rc |= efx_rx_loopback(tx_queue, lb_tests);
+		end_rc = efx_end_loopback(tx_queue, lb_tests);
 		kfree(state->skbs);
 
-		if (rc) {
+		if (begin_rc || end_rc) {
 			/* Wait a while to ensure there are no packets
 			 * floating around after a failure. */
 			schedule_timeout_uninterruptible(HZ / 10);
-			return rc;
+			return begin_rc ? begin_rc : end_rc;
 		}
 	}
 
@@ -550,49 +560,36 @@
 		"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
 		state->packet_count);
 
-	return rc;
+	return 0;
 }
 
-static int efx_test_loopbacks(struct efx_nic *efx,
+static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
 			      struct efx_self_tests *tests,
 			      unsigned int loopback_modes)
 {
-	struct efx_selftest_state *state = efx->loopback_selftest;
-	struct ethtool_cmd ecmd, ecmd_loopback;
+	enum efx_loopback_mode mode;
+	struct efx_loopback_state *state;
 	struct efx_tx_queue *tx_queue;
-	enum efx_loopback_mode old_mode, mode;
-	int count, rc = 0, link_up;
-	
-	rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
-	if (rc) {
-		EFX_ERR(efx, "could not get GMII settings\n");
-		return rc;
-	}
-	old_mode = efx->loopback_mode;
+	bool link_up;
+	int count, rc = 0;
 
-	/* Disable autonegotiation for the purposes of loopback */
-	memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
-	if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
-		ecmd_loopback.autoneg = AUTONEG_DISABLE;
-		ecmd_loopback.duplex = DUPLEX_FULL;
-		ecmd_loopback.speed = SPEED_10000;
-	}
-
-	rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
-	if (rc) {
-		EFX_ERR(efx, "could not disable autonegotiation\n");
-		goto out;
-	}
-	tests->loopback_speed = ecmd_loopback.speed;
-	tests->loopback_full_duplex = ecmd_loopback.duplex;
+	/* Set the port loopback_selftest member. From this point on
+	 * all received packets will be dropped. Mark the state as
+	 * "flushing" so all inflight packets are dropped */
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return -ENOMEM;
+	BUG_ON(efx->loopback_selftest);
+	state->flush = true;
+	efx->loopback_selftest = state;
 
 	/* Test all supported loopback modes */
-	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
 		if (!(loopback_modes & (1 << mode)))
 			continue;
 
 		/* Move the port into the specified loopback mode. */
-		state->flush = 1;
+		state->flush = true;
 		efx->loopback_mode = mode;
 		efx_reconfigure_port(efx);
 
@@ -616,7 +613,7 @@
 			 */
 			link_up = efx->link_up;
 			if (!falcon_xaui_link_ok(efx))
-				link_up = 0;
+				link_up = false;
 
 		} while ((++count < 20) && !link_up);
 
@@ -634,18 +631,21 @@
 
 		/* Test every TX queue */
 		efx_for_each_tx_queue(tx_queue, efx) {
-			rc |= efx_test_loopback(tx_queue,
-						&tests->loopback[mode]);
+			state->offload_csum = (tx_queue->queue ==
+					       EFX_TX_QUEUE_OFFLOAD_CSUM);
+			rc = efx_test_loopback(tx_queue,
+					       &tests->loopback[mode]);
 			if (rc)
 				goto out;
 		}
 	}
 
  out:
-	/* Take out of loopback and restore PHY settings */
-	state->flush = 1;
-	efx->loopback_mode = old_mode;
-	efx_ethtool_set_settings(efx->net_dev, &ecmd);
+	/* Remove the flush. The caller will remove the loopback setting */
+	state->flush = true;
+	efx->loopback_selftest = NULL;
+	wmb();
+	kfree(state);
 
 	return rc;
 }
@@ -661,23 +661,27 @@
 int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
 {
 	struct efx_channel *channel;
-	int rc = 0;
+	int rc, rc2 = 0;
 
-	EFX_LOG(efx, "performing online self-tests\n");
+	rc = efx_test_mii(efx, tests);
+	if (rc && !rc2)
+		rc2 = rc;
 
-	rc |= efx_test_interrupts(efx, tests);
+	rc = efx_test_nvram(efx, tests);
+	if (rc && !rc2)
+		rc2 = rc;
+
+	rc = efx_test_interrupts(efx, tests);
+	if (rc && !rc2)
+		rc2 = rc;
+
 	efx_for_each_channel(channel, efx) {
-		if (channel->has_interrupt)
-			rc |= efx_test_eventq_irq(channel, tests);
-		else
-			rc |= efx_test_eventq(channel, tests);
+		rc = efx_test_eventq_irq(channel, tests);
+		if (rc && !rc2)
+			rc2 = rc;
 	}
-	rc |= efx_test_phy(efx, tests);
 
-	if (rc)
-		EFX_ERR(efx, "failed online self-tests\n");
-
-	return rc;
+	return rc2;
 }
 
 /* Offline (i.e. disruptive) testing
@@ -685,35 +689,66 @@
 int efx_offline_test(struct efx_nic *efx,
 		     struct efx_self_tests *tests, unsigned int loopback_modes)
 {
-	struct efx_selftest_state *state;
-	int rc = 0;
+	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+	int phy_mode = efx->phy_mode;
+	struct ethtool_cmd ecmd, ecmd_test;
+	int rc, rc2 = 0;
 
-	EFX_LOG(efx, "performing offline self-tests\n");
+	/* force the carrier state off so the kernel doesn't transmit during
+	 * the loopback test, and the watchdog timeout doesn't fire. Also put
+	 * falcon into loopback for the register test.
+	 */
+	mutex_lock(&efx->mac_lock);
+	efx->port_inhibited = true;
+	if (efx->loopback_modes)
+		efx->loopback_mode = __ffs(efx->loopback_modes);
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
 
-	/* Create a selftest_state structure to hold state for the test */
-	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (state == NULL) {
-		rc = -ENOMEM;
-		goto out;
+	/* free up all consumers of SRAM (including all the queues) */
+	efx_reset_down(efx, &ecmd);
+
+	rc = efx_test_chip(efx, tests);
+	if (rc && !rc2)
+		rc2 = rc;
+
+	/* reset the chip to recover from the register test */
+	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+
+	/* Modify the saved ecmd so that when efx_reset_up() restores the phy
+	 * state, AN is disabled, the phy is powered, and loopback is off */
+	memcpy(&ecmd_test, &ecmd, sizeof(ecmd_test));
+	if (ecmd_test.autoneg == AUTONEG_ENABLE) {
+		ecmd_test.autoneg = AUTONEG_DISABLE;
+		ecmd_test.duplex = DUPLEX_FULL;
+		ecmd_test.speed = SPEED_10000;
+	}
+	efx->loopback_mode = LOOPBACK_NONE;
+
+	rc = efx_reset_up(efx, &ecmd_test, rc == 0);
+	if (rc) {
+		EFX_ERR(efx, "Unable to recover from chip test\n");
+		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+		return rc;
 	}
 
-	/* Set the port loopback_selftest member. From this point on
-	 * all received packets will be dropped. Mark the state as
-	 * "flushing" so all inflight packets are dropped */
-	BUG_ON(efx->loopback_selftest);
-	state->flush = 1;
-	efx->loopback_selftest = state;
+	tests->loopback_speed = ecmd_test.speed;
+	tests->loopback_full_duplex = ecmd_test.duplex;
 
-	rc = efx_test_loopbacks(efx, tests, loopback_modes);
+	rc = efx_test_phy(efx, tests);
+	if (rc && !rc2)
+		rc2 = rc;
 
-	efx->loopback_selftest = NULL;
-	wmb();
-	kfree(state);
+	rc = efx_test_loopbacks(efx, ecmd_test, tests, loopback_modes);
+	if (rc && !rc2)
+		rc2 = rc;
 
- out:
-	if (rc)
-		EFX_ERR(efx, "failed offline self-tests\n");
+	/* restore the PHY to the previous state */
+	efx->loopback_mode = loopback_mode;
+	efx->phy_mode = phy_mode;
+	efx->port_inhibited = false;
+	efx_ethtool_set_settings(efx->net_dev, &ecmd);
 
-	return rc;
+	return rc2;
 }
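
For reference, the reworked efx_test_loopback() above always runs three iterations and picks the burst size for iteration i as min(1 << (i << 2), ring entries / 3), i.e. 1, 16 and 256 packets unless a small TX ring caps it. A standalone sketch of that arithmetic (the 4096-entry ring is an illustrative value, not a driver constant):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int txd_ring_entries = 4096;	/* illustrative ring size only */
	unsigned int cap = txd_ring_entries / 3;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int count = min_u(1u << (i << 2), cap);
		printf("iteration %d: %u packets\n", i, count);
	}
	return 0;
}
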
 
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6999c2..fc15df1 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
  */
 
 struct efx_loopback_self_tests {
-	int tx_sent[EFX_MAX_TX_QUEUES];
-	int tx_done[EFX_MAX_TX_QUEUES];
+	int tx_sent[EFX_TX_QUEUE_COUNT];
+	int tx_done[EFX_TX_QUEUE_COUNT];
 	int rx_good;
 	int rx_bad;
 };
@@ -29,14 +29,19 @@
  * indicates failure.
  */
 struct efx_self_tests {
+	/* online tests */
+	int mii;
+	int nvram;
 	int interrupt;
 	int eventq_dma[EFX_MAX_CHANNELS];
 	int eventq_int[EFX_MAX_CHANNELS];
 	int eventq_poll[EFX_MAX_CHANNELS];
-	int phy_ok;
+	/* offline tests */
+	int registers;
+	int phy;
 	int loopback_speed;
 	int loopback_full_duplex;
-	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
+	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
 };
 
 extern void efx_loopback_rx_packet(struct efx_nic *efx,
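
The new fields above follow the result convention used throughout selftest.c (see the rc ? -1 : 1 assignments earlier in this patch): 1 means the test passed, -1 means it failed and 0 means it was not run. A hypothetical helper over a cut-down copy of the structure, purely for illustration, shows how a caller might reduce the fields to a single verdict:

#include <stdio.h>

/* Cut-down copy of the result fields, for illustration only */
struct demo_self_tests {
	int mii;
	int nvram;
	int interrupt;
	int registers;
	int phy;
};

/* 1 = passed, -1 = failed, 0 = not run */
static int demo_any_failed(const struct demo_self_tests *t)
{
	return t->mii < 0 || t->nvram < 0 || t->interrupt < 0 ||
	       t->registers < 0 || t->phy < 0;
}

int main(void)
{
	struct demo_self_tests t = {
		.mii = 1, .nvram = 1, .interrupt = 1, .registers = -1, .phy = 0,
	};

	printf("self-test %s\n", demo_any_failed(&t) ? "FAILED" : "passed");
	return 0;
}
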
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index b278495..fe4e3fd 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -13,11 +13,13 @@
  * the PHY
  */
 #include <linux/delay.h>
+#include "net_driver.h"
 #include "efx.h"
 #include "phy.h"
 #include "boards.h"
 #include "falcon.h"
 #include "falcon_hwdefs.h"
+#include "falcon_io.h"
 #include "mac.h"
 
 /**************************************************************************
@@ -120,65 +122,159 @@
 	i2c_smbus_read_byte_data(hwmon_client, RSL);
 }
 
+static int sfe4001_poweron(struct efx_nic *efx)
+{
+	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
+	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
+	unsigned int i, j;
+	int rc;
+	u8 out;
+
+	/* Clear any previous over-temperature alert */
+	rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
+	if (rc < 0)
+		return rc;
+
+	/* Enable port 0 and port 1 outputs on IO expander */
+	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+	if (rc)
+		return rc;
+	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+				       0xff & ~(1 << P1_SPARE_LBN));
+	if (rc)
+		goto fail_on;
+
+	/* If PHY power is on, turn it all off and wait 1 second to
+	 * ensure a full reset.
+	 */
+	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+	if (rc < 0)
+		goto fail_on;
+	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+		       (0 << P0_EN_1V0X_LBN));
+	if (rc != out) {
+		EFX_INFO(efx, "power-cycling PHY\n");
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		schedule_timeout_uninterruptible(HZ);
+	}
+
+	for (i = 0; i < 20; ++i) {
+		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+			       (1 << P0_X_TRST_LBN));
+		if (efx->phy_mode & PHY_MODE_SPECIAL)
+			out |= 1 << P0_EN_3V3X_LBN;
+
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		msleep(10);
+
+		/* Turn on 1V power rail */
+		out &= ~(1 << P0_EN_1V0X_LBN);
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+
+		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+
+		/* In flash config mode, DSP does not turn on AFE, so
+		 * just wait 1 second.
+		 */
+		if (efx->phy_mode & PHY_MODE_SPECIAL) {
+			schedule_timeout_uninterruptible(HZ);
+			return 0;
+		}
+
+		for (j = 0; j < 10; ++j) {
+			msleep(100);
+
+			/* Check DSP has asserted AFE power line */
+			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+			if (rc < 0)
+				goto fail_on;
+			if (rc & (1 << P1_AFE_PWD_LBN))
+				return 0;
+		}
+	}
+
+	EFX_INFO(efx, "timed out waiting for DSP boot\n");
+	rc = -ETIMEDOUT;
+fail_on:
+	sfe4001_poweroff(efx);
+	return rc;
+}
+
+/* On SFE4001 rev A2 and later, we can control the FLASH_CFG_1 pin
+ * using the 3V3X output of the IO-expander.  Allow the user to set
+ * this when the device is stopped, and keep the device stopped while it is set.
+ * this when the device is stopped, and keep the device stopped while it is set.
+ */
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	enum efx_phy_mode old_mode, new_mode;
+	int err;
+
+	rtnl_lock();
+	old_mode = efx->phy_mode;
+	if (count == 0 || *buf == '0')
+		new_mode = old_mode & ~PHY_MODE_SPECIAL;
+	else
+		new_mode = PHY_MODE_SPECIAL;
+	if (old_mode == new_mode) {
+		err = 0;
+	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+		err = -EBUSY;
+	} else {
+		efx->phy_mode = new_mode;
+		err = sfe4001_poweron(efx);
+		efx_reconfigure_port(efx);
+	}
+	rtnl_unlock();
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
+
 static void sfe4001_fini(struct efx_nic *efx)
 {
 	EFX_INFO(efx, "%s\n", __func__);
 
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
 	sfe4001_poweroff(efx);
- 	i2c_unregister_device(efx->board_info.ioexp_client);
- 	i2c_unregister_device(efx->board_info.hwmon_client);
+	i2c_unregister_device(efx->board_info.ioexp_client);
+	i2c_unregister_device(efx->board_info.hwmon_client);
 }
 
-/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
- * to the FLASH_CFG_1 input on the DSP.  We must keep it high at power-
- * up to allow writing the flash (done through MDIO from userland).
- */
-unsigned int sfe4001_phy_flash_cfg;
-module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
-MODULE_PARM_DESC(phy_flash_cfg,
-		 "Force PHY to enter flash configuration mode");
-
 /* This board uses an I2C expander to provide power to the PHY, which needs to
  * be turned on before the PHY can be used.
  * Context: Process context, rtnl lock held
  */
 int sfe4001_init(struct efx_nic *efx)
 {
-	struct i2c_client *hwmon_client, *ioexp_client;
-	unsigned int count;
+	struct i2c_client *hwmon_client;
 	int rc;
-	u8 out;
-	efx_dword_t reg;
 
 	hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
 	if (!hwmon_client)
 		return -EIO;
 	efx->board_info.hwmon_client = hwmon_client;
 
-	ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
-	if (!ioexp_client) {
-		rc = -EIO;
-		goto fail_hwmon;
-	}
-	efx->board_info.ioexp_client = ioexp_client;
-
-	/* 10Xpress has fixed-function LED pins, so there is no board-specific
-	 * blink code. */
-	efx->board_info.blink = tenxpress_phy_blink;
-
-	/* Ensure that XGXS and XAUI SerDes are held in reset */
-	EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
-			     XX_PWRDNB_EN, 1,
-			     XX_RSTPLLAB_EN, 1,
-			     XX_RESETA_EN, 1,
-			     XX_RESETB_EN, 1,
-			     XX_RSTXGXSRX_EN, 1,
-			     XX_RSTXGXSTX_EN, 1);
-	falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
-	udelay(10);
-
-	efx->board_info.fini = sfe4001_fini;
-
 	/* Set DSP over-temperature alert threshold */
 	EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
 	rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
@@ -195,78 +291,34 @@
 		goto fail_ioexp;
 	}
 
-	/* Clear any previous over-temperature alert */
-	rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
-	if (rc < 0)
-		goto fail_ioexp;
+	efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
+	if (!efx->board_info.ioexp_client) {
+		rc = -EIO;
+		goto fail_hwmon;
+	}
 
-	/* Enable port 0 and port 1 outputs on IO expander */
-	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+	/* 10Xpress has fixed-function LED pins, so there is no board-specific
+	 * blink code. */
+	efx->board_info.blink = tenxpress_phy_blink;
+
+	efx->board_info.fini = sfe4001_fini;
+
+	rc = sfe4001_poweron(efx);
 	if (rc)
 		goto fail_ioexp;
-	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
-				       0xff & ~(1 << P1_SPARE_LBN));
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
 	if (rc)
 		goto fail_on;
 
-	/* Turn all power off then wait 1 sec. This ensures PHY is reset */
-	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
-		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
-		       (0 << P0_EN_1V0X_LBN));
-	rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-	if (rc)
-		goto fail_on;
-
-	schedule_timeout_uninterruptible(HZ);
-	count = 0;
-	do {
-		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
-		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
-			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
-			       (1 << P0_X_TRST_LBN));
-		if (sfe4001_phy_flash_cfg)
-			out |= 1 << P0_EN_3V3X_LBN;
-
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-		msleep(10);
-
-		/* Turn on 1V power rail */
-		out &= ~(1 << P0_EN_1V0X_LBN);
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-
-		EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
-
-		schedule_timeout_uninterruptible(HZ);
-
-		/* Check DSP is powered */
-		rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
-		if (rc < 0)
-			goto fail_on;
-		if (rc & (1 << P1_AFE_PWD_LBN))
-			goto done;
-
-		/* DSP doesn't look powered in flash config mode */
-		if (sfe4001_phy_flash_cfg)
-			goto done;
-	} while (++count < 20);
-
-	EFX_INFO(efx, "timed out waiting for power\n");
-	rc = -ETIMEDOUT;
-	goto fail_on;
-
-done:
 	EFX_INFO(efx, "PHY is powered on\n");
 	return 0;
 
 fail_on:
 	sfe4001_poweroff(efx);
 fail_ioexp:
- 	i2c_unregister_device(ioexp_client);
+	i2c_unregister_device(efx->board_info.ioexp_client);
 fail_hwmon:
- 	i2c_unregister_device(hwmon_client);
+	i2c_unregister_device(hwmon_client);
 	return rc;
 }
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 34412f3..feef619 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -19,53 +19,48 @@
  *
  *************************************************************************/
 
-/*
- * Commands common to all known devices.
- *
+#define SPI_WRSR 0x01		/* Write status register */
+#define SPI_WRITE 0x02		/* Write data to memory array */
+#define SPI_READ 0x03		/* Read data from memory array */
+#define SPI_WRDI 0x04		/* Reset write enable latch */
+#define SPI_RDSR 0x05		/* Read status register */
+#define SPI_WREN 0x06		/* Set write enable latch */
+
+#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
+
+/**
+ * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
+ * @efx:		The Efx controller that owns this device
+ * @device_id:		Controller's id for the device
+ * @size:		Size (in bytes)
+ * @addr_len:		Number of address bytes in read/write commands
+ * @munge_address:	Flag whether addresses should be munged.
+ *	Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ *	use bit 3 of the command byte as address bit A8, rather
+ *	than having a two-byte address.  If this flag is set, then
+ *	commands should be munged in this way.
+ * @block_size:		Write block size (in bytes).
+ *	Write commands are limited to blocks with this size and alignment.
+ * @read:		Read function for the device
+ * @write:		Write function for the device
  */
+struct efx_spi_device {
+	struct efx_nic *efx;
+	int device_id;
+	unsigned int size;
+	unsigned int addr_len;
+	unsigned int munge_address:1;
+	unsigned int block_size;
+};
 
-/* Write status register */
-#define SPI_WRSR 0x01
-
-/* Write data to memory array */
-#define SPI_WRITE 0x02
-
-/* Read data from memory array */
-#define SPI_READ 0x03
-
-/* Reset write enable latch */
-#define SPI_WRDI 0x04
-
-/* Read status register */
-#define SPI_RDSR 0x05
-
-/* Set write enable latch */
-#define SPI_WREN 0x06
-
-/* SST: Enable write to status register */
-#define SPI_SST_EWSR 0x50
-
-/*
- * Status register bits.  Not all bits are supported on all devices.
- *
- */
-
-/* Write-protect pin enabled */
-#define SPI_STATUS_WPEN 0x80
-
-/* Block protection bit 2 */
-#define SPI_STATUS_BP2 0x10
-
-/* Block protection bit 1 */
-#define SPI_STATUS_BP1 0x08
-
-/* Block protection bit 0 */
-#define SPI_STATUS_BP0 0x04
-
-/* State of the write enable latch */
-#define SPI_STATUS_WEN 0x02
-
-/* Device busy flag */
-#define SPI_STATUS_NRDY 0x01
+int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
+		    size_t len, size_t *retlen, u8 *buffer);
+int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
+		     size_t len, size_t *retlen, const u8 *buffer);
 
 #endif /* EFX_SPI_H */
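
The @munge_address documentation above covers parts such as the AT25040A whose ninth address bit travels in bit 3 of the command byte rather than in a second address byte. A small hypothetical helper (not the driver's actual implementation, which is outside this hunk) illustrates the munging:

#include <stdint.h>
#include <stdio.h>

#define DEMO_SPI_READ 0x03	/* SPI_READ, as defined in spi.h above */

/* Fold address bit A8 into bit 3 of the command byte for 9-bit-address parts */
static uint8_t demo_munge_command(uint8_t command, unsigned int address)
{
	return command | (((address >> 8) & 1) << 3);
}

int main(void)
{
	unsigned int address = 0x130;	/* 9-bit address with A8 set */

	printf("command byte 0x%02x, low address byte 0x%02x\n",
	       demo_munge_command(DEMO_SPI_READ, address), address & 0xff);
	return 0;
}
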
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index c014606..d507c93 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -65,25 +65,10 @@
 #define PMA_PMD_LED_DEFAULT	(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
 
 
-/* Self test (BIST) control register */
-#define PMA_PMD_BIST_CTRL_REG	(0xc014)
-#define PMA_PMD_BIST_BER_LBN	(2)	/* Run BER test */
-#define PMA_PMD_BIST_CONT_LBN	(1)	/* Run continuous BIST until cleared */
-#define PMA_PMD_BIST_SINGLE_LBN	(0)	/* Run 1 BIST iteration (self clears) */
-/* Self test status register */
-#define PMA_PMD_BIST_STAT_REG	(0xc015)
-#define PMA_PMD_BIST_ENX_LBN	(3)
-#define PMA_PMD_BIST_PMA_LBN	(2)
-#define PMA_PMD_BIST_RXD_LBN	(1)
-#define PMA_PMD_BIST_AFE_LBN	(0)
-
 /* Special Software reset register */
 #define PMA_PMD_EXT_CTRL_REG 49152
 #define PMA_PMD_EXT_SSR_LBN 15
 
-#define BIST_MAX_DELAY	(1000)
-#define BIST_POLL_DELAY	(10)
-
 /* Misc register defines */
 #define PCS_CLOCK_CTRL_REG 0xd801
 #define PLL312_RST_N_LBN 2
@@ -119,27 +104,12 @@
 		 "Max number of CRC errors before XAUI reset");
 
 struct tenxpress_phy_data {
-	enum tenxpress_state state;
 	enum efx_loopback_mode loopback_mode;
 	atomic_t bad_crc_count;
-	int tx_disabled;
+	enum efx_phy_mode phy_mode;
 	int bad_lp_tries;
 };
 
-static int tenxpress_state_is(struct efx_nic *efx, int state)
-{
-	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	return (phy_data != NULL) && (state == phy_data->state);
-}
-
-void tenxpress_set_state(struct efx_nic *efx,
-				enum tenxpress_state state)
-{
-	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	if (phy_data != NULL)
-		phy_data->state = state;
-}
-
 void tenxpress_crc_err(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data = efx->phy_data;
@@ -176,8 +146,6 @@
 	return 0;
 }
 
-static void tenxpress_reset_xaui(struct efx_nic *efx);
-
 static int tenxpress_init(struct efx_nic *efx)
 {
 	int rc, reg;
@@ -214,15 +182,12 @@
 	if (!phy_data)
 		return -ENOMEM;
 	efx->phy_data = phy_data;
+	phy_data->phy_mode = efx->phy_mode;
 
-	tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
-
-	if (!sfe4001_phy_flash_cfg) {
-		rc = mdio_clause45_wait_reset_mmds(efx,
-						   TENXPRESS_REQUIRED_DEVS);
-		if (rc < 0)
-			goto fail;
-	}
+	rc = mdio_clause45_wait_reset_mmds(efx,
+					   TENXPRESS_REQUIRED_DEVS);
+	if (rc < 0)
+		goto fail;
 
 	rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
 	if (rc < 0)
@@ -249,7 +214,10 @@
 {
 	int rc, reg;
 
-	EFX_TRACE(efx, "%s\n", __func__);
+	/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
+	 * a special software reset can glitch the XGMAC sufficiently for stats
+	 * requests to fail. Since we don't often special_reset, just lock. */
+	spin_lock(&efx->stats_lock);
 
 	/* Initiate reset */
 	reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -258,23 +226,25 @@
 	mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
 			    PMA_PMD_EXT_CTRL_REG, reg);
 
-	msleep(200);
+	mdelay(200);
 
 	/* Wait for the blocks to come out of reset */
 	rc = mdio_clause45_wait_reset_mmds(efx,
 					   TENXPRESS_REQUIRED_DEVS);
 	if (rc < 0)
-		return rc;
+		goto unlock;
 
 	/* Try and reconfigure the device */
 	rc = tenxpress_init(efx);
 	if (rc < 0)
-		return rc;
+		goto unlock;
 
-	return 0;
+unlock:
+	spin_unlock(&efx->stats_lock);
+	return rc;
 }
 
-static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
+static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp)
 {
 	struct tenxpress_phy_data *pd = efx->phy_data;
 	int reg;
@@ -311,15 +281,15 @@
  * into a non-10GBT port and if so warn the user that they won't get
  * link any time soon as we are 10GBT only, unless caller specified
  * not to do this check (it isn't useful in loopback) */
-static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
+static bool tenxpress_link_ok(struct efx_nic *efx, bool check_lp)
 {
-	int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
+	bool ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
 
 	if (ok) {
-		tenxpress_set_bad_lp(efx, 0);
+		tenxpress_set_bad_lp(efx, false);
 	} else if (check_lp) {
 		/* Are we plugged into the wrong sort of link? */
-		int bad_lp = 0;
+		bool bad_lp = false;
 		int phy_id = efx->mii.phy_id;
 		int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
 						 MDIO_AN_STATUS);
@@ -332,7 +302,7 @@
 		 * bit has the advantage of not clearing when autoneg
 		 * restarts. */
 		if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
-			tenxpress_set_bad_lp(efx, 0);
+			tenxpress_set_bad_lp(efx, false);
 			return ok;
 		}
 
@@ -367,16 +337,19 @@
 static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
-					  TENXPRESS_LOOPBACKS);
+	bool loop_change = LOOPBACK_OUT_OF(phy_data, efx,
+					   TENXPRESS_LOOPBACKS);
 
-	if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
+	if (efx->phy_mode & PHY_MODE_SPECIAL) {
+		phy_data->phy_mode = efx->phy_mode;
 		return;
+	}
 
 	/* When coming out of transmit disable, coming out of low power
 	 * mode, or moving out of any PHY internal loopback mode,
 	 * perform a special software reset */
-	if ((phy_data->tx_disabled && !efx->tx_disabled) ||
+	if ((efx->phy_mode == PHY_MODE_NORMAL &&
+	     phy_data->phy_mode != PHY_MODE_NORMAL) ||
 	    loop_change) {
 		tenxpress_special_reset(efx);
 		falcon_reset_xaui(efx);
@@ -386,9 +359,9 @@
 	mdio_clause45_phy_reconfigure(efx);
 	tenxpress_phyxs_loopback(efx);
 
-	phy_data->tx_disabled = efx->tx_disabled;
 	phy_data->loopback_mode = efx->loopback_mode;
-	efx->link_up = tenxpress_link_ok(efx, 0);
+	phy_data->phy_mode = efx->phy_mode;
+	efx->link_up = tenxpress_link_ok(efx, false);
 	efx->link_options = GM_LPA_10000FULL;
 }
 
@@ -402,16 +375,14 @@
 static int tenxpress_phy_check_hw(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
-	int link_ok;
+	bool link_ok;
 
-	link_ok = phy_up && tenxpress_link_ok(efx, 1);
+	link_ok = tenxpress_link_ok(efx, true);
 
 	if (link_ok != efx->link_up)
 		falcon_xmac_sim_phy_event(efx);
 
-	/* Nothing to check if we've already shut down the PHY */
-	if (!phy_up)
+	if (phy_data->phy_mode != PHY_MODE_NORMAL)
 		return 0;
 
 	if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
@@ -444,7 +415,7 @@
 
 /* Set the RX and TX LEDs and Link LED flashing. The other LEDs
  * (which probably aren't wired anyway) are left in AUTO mode */
-void tenxpress_phy_blink(struct efx_nic *efx, int blink)
+void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
 {
 	int reg;
 
@@ -459,52 +430,10 @@
 			    PMA_PMD_LED_OVERR_REG, reg);
 }
 
-static void tenxpress_reset_xaui(struct efx_nic *efx)
+static int tenxpress_phy_test(struct efx_nic *efx)
 {
-	int phy = efx->mii.phy_id;
-	int clk_ctrl, test_select, soft_rst2;
-
-	/* Real work is done on clock_ctrl other resets are thought to be
-	 * optional but make the reset more reliable
-	 */
-
-	/* Read */
-	clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
-				      PCS_CLOCK_CTRL_REG);
-	test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
-					 PCS_TEST_SELECT_REG);
-	soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
-				       PCS_SOFT_RST2_REG);
-
-	/* Put in reset */
-	test_select &= ~(1 << CLK312_EN_LBN);
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_TEST_SELECT_REG, test_select);
-
-	soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_SOFT_RST2_REG, soft_rst2);
-
-	clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_CLOCK_CTRL_REG, clk_ctrl);
-	udelay(10);
-
-	/* Remove reset */
-	clk_ctrl |= (1 << PLL312_RST_N_LBN);
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_CLOCK_CTRL_REG, clk_ctrl);
-	udelay(10);
-
-	soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_SOFT_RST2_REG, soft_rst2);
-	udelay(10);
-
-	test_select |= (1 << CLK312_EN_LBN);
-	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
-			    PCS_TEST_SELECT_REG, test_select);
-	udelay(10);
+	/* BIST is automatically run after a special software reset */
+	return tenxpress_special_reset(efx);
 }
 
 struct efx_phy_operations falcon_tenxpress_phy_ops = {
@@ -513,7 +442,7 @@
 	.check_hw         = tenxpress_phy_check_hw,
 	.fini             = tenxpress_phy_fini,
 	.clear_interrupt  = tenxpress_phy_clear_interrupt,
-	.reset_xaui       = tenxpress_reset_xaui,
+	.test             = tenxpress_phy_test,
 	.mmds             = TENXPRESS_REQUIRED_DEVS,
 	.loopbacks        = TENXPRESS_LOOPBACKS,
 };
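
Note why the same hunk that adds the stats_lock also turns msleep(200) into mdelay(200): sleeping is not allowed while a spinlock is held, so the delay has to become a busy-wait. A minimal kernel-style sketch of the pattern, not the full tenxpress_special_reset():

#include <linux/spinlock.h>
#include <linux/delay.h>

static void demo_locked_special_reset(spinlock_t *stats_lock)
{
	spin_lock(stats_lock);	/* keep MAC stats requests away from the glitch window */
	/* ... write the special reset bit over MDIO ... */
	mdelay(200);		/* busy-wait: msleep() must not be called under a spinlock */
	/* ... wait for the MMDs to come back, then reconfigure ... */
	spin_unlock(stats_lock);
}
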
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5e8374a..da3e9ff 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -47,7 +47,7 @@
  * We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
  */
-inline void efx_wake_queue(struct efx_nic *efx)
+void efx_wake_queue(struct efx_nic *efx)
 {
 	local_bh_disable();
 	if (atomic_dec_and_lock(&efx->netif_stop_count,
@@ -59,19 +59,21 @@
 	local_bh_enable();
 }
 
-static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-				      struct efx_tx_buffer *buffer)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+			       struct efx_tx_buffer *buffer)
 {
 	if (buffer->unmap_len) {
 		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
+					 buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(pci_dev, buffer->unmap_addr,
-					 buffer->unmap_len, PCI_DMA_TODEVICE);
+			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
+					 PCI_DMA_TODEVICE);
 		else
-			pci_unmap_page(pci_dev, buffer->unmap_addr,
-				       buffer->unmap_len, PCI_DMA_TODEVICE);
+			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
+				       PCI_DMA_TODEVICE);
 		buffer->unmap_len = 0;
-		buffer->unmap_single = 0;
+		buffer->unmap_single = false;
 	}
 
 	if (buffer->skb) {
@@ -103,13 +105,13 @@
 };
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
-			       const struct sk_buff *skb);
+			       struct sk_buff *skb);
 static void efx_fini_tso(struct efx_tx_queue *tx_queue);
 static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
 			       struct efx_tso_header *tsoh);
 
-static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-				 struct efx_tx_buffer *buffer)
+static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+			  struct efx_tx_buffer *buffer)
 {
 	if (buffer->tsoh) {
 		if (likely(!buffer->tsoh->unmap_len)) {
@@ -136,8 +138,8 @@
  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
  * You must hold netif_tx_lock() to call this function.
  */
-static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
-				  const struct sk_buff *skb)
+static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+			   struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct pci_dev *pci_dev = efx->pci_dev;
@@ -148,7 +150,7 @@
 	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
-	unsigned unmap_single;
+	bool unmap_single;
 	int q_space, i = 0;
 	int rc = NETDEV_TX_OK;
 
@@ -167,7 +169,7 @@
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
-	unmap_single = 1;
+	unmap_single = true;
 	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
 
 	/* Process all fragments */
@@ -213,7 +215,7 @@
 			EFX_BUG_ON_PARANOID(buffer->tsoh);
 			EFX_BUG_ON_PARANOID(buffer->skb);
 			EFX_BUG_ON_PARANOID(buffer->len);
-			EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
 			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
@@ -233,7 +235,6 @@
 		} while (len);
 
 		/* Transfer ownership of the unmapping to the final buffer */
-		buffer->unmap_addr = unmap_addr;
 		buffer->unmap_single = unmap_single;
 		buffer->unmap_len = unmap_len;
 		unmap_len = 0;
@@ -247,14 +248,14 @@
 		page_offset = fragment->page_offset;
 		i++;
 		/* Map for DMA */
-		unmap_single = 0;
+		unmap_single = false;
 		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
 					PCI_DMA_TODEVICE);
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
 	buffer->skb = skb;
-	buffer->continuation = 0;
+	buffer->continuation = false;
 
 	/* Pass off to hardware */
 	falcon_push_buffers(tx_queue);
@@ -287,9 +288,14 @@
 	}
 
 	/* Free the fragment we were mid-way through pushing */
-	if (unmap_len)
-		pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-			       PCI_DMA_TODEVICE);
+	if (unmap_len) {
+		if (unmap_single)
+			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
+				       PCI_DMA_TODEVICE);
+	}
 
 	return rc;
 }
@@ -299,8 +305,8 @@
  * This removes packets from the TX queue, up to and including the
  * specified index.
  */
-static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-				       unsigned int index)
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+				unsigned int index)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
@@ -320,7 +326,7 @@
 		}
 
 		efx_dequeue_buffer(tx_queue, buffer);
-		buffer->continuation = 1;
+		buffer->continuation = true;
 		buffer->len = 0;
 
 		++tx_queue->read_count;
@@ -367,8 +373,15 @@
  */
 int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
-	struct efx_nic *efx = net_dev->priv;
-	return efx_xmit(efx, &efx->tx_queue[0], skb);
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_tx_queue *tx_queue;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
+	else
+		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+
+	return efx_xmit(efx, tx_queue, skb);
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -412,30 +425,25 @@
 	/* Allocate software ring */
 	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
-	if (!tx_queue->buffer) {
-		rc = -ENOMEM;
-		goto fail1;
-	}
+	if (!tx_queue->buffer)
+		return -ENOMEM;
 	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
-		tx_queue->buffer[i].continuation = 1;
+		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
 	rc = falcon_probe_tx(tx_queue);
 	if (rc)
-		goto fail2;
+		goto fail;
 
 	return 0;
 
- fail2:
+ fail:
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
- fail1:
-	tx_queue->used = 0;
-
 	return rc;
 }
 
-int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
 
@@ -446,7 +454,7 @@
 	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
-	return falcon_init_tx(tx_queue);
+	falcon_init_tx(tx_queue);
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -461,7 +469,7 @@
 		buffer = &tx_queue->buffer[tx_queue->read_count &
 					   tx_queue->efx->type->txd_ring_mask];
 		efx_dequeue_buffer(tx_queue, buffer);
-		buffer->continuation = 1;
+		buffer->continuation = true;
 		buffer->len = 0;
 
 		++tx_queue->read_count;
@@ -494,7 +502,6 @@
 
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
-	tx_queue->used = 0;
 }
 
 
@@ -509,7 +516,7 @@
 /* Number of bytes inserted at the start of a TSO header buffer,
  * similar to NET_IP_ALIGN.
  */
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #define TSOH_OFFSET	0
 #else
 #define TSOH_OFFSET	NET_IP_ALIGN
@@ -533,47 +540,37 @@
 
 /**
  * struct tso_state - TSO state for an SKB
- * @remaining_len: Bytes of data we've yet to segment
+ * @out_len: Remaining length in current segment
  * @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
  * @packet_space: Remaining space in current packet
- * @ifc: Input fragment cursor.
- *	Where we are in the current fragment of the incoming SKB.  These
- *	values get updated in place when we split a fragment over
- *	multiple packets.
- * @p: Parameters.
- *	These values are set once at the start of the TSO send and do
- *	not get changed as the routine progresses.
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @unmap_single: DMA single vs page mapping flag
+ * @header_len: Number of bytes of header
+ * @full_packet_size: Number of bytes to put in each outgoing segment
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
  */
 struct tso_state {
-	unsigned remaining_len;
+	/* Output position */
+	unsigned out_len;
 	unsigned seqnum;
+	unsigned ipv4_id;
 	unsigned packet_space;
 
-	struct {
-		/* DMA address of current position */
-		dma_addr_t dma_addr;
-		/* Remaining length */
-		unsigned int len;
-		/* DMA address and length of the whole fragment */
-		unsigned int unmap_len;
-		dma_addr_t unmap_addr;
-		struct page *page;
-		unsigned page_off;
-	} ifc;
+	/* Input position */
+	dma_addr_t dma_addr;
+	unsigned in_len;
+	unsigned unmap_len;
+	dma_addr_t unmap_addr;
+	bool unmap_single;
 
-	struct {
-		/* The number of bytes of header */
-		unsigned int header_length;
-
-		/* The number of bytes to put in each outgoing segment. */
-		int full_packet_size;
-
-		/* Current IPv4 ID, host endian. */
-		unsigned ipv4_id;
-	} p;
+	unsigned header_len;
+	int full_packet_size;
 };
 
 
@@ -581,11 +578,24 @@
  * Verify that our various assumptions about sk_buffs and the conditions
  * under which TSO will be attempted hold true.
  */
-static inline void efx_tso_check_safe(const struct sk_buff *skb)
+static void efx_tso_check_safe(struct sk_buff *skb)
 {
-	EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
+	__be16 protocol = skb->protocol;
+
 	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
-			    skb->protocol);
+			    protocol);
+	if (protocol == htons(ETH_P_8021Q)) {
+		/* Find the encapsulated protocol; reset network header
+		 * and transport header based on that. */
+		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+		protocol = veh->h_vlan_encapsulated_proto;
+		skb_set_network_header(skb, sizeof(*veh));
+		if (protocol == htons(ETH_P_IP))
+			skb_set_transport_header(skb, sizeof(*veh) +
+						 4 * ip_hdr(skb)->ihl);
+	}
+
+	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
 	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
 	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
 			     + (tcp_hdr(skb)->doff << 2u)) >
@@ -685,18 +695,14 @@
  * @tx_queue:		Efx TX queue
  * @dma_addr:		DMA address of fragment
  * @len:		Length of fragment
- * @skb:		Only non-null for end of last segment
- * @end_of_packet:	True if last fragment in a packet
- * @unmap_addr:		DMA address of fragment for unmapping
- * @unmap_len:		Only set this in last segment of a fragment
+ * @final_buffer:	The final buffer inserted into the queue
  *
  * Push descriptors onto the TX queue.  Return 0 on success or 1 if
  * @tx_queue full.
  */
 static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			       dma_addr_t dma_addr, unsigned len,
-			       const struct sk_buff *skb, int end_of_packet,
-			       dma_addr_t unmap_addr, unsigned unmap_len)
+			       struct efx_tx_buffer **final_buffer)
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
@@ -724,8 +730,10 @@
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->type->txd_ring_mask - 1 - fill_level;
-			if (unlikely(q_space-- <= 0))
+			if (unlikely(q_space-- <= 0)) {
+				*final_buffer = NULL;
 				return 1;
+			}
 			smp_mb();
 			--tx_queue->stopped;
 		}
@@ -742,7 +750,7 @@
 		EFX_BUG_ON_PARANOID(buffer->len);
 		EFX_BUG_ON_PARANOID(buffer->unmap_len);
 		EFX_BUG_ON_PARANOID(buffer->skb);
-		EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+		EFX_BUG_ON_PARANOID(!buffer->continuation);
 		EFX_BUG_ON_PARANOID(buffer->tsoh);
 
 		buffer->dma_addr = dma_addr;
@@ -765,10 +773,7 @@
 
 	EFX_BUG_ON_PARANOID(!len);
 	buffer->len = len;
-	buffer->skb = skb;
-	buffer->continuation = !end_of_packet;
-	buffer->unmap_addr = unmap_addr;
-	buffer->unmap_len = unmap_len;
+	*final_buffer = buffer;
 	return 0;
 }
 
@@ -780,8 +785,8 @@
  * a single fragment, and we know it doesn't cross a page boundary.  It
  * also allows us to not worry about end-of-packet etc.
  */
-static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-				      struct efx_tso_header *tsoh, unsigned len)
+static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+			       struct efx_tso_header *tsoh, unsigned len)
 {
 	struct efx_tx_buffer *buffer;
 
@@ -791,7 +796,7 @@
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
 	EFX_BUG_ON_PARANOID(buffer->skb);
-	EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+	EFX_BUG_ON_PARANOID(!buffer->continuation);
 	EFX_BUG_ON_PARANOID(buffer->tsoh);
 	buffer->len = len;
 	buffer->dma_addr = tsoh->dma_addr;
@@ -805,6 +810,7 @@
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
 	struct efx_tx_buffer *buffer;
+	dma_addr_t unmap_addr;
 
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
@@ -814,11 +820,18 @@
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		buffer->len = 0;
-		buffer->continuation = 1;
+		buffer->continuation = true;
 		if (buffer->unmap_len) {
-			pci_unmap_page(tx_queue->efx->pci_dev,
-				       buffer->unmap_addr,
-				       buffer->unmap_len, PCI_DMA_TODEVICE);
+			unmap_addr = (buffer->dma_addr + buffer->len -
+				      buffer->unmap_len);
+			if (buffer->unmap_single)
+				pci_unmap_single(tx_queue->efx->pci_dev,
+						 unmap_addr, buffer->unmap_len,
+						 PCI_DMA_TODEVICE);
+			else
+				pci_unmap_page(tx_queue->efx->pci_dev,
+					       unmap_addr, buffer->unmap_len,
+					       PCI_DMA_TODEVICE);
 			buffer->unmap_len = 0;
 		}
 	}
@@ -826,50 +839,57 @@
 
 
 /* Parse the SKB header and initialise state. */
-static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
 	/* All ethernet/IP/TCP headers combined size is TCP header size
 	 * plus offset of TCP header relative to start of packet.
 	 */
-	st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
-			       + PTR_DIFF(tcp_hdr(skb), skb->data));
-	st->p.full_packet_size = (st->p.header_length
-				  + skb_shinfo(skb)->gso_size);
+	st->header_len = ((tcp_hdr(skb)->doff << 2u)
+			  + PTR_DIFF(tcp_hdr(skb), skb->data));
+	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
 
-	st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+	st->ipv4_id = ntohs(ip_hdr(skb)->id);
 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-	st->packet_space = st->p.full_packet_size;
-	st->remaining_len = skb->len - st->p.header_length;
+	st->packet_space = st->full_packet_size;
+	st->out_len = skb->len - st->header_len;
+	st->unmap_len = 0;
+	st->unmap_single = false;
 }
 
-
-/**
- * tso_get_fragment - record fragment details and map for DMA
- * @st:			TSO state
- * @efx:		Efx NIC
- * @data:		Pointer to fragment data
- * @len:		Length of fragment
- *
- * Record fragment details and map for DMA.  Return 0 on success, or
- * -%ENOMEM if DMA mapping fails.
- */
-static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
-				   int len, struct page *page, int page_off)
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+			    skb_frag_t *frag)
 {
+	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
+				      frag->page_offset, frag->size,
+				      PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+		st->unmap_single = false;
+		st->unmap_len = frag->size;
+		st->in_len = frag->size;
+		st->dma_addr = st->unmap_addr;
+		return 0;
+	}
+	return -ENOMEM;
+}
 
-	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
-					  len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
-		st->ifc.unmap_len = len;
-		st->ifc.len = len;
-		st->ifc.dma_addr = st->ifc.unmap_addr;
-		st->ifc.page = page;
-		st->ifc.page_off = page_off;
+static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+				 const struct sk_buff *skb)
+{
+	int hl = st->header_len;
+	int len = skb_headlen(skb) - hl;
+
+	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
+					len, PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+		st->unmap_single = true;
+		st->unmap_len = len;
+		st->in_len = len;
+		st->dma_addr = st->unmap_addr;
 		return 0;
 	}
 	return -ENOMEM;
@@ -886,36 +906,45 @@
  * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
  * space in @tx_queue.
  */
-static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-						const struct sk_buff *skb,
-						struct tso_state *st)
+static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+					 const struct sk_buff *skb,
+					 struct tso_state *st)
 {
-
+	struct efx_tx_buffer *buffer;
 	int n, end_of_packet, rc;
 
-	if (st->ifc.len == 0)
+	if (st->in_len == 0)
 		return 0;
 	if (st->packet_space == 0)
 		return 0;
 
-	EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
 
-	n = min(st->ifc.len, st->packet_space);
+	n = min(st->in_len, st->packet_space);
 
 	st->packet_space -= n;
-	st->remaining_len -= n;
-	st->ifc.len -= n;
-	st->ifc.page_off += n;
-	end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+	st->out_len -= n;
+	st->in_len -= n;
 
-	rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
-				 st->remaining_len ? NULL : skb,
-				 end_of_packet, st->ifc.unmap_addr,
-				 st->ifc.len ? 0 : st->ifc.unmap_len);
+	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+	if (likely(rc == 0)) {
+		if (st->out_len == 0)
+			/* Transfer ownership of the skb */
+			buffer->skb = skb;
 
-	st->ifc.dma_addr += n;
+		end_of_packet = st->out_len == 0 || st->packet_space == 0;
+		buffer->continuation = !end_of_packet;
 
+		if (st->in_len == 0) {
+			/* Transfer ownership of the pci mapping */
+			buffer->unmap_len = st->unmap_len;
+			buffer->unmap_single = st->unmap_single;
+			st->unmap_len = 0;
+		}
+	}
+
+	st->dma_addr += n;
 	return rc;
 }
 
@@ -929,9 +958,9 @@
  * Generate a new header and prepare for the new packet.  Return 0 on
  * success, or -1 if failed to alloc header.
  */
-static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
-				       const struct sk_buff *skb,
-				       struct tso_state *st)
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+				const struct sk_buff *skb,
+				struct tso_state *st)
 {
 	struct efx_tso_header *tsoh;
 	struct iphdr *tsoh_iph;
@@ -940,7 +969,7 @@
 	u8 *header;
 
 	/* Allocate a DMA-mapped header buffer. */
-	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
 		if (tx_queue->tso_headers_free == NULL) {
 			if (efx_tsoh_block_alloc(tx_queue))
 				return -1;
@@ -951,7 +980,7 @@
 		tsoh->unmap_len = 0;
 	} else {
 		tx_queue->tso_long_headers++;
-		tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
 		if (unlikely(!tsoh))
 			return -1;
 	}
@@ -961,33 +990,32 @@
 	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
 
 	/* Copy and update the headers. */
-	memcpy(header, skb->data, st->p.header_length);
+	memcpy(header, skb->data, st->header_len);
 
 	tsoh_th->seq = htonl(st->seqnum);
 	st->seqnum += skb_shinfo(skb)->gso_size;
-	if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+	if (st->out_len > skb_shinfo(skb)->gso_size) {
 		/* This packet will not finish the TSO burst. */
-		ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
 		tsoh_th->fin = 0;
 		tsoh_th->psh = 0;
 	} else {
 		/* This packet will be the last in the TSO burst. */
-		ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
-			     + st->remaining_len);
+		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 	}
 	tsoh_iph->tot_len = htons(ip_length);
 
 	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
-	tsoh_iph->id = htons(st->p.ipv4_id);
-	st->p.ipv4_id++;
+	tsoh_iph->id = htons(st->ipv4_id);
+	st->ipv4_id++;
 
 	st->packet_space = skb_shinfo(skb)->gso_size;
 	++tx_queue->tso_packets;
 
 	/* Form a descriptor for this header. */
-	efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+	efx_tso_put_header(tx_queue, tsoh, st->header_len);
 
 	return 0;
 }
@@ -1005,11 +1033,11 @@
  * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
-			       const struct sk_buff *skb)
+			       struct sk_buff *skb)
 {
+	struct efx_nic *efx = tx_queue->efx;
 	int frag_i, rc, rc2 = NETDEV_TX_OK;
 	struct tso_state state;
-	skb_frag_t *f;
 
 	/* Verify TSO is safe - these checks should never fail. */
 	efx_tso_check_safe(skb);
@@ -1021,29 +1049,16 @@
 	/* Assume that skb header area contains exactly the headers, and
 	 * all payload is in the frag list.
 	 */
-	if (skb_headlen(skb) == state.p.header_length) {
+	if (skb_headlen(skb) == state.header_len) {
 		/* Grab the first payload fragment. */
 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
 		frag_i = 0;
-		f = &skb_shinfo(skb)->frags[frag_i];
-		rc = tso_get_fragment(&state, tx_queue->efx,
-				      f->size, f->page, f->page_offset);
+		rc = tso_get_fragment(&state, efx,
+				      skb_shinfo(skb)->frags + frag_i);
 		if (rc)
 			goto mem_err;
 	} else {
-		/* It may look like this code fragment assumes that the
-		 * skb->data portion does not cross a page boundary, but
-		 * that is not the case.  It is guaranteed to be direct
-		 * mapped memory, and therefore is physically contiguous,
-		 * and so DMA will work fine.  kmap_atomic() on this region
-		 * will just return the direct mapping, so that will work
-		 * too.
-		 */
-		int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
-		int hl = state.p.header_length;
-		rc = tso_get_fragment(&state, tx_queue->efx,
-				      skb_headlen(skb) - hl,
-				      virt_to_page(skb->data), page_off + hl);
+		rc = tso_get_head_fragment(&state, efx, skb);
 		if (rc)
 			goto mem_err;
 		frag_i = -1;
@@ -1058,13 +1073,12 @@
 			goto stop;
 
 		/* Move onto the next fragment? */
-		if (state.ifc.len == 0) {
+		if (state.in_len == 0) {
 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
 				/* End of payload reached. */
 				break;
-			f = &skb_shinfo(skb)->frags[frag_i];
-			rc = tso_get_fragment(&state, tx_queue->efx,
-					      f->size, f->page, f->page_offset);
+			rc = tso_get_fragment(&state, efx,
+					      skb_shinfo(skb)->frags + frag_i);
 			if (rc)
 				goto mem_err;
 		}
@@ -1082,8 +1096,7 @@
 	return NETDEV_TX_OK;
 
  mem_err:
-	EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
-		" error\n");
+	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any((struct sk_buff *)skb);
 	goto unwind;
 
@@ -1092,9 +1105,19 @@
 
 	/* Stop the queue if it wasn't stopped before. */
 	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->efx);
+		efx_stop_queue(efx);
 
  unwind:
+	/* Free the DMA mapping we were in the process of writing out */
+	if (state.unmap_len) {
+		if (state.unmap_single)
+			pci_unmap_single(efx->pci_dev, state.unmap_addr,
+					 state.unmap_len, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(efx->pci_dev, state.unmap_addr,
+				       state.unmap_len, PCI_DMA_TODEVICE);
+	}
+
 	efx_enqueue_unwind(tx_queue);
 	return rc2;
 }
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
index 1526a73..5e1cc23 100644
--- a/drivers/net/sfc/tx.h
+++ b/drivers/net/sfc/tx.h
@@ -15,7 +15,7 @@
 
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 
 int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 35ab19c..fa7b49d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -20,14 +20,10 @@
 
 /* XAUI resets if link not detected */
 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* SNAP frames have TOBE_DISC set */
-#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
 /* RX PCIe double split performance issue */
 #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
 /* TX pkt parser problem with <= 16 byte TXes */
 #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
-/* XGXS and XAUI reset sequencing in SW */
-#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
 /* Low rate CRC errors require XAUI reset */
 #define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index f3684ad..276151d 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -40,7 +40,7 @@
 }
 
 struct xfp_phy_data {
-	int tx_disabled;
+	enum efx_phy_mode phy_mode;
 };
 
 #define XFP_MAX_RESET_TIME 500
@@ -93,7 +93,7 @@
 		 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
 		 MDIO_ID_REV(devid));
 
-	phy_data->tx_disabled = efx->tx_disabled;
+	phy_data->phy_mode = efx->phy_mode;
 
 	rc = xfp_reset_phy(efx);
 
@@ -136,13 +136,14 @@
 	struct xfp_phy_data *phy_data = efx->phy_data;
 
 	/* Reset the PHY when moving from tx off to tx on */
-	if (phy_data->tx_disabled && !efx->tx_disabled)
+	if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
+	    (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
 		xfp_reset_phy(efx);
 
 	mdio_clause45_transmit_disable(efx);
 	mdio_clause45_phy_reconfigure(efx);
 
-	phy_data->tx_disabled = efx->tx_disabled;
+	phy_data->phy_mode = efx->phy_mode;
 	efx->link_up = xfp_link_ok(efx);
 	efx->link_options = GM_LPA_10000FULL;
 }
@@ -151,7 +152,7 @@
 static void xfp_phy_fini(struct efx_nic *efx)
 {
 	/* Clobber the LED if it was blinking */
-	efx->board_info.blink(efx, 0);
+	efx->board_info.blink(efx, false);
 
 	/* Free the context block */
 	kfree(efx->phy_data);
@@ -164,7 +165,6 @@
 	.check_hw        = xfp_phy_check_hw,
 	.fini            = xfp_phy_fini,
 	.clear_interrupt = xfp_phy_clear_interrupt,
-	.reset_xaui      = efx_port_dummy_op_void,
 	.mmds            = XFP_REQUIRED_DEVS,
 	.loopbacks       = XFP_LOOPBACKS,
 };
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index ea85de9..79e665e 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -44,17 +44,10 @@
 				     int set, int local);
 static int port_to_mib(struct s_smc *smc, int p);
 
-#define MOFFSS(e)	((int)&(((struct fddi_mib *)0)->e))
-#define MOFFSA(e)	((int) (((struct fddi_mib *)0)->e))
-
-#define MOFFMS(e)	((int)&(((struct fddi_mib_m *)0)->e))
-#define MOFFMA(e)	((int) (((struct fddi_mib_m *)0)->e))
-
-#define MOFFAS(e)	((int)&(((struct fddi_mib_a *)0)->e))
-#define MOFFAA(e)	((int) (((struct fddi_mib_a *)0)->e))
-
-#define MOFFPS(e)	((int)&(((struct fddi_mib_p *)0)->e))
-#define MOFFPA(e)	((int) (((struct fddi_mib_p *)0)->e))
+#define MOFFSS(e)	offsetof(struct fddi_mib, e)
+#define MOFFMS(e)	offsetof(struct fddi_mib_m, e)
+#define MOFFAS(e)	offsetof(struct fddi_mib_a, e)
+#define MOFFPS(e)	offsetof(struct fddi_mib_p, e)
 
 
 #define AC_G	0x01		/* Get */
@@ -87,8 +80,8 @@
 	{ SMT_P100D,AC_G,	MOFFSS(fddiSMTOpVersionId),	"S"	} ,
 	{ SMT_P100E,AC_G,	MOFFSS(fddiSMTHiVersionId),	"S"	} ,
 	{ SMT_P100F,AC_G,	MOFFSS(fddiSMTLoVersionId),	"S"	} ,
-	{ SMT_P1010,AC_G,	MOFFSA(fddiSMTManufacturerData), "D" } ,
-	{ SMT_P1011,AC_GR,	MOFFSA(fddiSMTUserData),	"D"	} ,
+	{ SMT_P1010,AC_G,	MOFFSS(fddiSMTManufacturerData), "D" } ,
+	{ SMT_P1011,AC_GR,	MOFFSS(fddiSMTUserData),	"D"	} ,
 	{ SMT_P1012,AC_G,	MOFFSS(fddiSMTMIBVersionId),	"S"	} ,
 
 	/* StationConfigGrp */
@@ -103,7 +96,7 @@
 	{ SMT_P101D,AC_GR,	MOFFSS(fddiSMTTT_Notify),	"wS"	} ,
 	{ SMT_P101E,AC_GR,	MOFFSS(fddiSMTStatRptPolicy),	"bB"	} ,
 	{ SMT_P101F,AC_GR,	MOFFSS(fddiSMTTrace_MaxExpiration),"lL"	} ,
-	{ SMT_P1020,AC_G,	MOFFSA(fddiSMTPORTIndexes),	"II"	} ,
+	{ SMT_P1020,AC_G,	MOFFSS(fddiSMTPORTIndexes),	"II"	} ,
 	{ SMT_P1021,AC_G,	MOFFSS(fddiSMTMACIndexes),	"I"	} ,
 	{ SMT_P1022,AC_G,	MOFFSS(fddiSMTBypassPresent),	"F"	} ,
 
@@ -117,8 +110,8 @@
 
 	/* MIBOperationGrp */
 	{ SMT_P1032,AC_GROUP	} ,
-	{ SMT_P1033,AC_G,	MOFFSA(fddiSMTTimeStamp),"P"		} ,
-	{ SMT_P1034,AC_G,	MOFFSA(fddiSMTTransitionTimeStamp),"P"	} ,
+	{ SMT_P1033,AC_G,	MOFFSS(fddiSMTTimeStamp),"P"		} ,
+	{ SMT_P1034,AC_G,	MOFFSS(fddiSMTTransitionTimeStamp),"P"	} ,
 	/* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
 	{ SMT_P1035,AC_G,	MOFFSS(fddiSMTSetCount),"4P"		} ,
 	{ SMT_P1036,AC_G,	MOFFSS(fddiSMTLastSetStationId),"8"	} ,
@@ -129,7 +122,7 @@
 	 * PRIVATE EXTENSIONS
 	 * only accessible locally to get/set passwd
 	 */
-	{ SMT_P10F0,AC_GR,	MOFFSA(fddiPRPMFPasswd),	"8"	} ,
+	{ SMT_P10F0,AC_GR,	MOFFSS(fddiPRPMFPasswd),	"8"	} ,
 	{ SMT_P10F1,AC_GR,	MOFFSS(fddiPRPMFStation),	"8"	} ,
 #ifdef	ESS
 	{ SMT_P10F2,AC_GR,	MOFFSS(fddiESSPayload),		"lL"	} ,
@@ -245,7 +238,7 @@
 	{ SMT_P400E,AC_GR,	MOFFPS(fddiPORTConnectionPolicies),"bB"	} ,
 	{ SMT_P400F,AC_G,	MOFFPS(fddiPORTMacIndicated),	"2"	} ,
 	{ SMT_P4010,AC_G,	MOFFPS(fddiPORTCurrentPath),	"E"	} ,
-	{ SMT_P4011,AC_GR,	MOFFPA(fddiPORTRequestedPaths),	"l4"	} ,
+	{ SMT_P4011,AC_GR,	MOFFPS(fddiPORTRequestedPaths),	"l4"	} ,
 	{ SMT_P4012,AC_G,	MOFFPS(fddiPORTMACPlacement),	"S"	} ,
 	{ SMT_P4013,AC_G,	MOFFPS(fddiPORTAvailablePaths),	"B"	} ,
 	{ SMT_P4016,AC_G,	MOFFPS(fddiPORTPMDClass),	"E"	} ,
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index e24b25c..3805b93 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3732,27 +3732,63 @@
 	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
 }
 
-static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset)
+static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
 {
-	u32 val;
+	unsigned long start = jiffies;
 
-	sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
+	while ((sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
+		/* Can take up to 10.6 ms for write */
+		if (time_after(jiffies, start + HZ/4)) {
+			dev_err(&hw->pdev->dev, PFX "VPD cycle timed out\n");
+			return -ETIMEDOUT;
+		}
+		mdelay(1);
+	}
 
-	do {
-		offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR);
-	} while (!(offset & PCI_VPD_ADDR_F));
-
-	val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
-	return val;
+	return 0;
 }
 
-static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val)
+static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
+			 u16 offset, size_t length)
 {
-	sky2_pci_write16(hw, cap + PCI_VPD_DATA, val);
-	sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
-	do {
-		offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR);
-	} while (offset & PCI_VPD_ADDR_F);
+	int rc = 0;
+
+	while (length > 0) {
+		u32 val;
+
+		sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
+		rc = sky2_vpd_wait(hw, cap, 0);
+		if (rc)
+			break;
+
+		val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
+
+		memcpy(data, &val, min(sizeof(val), length));
+		offset += sizeof(u32);
+		data += sizeof(u32);
+		length -= sizeof(u32);
+	}
+
+	return rc;
+}
+
+static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
+			  u16 offset, unsigned int length)
+{
+	unsigned int i;
+	int rc = 0;
+
+	for (i = 0; i < length; i += sizeof(u32)) {
+		u32 val = *(u32 *)(data + i);
+
+		sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
+		sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
+
+		rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
+		if (rc)
+			break;
+	}
+	return rc;
 }
 
 static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3760,24 +3796,13 @@
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-	int length = eeprom->len;
-	u16 offset = eeprom->offset;
 
 	if (!cap)
 		return -EINVAL;
 
 	eeprom->magic = SKY2_EEPROM_MAGIC;
 
-	while (length > 0) {
-		u32 val = sky2_vpd_read(sky2->hw, cap, offset);
-		int n = min_t(int, length, sizeof(val));
-
-		memcpy(data, &val, n);
-		length -= n;
-		data += n;
-		offset += n;
-	}
-	return 0;
+	return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
 }
 
 static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3785,8 +3810,6 @@
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-	int length = eeprom->len;
-	u16 offset = eeprom->offset;
 
 	if (!cap)
 		return -EINVAL;
@@ -3794,21 +3817,11 @@
 	if (eeprom->magic != SKY2_EEPROM_MAGIC)
 		return -EINVAL;
 
-	while (length > 0) {
-		u32 val;
-		int n = min_t(int, length, sizeof(val));
+	/* Partial writes not supported */
+	if ((eeprom->offset & 3) || (eeprom->len & 3))
+		return -EINVAL;
 
-		if (n < sizeof(val))
-			val = sky2_vpd_read(sky2->hw, cap, offset);
-		memcpy(&val, data, n);
-
-		sky2_vpd_write(sky2->hw, cap, offset, val);
-
-		length -= n;
-		data += n;
-		offset += n;
-	}
-	return 0;
+	return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
 }
 
 
@@ -4178,6 +4191,69 @@
 	return value & PCI_PM_CTRL_PME_ENABLE;
 }
 
+/*
+ * Read and parse the first part of Vital Product Data
+ */
+#define VPD_SIZE	128
+#define VPD_MAGIC	0x82
+
+static void __devinit sky2_vpd_info(struct sky2_hw *hw)
+{
+	int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_VPD);
+	const u8 *p;
+	u8 *vpd_buf = NULL;
+	u16 len;
+	static struct vpd_tag {
+		char tag[2];
+		char *label;
+	} vpd_tags[] = {
+		{ "PN",	"Part Number" },
+		{ "EC", "Engineering Level" },
+		{ "MN", "Manufacturer" },
+	};
+
+	if (!cap)
+		goto out;
+
+	vpd_buf = kmalloc(VPD_SIZE, GFP_KERNEL);
+	if (!vpd_buf)
+		goto out;
+
+	if (sky2_vpd_read(hw, cap, vpd_buf, 0, VPD_SIZE))
+		goto out;
+
+	if (vpd_buf[0] != VPD_MAGIC)
+		goto out;
+	len = vpd_buf[1];
+	if (len == 0 || len > VPD_SIZE - 4)
+		goto out;
+	p = vpd_buf + 3;
+	dev_info(&hw->pdev->dev, "%.*s\n", len, p);
+	p += len;
+
+	while (p < vpd_buf + VPD_SIZE - 4) {
+		int i;
+
+		if (!memcmp("RW", p, 2))	/* end marker */
+			break;
+
+		len = p[2];
+		if (len > (p - vpd_buf) - 4)
+			break;
+
+		for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
+			if (!memcmp(vpd_tags[i].tag, p, 2)) {
+				printk(KERN_DEBUG " %s: %.*s\n",
+				       vpd_tags[i].label, len, p + 3);
+				break;
+			}
+		}
+		p += len + 3;
+	}
+out:
+	kfree(vpd_buf);
+}
+
 /* This driver supports yukon2 chipset only */
 static const char *sky2_name(u8 chipid, char *buf, int sz)
 {
@@ -4276,13 +4352,13 @@
 	if (err)
 		goto err_out_iounmap;
 
-	dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-2 %s rev %d\n",
-		 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
-		 pdev->irq, sky2_name(hw->chip_id, buf1, sizeof(buf1)),
-		 hw->chip_rev);
+	dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
+		 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
 
 	sky2_reset(hw);
 
+	sky2_vpd_info(hw);
+
 	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
 	if (!dev) {
 		err = -ENOMEM;
@@ -4533,6 +4609,8 @@
 
 static int __init sky2_init_module(void)
 {
+	pr_info(PFX "driver version " DRV_VERSION "\n");
+
 	sky2_debug_init();
 	return pci_register_driver(&sky2_driver);
 }
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index c587162..02cc064 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -183,7 +183,7 @@
 	unsigned int reg, timeout=0, resets=1;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	/*	 Take out of PM setting first */
 	if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -272,7 +272,7 @@
 	unsigned mask, cfg, cr;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	SMC_SET_MAC_ADDR(lp, dev->dev_addr);
 
@@ -329,7 +329,7 @@
 	unsigned cr;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
 
 	/* Disable IRQ's */
 	SMC_SET_INT_EN(lp, 0);
@@ -348,7 +348,7 @@
 	struct smc911x_local *lp = netdev_priv(dev);
 	unsigned int fifo_count, timeout, reg;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
 	fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
 	if (fifo_count <= 4) {
 		/* Manually dump the packet data */
@@ -382,7 +382,7 @@
 	unsigned char *data;
 
 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
-		dev->name, __FUNCTION__);
+		dev->name, __func__);
 	status = SMC_GET_RX_STS_FIFO(lp);
 	DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
 		dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
@@ -460,7 +460,7 @@
 	unsigned char *buf;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
 	BUG_ON(lp->pending_tx_skb == NULL);
 
 	skb = lp->pending_tx_skb;
@@ -524,7 +524,7 @@
 	unsigned long flags;
 
 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
-		dev->name, __FUNCTION__);
+		dev->name, __func__);
 
 	BUG_ON(lp->pending_tx_skb != NULL);
 
@@ -596,7 +596,7 @@
 	unsigned int tx_status;
 
 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
-		dev->name, __FUNCTION__);
+		dev->name, __func__);
 
 	/* Collect the TX status */
 	while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
@@ -647,7 +647,7 @@
 	SMC_GET_MII(lp, phyreg, phyaddr, phydata);
 
 	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
-		__FUNCTION__, phyaddr, phyreg, phydata);
+		__func__, phyaddr, phyreg, phydata);
 	return phydata;
 }
 
@@ -661,7 +661,7 @@
 	struct smc911x_local *lp = netdev_priv(dev);
 
 	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__FUNCTION__, phyaddr, phyreg, phydata);
+		__func__, phyaddr, phyreg, phydata);
 
 	SMC_SET_MII(lp, phyreg, phyaddr, phydata);
 }
@@ -676,7 +676,7 @@
 	int phyaddr;
 	unsigned int cfg, id1, id2;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	lp->phy_type = 0;
 
@@ -746,7 +746,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int bmcr;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	/* Enter Link Disable state */
 	SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -793,7 +793,7 @@
 	unsigned long flags;
 	unsigned int reg;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	reg = SMC_GET_PMT_CTRL(lp);
@@ -852,7 +852,7 @@
 	int phyaddr = lp->mii.phy_id;
 	unsigned int bmcr, cr;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
 		/* duplex state has changed */
@@ -892,7 +892,7 @@
 	int status;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
 
 	/*
 	 * We should not be called if phy_type is zero.
@@ -985,7 +985,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int status;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	if (lp->phy_type == 0)
 		return;
@@ -1013,7 +1013,7 @@
 	unsigned int rx_overrun=0, cr, pkts;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -1174,8 +1174,6 @@
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
-
 	return IRQ_HANDLED;
 }
 
@@ -1188,7 +1186,7 @@
 	struct sk_buff *skb = lp->current_tx_skb;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
 	/* Clear the DMA interrupt sources */
@@ -1224,7 +1222,7 @@
 	unsigned long flags;
 	unsigned int pkts;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
 	/* Clear the DMA interrupt sources */
 	SMC_DMA_ACK_IRQ(dev, dma);
@@ -1272,7 +1270,7 @@
 	int status, mask;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	status = SMC_GET_INT(lp);
@@ -1310,7 +1308,7 @@
 	unsigned int mcr, update_multicast = 0;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	SMC_GET_MAC_CR(lp, mcr);
@@ -1412,7 +1410,7 @@
 {
 	struct smc911x_local *lp = netdev_priv(dev);
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	/*
 	 * Check that the address is valid.  If its not, refuse
@@ -1420,7 +1418,7 @@
 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+		PRINTK("%s: no valid ethernet hw addr\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1449,7 +1447,7 @@
 {
 	struct smc911x_local *lp = netdev_priv(dev);
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
@@ -1483,7 +1481,7 @@
 	int ret, status;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 	cmd->maxtxpkt = 1;
 	cmd->maxrxpkt = 1;
 
@@ -1621,7 +1619,7 @@
 	for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
 		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
 			PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
-				dev->name, __FUNCTION__);
+				dev->name, __func__);
 			return -EFAULT;
 		}
 		mdelay(1);
@@ -1629,7 +1627,7 @@
 	}
 	if (timeout == 0) {
 		PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
-			dev->name, __FUNCTION__);
+			dev->name, __func__);
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -1742,7 +1740,7 @@
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
 
 	cookie = probe_irq_on();
 
@@ -1808,7 +1806,7 @@
 	const char *version_string;
 	unsigned long irq_flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
 	/* First, see if the endian word is recognized */
 	val = SMC_GET_BYTE_TEST(lp);
@@ -2058,7 +2056,7 @@
 	unsigned int *addr;
 	int ret;
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n",  __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "--> %s\n",  __func__);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		ret = -ENODEV;
@@ -2129,7 +2127,7 @@
 	struct smc911x_local *lp = netdev_priv(ndev);
 	struct resource *res;
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
 	platform_set_drvdata(pdev, NULL);
 
 	unregister_netdev(ndev);
@@ -2159,7 +2157,7 @@
 	struct net_device *ndev = platform_get_drvdata(dev);
 	struct smc911x_local *lp = netdev_priv(ndev);
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
 	if (ndev) {
 		if (netif_running(ndev)) {
 			netif_device_detach(ndev);
@@ -2177,7 +2175,7 @@
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
 	if (ndev) {
 		struct smc911x_local *lp = netdev_priv(ndev);
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 24768c1..ef5ce88 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -270,7 +270,7 @@
 	unsigned int ctl, cfg;
 	struct sk_buff *pending_skb;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	/* Disable all interrupts, block TX tasklet */
 	spin_lock_irq(&lp->lock);
@@ -363,7 +363,7 @@
 	void __iomem *ioaddr = lp->base;
 	int mask;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	/* see the header file for options in TCR/RCR DEFAULT */
 	SMC_SELECT_BANK(lp, 0);
@@ -397,7 +397,7 @@
 	void __iomem *ioaddr = lp->base;
 	struct sk_buff *pending_skb;
 
-	DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+	DBG(2, "%s: %s\n", CARDNAME, __func__);
 
 	/* no more interrupts for me */
 	spin_lock_irq(&lp->lock);
@@ -430,7 +430,7 @@
 	void __iomem *ioaddr = lp->base;
 	unsigned int packet_number, status, packet_len;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	packet_number = SMC_GET_RXFIFO(lp);
 	if (unlikely(packet_number & RXFIFO_REMPTY)) {
@@ -577,7 +577,7 @@
 	unsigned int packet_no, len;
 	unsigned char *buf;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	if (!smc_special_trylock(&lp->lock)) {
 		netif_stop_queue(dev);
@@ -662,7 +662,7 @@
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	BUG_ON(lp->pending_tx_skb != NULL);
 
@@ -734,7 +734,7 @@
 	void __iomem *ioaddr = lp->base;
 	unsigned int saved_packet, packet_no, tx_status, pkt_len;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	/* If the TX FIFO is empty then nothing to do */
 	packet_no = SMC_GET_TXFIFO(lp);
@@ -856,7 +856,7 @@
 	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
 
 	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__FUNCTION__, phyaddr, phyreg, phydata);
+		__func__, phyaddr, phyreg, phydata);
 
 	SMC_SELECT_BANK(lp, 2);
 	return phydata;
@@ -883,7 +883,7 @@
 	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
 
 	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__FUNCTION__, phyaddr, phyreg, phydata);
+		__func__, phyaddr, phyreg, phydata);
 
 	SMC_SELECT_BANK(lp, 2);
 }
@@ -896,7 +896,7 @@
 	struct smc_local *lp = netdev_priv(dev);
 	int phyaddr;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	lp->phy_type = 0;
 
@@ -935,7 +935,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int bmcr, cfg1;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	/* Enter Link Disable state */
 	cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1168,7 +1168,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int phy18;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	if (lp->phy_type == 0)
 		return;
@@ -1236,7 +1236,7 @@
 	int status, mask, timeout, card_stats;
 	int saved_pointer;
 
-	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(3, "%s: %s\n", dev->name, __func__);
 
 	spin_lock(&lp->lock);
 
@@ -1358,7 +1358,7 @@
 	void __iomem *ioaddr = lp->base;
 	int status, mask, eph_st, meminfo, fifo;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	spin_lock_irq(&lp->lock);
 	status = SMC_GET_INT(lp);
@@ -1402,7 +1402,7 @@
 	unsigned char multicast_table[8];
 	int update_multicast = 0;
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	if (dev->flags & IFF_PROMISC) {
 		DBG(2, "%s: RCR_PRMS\n", dev->name);
@@ -1505,7 +1505,7 @@
 {
 	struct smc_local *lp = netdev_priv(dev);
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	/*
 	 * Check that the address is valid.  If its not, refuse
@@ -1513,7 +1513,7 @@
 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+		PRINTK("%s: no valid ethernet hw addr\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1557,7 +1557,7 @@
 {
 	struct smc_local *lp = netdev_priv(dev);
 
-	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+	DBG(2, "%s: %s\n", dev->name, __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
@@ -1700,7 +1700,7 @@
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+	DBG(2, "%s: %s\n", CARDNAME, __func__);
 
 	cookie = probe_irq_on();
 
@@ -1778,7 +1778,7 @@
 	const char *version_string;
 	DECLARE_MAC_BUF(mac);
 
-	DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+	DBG(2, "%s: %s\n", CARDNAME, __func__);
 
 	/* First, see if the high byte is 0x33 */
 	val = SMC_CURRENT_BANK(lp);
@@ -1961,7 +1961,8 @@
 		if (dev->dma != (unsigned char)-1)
 			printk(" DMA %d", dev->dma);
 
-		printk("%s%s\n", nowait ? " [nowait]" : "",
+		printk("%s%s\n",
+			lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
 			THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
 
 		if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 997e7f1..edea073 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -446,6 +446,8 @@
 #define SMC_CAN_USE_32BIT	1
 #define SMC_NOWAIT		1
 
+#define SMC_IO_SHIFT		(lp->io_shift)
+
 #define SMC_inb(a, r)		readb((a) + (r))
 #define SMC_inw(a, r)		readw((a) + (r))
 #define SMC_inl(a, r)		readl((a) + (r))
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 7d5561b..f860ea1 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -409,6 +409,7 @@
 static int  eeprom_read(void __iomem *ioaddr, int location);
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int  mdio_wait_link(struct net_device *dev, int wait);
 static int  netdev_open(struct net_device *dev);
 static void check_duplex(struct net_device *dev);
 static void netdev_timer(unsigned long data);
@@ -785,6 +786,24 @@
 	return;
 }
 
+static int mdio_wait_link(struct net_device *dev, int wait)
+{
+	int bmsr;
+	int phy_id;
+	struct netdev_private *np;
+
+	np = netdev_priv(dev);
+	phy_id = np->phys[0];
+
+	do {
+		bmsr = mdio_read(dev, phy_id, MII_BMSR);
+		if (bmsr & 0x0004)
+			return 0;
+		mdelay(1);
+	} while (--wait > 0);
+	return -1;
+}
+
 static int netdev_open(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
@@ -1393,41 +1412,51 @@
 	int speed;
 
 	if (intr_status & LinkChange) {
-		if (np->an_enable) {
-			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
-			mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
-			mii_advertise &= mii_lpa;
-			printk (KERN_INFO "%s: Link changed: ", dev->name);
-			if (mii_advertise & ADVERTISE_100FULL) {
-				np->speed = 100;
-				printk ("100Mbps, full duplex\n");
-			} else if (mii_advertise & ADVERTISE_100HALF) {
-				np->speed = 100;
-				printk ("100Mbps, half duplex\n");
-			} else if (mii_advertise & ADVERTISE_10FULL) {
-				np->speed = 10;
-				printk ("10Mbps, full duplex\n");
-			} else if (mii_advertise & ADVERTISE_10HALF) {
-				np->speed = 10;
-				printk ("10Mbps, half duplex\n");
-			} else
-				printk ("\n");
+		if (mdio_wait_link(dev, 10) == 0) {
+			printk(KERN_INFO "%s: Link up\n", dev->name);
+			if (np->an_enable) {
+				mii_advertise = mdio_read(dev, np->phys[0],
+							   MII_ADVERTISE);
+				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+				mii_advertise &= mii_lpa;
+				printk(KERN_INFO "%s: Link changed: ",
+					dev->name);
+				if (mii_advertise & ADVERTISE_100FULL) {
+					np->speed = 100;
+					printk("100Mbps, full duplex\n");
+				} else if (mii_advertise & ADVERTISE_100HALF) {
+					np->speed = 100;
+					printk("100Mbps, half duplex\n");
+				} else if (mii_advertise & ADVERTISE_10FULL) {
+					np->speed = 10;
+					printk("10Mbps, full duplex\n");
+				} else if (mii_advertise & ADVERTISE_10HALF) {
+					np->speed = 10;
+					printk("10Mbps, half duplex\n");
+				} else
+					printk("\n");
 
+			} else {
+				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
+				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
+				np->speed = speed;
+				printk(KERN_INFO "%s: Link changed: %dMbps ,",
+					dev->name, speed);
+				printk("%s duplex.\n",
+					(mii_ctl & BMCR_FULLDPLX) ?
+						"full" : "half");
+			}
+			check_duplex(dev);
+			if (np->flowctrl && np->mii_if.full_duplex) {
+				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
+					ioaddr + MulticastFilter1+2);
+				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
+					ioaddr + MACCtrl0);
+			}
+			netif_carrier_on(dev);
 		} else {
-			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
-			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
-			np->speed = speed;
-			printk (KERN_INFO "%s: Link changed: %dMbps ,",
-				dev->name, speed);
-			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
-				"full" : "half");
-		}
-		check_duplex (dev);
-		if (np->flowctrl && np->mii_if.full_duplex) {
-			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
-				ioaddr + MulticastFilter1+2);
-			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
-				ioaddr + MACCtrl0);
+			printk(KERN_INFO "%s: Link down\n", dev->name);
+			netif_carrier_off(dev);
 		}
 	}
 	if (intr_status & StatsMax) {
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 7db48f1..efaf84d 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -539,22 +539,22 @@
 
 #define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
 #define DBG2(fmt, args...)	\
-	printk(KERN_ERR  "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args)
+	printk(KERN_ERR  "%s:%-5d: " fmt, __func__, __LINE__, ## args)
 
 #define BDX_ASSERT(x) BUG_ON(x)
 
 #ifdef DEBUG
 
 #define ENTER          do { \
-	printk(KERN_ERR  "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \
+	printk(KERN_ERR  "%s:%-5d: ENTER\n", __func__, __LINE__); \
 } while (0)
 
 #define RET(args...)   do { \
-	printk(KERN_ERR  "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \
+	printk(KERN_ERR  "%s:%-5d: RETURN\n", __func__, __LINE__); \
 return args; } while (0)
 
 #define DBG(fmt, args...)	\
-	printk(KERN_ERR  "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args)
+	printk(KERN_ERR  "%s:%-5d: " fmt, __func__, __LINE__, ## args)
 #else
 #define ENTER         do {  } while (0)
 #define RET(args...)   return args
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 71d2c5c..1239207 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3861,10 +3861,7 @@
 			return;
 		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(ri, mapping),
-				 skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
+		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 
 		ri->skb = NULL;
 
@@ -3874,12 +3871,6 @@
 			ri = &tp->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
-
-			pci_unmap_page(tp->pdev,
-				       pci_unmap_addr(ri, mapping),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
@@ -4633,12 +4624,16 @@
 	} else {
 		/* New SKB is guaranteed to be linear. */
 		entry = *start;
-		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-					  PCI_DMA_TODEVICE);
+		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
+		new_addr = skb_shinfo(new_skb)->dma_maps[0];
+
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			if (!ret)
+				skb_dma_unmap(&tp->pdev->dev, new_skb,
+					      DMA_TO_DEVICE);
 			ret = -1;
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
@@ -4652,18 +4647,8 @@
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
-		int len;
-
-		if (i == 0)
-			len = skb_headlen(skb);
-		else
-			len = skb_shinfo(skb)->frags[i-1].size;
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
-				 len, PCI_DMA_TODEVICE);
 		if (i == 0) {
 			tp->tx_buffers[entry].skb = new_skb;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
 		} else {
 			tp->tx_buffers[entry].skb = NULL;
 		}
@@ -4671,6 +4656,7 @@
 		i++;
 	}
 
+	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	return ret;
@@ -4705,8 +4691,9 @@
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	dma_addr_t mapping;
 	u32 len, entry, base_flags, mss;
+	struct skb_shared_info *sp;
+	dma_addr_t mapping;
 
 	len = skb_headlen(skb);
 
@@ -4765,11 +4752,16 @@
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	/* Queue skb data, a.k.a. the main skb fragment. */
-	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
+	sp = skb_shinfo(skb);
+
+	mapping = sp->dma_maps[0];
 
 	tp->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 	tg3_set_txd(tp, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -4785,13 +4777,8 @@
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = pci_map_page(tp->pdev,
-					       frag->page,
-					       frag->page_offset,
-					       len, PCI_DMA_TODEVICE);
-
+			mapping = sp->dma_maps[i + 1];
 			tp->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 			tg3_set_txd(tp, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
@@ -4859,9 +4846,10 @@
 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	dma_addr_t mapping;
 	u32 len, entry, base_flags, mss;
+	struct skb_shared_info *sp;
 	int would_hit_hwbug;
+	dma_addr_t mapping;
 
 	len = skb_headlen(skb);
 
@@ -4942,11 +4930,16 @@
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	/* Queue skb data, a.k.a. the main skb fragment. */
-	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
+	sp = skb_shinfo(skb);
+
+	mapping = sp->dma_maps[0];
 
 	tp->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
@@ -4969,13 +4962,9 @@
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = pci_map_page(tp->pdev,
-					       frag->page,
-					       frag->page_offset,
-					       len, PCI_DMA_TODEVICE);
+			mapping = sp->dma_maps[i + 1];
 
 			tp->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 			if (tg3_4g_overflow_test(mapping, len))
 				would_hit_hwbug = 1;
@@ -5128,7 +5117,6 @@
 	for (i = 0; i < TG3_TX_RING_SIZE; ) {
 		struct tx_ring_info *txp;
 		struct sk_buff *skb;
-		int j;
 
 		txp = &tp->tx_buffers[i];
 		skb = txp->skb;
@@ -5138,22 +5126,11 @@
 			continue;
 		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(txp, mapping),
-				 skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
+		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+
 		txp->skb = NULL;
 
-		i++;
-
-		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
-			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
-			pci_unmap_page(tp->pdev,
-				       pci_unmap_addr(txp, mapping),
-				       skb_shinfo(skb)->frags[j].size,
-				       PCI_DMA_TODEVICE);
-			i++;
-		}
+		i += skb_shinfo(skb)->nr_frags + 1;
 
 		dev_kfree_skb_any(skb);
 	}
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f5b8cab..6c7b5e3 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2197,7 +2197,6 @@
 
 struct tx_ring_info {
 	struct sk_buff			*skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
 	u32				prev_vlan_tag;
 };
 
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 43fde99..eb1da6f 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@
 			return;
 		udelay(10);
 	}
-	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+	printk(KERN_ERR "%s function time out \n", __func__);
 }
 
 static int mii_speed(struct mii_if_info *mii)
@@ -1059,7 +1059,7 @@
 			return;
 		udelay(10);
 	}
-	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+	printk(KERN_ERR "%s function time out \n", __func__);
 }
 
 static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1244,7 +1244,7 @@
 		udelay(10);
 	}
 	if (i == 0)
-		printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+		printk(KERN_ERR "%s function time out \n", __func__);
 
 	if (data->phy_type == TSI108_PHY_BCM54XX) {
 		tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 9281d06..f54c450 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1418,7 +1418,6 @@
 
 	de_free_rings(de);
 	de_adapter_sleep(de);
-	pci_disable_device(de->pdev);
 	return 0;
 }
 
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 617ef41..6444cbe 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -832,7 +832,7 @@
 	s32 csr14;                          /* Saved SIA TX/RX Register     */
 	s32 csr15;                          /* Saved SIA General Register   */
 	int save_cnt;                       /* Flag if state already saved  */
-	struct sk_buff *skb;                /* Save the (re-ordered) skb's  */
+	struct sk_buff_head queue;          /* Save the (re-ordered) skb's  */
     } cache;
     struct de4x5_srom srom;                 /* A copy of the SROM           */
     int cfrv;				    /* Card CFRV copy */
@@ -1128,6 +1128,7 @@
 	printk("      which has an Ethernet PROM CRC error.\n");
 	return -ENXIO;
     } else {
+	skb_queue_head_init(&lp->cache.queue);
 	lp->cache.gepc = GEP_INIT;
 	lp->asBit = GEP_SLNK;
 	lp->asPolarity = GEP_SLNK;
@@ -1487,7 +1488,7 @@
 	}
     } else if (skb->len > 0) {
 	/* If we already have stuff queued locally, use that first */
-	if (lp->cache.skb && !lp->interrupt) {
+	if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
 	    de4x5_put_cache(dev, skb);
 	    skb = de4x5_get_cache(dev);
 	}
@@ -1580,7 +1581,7 @@
 
     /* Load the TX ring with any locally stored packets */
     if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
-	while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) {
+	while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
 	    de4x5_queue_pkt(de4x5_get_cache(dev), dev);
 	}
 	lp->cache.lock = 0;
@@ -3679,11 +3680,7 @@
     }
 
     /* Unload the locally queued packets */
-    while (lp->cache.skb) {
-	dev_kfree_skb(de4x5_get_cache(dev));
-    }
-
-    return;
+    __skb_queue_purge(&lp->cache.queue);
 }
 
 /*
@@ -3781,43 +3778,24 @@
 de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
 {
     struct de4x5_private *lp = netdev_priv(dev);
-    struct sk_buff *p;
 
-    if (lp->cache.skb) {
-	for (p=lp->cache.skb; p->next; p=p->next);
-	p->next = skb;
-    } else {
-	lp->cache.skb = skb;
-    }
-    skb->next = NULL;
-
-    return;
+    __skb_queue_tail(&lp->cache.queue, skb);
 }
 
 static void
 de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
 {
     struct de4x5_private *lp = netdev_priv(dev);
-    struct sk_buff *p = lp->cache.skb;
 
-    lp->cache.skb = skb;
-    skb->next = p;
-
-    return;
+    __skb_queue_head(&lp->cache.queue, skb);
 }
 
 static struct sk_buff *
 de4x5_get_cache(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
-    struct sk_buff *p = lp->cache.skb;
 
-    if (p) {
-	lp->cache.skb = p->next;
-	p->next = NULL;
-    }
-
-    return p;
+    return __skb_dequeue(&lp->cache.queue);
 }
 
 /*
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8f944e5..c87747b 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -400,7 +400,7 @@
 	enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
 	if (!enet_addr_cont) {
 		ugeth_err("%s: No memory for enet_addr_container object.",
-			  __FUNCTION__);
+			  __func__);
 		return NULL;
 	}
 
@@ -427,7 +427,7 @@
 	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
 
 	if (!(paddr_num < NUM_OF_PADDRS)) {
-		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
+		ugeth_warn("%s: Illegal paddr_num.", __func__);
 		return -EINVAL;
 	}
 
@@ -447,7 +447,7 @@
 	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
 
 	if (!(paddr_num < NUM_OF_PADDRS)) {
-		ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
+		ugeth_warn("%s: Illagel paddr_num.", __func__);
 		return -EINVAL;
 	}
 
@@ -1441,7 +1441,7 @@
 	u32 upsmr, maccfg2, tbiBaseAddress;
 	u16 value;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	ug_info = ugeth->ug_info;
 	ug_regs = ugeth->ug_regs;
@@ -1504,7 +1504,7 @@
 	if (ret_val != 0) {
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
-			     __FUNCTION__);
+			     __func__);
 		return ret_val;
 	}
 
@@ -1744,7 +1744,7 @@
 	/* check if the UCC number is in range. */
 	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+			ugeth_err("%s: ucc_num out of range.", __func__);
 		return -EINVAL;
 	}
 
@@ -1773,7 +1773,7 @@
 	/* check if the UCC number is in range. */
 	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+			ugeth_err("%s: ucc_num out of range.", __func__);
 		return -EINVAL;
 	}
 
@@ -2062,7 +2062,7 @@
 		ugeth_warn
 		    ("%s: multicast address added to paddr will have no "
 		     "effect - is this what you wanted?",
-		     __FUNCTION__);
+		     __func__);
 
 	ugeth->indAddrRegUsed[paddr_num] = 1;	/* mark this paddr as used */
 	/* store address in our database */
@@ -2278,7 +2278,7 @@
 	struct phy_device *phydev = ugeth->phydev;
 	u32 tempval;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	/* Disable the controller */
 	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -2315,7 +2315,7 @@
 	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: Bad memory partition value.",
-					__FUNCTION__);
+					__func__);
 		return -EINVAL;
 	}
 
@@ -2327,7 +2327,7 @@
 			if (netif_msg_probe(ugeth))
 				ugeth_err
 				    ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
-					__FUNCTION__);
+					__func__);
 			return -EINVAL;
 		}
 	}
@@ -2338,7 +2338,7 @@
 			if (netif_msg_probe(ugeth))
 				ugeth_err
 				    ("%s: Tx BD ring length must be no smaller than 2.",
-				     __FUNCTION__);
+				     __func__);
 			return -EINVAL;
 		}
 	}
@@ -2349,21 +2349,21 @@
 		if (netif_msg_probe(ugeth))
 			ugeth_err
 			    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
-			     __FUNCTION__);
+			     __func__);
 		return -EINVAL;
 	}
 
 	/* num Tx queues */
 	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
+			ugeth_err("%s: number of tx queues too large.", __func__);
 		return -EINVAL;
 	}
 
 	/* num Rx queues */
 	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
+			ugeth_err("%s: number of rx queues too large.", __func__);
 		return -EINVAL;
 	}
 
@@ -2374,7 +2374,7 @@
 				ugeth_err
 				    ("%s: VLAN priority table entry must not be"
 					" larger than number of Rx queues.",
-				     __FUNCTION__);
+				     __func__);
 			return -EINVAL;
 		}
 	}
@@ -2386,7 +2386,7 @@
 				ugeth_err
 				    ("%s: IP priority table entry must not be"
 					" larger than number of Rx queues.",
-				     __FUNCTION__);
+				     __func__);
 			return -EINVAL;
 		}
 	}
@@ -2394,7 +2394,7 @@
 	if (ug_info->cam && !ug_info->ecamptr) {
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
-				  __FUNCTION__);
+				  __func__);
 		return -EINVAL;
 	}
 
@@ -2404,7 +2404,7 @@
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: Number of station addresses greater than 1 "
 				  "not allowed in extended parsing mode.",
-				  __FUNCTION__);
+				  __func__);
 		return -EINVAL;
 	}
 
@@ -2418,7 +2418,7 @@
 	/* Initialize the general fast UCC block. */
 	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+			ugeth_err("%s: Failed to init uccf.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2448,7 +2448,7 @@
 	u8 __iomem *endOfRing;
 	u8 numThreadsRxNumerical, numThreadsTxNumerical;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 	uccf = ugeth->uccf;
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
@@ -2474,7 +2474,7 @@
 	default:
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Bad number of Rx threads value.",
-				       	__FUNCTION__);
+				       	__func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2499,7 +2499,7 @@
 	default:
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Bad number of Tx threads value.",
-				       	__FUNCTION__);
+				       	__func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2553,7 +2553,7 @@
 	if (ret_val != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: IPGIFG initialization parameter too large.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -2571,7 +2571,7 @@
 	if (ret_val != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Half Duplex initialization parameter too large.",
-			  __FUNCTION__);
+			  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -2626,7 +2626,7 @@
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				    ("%s: Can not allocate memory for Tx bd rings.",
-				     __FUNCTION__);
+				     __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2662,7 +2662,7 @@
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				    ("%s: Can not allocate memory for Rx bd rings.",
-				     __FUNCTION__);
+				     __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2678,7 +2678,7 @@
 		if (ugeth->tx_skbuff[j] == NULL) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Could not allocate tx_skbuff",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2710,7 +2710,7 @@
 		if (ugeth->rx_skbuff[j] == NULL) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Could not allocate rx_skbuff",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2744,7 +2744,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2767,7 +2767,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2797,7 +2797,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2841,7 +2841,7 @@
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				 ("%s: Can not allocate DPRAM memory for p_scheduler.",
-				     __FUNCTION__);
+				     __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2892,7 +2892,7 @@
 				ugeth_err
 				    ("%s: Can not allocate DPRAM memory for"
 					" p_tx_fw_statistics_pram.",
-				       	__FUNCTION__);
+				       	__func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2932,7 +2932,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2954,7 +2954,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2978,7 +2978,7 @@
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 					("%s: Can not allocate DPRAM memory for"
-					" p_rx_fw_statistics_pram.", __FUNCTION__);
+					" p_rx_fw_statistics_pram.", __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -3001,7 +3001,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for"
-				" p_rx_irq_coalescing_tbl.", __FUNCTION__);
+				" p_rx_irq_coalescing_tbl.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3070,7 +3070,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3147,7 +3147,7 @@
 		if (!ug_info->extendedFilteringChainPointer) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Null Extended Filtering Chain Pointer.",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -EINVAL;
 		}
@@ -3161,7 +3161,7 @@
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 					("%s: Can not allocate DPRAM memory for"
-					" p_exf_glbl_param.", __FUNCTION__);
+					" p_exf_glbl_param.", __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -3209,7 +3209,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate memory for"
-				" p_UccInitEnetParamShadows.", __FUNCTION__);
+				" p_UccInitEnetParamShadows.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3244,7 +3244,7 @@
 		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Invalid largest External Lookup Key Size.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 	}
@@ -3271,7 +3271,7 @@
 		ug_info->riscRx, 1)) != 0) {
 		if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
-					__FUNCTION__);
+					__func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -3287,7 +3287,7 @@
 				    ug_info->riscTx, 0)) != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -3297,7 +3297,7 @@
 		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Can not fill Rx bds with buffers.",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return ret_val;
 		}
@@ -3309,7 +3309,7 @@
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3360,7 +3360,7 @@
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	dev->stats.tx_errors++;
 
@@ -3386,7 +3386,7 @@
 	u32 bd_status;
 	u8 txQ = 0;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	spin_lock_irq(&ugeth->lock);
 
@@ -3459,7 +3459,7 @@
 	u8 *bdBuffer;
 	struct net_device *dev;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	dev = ugeth->dev;
 
@@ -3481,7 +3481,7 @@
 		    (bd_status & R_ERRORS_FATAL)) {
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
-					   __FUNCTION__, __LINE__, (u32) skb);
+					   __func__, __LINE__, (u32) skb);
 			if (skb)
 				dev_kfree_skb_any(skb);
 
@@ -3507,7 +3507,7 @@
 		skb = get_new_skb(ugeth, bd);
 		if (!skb) {
 			if (netif_msg_rx_err(ugeth))
-				ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
+				ugeth_warn("%s: No Rx Data Buffer", __func__);
 			dev->stats.rx_dropped++;
 			break;
 		}
@@ -3613,7 +3613,7 @@
 	register u32 tx_mask;
 	u8 i;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	uccf = ugeth->uccf;
 	ug_info = ugeth->ug_info;
@@ -3683,13 +3683,13 @@
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 	int err;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	/* Test station address */
 	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Multicast address used for station address"
-				  " - is this what you wanted?", __FUNCTION__);
+				  " - is this what you wanted?", __func__);
 		return -EINVAL;
 	}
 
@@ -3772,7 +3772,7 @@
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	napi_disable(&ugeth->napi);
 
@@ -3840,7 +3840,7 @@
 		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
 	};
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	prop = of_get_property(np, "cell-index", NULL);
 	if (!prop) {
@@ -3857,7 +3857,7 @@
 	if (ug_info == NULL) {
 		if (netif_msg_probe(&debug))
 			ugeth_err("%s: [%d] Missing additional data!",
-				       	__FUNCTION__, ucc_num);
+				       	__func__, ucc_num);
 		return -ENODEV;
 	}
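
/*
 * A minimal stand-alone sketch of the logging idiom used throughout the
 * hunks above: the GCC-specific __FUNCTION__ is replaced by __func__, the
 * predefined identifier standardized in C99.  check_ring_len() and the
 * message text below are illustrative only, not driver code.
 */
#include <stdio.h>

static int check_ring_len(int len)
{
	if (len < 2) {
		/* __func__ expands to the name of the enclosing function */
		fprintf(stderr, "%s: ring length must be no smaller than 2.\n",
			__func__);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_ring_len(1);	/* prints "check_ring_len: ring length ..." */
	return 0;
}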
 
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6e42b5a..1164c52 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -92,9 +92,6 @@
 
 #define	HSO_NET_TX_TIMEOUT		(HZ*10)
 
-/* Serial port defines and structs. */
-#define HSO_SERIAL_FLAG_RX_SENT		0
-
 #define HSO_SERIAL_MAGIC		0x48534f31
 
 /* Number of ttys to handle */
@@ -179,6 +176,12 @@
 	unsigned long flags;
 };
 
+enum rx_ctrl_state {
+	RX_IDLE,
+	RX_SENT,
+	RX_PENDING
+};
+
 struct hso_serial {
 	struct hso_device *parent;
 	int magic;
@@ -205,7 +208,7 @@
 	struct usb_endpoint_descriptor *in_endp;
 	struct usb_endpoint_descriptor *out_endp;
 
-	unsigned long flags;
+	enum rx_ctrl_state rx_state;
 	u8 rts_state;
 	u8 dtr_state;
 	unsigned tx_urb_used:1;
@@ -216,6 +219,15 @@
 	spinlock_t serial_lock;
 
 	int (*write_data) (struct hso_serial *serial);
+	/* Hacks required to get flow control
+	 * working on the serial receive buffers
+	 * so as not to drop characters on the floor.
+	 */
+	int  curr_rx_urb_idx;
+	u16  curr_rx_urb_offset;
+	u8   rx_urb_filled[MAX_RX_URBS];
+	struct tasklet_struct unthrottle_tasklet;
+	struct work_struct    retry_unthrottle_workqueue;
 };
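
/*
 * A stand-alone model of the flow-control bookkeeping added above: rx_state
 * plus the rx_urb_filled[] flags keep received serial data around until the
 * tty has consumed it instead of dropping it.  Only the mux/ctrl path is
 * modelled, and the helper names here are illustrative, not driver code.
 */
#include <stdio.h>

enum rx_ctrl_state { RX_IDLE, RX_SENT, RX_PENDING };

struct port_model {
	enum rx_ctrl_state rx_state;
	int rx_urb_filled;		/* the ctrl path has a single read URB */
};

/* The shared interrupt reports pending data on this port. */
static void intr_pending(struct port_model *p)
{
	if (p->rx_state != RX_IDLE)
		return;			/* a read is already outstanding */
	if (!p->rx_urb_filled)
		p->rx_state = RX_SENT;	/* issue the ctrl read now */
	else
		p->rx_state = RX_PENDING;	/* old buffer not drained yet */
}

/* The tty drained the buffer; reissue a read only if more data is expected. */
static void buffer_drained(struct port_model *p, int more_data)
{
	p->rx_urb_filled = 0;
	p->rx_state = (more_data || p->rx_state == RX_PENDING) ? RX_SENT
							       : RX_IDLE;
}

int main(void)
{
	struct port_model p = { RX_IDLE, 0 };

	intr_pending(&p);		/* RX_IDLE -> RX_SENT */
	p.rx_urb_filled = 1;		/* URB completion fills the buffer */
	buffer_drained(&p, 0);		/* back to RX_IDLE */
	printf("final state: %d (0 == RX_IDLE)\n", p.rx_state);
	return 0;
}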
 
 struct hso_device {
@@ -271,7 +283,7 @@
 static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
 			       unsigned int set, unsigned int clear);
 static void ctrl_callback(struct urb *urb);
-static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
+static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
 static void hso_kick_transmit(struct hso_serial *serial);
 /* Helper functions */
 static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
@@ -287,6 +299,8 @@
 static void hso_free_shared_int(struct hso_shared_int *shared_int);
 static int hso_stop_net_device(struct hso_device *hso_dev);
 static void hso_serial_ref_free(struct kref *ref);
+static void hso_std_serial_read_bulk_callback(struct urb *urb);
+static int hso_mux_serial_read(struct hso_serial *serial);
 static void async_get_intf(struct work_struct *data);
 static void async_put_intf(struct work_struct *data);
 static int hso_put_activity(struct hso_device *hso_dev);
@@ -458,6 +472,17 @@
 }
 static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
 
+static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
+{
+	int idx;
+
+	for (idx = 0; idx < serial->num_rx_urbs; idx++)
+		if (serial->rx_urb[idx] == urb)
+			return idx;
+	dev_err(serial->parent->dev, "hso_urb_to_index failed\n");
+	return -1;
+}
+
 /* converts mux value to a port spec value */
 static u32 hso_mux_to_port(int mux)
 {
@@ -1039,6 +1064,158 @@
 	return;
 }
 
+static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
+{
+	int result;
+#ifdef CONFIG_HSO_AUTOPM
+	usb_mark_last_busy(urb->dev);
+#endif
+	/* We are done with this URB, resubmit it. Prep the USB to wait for
+	 * another frame */
+	usb_fill_bulk_urb(urb, serial->parent->usb,
+			  usb_rcvbulkpipe(serial->parent->usb,
+					  serial->in_endp->
+					  bEndpointAddress & 0x7F),
+			  urb->transfer_buffer, serial->rx_data_length,
+			  hso_std_serial_read_bulk_callback, serial);
+	/* Give this to the USB subsystem so it can tell us when more data
+	 * arrives. */
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result) {
+		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
+			__func__, result);
+	}
+}
+
+
+
+
+static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
+{
+	int count;
+	struct urb *curr_urb;
+
+	while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
+		curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
+		count = put_rxbuf_data(curr_urb, serial);
+		if (count == -1)
+			return;
+		if (count == 0) {
+			serial->curr_rx_urb_idx++;
+			if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
+				serial->curr_rx_urb_idx = 0;
+			hso_resubmit_rx_bulk_urb(serial, curr_urb);
+		}
+	}
+}
+
+static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
+{
+	int count = 0;
+	struct urb *urb;
+
+	urb = serial->rx_urb[0];
+	if (serial->open_count > 0) {
+		count = put_rxbuf_data(urb, serial);
+		if (count == -1)
+			return;
+	}
+	/* Reissue a read as long as we receive data. */
+
+	if (count == 0 && ((urb->actual_length != 0) ||
+			   (serial->rx_state == RX_PENDING))) {
+		serial->rx_state = RX_SENT;
+		hso_mux_serial_read(serial);
+	} else
+		serial->rx_state = RX_IDLE;
+}
+
+
+/* read callback for Diag and CS port */
+static void hso_std_serial_read_bulk_callback(struct urb *urb)
+{
+	struct hso_serial *serial = urb->context;
+	int status = urb->status;
+
+	/* sanity check */
+	if (!serial) {
+		D1("serial == NULL");
+		return;
+	} else if (status) {
+		log_usb_status(status, __func__);
+		return;
+	}
+
+	D4("\n--- Got serial_read_bulk callback %02x ---", status);
+	D1("Actual length = %d\n", urb->actual_length);
+	DUMP1(urb->transfer_buffer, urb->actual_length);
+
+	/* Anyone listening? */
+	if (serial->open_count == 0)
+		return;
+
+	if (status == 0) {
+		if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
+			u32 rest;
+			u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+			rest =
+			    urb->actual_length %
+			    serial->in_endp->wMaxPacketSize;
+			if (((rest == 5) || (rest == 6))
+			    && !memcmp(((u8 *) urb->transfer_buffer) +
+				       urb->actual_length - 4, crc_check, 4)) {
+				urb->actual_length -= 4;
+			}
+		}
+		/* Valid data, handle RX data */
+		spin_lock(&serial->serial_lock);
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+		put_rxbuf_data_and_resubmit_bulk_urb(serial);
+		spin_unlock(&serial->serial_lock);
+	} else if (status == -ENOENT || status == -ECONNRESET) {
+		/* Unlinked - check for throttled port. */
+		D2("Port %d, successfully unlinked urb", serial->minor);
+		spin_lock(&serial->serial_lock);
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+		hso_resubmit_rx_bulk_urb(serial, urb);
+		spin_unlock(&serial->serial_lock);
+	} else {
+		D2("Port %d, status = %d for read urb", serial->minor, status);
+		return;
+	}
+}
+
+/*
+ * This needs to be a tasklet; otherwise we would
+ * end up calling this function recursively.
+ */
+void hso_unthrottle_tasklet(struct hso_serial *serial)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&serial->serial_lock, flags);
+	if ((serial->parent->port_spec & HSO_INTF_MUX))
+		put_rxbuf_data_and_resubmit_ctrl_urb(serial);
+	else
+		put_rxbuf_data_and_resubmit_bulk_urb(serial);
+	spin_unlock_irqrestore(&serial->serial_lock, flags);
+}
+
+static	void hso_unthrottle(struct tty_struct *tty)
+{
+	struct hso_serial *serial = get_serial_by_tty(tty);
+
+	tasklet_hi_schedule(&serial->unthrottle_tasklet);
+}
+
+void hso_unthrottle_workfunc(struct work_struct *work)
+{
+	struct hso_serial *serial =
+	    container_of(work, struct hso_serial,
+			 retry_unthrottle_workqueue);
+	hso_unthrottle_tasklet(serial);
+}
+
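
/*
 * A stand-alone sketch of the HSO_INFO_CRC_BUG handling seen in
 * hso_std_serial_read_bulk_callback() above: when the transfer length modulo
 * the endpoint packet size is 5 or 6 and the buffer ends in DE AD BE EF, the
 * bogus 4-byte trailer is dropped.  The 64-byte packet size is an assumed
 * example value, not read from a real endpoint descriptor.
 */
#include <stdio.h>
#include <string.h>

static unsigned int strip_crc_trailer(const unsigned char *buf,
				      unsigned int len,
				      unsigned int max_packet)
{
	static const unsigned char crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	unsigned int rest = len % max_packet;

	if ((rest == 5 || rest == 6) && len >= 4 &&
	    !memcmp(buf + len - 4, crc_check, 4))
		len -= 4;	/* drop the trailer, keep the real payload */
	return len;
}

int main(void)
{
	unsigned char buf[69] = { 0 };

	buf[65] = 0xDE;
	buf[66] = 0xAD;
	buf[67] = 0xBE;
	buf[68] = 0xEF;
	/* 69 % 64 == 5 and the trailer matches, so 65 bytes remain */
	printf("payload length: %u\n", strip_crc_trailer(buf, 69, 64));
	return 0;
}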
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1064,13 +1241,18 @@
 	tty->driver_data = serial;
 	serial->tty = tty;
 
-	/* check for port allready opened, if not set the termios */
+	/* check for port already opened, if not set the termios */
 	serial->open_count++;
 	if (serial->open_count == 1) {
 		tty->low_latency = 1;
-		serial->flags = 0;
+		serial->rx_state = RX_IDLE;
 		/* Force default termio settings */
 		_hso_serial_set_termios(tty, NULL);
+		tasklet_init(&serial->unthrottle_tasklet,
+			     (void (*)(unsigned long))hso_unthrottle_tasklet,
+			     (unsigned long)serial);
+		INIT_WORK(&serial->retry_unthrottle_workqueue,
+			  hso_unthrottle_workfunc);
 		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
 		if (result) {
 			hso_stop_serial_device(serial->parent);
@@ -1117,9 +1299,13 @@
 		}
 		if (!usb_gone)
 			hso_stop_serial_device(serial->parent);
+		tasklet_kill(&serial->unthrottle_tasklet);
+		cancel_work_sync(&serial->retry_unthrottle_workqueue);
 	}
+
 	if (!usb_gone)
 		usb_autopm_put_interface(serial->parent->interface);
+
 	mutex_unlock(&serial->parent->mutex);
 }
 
@@ -1422,15 +1608,21 @@
 								   (1 << i));
 			if (serial != NULL) {
 				D1("Pending read interrupt on port %d\n", i);
-				if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT,
-						      &serial->flags)) {
+				spin_lock(&serial->serial_lock);
+				if (serial->rx_state == RX_IDLE) {
 					/* Setup and send a ctrl req read on
 					 * port i */
-					hso_mux_serial_read(serial);
+					if (!serial->rx_urb_filled[0]) {
+						serial->rx_state = RX_SENT;
+						hso_mux_serial_read(serial);
+					} else
+						serial->rx_state = RX_PENDING;
+
 				} else {
 					D1("Already pending a read on "
 					   "port %d\n", i);
 				}
+				spin_unlock(&serial->serial_lock);
 			}
 		}
 	}
@@ -1532,16 +1724,10 @@
 	if (req->bRequestType ==
 	    (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
 		/* response to a read command */
-		if (serial->open_count > 0) {
-			/* handle RX data the normal way */
-			put_rxbuf_data(urb, serial);
-		}
-
-		/* Re issue a read as long as we receive data. */
-		if (urb->actual_length != 0)
-			hso_mux_serial_read(serial);
-		else
-			clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
+		serial->rx_urb_filled[0] = 1;
+		spin_lock(&serial->serial_lock);
+		put_rxbuf_data_and_resubmit_ctrl_urb(serial);
+		spin_unlock(&serial->serial_lock);
 	} else {
 		hso_put_activity(serial->parent);
 		if (serial->tty)
@@ -1552,91 +1738,42 @@
 }
 
 /* handle RX data for serial port */
-static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
+static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
 	struct tty_struct *tty = serial->tty;
-
+	int write_length_remaining = 0;
+	int curr_write_len;
 	/* Sanity check */
 	if (urb == NULL || serial == NULL) {
 		D1("serial = NULL");
-		return;
+		return -2;
 	}
 
 	/* Push data to tty */
-	if (tty && urb->actual_length) {
+	if (tty) {
+		write_length_remaining = urb->actual_length -
+			serial->curr_rx_urb_offset;
 		D1("data to push to tty");
-		tty_insert_flip_string(tty, urb->transfer_buffer,
-				       urb->actual_length);
-		tty_flip_buffer_push(tty);
-	}
-}
-
-/* read callback for Diag and CS port */
-static void hso_std_serial_read_bulk_callback(struct urb *urb)
-{
-	struct hso_serial *serial = urb->context;
-	int result;
-	int status = urb->status;
-
-	/* sanity check */
-	if (!serial) {
-		D1("serial == NULL");
-		return;
-	} else if (status) {
-		log_usb_status(status, __func__);
-		return;
-	}
-
-	D4("\n--- Got serial_read_bulk callback %02x ---", status);
-	D1("Actual length = %d\n", urb->actual_length);
-	DUMP1(urb->transfer_buffer, urb->actual_length);
-
-	/* Anyone listening? */
-	if (serial->open_count == 0)
-		return;
-
-	if (status == 0) {
-		if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
-			u32 rest;
-			u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
-			rest =
-			    urb->actual_length %
-			    serial->in_endp->wMaxPacketSize;
-			if (((rest == 5) || (rest == 6))
-			    && !memcmp(((u8 *) urb->transfer_buffer) +
-				       urb->actual_length - 4, crc_check, 4)) {
-				urb->actual_length -= 4;
-			}
+		while (write_length_remaining) {
+			if (test_bit(TTY_THROTTLED, &tty->flags))
+				return -1;
+			curr_write_len =  tty_insert_flip_string
+				(tty, urb->transfer_buffer +
+				 serial->curr_rx_urb_offset,
+				 write_length_remaining);
+			serial->curr_rx_urb_offset += curr_write_len;
+			write_length_remaining -= curr_write_len;
+			tty_flip_buffer_push(tty);
 		}
-		/* Valid data, handle RX data */
-		put_rxbuf_data(urb, serial);
-	} else if (status == -ENOENT || status == -ECONNRESET) {
-		/* Unlinked - check for throttled port. */
-		D2("Port %d, successfully unlinked urb", serial->minor);
-	} else {
-		D2("Port %d, status = %d for read urb", serial->minor, status);
-		return;
 	}
-
-	usb_mark_last_busy(urb->dev);
-
-	/* We are done with this URB, resubmit it. Prep the USB to wait for
-	 * another frame */
-	usb_fill_bulk_urb(urb, serial->parent->usb,
-			  usb_rcvbulkpipe(serial->parent->usb,
-					  serial->in_endp->
-					  bEndpointAddress & 0x7F),
-			  urb->transfer_buffer, serial->rx_data_length,
-			  hso_std_serial_read_bulk_callback, serial);
-	/* Give this to the USB subsystem so it can tell us when more data
-	 * arrives. */
-	result = usb_submit_urb(urb, GFP_ATOMIC);
-	if (result) {
-		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d",
-			__func__, result);
+	if (write_length_remaining == 0) {
+		serial->curr_rx_urb_offset = 0;
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
 	}
+	return write_length_remaining;
 }
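
/*
 * A stand-alone model of the reworked put_rxbuf_data() above: push as much
 * as the tty layer will accept, remember the offset reached, and report
 * whether anything is left so the unthrottle path can resume later.
 * tty_take() stands in for tty_insert_flip_string() and simply accepts a
 * fixed number of bytes per call; it is not a real kernel interface.
 */
#include <stdio.h>

#define TTY_ROOM_PER_CALL 16

static int tty_take(const char *data, int len)
{
	(void)data;
	return len < TTY_ROOM_PER_CALL ? len : TTY_ROOM_PER_CALL;
}

/* Returns 0 when the buffer is fully consumed, -1 when throttled mid-way. */
static int push_rxbuf(const char *buf, int buf_len, int *offset, int throttled)
{
	int remaining = buf_len - *offset;

	while (remaining) {
		int taken;

		if (throttled)
			return -1;	/* resume from *offset after unthrottle */
		taken = tty_take(buf + *offset, remaining);
		*offset += taken;
		remaining -= taken;
	}
	*offset = 0;		/* buffer drained, the URB can be resubmitted */
	return 0;
}

int main(void)
{
	char buf[40] = { 0 };
	int offset = 0;

	printf("unthrottled: %d\n", push_rxbuf(buf, 40, &offset, 0));
	printf("throttled:   %d\n", push_rxbuf(buf, 40, &offset, 1));
	return 0;
}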
 
+
 /* Base driver functions */
 
 static void hso_log_port(struct hso_device *hso_dev)
@@ -1794,9 +1931,13 @@
 		return -ENODEV;
 
 	for (i = 0; i < serial->num_rx_urbs; i++) {
-		if (serial->rx_urb[i])
+		if (serial->rx_urb[i]) {
 				usb_kill_urb(serial->rx_urb[i]);
+				serial->rx_urb_filled[i] = 0;
+		}
 	}
+	serial->curr_rx_urb_idx = 0;
+	serial->curr_rx_urb_offset = 0;
 
 	if (serial->tx_urb)
 		usb_kill_urb(serial->tx_urb);
@@ -2211,14 +2352,14 @@
 				     USB_DIR_IN);
 	if (!serial->in_endp) {
 		dev_err(&interface->dev, "Failed to find BULK IN ep\n");
-		goto exit;
+		goto exit2;
 	}
 
 	if (!
 	    (serial->out_endp =
 	     hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
 		dev_err(&interface->dev, "Failed to find BULK IN ep\n");
-		goto exit;
+		goto exit2;
 	}
 
 	serial->write_data = hso_std_serial_write_data;
@@ -2231,9 +2372,10 @@
 
 	/* done, return it */
 	return hso_dev;
+
+exit2:
+	hso_serial_common_free(serial);
 exit:
-	if (hso_dev && serial)
-		hso_serial_common_free(serial);
 	kfree(serial);
 	hso_free_device(hso_dev);
 	return NULL;
@@ -2740,6 +2882,7 @@
 	.chars_in_buffer = hso_serial_chars_in_buffer,
 	.tiocmget = hso_serial_tiocmget,
 	.tiocmset = hso_serial_tiocmset,
+	.unthrottle = hso_unthrottle
 };
 
 static struct usb_driver hso_driver = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index ca9d00c..b514350 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -118,7 +118,7 @@
 
 	if (urb->status < 0)
 		printk(KERN_DEBUG "%s() failed with %d\n",
-		       __FUNCTION__, urb->status);
+		       __func__, urb->status);
 
 	kfree(req);
 	usb_free_urb(urb);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8c19307..38b90e7 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -119,7 +119,7 @@
 	default:
 		if (netif_msg_drv(pegasus) && printk_ratelimit())
 			dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
-				__FUNCTION__, urb->status);
+				__func__, urb->status);
 	}
 	pegasus->flags &= ~ETH_REGS_CHANGED;
 	wake_up(&pegasus->ctrl_wait);
@@ -136,7 +136,7 @@
 	if (!buffer) {
 		if (netif_msg_drv(pegasus))
 			dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
-					__FUNCTION__);
+					__func__);
 		return -ENOMEM;
 	}
 	add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -224,7 +224,7 @@
 			netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus))
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-					__FUNCTION__, ret);
+					__func__, ret);
 		goto out;
 	}
 
@@ -246,7 +246,7 @@
 	if (!tmp) {
 		if (netif_msg_drv(pegasus))
 			dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
-					__FUNCTION__);
+					__func__);
 		return -ENOMEM;
 	}
 	memcpy(tmp, &data, 1);
@@ -277,7 +277,7 @@
 			netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus) && printk_ratelimit())
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-					__FUNCTION__, ret);
+					__func__, ret);
 		goto out;
 	}
 
@@ -310,7 +310,7 @@
 			netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus))
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-					__FUNCTION__, ret);
+					__func__, ret);
 	}
 
 	return ret;
@@ -341,7 +341,7 @@
 	}
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 
 	return ret;
 }
@@ -378,7 +378,7 @@
 
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 
@@ -415,7 +415,7 @@
 
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 
@@ -463,7 +463,7 @@
 		return ret;
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 #endif				/* PEGASUS_WRITE_EEPROM */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8463efb..02d25c7 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -512,14 +512,13 @@
 	int			count = 0;
 
 	spin_lock_irqsave (&q->lock, flags);
-	for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) {
+	skb_queue_walk_safe(q, skb, skbnext) {
 		struct skb_data		*entry;
 		struct urb		*urb;
 		int			retval;
 
 		entry = (struct skb_data *) skb->cb;
 		urb = entry->urb;
-		skbnext = skb->next;
 
 		// during some PM-driven resume scenarios,
 		// these (async) unlinks complete immediately
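
/*
 * A stand-alone illustration of why the hunk above switches to
 * skb_queue_walk_safe(): the unlink can complete synchronously and remove
 * the entry being visited, so the next pointer must be fetched before the
 * current node can go away.  The toy list and macro below are illustrative,
 * not the sk_buff API.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* Save "n" before the body runs, so the body may free "pos". */
#define list_walk_safe(head, pos, n) \
	for ((pos) = (head); (pos) && ((n) = (pos)->next, 1); (pos) = (n))

int main(void)
{
	struct node *head = NULL, *pos, *n;
	int i;

	for (i = 3; i >= 1; i--) {
		struct node *node = malloc(sizeof(*node));

		node->id = i;
		node->next = head;
		head = node;
	}

	list_walk_safe(head, pos, n) {
		printf("unlinking node %d\n", pos->id);
		free(pos);	/* safe: the iterator no longer needs it */
	}
	return 0;
}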
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 96dff04..5b78700 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -914,7 +914,7 @@
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
 		rp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1473,8 +1473,8 @@
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak &&
-				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+				(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
+				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
@@ -1518,7 +1518,7 @@
 		struct sk_buff *skb;
 		entry = rp->dirty_rx % RX_RING_SIZE;
 		if (rp->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(rp->rx_buf_sz);
+			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
 			rp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
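
/*
 * A small arithmetic note on the NET_IP_ALIGN change above: reserving two
 * bytes before the 14-byte Ethernet header makes the IP header start at
 * offset 16, a 4-byte boundary.  The constants below mirror the common
 * values (NET_IP_ALIGN is 2 on most architectures); this is not driver code.
 */
#include <stdio.h>

#define ETH_HLEN	14
#define NET_IP_ALIGN	2

int main(void)
{
	unsigned int no_pad = ETH_HLEN;
	unsigned int padded = NET_IP_ALIGN + ETH_HLEN;

	printf("IP header offset without pad: %u (mod 4 = %u)\n",
	       no_pad, no_pad % 4);
	printf("IP header offset with pad:    %u (mod 4 = %u)\n",
	       padded, padded % 4);
	return 0;
}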
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 1b95b04..29a3309 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1381,7 +1381,7 @@
 #define ASSERT(x) { \
 	if (!(x)) { \
 		printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
-			__FUNCTION__, __LINE__);\
+			__func__, __LINE__);\
 		BUG(); \
 	}\
 }
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index d14e667..a5ddc6c 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -407,7 +407,7 @@
 	if (cfm->version != CFM_VERSION) {
 		printk(KERN_ERR "%s:%s: firmware format %u rejected! "
 				"Expecting %u.\n",
-				modname, __FUNCTION__, cfm->version, CFM_VERSION);
+				modname, __func__, cfm->version, CFM_VERSION);
 		return -EINVAL;
 	}
 
@@ -420,7 +420,7 @@
 */
 	if (cksum != cfm->checksum) {
 		printk(KERN_ERR "%s:%s: firmware corrupted!\n",
-				modname, __FUNCTION__);
+				modname, __func__);
 		printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
 				len - (int)sizeof(struct cycx_firmware) - 1,
 				cfm->info.codesize);
@@ -432,7 +432,7 @@
 	/* If everything is ok, set reset, data and code pointers */
 	img_hdr = (struct cycx_fw_header *)&cfm->image;
 #ifdef FIRMWARE_DEBUG
-	printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname);
+	printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname);
 	printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
 	printk(KERN_INFO "  data=%lu\n", img_hdr->data_size);
 	printk(KERN_INFO "  code=%lu\n", img_hdr->code_size);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index d3b28b0..5a7303d 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -874,7 +874,7 @@
 		nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
 
 	dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
-			  __FUNCTION__, lcn, loc, rem);
+			  __func__, lcn, loc, rem);
 
 	dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
 	if (!dev) {
@@ -902,7 +902,7 @@
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
 	cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
 	dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
-			  card->devname, __FUNCTION__, lcn, key);
+			  card->devname, __func__, lcn, key);
 
 	dev = cycx_x25_get_dev_by_lcn(wandev, -key);
 	if (!dev) {
@@ -929,7 +929,7 @@
 
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
 	dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
-			  card->devname, __FUNCTION__, lcn);
+			  card->devname, __func__, lcn);
 	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
 	if (!dev) {
 		/* Invalid channel, discard packet */
@@ -950,7 +950,7 @@
 	u8 lcn;
 
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
-	dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn);
+	dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
 
 	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
 	if (dev) {
@@ -1381,7 +1381,7 @@
 		cycx_x25_chan_disconnect(dev);
 	else
 		printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
-				chan->card->devname, __FUNCTION__, dev->name);
+				chan->card->devname, __func__, dev->name);
 }
 
 /* Set logical channel state. */
@@ -1485,7 +1485,7 @@
 	unsigned char *ptr;
 
 	if ((skb = dev_alloc_skb(1)) == NULL) {
-		printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
+		printk(KERN_ERR "%s: out of memory\n", __func__);
 		return;
 	}
 
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index f5d55ad..5f1ccb2 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -647,7 +647,7 @@
 
 	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
 	if (!skb) {
-		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
+		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
 		goto refill;
 	}
 	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 8b7e5d2..cbcbf6f 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -163,15 +163,17 @@
 
 static int x25_rx(struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
+
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
-		skb->dev->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return NET_RX_DROP;
 	}
 
-	if (lapb_data_received(skb->dev, skb) == LAPB_OK)
+	if (lapb_data_received(dev, skb) == LAPB_OK)
 		return NET_RX_SUCCESS;
 
-	skb->dev->stats.rx_errors++;
+	dev->stats.rx_errors++;
 	dev_kfree_skb_any(skb);
 	return NET_RX_DROP;
 }
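
/*
 * A generic sketch of the pattern used in the x25_rx() hunk above:
 * skb_share_check() may free the skb it is handed, so skb->dev is cached
 * first and only the cached pointer is used on the failure path.  The toy
 * "packet" type and maybe_consume() helper are illustrative, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	int ifindex;
	char payload[32];
};

/* May consume (free) the packet; returns NULL when it did. */
static struct packet *maybe_consume(struct packet *pkt, int fail)
{
	if (fail) {
		free(pkt);
		return NULL;
	}
	return pkt;
}

int main(void)
{
	struct packet *pkt = calloc(1, sizeof(*pkt));
	int ifindex;

	pkt->ifindex = 3;
	ifindex = pkt->ifindex;		/* cache before the consuming call */

	pkt = maybe_consume(pkt, 1);
	if (!pkt) {
		/* pkt is gone; touch only the cached value */
		printf("rx_dropped++ on interface %d\n", ifindex);
		return 0;
	}
	free(pkt);
	return 0;
}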
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4518d0a..4917a94 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -548,7 +548,7 @@
 {
 	st_cpc_tty_area    *cpc_tty; 
 
-	CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear);
+	CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear);
 
 	if (!tty || !tty->driver_data ) {
 	   	CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");	
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 6596cd0..f972fef 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -856,7 +856,7 @@
 		len = SBNI_MIN_LEN;
 
 	nl->tx_buf_p	= skb;
-	nl->tx_frameno	= (len + nl->maxframe - 1) / nl->maxframe;
+	nl->tx_frameno	= DIV_ROUND_UP(len, nl->maxframe);
 	nl->framelen	= len < nl->maxframe  ?  len  :  nl->maxframe;
 
 	outb( inb( dev->base_addr + CSR0 ) | TR_REQ,  dev->base_addr + CSR0 );
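
/*
 * A quick check of the DIV_ROUND_UP() substitution above: the macro expands
 * to the same (len + maxframe - 1) / maxframe ceiling division that was
 * open-coded before.  The 1536-byte frame size is an assumed example value.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int maxframe = 1536;

	printf("frames for 4000 bytes: %u\n", DIV_ROUND_UP(4000, maxframe));
	printf("frames for 1536 bytes: %u\n", DIV_ROUND_UP(1536, maxframe));
	printf("frames for 1537 bytes: %u\n", DIV_ROUND_UP(1537, maxframe));
	return 0;
}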
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 9931b5a..45bdf0b 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -300,6 +300,19 @@
 	---help---
 	  Debugging support.
 
+config LIBERTAS_THINFIRM
+	tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
+	depends on WLAN_80211 && MAC80211
+	select FW_LOADER
+	---help---
+	  A library for Marvell Libertas 8xxx devices using thinfirm.
+
+config LIBERTAS_THINFIRM_USB
+	tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
+	depends on LIBERTAS_THINFIRM && USB
+	---help---
+	  A driver for Marvell Libertas 8388 USB devices using thinfirm.
+
 config AIRO
 	tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
 	depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
@@ -322,6 +335,9 @@
 	tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
 	depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
 	select WIRELESS_EXT
+	select FW_LOADER
+	select CRYPTO
+	select CRYPTO_MICHAEL_MIC
 	---help---
 	  A driver for 802.11b wireless cards based on the "Hermes" or
 	  Intersil HFA384x (Prism 2) MAC controller.  This includes the vast
@@ -411,7 +427,6 @@
 config PCMCIA_SPECTRUM
 	tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
 	depends on PCMCIA && HERMES
-	select FW_LOADER
 	---help---
 
 	  This is a driver for 802.11b cards using RAM-loadable Symbol
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 59aa89e..59d2d80 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -16,7 +16,7 @@
 obj-$(CONFIG_PCMCIA_NETWAVE)	+= netwave_cs.o
 obj-$(CONFIG_PCMCIA_WAVELAN)	+= wavelan_cs.o
 
-obj-$(CONFIG_HERMES)		+= orinoco.o hermes.o
+obj-$(CONFIG_HERMES)		+= orinoco.o hermes.o hermes_dld.o
 obj-$(CONFIG_PCMCIA_HERMES)	+= orinoco_cs.o
 obj-$(CONFIG_APPLE_AIRPORT)	+= airport.o
 obj-$(CONFIG_PLX_HERMES)	+= orinoco_plx.o
@@ -48,6 +48,8 @@
 obj-$(CONFIG_USB_ZD1201)	+= zd1201.o
 obj-$(CONFIG_LIBERTAS)		+= libertas/
 
+obj-$(CONFIG_LIBERTAS_THINFIRM)	+= libertas_tf/
+
 rtl8180-objs		:= rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
 rtl8187-objs		:= rtl8187_dev.o rtl8187_rtl8225.o
 
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3333d45..b2c050b 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -765,11 +765,11 @@
 
 	priv->soft_rx_crc = 0;
 	switch (priv->mode) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
 		priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
 		break;
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		priv->nar &= ~ADM8211_NAR_PR;
 		priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;
 
@@ -777,7 +777,7 @@
 		if (priv->pdev->revision >= ADM8211_REV_BA)
 			priv->soft_rx_crc = 1;
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
 		priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
 		break;
@@ -1410,11 +1410,11 @@
 				 struct ieee80211_if_init_conf *conf)
 {
 	struct adm8211_priv *priv = dev->priv;
-	if (priv->mode != IEEE80211_IF_TYPE_MNTR)
+	if (priv->mode != NL80211_IFTYPE_MONITOR)
 		return -EOPNOTSUPP;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->mode = conf->type;
 		break;
 	default:
@@ -1437,7 +1437,7 @@
 				     struct ieee80211_if_init_conf *conf)
 {
 	struct adm8211_priv *priv = dev->priv;
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 }
 
 static int adm8211_init_rings(struct ieee80211_hw *dev)
@@ -1556,7 +1556,7 @@
 	ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
 			       ADM8211_IER_RCIE | ADM8211_IER_TCIE |
 			       ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	adm8211_update_mode(dev);
 	ADM8211_CSR_WRITE(RDR, 0);
 
@@ -1571,7 +1571,7 @@
 {
 	struct adm8211_priv *priv = dev->priv;
 
-	priv->mode = IEEE80211_IF_TYPE_INVALID;
+	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
 	priv->nar = 0;
 	ADM8211_CSR_WRITE(NAR, 0);
 	ADM8211_CSR_WRITE(IER, 0);
@@ -1884,6 +1884,7 @@
 	dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
 	/* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
 	dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
+	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 
 	dev->channel_change_time = 1000;
 	dev->max_signal = 100;    /* FIXME: find better value */
@@ -1895,7 +1896,7 @@
 	priv->tx_power = 0x40;
 	priv->lpf_cutoff = 0xFF;
 	priv->lnags_threshold = 0xFF;
-	priv->mode = IEEE80211_IF_TYPE_INVALID;
+	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
 
 	/* Power-on issue. EEPROM won't read correctly without */
 	if (pdev->revision >= ADM8211_REV_BA) {
@@ -1985,7 +1986,7 @@
 	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
 	struct adm8211_priv *priv = dev->priv;
 
-	if (priv->mode != IEEE80211_IF_TYPE_INVALID) {
+	if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
 		ieee80211_stop_queues(dev);
 		adm8211_stop(dev);
 	}
@@ -2003,7 +2004,7 @@
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	if (priv->mode != IEEE80211_IF_TYPE_INVALID) {
+	if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
 		adm8211_start(dev);
 		ieee80211_wake_queues(dev);
 	}
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index b5cd850..370133e 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1915,7 +1915,7 @@
 	struct airo_info *ai = dev->priv;
 
 	if (!skb) {
-		airo_print_err(dev->name, "%s: skb == NULL!",__FUNCTION__);
+		airo_print_err(dev->name, "%s: skb == NULL!",__func__);
 		return 0;
 	}
 	npacks = skb_queue_len (&ai->txq);
@@ -1964,7 +1964,7 @@
 	if ((skb = skb_dequeue(&ai->txq)) == NULL) {
 		airo_print_err(dev->name,
 			"%s: Dequeue'd zero in send_packet()",
-			__FUNCTION__);
+			__func__);
 		return 0;
 	}
 
@@ -2115,7 +2115,7 @@
 	u32 *fids = priv->fids;
 
 	if ( skb == NULL ) {
-		airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__);
+		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
 		return 0;
 	}
 
@@ -2186,7 +2186,7 @@
 	}
 
 	if ( skb == NULL ) {
-		airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__);
+		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
 		return 0;
 	}
 
@@ -4127,7 +4127,7 @@
 		if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
 			airo_print_err(ai->dev->name,
 				"%s: MAC should be disabled (rid=%04x)",
-				__FUNCTION__, rid);
+				__func__, rid);
 		memset(&cmd, 0, sizeof(cmd));
 		memset(&rsp, 0, sizeof(rsp));
 
@@ -4142,7 +4142,7 @@
 			&ai->config_desc.rid_desc, sizeof(Rid));
 
 		if (len < 4 || len > 2047) {
-			airo_print_err(ai->dev->name, "%s: len=%d", __FUNCTION__, len);
+			airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
 			rc = -1;
 		} else {
 			memcpy((char *)ai->config_desc.virtual_host_addr,
@@ -4151,9 +4151,9 @@
 			rc = issuecommand(ai, &cmd, &rsp);
 			if ((rc & 0xff00) != 0) {
 				airo_print_err(ai->dev->name, "%s: Write rid Error %d",
-						__FUNCTION__, rc);
+						__func__, rc);
 				airo_print_err(ai->dev->name, "%s: Cmd=%04x",
-						__FUNCTION__, cmd.cmd);
+						__func__, cmd.cmd);
 			}
 
 			if ((rsp.status & 0x7f00))
@@ -7107,7 +7107,7 @@
  */
 static int airo_set_scan(struct net_device *dev,
 			 struct iw_request_info *info,
-			 struct iw_param *vwrq,
+			 struct iw_point *dwrq,
 			 char *extra)
 {
 	struct airo_info *ai = dev->priv;
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f123553..fd72e42 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -147,7 +147,7 @@
 	DEBUG(0, "airo_attach()\n");
 
 	/* Interrupt setup */
-	p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+	p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
 	p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	p_dev->irq.Handler = NULL;
 	
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 6f7eb9f..ce03a2e 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -180,7 +180,8 @@
 	}
 
 	/* Allocate space for private device-specific data */
-	dev = alloc_orinocodev(sizeof(*card), airport_hard_reset);
+	dev = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
+			       airport_hard_reset, NULL);
 	if (! dev) {
 		printk(KERN_ERR PFX "Cannot allocate network device\n");
 		return -ENODEV;
diff --git a/drivers/net/wireless/ath5k/Makefile b/drivers/net/wireless/ath5k/Makefile
index 564ecd0..719cfae 100644
--- a/drivers/net/wireless/ath5k/Makefile
+++ b/drivers/net/wireless/ath5k/Makefile
@@ -1,6 +1,14 @@
-ath5k-y				+= base.o
-ath5k-y				+= hw.o
+ath5k-y				+= caps.o
 ath5k-y				+= initvals.o
+ath5k-y				+= eeprom.o
+ath5k-y				+= gpio.o
+ath5k-y				+= desc.o
+ath5k-y				+= dma.o
+ath5k-y				+= qcu.o
+ath5k-y				+= pcu.o
 ath5k-y				+= phy.o
+ath5k-y				+= reset.o
+ath5k-y				+= attach.o
+ath5k-y				+= base.o
 ath5k-$(CONFIG_ATH5K_DEBUG)	+= debug.o
 obj-$(CONFIG_ATH5K)		+= ath5k.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 9102eea..7134c40 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -18,18 +18,23 @@
 #ifndef _ATH5K_H
 #define _ATH5K_H
 
-/* Set this to 1 to disable regulatory domain restrictions for channel tests.
- * WARNING: This is for debuging only and has side effects (eg. scan takes too
- * long and results timeouts). It's also illegal to tune to some of the
- * supported frequencies in some countries, so use this at your own risk,
- * you've been warned. */
+/* TODO: Clean up channel debugging (it doesn't work anyway) and start
+ * working on regulatory control code using all available EEPROM
+ * information (reverse engineering needed). */
 #define CHAN_DEBUG	0
 
 #include <linux/io.h>
 #include <linux/types.h>
 #include <net/mac80211.h>
 
-#include "hw.h"
+/* RX/TX descriptor hw structs
+ * TODO: Driver part should only see sw structs */
+#include "desc.h"
+
+/* EEPROM structs/offsets
+ * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
+ * and clean up common bits, then introduce set/get functions in eeprom.c */
+#include "eeprom.h"
 
 /* PCI IDs */
 #define PCI_DEVICE_ID_ATHEROS_AR5210 		0x0007 /* AR5210 */
@@ -87,7 +92,92 @@
 	ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
 
 /*
+ * AR5K REGISTER ACCESS
+ */
+
+/* Some macros to read/write fields */
+
+/* First shift, then mask */
+#define AR5K_REG_SM(_val, _flags)					\
+	(((_val) << _flags##_S) & (_flags))
+
+/* First mask, then shift */
+#define AR5K_REG_MS(_val, _flags)					\
+	(((_val) & (_flags)) >> _flags##_S)
+
+/* Some registers can hold multiple values of interest. For this
+ * reason, when we want to write to these registers we must first
+ * retrieve the bits we do not want to clear (let's call this
+ * old_data) and then write the register back as
+ * (old_data | new_value). */
+#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val)			\
+	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
+	    (((_val) << _flags##_S) & (_flags)), _reg)
+
+#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask)			\
+	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) &		\
+			(_mask)) | (_flags), _reg)
+
+#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags)				\
+	ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
+
+#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags)			\
+	ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
+
+/* Access to PHY registers */
+#define AR5K_PHY_READ(ah, _reg)					\
+	ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
+
+#define AR5K_PHY_WRITE(ah, _reg, _val)					\
+	ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
+
+/* Access QCU registers per queue */
+#define AR5K_REG_READ_Q(ah, _reg, _queue)				\
+	(ath5k_hw_reg_read(ah, _reg) & (1 << _queue))			\
+
+#define AR5K_REG_WRITE_Q(ah, _reg, _queue)				\
+	ath5k_hw_reg_write(ah, (1 << _queue), _reg)
+
+#define AR5K_Q_ENABLE_BITS(_reg, _queue) do {				\
+	_reg |= 1 << _queue;						\
+} while (0)
+
+#define AR5K_Q_DISABLE_BITS(_reg, _queue) do {				\
+	_reg &= ~(1 << _queue);						\
+} while (0)
+
+/* Used while writing initvals */
+#define AR5K_REG_WAIT(_i) do {						\
+	if (_i % 64)							\
+		udelay(1);						\
+} while (0)
+
+/* Register dumps are done per operation mode */
+#define AR5K_INI_RFGAIN_5GHZ		0
+#define AR5K_INI_RFGAIN_2GHZ		1
+
+/* TODO: Clean this up */
+#define AR5K_INI_VAL_11A		0
+#define AR5K_INI_VAL_11A_TURBO		1
+#define AR5K_INI_VAL_11B		2
+#define AR5K_INI_VAL_11G		3
+#define AR5K_INI_VAL_11G_TURBO		4
+#define AR5K_INI_VAL_XR			0
+#define AR5K_INI_VAL_MAX		5
+
+#define AR5K_RF5111_INI_RF_MAX_BANKS	AR5K_MAX_RF_BANKS
+#define AR5K_RF5112_INI_RF_MAX_BANKS	AR5K_MAX_RF_BANKS
+
+/* Used for BSSID etc manipulation */
+#define AR5K_LOW_ID(_a)(				\
+(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24	\
+)
+
+#define AR5K_HIGH_ID(_a)	((_a)[4] | (_a)[5] << 8)
+
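
/*
 * A user-space model of the field macros defined above: AR5K_REG_SM shifts a
 * value into place and masks it, AR5K_REG_MS masks a register value and
 * shifts the field back out, and AR5K_REG_WRITE_BITS performs the
 * read-modify-write described in the comment.  The "register" here is just a
 * plain variable and EXAMPLE_FIELD is an invented mask, not a real AR5K one.
 */
#include <stdio.h>

#define EXAMPLE_FIELD	0x00000ff0	/* bits 4..11 */
#define EXAMPLE_FIELD_S	4

#define REG_SM(_val, _flags)	(((_val) << _flags##_S) & (_flags))
#define REG_MS(_val, _flags)	(((_val) & (_flags)) >> _flags##_S)

int main(void)
{
	unsigned int reg = 0xdeadbeef;

	/* read-modify-write: clear only the field, preserve the other bits */
	reg = (reg & ~EXAMPLE_FIELD) | REG_SM(0x3c, EXAMPLE_FIELD);

	printf("register: 0x%08x\n", reg);		/* 0xdeadb3cf */
	printf("field:    0x%02x\n", REG_MS(reg, EXAMPLE_FIELD));
	return 0;
}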
+/*
  * Some tuneable values (these should be changeable by the user)
+ * TODO: Make use of them and add more options OR use debug/configfs
  */
 #define AR5K_TUNE_DMA_BEACON_RESP		2
 #define AR5K_TUNE_SW_BEACON_RESP		10
@@ -98,13 +188,13 @@
 #define AR5K_TUNE_REGISTER_TIMEOUT		20000
 /* Register for RSSI threshold has a mask of 0xff, so 255 seems to
  * be the max value. */
-#define AR5K_TUNE_RSSI_THRES                   129
+#define AR5K_TUNE_RSSI_THRES			129
 /* This must be set when setting the RSSI threshold otherwise it can
  * prevent a reset. If AR5K_RSSI_THR is read after writing to it
  * the BMISS_THRES will be seen as 0, seems harware doesn't keep
  * track of it. Max value depends on harware. For AR5210 this is just 7.
  * For AR5211+ this seems to be up to 255. */
-#define AR5K_TUNE_BMISS_THRES                  7
+#define AR5K_TUNE_BMISS_THRES			7
 #define AR5K_TUNE_REGISTER_DWELL_TIME		20000
 #define AR5K_TUNE_BEACON_INTERVAL		100
 #define AR5K_TUNE_AIFS				2
@@ -123,6 +213,55 @@
 #define AR5K_TUNE_ANT_DIVERSITY			true
 #define AR5K_TUNE_HWTXTRIES			4
 
+#define AR5K_INIT_CARR_SENSE_EN			1
+
+/*Swap RX/TX Descriptor for big endian archs*/
+#if defined(__BIG_ENDIAN)
+#define AR5K_INIT_CFG	(		\
+	AR5K_CFG_SWTD | AR5K_CFG_SWRD	\
+)
+#else
+#define AR5K_INIT_CFG	0x00000000
+#endif
+
+/* Initial values */
+#define AR5K_INIT_TX_LATENCY			502
+#define AR5K_INIT_USEC				39
+#define AR5K_INIT_USEC_TURBO			79
+#define AR5K_INIT_USEC_32			31
+#define AR5K_INIT_SLOT_TIME			396
+#define AR5K_INIT_SLOT_TIME_TURBO		480
+#define AR5K_INIT_ACK_CTS_TIMEOUT		1024
+#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO		0x08000800
+#define AR5K_INIT_PROG_IFS			920
+#define AR5K_INIT_PROG_IFS_TURBO		960
+#define AR5K_INIT_EIFS				3440
+#define AR5K_INIT_EIFS_TURBO			6880
+#define AR5K_INIT_SIFS				560
+#define AR5K_INIT_SIFS_TURBO			480
+#define AR5K_INIT_SH_RETRY			10
+#define AR5K_INIT_LG_RETRY			AR5K_INIT_SH_RETRY
+#define AR5K_INIT_SSH_RETRY			32
+#define AR5K_INIT_SLG_RETRY			AR5K_INIT_SSH_RETRY
+#define AR5K_INIT_TX_RETRY			10
+
+#define AR5K_INIT_TRANSMIT_LATENCY		(			\
+	(AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) |	\
+	(AR5K_INIT_USEC)						\
+)
+#define AR5K_INIT_TRANSMIT_LATENCY_TURBO	(			\
+	(AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) |	\
+	(AR5K_INIT_USEC_TURBO)						\
+)
+#define AR5K_INIT_PROTO_TIME_CNTRL		(			\
+	(AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) |	\
+	(AR5K_INIT_PROG_IFS)						\
+)
+#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO	(			\
+	(AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
+	(AR5K_INIT_PROG_IFS_TURBO)					\
+)
+
 /* token to use for aifs, cwmin, cwmax in MadWiFi */
 #define	AR5K_TXQ_USEDEFAULT	((u32) -1)
 
@@ -142,7 +281,9 @@
 	AR5K_RF5112	= 2,
 	AR5K_RF2413	= 3,
 	AR5K_RF5413	= 4,
-	AR5K_RF2425	= 5,
+	AR5K_RF2316	= 5,
+	AR5K_RF2317	= 6,
+	AR5K_RF2425	= 7,
 };
 
 /*
@@ -150,7 +291,7 @@
  */
 
 enum ath5k_srev_type {
-	AR5K_VERSION_VER,
+	AR5K_VERSION_MAC,
 	AR5K_VERSION_RAD,
 };
 
@@ -162,23 +303,24 @@
 
 #define AR5K_SREV_UNKNOWN	0xffff
 
-#define AR5K_SREV_VER_AR5210	0x00
-#define AR5K_SREV_VER_AR5311	0x10
-#define AR5K_SREV_VER_AR5311A	0x20
-#define AR5K_SREV_VER_AR5311B	0x30
-#define AR5K_SREV_VER_AR5211	0x40
-#define AR5K_SREV_VER_AR5212	0x50
-#define AR5K_SREV_VER_AR5213	0x55
-#define AR5K_SREV_VER_AR5213A	0x59
-#define AR5K_SREV_VER_AR2413	0x78
-#define AR5K_SREV_VER_AR2414	0x79
-#define AR5K_SREV_VER_AR2424	0xa0 /* PCI-E */
-#define AR5K_SREV_VER_AR5424	0xa3 /* PCI-E */
-#define AR5K_SREV_VER_AR5413	0xa4
-#define AR5K_SREV_VER_AR5414	0xa5
-#define AR5K_SREV_VER_AR5416	0xc0 /* PCI-E */
-#define AR5K_SREV_VER_AR5418	0xca /* PCI-E */
-#define AR5K_SREV_VER_AR2425	0xe2 /* PCI-E */
+#define AR5K_SREV_AR5210	0x00 /* Crete */
+#define AR5K_SREV_AR5311	0x10 /* Maui 1 */
+#define AR5K_SREV_AR5311A	0x20 /* Maui 2 */
+#define AR5K_SREV_AR5311B	0x30 /* Spirit */
+#define AR5K_SREV_AR5211	0x40 /* Oahu */
+#define AR5K_SREV_AR5212	0x50 /* Venice */
+#define AR5K_SREV_AR5213	0x55 /* ??? */
+#define AR5K_SREV_AR5213A	0x59 /* Hainan */
+#define AR5K_SREV_AR2413	0x78 /* Griffin lite */
+#define AR5K_SREV_AR2414	0x70 /* Griffin */
+#define AR5K_SREV_AR5424	0x90 /* Condor */
+#define AR5K_SREV_AR5413	0xa4 /* Eagle lite */
+#define AR5K_SREV_AR5414	0xa0 /* Eagle */
+#define AR5K_SREV_AR2415	0xb0 /* Cobra */
+#define AR5K_SREV_AR5416	0xc0 /* PCI-E */
+#define AR5K_SREV_AR5418	0xca /* PCI-E */
+#define AR5K_SREV_AR2425	0xe0 /* Swan */
+#define AR5K_SREV_AR2417	0xf0 /* Nala */
 
 #define AR5K_SREV_RAD_5110	0x00
 #define AR5K_SREV_RAD_5111	0x10
@@ -190,13 +332,22 @@
 #define AR5K_SREV_RAD_2112	0x40
 #define AR5K_SREV_RAD_2112A	0x45
 #define	AR5K_SREV_RAD_2112B	0x46
-#define AR5K_SREV_RAD_SC0	0x50	/* Found on 2413/2414 */
-#define AR5K_SREV_RAD_SC1	0x60	/* Found on 5413/5414 */
-#define AR5K_SREV_RAD_SC2	0xa0	/* Found on 2424-5/5424 */
-#define AR5K_SREV_RAD_5133	0xc0	/* MIMO found on 5418 */
+#define AR5K_SREV_RAD_2413	0x50
+#define AR5K_SREV_RAD_5413	0x60
+#define AR5K_SREV_RAD_2316	0x70
+#define AR5K_SREV_RAD_2317	0x80
+#define AR5K_SREV_RAD_5424	0xa0 /* Mostly same as 5413 */
+#define AR5K_SREV_RAD_2425	0xa2
+#define AR5K_SREV_RAD_5133	0xc0
+
+#define AR5K_SREV_PHY_5211	0x30
+#define AR5K_SREV_PHY_5212	0x41
+#define AR5K_SREV_PHY_2112B	0x43
+#define AR5K_SREV_PHY_2413	0x45
+#define AR5K_SREV_PHY_5413	0x61
+#define AR5K_SREV_PHY_2425	0x70
 
 /* IEEE defs */
-
 #define IEEE80211_MAX_LEN       2500
 
 /* TODO add support to mac80211 for vendor-specific rates and modes */
@@ -268,21 +419,13 @@
 	AR5K_MODE_MAX		=	5
 };
 
-/* adding this flag to rate_code enables short preamble, see ar5212_reg.h */
-#define AR5K_SET_SHORT_PREAMBLE 0x04
-
-#define HAS_SHPREAMBLE(_ix) \
-	(rt->rates[_ix].modulation == IEEE80211_RATE_SHORT_PREAMBLE)
-#define SHPREAMBLE_FLAG(_ix) \
-	(HAS_SHPREAMBLE(_ix) ? AR5K_SET_SHORT_PREAMBLE : 0)
-
 
 /****************\
   TX DEFINITIONS
 \****************/
 
 /*
- * TX Status
+ * TX Status descriptor
  */
 struct ath5k_tx_status {
 	u16	ts_seqnum;
@@ -354,7 +497,6 @@
 	AR5K_TX_QUEUE_ID_XR_DATA	= 9,
 };
 
-
 /*
  * Flags to set hw queue's parameters...
  */
@@ -387,7 +529,8 @@
 
 /*
  * Transmit packet types.
- * These are not fully used inside OpenHAL yet
+ * Used on the TX control descriptor.
+ * TODO: Use them inside base.c correctly.
  */
 enum ath5k_pkt_type {
 	AR5K_PKT_TYPE_NORMAL		= 0,
@@ -430,7 +573,7 @@
 \****************/
 
 /*
- * RX Status
+ * RX Status descriptor
  */
 struct ath5k_rx_status {
 	u16	rs_datalen;
@@ -494,34 +637,59 @@
 #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
 
 
+/*******************************\
+  GAIN OPTIMIZATION DEFINITIONS
+\*******************************/
+
+enum ath5k_rfgain {
+	AR5K_RFGAIN_INACTIVE = 0,
+	AR5K_RFGAIN_READ_REQUESTED,
+	AR5K_RFGAIN_NEED_CHANGE,
+};
+
+#define AR5K_GAIN_CRN_FIX_BITS_5111		4
+#define AR5K_GAIN_CRN_FIX_BITS_5112		7
+#define AR5K_GAIN_CRN_MAX_FIX_BITS		AR5K_GAIN_CRN_FIX_BITS_5112
+#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN		15
+#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN		20
+#define AR5K_GAIN_CCK_PROBE_CORR		5
+#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA		15
+#define AR5K_GAIN_STEP_COUNT			10
+#define AR5K_GAIN_PARAM_TX_CLIP			0
+#define AR5K_GAIN_PARAM_PD_90			1
+#define AR5K_GAIN_PARAM_PD_84			2
+#define AR5K_GAIN_PARAM_GAIN_SEL		3
+#define AR5K_GAIN_PARAM_MIX_ORN			0
+#define AR5K_GAIN_PARAM_PD_138			1
+#define AR5K_GAIN_PARAM_PD_137			2
+#define AR5K_GAIN_PARAM_PD_136			3
+#define AR5K_GAIN_PARAM_PD_132			4
+#define AR5K_GAIN_PARAM_PD_131			5
+#define AR5K_GAIN_PARAM_PD_130			6
+#define AR5K_GAIN_CHECK_ADJUST(_g) 		\
+	((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
+
+struct ath5k_gain_opt_step {
+	s16				gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
+	s32				gos_gain;
+};
+
+struct ath5k_gain {
+	u32			g_step_idx;
+	u32			g_current;
+	u32			g_target;
+	u32			g_low;
+	u32			g_high;
+	u32			g_f_corr;
+	u32			g_active;
+	const struct ath5k_gain_opt_step	*g_step;
+};
+
+
 /********************\
   COMMON DEFINITIONS
 \********************/
 
-/*
- * Atheros hardware descriptor
- * This is read and written to by the hardware
- */
-struct ath5k_desc {
-	u32	ds_link;	/* physical address of the next descriptor */
-	u32	ds_data;	/* physical address of data buffer (skb) */
-
-	union {
-		struct ath5k_hw_5210_tx_desc	ds_tx5210;
-		struct ath5k_hw_5212_tx_desc	ds_tx5212;
-		struct ath5k_hw_all_rx_desc	ds_rx;
-	} ud;
-} __packed;
-
-#define AR5K_RXDESC_INTREQ	0x0020
-
-#define AR5K_TXDESC_CLRDMASK	0x0001
-#define AR5K_TXDESC_NOACK	0x0002	/*[5211+]*/
-#define AR5K_TXDESC_RTSENA	0x0004
-#define AR5K_TXDESC_CTSENA	0x0008
-#define AR5K_TXDESC_INTREQ	0x0010
-#define AR5K_TXDESC_VEOL	0x0020	/*[5211+]*/
-
 #define AR5K_SLOT_TIME_9	396
 #define AR5K_SLOT_TIME_20	880
 #define AR5K_SLOT_TIME_MAX	0xffff
@@ -553,167 +721,79 @@
 #define CHANNEL_MODES		CHANNEL_ALL
 
 /*
- * Used internaly in OpenHAL (ar5211.c/ar5212.c
- * for reset_tx_queue). Also see struct struct ieee80211_channel.
+ * Used internally for reset_tx_queue().
+ * Also see struct ieee80211_channel.
  */
 #define IS_CHAN_XR(_c)	((_c.hw_value & CHANNEL_XR) != 0)
 #define IS_CHAN_B(_c)	((_c.hw_value & CHANNEL_B) != 0)
 
 /*
- * The following structure will be used to map 2GHz channels to
+ * The following structure is used to map 2GHz channels to
  * 5GHz Atheros channels.
+ * TODO: Clean up
  */
 struct ath5k_athchan_2ghz {
 	u32	a2_flags;
 	u16	a2_athchan;
 };
 
-/*
- * Rate definitions
- * TODO: Clean them up or move them on mac80211 -most of these infos are
- * 	 used by the rate control algorytm on MadWiFi.
- */
 
-/* Max number of rates on the rate table and what it seems
- * Atheros hardware supports */
-#define AR5K_MAX_RATES 32
+/******************\
+  RATE DEFINITIONS
+\******************/
 
 /**
- * struct ath5k_rate - rate structure
- * @valid: is this a valid rate for rate control (remove)
- * @modulation: respective mac80211 modulation
- * @rate_kbps: rate in kbit/s
- * @rate_code: hardware rate value, used in &struct ath5k_desc, on RX on
- *     &struct ath5k_rx_status.rs_rate and on TX on
- *     &struct ath5k_tx_status.ts_rate. Seems the ar5xxx harware supports
- *     up to 32 rates, indexed by 1-32. This means we really only need
- *     6 bits for the rate_code.
- * @dot11_rate: respective IEEE-802.11 rate value
- * @control_rate: index of rate assumed to be used to send control frames.
- *     This can be used to set override the value on the rate duration
- *     registers. This is only useful if we can override in the harware at
- *     what rate we want to send control frames at. Note that IEEE-802.11
- *     Ch. 9.6 (after IEEE 802.11g changes) defines the rate at which we
- *     should send ACK/CTS, if we change this value we can be breaking
- *     the spec.
+ * The ar5xxx hardware seems to support up to 32 rates, indexed by 1-32.
  *
- * This structure is used to get the RX rate or set the TX rate on the
+ * The rate code is used to get the RX rate or set the TX rate on the
  * hardware descriptors. It is also used for internal modulation control
  * and settings.
  *
- * On RX after the &struct ath5k_desc is parsed by the appropriate
- * ah_proc_rx_desc() the respective hardware rate value is set in
- * &struct ath5k_rx_status.rs_rate. On TX the desired rate is set in
- * &struct ath5k_tx_status.ts_rate which is later used to setup the
- * &struct ath5k_desc correctly. This is the hardware rate map we are
- * aware of:
+ * This is the hardware rate map we are aware of:
  *
- * rate_code   1       2       3       4       5       6       7       8
+ * rate_code   0x01    0x02    0x03    0x04    0x05    0x06    0x07    0x08
  * rate_kbps   3000    1000    ?       ?       ?       2000    500     48000
  *
- * rate_code   9       10      11      12      13      14      15      16
+ * rate_code   0x09    0x0A    0x0B    0x0C    0x0D    0x0E    0x0F    0x10
  * rate_kbps   24000   12000   6000    54000   36000   18000   9000    ?
  *
  * rate_code   17      18      19      20      21      22      23      24
  * rate_kbps   ?       ?       ?       ?       ?       ?       ?       11000
  *
  * rate_code   25      26      27      28      29      30      31      32
- * rate_kbps   5500    2000    1000    ?       ?       ?       ?       ?
+ * rate_kbps   5500    2000    1000    11000S  5500S   2000S   ?       ?
  *
+ * "S" indicates CCK rates with short preamble.
+ *
+ * AR5211 uses different rate codes for CCK (802.11B) rates: it only uses the
+ * lowest 4 bits, so its codes are the ones below masked with 0xF
+ * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
+ * We handle this in ath5k_setup_bands().
  */
-struct ath5k_rate {
-	u8	valid;
-	u32	modulation;
-	u16	rate_kbps;
-	u8	rate_code;
-	u8	dot11_rate;
-	u8	control_rate;
-};
+#define AR5K_MAX_RATES 32
 
-/* XXX: GRR all this stuff to get leds blinking ??? (check out setcurmode) */
-struct ath5k_rate_table {
-	u16	rate_count;
-	u8	rate_code_to_index[AR5K_MAX_RATES];	/* Back-mapping */
-	struct ath5k_rate rates[AR5K_MAX_RATES];
-};
+/* B */
+#define ATH5K_RATE_CODE_1M	0x1B
+#define ATH5K_RATE_CODE_2M	0x1A
+#define ATH5K_RATE_CODE_5_5M	0x19
+#define ATH5K_RATE_CODE_11M	0x18
+/* A and G */
+#define ATH5K_RATE_CODE_6M	0x0B
+#define ATH5K_RATE_CODE_9M	0x0F
+#define ATH5K_RATE_CODE_12M	0x0A
+#define ATH5K_RATE_CODE_18M	0x0E
+#define ATH5K_RATE_CODE_24M	0x09
+#define ATH5K_RATE_CODE_36M	0x0D
+#define ATH5K_RATE_CODE_48M	0x08
+#define ATH5K_RATE_CODE_54M	0x0C
+/* XR */
+#define ATH5K_RATE_CODE_XR_500K	0x07
+#define ATH5K_RATE_CODE_XR_1M	0x02
+#define ATH5K_RATE_CODE_XR_2M	0x06
+#define ATH5K_RATE_CODE_XR_3M	0x01
 
-/*
- * Rate tables...
- * TODO: CLEAN THIS !!!
- */
-#define AR5K_RATES_11A { 8, {					\
-	255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0,	\
-	7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	255, 255, 255, 255, 255, 255, 255, 255 }, {		\
-	{ 1, 0, 6000, 11, 140, 0 },		\
-	{ 1, 0, 9000, 15, 18, 0 },		\
-	{ 1, 0, 12000, 10, 152, 2 },		\
-	{ 1, 0, 18000, 14, 36, 2 },		\
-	{ 1, 0, 24000, 9, 176, 4 },		\
-	{ 1, 0, 36000, 13, 72, 4 },		\
-	{ 1, 0, 48000, 8, 96, 4 },		\
-	{ 1, 0, 54000, 12, 108, 4 } }		\
-}
-
-#define AR5K_RATES_11B { 4, {						\
-	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	3, 2, 1, 0, 255, 255, 255, 255 }, {				\
-	{ 1, 0, 1000, 27, 130, 0 },	\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 132, 1 },	\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 139, 1 },	\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 150, 1 } }	\
-}
-
-#define AR5K_RATES_11G { 12, {					\
-	255, 255, 255, 255, 255, 255, 255, 255, 10, 8, 6, 4,	\
-	11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	3, 2, 1, 0, 255, 255, 255, 255 }, {			\
-	{ 1, 0, 1000, 27, 2, 0 },		\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 4, 1 },		\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 11, 1 },		\
-	{ 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 22, 1 },	\
-	{ 0, 0, 6000, 11, 12, 4 },	\
-	{ 0, 0, 9000, 15, 18, 4 },	\
-	{ 1, 0, 12000, 10, 24, 6 },	\
-	{ 1, 0, 18000, 14, 36, 6 },	\
-	{ 1, 0, 24000, 9, 48, 8 },	\
-	{ 1, 0, 36000, 13, 72, 8 },	\
-	{ 1, 0, 48000, 8, 96, 8 },	\
-	{ 1, 0, 54000, 12, 108, 8 } }	\
-}
-
-#define AR5K_RATES_TURBO { 8, {					\
-	255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0,	\
-	7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	255, 255, 255, 255, 255, 255, 255, 255 }, {		\
-	{ 1, MODULATION_TURBO, 6000, 11, 140, 0 },	\
-	{ 1, MODULATION_TURBO, 9000, 15, 18, 0 },	\
-	{ 1, MODULATION_TURBO, 12000, 10, 152, 2 },	\
-	{ 1, MODULATION_TURBO, 18000, 14, 36, 2 },	\
-	{ 1, MODULATION_TURBO, 24000, 9, 176, 4 },	\
-	{ 1, MODULATION_TURBO, 36000, 13, 72, 4 },	\
-	{ 1, MODULATION_TURBO, 48000, 8, 96, 4 },	\
-	{ 1, MODULATION_TURBO, 54000, 12, 108, 4 } }	\
-}
-
-#define AR5K_RATES_XR { 12, {					\
-	255, 3, 1, 255, 255, 255, 2, 0, 10, 8, 6, 4,		\
-	11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255,	\
-	255, 255, 255, 255, 255, 255, 255, 255 }, {		\
-	{ 1, MODULATION_XR, 500, 7, 129, 0 },		\
-	{ 1, MODULATION_XR, 1000, 2, 139, 1 },		\
-	{ 1, MODULATION_XR, 2000, 6, 150, 2 },		\
-	{ 1, MODULATION_XR, 3000, 1, 150, 3 },		\
-	{ 1, 0, 6000, 11, 140, 4 },	\
-	{ 1, 0, 9000, 15, 18, 4 },	\
-	{ 1, 0, 12000, 10, 152, 6 },	\
-	{ 1, 0, 18000, 14, 36, 6 },	\
-	{ 1, 0, 24000, 9, 176, 8 },	\
-	{ 1, 0, 36000, 13, 72, 8 },	\
-	{ 1, 0, 48000, 8, 96, 8 },	\
-	{ 1, 0, 54000, 12, 108, 8 } }	\
-}
+/* adding this flag to rate_code enables short preamble */
+#define AR5K_SET_SHORT_PREAMBLE 0x04
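A minimal sketch of how this flag combines with a CCK rate code, mirroring the
way the ath5k_rates table in base.c below builds its hw_value_short entries:

	/* 0x1A | 0x04 = 0x1E, i.e. code 30 = "2000S" in the rate map above */
	u8 rc_2m_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE;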
 
 /*
  * Crypto definitions
@@ -735,7 +815,6 @@
 		return (false);			\
 } while (0)
 
-
 enum ath5k_ant_setting {
 	AR5K_ANT_VARIABLE	= 0,	/* variable by programming */
 	AR5K_ANT_FIXED_A	= 1,	/* fixed to 11a frequencies */
@@ -846,7 +925,8 @@
 
 /*
  * These match net80211 definitions (not used in
- * d80211).
+ * mac80211).
+ * TODO: Clean this up
  */
 #define AR5K_LED_INIT	0 /*IEEE80211_S_INIT*/
 #define AR5K_LED_SCAN	1 /*IEEE80211_S_SCAN*/
@@ -862,7 +942,8 @@
 /*
  * Chipset capabilities -see ath5k_hw_get_capability-
  * get_capability function is not yet fully implemented
- * in OpenHAL so most of these don't work yet...
+ * in ath5k so most of these don't work yet...
+ * TODO: Implement these & merge with _TUNE_ stuff above
  */
 enum ath5k_capability_type {
 	AR5K_CAP_REG_DMN		= 0,	/* Used to get current reg. domain id */
@@ -931,6 +1012,7 @@
 #define AR5K_MAX_GPIO		10
 #define AR5K_MAX_RF_BANKS	8
 
+/* TODO: Clean up and merge with ath5k_softc */
 struct ath5k_hw {
 	u32			ah_magic;
 
@@ -939,7 +1021,7 @@
 
 	enum ath5k_int		ah_imr;
 
-	enum ieee80211_if_types	ah_op_mode;
+	enum nl80211_iftype	ah_op_mode;
 	enum ath5k_power_mode	ah_power_mode;
 	struct ieee80211_channel ah_current_channel;
 	bool			ah_turbo;
@@ -1023,11 +1105,13 @@
 	/*
 	 * Function pointers
 	 */
+	int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
+				u32 size, unsigned int flags);
 	int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
 		unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
 		unsigned int, unsigned int, unsigned int, unsigned int,
 		unsigned int, unsigned int, unsigned int);
-	int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *,
+	int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
 		unsigned int, unsigned int, unsigned int, unsigned int,
 		unsigned int, unsigned int);
 	int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1040,33 +1124,38 @@
  * Prototypes
  */
 
-/* General Functions */
-extern int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, bool is_set);
 /* Attach/Detach Functions */
 extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version);
-extern const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah, unsigned int mode);
 extern void ath5k_hw_detach(struct ath5k_hw *ah);
+
 /* Reset Functions */
-extern int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode, struct ieee80211_channel *channel, bool change_channel);
+extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
 /* Power management functions */
 extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
+
 /* DMA Related Functions */
-extern void ath5k_hw_start_rx(struct ath5k_hw *ah);
+extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
 extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
-extern u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah);
-extern void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr);
-extern int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue);
+extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
 extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr);
+extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+				u32 phys_addr);
 extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
 /* Interrupt handling */
 extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
 extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
-extern enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah,
+		enum ath5k_int new_mask);
 extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
+
 /* EEPROM access functions */
-extern int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain);
+extern int ath5k_eeprom_init(struct ath5k_hw *ah);
+extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
+
 /* Protocol Control Unit Functions */
 extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
 /* BSSID Functions */
@@ -1076,14 +1165,14 @@
 extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
 /* Receive start/stop functions */
 extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-extern void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah);
+extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
 /* RX Filter functions */
 extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
-extern int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index);
+extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
 extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
 extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
 extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
-/* Beacon related functions */
+/* Beacon control functions */
 extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
 extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
 extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
@@ -1105,61 +1194,129 @@
 extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
 extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
 extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
+
 /* Queue Control Unit, DFS Control Unit Functions */
-extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *queue_info);
 extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
+extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+				const struct ath5k_txq_info *queue_info);
+extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+				enum ath5k_tx_queue queue_type,
+				struct ath5k_txq_info *queue_info);
+extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
 extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
 extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
 extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
+extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+
 /* Hardware Descriptor Functions */
-extern int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, u32 size, unsigned int flags);
+extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+
 /* GPIO Functions */
 extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
-extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
 extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
 extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
 extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
 extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
-/* Misc functions */
-extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
 
+/* Misc functions */
+int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
+extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
+extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
 
 /* Initial register settings functions */
 extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+
 /* Initialize RF */
 extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode);
 extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq);
 extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah);
 extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah);
-
-
 /* PHY/RF channel functions */
 extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
 extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
 /* PHY calibration */
 extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
-extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
 /* Misc PHY functions */
 extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
 extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
 extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
-extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
+extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
 /* TX power setup */
 extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower);
 extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power);
 
+/*
+ * Functions used internally
+ */
 
+/*
+ * Translate usec to hw clock units
+ */
+static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
+{
+	return turbo ? (usec * 80) : (usec * 40);
+}
+
+/*
+ * Translate hw clock units to usec
+ */
+static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
+{
+	return turbo ? (clock / 80) : (clock / 40);
+}
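Worked example, assuming the 40MHz (80MHz turbo) core clock implied by the
multipliers above: a 9us slot time becomes 9 * 40 = 360 clock units, and
ath5k_hw_clocktoh() reverses the conversion.

	unsigned int slot_hw = ath5k_hw_htoclock(9, false);		/* 360 */
	unsigned int slot_us = ath5k_hw_clocktoh(slot_hw, false);	/* 9 */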
+
+/*
+ * Read from a register
+ */
 static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
 {
 	return ioread32(ah->ah_iobase + reg);
 }
 
+/*
+ * Write to a register
+ */
 static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
 {
 	iowrite32(val, ah->ah_iobase + reg);
 }
 
+#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
+/*
+ * Check if a register write has been completed
+ */
+static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
+		u32 val, bool is_set)
+{
+	int i;
+	u32 data;
+
+	for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+		data = ath5k_hw_reg_read(ah, reg);
+		if (is_set && (data & flag))
+			break;
+		else if ((data & flag) == val)
+			break;
+		udelay(15);
+	}
+
+	return (i <= 0) ? -EAGAIN : 0;
+}
+#endif
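A hedged usage sketch (the register and flag names here are made up for
illustration): callers in reset/PHY code poll a status bit until it reaches
the wanted state or the AR5K_TUNE_REGISTER_TIMEOUT budget runs out.

	/* wait for a hypothetical SOME_BUSY_BIT in SOME_REG to clear */
	if (ath5k_hw_register_timeout(ah, SOME_REG, SOME_BUSY_BIT, 0, false))
		return -EAGAIN;	/* bit never cleared in time */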
+
+static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
+{
+	u32 retval = 0, bit, i;
+
+	for (i = 0; i < bits; i++) {
+		bit = (val >> i) & 1;
+		retval = (retval << 1) | bit;
+	}
+
+	return retval;
+}
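Worked example: ath5k_hw_bitswap() reverses the lowest 'bits' bits of 'val',
so for instance

	u32 r = ath5k_hw_bitswap(0xB, 4);	/* 0b1011 -> 0b1101 = 0xD */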
+
 #endif
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
new file mode 100644
index 0000000..51d5698
--- /dev/null
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* Attach/Detach Functions and helpers *
+\*************************************/
+
+#include <linux/pci.h>
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/**
+ * ath5k_hw_post - Power On Self Test helper function
+ *
+ * @ah: The &struct ath5k_hw
+ */
+static int ath5k_hw_post(struct ath5k_hw *ah)
+{
+
+	int i, c;
+	u16 cur_reg;
+	u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
+	u32 var_pattern;
+	u32 static_pattern[4] = {
+		0x55555555,	0xaaaaaaaa,
+		0x66666666,	0x99999999
+	};
+	u32 init_val;
+	u32 cur_val;
+
+	for (c = 0; c < 2; c++) {
+
+		cur_reg = regs[c];
+
+		/* Save previous value */
+		init_val = ath5k_hw_reg_read(ah, cur_reg);
+
+		for (i = 0; i < 256; i++) {
+			var_pattern = i << 16 | i;
+			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+			cur_val = ath5k_hw_reg_read(ah, cur_reg);
+
+			if (cur_val != var_pattern) {
+				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
+				return -EAGAIN;
+			}
+
+			/* Found on ndiswrapper dumps */
+			var_pattern = 0x0039080f;
+			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+		}
+
+		for (i = 0; i < 4; i++) {
+			var_pattern = static_pattern[i];
+			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+			cur_val = ath5k_hw_reg_read(ah, cur_reg);
+
+			if (cur_val != var_pattern) {
+				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
+				return -EAGAIN;
+			}
+
+			/* Found on ndiswrapper dumps */
+			var_pattern = 0x003b080f;
+			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+		}
+
+		/* Restore previous value */
+		ath5k_hw_reg_write(ah, init_val, cur_reg);
+
+	}
+
+	return 0;
+
+}
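A short note on the walking pattern used above (illustration only): the loop
index is mirrored into both half-words before being written and read back.

	/* for i = 0xAA the pattern written and verified is 0x00AA00AA */
	u32 var_pattern = 0xAA << 16 | 0xAA;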
+
+/**
+ * ath5k_hw_attach - Check if hw is supported and init the needed structs
+ *
+ * @sc: The &struct ath5k_softc we got from the driver's attach function
+ * @mac_version: The mac version id (check out ath5k.h) based on pci id
+ *
+ * Check if the device is supported, perform a POST and initialize the needed
+ * structs. Returns -ENOMEM if we don't have memory for the needed structs,
+ * -ENODEV if the device is not supported, or prints an error msg if something
+ * else went wrong.
+ */
+struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
+{
+	struct ath5k_hw *ah;
+	struct pci_dev *pdev = sc->pdev;
+	u8 mac[ETH_ALEN];
+	int ret;
+	u32 srev;
+
+	/* If we passed the test, malloc an ath5k_hw struct */
+	ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
+	if (ah == NULL) {
+		ret = -ENOMEM;
+		ATH5K_ERR(sc, "out of memory\n");
+		goto err;
+	}
+
+	ah->ah_sc = sc;
+	ah->ah_iobase = sc->iobase;
+
+	/*
+	 * HW information
+	 */
+	ah->ah_op_mode = NL80211_IFTYPE_STATION;
+	ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
+	ah->ah_turbo = false;
+	ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+	ah->ah_imr = 0;
+	ah->ah_atim_window = 0;
+	ah->ah_aifs = AR5K_TUNE_AIFS;
+	ah->ah_cw_min = AR5K_TUNE_CWMIN;
+	ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
+	ah->ah_software_retry = false;
+	ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
+
+	/*
+	 * Set the mac version based on the pci id
+	 */
+	ah->ah_version = mac_version;
+
+	/*Fill the ath5k_hw struct with the needed functions*/
+	ret = ath5k_hw_init_desc_functions(ah);
+	if (ret)
+		goto err_free;
+
+	/* Bring device out of sleep and reset its units */
+	ret = ath5k_hw_nic_wakeup(ah, CHANNEL_B, true);
+	if (ret)
+		goto err_free;
+
+	/* Get MAC, PHY and RADIO revisions */
+	srev = ath5k_hw_reg_read(ah, AR5K_SREV);
+	ah->ah_mac_srev = srev;
+	ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
+	ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
+	ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
+			0xffffffff;
+	ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
+			CHANNEL_5GHZ);
+	ah->ah_phy = AR5K_PHY(0);
+
+	/* Try to identify radio chip based on its srev */
+	switch (ah->ah_radio_5ghz_revision & 0xf0) {
+	case AR5K_SREV_RAD_5111:
+		ah->ah_radio = AR5K_RF5111;
+		ah->ah_single_chip = false;
+		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+							CHANNEL_2GHZ);
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
+		break;
+	case AR5K_SREV_RAD_5112:
+	case AR5K_SREV_RAD_2112:
+		ah->ah_radio = AR5K_RF5112;
+		ah->ah_single_chip = false;
+		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+							CHANNEL_2GHZ);
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
+		break;
+	case AR5K_SREV_RAD_2413:
+		ah->ah_radio = AR5K_RF2413;
+		ah->ah_single_chip = true;
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
+		break;
+	case AR5K_SREV_RAD_5413:
+		ah->ah_radio = AR5K_RF5413;
+		ah->ah_single_chip = true;
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
+		break;
+	case AR5K_SREV_RAD_2316:
+		ah->ah_radio = AR5K_RF2316;
+		ah->ah_single_chip = true;
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
+		break;
+	case AR5K_SREV_RAD_2317:
+		ah->ah_radio = AR5K_RF2317;
+		ah->ah_single_chip = true;
+		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2317;
+		break;
+	case AR5K_SREV_RAD_5424:
+		if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
+		ah->ah_mac_version == AR5K_SREV_AR2417) {
+			ah->ah_radio = AR5K_RF2425;
+			ah->ah_single_chip = true;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
+		} else {
+			ah->ah_radio = AR5K_RF5413;
+			ah->ah_single_chip = true;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
+		}
+		break;
+	default:
+		/* Identify radio based on mac/phy srev */
+		if (ah->ah_version == AR5K_AR5210) {
+			ah->ah_radio = AR5K_RF5110;
+			ah->ah_single_chip = false;
+		} else if (ah->ah_version == AR5K_AR5211) {
+			ah->ah_radio = AR5K_RF5111;
+			ah->ah_single_chip = false;
+			ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+								CHANNEL_2GHZ);
+		} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
+		ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
+		ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
+			ah->ah_radio = AR5K_RF2425;
+			ah->ah_single_chip = true;
+			ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
+		} else if (srev == AR5K_SREV_AR5213A &&
+		ah->ah_phy_revision == AR5K_SREV_PHY_2112B) {
+			ah->ah_radio = AR5K_RF5112;
+			ah->ah_single_chip = false;
+			ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2112B;
+		} else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
+			ah->ah_radio = AR5K_RF2316;
+			ah->ah_single_chip = true;
+			ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
+		} else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
+		ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
+			ah->ah_radio = AR5K_RF5413;
+			ah->ah_single_chip = true;
+			ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
+		} else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
+		ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
+			ah->ah_radio = AR5K_RF2413;
+			ah->ah_single_chip = true;
+			ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
+			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
+		} else {
+			ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
+			ret = -ENODEV;
+			goto err_free;
+		}
+	}
+
+
+	/* Return on unsupported chips (unsupported eeprom etc) */
+	if ((srev >= AR5K_SREV_AR5416) &&
+	(srev < AR5K_SREV_AR2425)) {
+		ATH5K_ERR(sc, "Device not yet supported.\n");
+		ret = -ENODEV;
+		goto err_free;
+	}
+
+	/*
+	 * Write PCI-E power save settings
+	 */
+	if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
+		ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
+		ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
+		/* Shut off RX when elecidle is asserted */
+		ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
+		ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
+		/* TODO: EEPROM work */
+		ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES);
+		/* Shut off PLL and CLKREQ active in L1 */
+		ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES);
+		/* Preserve other settings */
+		ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES);
+		ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES);
+		ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES);
+		/* Reset SERDES to load new settings */
+		ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
+		mdelay(1);
+	}
+
+	/*
+	 * POST
+	 */
+	ret = ath5k_hw_post(ah);
+	if (ret)
+		goto err_free;
+
+	/* Enable pci core retry fix on Hainan (5213A) and later chips */
+	if (srev >= AR5K_SREV_AR5213A)
+		ath5k_hw_reg_write(ah, AR5K_PCICFG_RETRY_FIX, AR5K_PCICFG);
+
+	/*
+	 * Get card capabilities, calibration values etc
+	 * TODO: EEPROM work
+	 */
+	ret = ath5k_eeprom_init(ah);
+	if (ret) {
+		ATH5K_ERR(sc, "unable to init EEPROM\n");
+		goto err_free;
+	}
+
+	/* Get misc capabilities */
+	ret = ath5k_hw_set_capabilities(ah);
+	if (ret) {
+		ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
+			sc->pdev->device);
+		goto err_free;
+	}
+
+	/* Set MAC address */
+	ret = ath5k_eeprom_read_mac(ah, mac);
+	if (ret) {
+		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
+			sc->pdev->device);
+		goto err_free;
+	}
+
+	ath5k_hw_set_lladdr(ah, mac);
+	/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
+	memset(ah->ah_bssid, 0xff, ETH_ALEN);
+	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+	ath5k_hw_set_opmode(ah);
+
+	ath5k_hw_set_rfgain_opt(ah);
+
+	return ah;
+err_free:
+	kfree(ah);
+err:
+	return ERR_PTR(ret);
+}
+
+/**
+ * ath5k_hw_detach - Free the ath5k_hw struct
+ *
+ * @ah: The &struct ath5k_hw
+ */
+void ath5k_hw_detach(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
+
+	if (ah->ah_rf_banks != NULL)
+		kfree(ah->ah_rf_banks);
+
+	/* assume interrupts are down */
+	kfree(ah);
+}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 0676c6d..c151588 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -72,7 +72,7 @@
 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
 MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("0.5.0 (EXPERIMENTAL)");
+MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
 
 
 /* Known PCI ids */
@@ -93,45 +93,94 @@
 	{ PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, /* 5212 combatible */
 	{ PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
 	{ PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
-	{ PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/
+	{ PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* PCI-E cards */
+	{ PCI_VDEVICE(ATHEROS, 0x001d), .driver_data = AR5K_AR5212 }, /* 2417 Nala */
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
 
 /* Known SREVs */
 static struct ath5k_srev_name srev_names[] = {
-	{ "5210",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5210 },
-	{ "5311",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5311 },
-	{ "5311A",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5311A },
-	{ "5311B",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5311B },
-	{ "5211",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5211 },
-	{ "5212",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5212 },
-	{ "5213",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5213 },
-	{ "5213A",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5213A },
-	{ "2413",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR2413 },
-	{ "2414",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR2414 },
-	{ "2424",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR2424 },
-	{ "5424",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5424 },
-	{ "5413",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5413 },
-	{ "5414",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5414 },
-	{ "5416",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5416 },
-	{ "5418",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR5418 },
-	{ "2425",	AR5K_VERSION_VER,	AR5K_SREV_VER_AR2425 },
-	{ "xxxxx",	AR5K_VERSION_VER,	AR5K_SREV_UNKNOWN },
+	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
+	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
+	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
+	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
+	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
+	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
+	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
+	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
+	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
+	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
+	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
+	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
+	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
+	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
+	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
+	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
+	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
+	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
+	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
 	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
 	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
+	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
 	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
 	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
 	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
+	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
 	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
 	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
-	{ "SChip",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_SC0 },
-	{ "SChip",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_SC1 },
-	{ "SChip",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_SC2 },
+	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
+	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
+	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
+	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
+	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
+	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
 	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
 	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
 };
 
+static struct ieee80211_rate ath5k_rates[] = {
+	{ .bitrate = 10,
+	  .hw_value = ATH5K_RATE_CODE_1M, },
+	{ .bitrate = 20,
+	  .hw_value = ATH5K_RATE_CODE_2M,
+	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55,
+	  .hw_value = ATH5K_RATE_CODE_5_5M,
+	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110,
+	  .hw_value = ATH5K_RATE_CODE_11M,
+	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 60,
+	  .hw_value = ATH5K_RATE_CODE_6M,
+	  .flags = 0 },
+	{ .bitrate = 90,
+	  .hw_value = ATH5K_RATE_CODE_9M,
+	  .flags = 0 },
+	{ .bitrate = 120,
+	  .hw_value = ATH5K_RATE_CODE_12M,
+	  .flags = 0 },
+	{ .bitrate = 180,
+	  .hw_value = ATH5K_RATE_CODE_18M,
+	  .flags = 0 },
+	{ .bitrate = 240,
+	  .hw_value = ATH5K_RATE_CODE_24M,
+	  .flags = 0 },
+	{ .bitrate = 360,
+	  .hw_value = ATH5K_RATE_CODE_36M,
+	  .flags = 0 },
+	{ .bitrate = 480,
+	  .hw_value = ATH5K_RATE_CODE_48M,
+	  .flags = 0 },
+	{ .bitrate = 540,
+	  .hw_value = ATH5K_RATE_CODE_54M,
+	  .flags = 0 },
+	/* XR missing */
+};
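Note that mac80211 expresses ieee80211_rate.bitrate in units of 100 kbit/s,
so for example the third entry above works out as:

	/* bitrate is in 100 kbit/s units: 55 -> 5.5 Mbit/s, 540 -> 54 Mbit/s */
	unsigned int kbps = ath5k_rates[2].bitrate * 100;	/* 5500 */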
+
 /*
  * Prototypes - PCI stack related functions
  */
@@ -162,7 +211,8 @@
  * Prototypes - MAC 802.11 stack related functions
  */
 static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-static int ath5k_reset(struct ieee80211_hw *hw);
+static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel);
+static int ath5k_reset_wake(struct ath5k_softc *sc);
 static int ath5k_start(struct ieee80211_hw *hw);
 static void ath5k_stop(struct ieee80211_hw *hw);
 static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -218,20 +268,16 @@
 			struct ieee80211_hw *hw);
 /* Channel/mode setup */
 static inline short ath5k_ieee2mhz(short chan);
-static unsigned int ath5k_copy_rates(struct ieee80211_rate *rates,
-				const struct ath5k_rate_table *rt,
-				unsigned int max);
 static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
 				struct ieee80211_channel *channels,
 				unsigned int mode,
 				unsigned int max);
-static int 	ath5k_getchannels(struct ieee80211_hw *hw);
+static int 	ath5k_setup_bands(struct ieee80211_hw *hw);
 static int 	ath5k_chan_set(struct ath5k_softc *sc,
 				struct ieee80211_channel *chan);
 static void	ath5k_setcurmode(struct ath5k_softc *sc,
 				unsigned int mode);
 static void	ath5k_mode_setup(struct ath5k_softc *sc);
-static void	ath5k_set_total_hw_rates(struct ath5k_softc *sc);
 
 /* Descriptor setup */
 static int	ath5k_desc_alloc(struct ath5k_softc *sc,
@@ -351,7 +397,11 @@
 	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
 		if (srev_names[i].sr_type != type)
 			continue;
-		if ((val & 0xff) < srev_names[i + 1].sr_val) {
+
+		if ((val & 0xf0) == srev_names[i].sr_val)
+			name = srev_names[i].sr_name;
+
+		if ((val & 0xff) == srev_names[i].sr_val) {
 			name = srev_names[i].sr_name;
 			break;
 		}
@@ -446,6 +496,12 @@
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
 		    IEEE80211_HW_SIGNAL_DBM |
 		    IEEE80211_HW_NOISE_DBM;
+
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC) |
+		BIT(NL80211_IFTYPE_MESH_POINT);
+
 	hw->extra_tx_headroom = 2;
 	hw->channel_change_time = 5000;
 	sc = hw->priv;
@@ -462,7 +518,7 @@
 
 	sc->iobase = mem; /* So we can unmap it on detach */
 	sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
-	sc->opmode = IEEE80211_IF_TYPE_STA;
+	sc->opmode = NL80211_IFTYPE_STATION;
 	mutex_init(&sc->lock);
 	spin_lock_init(&sc->rxbuflock);
 	spin_lock_init(&sc->txbuflock);
@@ -491,7 +547,7 @@
 		goto err_ah;
 
 	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
-			ath5k_chip_name(AR5K_VERSION_VER,sc->ah->ah_mac_srev),
+			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
 					sc->ah->ah_mac_srev,
 					sc->ah->ah_phy_revision);
 
@@ -646,7 +702,6 @@
 #endif /* CONFIG_PM */
 
 
-
 /***********************\
 * Driver Initialization *
 \***********************/
@@ -669,7 +724,7 @@
 	 * return false w/o doing anything.  MAC's that do
 	 * support it will return true w/o doing anything.
 	 */
-	ret = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+	ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
 	if (ret < 0)
 		goto err;
 	if (ret > 0)
@@ -688,15 +743,12 @@
 	 * on settings like the phy mode and regulatory
 	 * domain restrictions.
 	 */
-	ret = ath5k_getchannels(hw);
+	ret = ath5k_setup_bands(hw);
 	if (ret) {
 		ATH5K_ERR(sc, "can't get channels\n");
 		goto err;
 	}
 
-	/* Set *_rates so we can map hw rate index */
-	ath5k_set_total_hw_rates(sc);
-
 	/* NB: setup here so ath5k_rate_update is happy */
 	if (test_bit(AR5K_MODE_11A, ah->ah_modes))
 		ath5k_setcurmode(sc, AR5K_MODE_11A);
@@ -813,27 +865,6 @@
 }
 
 static unsigned int
-ath5k_copy_rates(struct ieee80211_rate *rates,
-		const struct ath5k_rate_table *rt,
-		unsigned int max)
-{
-	unsigned int i, count;
-
-	if (rt == NULL)
-		return 0;
-
-	for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) {
-		rates[count].bitrate = rt->rates[i].rate_kbps / 100;
-		rates[count].hw_value = rt->rates[i].rate_code;
-		rates[count].flags = rt->rates[i].modulation;
-		count++;
-		max--;
-	}
-
-	return count;
-}
-
-static unsigned int
 ath5k_copy_channels(struct ath5k_hw *ah,
 		struct ieee80211_channel *channels,
 		unsigned int mode,
@@ -895,74 +926,97 @@
 	return count;
 }
 
+static void
+ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
+{
+	u8 i;
+
+	for (i = 0; i < AR5K_MAX_RATES; i++)
+		sc->rate_idx[b->band][i] = -1;
+
+	for (i = 0; i < b->n_bitrates; i++) {
+		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
+		if (b->bitrates[i].hw_value_short)
+			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
+	}
+}
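The table built here is the reverse map consumed by ath5k_hw_to_driver_rix()
further down in this patch; a minimal sketch of the lookup, assuming hw_rix is
a rate code reported by the hardware:

	/* entries left at -1 above mean "no mac80211 rate for this code" */
	int idx = sc->rate_idx[sc->curband->band][hw_rix];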
+
 static int
-ath5k_getchannels(struct ieee80211_hw *hw)
+ath5k_setup_bands(struct ieee80211_hw *hw)
 {
 	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_hw *ah = sc->ah;
-	struct ieee80211_supported_band *sbands = sc->sbands;
-	const struct ath5k_rate_table *hw_rates;
-	unsigned int max_r, max_c, count_r, count_c;
-	int mode2g = AR5K_MODE_11G;
+	struct ieee80211_supported_band *sband;
+	int max_c, count_c = 0;
+	int i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
-
-	max_r = ARRAY_SIZE(sc->rates);
 	max_c = ARRAY_SIZE(sc->channels);
-	count_r = count_c = 0;
 
 	/* 2GHz band */
-	if (!test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
-		mode2g = AR5K_MODE_11B;
-		if (!test_bit(AR5K_MODE_11B,
-			sc->ah->ah_capabilities.cap_mode))
-			mode2g = -1;
-	}
+	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
+	sband->band = IEEE80211_BAND_2GHZ;
+	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
 
-	if (mode2g > 0) {
-		struct ieee80211_supported_band *sband =
-			&sbands[IEEE80211_BAND_2GHZ];
+	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
+		/* G mode */
+		memcpy(sband->bitrates, &ath5k_rates[0],
+		       sizeof(struct ieee80211_rate) * 12);
+		sband->n_bitrates = 12;
 
-		sband->bitrates = sc->rates;
 		sband->channels = sc->channels;
-
-		sband->band = IEEE80211_BAND_2GHZ;
 		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
-					mode2g, max_c);
-
-		hw_rates = ath5k_hw_get_rate_table(ah, mode2g);
-		sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
-					hw_rates, max_r);
-
-		count_c = sband->n_channels;
-		count_r = sband->n_bitrates;
+					AR5K_MODE_11G, max_c);
 
 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
-
-		max_r -= count_r;
+		count_c = sband->n_channels;
 		max_c -= count_c;
+	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
+		/* B mode */
+		memcpy(sband->bitrates, &ath5k_rates[0],
+		       sizeof(struct ieee80211_rate) * 4);
+		sband->n_bitrates = 4;
 
+		/* 5211 only supports B rates and uses 4-bit rate codes
+		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B),
+		 * so fix them up here:
+		 */
+		if (ah->ah_version == AR5K_AR5211) {
+			for (i = 0; i < 4; i++) {
+				sband->bitrates[i].hw_value =
+					sband->bitrates[i].hw_value & 0xF;
+				sband->bitrates[i].hw_value_short =
+					sband->bitrates[i].hw_value_short & 0xF;
+			}
+		}
+
+		sband->channels = sc->channels;
+		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+					AR5K_MODE_11B, max_c);
+
+		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+		count_c = sband->n_channels;
+		max_c -= count_c;
 	}
+	ath5k_setup_rate_idx(sc, sband);
 
-	/* 5GHz band */
-
+	/* 5GHz band, A mode */
 	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
-		struct ieee80211_supported_band *sband =
-			&sbands[IEEE80211_BAND_5GHZ];
-
-		sband->bitrates = &sc->rates[count_r];
-		sband->channels = &sc->channels[count_c];
-
+		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
 		sband->band = IEEE80211_BAND_5GHZ;
+		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
+
+		memcpy(sband->bitrates, &ath5k_rates[4],
+		       sizeof(struct ieee80211_rate) * 8);
+		sband->n_bitrates = 8;
+
+		sband->channels = &sc->channels[count_c];
 		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
 					AR5K_MODE_11A, max_c);
 
-		hw_rates = ath5k_hw_get_rate_table(ah, AR5K_MODE_11A);
-		sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
-					hw_rates, max_r);
-
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
 	}
+	ath5k_setup_rate_idx(sc, sband);
 
 	ath5k_debug_dump_bands(sc);
 
@@ -978,9 +1032,6 @@
 static int
 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 {
-	struct ath5k_hw *ah = sc->ah;
-	int ret;
-
 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
 		sc->curchan->center_freq, chan->center_freq);
 
@@ -996,41 +1047,7 @@
 		 * hardware at the new frequency, and then re-enable
 		 * the relevant bits of the h/w.
 		 */
-		ath5k_hw_set_intr(ah, 0);	/* disable interrupts */
-		ath5k_txq_cleanup(sc);		/* clear pending tx frames */
-		ath5k_rx_stop(sc);		/* turn off frame recv */
-		ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
-		if (ret) {
-			ATH5K_ERR(sc, "%s: unable to reset channel "
-				"(%u Mhz)\n", __func__, chan->center_freq);
-			return ret;
-		}
-
-		ath5k_hw_set_txpower_limit(sc->ah, 0);
-
-		/*
-		 * Re-enable rx framework.
-		 */
-		ret = ath5k_rx_start(sc);
-		if (ret) {
-			ATH5K_ERR(sc, "%s: unable to restart recv logic\n",
-					__func__);
-			return ret;
-		}
-
-		/*
-		 * Change channels and update the h/w rate map
-		 * if we're switching; e.g. 11a to 11b/g.
-		 *
-		 * XXX needed?
-		 */
-/*		ath5k_chan_change(sc, chan); */
-
-		ath5k_beacon_config(sc);
-		/*
-		 * Re-enable interrupts.
-		 */
-		ath5k_hw_set_intr(ah, sc->imask);
+		return ath5k_reset(sc, true, true);
 	}
 
 	return 0;
@@ -1068,75 +1085,13 @@
 	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
 }
 
-/*
- * Match the hw provided rate index (through descriptors)
- * to an index for sc->curband->bitrates, so it can be used
- * by the stack.
- *
- * This one is a little bit tricky but i think i'm right
- * about this...
- *
- * We have 4 rate tables in the following order:
- * XR (4 rates)
- * 802.11a (8 rates)
- * 802.11b (4 rates)
- * 802.11g (12 rates)
- * that make the hw rate table.
- *
- * Lets take a 5211 for example that supports a and b modes only.
- * First comes the 802.11a table and then 802.11b (total 12 rates).
- * When hw returns eg. 11 it points to the last 802.11b rate (11Mbit),
- * if it returns 2 it points to the second 802.11a rate etc.
- *
- * Same goes for 5212 who has xr/a/b/g support (total 28 rates).
- * First comes the XR table, then 802.11a, 802.11b and 802.11g.
- * When hw returns eg. 27 it points to the last 802.11g rate (54Mbits) etc
- */
-static void
-ath5k_set_total_hw_rates(struct ath5k_softc *sc) {
-
-	struct ath5k_hw *ah = sc->ah;
-
-	if (test_bit(AR5K_MODE_11A, ah->ah_modes))
-		sc->a_rates = 8;
-
-	if (test_bit(AR5K_MODE_11B, ah->ah_modes))
-		sc->b_rates = 4;
-
-	if (test_bit(AR5K_MODE_11G, ah->ah_modes))
-		sc->g_rates = 12;
-
-	/* XXX: Need to see what what happens when
-		xr disable bits in eeprom are set */
-	if (ah->ah_version >= AR5K_AR5212)
-		sc->xr_rates = 4;
-
-}
-
 static inline int
-ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) {
-
-	int mac80211_rix;
-
-	if(sc->curband->band == IEEE80211_BAND_2GHZ) {
-		/* We setup a g ratetable for both b/g modes */
-		mac80211_rix =
-			hw_rix - sc->b_rates - sc->a_rates - sc->xr_rates;
-	} else {
-		mac80211_rix = hw_rix - sc->xr_rates;
-	}
-
-	/* Something went wrong, fallback to basic rate for this band */
-	if ((mac80211_rix >= sc->curband->n_bitrates) ||
-		(mac80211_rix <= 0 ))
-		mac80211_rix = 1;
-
-	return mac80211_rix;
+ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
+{
+	WARN_ON(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES);
+	return sc->rate_idx[sc->curband->band][hw_rix];
 }
 
-
-
-
 /***************\
 * Buffers setup *
 \***************/
@@ -1199,7 +1154,7 @@
 	ds = bf->desc;
 	ds->ds_link = bf->daddr;	/* link to self */
 	ds->ds_data = bf->skbaddr;
-	ath5k_hw_setup_rx_desc(ah, ds,
+	ah->ah_setup_rx_desc(ah, ds,
 		skb_tailroom(skb),	/* buffer size */
 		0);
 
@@ -1250,12 +1205,12 @@
 	list_add_tail(&bf->list, &txq->q);
 	sc->tx_stats[txq->qnum].len++;
 	if (txq->link == NULL) /* is this first packet? */
-		ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr);
+		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
 	else /* no, so only link it */
 		*txq->link = bf->daddr;
 
 	txq->link = &ds->ds_link;
-	ath5k_hw_tx_start(ah, txq->qnum);
+	ath5k_hw_start_tx_dma(ah, txq->qnum);
 	mmiowb();
 	spin_unlock_bh(&txq->lock);
 
@@ -1433,7 +1388,8 @@
 	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
 	if (ret)
 		return ret;
-	if (sc->opmode == IEEE80211_IF_TYPE_AP) {
+	if (sc->opmode == NL80211_IFTYPE_AP ||
+		sc->opmode == NL80211_IFTYPE_MESH_POINT) {
 		/*
 		 * Always burst out beacon and CAB traffic
 		 * (aifs = cwmin = cwmax = 0)
@@ -1441,7 +1397,7 @@
 		qi.tqi_aifs = 0;
 		qi.tqi_cw_min = 0;
 		qi.tqi_cw_max = 0;
-	} else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) {
+	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
 		/*
 		 * Adhoc mode; backoff between 0 and (2 * cw_min).
 		 */
@@ -1454,7 +1410,7 @@
 		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
 		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
 
-	ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi);
+	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
 	if (ret) {
 		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
 			"hardware queue!\n", __func__);
@@ -1503,14 +1459,14 @@
 		/* don't touch the hardware if marked invalid */
 		ath5k_hw_stop_tx_dma(ah, sc->bhalq);
 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
-			ath5k_hw_get_tx_buf(ah, sc->bhalq));
+			ath5k_hw_get_txdp(ah, sc->bhalq));
 		for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
 			if (sc->txqs[i].setup) {
 				ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
 				ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
 					"link %p\n",
 					sc->txqs[i].qnum,
-					ath5k_hw_get_tx_buf(ah,
+					ath5k_hw_get_txdp(ah,
 							sc->txqs[i].qnum),
 					sc->txqs[i].link);
 			}
@@ -1570,8 +1526,8 @@
 	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
 	spin_unlock_bh(&sc->rxbuflock);
 
-	ath5k_hw_put_rx_buf(ah, bf->daddr);
-	ath5k_hw_start_rx(ah);		/* enable recv descriptors */
+	ath5k_hw_set_rxdp(ah, bf->daddr);
+	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
 	ath5k_mode_setup(sc);		/* set filters, etc. */
 	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
 
@@ -1588,7 +1544,7 @@
 {
 	struct ath5k_hw *ah = sc->ah;
 
-	ath5k_hw_stop_pcu_recv(ah);	/* disable PCU */
+	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
 	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
 	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */
 
@@ -1602,7 +1558,7 @@
 		struct sk_buff *skb, struct ath5k_rx_status *rs)
 {
 	struct ieee80211_hdr *hdr = (void *)skb->data;
-	unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb);
+	unsigned int keyix, hlen;
 
 	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
 			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
@@ -1611,6 +1567,7 @@
 	/* Apparently when a default key is used to decrypt the packet
 	   the hw does not set the index used to decrypt.  In such cases
 	   get the index from the packet. */
+	hlen = ieee80211_hdrlen(hdr->frame_control);
 	if (ieee80211_has_protected(hdr->frame_control) &&
 	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
 	    skb->len >= hlen + 4) {
@@ -1768,7 +1725,7 @@
 			/* let crypto-error packets fall through in MNTR */
 			if ((rs.rs_status &
 				~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
-					sc->opmode != IEEE80211_IF_TYPE_MNTR)
+					sc->opmode != NL80211_IFTYPE_MONITOR)
 				goto next;
 		}
 accept:
@@ -1824,10 +1781,14 @@
 		rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
 		rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
 
+		if (rxs.rate_idx >= 0 && rs.rs_rate ==
+		    sc->curband->bitrates[rxs.rate_idx].hw_value_short)
+			rxs.flag |= RX_FLAG_SHORTPRE;
+
 		ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
 
 		/* check beacons in IBSS mode */
-		if (sc->opmode == IEEE80211_IF_TYPE_IBSS)
+		if (sc->opmode == NL80211_IFTYPE_ADHOC)
 			ath5k_check_ibss_tsf(sc, skb, &rxs);
 
 		__ieee80211_rx(sc->hw, skb, &rxs);
@@ -1942,7 +1903,7 @@
 	ds = bf->desc;
 
 	flags = AR5K_TXDESC_NOACK;
-	if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) {
+	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
 		ds->ds_link = bf->daddr;	/* self-linked */
 		flags |= AR5K_TXDESC_VEOL;
 		/*
@@ -1991,8 +1952,8 @@
 
 	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
 
-	if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA ||
-			sc->opmode == IEEE80211_IF_TYPE_MNTR)) {
+	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
+			sc->opmode == NL80211_IFTYPE_MONITOR)) {
 		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
 		return;
 	}
@@ -2032,8 +1993,8 @@
 		/* NB: hw still stops DMA, so proceed */
 	}
 
-	ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr);
-	ath5k_hw_tx_start(ah, sc->bhalq);
+	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
+	ath5k_hw_start_tx_dma(ah, sc->bhalq);
 	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
 		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
 
@@ -2162,13 +2123,13 @@
 {
 	struct ath5k_hw *ah = sc->ah;
 
-	ath5k_hw_set_intr(ah, 0);
+	ath5k_hw_set_imr(ah, 0);
 	sc->bmisscount = 0;
 	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
 
-	if (sc->opmode == IEEE80211_IF_TYPE_STA) {
+	if (sc->opmode == NL80211_IFTYPE_STATION) {
 		sc->imask |= AR5K_INT_BMISS;
-	} else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) {
+	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
 		/*
 		 * In IBSS mode we use a self-linked tx descriptor and let the
 		 * hardware send the beacons automatically. We have to load it
@@ -2188,7 +2149,7 @@
 	}
 	/* TODO else AP */
 
-	ath5k_hw_set_intr(ah, sc->imask);
+	ath5k_hw_set_imr(ah, sc->imask);
 }
 
 
@@ -2220,36 +2181,13 @@
 	 */
 	sc->curchan = sc->hw->conf.channel;
 	sc->curband = &sc->sbands[sc->curchan->band];
-	ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false);
-	if (ret) {
-		ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret);
-		goto done;
-	}
-	/*
-	 * This is needed only to setup initial state
-	 * but it's best done after a reset.
-	 */
-	ath5k_hw_set_txpower_limit(sc->ah, 0);
-
-	/*
-	 * Setup the hardware after reset: the key cache
-	 * is filled as needed and the receive engine is
-	 * set going.  Frame transmit is handled entirely
-	 * in the frame output path; there's nothing to do
-	 * here except setup the interrupt mask.
-	 */
-	ret = ath5k_rx_start(sc);
-	if (ret)
-		goto done;
-
-	/*
-	 * Enable interrupts.
-	 */
 	sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL |
 		AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL |
 		AR5K_INT_MIB;
+	ret = ath5k_reset(sc, false, false);
+	if (ret)
+		goto done;
 
-	ath5k_hw_set_intr(sc->ah, sc->imask);
 	/* Set ack to be sent at low bit-rates */
 	ath5k_hw_set_ack_bitrate_high(sc->ah, false);
 
@@ -2290,7 +2228,7 @@
 
 	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
 		ath5k_led_off(sc);
-		ath5k_hw_set_intr(ah, 0);
+		ath5k_hw_set_imr(ah, 0);
 		synchronize_irq(sc->pdev->irq);
 	}
 	ath5k_txq_cleanup(sc);
@@ -2396,7 +2334,7 @@
 				* transmission time) in order to detect wether
 				* automatic TSF updates happened.
 				*/
-				if (sc->opmode == IEEE80211_IF_TYPE_IBSS) {
+				if (sc->opmode == NL80211_IFTYPE_ADHOC) {
 					 /* XXX: only if VEOL suppported */
 					u64 tsf = ath5k_hw_get_tsf64(ah);
 					sc->nexttbtt += sc->bintval;
@@ -2451,7 +2389,7 @@
 {
 	struct ath5k_softc *sc = (void *)data;
 
-	ath5k_reset(sc->hw);
+	ath5k_reset_wake(sc);
 }
 
 /*
@@ -2474,7 +2412,7 @@
 		 * to load new gain values.
 		 */
 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
-		ath5k_reset(sc->hw);
+		ath5k_reset_wake(sc);
 	}
 	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
 		ATH5K_ERR(sc, "calibration of channel %u failed\n",
@@ -2626,7 +2564,7 @@
 
 	ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
 
-	if (sc->opmode == IEEE80211_IF_TYPE_MNTR)
+	if (sc->opmode == NL80211_IFTYPE_MONITOR)
 		ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
 
 	/*
@@ -2675,48 +2613,67 @@
 }
 
 static int
-ath5k_reset(struct ieee80211_hw *hw)
+ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
 {
-	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_hw *ah = sc->ah;
 	int ret;
 
 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
 
-	ath5k_hw_set_intr(ah, 0);
-	ath5k_txq_cleanup(sc);
-	ath5k_rx_stop(sc);
-
+	if (stop) {
+		ath5k_hw_set_imr(ah, 0);
+		ath5k_txq_cleanup(sc);
+		ath5k_rx_stop(sc);
+	}
 	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
-	if (unlikely(ret)) {
+	if (ret) {
 		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
 		goto err;
 	}
+
+	/*
+	 * This is needed only to setup initial state
+	 * but it's best done after a reset.
+	 */
 	ath5k_hw_set_txpower_limit(sc->ah, 0);
 
 	ret = ath5k_rx_start(sc);
-	if (unlikely(ret)) {
+	if (ret) {
 		ATH5K_ERR(sc, "can't start recv logic\n");
 		goto err;
 	}
+
 	/*
-	 * We may be doing a reset in response to an ioctl
-	 * that changes the channel so update any state that
-	 * might change as a result.
+	 * Change channels and update the h/w rate map if we're switching;
+	 * e.g. 11a to 11b/g.
+	 *
+	 * We may be doing a reset in response to an ioctl that changes the
+	 * channel so update any state that might change as a result.
 	 *
 	 * XXX needed?
 	 */
 /*	ath5k_chan_change(sc, c); */
-	ath5k_beacon_config(sc);
-	/* intrs are started by ath5k_beacon_config */
 
-	ieee80211_wake_queues(hw);
+	ath5k_beacon_config(sc);
+	/* intrs are enabled by ath5k_beacon_config */
 
 	return 0;
 err:
 	return ret;
 }
 
+static int
+ath5k_reset_wake(struct ath5k_softc *sc)
+{
+	int ret;
+
+	ret = ath5k_reset(sc, true, true);
+	if (!ret)
+		ieee80211_wake_queues(sc->hw);
+
+	return ret;
+}
+
 static int ath5k_start(struct ieee80211_hw *hw)
 {
 	return ath5k_init(hw->priv);
@@ -2742,9 +2699,9 @@
 	sc->vif = conf->vif;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_MONITOR:
 		sc->opmode = conf->type;
 		break;
 	default:
@@ -2815,7 +2772,7 @@
 	}
 
 	if (conf->changed & IEEE80211_IFCC_BEACON &&
-	    vif->type == IEEE80211_IF_TYPE_IBSS) {
+	    vif->type == NL80211_IFTYPE_ADHOC) {
 		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
 		if (!beacon) {
 			ret = -ENOMEM;
@@ -2827,7 +2784,7 @@
 
 	mutex_unlock(&sc->lock);
 
-	return ath5k_reset(hw);
+	return ath5k_reset_wake(sc);
 unlock:
 	mutex_unlock(&sc->lock);
 	return ret;
@@ -2934,16 +2891,17 @@
 
 	/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
 
-	if (sc->opmode == IEEE80211_IF_TYPE_MNTR)
+	if (sc->opmode == NL80211_IFTYPE_MONITOR)
 		rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
 			AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
-	if (sc->opmode != IEEE80211_IF_TYPE_STA)
+	if (sc->opmode != NL80211_IFTYPE_STATION)
 		rfilt |= AR5K_RX_FILTER_PROBEREQ;
-	if (sc->opmode != IEEE80211_IF_TYPE_AP &&
+	if (sc->opmode != NL80211_IFTYPE_AP &&
+		sc->opmode != NL80211_IFTYPE_MESH_POINT &&
 		test_bit(ATH_STAT_PROMISC, sc->status))
 		rfilt |= AR5K_RX_FILTER_PROM;
-	if (sc->opmode == IEEE80211_IF_TYPE_STA ||
-		sc->opmode == IEEE80211_IF_TYPE_IBSS) {
+	if (sc->opmode == NL80211_IFTYPE_STATION ||
+		sc->opmode == NL80211_IFTYPE_ADHOC) {
 		rfilt |= AR5K_RX_FILTER_BEACON;
 	}
 
@@ -3048,7 +3006,7 @@
 	 * in IBSS mode we need to update the beacon timers too.
 	 * this will also reset the TSF if we call it with 0
 	 */
-	if (sc->opmode == IEEE80211_IF_TYPE_IBSS)
+	if (sc->opmode == NL80211_IFTYPE_ADHOC)
 		ath5k_beacon_update_timers(sc, 0);
 	else
 		ath5k_hw_reset_tsf(sc->ah);
@@ -3063,7 +3021,7 @@
 
 	ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
 
-	if (sc->opmode != IEEE80211_IF_TYPE_IBSS) {
+	if (sc->opmode != NL80211_IFTYPE_ADHOC) {
 		ret = -EIO;
 		goto end;
 	}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 7ec2f37..9d0b728 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -111,17 +111,13 @@
 	struct ieee80211_hw	*hw;		/* IEEE 802.11 common */
 	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 	struct ieee80211_channel channels[ATH_CHAN_MAX];
-	struct ieee80211_rate	rates[AR5K_MAX_RATES * IEEE80211_NUM_BANDS];
-	enum ieee80211_if_types	opmode;
+	struct ieee80211_rate	rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+	u8			rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+	enum nl80211_iftype	opmode;
 	struct ath5k_hw		*ah;		/* Atheros HW */
 
 	struct ieee80211_supported_band		*curband;
 
-	u8			a_rates;
-	u8			b_rates;
-	u8			g_rates;
-	u8			xr_rates;
-
 #ifdef CONFIG_ATH5K_DEBUG
 	struct ath5k_dbg_info	debug;		/* debug info */
 #endif /* CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath5k/caps.c b/drivers/net/wireless/ath5k/caps.c
new file mode 100644
index 0000000..150f5ed
--- /dev/null
+++ b/drivers/net/wireless/ath5k/caps.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/**************\
+* Capabilities *
+\**************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*
+ * Fill the capabilities struct
+ * TODO: Merge this with EEPROM code when we are done with it
+ */
+int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
+{
+	u16 ee_header;
+
+	ATH5K_TRACE(ah->ah_sc);
+	/* Capabilities stored in the EEPROM */
+	ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+
+	if (ah->ah_version == AR5K_AR5210) {
+		/*
+		 * Set radio capabilities
+		 * (The AR5110 only supports the middle 5GHz band)
+		 */
+		ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
+		ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
+		ah->ah_capabilities.cap_range.range_2ghz_min = 0;
+		ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+
+		/* Set supported modes */
+		__set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
+		__set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
+	} else {
+		/*
+		 * XXX The transceiver supports frequencies from 4920 to 6100MHz
+		 * XXX and from 2312 to 2732MHz. There are problems with the
+		 * XXX current ieee80211 implementation because the IEEE
+		 * XXX channel mapping does not support negative channel
+		 * XXX numbers (2312MHz is channel -19). Of course, this
+		 * XXX doesn't matter because these channels are out of range
+		 * XXX but some regulation domains like MKK (Japan) will
+		 * XXX support frequencies somewhere around 4.8GHz.
+		 */
+
+		/*
+		 * Set radio capabilities
+		 */
+
+		if (AR5K_EEPROM_HDR_11A(ee_header)) {
+			/* 4920 */
+			ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
+			ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+
+			/* Set supported modes */
+			__set_bit(AR5K_MODE_11A,
+					ah->ah_capabilities.cap_mode);
+			__set_bit(AR5K_MODE_11A_TURBO,
+					ah->ah_capabilities.cap_mode);
+			if (ah->ah_version == AR5K_AR5212)
+				__set_bit(AR5K_MODE_11G_TURBO,
+						ah->ah_capabilities.cap_mode);
+		}
+
+		/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
+		 * connected */
+		if (AR5K_EEPROM_HDR_11B(ee_header) ||
+				AR5K_EEPROM_HDR_11G(ee_header)) {
+			/* 2312 */
+			ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
+			ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+
+			if (AR5K_EEPROM_HDR_11B(ee_header))
+				__set_bit(AR5K_MODE_11B,
+						ah->ah_capabilities.cap_mode);
+
+			if (AR5K_EEPROM_HDR_11G(ee_header))
+				__set_bit(AR5K_MODE_11G,
+						ah->ah_capabilities.cap_mode);
+		}
+	}
+
+	/* GPIO */
+	ah->ah_gpio_npins = AR5K_NUM_GPIO;
+
+	/* Set number of supported TX queues */
+	if (ah->ah_version == AR5K_AR5210)
+		ah->ah_capabilities.cap_queues.q_tx_num =
+			AR5K_NUM_TX_QUEUES_NOQCU;
+	else
+		ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+
+	return 0;
+}
+
+/* Main function used by the driver part to check caps */
+int ath5k_hw_get_capability(struct ath5k_hw *ah,
+		enum ath5k_capability_type cap_type,
+		u32 capability, u32 *result)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	switch (cap_type) {
+	case AR5K_CAP_NUM_TXQUEUES:
+		if (result) {
+			if (ah->ah_version == AR5K_AR5210)
+				*result = AR5K_NUM_TX_QUEUES_NOQCU;
+			else
+				*result = AR5K_NUM_TX_QUEUES;
+			goto yes;
+		}
+	case AR5K_CAP_VEOL:
+		goto yes;
+	case AR5K_CAP_COMPRESSION:
+		if (ah->ah_version == AR5K_AR5212)
+			goto yes;
+		else
+			goto no;
+	case AR5K_CAP_BURST:
+		goto yes;
+	case AR5K_CAP_TPC:
+		goto yes;
+	case AR5K_CAP_BSSIDMASK:
+		if (ah->ah_version == AR5K_AR5212)
+			goto yes;
+		else
+			goto no;
+	case AR5K_CAP_XR:
+		if (ah->ah_version == AR5K_AR5212)
+			goto yes;
+		else
+			goto no;
+	default:
+		goto no;
+	}
+
+no:
+	return -EINVAL;
+yes:
+	return 0;
+}
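+
+/*
+ * Illustrative usage only (not part of this patch): a caller that needs to
+ * size its tx queue bookkeeping can probe the capability above before
+ * setting queues up. The fallback value is just a sketch.
+ *
+ *	u32 qnum;
+ *
+ *	if (ath5k_hw_get_capability(ah, AR5K_CAP_NUM_TXQUEUES, 0, &qnum))
+ *		qnum = 1;	(fall back to a single data queue)
+ */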
+
+/*
+ * TODO: Following functions should be part of a new function
+ * set_capability
+ */
+
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
+		u16 assoc_id)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	if (ah->ah_version == AR5K_AR5210) {
+		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
+			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
+		return 0;
+	}
+
+	return -EIO;
+}
+
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	if (ah->ah_version == AR5K_AR5210) {
+		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
+			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
+		return 0;
+	}
+
+	return -EIO;
+}
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 6fa6c8e0..8f92d67 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -58,8 +58,8 @@
  * THE POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include "debug.h"
 #include "base.h"
+#include "debug.h"
 
 static unsigned int ath5k_debug;
 module_param_named(debug, ath5k_debug, uint, 0);
@@ -525,7 +525,7 @@
 		return;
 
 	printk(KERN_DEBUG "rx queue %x, link %p\n",
-		ath5k_hw_get_rx_buf(ah), sc->rxlink);
+		ath5k_hw_get_rxdp(ah), sc->rxlink);
 
 	spin_lock_bh(&sc->rxbuflock);
 	list_for_each_entry(bf, &sc->rxbuf, list) {
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
new file mode 100644
index 0000000..d45b90a
--- /dev/null
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/******************************\
+ Hardware Descriptor Functions
+\******************************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*
+ * TX Descriptors
+ */
+
+/*
+ * Initialize the 2-word tx control descriptor on 5210/5211
+ */
+static int
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+	unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
+	unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
+	unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
+	unsigned int rtscts_rate, unsigned int rtscts_duration)
+{
+	u32 frame_type;
+	struct ath5k_hw_2w_tx_ctl *tx_ctl;
+	unsigned int frame_len;
+
+	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
+
+	/*
+	 * Validate input
+	 * - Zero retries don't make sense.
+	 * - A zero rate will put the HW into a mode where it continuously sends
+	 *   noise on the channel, so it is important to avoid this.
+	 */
+	if (unlikely(tx_tries0 == 0)) {
+		ATH5K_ERR(ah->ah_sc, "zero retries\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (unlikely(tx_rate0 == 0)) {
+		ATH5K_ERR(ah->ah_sc, "zero rate\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/* Clear descriptor */
+	memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
+
+	/* Setup control descriptor */
+
+	/* Verify and set frame length */
+
+	/* remove padding we might have added before */
+	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
+
+	if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
+		return -EINVAL;
+
+	tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
+
+	/* Verify and set buffer length */
+
+	/* NB: beacon's BufLen must be a multiple of 4 bytes */
+	if (type == AR5K_PKT_TYPE_BEACON)
+		pkt_len = roundup(pkt_len, 4);
+
+	if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
+		return -EINVAL;
+
+	tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
+
+	/*
+	 * Verify and set header length
+	 * XXX: I only found this in the 5210 code; does it work on 5211?
+	 */
+	if (ah->ah_version == AR5K_AR5210) {
+		if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
+			return -EINVAL;
+		tx_ctl->tx_control_0 |=
+			AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
+	}
+
+	/* Differences between 5210 and 5211 */
+	if (ah->ah_version == AR5K_AR5210) {
+		switch (type) {
+		case AR5K_PKT_TYPE_BEACON:
+		case AR5K_PKT_TYPE_PROBE_RESP:
+			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+			break;
+		case AR5K_PKT_TYPE_PIFS:
+			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+			break;
+		default:
+			frame_type = type /*<< 2 ?*/;
+		}
+
+		tx_ctl->tx_control_0 |=
+		AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
+		AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
+
+	} else {
+		tx_ctl->tx_control_0 |=
+			AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
+			AR5K_REG_SM(antenna_mode,
+				AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
+		tx_ctl->tx_control_1 |=
+			AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
+	}
+#define _TX_FLAGS(_c, _flag)					\
+	if (flags & AR5K_TXDESC_##_flag) {			\
+		tx_ctl->tx_control_##_c |=			\
+			AR5K_2W_TX_DESC_CTL##_c##_##_flag;	\
+	}
+
+	_TX_FLAGS(0, CLRDMASK);
+	_TX_FLAGS(0, VEOL);
+	_TX_FLAGS(0, INTREQ);
+	_TX_FLAGS(0, RTSENA);
+	_TX_FLAGS(1, NOACK);
+
+#undef _TX_FLAGS
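+
+/*
+ * For reference (illustration only): _TX_FLAGS(0, VEOL) above expands to
+ *
+ *	if (flags & AR5K_TXDESC_VEOL) {
+ *		tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_VEOL;
+ *	}
+ *
+ * i.e. each driver-level AR5K_TXDESC_* flag is translated into the matching
+ * bit of the corresponding control word.
+ */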
+
+	/*
+	 * WEP crap
+	 */
+	if (key_index != AR5K_TXKEYIX_INVALID) {
+		tx_ctl->tx_control_0 |=
+			AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
+		tx_ctl->tx_control_1 |=
+			AR5K_REG_SM(key_index,
+			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+	}
+
+	/*
+	 * RTS/CTS Duration [5210 ?]
+	 */
+	if ((ah->ah_version == AR5K_AR5210) &&
+			(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
+		tx_ctl->tx_control_1 |= rtscts_duration &
+				AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
+
+	return 0;
+}
+
+/*
+ * Initialize the 4-word tx control descriptor on 5212
+ */
+static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+	struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
+	enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
+	unsigned int tx_tries0, unsigned int key_index,
+	unsigned int antenna_mode, unsigned int flags,
+	unsigned int rtscts_rate,
+	unsigned int rtscts_duration)
+{
+	struct ath5k_hw_4w_tx_ctl *tx_ctl;
+	unsigned int frame_len;
+
+	ATH5K_TRACE(ah->ah_sc);
+	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
+
+	/*
+	 * Validate input
+	 * - Zero retries don't make sense.
+	 * - A zero rate will put the HW into a mode where it continuously sends
+	 *   noise on the channel, so it is important to avoid this.
+	 */
+	if (unlikely(tx_tries0 == 0)) {
+		ATH5K_ERR(ah->ah_sc, "zero retries\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (unlikely(tx_rate0 == 0)) {
+		ATH5K_ERR(ah->ah_sc, "zero rate\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/* Clear descriptor */
+	memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
+
+	/* Setup control descriptor */
+
+	/* Verify and set frame length */
+
+	/* remove padding we might have added before */
+	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
+
+	if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
+		return -EINVAL;
+
+	tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
+
+	/* Verify and set buffer length */
+
+	/* NB: beacon's BufLen must be a multiple of 4 bytes */
+	if (type == AR5K_PKT_TYPE_BEACON)
+		pkt_len = roundup(pkt_len, 4);
+
+	if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
+		return -EINVAL;
+
+	tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
+
+	tx_ctl->tx_control_0 |=
+		AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
+		AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
+	tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
+					AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
+	tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
+					AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
+	tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
+
+#define _TX_FLAGS(_c, _flag)					\
+	if (flags & AR5K_TXDESC_##_flag) {			\
+		tx_ctl->tx_control_##_c |=			\
+			AR5K_4W_TX_DESC_CTL##_c##_##_flag;	\
+	}
+
+	_TX_FLAGS(0, CLRDMASK);
+	_TX_FLAGS(0, VEOL);
+	_TX_FLAGS(0, INTREQ);
+	_TX_FLAGS(0, RTSENA);
+	_TX_FLAGS(0, CTSENA);
+	_TX_FLAGS(1, NOACK);
+
+#undef _TX_FLAGS
+
+	/*
+	 * WEP crap
+	 */
+	if (key_index != AR5K_TXKEYIX_INVALID) {
+		tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
+		tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
+				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+	}
+
+	/*
+	 * RTS/CTS
+	 */
+	if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
+		if ((flags & AR5K_TXDESC_RTSENA) &&
+				(flags & AR5K_TXDESC_CTSENA))
+			return -EINVAL;
+		tx_ctl->tx_control_2 |= rtscts_duration &
+				AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
+		tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
+				AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize a 4-word multi rate retry tx control descriptor on 5212
+ */
+static int
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
+	u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+{
+	struct ath5k_hw_4w_tx_ctl *tx_ctl;
+
+	/*
+	 * Rates can be 0 as long as the retry count is 0 too.
+	 * A zero rate and nonzero retry count will put the HW into a mode where
+	 * it continuously sends noise on the channel, so it is important to
+	 * avoid this.
+	 */
+	if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
+		     (tx_rate2 == 0 && tx_tries2 != 0) ||
+		     (tx_rate3 == 0 && tx_tries3 != 0))) {
+		ATH5K_ERR(ah->ah_sc, "zero rate\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (ah->ah_version == AR5K_AR5212) {
+		tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
+
+#define _XTX_TRIES(_n)							\
+	if (tx_tries##_n) {						\
+		tx_ctl->tx_control_2 |=					\
+		    AR5K_REG_SM(tx_tries##_n,				\
+		    AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n);		\
+		tx_ctl->tx_control_3 |=					\
+		    AR5K_REG_SM(tx_rate##_n,				\
+		    AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n);		\
+	}
+
+		_XTX_TRIES(1);
+		_XTX_TRIES(2);
+		_XTX_TRIES(3);
+
+#undef _XTX_TRIES
+
+		return 1;
+	}
+
+	return 0;
+}
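+
+/*
+ * Illustrative call only (not part of this patch): a driver that wants a
+ * fallback chain after the primary rate fills the multi rate retry fields
+ * through the function pointer set up in ath5k_hw_init_desc_functions().
+ * The rate codes and try counts below are made-up placeholders.
+ *
+ *	ah->ah_setup_mrr_tx_desc(ah, desc, rate1_hw, 3, rate2_hw, 3,
+ *				 lowest_rate_hw, 4);
+ *
+ * On non-5212 parts this is a no-op returning 0, so callers do not need to
+ * check the chip version themselves.
+ */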
+
+/*
+ * Process the tx status descriptor on 5210/5211
+ */
+static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+{
+	struct ath5k_hw_2w_tx_ctl *tx_ctl;
+	struct ath5k_hw_tx_status *tx_status;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
+	tx_status = &desc->ud.ds_tx5210.tx_stat;
+
+	/* No frame has been sent, or there was an error */
+	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
+		return -EINPROGRESS;
+
+	/*
+	 * Get descriptor status
+	 */
+	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
+	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
+	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
+	/*TODO: ts->ts_virtcol + test*/
+	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
+		AR5K_DESC_TX_STATUS1_SEQ_NUM);
+	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
+		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
+	ts->ts_antenna = 1;
+	ts->ts_status = 0;
+	ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
+		AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
+
+	if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
+		if (tx_status->tx_status_0 &
+				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
+			ts->ts_status |= AR5K_TXERR_XRETRY;
+
+		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
+			ts->ts_status |= AR5K_TXERR_FIFO;
+
+		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
+			ts->ts_status |= AR5K_TXERR_FILT;
+	}
+
+	return 0;
+}
+
+/*
+ * Process a tx status descriptor on 5212
+ */
+static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+{
+	struct ath5k_hw_4w_tx_ctl *tx_ctl;
+	struct ath5k_hw_tx_status *tx_status;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
+	tx_status = &desc->ud.ds_tx5212.tx_stat;
+
+	/* No frame has been sent, or there was an error */
+	if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE)))
+		return -EINPROGRESS;
+
+	/*
+	 * Get descriptor status
+	 */
+	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
+	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
+	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
+		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
+	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
+		AR5K_DESC_TX_STATUS1_SEQ_NUM);
+	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
+		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
+	ts->ts_antenna = (tx_status->tx_status_1 &
+		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
+	ts->ts_status = 0;
+
+	switch (AR5K_REG_MS(tx_status->tx_status_1,
+			AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
+	case 0:
+		ts->ts_rate = tx_ctl->tx_control_3 &
+			AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
+		break;
+	case 1:
+		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
+			AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
+		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
+			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
+		break;
+	case 2:
+		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
+			AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
+		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
+			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
+		break;
+	case 3:
+		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
+			AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
+		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
+			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
+		break;
+	}
+
+	/* TX error */
+	if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
+		if (tx_status->tx_status_0 &
+				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
+			ts->ts_status |= AR5K_TXERR_XRETRY;
+
+		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
+			ts->ts_status |= AR5K_TXERR_FIFO;
+
+		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
+			ts->ts_status |= AR5K_TXERR_FILT;
+	}
+
+	return 0;
+}
+
+/*
+ * RX Descriptors
+ */
+
+/*
+ * Initialize an rx control descriptor
+ */
+static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+			u32 size, unsigned int flags)
+{
+	struct ath5k_hw_rx_ctl *rx_ctl;
+
+	ATH5K_TRACE(ah->ah_sc);
+	rx_ctl = &desc->ud.ds_rx.rx_ctl;
+
+	/*
+	 * Clear the descriptor.
+	 * If we don't clear the status part, we get far too many results
+	 * while scanning, most of them virtual, and after a few seconds of
+	 * scanning the system hangs. M.F.
+	 */
+	memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
+
+	/* Setup descriptor */
+	rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
+	if (unlikely(rx_ctl->rx_control_1 != size))
+		return -EINVAL;
+
+	if (flags & AR5K_RXDESC_INTREQ)
+		rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
+
+	return 0;
+}
+
+/*
+ * Process the rx status descriptor on 5210/5211
+ */
+static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+{
+	struct ath5k_hw_rx_status *rx_status;
+
+	rx_status = &desc->ud.ds_rx.u.rx_stat;
+
+	/* No frame received / not ready */
+	if (unlikely(!(rx_status->rx_status_1 &
+	AR5K_5210_RX_DESC_STATUS1_DONE)))
+		return -EINPROGRESS;
+
+	/*
+	 * Frame receive status
+	 */
+	rs->rs_datalen = rx_status->rx_status_0 &
+		AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
+	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
+		AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
+	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
+		AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
+	rs->rs_antenna = rx_status->rx_status_0 &
+		AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
+	rs->rs_more = rx_status->rx_status_0 &
+		AR5K_5210_RX_DESC_STATUS0_MORE;
+	/* TODO: this timestamp is 13 bit, later on we assume 15 bit */
+	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
+		AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
+	rs->rs_status = 0;
+	rs->rs_phyerr = 0;
+
+	/*
+	 * Key table status
+	 */
+	if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
+		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
+			AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
+	else
+		rs->rs_keyix = AR5K_RXKEYIX_INVALID;
+
+	/*
+	 * Receive/descriptor errors
+	 */
+	if (!(rx_status->rx_status_1 &
+	AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+		if (rx_status->rx_status_1 &
+				AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
+			rs->rs_status |= AR5K_RXERR_CRC;
+
+		if (rx_status->rx_status_1 &
+				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
+			rs->rs_status |= AR5K_RXERR_FIFO;
+
+		if (rx_status->rx_status_1 &
+				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
+			rs->rs_status |= AR5K_RXERR_PHY;
+			rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
+				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
+		}
+
+		if (rx_status->rx_status_1 &
+				AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
+			rs->rs_status |= AR5K_RXERR_DECRYPT;
+	}
+
+	return 0;
+}
+
+/*
+ * Process the rx status descriptor on 5212
+ */
+static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+{
+	struct ath5k_hw_rx_status *rx_status;
+	struct ath5k_hw_rx_error *rx_err;
+
+	ATH5K_TRACE(ah->ah_sc);
+	rx_status = &desc->ud.ds_rx.u.rx_stat;
+
+	/* Overlay on error */
+	rx_err = &desc->ud.ds_rx.u.rx_err;
+
+	/* No frame received / not ready */
+	if (unlikely(!(rx_status->rx_status_1 &
+	AR5K_5212_RX_DESC_STATUS1_DONE)))
+		return -EINPROGRESS;
+
+	/*
+	 * Frame receive status
+	 */
+	rs->rs_datalen = rx_status->rx_status_0 &
+		AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
+	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
+		AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
+	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
+		AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
+	rs->rs_antenna = rx_status->rx_status_0 &
+		AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
+	rs->rs_more = rx_status->rx_status_0 &
+		AR5K_5212_RX_DESC_STATUS0_MORE;
+	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
+		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
+	rs->rs_status = 0;
+	rs->rs_phyerr = 0;
+
+	/*
+	 * Key table status
+	 */
+	if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
+		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
+				AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
+	else
+		rs->rs_keyix = AR5K_RXKEYIX_INVALID;
+
+	/*
+	 * Receive/descriptor errors
+	 */
+	if (!(rx_status->rx_status_1 &
+	AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+		if (rx_status->rx_status_1 &
+				AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
+			rs->rs_status |= AR5K_RXERR_CRC;
+
+		if (rx_status->rx_status_1 &
+				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
+			rs->rs_status |= AR5K_RXERR_PHY;
+			rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
+					   AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
+		}
+
+		if (rx_status->rx_status_1 &
+				AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
+			rs->rs_status |= AR5K_RXERR_DECRYPT;
+
+		if (rx_status->rx_status_1 &
+				AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
+			rs->rs_status |= AR5K_RXERR_MIC;
+	}
+
+	return 0;
+}
+
+/*
+ * Init function pointers inside ath5k_hw struct
+ */
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+{
+
+	if (ah->ah_version != AR5K_AR5210 &&
+		ah->ah_version != AR5K_AR5211 &&
+		ah->ah_version != AR5K_AR5212)
+			return -ENOTSUPP;
+
+	/* XXX: What is this magic value and where is it used ? */
+	if (ah->ah_version == AR5K_AR5212)
+		ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
+	else if (ah->ah_version == AR5K_AR5211)
+		ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
+
+	if (ah->ah_version == AR5K_AR5212) {
+		ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
+		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
+		ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
+		ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
+	} else {
+		ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
+		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
+		ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
+		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
+	}
+
+	if (ah->ah_version == AR5K_AR5212)
+		ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
+	else if (ah->ah_version <= AR5K_AR5211)
+		ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
+
+	return 0;
+}
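+
+/*
+ * Usage sketch (illustration only): once this has run, callers stay
+ * chip-agnostic by going through the function pointers, e.g.
+ *
+ *	ah->ah_setup_rx_desc(ah, desc, bufsize, 0);
+ *	...
+ *	ret = ah->ah_proc_rx_desc(ah, desc, &rs);
+ *	if (ret == -EINPROGRESS)
+ *		break;	(descriptor not yet completed by the hardware)
+ *
+ * Here bufsize is only a placeholder for the rx buffer length.
+ */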
+
diff --git a/drivers/net/wireless/ath5k/desc.h b/drivers/net/wireless/ath5k/desc.h
new file mode 100644
index 0000000..56158c8
--- /dev/null
+++ b/drivers/net/wireless/ath5k/desc.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*
+ * Internal RX/TX descriptor structures
+ * (rX: reserved fields possibly used by future versions of the ar5k chipset)
+ */
+
+/*
+ * common hardware RX control descriptor
+ */
+struct ath5k_hw_rx_ctl {
+	u32	rx_control_0; /* RX control word 0 */
+	u32	rx_control_1; /* RX control word 1 */
+} __packed;
+
+/* RX control word 0 field/sflags */
+#define AR5K_DESC_RX_CTL0			0x00000000
+
+/* RX control word 1 fields/flags */
+#define AR5K_DESC_RX_CTL1_BUF_LEN		0x00000fff
+#define AR5K_DESC_RX_CTL1_INTREQ		0x00002000
+
+/*
+ * common hardware RX status descriptor
+ * 5210/11 and 5212 differ only in the flags defined below
+ */
+struct ath5k_hw_rx_status {
+	u32	rx_status_0; /* RX status word 0 */
+	u32	rx_status_1; /* RX status word 1 */
+} __packed;
+
+/* 5210/5211 */
+/* RX status word 0 fields/flags */
+#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN		0x00000fff
+#define AR5K_5210_RX_DESC_STATUS0_MORE			0x00001000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE		0x00078000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S	15
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x07f80000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	19
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA	0x38000000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	27
+
+/* RX status word 1 fields/flags */
+#define AR5K_5210_RX_DESC_STATUS1_DONE			0x00000001
+#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
+#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR		0x00000004
+#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN		0x00000008
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000010
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR		0x000000e0
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S		5
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX		0x00007e00
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S		9
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x0fff8000
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	15
+#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS	0x10000000
+
+/* 5212 */
+/* RX status word 0 fields/flags */
+#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN		0x00000fff
+#define AR5K_5212_RX_DESC_STATUS0_MORE			0x00001000
+#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR	0x00002000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE		0x000f8000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S	15
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x0ff00000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	20
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA	0xf0000000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	28
+
+/* RX status word 1 fields/flags */
+#define AR5K_5212_RX_DESC_STATUS1_DONE			0x00000001
+#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
+#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR		0x00000004
+#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000008
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR		0x00000010
+#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR		0x00000020
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX		0x0000fe00
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S		9
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x7fff0000
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	16
+#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS	0x80000000
+
+/*
+ * common hardware RX error descriptor
+ */
+struct ath5k_hw_rx_error {
+	u32	rx_error_0; /* RX status word 0 */
+	u32	rx_error_1; /* RX status word 1 */
+} __packed;
+
+/* RX error word 0 fields/flags */
+#define AR5K_RX_DESC_ERROR0			0x00000000
+
+/* RX error word 1 fields/flags */
+#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE	0x0000ff00
+#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S	8
+
+/* PHY Error codes */
+#define AR5K_DESC_RX_PHY_ERROR_NONE		0x00
+#define AR5K_DESC_RX_PHY_ERROR_TIMING		0x20
+#define AR5K_DESC_RX_PHY_ERROR_PARITY		0x40
+#define AR5K_DESC_RX_PHY_ERROR_RATE		0x60
+#define AR5K_DESC_RX_PHY_ERROR_LENGTH		0x80
+#define AR5K_DESC_RX_PHY_ERROR_64QAM		0xa0
+#define AR5K_DESC_RX_PHY_ERROR_SERVICE		0xc0
+#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR	0xe0
+
+/*
+ * 5210/5211 hardware 2-word TX control descriptor
+ */
+struct ath5k_hw_2w_tx_ctl {
+	u32	tx_control_0; /* TX control word 0 */
+	u32	tx_control_1; /* TX control word 1 */
+} __packed;
+
+/* TX control word 0 fields/flags */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN		0x0003f000 /*[5210 ?]*/
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S	12
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE		0x003c0000
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S	18
+#define AR5K_2W_TX_DESC_CTL0_RTSENA		0x00400000
+#define AR5K_2W_TX_DESC_CTL0_CLRDMASK		0x01000000
+#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET	0x00800000 /*[5210]*/
+#define AR5K_2W_TX_DESC_CTL0_VEOL		0x00800000 /*[5211]*/
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE		0x1c000000 /*[5210]*/
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S	26
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210	0x02000000
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211	0x1e000000
+
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT			\
+		(ah->ah_version == AR5K_AR5210 ?		\
+		AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 :	\
+		AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
+
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
+#define AR5K_2W_TX_DESC_CTL0_INTREQ		0x20000000
+#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
+
+/* TX control word 1 fields/flags */
+#define AR5K_2W_TX_DESC_CTL1_BUF_LEN		0x00000fff
+#define AR5K_2W_TX_DESC_CTL1_MORE		0x00001000
+#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210	0x0007e000
+#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211	0x000fe000
+
+#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX				\
+			(ah->ah_version == AR5K_AR5210 ?		\
+			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 :	\
+			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
+
+#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE		0x00700000 /*[5211]*/
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S	20
+#define AR5K_2W_TX_DESC_CTL1_NOACK		0x00800000 /*[5211]*/
+#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION	0xfff80000 /*[5210 ?]*/
+
+/* Frame types */
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL   0x00
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM     0x04
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL   0x08
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS     0x10
+
+/*
+ * 5212 hardware 4-word TX control descriptor
+ */
+struct ath5k_hw_4w_tx_ctl {
+	u32	tx_control_0; /* TX control word 0 */
+
+#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER		0x003f0000
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S	16
+#define AR5K_4W_TX_DESC_CTL0_RTSENA		0x00400000
+#define AR5K_4W_TX_DESC_CTL0_VEOL		0x00800000
+#define AR5K_4W_TX_DESC_CTL0_CLRDMASK		0x01000000
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT	0x1e000000
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
+#define AR5K_4W_TX_DESC_CTL0_INTREQ		0x20000000
+#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
+#define AR5K_4W_TX_DESC_CTL0_CTSENA		0x80000000
+
+	u32	tx_control_1; /* TX control word 1 */
+
+#define AR5K_4W_TX_DESC_CTL1_BUF_LEN		0x00000fff
+#define AR5K_4W_TX_DESC_CTL1_MORE		0x00001000
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX	0x000fe000
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE		0x00f00000
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S	20
+#define AR5K_4W_TX_DESC_CTL1_NOACK		0x01000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC		0x06000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S	25
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN	0x18000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S	27
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN	0x60000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S	29
+
+	u32	tx_control_2; /* TX control word 2 */
+
+#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION		0x00007fff
+#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE	0x00008000
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0		0x000f0000
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S		16
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1		0x00f00000
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S		20
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2		0x0f000000
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S		24
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3		0xf0000000
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S		28
+
+	u32	tx_control_3; /* TX control word 3 */
+
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0		0x0000001f
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1		0x000003e0
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S	5
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2		0x00007c00
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S	10
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3		0x000f8000
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S	15
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE	0x01f00000
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S	20
+} __packed;
+
+/*
+ * Common TX status descriptor
+ */
+struct ath5k_hw_tx_status {
+	u32	tx_status_0; /* TX status word 0 */
+	u32	tx_status_1; /* TX status word 1 */
+} __packed;
+
+/* TX status word 0 fields/flags */
+#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK	0x00000001
+#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES	0x00000002
+#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN	0x00000004
+#define AR5K_DESC_TX_STATUS0_FILTERED		0x00000008
+/*???
+#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT	0x000000f0
+#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S	4
+*/
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT	0x000000f0
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S	4
+/*???
+#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT	0x00000f00
+#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S	8
+*/
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT	0x00000f00
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S	8
+#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT	0x0000f000
+#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S	12
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP	0xffff0000
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S	16
+
+/* TX status word 1 fields/flags */
+#define AR5K_DESC_TX_STATUS1_DONE		0x00000001
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM		0x00001ffe
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S		1
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH	0x001fe000
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S	13
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX	0x00600000
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S	21
+#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS	0x00800000
+#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA	0x01000000
+
+/*
+ * 5210/5211 hardware TX descriptor
+ */
+struct ath5k_hw_5210_tx_desc {
+	struct ath5k_hw_2w_tx_ctl	tx_ctl;
+	struct ath5k_hw_tx_status	tx_stat;
+} __packed;
+
+/*
+ * 5212 hardware TX descriptor
+ */
+struct ath5k_hw_5212_tx_desc {
+	struct ath5k_hw_4w_tx_ctl	tx_ctl;
+	struct ath5k_hw_tx_status	tx_stat;
+} __packed;
+
+/*
+ * common hardware RX descriptor
+ */
+struct ath5k_hw_all_rx_desc {
+	struct ath5k_hw_rx_ctl			rx_ctl;
+	union {
+		struct ath5k_hw_rx_status	rx_stat;
+		struct ath5k_hw_rx_error	rx_err;
+	} u;
+} __packed;
+
+/*
+ * Atheros hardware descriptor
+ * This is read and written to by the hardware
+ */
+struct ath5k_desc {
+	u32	ds_link;	/* physical address of the next descriptor */
+	u32	ds_data;	/* physical address of data buffer (skb) */
+
+	union {
+		struct ath5k_hw_5210_tx_desc	ds_tx5210;
+		struct ath5k_hw_5212_tx_desc	ds_tx5212;
+		struct ath5k_hw_all_rx_desc	ds_rx;
+	} ud;
+} __packed;
+
+#define AR5K_RXDESC_INTREQ	0x0020
+
+#define AR5K_TXDESC_CLRDMASK	0x0001
+#define AR5K_TXDESC_NOACK	0x0002	/*[5211+]*/
+#define AR5K_TXDESC_RTSENA	0x0004
+#define AR5K_TXDESC_CTSENA	0x0008
+#define AR5K_TXDESC_INTREQ	0x0010
+#define AR5K_TXDESC_VEOL	0x0020	/*[5211+]*/
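+
+/*
+ * Note on the field definitions above (illustration only): each multi-bit
+ * field comes as a MASK plus a matching _S shift. In the descriptor code
+ * they are packed and unpacked with the AR5K_REG_SM()/AR5K_REG_MS()
+ * helpers, e.g.
+ *
+ *	word |= AR5K_REG_SM(tries, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
+ *	tries = AR5K_REG_MS(word, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
+ *
+ * (shift-into-mask and mask-and-shift-out respectively).
+ */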
+
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
new file mode 100644
index 0000000..7adceb2
--- /dev/null
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* DMA and interrupt masking functions *
+\*************************************/
+
+/*
+ * dma.c - DMA and interrupt masking functions
+ *
+ * Here we set up the descriptor pointers (rxdp/txdp), start/stop the dma
+ * engine and handle queue setup for the 5210 chipset (the rest is handled
+ * in qcu.c). We also set up the interrupt mask register (IMR) and read the
+ * various interrupt status registers (ISR).
+ *
+ * TODO: Handle SISR on 5211+ and introduce a function to return the queue
+ * number that caused the interrupt.
+ */
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*********\
+* Receive *
+\*********/
+
+/**
+ * ath5k_hw_start_rx_dma - Start DMA receive
+ *
+ * @ah:	The &struct ath5k_hw
+ */
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
+	ath5k_hw_reg_read(ah, AR5K_CR);
+}
+
+/**
+ * ath5k_hw_stop_rx_dma - Stop DMA receive
+ *
+ * @ah:	The &struct ath5k_hw
+ */
+int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+{
+	unsigned int i;
+
+	ATH5K_TRACE(ah->ah_sc);
+	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
+
+	/*
+	 * It may take some time to disable the DMA receive unit
+	 */
+	for (i = 1000; i > 0 &&
+			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
+			i--)
+		udelay(10);
+
+	return i ? 0 : -EBUSY;
+}
+
+/**
+ * ath5k_hw_get_rxdp - Get RX Descriptor's address
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * XXX: Is RXDP read and clear ?
+ */
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+{
+	return ath5k_hw_reg_read(ah, AR5K_RXDP);
+}
+
+/**
+ * ath5k_hw_set_rxdp - Set RX Descriptor's address
+ *
+ * @ah: The &struct ath5k_hw
+ * @phys_addr: RX descriptor address
+ *
+ * XXX: Should we check if rx is enabled before setting rxdp ?
+ */
+void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
+}
+
+
+/**********\
+* Transmit *
+\**********/
+
+/**
+ * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Start DMA transmit for a specific queue and since 5210 doesn't have
+ * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
+ * queue for normal data and one queue for beacons). For queue setup
+ * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
+ * of range or if queue is already disabled.
+ *
+ * NOTE: Must be called after setting up tx control descriptor for that
+ * queue (see below).
+ */
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+{
+	u32 tx_queue;
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/* Return if queue is declared inactive */
+	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return -EIO;
+
+	if (ah->ah_version == AR5K_AR5210) {
+		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
+
+		/*
+		 * Set the queue by type on 5210
+		 */
+		switch (ah->ah_txq[queue].tqi_type) {
+		case AR5K_TX_QUEUE_DATA:
+			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
+			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
+					AR5K_BSR);
+			break;
+		case AR5K_TX_QUEUE_CAB:
+			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
+			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
+				AR5K_BCR_BDMAE, AR5K_BSR);
+			break;
+		default:
+			return -EINVAL;
+		}
+		/* Start queue */
+		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
+		ath5k_hw_reg_read(ah, AR5K_CR);
+	} else {
+		/* Return if queue is disabled */
+		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
+			return -EIO;
+
+		/* Start queue */
+		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
+	}
+
+	return 0;
+}
+
+/**
+ * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Stop DMA transmit on a specific hw queue and drain queue so we don't
+ * have any pending frames. Returns -EBUSY if we still have pending frames,
+ * -EINVAL if queue number is out of range.
+ *
+ */
+int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+{
+	unsigned int i = 40;
+	u32 tx_queue, pending;
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/* Return if queue is declared inactive */
+	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return -EIO;
+
+	if (ah->ah_version == AR5K_AR5210) {
+		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
+
+		/*
+		 * Set by queue type
+		 */
+		switch (ah->ah_txq[queue].tqi_type) {
+		case AR5K_TX_QUEUE_DATA:
+			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+		case AR5K_TX_QUEUE_CAB:
+			/* XXX Fix me... */
+			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
+			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* Stop queue */
+		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
+		ath5k_hw_reg_read(ah, AR5K_CR);
+	} else {
+		/*
+		 * Schedule TX disable and wait until queue is empty
+		 */
+		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
+
+		/*Check for pending frames*/
+		do {
+			pending = ath5k_hw_reg_read(ah,
+				AR5K_QUEUE_STATUS(queue)) &
+				AR5K_QCU_STS_FRMPENDCNT;
+			udelay(100);
+		} while (--i && pending);
+
+		/* For 2413+ order PCU to drop packets using
+		 * QUIET mechanism */
+		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
+				pending) {
+			/* Set periodicity and duration */
+			ath5k_hw_reg_write(ah,
+				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
+				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
+				AR5K_QUIET_CTL2);
+
+			/* Enable quiet period for current TSF */
+			ath5k_hw_reg_write(ah,
+				AR5K_QUIET_CTL1_QT_EN |
+				AR5K_REG_SM(ath5k_hw_reg_read(ah,
+						AR5K_TSF_L32_5211) >> 10,
+						AR5K_QUIET_CTL1_NEXT_QT_TSF),
+				AR5K_QUIET_CTL1);
+
+			/* Force channel idle high */
+			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
+					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
+
+			/* Wait a while and disable mechanism */
+			udelay(200);
+			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
+						AR5K_QUIET_CTL1_QT_EN);
+
+			/* Re-check for pending frames */
+			i = 40;
+			do {
+				pending = ath5k_hw_reg_read(ah,
+					AR5K_QUEUE_STATUS(queue)) &
+					AR5K_QCU_STS_FRMPENDCNT;
+				udelay(100);
+			} while (--i && pending);
+
+			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
+					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
+		}
+
+		/* Clear register */
+		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
+		if (pending)
+			return -EBUSY;
+	}
+
+	/* TODO: Check for success on 5210 else return error */
+	return 0;
+}
+
+/**
+ * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Get TX descriptor's address for a specific queue. For 5210 we ignore
+ * the queue number and use tx queue type since we only have 2 queues.
+ * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
+ * For newer chips with QCU/DCU we just read the corresponding TXDP register.
+ *
+ * XXX: Is TXDP read and clear ?
+ */
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+{
+	u16 tx_reg;
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/*
+	 * Get the transmit queue descriptor pointer from the selected queue
+	 */
+	/*5210 doesn't have QCU*/
+	if (ah->ah_version == AR5K_AR5210) {
+		switch (ah->ah_txq[queue].tqi_type) {
+		case AR5K_TX_QUEUE_DATA:
+			tx_reg = AR5K_NOQCU_TXDP0;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+		case AR5K_TX_QUEUE_CAB:
+			tx_reg = AR5K_NOQCU_TXDP1;
+			break;
+		default:
+			return 0xffffffff;
+		}
+	} else {
+		tx_reg = AR5K_QUEUE_TXDP(queue);
+	}
+
+	return ath5k_hw_reg_read(ah, tx_reg);
+}
+
+/**
+ * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Set TX descriptor's address for a specific queue. For 5210 we ignore
+ * the queue number and use the tx queue type since we only have 2 queues;
+ * as above, TXDP0 is used for the normal data queue and TXDP1 for the
+ * beacon queue.
+ * For newer chips with QCU/DCU we just set the corresponding TXDP register.
+ * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
+ * active.
+ */
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+{
+	u16 tx_reg;
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/*
+	 * Set the transmit queue descriptor pointer register by type
+	 * on 5210
+	 */
+	if (ah->ah_version == AR5K_AR5210) {
+		switch (ah->ah_txq[queue].tqi_type) {
+		case AR5K_TX_QUEUE_DATA:
+			tx_reg = AR5K_NOQCU_TXDP0;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+		case AR5K_TX_QUEUE_CAB:
+			tx_reg = AR5K_NOQCU_TXDP1;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		/*
+		 * Set the transmit queue descriptor pointer for
+		 * the selected queue on QCU for 5211+
+		 * (this won't work if the queue is still active)
+		 */
+		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+			return -EIO;
+
+		tx_reg = AR5K_QUEUE_TXDP(queue);
+	}
+
+	/* Set descriptor pointer */
+	ath5k_hw_reg_write(ah, phys_addr, tx_reg);
+
+	return 0;
+}
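+
+/*
+ * Typical sequence (illustration only, error handling omitted): the driver
+ * points the hardware at the head of its descriptor chain and only then
+ * kicks the DMA engine, matching the ordering note on
+ * ath5k_hw_start_tx_dma() above.
+ *
+ *	ath5k_hw_stop_tx_dma(ah, queue);
+ *	ath5k_hw_set_txdp(ah, queue, bf->daddr);
+ *	ath5k_hw_start_tx_dma(ah, queue);
+ *
+ * bf->daddr stands for the DMA address of the first descriptor and is only
+ * a placeholder name here.
+ */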
+
+/**
+ * ath5k_hw_update_tx_triglevel - Update tx trigger level
+ *
+ * @ah: The &struct ath5k_hw
+ * @increase: Flag to force increase of trigger level
+ *
+ * This function increases/decreases the tx trigger level for the tx fifo
+ * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
+ * the buffer and transmits its data. Lowering this results in sending small
+ * frames more quickly but can lead to tx underruns; raising it a lot can
+ * cause other problems (I think bmiss is related). Right now we start with
+ * the lowest possible value (64 bytes) and increase it using the increase
+ * flag if we get a tx underrun. Returns -EIO if we have reached the
+ * maximum/minimum.
+ *
+ * XXX: Link this with tx DMA size ?
+ * XXX: Use it to save interrupts ?
+ * TODO: Needs testing, I think it's related to bmiss...
+ */
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+{
+	u32 trigger_level, imr;
+	int ret = -EIO;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/*
+	 * Disable interrupts by setting the mask
+	 */
+	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
+
+	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
+			AR5K_TXCFG_TXFULL);
+
+	if (!increase) {
+		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
+			goto done;
+	} else
+		trigger_level +=
+			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
+
+	/*
+	 * Update trigger level on success
+	 */
+	if (ah->ah_version == AR5K_AR5210)
+		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
+	else
+		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
+				AR5K_TXCFG_TXFULL, trigger_level);
+
+	ret = 0;
+
+done:
+	/*
+	 * Restore interrupt mask
+	 */
+	ath5k_hw_set_imr(ah, imr);
+
+	return ret;
+}
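+
+/*
+ * Illustrative caller (not part of this patch): the interrupt path can bump
+ * the trigger level whenever the hardware reports a tx FIFO underrun,
+ * assuming the ISR result carries a tx-underrun flag such as
+ * AR5K_INT_TXURN:
+ *
+ *	if (status & AR5K_INT_TXURN)
+ *		ath5k_hw_update_tx_triglevel(ah, true);
+ */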
+
+/*******************\
+* Interrupt masking *
+\*******************/
+
+/**
+ * ath5k_hw_is_intr_pending - Check if we have pending interrupts
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if we have pending interrupts to process. Returns 1 if we
+ * have pending interrupts and 0 if we don't.
+ */
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
+}
+
+/**
+ * ath5k_hw_get_isr - Get interrupt status
+ *
+ * @ah: The &struct ath5k_hw
+ * @interrupt_mask: Driver's interrupt mask used to filter out
+ * interrupts in sw.
+ *
+ * This function is used inside our interrupt handler to determine the reason
+ * for the interrupt by reading the Primary Interrupt Status Register.
+ * Returns an abstract interrupt status mask which is mostly ISR with some
+ * uncommon bits mapped to standard, non hw-specific positions
+ * (check out &ath5k_int).
+ *
+ * NOTE: We use read-and-clear register, so after this function is called ISR
+ * is zeroed.
+ *
+ * XXX: Why filter interrupts in sw with interrupt_mask ? No benefit at all
+ * plus it can be misleading (one might think that we save interrupts this way)
+ */
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+{
+	u32 data;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/*
+	 * Read interrupt status from the Interrupt Status register
+	 * on 5210
+	 */
+	if (ah->ah_version == AR5K_AR5210) {
+		data = ath5k_hw_reg_read(ah, AR5K_ISR);
+		if (unlikely(data == AR5K_INT_NOCARD)) {
+			*interrupt_mask = data;
+			return -ENODEV;
+		}
+	} else {
+		/*
+		 * Read interrupt status from the Read-And-Clear
+		 * shadow register.
+		 * Note: PISR/SISR Not available on 5210
+		 */
+		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
+	}
+
+	/*
+	 * Get abstract interrupt mask (driver-compatible)
+	 */
+	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+
+	if (unlikely(data == AR5K_INT_NOCARD))
+		return -ENODEV;
+
+	if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
+		*interrupt_mask |= AR5K_INT_RX;
+
+	if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
+		| AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
+		*interrupt_mask |= AR5K_INT_TX;
+
+	if (ah->ah_version != AR5K_AR5210) {
+		/*HIU = Host Interface Unit (PCI etc)*/
+		if (unlikely(data & (AR5K_ISR_HIUERR)))
+			*interrupt_mask |= AR5K_INT_FATAL;
+
+		/*Beacon Not Ready*/
+		if (unlikely(data & (AR5K_ISR_BNR)))
+			*interrupt_mask |= AR5K_INT_BNR;
+	}
+
+	/*
+	 * XXX: BMISS interrupts may occur after association.
+	 * I found this on 5210 code but it needs testing. If this is
+	 * true we should disable them before assoc and re-enable them
+	 * after a successful assoc + some jiffies.
+	 */
+#if 0
+	interrupt_mask &= ~AR5K_INT_BMISS;
+#endif
+
+	/*
+	 * In case we didn't handle anything,
+	 * print the register value.
+	 */
+	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
+		ATH5K_PRINTF("0x%08x\n", data);
+
+	return 0;
+}
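+
+/*
+ * Sketch of the expected calling pattern (illustration only): an interrupt
+ * handler keeps reading until the card has nothing more pending, since the
+ * status register is read-and-clear.
+ *
+ *	enum ath5k_int status;
+ *
+ *	do {
+ *		if (ath5k_hw_get_isr(ah, &status))
+ *			break;	(card gone)
+ *		(dispatch on the AR5K_INT_* bits in status)
+ *	} while (ath5k_hw_is_intr_pending(ah));
+ */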
+
+/**
+ * ath5k_hw_set_imr - Set interrupt mask
+ *
+ * @ah: The &struct ath5k_hw
+ * @new_mask: The new interrupt mask to be set
+ *
+ * Set the interrupt mask in hw to save interrupts. We do that by mapping
+ * ath5k_int bits to hw-specific bits to remove abstraction and writing
+ * the Interrupt Mask Register.
+ */
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+{
+	enum ath5k_int old_mask, int_mask;
+
+	/*
+	 * Disable card interrupts to prevent any race conditions
+	 * (they will be re-enabled afterwards).
+	 */
+	ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
+	ath5k_hw_reg_read(ah, AR5K_IER);
+
+	old_mask = ah->ah_imr;
+
+	/*
+	 * Add additional, chipset-dependent interrupt mask flags
+	 * and write them to the IMR (interrupt mask register).
+	 */
+	int_mask = new_mask & AR5K_INT_COMMON;
+
+	if (new_mask & AR5K_INT_RX)
+		int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
+			AR5K_IMR_RXDESC;
+
+	if (new_mask & AR5K_INT_TX)
+		int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
+			AR5K_IMR_TXURN;
+
+	if (ah->ah_version != AR5K_AR5210) {
+		if (new_mask & AR5K_INT_FATAL) {
+			int_mask |= AR5K_IMR_HIUERR;
+			AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
+					AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
+		}
+	}
+
+	ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
+
+	/* Store new interrupt mask */
+	ah->ah_imr = new_mask;
+
+	/* ..re-enable interrupts */
+	ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
+	ath5k_hw_reg_read(ah, AR5K_IER);
+
+	return old_mask;
+}
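+
+/*
+ * The save/restore idiom used elsewhere in this file (see
+ * ath5k_hw_update_tx_triglevel()) is simply:
+ *
+ *	old = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
+ *	(critical section with card interrupts masked)
+ *	ath5k_hw_set_imr(ah, old);
+ */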
+
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
new file mode 100644
index 0000000..a883839
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* EEPROM access functions and helpers *
+\*************************************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*
+ * Read from eeprom
+ */
+static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
+{
+	u32 status, timeout;
+
+	ATH5K_TRACE(ah->ah_sc);
+	/*
+	 * Initialize EEPROM access
+	 */
+	if (ah->ah_version == AR5K_AR5210) {
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
+		(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
+	} else {
+		ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
+		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
+				AR5K_EEPROM_CMD_READ);
+	}
+
+	for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
+		status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
+		if (status & AR5K_EEPROM_STAT_RDDONE) {
+			if (status & AR5K_EEPROM_STAT_RDERR)
+				return -EIO;
+			*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
+					0xffff);
+			return 0;
+		}
+		udelay(15);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Translate binary channel representation in EEPROM to frequency
+ */
+static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin,
+				unsigned int mode)
+{
+	u16 val;
+
+	if (bin == AR5K_EEPROM_CHANNEL_DIS)
+		return bin;
+
+	if (mode == AR5K_EEPROM_MODE_11A) {
+		if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
+			val = (5 * bin) + 4800;
+		else
+			val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
+				(bin * 10) + 5100;
+	} else {
+		if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
+			val = bin + 2300;
+		else
+			val = bin + 2400;
+	}
+
+	return val;
+}
+
+/*
+ * Read antenna information from EEPROM
+ */
+static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
+		unsigned int mode)
+{
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	u32 o = *offset;
+	u16 val;
+	int ret, i = 0;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_switch_settling[mode]	= (val >> 8) & 0x7f;
+	ee->ee_ant_tx_rx[mode]		= (val >> 2) & 0x3f;
+	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
+	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
+	ee->ee_ant_control[mode][i++]	= val & 0x3f;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_ant_control[mode][i++]	= (val >> 10) & 0x3f;
+	ee->ee_ant_control[mode][i++]	= (val >> 4) & 0x3f;
+	ee->ee_ant_control[mode][i]	= (val << 2) & 0x3f;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_ant_control[mode][i++]	|= (val >> 14) & 0x3;
+	ee->ee_ant_control[mode][i++]	= (val >> 8) & 0x3f;
+	ee->ee_ant_control[mode][i++]	= (val >> 2) & 0x3f;
+	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
+	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
+	ee->ee_ant_control[mode][i++]	= val & 0x3f;
+
+	/* Get antenna modes */
+	ah->ah_antenna[mode][0] =
+	    (ee->ee_ant_control[mode][0] << 4) | 0x1;
+	ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
+	     ee->ee_ant_control[mode][1] 	|
+	    (ee->ee_ant_control[mode][2] << 6) 	|
+	    (ee->ee_ant_control[mode][3] << 12) |
+	    (ee->ee_ant_control[mode][4] << 18) |
+	    (ee->ee_ant_control[mode][5] << 24);
+	ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
+	     ee->ee_ant_control[mode][6] 	|
+	    (ee->ee_ant_control[mode][7] << 6) 	|
+	    (ee->ee_ant_control[mode][8] << 12) |
+	    (ee->ee_ant_control[mode][9] << 18) |
+	    (ee->ee_ant_control[mode][10] << 24);
+
+	/* return new offset */
+	*offset = o;
+
+	return 0;
+}
+
+/*
+ * Read supported modes from eeprom
+ */
+static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
+		unsigned int mode)
+{
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	u32 o = *offset;
+	u16 val;
+	int ret;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_tx_end2xlna_enable[mode]	= (val >> 8) & 0xff;
+	ee->ee_thr_62[mode]		= val & 0xff;
+
+	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
+		ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_tx_end2xpa_disable[mode]	= (val >> 8) & 0xff;
+	ee->ee_tx_frm2xpa_enable[mode]	= val & 0xff;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_pga_desired_size[mode]	= (val >> 8) & 0xff;
+
+	if ((val & 0xff) & 0x80)
+		ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
+	else
+		ee->ee_noise_floor_thr[mode] = val & 0xff;
+
+	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
+		ee->ee_noise_floor_thr[mode] =
+		    mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
+
+	AR5K_EEPROM_READ(o++, val);
+	ee->ee_xlna_gain[mode]		= (val >> 5) & 0xff;
+	ee->ee_x_gain[mode]		= (val >> 1) & 0xf;
+	ee->ee_xpd[mode]		= val & 0x1;
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
+		ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
+		AR5K_EEPROM_READ(o++, val);
+		ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
+
+		if (mode == AR5K_EEPROM_MODE_11A)
+			ee->ee_xr_power[mode] = val & 0x3f;
+		else {
+			ee->ee_ob[mode][0] = val & 0x7;
+			ee->ee_db[mode][0] = (val >> 3) & 0x7;
+		}
+	}
+
+	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
+		ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
+		ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
+	} else {
+		ee->ee_i_gain[mode] = (val >> 13) & 0x7;
+
+		AR5K_EEPROM_READ(o++, val);
+		ee->ee_i_gain[mode] |= (val << 3) & 0x38;
+
+		if (mode == AR5K_EEPROM_MODE_11G)
+			ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
+	}
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+			mode == AR5K_EEPROM_MODE_11A) {
+		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
+		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
+	}
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
+	    mode == AR5K_EEPROM_MODE_11G)
+		ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
+
+	/* return new offset */
+	*offset = o;
+
+	return 0;
+}
+
+/*
+ * Initialize eeprom & capabilities structs
+ */
+int ath5k_eeprom_init(struct ath5k_hw *ah)
+{
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	unsigned int mode, i;
+	int ret;
+	u32 offset;
+	u16 val;
+
+	/* Initial TX thermal adjustment values */
+	ee->ee_tx_clip = 4;
+	ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
+	ee->ee_gain_select = 1;
+
+	/*
+	 * Read values from EEPROM and store them in the capability structure
+	 */
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
+
+	/* Return if we have an old EEPROM */
+	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
+		return 0;
+
+#ifdef notyet
+	/*
+	 * Validate the checksum of the EEPROM data. There are some
+	 * devices with invalid EEPROMs.
+	 */
+	for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+		AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
+		cksum ^= val;
+	}
+	if (cksum != AR5K_EEPROM_INFO_CKSUM) {
+		ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+		return -EIO;
+	}
+#endif
+
+	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
+	    ee_ant_gain);
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
+		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
+	}
+
+	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
+		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
+		ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
+		ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
+
+		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
+		ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
+		ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
+	}
+
+	/*
+	 * Get conformance test limit values
+	 */
+	offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
+	ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
+
+	for (i = 0; i < ee->ee_ctls; i++) {
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_ctl[i] = (val >> 8) & 0xff;
+		ee->ee_ctl[i + 1] = val & 0xff;
+	}
+
+	/*
+	 * Get values for 802.11a (5GHz)
+	 */
+	mode = AR5K_EEPROM_MODE_11A;
+
+	ee->ee_turbo_max_power[mode] =
+			AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
+
+	offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
+
+	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	AR5K_EEPROM_READ(offset++, val);
+	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
+	ee->ee_ob[mode][3]		= (val >> 5) & 0x7;
+	ee->ee_db[mode][3]		= (val >> 2) & 0x7;
+	ee->ee_ob[mode][2]		= (val << 1) & 0x7;
+
+	AR5K_EEPROM_READ(offset++, val);
+	ee->ee_ob[mode][2]		|= (val >> 15) & 0x1;
+	ee->ee_db[mode][2]		= (val >> 12) & 0x7;
+	ee->ee_ob[mode][1]		= (val >> 9) & 0x7;
+	ee->ee_db[mode][1]		= (val >> 6) & 0x7;
+	ee->ee_ob[mode][0]		= (val >> 3) & 0x7;
+	ee->ee_db[mode][0]		= val & 0x7;
+
+	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_margin_tx_rx[mode] = val & 0x3f;
+	}
+
+	/*
+	 * Get values for 802.11b (2.4GHz)
+	 */
+	mode = AR5K_EEPROM_MODE_11B;
+	offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
+
+	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	AR5K_EEPROM_READ(offset++, val);
+	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
+	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
+	ee->ee_db[mode][1]		= val & 0x7;
+
+	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_cal_pier[mode][0] =
+			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
+		ee->ee_cal_pier[mode][1] =
+			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
+
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_cal_pier[mode][2] =
+			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
+	}
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+		ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
+
+	/*
+	 * Get values for 802.11g (2.4GHz)
+	 */
+	mode = AR5K_EEPROM_MODE_11G;
+	offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
+
+	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	AR5K_EEPROM_READ(offset++, val);
+	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
+	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
+	ee->ee_db[mode][1]		= val & 0x7;
+
+	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
+	if (ret)
+		return ret;
+
+	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_cal_pier[mode][0] =
+			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
+		ee->ee_cal_pier[mode][1] =
+			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
+
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_turbo_max_power[mode] = val & 0x7f;
+		ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
+
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_cal_pier[mode][2] =
+			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
+
+		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+			ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
+
+		AR5K_EEPROM_READ(offset++, val);
+		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
+		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
+
+		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
+			AR5K_EEPROM_READ(offset++, val);
+			ee->ee_cck_ofdm_gain_delta = val & 0xff;
+		}
+	}
+
+	/*
+	 * Read 5GHz EEPROM channels
+	 */
+
+	return 0;
+}
+
+/*
+ * Read the MAC address from eeprom
+ */
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+{
+	u8 mac_d[ETH_ALEN];
+	u32 total, offset;
+	u16 data;
+	int octet, ret;
+
+	memset(mac, 0, ETH_ALEN);
+	memset(mac_d, 0, ETH_ALEN);
+
+	ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
+	if (ret)
+		return ret;
+
+	for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
+		ret = ath5k_hw_eeprom_read(ah, offset, &data);
+		if (ret)
+			return ret;
+
+		total += data;
+		mac_d[octet + 1] = data & 0xff;
+		mac_d[octet] = data >> 8;
+		octet += 2;
+	}
+
+	memcpy(mac, mac_d, ETH_ALEN);
+
+	if (!total || total == 3 * 0xffff)
+		return -EINVAL;
+
+	return 0;
+}
+
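As a worked example of the bin-to-frequency mapping in ath5k_eeprom_bin2freq() above (a standalone sketch: the bin values are made up, and the helpers simply restate the > v3.2 arithmetic so the snippet builds outside the kernel):

#include <stdio.h>

/* Mirror of the > v3.2 branches of ath5k_eeprom_bin2freq() */
static unsigned int bin2freq_5ghz(unsigned int bin) { return 5 * bin + 4800; }
static unsigned int bin2freq_2ghz(unsigned int bin) { return bin + 2300; }

int main(void)
{
	printf("5 GHz bin 40  -> %u MHz\n", bin2freq_5ghz(40));  /* 5000 */
	printf("2 GHz bin 112 -> %u MHz\n", bin2freq_2ghz(112)); /* 2412 */
	return 0;
}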
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
new file mode 100644
index 0000000..a468ecf
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*
+ * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
+ */
+#define AR5K_EEPROM_MAGIC		0x003d	/* EEPROM Magic number */
+#define AR5K_EEPROM_MAGIC_VALUE		0x5aa5	/* Default - found on EEPROM */
+#define AR5K_EEPROM_MAGIC_5212		0x0000145c /* 5212 */
+#define AR5K_EEPROM_MAGIC_5211		0x0000145b /* 5211 */
+#define AR5K_EEPROM_MAGIC_5210		0x0000145a /* 5210 */
+
+#define AR5K_EEPROM_PROTECT		0x003f	/* EEPROM protect status */
+#define AR5K_EEPROM_PROTECT_RD_0_31	0x0001	/* Read protection bit for offsets 0x0 - 0x1f */
+#define AR5K_EEPROM_PROTECT_WR_0_31	0x0002	/* Write protection bit for offsets 0x0 - 0x1f */
+#define AR5K_EEPROM_PROTECT_RD_32_63	0x0004	/* 0x20 - 0x3f */
+#define AR5K_EEPROM_PROTECT_WR_32_63	0x0008
+#define AR5K_EEPROM_PROTECT_RD_64_127	0x0010	/* 0x40 - 0x7f */
+#define AR5K_EEPROM_PROTECT_WR_64_127	0x0020
+#define AR5K_EEPROM_PROTECT_RD_128_191	0x0040	/* 0x80 - 0xbf (regdom) */
+#define AR5K_EEPROM_PROTECT_WR_128_191	0x0080
+#define AR5K_EEPROM_PROTECT_RD_192_207	0x0100	/* 0xc0 - 0xcf */
+#define AR5K_EEPROM_PROTECT_WR_192_207	0x0200
+#define AR5K_EEPROM_PROTECT_RD_208_223	0x0400	/* 0xd0 - 0xdf */
+#define AR5K_EEPROM_PROTECT_WR_208_223	0x0800
+#define AR5K_EEPROM_PROTECT_RD_224_239	0x1000	/* 0xe0 - 0xef */
+#define AR5K_EEPROM_PROTECT_WR_224_239	0x2000
+#define AR5K_EEPROM_PROTECT_RD_240_255	0x4000	/* 0xf0 - 0xff */
+#define AR5K_EEPROM_PROTECT_WR_240_255	0x8000
+#define AR5K_EEPROM_REG_DOMAIN		0x00bf	/* EEPROM regdom */
+#define AR5K_EEPROM_INFO_BASE		0x00c0	/* EEPROM header */
+#define AR5K_EEPROM_INFO_MAX		(0x400 - AR5K_EEPROM_INFO_BASE)
+#define AR5K_EEPROM_INFO_CKSUM		0xffff
+#define AR5K_EEPROM_INFO(_n)		(AR5K_EEPROM_INFO_BASE + (_n))
+
+#define AR5K_EEPROM_VERSION		AR5K_EEPROM_INFO(1)	/* EEPROM Version */
+#define AR5K_EEPROM_VERSION_3_0		0x3000	/* No idea what's going on before this version */
+#define AR5K_EEPROM_VERSION_3_1		0x3001	/* ob/db values for 2Ghz (ar5211_rfregs) */
+#define AR5K_EEPROM_VERSION_3_2		0x3002	/* different frequency representation (eeprom_bin2freq) */
+#define AR5K_EEPROM_VERSION_3_3		0x3003	/* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
+#define AR5K_EEPROM_VERSION_3_4		0x3004	/* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
+#define AR5K_EEPROM_VERSION_4_0		0x4000	/* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_1		0x4001	/* has ee_margin_tx_rx (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_2		0x4002	/* has ee_cck_ofdm_gain_delta (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_3		0x4003
+#define AR5K_EEPROM_VERSION_4_4		0x4004
+#define AR5K_EEPROM_VERSION_4_5		0x4005
+#define AR5K_EEPROM_VERSION_4_6		0x4006	/* has ee_scaled_cck_delta */
+#define AR5K_EEPROM_VERSION_4_7		0x4007
+
+#define AR5K_EEPROM_MODE_11A		0
+#define AR5K_EEPROM_MODE_11B		1
+#define AR5K_EEPROM_MODE_11G		2
+
+#define AR5K_EEPROM_HDR			AR5K_EEPROM_INFO(2)	/* Header that contains the device caps */
+#define AR5K_EEPROM_HDR_11A(_v)		(((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
+#define AR5K_EEPROM_HDR_11B(_v)		(((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
+#define AR5K_EEPROM_HDR_11G(_v)		(((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v)	(((_v) >> 3) & 0x1)	/* Disable turbo for 2Ghz (?) */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v)	(((_v) >> 4) & 0x7f)	/* Max turbo power for a/XR mode (eeprom_init) */
+#define AR5K_EEPROM_HDR_DEVICE(_v)	(((_v) >> 11) & 0x7)
+#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v)	(((_v) >> 15) & 0x1)	/* Disable turbo for 5Ghz (?) */
+#define AR5K_EEPROM_HDR_RFKILL(_v)	(((_v) >> 14) & 0x1)	/* Device has RFKill support */
+
+#define AR5K_EEPROM_RFKILL_GPIO_SEL	0x0000001c
+#define AR5K_EEPROM_RFKILL_GPIO_SEL_S	2
+#define AR5K_EEPROM_RFKILL_POLARITY	0x00000002
+#define AR5K_EEPROM_RFKILL_POLARITY_S	1
+
+/* Newer EEPROMs are using a different offset */
+#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
+	(((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
+
+#define AR5K_EEPROM_ANT_GAIN(_v)	AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
+#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v)	((int8_t)(((_v) >> 8) & 0xff))
+#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v)	((int8_t)((_v) & 0xff))
+
+/* calibration settings */
+#define AR5K_EEPROM_MODES_11A(_v)	AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
+#define AR5K_EEPROM_MODES_11B(_v)	AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
+#define AR5K_EEPROM_MODES_11G(_v)	AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
+#define AR5K_EEPROM_CTL(_v)		AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128)	/* Conformance test limits */
+
+/* [3.1 - 3.3] */
+#define AR5K_EEPROM_OBDB0_2GHZ		0x00ec
+#define AR5K_EEPROM_OBDB1_2GHZ		0x00ed
+
+/* Misc values available since EEPROM 4.0 */
+#define AR5K_EEPROM_MISC0		0x00c4
+#define AR5K_EEPROM_EARSTART(_v)	((_v) & 0xfff)
+#define AR5K_EEPROM_EEMAP(_v)		(((_v) >> 14) & 0x3)
+#define AR5K_EEPROM_MISC1		0x00c5
+#define AR5K_EEPROM_TARGET_PWRSTART(_v)	((_v) & 0xfff)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v)	(((_v) >> 14) & 0x1)
+
+
+/* Some EEPROM defines */
+#define AR5K_EEPROM_EEP_SCALE		100
+#define AR5K_EEPROM_EEP_DELTA		10
+#define AR5K_EEPROM_N_MODES		3
+#define AR5K_EEPROM_N_5GHZ_CHAN		10
+#define AR5K_EEPROM_N_2GHZ_CHAN		3
+#define AR5K_EEPROM_MAX_CHAN		10
+#define AR5K_EEPROM_N_PCDAC		11
+#define AR5K_EEPROM_N_TEST_FREQ		8
+#define AR5K_EEPROM_N_EDGES		8
+#define AR5K_EEPROM_N_INTERCEPTS	11
+#define AR5K_EEPROM_FREQ_M(_v)		AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
+#define AR5K_EEPROM_PCDAC_M		0x3f
+#define AR5K_EEPROM_PCDAC_START		1
+#define AR5K_EEPROM_PCDAC_STOP		63
+#define AR5K_EEPROM_PCDAC_STEP		1
+#define AR5K_EEPROM_NON_EDGE_M		0x40
+#define AR5K_EEPROM_CHANNEL_POWER	8
+#define AR5K_EEPROM_N_OBDB		4
+#define AR5K_EEPROM_OBDB_DIS		0xffff
+#define AR5K_EEPROM_CHANNEL_DIS		0xff
+#define AR5K_EEPROM_SCALE_OC_DELTA(_x)	(((_x) * 2) / 10)
+#define AR5K_EEPROM_N_CTLS(_v)		AR5K_EEPROM_OFF(_v, 16, 32)
+#define AR5K_EEPROM_MAX_CTLS		32
+#define AR5K_EEPROM_N_XPD_PER_CHANNEL	4
+#define AR5K_EEPROM_N_XPD0_POINTS	4
+#define AR5K_EEPROM_N_XPD3_POINTS	3
+#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ	35
+#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ	55
+#define AR5K_EEPROM_POWER_M		0x3f
+#define AR5K_EEPROM_POWER_MIN		0
+#define AR5K_EEPROM_POWER_MAX		3150
+#define AR5K_EEPROM_POWER_STEP		50
+#define AR5K_EEPROM_POWER_TABLE_SIZE	64
+#define AR5K_EEPROM_N_POWER_LOC_11B	4
+#define AR5K_EEPROM_N_POWER_LOC_11G	6
+#define AR5K_EEPROM_I_GAIN		10
+#define AR5K_EEPROM_CCK_OFDM_DELTA	15
+#define AR5K_EEPROM_N_IQ_CAL		2
+
+#define AR5K_EEPROM_READ(_o, _v) do {			\
+	ret = ath5k_hw_eeprom_read(ah, (_o), &(_v));	\
+	if (ret)					\
+		return ret;				\
+} while (0)
+
+#define AR5K_EEPROM_READ_HDR(_o, _v)					\
+	AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v);	\
+
+/* Struct to hold EEPROM calibration data */
+struct ath5k_eeprom_info {
+	u16	ee_magic;
+	u16	ee_protect;
+	u16	ee_regdomain;
+	u16	ee_version;
+	u16	ee_header;
+	u16	ee_ant_gain;
+	u16	ee_misc0;
+	u16	ee_misc1;
+	u16	ee_cck_ofdm_gain_delta;
+	u16	ee_cck_ofdm_power_delta;
+	u16	ee_scaled_cck_delta;
+
+	/* Used for tx thermal adjustment (eeprom_init, rfregs) */
+	u16	ee_tx_clip;
+	u16	ee_pwd_84;
+	u16	ee_pwd_90;
+	u16	ee_gain_select;
+
+	/* RF Calibration settings (reset, rfregs) */
+	u16	ee_i_cal[AR5K_EEPROM_N_MODES];
+	u16	ee_q_cal[AR5K_EEPROM_N_MODES];
+	u16	ee_fixed_bias[AR5K_EEPROM_N_MODES];
+	u16	ee_turbo_max_power[AR5K_EEPROM_N_MODES];
+	u16	ee_xr_power[AR5K_EEPROM_N_MODES];
+	u16	ee_switch_settling[AR5K_EEPROM_N_MODES];
+	u16	ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
+	u16	ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
+	u16	ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
+	u16	ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
+	u16	ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
+	u16	ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
+	u16	ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
+	u16	ee_thr_62[AR5K_EEPROM_N_MODES];
+	u16	ee_xlna_gain[AR5K_EEPROM_N_MODES];
+	u16	ee_xpd[AR5K_EEPROM_N_MODES];
+	u16	ee_x_gain[AR5K_EEPROM_N_MODES];
+	u16	ee_i_gain[AR5K_EEPROM_N_MODES];
+	u16	ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
+
+	/* Unused */
+	u16	ee_false_detect[AR5K_EEPROM_N_MODES];
+	u16	ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
+	u16	ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/
+
+	/* Conformance test limits (Unused) */
+	u16	ee_ctls;
+	u16	ee_ctl[AR5K_EEPROM_MAX_CTLS];
+
+	/* Noise Floor Calibration settings */
+	s16	ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
+	s8	ee_adc_desired_size[AR5K_EEPROM_N_MODES];
+	s8	ee_pga_desired_size[AR5K_EEPROM_N_MODES];
+};
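The AR5K_EEPROM_HDR_* helpers above are plain bit extractors on ee_header; the standalone sketch below uses a made-up header word and local copies of the macros so it builds outside the kernel.

#include <stdio.h>

/* Local copies of AR5K_EEPROM_HDR_11A/11B/11G/T_5GHZ_DBM and mode indices */
#define MODE_11A		0
#define MODE_11B		1
#define MODE_11G		2
#define HDR_MODE(_v, _m)	(((_v) >> (_m)) & 0x1)
#define HDR_T_5GHZ_DBM(_v)	(((_v) >> 4) & 0x7f)

int main(void)
{
	unsigned int ee_header = 0x02a5;	/* hypothetical example value */

	printf("11a:%u 11b:%u 11g:%u turbo 5GHz max:%u dBm\n",
	       HDR_MODE(ee_header, MODE_11A),
	       HDR_MODE(ee_header, MODE_11B),
	       HDR_MODE(ee_header, MODE_11G),
	       HDR_T_5GHZ_DBM(ee_header));
	return 0;
}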
diff --git a/drivers/net/wireless/ath5k/gpio.c b/drivers/net/wireless/ath5k/gpio.c
new file mode 100644
index 0000000..b77205a
--- /dev/null
+++ b/drivers/net/wireless/ath5k/gpio.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/****************\
+  GPIO Functions
+\****************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*
+ * Set led state
+ */
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+{
+	u32 led;
+	/*5210 has different led mode handling*/
+	u32 led_5210;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/*Reset led status*/
+	if (ah->ah_version != AR5K_AR5210)
+		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
+			AR5K_PCICFG_LEDMODE |  AR5K_PCICFG_LED);
+	else
+		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
+
+	/*
+	 * Some blinking values; adjust these as you wish
+	 */
+	switch (state) {
+	case AR5K_LED_SCAN:
+	case AR5K_LED_AUTH:
+		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
+		led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
+		break;
+
+	case AR5K_LED_INIT:
+		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
+		led_5210 = AR5K_PCICFG_LED_PEND;
+		break;
+
+	case AR5K_LED_ASSOC:
+	case AR5K_LED_RUN:
+		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
+		led_5210 = AR5K_PCICFG_LED_ASSOC;
+		break;
+
+	default:
+		led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
+		led_5210 = AR5K_PCICFG_LED_PEND;
+		break;
+	}
+
+	/*Write new status to the register*/
+	if (ah->ah_version != AR5K_AR5210)
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
+	else
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
+}
+
+/*
+ * Set GPIO inputs
+ */
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (gpio > AR5K_NUM_GPIO)
+		return -EINVAL;
+
+	ath5k_hw_reg_write(ah,
+		(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
+		| AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
+
+	return 0;
+}
+
+/*
+ * Set GPIO outputs
+ */
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (gpio > AR5K_NUM_GPIO)
+		return -EINVAL;
+
+	ath5k_hw_reg_write(ah,
+		(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
+		| AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
+
+	return 0;
+}
+
+/*
+ * Get GPIO state
+ */
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (gpio > AR5K_NUM_GPIO)
+		return 0xffffffff;
+
+	/* GPIO input magic */
+	return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
+		0x1;
+}
+
+/*
+ * Set GPIO state
+ */
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+{
+	u32 data;
+	ATH5K_TRACE(ah->ah_sc);
+
+	if (gpio > AR5K_NUM_GPIO)
+		return -EINVAL;
+
+	/* GPIO output magic */
+	data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
+
+	data &= ~(1 << gpio);
+	data |= (val & 1) << gpio;
+
+	ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
+
+	return 0;
+}
+
+/*
+ * Initialize the GPIO interrupt (RFKill switch)
+ */
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+		u32 interrupt_level)
+{
+	u32 data;
+
+	ATH5K_TRACE(ah->ah_sc);
+	if (gpio > AR5K_NUM_GPIO)
+		return;
+
+	/*
+	 * Set the GPIO interrupt
+	 */
+	data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
+		~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
+		AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
+		(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
+
+	ath5k_hw_reg_write(ah, interrupt_level ? data :
+		(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
+
+	ah->ah_imr |= AR5K_IMR_GPIO;
+
+	/* Enable GPIO interrupts */
+	AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
+}
+
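Taken together, these helpers cover the RFKill handling that the old reset path keeps behind #if 0 (see the hw.c removal below); a hedged sketch, assuming GPIO 0 carries the RFKill line and ah is an initialized struct ath5k_hw *:

	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
		ath5k_hw_set_gpio_input(ah, 0);
		/* Arm the interrupt on the opposite of the current level so a
		 * change of the switch state raises AR5K_IMR_GPIO */
		ath5k_hw_set_gpio_intr(ah, 0, !ath5k_hw_get_gpio(ah, 0));
	}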
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
deleted file mode 100644
index ad1a5b4..0000000
--- a/drivers/net/wireless/ath5k/hw.c
+++ /dev/null
@@ -1,4529 +0,0 @@
-/*
- * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2007 Matthew W. S. Bell  <mentor@madwifi.org>
- * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
- * Copyright (c) 2007 Pavel Roskin <proski@gnu.org>
- * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- */
-
-/*
- * HW related functions for Atheros Wireless LAN devices.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "reg.h"
-#include "base.h"
-#include "debug.h"
-
-/* Rate tables */
-static const struct ath5k_rate_table ath5k_rt_11a = AR5K_RATES_11A;
-static const struct ath5k_rate_table ath5k_rt_11b = AR5K_RATES_11B;
-static const struct ath5k_rate_table ath5k_rt_11g = AR5K_RATES_11G;
-static const struct ath5k_rate_table ath5k_rt_turbo = AR5K_RATES_TURBO;
-static const struct ath5k_rate_table ath5k_rt_xr = AR5K_RATES_XR;
-
-/* Prototypes */
-static int ath5k_hw_nic_reset(struct ath5k_hw *, u32);
-static int ath5k_hw_nic_wakeup(struct ath5k_hw *, int, bool);
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
-	unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
-	unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
-	unsigned int, unsigned int);
-static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
-	unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
-	unsigned int);
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
-					 struct ath5k_tx_status *);
-static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
-	unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
-	unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
-	unsigned int, unsigned int);
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
-					 struct ath5k_tx_status *);
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *, struct ath5k_desc *,
-					struct ath5k_rx_status *);
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *, struct ath5k_desc *,
-					struct ath5k_rx_status *);
-static int ath5k_hw_get_capabilities(struct ath5k_hw *);
-
-static int ath5k_eeprom_init(struct ath5k_hw *);
-static int ath5k_eeprom_read_mac(struct ath5k_hw *, u8 *);
-
-static int ath5k_hw_enable_pspoll(struct ath5k_hw *, u8 *, u16);
-static int ath5k_hw_disable_pspoll(struct ath5k_hw *);
-
-/*
- * Enable to overwrite the country code (use "00" for debug)
- */
-#if 0
-#define COUNTRYCODE "00"
-#endif
-
-/*******************\
-  General Functions
-\*******************/
-
-/*
- * Functions used internally
- */
-
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
-	return turbo ? (usec * 80) : (usec * 40);
-}
-
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
-	return turbo ? (clock / 80) : (clock / 40);
-}
-
-/*
- * Check if a register write has been completed
- */
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
-		bool is_set)
-{
-	int i;
-	u32 data;
-
-	for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
-		data = ath5k_hw_reg_read(ah, reg);
-		if (is_set && (data & flag))
-			break;
-		else if ((data & flag) == val)
-			break;
-		udelay(15);
-	}
-
-	return (i <= 0) ? -EAGAIN : 0;
-}
-
-
-/***************************************\
-	Attach/Detach Functions
-\***************************************/
-
-/*
- * Power On Self Test helper function
- */
-static int ath5k_hw_post(struct ath5k_hw *ah)
-{
-
-	int i, c;
-	u16 cur_reg;
-	u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
-	u32 var_pattern;
-	u32 static_pattern[4] = {
-		0x55555555,	0xaaaaaaaa,
-		0x66666666,	0x99999999
-	};
-	u32 init_val;
-	u32 cur_val;
-
-	for (c = 0; c < 2; c++) {
-
-		cur_reg = regs[c];
-
-		/* Save previous value */
-		init_val = ath5k_hw_reg_read(ah, cur_reg);
-
-		for (i = 0; i < 256; i++) {
-			var_pattern = i << 16 | i;
-			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
-			cur_val = ath5k_hw_reg_read(ah, cur_reg);
-
-			if (cur_val != var_pattern) {
-				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
-				return -EAGAIN;
-			}
-
-			/* Found on ndiswrapper dumps */
-			var_pattern = 0x0039080f;
-			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
-		}
-
-		for (i = 0; i < 4; i++) {
-			var_pattern = static_pattern[i];
-			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
-			cur_val = ath5k_hw_reg_read(ah, cur_reg);
-
-			if (cur_val != var_pattern) {
-				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
-				return -EAGAIN;
-			}
-
-			/* Found on ndiswrapper dumps */
-			var_pattern = 0x003b080f;
-			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
-		}
-
-		/* Restore previous value */
-		ath5k_hw_reg_write(ah, init_val, cur_reg);
-
-	}
-
-	return 0;
-
-}
-
-/*
- * Check if the device is supported and initialize the needed structs
- */
-struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
-{
-	struct ath5k_hw *ah;
-	struct pci_dev *pdev = sc->pdev;
-	u8 mac[ETH_ALEN];
-	int ret;
-	u32 srev;
-
-	/*If we passed the test malloc a ath5k_hw struct*/
-	ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
-	if (ah == NULL) {
-		ret = -ENOMEM;
-		ATH5K_ERR(sc, "out of memory\n");
-		goto err;
-	}
-
-	ah->ah_sc = sc;
-	ah->ah_iobase = sc->iobase;
-
-	/*
-	 * HW information
-	 */
-
-	ah->ah_op_mode = IEEE80211_IF_TYPE_STA;
-	ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
-	ah->ah_turbo = false;
-	ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
-	ah->ah_imr = 0;
-	ah->ah_atim_window = 0;
-	ah->ah_aifs = AR5K_TUNE_AIFS;
-	ah->ah_cw_min = AR5K_TUNE_CWMIN;
-	ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
-	ah->ah_software_retry = false;
-	ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
-
-	/*
-	 * Set the mac revision based on the pci id
-	 */
-	ah->ah_version = mac_version;
-
-	/*Fill the ath5k_hw struct with the needed functions*/
-	if (ah->ah_version == AR5K_AR5212)
-		ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
-	else if (ah->ah_version == AR5K_AR5211)
-		ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
-
-	if (ah->ah_version == AR5K_AR5212) {
-		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
-		ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
-		ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
-	} else {
-		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
-		ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
-		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
-	}
-
-	if (ah->ah_version == AR5K_AR5212)
-		ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
-	else if (ah->ah_version <= AR5K_AR5211)
-		ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
-
-	/* Bring device out of sleep and reset its units */
-	ret = ath5k_hw_nic_wakeup(ah, AR5K_INIT_MODE, true);
-	if (ret)
-		goto err_free;
-
-	/* Get MAC, PHY and RADIO revisions */
-	srev = ath5k_hw_reg_read(ah, AR5K_SREV);
-	ah->ah_mac_srev = srev;
-	ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
-	ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
-	ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
-			0xffffffff;
-	ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
-			CHANNEL_5GHZ);
-
-	if (ah->ah_version == AR5K_AR5210)
-		ah->ah_radio_2ghz_revision = 0;
-	else
-		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-				CHANNEL_2GHZ);
-
-	/* Return on unsupported chips (unsupported eeprom etc) */
-	if ((srev >= AR5K_SREV_VER_AR5416) &&
-	(srev < AR5K_SREV_VER_AR2425)) {
-		ATH5K_ERR(sc, "Device not yet supported.\n");
-		ret = -ENODEV;
-		goto err_free;
-	} else if (srev == AR5K_SREV_VER_AR2425) {
-		ATH5K_WARN(sc, "Support for RF2425 is under development.\n");
-	}
-
-	/* Identify single chip solutions */
-	if (((srev <= AR5K_SREV_VER_AR5414) &&
-	(srev >= AR5K_SREV_VER_AR2413)) ||
-	(srev == AR5K_SREV_VER_AR2425)) {
-		ah->ah_single_chip = true;
-	} else {
-		ah->ah_single_chip = false;
-	}
-
-	/* Single chip radio */
-	if (ah->ah_radio_2ghz_revision == ah->ah_radio_5ghz_revision)
-		ah->ah_radio_2ghz_revision = 0;
-
-	/* Identify the radio chip*/
-	if (ah->ah_version == AR5K_AR5210) {
-		ah->ah_radio = AR5K_RF5110;
-	/*
-	 * Register returns 0x0/0x04 for radio revision
-	 * so ath5k_hw_radio_revision doesn't parse the value
-	 * correctly. For now we rely on the MAC srev to
-	 * identify the RF2425 radio.
-	 */
-	} else if (srev == AR5K_SREV_VER_AR2425) {
-		ah->ah_radio = AR5K_RF2425;
-		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
-	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
-		ah->ah_radio = AR5K_RF5111;
-		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
-	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
-		ah->ah_radio = AR5K_RF5112;
-		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
-	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
-		ah->ah_radio = AR5K_RF2413;
-		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
-	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
-		ah->ah_radio = AR5K_RF5413;
-		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
-	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
-		/* AR5424 */
-		if (srev >= AR5K_SREV_VER_AR5424) {
-			ah->ah_radio = AR5K_RF5413;
-			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
-		/* AR2424 */
-		} else {
-			ah->ah_radio = AR5K_RF2413; /* For testing */
-			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
-		}
-	}
-	ah->ah_phy = AR5K_PHY(0);
-
-	/*
-	 * Write PCI-E power save settings
-	 */
-	if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
-		ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
-		ath5k_hw_reg_write(ah, 0x24924924, 0x4080);
-		ath5k_hw_reg_write(ah, 0x28000039, 0x4080);
-		ath5k_hw_reg_write(ah, 0x53160824, 0x4080);
-		ath5k_hw_reg_write(ah, 0xe5980579, 0x4080);
-		ath5k_hw_reg_write(ah, 0x001defff, 0x4080);
-		ath5k_hw_reg_write(ah, 0x1aaabe40, 0x4080);
-		ath5k_hw_reg_write(ah, 0xbe105554, 0x4080);
-		ath5k_hw_reg_write(ah, 0x000e3007, 0x4080);
-		ath5k_hw_reg_write(ah, 0x00000000, 0x4084);
-	}
-
-	/*
-	 * POST
-	 */
-	ret = ath5k_hw_post(ah);
-	if (ret)
-		goto err_free;
-
-	/* Write AR5K_PCICFG_UNK on 2112B and later chips */
-	if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
-	srev > AR5K_SREV_VER_AR2413) {
-		ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
-	}
-
-	/*
-	 * Get card capabilities, values, ...
-	 */
-	ret = ath5k_eeprom_init(ah);
-	if (ret) {
-		ATH5K_ERR(sc, "unable to init EEPROM\n");
-		goto err_free;
-	}
-
-	/* Get misc capabilities */
-	ret = ath5k_hw_get_capabilities(ah);
-	if (ret) {
-		ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
-			sc->pdev->device);
-		goto err_free;
-	}
-
-	/* Get MAC address */
-	ret = ath5k_eeprom_read_mac(ah, mac);
-	if (ret) {
-		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
-			sc->pdev->device);
-		goto err_free;
-	}
-
-	ath5k_hw_set_lladdr(ah, mac);
-	/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
-	memset(ah->ah_bssid, 0xff, ETH_ALEN);
-	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
-	ath5k_hw_set_opmode(ah);
-
-	ath5k_hw_set_rfgain_opt(ah);
-
-	return ah;
-err_free:
-	kfree(ah);
-err:
-	return ERR_PTR(ret);
-}
-
-/*
- * Bring up MAC + PHY Chips
- */
-static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
-{
-	struct pci_dev *pdev = ah->ah_sc->pdev;
-	u32 turbo, mode, clock, bus_flags;
-	int ret;
-
-	turbo = 0;
-	mode = 0;
-	clock = 0;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* Wakeup the device */
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
-	if (ret) {
-		ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
-		return ret;
-	}
-
-	if (ah->ah_version != AR5K_AR5210) {
-		/*
-		 * Get channel mode flags
-		 */
-
-		if (ah->ah_radio >= AR5K_RF5112) {
-			mode = AR5K_PHY_MODE_RAD_RF5112;
-			clock = AR5K_PHY_PLL_RF5112;
-		} else {
-			mode = AR5K_PHY_MODE_RAD_RF5111;	/*Zero*/
-			clock = AR5K_PHY_PLL_RF5111;		/*Zero*/
-		}
-
-		if (flags & CHANNEL_2GHZ) {
-			mode |= AR5K_PHY_MODE_FREQ_2GHZ;
-			clock |= AR5K_PHY_PLL_44MHZ;
-
-			if (flags & CHANNEL_CCK) {
-				mode |= AR5K_PHY_MODE_MOD_CCK;
-			} else if (flags & CHANNEL_OFDM) {
-				/* XXX Dynamic OFDM/CCK is not supported by the
-				 * AR5211 so we set MOD_OFDM for plain g (no
-				 * CCK headers) operation. We need to test
-				 * this, 5211 might support ofdm-only g after
-				 * all, there are also initial register values
-				 * in the code for g mode (see initvals.c). */
-				if (ah->ah_version == AR5K_AR5211)
-					mode |= AR5K_PHY_MODE_MOD_OFDM;
-				else
-					mode |= AR5K_PHY_MODE_MOD_DYN;
-			} else {
-				ATH5K_ERR(ah->ah_sc,
-					"invalid radio modulation mode\n");
-				return -EINVAL;
-			}
-		} else if (flags & CHANNEL_5GHZ) {
-			mode |= AR5K_PHY_MODE_FREQ_5GHZ;
-			clock |= AR5K_PHY_PLL_40MHZ;
-
-			if (flags & CHANNEL_OFDM)
-				mode |= AR5K_PHY_MODE_MOD_OFDM;
-			else {
-				ATH5K_ERR(ah->ah_sc,
-					"invalid radio modulation mode\n");
-				return -EINVAL;
-			}
-		} else {
-			ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
-			return -EINVAL;
-		}
-
-		if (flags & CHANNEL_TURBO)
-			turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
-	} else { /* Reset the device */
-
-		/* ...enable Atheros turbo mode if requested */
-		if (flags & CHANNEL_TURBO)
-			ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
-					AR5K_PHY_TURBO);
-	}
-
-	/* Resetting the PCI bus on PCI-E cards makes the card hang
-	 * and always return 0xffff..., so we ignore that flag
-	 * for PCI-E cards */
-	bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
-
-	/* Reset chipset */
-	ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
-		AR5K_RESET_CTL_BASEBAND | bus_flags);
-	if (ret) {
-		ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
-		return -EIO;
-	}
-
-	if (ah->ah_version == AR5K_AR5210)
-		udelay(2300);
-
-	/* ...wakeup again!*/
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
-	if (ret) {
-		ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
-		return ret;
-	}
-
-	/* ...final warm reset */
-	if (ath5k_hw_nic_reset(ah, 0)) {
-		ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
-		return -EIO;
-	}
-
-	if (ah->ah_version != AR5K_AR5210) {
-		/* ...set the PHY operating mode */
-		ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
-		udelay(300);
-
-		ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
-		ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
-	}
-
-	return 0;
-}
-
-/*
- * Get the rate table for a specific operation mode
- */
-const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah,
-		unsigned int mode)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	if (!test_bit(mode, ah->ah_capabilities.cap_mode))
-		return NULL;
-
-	/* Get rate tables */
-	switch (mode) {
-	case AR5K_MODE_11A:
-		return &ath5k_rt_11a;
-	case AR5K_MODE_11A_TURBO:
-		return &ath5k_rt_turbo;
-	case AR5K_MODE_11B:
-		return &ath5k_rt_11b;
-	case AR5K_MODE_11G:
-		return &ath5k_rt_11g;
-	case AR5K_MODE_11G_TURBO:
-		return &ath5k_rt_xr;
-	}
-
-	return NULL;
-}
-
-/*
- * Free the ath5k_hw struct
- */
-void ath5k_hw_detach(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
-
-	if (ah->ah_rf_banks != NULL)
-		kfree(ah->ah_rf_banks);
-
-	/* assume interrupts are down */
-	kfree(ah);
-}
-
-/****************************\
-  Reset function and helpers
-\****************************/
-
-/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
- * @ah: the &struct ath5k_hw
- * @channel: the currently set channel upon reset
- *
- * Write the OFDM timings for the AR5212 upon reset. This is a helper for
- * ath5k_hw_reset(). It seems to tune the PLL to a frequency that
- * depends on the bandwidth of the channel.
- *
- */
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
-	struct ieee80211_channel *channel)
-{
-	/* Get exponent and mantissa and set it */
-	u32 coef_scaled, coef_exp, coef_man,
-		ds_coef_exp, ds_coef_man, clock;
-
-	if (!(ah->ah_version == AR5K_AR5212) ||
-		!(channel->hw_value & CHANNEL_OFDM))
-		BUG();
-
-	/* Seems there are two PLLs, one for baseband sampling and one
-	 * for tuning. The tuning baseband clock is 40 MHz, or 80 MHz
-	 * when in turbo mode. */
-	clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
-	coef_scaled = ((5 * (clock << 24)) / 2) /
-	channel->center_freq;
-
-	for (coef_exp = 31; coef_exp > 0; coef_exp--)
-		if ((coef_scaled >> coef_exp) & 0x1)
-			break;
-
-	if (!coef_exp)
-		return -EINVAL;
-
-	coef_exp = 14 - (coef_exp - 24);
-	coef_man = coef_scaled +
-		(1 << (24 - coef_exp - 1));
-	ds_coef_man = coef_man >> (24 - coef_exp);
-	ds_coef_exp = coef_exp - 16;
-
-	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
-		AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
-	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
-		AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
-
-	return 0;
-}
-
-/**
- * ath5k_hw_write_rate_duration - set rate duration during hw resets
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
- *
- * Write the rate duration table for the current mode upon hw reset. This
- * is a helper for ath5k_hw_reset(). It seems all this is doing is setting
- * an ACK timeout for the hardware for the current mode for each rate. The
- * rates which are capable of short preamble (802.11b rates 2Mbps, 5.5Mbps,
- * and 11Mbps) have another register for the short preamble ACK timeout
- * calculation.
- *
- */
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
-       unsigned int mode)
-{
-	struct ath5k_softc *sc = ah->ah_sc;
-	const struct ath5k_rate_table *rt;
-	struct ieee80211_rate srate = {};
-	unsigned int i;
-
-	/* Get rate table for the current operating mode */
-	rt = ath5k_hw_get_rate_table(ah, mode);
-
-	/* Write rate duration table */
-	for (i = 0; i < rt->rate_count; i++) {
-		const struct ath5k_rate *rate, *control_rate;
-
-		u32 reg;
-		u16 tx_time;
-
-		rate = &rt->rates[i];
-		control_rate = &rt->rates[rate->control_rate];
-
-		/* Set ACK timeout */
-		reg = AR5K_RATE_DUR(rate->rate_code);
-
-		srate.bitrate = control_rate->rate_kbps/100;
-
-		/* An ACK frame consists of 10 bytes. If you add the FCS,
-		 * which ieee80211_generic_frame_duration() adds,
-		 * it's 14 bytes. Note we use the control rate, not the
-		 * actual rate, for this calculation. See mac80211 tx.c
-		 * ieee80211_duration() for a brief description of
-		 * what rate we should choose to TX ACKs. */
-		tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
-							sc->vif, 10, &srate));
-
-		ath5k_hw_reg_write(ah, tx_time, reg);
-
-		if (!HAS_SHPREAMBLE(i))
-			continue;
-
-		/*
-		 * We're not distinguishing short preamble here;
-		 * all we'll get is a longer value, which is not
-		 * necessarily bad. We could use an exported
-		 * ieee80211_frame_duration() but that needs to be
-		 * fixed first to be properly used by mac80211 drivers:
-		 *
-		 *  - remove erp stuff and let the routine figure ofdm
-		 *    erp rates
-		 *  - remove passing argument ieee80211_local as
-		 *    drivers don't have access to it
-		 *  - move drivers using ieee80211_generic_frame_duration()
-		 *    to this
-		 */
-		ath5k_hw_reg_write(ah, tx_time,
-			reg + (AR5K_SET_SHORT_PREAMBLE << 2));
-	}
-}
-
-/*
- * Main reset function
- */
-int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
-	struct ieee80211_channel *channel, bool change_channel)
-{
-	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-	struct pci_dev *pdev = ah->ah_sc->pdev;
-	u32 data, s_seq, s_ant, s_led[3], dma_size;
-	unsigned int i, mode, freq, ee_mode, ant[2];
-	int ret;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	s_seq = 0;
-	s_ant = 0;
-	ee_mode = 0;
-	freq = 0;
-	mode = 0;
-
-	/*
-	 * Save some registers before a reset
-	 */
-	/*DCU/Antenna selection not available on 5210*/
-	if (ah->ah_version != AR5K_AR5210) {
-		if (change_channel) {
-			/* Seq number for queue 0 -do this for all queues ? */
-			s_seq = ath5k_hw_reg_read(ah,
-					AR5K_QUEUE_DFS_SEQNUM(0));
-			/*Default antenna*/
-			s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
-		}
-	}
-
-	/*GPIOs*/
-	s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
-	s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
-	s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
-
-	if (change_channel && ah->ah_rf_banks != NULL)
-		ath5k_hw_get_rf_gain(ah);
-
-
-	/*Wakeup the device*/
-	ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
-	if (ret)
-		return ret;
-
-	/*
-	 * Initialize operating mode
-	 */
-	ah->ah_op_mode = op_mode;
-
-	/*
-	 * 5111/5112 Settings
-	 * 5210 only comes with RF5110
-	 */
-	if (ah->ah_version != AR5K_AR5210) {
-		if (ah->ah_radio != AR5K_RF5111 &&
-			ah->ah_radio != AR5K_RF5112 &&
-			ah->ah_radio != AR5K_RF5413 &&
-			ah->ah_radio != AR5K_RF2413 &&
-			ah->ah_radio != AR5K_RF2425) {
-			ATH5K_ERR(ah->ah_sc,
-				"invalid phy radio: %u\n", ah->ah_radio);
-			return -EINVAL;
-		}
-
-		switch (channel->hw_value & CHANNEL_MODES) {
-		case CHANNEL_A:
-			mode = AR5K_MODE_11A;
-			freq = AR5K_INI_RFGAIN_5GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11A;
-			break;
-		case CHANNEL_G:
-			mode = AR5K_MODE_11G;
-			freq = AR5K_INI_RFGAIN_2GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11G;
-			break;
-		case CHANNEL_B:
-			mode = AR5K_MODE_11B;
-			freq = AR5K_INI_RFGAIN_2GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11B;
-			break;
-		case CHANNEL_T:
-			mode = AR5K_MODE_11A_TURBO;
-			freq = AR5K_INI_RFGAIN_5GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11A;
-			break;
-		/*Is this ok on 5211 too ?*/
-		case CHANNEL_TG:
-			mode = AR5K_MODE_11G_TURBO;
-			freq = AR5K_INI_RFGAIN_2GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11G;
-			break;
-		case CHANNEL_XR:
-			if (ah->ah_version == AR5K_AR5211) {
-				ATH5K_ERR(ah->ah_sc,
-					"XR mode not available on 5211");
-				return -EINVAL;
-			}
-			mode = AR5K_MODE_XR;
-			freq = AR5K_INI_RFGAIN_5GHZ;
-			ee_mode = AR5K_EEPROM_MODE_11A;
-			break;
-		default:
-			ATH5K_ERR(ah->ah_sc,
-				"invalid channel: %d\n", channel->center_freq);
-			return -EINVAL;
-		}
-
-		/* PHY access enable */
-		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
-
-	}
-
-	ret = ath5k_hw_write_initvals(ah, mode, change_channel);
-	if (ret)
-		return ret;
-
-	/*
-	 * 5211/5212 Specific
-	 */
-	if (ah->ah_version != AR5K_AR5210) {
-		/*
-		 * Write initial RF gain settings
-		 * This should work for both 5111/5112
-		 */
-		ret = ath5k_hw_rfgain(ah, freq);
-		if (ret)
-			return ret;
-
-		mdelay(1);
-
-		/*
-		 * Write some more initial register settings
-		 */
-		if (ah->ah_version == AR5K_AR5212) {
-			ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
-
-			if (channel->hw_value == CHANNEL_G)
-				if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
-					ath5k_hw_reg_write(ah, 0x00f80d80,
-								0x994c);
-				else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
-					ath5k_hw_reg_write(ah, 0x00380140,
-								0x994c);
-				else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
-					ath5k_hw_reg_write(ah, 0x00fc0ec0,
-								0x994c);
-				else /* 2425 */
-					ath5k_hw_reg_write(ah, 0x00fc0fc0,
-								0x994c);
-			else
-				ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
-
-			/* Some bits are disabled here; we know nothing about
-			 * register 0xa228 yet. Most of the time this ends up
-			 * with a value of 0x9b5 (haven't seen any dump with
-			 * a different value) */
-			/* Got this from decompiling binary HAL */
-			data = ath5k_hw_reg_read(ah, 0xa228);
-			data &= 0xfffffdff;
-			ath5k_hw_reg_write(ah, data, 0xa228);
-
-			data = ath5k_hw_reg_read(ah, 0xa228);
-			data &= 0xfffe03ff;
-			ath5k_hw_reg_write(ah, data, 0xa228);
-			data = 0;
-
-			/* Just write 0x9b5 ? */
-			/* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
-			ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
-			ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
-			ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
-		}
-
-		/* Fix for first revision of the RF5112 RF chipset */
-		if (ah->ah_radio >= AR5K_RF5112 &&
-				ah->ah_radio_5ghz_revision <
-				AR5K_SREV_RAD_5112A) {
-			ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
-					AR5K_PHY_CCKTXCTL);
-			if (channel->hw_value & CHANNEL_5GHZ)
-				data = 0xffb81020;
-			else
-				data = 0xffb80d20;
-			ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
-			data = 0;
-		}
-
-		/*
-		 * Set TX power (FIXME)
-		 */
-		ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
-		if (ret)
-			return ret;
-
-		/* Write rate duration table only on AR5212 and if
-		 * virtual interface has already been brought up
-		 * XXX: rethink this after new mode changes to
-		 * mac80211 are integrated */
-		if (ah->ah_version == AR5K_AR5212 &&
-			ah->ah_sc->vif != NULL)
-			ath5k_hw_write_rate_duration(ah, mode);
-
-		/*
-		 * Write RF registers
-		 */
-		ret = ath5k_hw_rfregs(ah, channel, mode);
-		if (ret)
-			return ret;
-
-		/*
-		 * Configure additional registers
-		 */
-
-		/* Write OFDM timings on 5212*/
-		if (ah->ah_version == AR5K_AR5212 &&
-			channel->hw_value & CHANNEL_OFDM) {
-			ret = ath5k_hw_write_ofdm_timings(ah, channel);
-			if (ret)
-				return ret;
-		}
-
-		/*Enable/disable 802.11b mode on 5111
-		(enable 2111 frequency converter + CCK)*/
-		if (ah->ah_radio == AR5K_RF5111) {
-			if (mode == AR5K_MODE_11B)
-				AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
-				    AR5K_TXCFG_B_MODE);
-			else
-				AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
-				    AR5K_TXCFG_B_MODE);
-		}
-
-		/*
-		 * Set channel and calibrate the PHY
-		 */
-		ret = ath5k_hw_channel(ah, channel);
-		if (ret)
-			return ret;
-
-		/* Set antenna mode */
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
-			ah->ah_antenna[ee_mode][0], 0xfffffc06);
-
-		/*
-		 * In case a fixed antenna was set as default
-		 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
-		 * registers.
-		 */
-		if (s_ant != 0){
-			if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
-				ant[0] = ant[1] = AR5K_ANT_FIXED_A;
-			else	/* 2 - Aux */
-				ant[0] = ant[1] = AR5K_ANT_FIXED_B;
-		} else {
-			ant[0] = AR5K_ANT_FIXED_A;
-			ant[1] = AR5K_ANT_FIXED_B;
-		}
-
-		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
-			AR5K_PHY_ANT_SWITCH_TABLE_0);
-		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
-			AR5K_PHY_ANT_SWITCH_TABLE_1);
-
-		/* Commit values from EEPROM */
-		if (ah->ah_radio == AR5K_RF5111)
-			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
-			    AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);
-
-		ath5k_hw_reg_write(ah,
-			AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
-			AR5K_PHY_NFTHRES);
-
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
-			(ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
-			0xffffc07f);
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
-			(ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
-			0xfffc0fff);
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
-			(ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
-			((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
-			0xffff0000);
-
-		ath5k_hw_reg_write(ah,
-			(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
-			(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
-			(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
-			(ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
-
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
-			ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
-			(ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
-		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
-
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
-		    AR5K_PHY_IQ_CORR_ENABLE |
-		    (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
-		    ee->ee_q_cal[ee_mode]);
-
-		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
-			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
-				AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
-				ee->ee_margin_tx_rx[ee_mode]);
-
-	} else {
-		mdelay(1);
-		/* Disable phy and wait */
-		ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
-		mdelay(1);
-	}
-
-	/*
-	 * Restore saved values
-	 */
-	/*DCU/Antenna selection not available on 5210*/
-	if (ah->ah_version != AR5K_AR5210) {
-		ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
-		ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
-	}
-	AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
-	ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
-	ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
-
-	/*
-	 * Misc
-	 */
-	/* XXX: add ah->aid once mac80211 gives this to us */
-	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
-
-	ath5k_hw_set_opmode(ah);
-	/*PISR/SISR Not available on 5210*/
-	if (ah->ah_version != AR5K_AR5210) {
-		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
-		/* If we later allow tuning for this, store into sc structure */
-		data = AR5K_TUNE_RSSI_THRES |
-			AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
-		ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
-	}
-
-	/*
-	 * Set Rx/Tx DMA Configuration
-	 *
-	 * Set maximum DMA size (512) except for PCI-E cards, since
-	 * there it causes rx overruns and tx errors (tested on 5424, but
-	 * since rx overruns also occur on 5416/5418 with madwifi we set
-	 * 128 for all PCI-E cards to be safe).
-	 *
-	 * In dumps this is 128 for all chips.
-	 *
-	 * XXX: need to check 5210 for this
-	 * TODO: Check out the tx trigger level, it's always 64 in dumps but I
-	 * guess we can tweak it and see how it goes ;-)
-	 */
-	dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
-	if (ah->ah_version != AR5K_AR5210) {
-		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
-			AR5K_TXCFG_SDMAMR, dma_size);
-		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
-			AR5K_RXCFG_SDMAMW, dma_size);
-	}
-
-	/*
-	 * Enable the PHY and wait until completion
-	 */
-	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
-
-	/*
-	 * On 5211+ read activation -> rx delay
-	 * and use it.
-	 */
-	if (ah->ah_version != AR5K_AR5210) {
-		data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
-			AR5K_PHY_RX_DELAY_M;
-		data = (channel->hw_value & CHANNEL_CCK) ?
-			((data << 2) / 22) : (data / 10);
-
-		udelay(100 + (2 * data));
-		data = 0;
-	} else {
-		mdelay(1);
-	}
-
-	/*
-	 * Perform ADC test (?)
-	 */
-	data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
-	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
-	for (i = 0; i <= 20; i++) {
-		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
-			break;
-		udelay(200);
-	}
-	ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
-	data = 0;
-
-	/*
-	 * Start automatic gain calibration
-	 *
-	 * During AGC calibration RX path is re-routed to
-	 * a signal detector so we don't receive anything.
-	 *
-	 * This method is used to calibrate some static offsets
-	 * used together with the on-the-fly I/Q calibration (the
-	 * one performed via ath5k_hw_phy_calibrate), which doesn't
-	 * interrupt the rx path.
-	 *
-	 * If we are in a noisy environment AGC calibration may time
-	 * out.
-	 */
-	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
-				AR5K_PHY_AGCCTL_CAL);
-
-	/* At the same time start I/Q calibration for QAM constellation
-	 * -no need for CCK- */
-	ah->ah_calibration = false;
-	if (!(mode == AR5K_MODE_11B)) {
-		ah->ah_calibration = true;
-		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
-				AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
-				AR5K_PHY_IQ_RUN);
-	}
-
-	/* Wait for gain calibration to finish (we check for I/Q calibration
-	 * during ath5k_phy_calibrate) */
-	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
-			AR5K_PHY_AGCCTL_CAL, 0, false)) {
-		ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
-			channel->center_freq);
-		return -EAGAIN;
-	}
-
-	/*
-	 * Start noise floor calibration
-	 *
-	 * If we run NF calibration before AGC, it always times out.
-	 * Binary HAL starts NF and AGC calibration at the same time
-	 * and only waits for AGC to finish. I believe that's wrong because
-	 * during NF calibration, rx path is also routed to a detector, so if
-	 * it doesn't finish we won't have RX.
-	 *
-	 * XXX: Find an interval that's OK for all cards...
-	 */
-	ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
-	if (ret)
-		return ret;
-
-	/*
-	 * Reset queues and start beacon timers at the end of the reset routine
-	 */
-	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
-		/*No QCU on 5210*/
-		if (ah->ah_version != AR5K_AR5210)
-			AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);
-
-		ret = ath5k_hw_reset_tx_queue(ah, i);
-		if (ret) {
-			ATH5K_ERR(ah->ah_sc,
-				"failed to reset TX queue #%d\n", i);
-			return ret;
-		}
-	}
-
-	/* Pre-enable interrupts on 5211/5212*/
-	if (ah->ah_version != AR5K_AR5210)
-		ath5k_hw_set_intr(ah, AR5K_INT_RX | AR5K_INT_TX |
-				AR5K_INT_FATAL);
-
-	/*
-	 * Set RF kill flags if supported by the device (read from the EEPROM)
-	 * Disable gpio_intr for now since it results in a system hang.
-	 * TODO: Handle this in ath5k_intr
-	 */
-#if 0
-	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
-		ath5k_hw_set_gpio_input(ah, 0);
-		ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
-		if (ah->ah_gpio[0] == 0)
-			ath5k_hw_set_gpio_intr(ah, 0, 1);
-		else
-			ath5k_hw_set_gpio_intr(ah, 0, 0);
-	}
-#endif
-
-	/*
-	 * Set the 32MHz reference clock on 5212 phy clock sleep register
-	 *
-	 * TODO: Find out how to switch to external 32Khz clock to save power
-	 */
-	if (ah->ah_version == AR5K_AR5212) {
-		ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
-		ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
-		ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
-		ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
-		ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
-		ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
-
-		data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
-		data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
-						0x00000f80 : 0x00001380 ;
-		ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
-		data = 0;
-	}
-
-	if (ah->ah_version == AR5K_AR5212) {
-		ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
-		ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
-		ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
-		if (ah->ah_mac_srev >= AR5K_SREV_VER_AR2413)
-			ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
-	}
-
-	/*
-	 * Disable beacons and reset the register
-	 */
-	AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
-			AR5K_BEACON_RESET_TSF);
-
-	return 0;
-}
-
-/*
- * Reset chipset
- */
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
-{
-	int ret;
-	u32 mask = val ? val : ~0U;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* Read-and-clear RX Descriptor Pointer*/
-	ath5k_hw_reg_read(ah, AR5K_RXDP);
-
-	/*
-	 * Reset the device and wait until success
-	 */
-	ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
-
-	/* Wait at least 128 PCI clocks */
-	udelay(15);
-
-	if (ah->ah_version == AR5K_AR5210) {
-		val &= AR5K_RESET_CTL_CHIP;
-		mask &= AR5K_RESET_CTL_CHIP;
-	} else {
-		val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
-		mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
-	}
-
-	ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
-
-	/*
-	 * Reset configuration register (for hw byte-swap). Note that this
-	 * is only set for big endian. We do the necessary magic in
-	 * AR5K_INIT_CFG.
-	 */
-	if ((val & AR5K_RESET_CTL_PCU) == 0)
-		ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
-
-	return ret;
-}
-
-/*
- * Power management functions
- */
-
-/*
- * Sleep control
- */
-int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
-		bool set_chip, u16 sleep_duration)
-{
-	unsigned int i;
-	u32 staid, data;
-
-	ATH5K_TRACE(ah->ah_sc);
-	staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
-
-	switch (mode) {
-	case AR5K_PM_AUTO:
-		staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
-		/* fallthrough */
-	case AR5K_PM_NETWORK_SLEEP:
-		if (set_chip)
-			ath5k_hw_reg_write(ah,
-				AR5K_SLEEP_CTL_SLE_ALLOW |
-				sleep_duration,
-				AR5K_SLEEP_CTL);
-
-		staid |= AR5K_STA_ID1_PWR_SV;
-		break;
-
-	case AR5K_PM_FULL_SLEEP:
-		if (set_chip)
-			ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
-				AR5K_SLEEP_CTL);
-
-		staid |= AR5K_STA_ID1_PWR_SV;
-		break;
-
-	case AR5K_PM_AWAKE:
-
-		staid &= ~AR5K_STA_ID1_PWR_SV;
-
-		if (!set_chip)
-			goto commit;
-
-		/* Preserve sleep duration */
-		data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
-		if (data & 0xffc00000)
-			data = 0;
-		else
-			data &= 0xfffcffff;
-
-		ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
-		udelay(15);
-
-		for (i = 50; i > 0; i--) {
-			/* Check if the chip did wake up */
-			if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
-					AR5K_PCICFG_SPWR_DN) == 0)
-				break;
-
-			/* Wait a bit and retry */
-			udelay(200);
-			ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
-		}
-
-		/* Fail if the chip didn't wake up */
-		if (i <= 0)
-			return -EIO;
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-commit:
-	ah->ah_power_mode = mode;
-	ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
-
-	return 0;
-}
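-
-/*
- * A minimal usage sketch for the sleep control above (an illustration only,
- * assuming a caller that wakes the chip before touching registers and then
- * allows network sleep again; sleep_duration is simply left at 0 here):
- */
-#if 0
-static int example_wake_then_sleep(struct ath5k_hw *ah)
-{
-	int ret;
-
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
-	if (ret)
-		return ret;
-
-	/* ... access registers while the chip is awake ... */
-
-	return ath5k_hw_set_power(ah, AR5K_PM_NETWORK_SLEEP, true, 0);
-}
-#endif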
-
-/***********************\
-  DMA Related Functions
-\***********************/
-
-/*
- * Receive functions
- */
-
-/*
- * Start DMA receive
- */
-void ath5k_hw_start_rx(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
-	ath5k_hw_reg_read(ah, AR5K_CR);
-}
-
-/*
- * Stop DMA receive
- */
-int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
-{
-	unsigned int i;
-
-	ATH5K_TRACE(ah->ah_sc);
-	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
-
-	/*
-	 * It may take some time to disable the DMA receive unit
-	 */
-	for (i = 2000; i > 0 &&
-			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
-			i--)
-		udelay(10);
-
-	return i ? 0 : -EBUSY;
-}
-
-/*
- * Get the address of the RX Descriptor
- */
-u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah)
-{
-	return ath5k_hw_reg_read(ah, AR5K_RXDP);
-}
-
-/*
- * Set the address of the RX Descriptor
- */
-void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*TODO:Shouldn't we check if RX is enabled first ?*/
-	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
-}
-
-/*
- * Transmit functions
- */
-
-/*
- * Start DMA transmit for a specific queue
- * (see also QCU/DCU functions)
- */
-int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
-{
-	u32 tx_queue;
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	/* Return if queue is declared inactive */
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return -EIO;
-
-	if (ah->ah_version == AR5K_AR5210) {
-		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
-
-		/*
-		 * Set the queue by type on 5210
-		 */
-		switch (ah->ah_txq[queue].tqi_type) {
-		case AR5K_TX_QUEUE_DATA:
-			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
-			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
-					AR5K_BSR);
-			break;
-		case AR5K_TX_QUEUE_CAB:
-			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
-			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
-				AR5K_BCR_BDMAE, AR5K_BSR);
-			break;
-		default:
-			return -EINVAL;
-		}
-		/* Start queue */
-		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
-		ath5k_hw_reg_read(ah, AR5K_CR);
-	} else {
-		/* Return if queue is disabled */
-		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
-			return -EIO;
-
-		/* Start queue */
-		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
-	}
-
-	return 0;
-}
-
-/*
- * Stop DMA transmit for a specific queue
- * (see also QCU/DCU functions)
- */
-int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
-{
-	unsigned int i = 100;
-	u32 tx_queue, pending;
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	/* Return if queue is declared inactive */
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return -EIO;
-
-	if (ah->ah_version == AR5K_AR5210) {
-		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
-
-		/*
-		 * Set by queue type
-		 */
-		switch (ah->ah_txq[queue].tqi_type) {
-		case AR5K_TX_QUEUE_DATA:
-			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-		case AR5K_TX_QUEUE_CAB:
-			/* XXX Fix me... */
-			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
-			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
-			break;
-		default:
-			return -EINVAL;
-		}
-
-		/* Stop queue */
-		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
-		ath5k_hw_reg_read(ah, AR5K_CR);
-	} else {
-		/*
-		 * Schedule TX disable and wait until queue is empty
-		 */
-		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
-
-		/*Check for pending frames*/
-		do {
-			pending = ath5k_hw_reg_read(ah,
-				AR5K_QUEUE_STATUS(queue)) &
-				AR5K_QCU_STS_FRMPENDCNT;
-			udelay(100);
-		} while (--i && pending);
-
-		/* Clear register */
-		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
-		if (pending)
-			return -EBUSY;
-	}
-
-	/* TODO: Check for success else return error */
-	return 0;
-}
-
-/*
- * Get the address of the TX Descriptor for a specific queue
- * (see also QCU/DCU functions)
- */
-u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue)
-{
-	u16 tx_reg;
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	/*
-	 * Get the transmit queue descriptor pointer from the selected queue
-	 */
-	/*5210 doesn't have QCU*/
-	if (ah->ah_version == AR5K_AR5210) {
-		switch (ah->ah_txq[queue].tqi_type) {
-		case AR5K_TX_QUEUE_DATA:
-			tx_reg = AR5K_NOQCU_TXDP0;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-		case AR5K_TX_QUEUE_CAB:
-			tx_reg = AR5K_NOQCU_TXDP1;
-			break;
-		default:
-			return 0xffffffff;
-		}
-	} else {
-		tx_reg = AR5K_QUEUE_TXDP(queue);
-	}
-
-	return ath5k_hw_reg_read(ah, tx_reg);
-}
-
-/*
- * Set the address of the TX Descriptor for a specific queue
- * (see also QCU/DCU functions)
- */
-int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
-{
-	u16 tx_reg;
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	/*
-	 * Set the transmit queue descriptor pointer register by type
-	 * on 5210
-	 */
-	if (ah->ah_version == AR5K_AR5210) {
-		switch (ah->ah_txq[queue].tqi_type) {
-		case AR5K_TX_QUEUE_DATA:
-			tx_reg = AR5K_NOQCU_TXDP0;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-		case AR5K_TX_QUEUE_CAB:
-			tx_reg = AR5K_NOQCU_TXDP1;
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else {
-		/*
-		 * Set the transmit queue descriptor pointer for
-		 * the selected queue on QCU for 5211+
-		 * (this won't work if the queue is still active)
-		 */
-		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
-			return -EIO;
-
-		tx_reg = AR5K_QUEUE_TXDP(queue);
-	}
-
-	/* Set descriptor pointer */
-	ath5k_hw_reg_write(ah, phys_addr, tx_reg);
-
-	return 0;
-}
-
-/*
- * Update tx trigger level
- */
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
-{
-	u32 trigger_level, imr;
-	int ret = -EIO;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*
-	 * Disable interrupts by setting the mask
-	 */
-	imr = ath5k_hw_set_intr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
-
-	/*TODO: Boundary check on trigger_level*/
-	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
-			AR5K_TXCFG_TXFULL);
-
-	if (!increase) {
-		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
-			goto done;
-	} else
-		trigger_level +=
-			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
-
-	/*
-	 * Update trigger level on success
-	 */
-	if (ah->ah_version == AR5K_AR5210)
-		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
-	else
-		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
-				AR5K_TXCFG_TXFULL, trigger_level);
-
-	ret = 0;
-
-done:
-	/*
-	 * Restore interrupt mask
-	 */
-	ath5k_hw_set_intr(ah, imr);
-
-	return ret;
-}
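-
-/*
- * A minimal usage sketch (an assumption about the caller, not driver code):
- * raise the trigger level when the interrupt handler sees a TX underrun,
- * otherwise let it shrink back towards the minimum.
- */
-#if 0
-static void example_handle_tx_trig(struct ath5k_hw *ah, bool tx_underrun)
-{
-	if (ath5k_hw_update_tx_triglevel(ah, tx_underrun))
-		ATH5K_ERR(ah->ah_sc, "couldn't update tx trigger level\n");
-}
-#endif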
-
-/*
- * Interrupt handling
- */
-
-/*
- * Check if we have pending interrupts
- */
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	return ath5k_hw_reg_read(ah, AR5K_INTPEND);
-}
-
-/*
- * Get interrupt mask (ISR)
- */
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
-{
-	u32 data;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*
-	 * Read interrupt status from the Interrupt Status register
-	 * on 5210
-	 */
-	if (ah->ah_version == AR5K_AR5210) {
-		data = ath5k_hw_reg_read(ah, AR5K_ISR);
-		if (unlikely(data == AR5K_INT_NOCARD)) {
-			*interrupt_mask = data;
-			return -ENODEV;
-		}
-	} else {
-		/*
-		 * Read interrupt status from the Read-And-Clear shadow register
-		 * Note: PISR/SISR Not available on 5210
-		 */
-		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
-	}
-
-	/*
-	 * Get abstract interrupt mask (driver-compatible)
-	 */
-	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
-
-	if (unlikely(data == AR5K_INT_NOCARD))
-		return -ENODEV;
-
-	if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
-		*interrupt_mask |= AR5K_INT_RX;
-
-	if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
-		| AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
-		*interrupt_mask |= AR5K_INT_TX;
-
-	if (ah->ah_version != AR5K_AR5210) {
-		/*HIU = Host Interface Unit (PCI etc)*/
-		if (unlikely(data & (AR5K_ISR_HIUERR)))
-			*interrupt_mask |= AR5K_INT_FATAL;
-
-		/*Beacon Not Ready*/
-		if (unlikely(data & (AR5K_ISR_BNR)))
-			*interrupt_mask |= AR5K_INT_BNR;
-	}
-
-	/*
-	 * XXX: BMISS interrupts may occur after association.
-	 * I found this on 5210 code but it needs testing. If this is
-	 * true we should disable them before assoc and re-enable them
-	 * after a successful assoc + some jiffies.
-	 */
-#if 0
-	interrupt_mask &= ~AR5K_INT_BMISS;
-#endif
-
-	/*
-	 * In case we didn't handle anything,
-	 * print the register value.
-	 */
-	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
-		ATH5K_PRINTF("0x%08x\n", data);
-
-	return 0;
-}
-
-/*
- * Set interrupt mask
- */
-enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
-{
-	enum ath5k_int old_mask, int_mask;
-
-	/*
-	 * Disable card interrupts to prevent any race conditions
-	 * (they will be re-enabled afterwards).
-	 */
-	ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
-	ath5k_hw_reg_read(ah, AR5K_IER);
-
-	old_mask = ah->ah_imr;
-
-	/*
-	 * Add additional, chipset-dependent interrupt mask flags
-	 * and write them to the IMR (interrupt mask register).
-	 */
-	int_mask = new_mask & AR5K_INT_COMMON;
-
-	if (new_mask & AR5K_INT_RX)
-		int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
-			AR5K_IMR_RXDESC;
-
-	if (new_mask & AR5K_INT_TX)
-		int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
-			AR5K_IMR_TXURN;
-
-	if (ah->ah_version != AR5K_AR5210) {
-		if (new_mask & AR5K_INT_FATAL) {
-			int_mask |= AR5K_IMR_HIUERR;
-			AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
-					AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
-		}
-	}
-
-	ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
-
-	/* Store new interrupt mask */
-	ah->ah_imr = new_mask;
-
-	/* ..re-enable interrupts */
-	ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
-	ath5k_hw_reg_read(ah, AR5K_IER);
-
-	return old_mask;
-}
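-
-/*
- * A minimal sketch of the save/modify/restore pattern this helper allows
- * (the same idea ath5k_hw_update_tx_triglevel uses above); the register
- * work in the middle is just a placeholder.
- */
-#if 0
-static void example_with_intr_masked(struct ath5k_hw *ah)
-{
-	enum ath5k_int old_mask;
-
-	/* Mask everything, remembering the previous mask */
-	old_mask = ath5k_hw_set_intr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
-
-	/* ... touch registers that must not race with the ISR ... */
-
-	/* Restore the previous mask */
-	ath5k_hw_set_intr(ah, old_mask);
-}
-#endif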
-
-
-/*************************\
-  EEPROM access functions
-\*************************/
-
-/*
- * Read from eeprom
- */
-static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
-{
-	u32 status, timeout;
-
-	ATH5K_TRACE(ah->ah_sc);
-	/*
-	 * Initialize EEPROM access
-	 */
-	if (ah->ah_version == AR5K_AR5210) {
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
-		(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
-	} else {
-		ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
-		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
-				AR5K_EEPROM_CMD_READ);
-	}
-
-	for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
-		status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
-		if (status & AR5K_EEPROM_STAT_RDDONE) {
-			if (status & AR5K_EEPROM_STAT_RDERR)
-				return -EIO;
-			*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
-					0xffff);
-			return 0;
-		}
-		udelay(15);
-	}
-
-	return -ETIMEDOUT;
-}
-
-/*
- * Write to eeprom - currently disabled, use at your own risk
- */
-#if 0
-static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data)
-{
-
-	u32 status, timeout;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*
-	 * Initialize eeprom access
-	 */
-
-	if (ah->ah_version == AR5K_AR5210) {
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
-	} else {
-		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
-				AR5K_EEPROM_CMD_RESET);
-	}
-
-	/*
-	 * Write data to data register
-	 */
-
-	if (ah->ah_version == AR5K_AR5210) {
-		ath5k_hw_reg_write(ah, data, AR5K_EEPROM_BASE + (4 * offset));
-	} else {
-		ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
-		ath5k_hw_reg_write(ah, data, AR5K_EEPROM_DATA);
-		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
-				AR5K_EEPROM_CMD_WRITE);
-	}
-
-	/*
-	 * Check status
-	 */
-
-	for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
-		status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
-		if (status & AR5K_EEPROM_STAT_WRDONE) {
-			if (status & AR5K_EEPROM_STAT_WRERR)
-				return -EIO;
-			return 0;
-		}
-		udelay(15);
-	}
-
-	ATH5K_ERR(ah->ah_sc, "EEPROM Write is disabled!");
-	return -EIO;
-}
-#endif
-
-/*
- * Translate binary channel representation in EEPROM to frequency
- */
-static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin, unsigned int mode)
-{
-	u16 val;
-
-	if (bin == AR5K_EEPROM_CHANNEL_DIS)
-		return bin;
-
-	if (mode == AR5K_EEPROM_MODE_11A) {
-		if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
-			val = (5 * bin) + 4800;
-		else
-			val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
-				(bin * 10) + 5100;
-	} else {
-		if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
-			val = bin + 2300;
-		else
-			val = bin + 2400;
-	}
-
-	return val;
-}
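-
-/*
- * A short illustration of the mapping above (assuming a post-3.2 EEPROM):
- * an 11a bin of 40 gives 5 * 40 + 4800 = 5000MHz and a 2GHz-mode bin of
- * 112 gives 112 + 2300 = 2412MHz.
- */
-#if 0
-static void example_bin2freq(struct ath5k_hw *ah)
-{
-	u16 freq_a = ath5k_eeprom_bin2freq(ah, 40, AR5K_EEPROM_MODE_11A);
-	u16 freq_g = ath5k_eeprom_bin2freq(ah, 112, AR5K_EEPROM_MODE_11G);
-
-	ATH5K_PRINTF("11a bin 40 -> %uMHz, 11g bin 112 -> %uMHz\n",
-			freq_a, freq_g);
-}
-#endif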
-
-/*
- * Read antenna infos from eeprom
- */
-static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
-		unsigned int mode)
-{
-	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-	u32 o = *offset;
-	u16 val;
-	int ret, i = 0;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_switch_settling[mode]	= (val >> 8) & 0x7f;
-	ee->ee_ant_tx_rx[mode]		= (val >> 2) & 0x3f;
-	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
-	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
-	ee->ee_ant_control[mode][i++]	= val & 0x3f;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_ant_control[mode][i++]	= (val >> 10) & 0x3f;
-	ee->ee_ant_control[mode][i++]	= (val >> 4) & 0x3f;
-	ee->ee_ant_control[mode][i]	= (val << 2) & 0x3f;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_ant_control[mode][i++]	|= (val >> 14) & 0x3;
-	ee->ee_ant_control[mode][i++]	= (val >> 8) & 0x3f;
-	ee->ee_ant_control[mode][i++]	= (val >> 2) & 0x3f;
-	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
-	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
-	ee->ee_ant_control[mode][i++]	= val & 0x3f;
-
-	/* Get antenna modes */
-	ah->ah_antenna[mode][0] =
-	    (ee->ee_ant_control[mode][0] << 4) | 0x1;
-	ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
-	     ee->ee_ant_control[mode][1] 	|
-	    (ee->ee_ant_control[mode][2] << 6) 	|
-	    (ee->ee_ant_control[mode][3] << 12) |
-	    (ee->ee_ant_control[mode][4] << 18) |
-	    (ee->ee_ant_control[mode][5] << 24);
-	ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
-	     ee->ee_ant_control[mode][6] 	|
-	    (ee->ee_ant_control[mode][7] << 6) 	|
-	    (ee->ee_ant_control[mode][8] << 12) |
-	    (ee->ee_ant_control[mode][9] << 18) |
-	    (ee->ee_ant_control[mode][10] << 24);
-
-	/* return new offset */
-	*offset = o;
-
-	return 0;
-}
-
-/*
- * Read supported modes from eeprom
- */
-static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
-		unsigned int mode)
-{
-	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-	u32 o = *offset;
-	u16 val;
-	int ret;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_tx_end2xlna_enable[mode]	= (val >> 8) & 0xff;
-	ee->ee_thr_62[mode]		= val & 0xff;
-
-	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
-		ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_tx_end2xpa_disable[mode]	= (val >> 8) & 0xff;
-	ee->ee_tx_frm2xpa_enable[mode]	= val & 0xff;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_pga_desired_size[mode]	= (val >> 8) & 0xff;
-
-	if ((val & 0xff) & 0x80)
-		ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
-	else
-		ee->ee_noise_floor_thr[mode] = val & 0xff;
-
-	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
-		ee->ee_noise_floor_thr[mode] =
-		    mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
-
-	AR5K_EEPROM_READ(o++, val);
-	ee->ee_xlna_gain[mode]		= (val >> 5) & 0xff;
-	ee->ee_x_gain[mode]		= (val >> 1) & 0xf;
-	ee->ee_xpd[mode]		= val & 0x1;
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
-		ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
-		AR5K_EEPROM_READ(o++, val);
-		ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
-
-		if (mode == AR5K_EEPROM_MODE_11A)
-			ee->ee_xr_power[mode] = val & 0x3f;
-		else {
-			ee->ee_ob[mode][0] = val & 0x7;
-			ee->ee_db[mode][0] = (val >> 3) & 0x7;
-		}
-	}
-
-	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
-		ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
-		ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
-	} else {
-		ee->ee_i_gain[mode] = (val >> 13) & 0x7;
-
-		AR5K_EEPROM_READ(o++, val);
-		ee->ee_i_gain[mode] |= (val << 3) & 0x38;
-
-		if (mode == AR5K_EEPROM_MODE_11G)
-			ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
-	}
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
-			mode == AR5K_EEPROM_MODE_11A) {
-		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
-		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
-	}
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
-	    mode == AR5K_EEPROM_MODE_11G)
-		ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
-
-	/* return new offset */
-	*offset = o;
-
-	return 0;
-}
-
-/*
- * Initialize eeprom & capabilities structs
- */
-static int ath5k_eeprom_init(struct ath5k_hw *ah)
-{
-	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-	unsigned int mode, i;
-	int ret;
-	u32 offset;
-	u16 val;
-
-	/* Initial TX thermal adjustment values */
-	ee->ee_tx_clip = 4;
-	ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
-	ee->ee_gain_select = 1;
-
-	/*
-	 * Read values from EEPROM and store them in the capability structure
-	 */
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
-
-	/* Return if we have an old EEPROM */
-	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
-		return 0;
-
-#ifdef notyet
-	/*
-	 * Validate the checksum of the EEPROM data. There are some
-	 * devices with invalid EEPROMs.
-	 */
-	for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
-		AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
-		cksum ^= val;
-	}
-	if (cksum != AR5K_EEPROM_INFO_CKSUM) {
-		ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
-		return -EIO;
-	}
-#endif
-
-	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
-	    ee_ant_gain);
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
-		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
-		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
-	}
-
-	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
-		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
-		ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
-		ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
-
-		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
-		ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
-		ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
-	}
-
-	/*
-	 * Get conformance test limit values
-	 */
-	offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
-	ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
-
-	for (i = 0; i < ee->ee_ctls; i++) {
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_ctl[i] = (val >> 8) & 0xff;
-		ee->ee_ctl[i + 1] = val & 0xff;
-	}
-
-	/*
-	 * Get values for 802.11a (5GHz)
-	 */
-	mode = AR5K_EEPROM_MODE_11A;
-
-	ee->ee_turbo_max_power[mode] =
-			AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
-
-	offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
-
-	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	AR5K_EEPROM_READ(offset++, val);
-	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
-	ee->ee_ob[mode][3]		= (val >> 5) & 0x7;
-	ee->ee_db[mode][3]		= (val >> 2) & 0x7;
-	ee->ee_ob[mode][2]		= (val << 1) & 0x7;
-
-	AR5K_EEPROM_READ(offset++, val);
-	ee->ee_ob[mode][2]		|= (val >> 15) & 0x1;
-	ee->ee_db[mode][2]		= (val >> 12) & 0x7;
-	ee->ee_ob[mode][1]		= (val >> 9) & 0x7;
-	ee->ee_db[mode][1]		= (val >> 6) & 0x7;
-	ee->ee_ob[mode][0]		= (val >> 3) & 0x7;
-	ee->ee_db[mode][0]		= val & 0x7;
-
-	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_margin_tx_rx[mode] = val & 0x3f;
-	}
-
-	/*
-	 * Get values for 802.11b (2.4GHz)
-	 */
-	mode = AR5K_EEPROM_MODE_11B;
-	offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
-
-	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	AR5K_EEPROM_READ(offset++, val);
-	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
-	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
-	ee->ee_db[mode][1]		= val & 0x7;
-
-	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_cal_pier[mode][0] =
-			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
-		ee->ee_cal_pier[mode][1] =
-			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
-
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_cal_pier[mode][2] =
-			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
-	}
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
-		ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
-
-	/*
-	 * Get values for 802.11g (2.4GHz)
-	 */
-	mode = AR5K_EEPROM_MODE_11G;
-	offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
-
-	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	AR5K_EEPROM_READ(offset++, val);
-	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
-	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
-	ee->ee_db[mode][1]		= val & 0x7;
-
-	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
-	if (ret)
-		return ret;
-
-	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_cal_pier[mode][0] =
-			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
-		ee->ee_cal_pier[mode][1] =
-			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
-
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_turbo_max_power[mode] = val & 0x7f;
-		ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
-
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_cal_pier[mode][2] =
-			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
-
-		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
-			ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
-
-		AR5K_EEPROM_READ(offset++, val);
-		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
-		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
-
-		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
-			AR5K_EEPROM_READ(offset++, val);
-			ee->ee_cck_ofdm_gain_delta = val & 0xff;
-		}
-	}
-
-	/*
-	 * Read 5GHz EEPROM channels
-	 */
-
-	return 0;
-}
-
-/*
- * Read the MAC address from eeprom
- */
-static int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
-{
-	u8 mac_d[ETH_ALEN];
-	u32 total, offset;
-	u16 data;
-	int octet, ret;
-
-	memset(mac, 0, ETH_ALEN);
-	memset(mac_d, 0, ETH_ALEN);
-
-	ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
-	if (ret)
-		return ret;
-
-	for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
-		ret = ath5k_hw_eeprom_read(ah, offset, &data);
-		if (ret)
-			return ret;
-
-		total += data;
-		mac_d[octet + 1] = data & 0xff;
-		mac_d[octet] = data >> 8;
-		octet += 2;
-	}
-
-	memcpy(mac, mac_d, ETH_ALEN);
-
-	if (!total || total == 3 * 0xffff)
-		return -EINVAL;
-
-	return 0;
-}
-
-/*
- * Fill the capabilities struct
- */
-static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
-{
-	u16 ee_header;
-
-	ATH5K_TRACE(ah->ah_sc);
-	/* Capabilities stored in the EEPROM */
-	ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
-
-	if (ah->ah_version == AR5K_AR5210) {
-		/*
-		 * Set radio capabilities
-		 * (The AR5110 only supports the middle 5GHz band)
-		 */
-		ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
-		ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
-		ah->ah_capabilities.cap_range.range_2ghz_min = 0;
-		ah->ah_capabilities.cap_range.range_2ghz_max = 0;
-
-		/* Set supported modes */
-		__set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
-		__set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
-	} else {
-		/*
-		 * XXX The transceiver supports frequencies from 4920 to 6100MHz
-		 * XXX and from 2312 to 2732MHz. There are problems with the
-		 * XXX current ieee80211 implementation because the IEEE
-		 * XXX channel mapping does not support negative channel
-		 * XXX numbers (2312MHz is channel -19). Of course, this
-		 * XXX doesn't matter because these channels are out of range,
-		 * XXX but some regulatory domains like MKK (Japan) will
-		 * XXX support frequencies somewhere around 4.8GHz.
-		 */
-
-		/*
-		 * Set radio capabilities
-		 */
-
-		if (AR5K_EEPROM_HDR_11A(ee_header)) {
-			ah->ah_capabilities.cap_range.range_5ghz_min = 5005; /* 4920 */
-			ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
-
-			/* Set supported modes */
-			__set_bit(AR5K_MODE_11A,
-					ah->ah_capabilities.cap_mode);
-			__set_bit(AR5K_MODE_11A_TURBO,
-					ah->ah_capabilities.cap_mode);
-			if (ah->ah_version == AR5K_AR5212)
-				__set_bit(AR5K_MODE_11G_TURBO,
-						ah->ah_capabilities.cap_mode);
-		}
-
-		/* Enable  802.11b if a 2GHz capable radio (2111/5112) is
-		 * connected */
-		if (AR5K_EEPROM_HDR_11B(ee_header) ||
-				AR5K_EEPROM_HDR_11G(ee_header)) {
-			ah->ah_capabilities.cap_range.range_2ghz_min = 2412; /* 2312 */
-			ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
-
-			if (AR5K_EEPROM_HDR_11B(ee_header))
-				__set_bit(AR5K_MODE_11B,
-						ah->ah_capabilities.cap_mode);
-
-			if (AR5K_EEPROM_HDR_11G(ee_header))
-				__set_bit(AR5K_MODE_11G,
-						ah->ah_capabilities.cap_mode);
-		}
-	}
-
-	/* GPIO */
-	ah->ah_gpio_npins = AR5K_NUM_GPIO;
-
-	/* Set number of supported TX queues */
-	if (ah->ah_version == AR5K_AR5210)
-		ah->ah_capabilities.cap_queues.q_tx_num =
-			AR5K_NUM_TX_QUEUES_NOQCU;
-	else
-		ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
-
-	return 0;
-}
-
-/*********************************\
-  Protocol Control Unit Functions
-\*********************************/
-
-/*
- * Set Operation mode
- */
-int ath5k_hw_set_opmode(struct ath5k_hw *ah)
-{
-	u32 pcu_reg, beacon_reg, low_id, high_id;
-
-	pcu_reg = 0;
-	beacon_reg = 0;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	switch (ah->ah_op_mode) {
-	case IEEE80211_IF_TYPE_IBSS:
-		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
-			(ah->ah_version == AR5K_AR5210 ?
-				AR5K_STA_ID1_NO_PSPOLL : 0);
-		beacon_reg |= AR5K_BCR_ADHOC;
-		break;
-
-	case IEEE80211_IF_TYPE_AP:
-		pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
-			(ah->ah_version == AR5K_AR5210 ?
-				AR5K_STA_ID1_NO_PSPOLL : 0);
-		beacon_reg |= AR5K_BCR_AP;
-		break;
-
-	case IEEE80211_IF_TYPE_STA:
-		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
-			(ah->ah_version == AR5K_AR5210 ?
-				AR5K_STA_ID1_PWR_SV : 0);
-	case IEEE80211_IF_TYPE_MNTR:
-		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
-			(ah->ah_version == AR5K_AR5210 ?
-				AR5K_STA_ID1_NO_PSPOLL : 0);
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	/*
-	 * Set PCU registers
-	 */
-	low_id = AR5K_LOW_ID(ah->ah_sta_id);
-	high_id = AR5K_HIGH_ID(ah->ah_sta_id);
-	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
-	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
-
-	/*
-	 * Set Beacon Control Register on 5210
-	 */
-	if (ah->ah_version == AR5K_AR5210)
-		ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
-
-	return 0;
-}
-
-/*
- * BSSID Functions
- */
-
-/*
- * Get station id
- */
-void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	memcpy(mac, ah->ah_sta_id, ETH_ALEN);
-}
-
-/*
- * Set station id
- */
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
-{
-	u32 low_id, high_id;
-
-	ATH5K_TRACE(ah->ah_sc);
-	/* Set new station ID */
-	memcpy(ah->ah_sta_id, mac, ETH_ALEN);
-
-	low_id = AR5K_LOW_ID(mac);
-	high_id = AR5K_HIGH_ID(mac);
-
-	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
-	ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
-
-	return 0;
-}
-
-/*
- * Set BSSID
- */
-void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
-{
-	u32 low_id, high_id;
-	u16 tim_offset = 0;
-
-	/*
-	 * Set simple BSSID mask on 5212
-	 */
-	if (ah->ah_version == AR5K_AR5212) {
-		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
-		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
-	}
-
-	/*
-	 * Set BSSID which triggers the "SME Join" operation
-	 */
-	low_id = AR5K_LOW_ID(bssid);
-	high_id = AR5K_HIGH_ID(bssid);
-	ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
-	ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
-				AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
-
-	if (assoc_id == 0) {
-		ath5k_hw_disable_pspoll(ah);
-		return;
-	}
-
-	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
-			tim_offset ? tim_offset + 4 : 0);
-
-	ath5k_hw_enable_pspoll(ah, NULL, 0);
-}
-/**
- * ath5k_hw_set_bssid_mask - set common bits we should listen to
- *
- * The bssid_mask is a utility used by AR5212 hardware to inform the hardware
- * which bits of the interface's MAC address should be looked at when trying
- * to decide which packets to ACK. In station mode every bit matters. In AP
- * mode with a single BSS every bit matters as well. In AP mode with
- * multiple BSSes not every bit matters.
- *
- * @ah: the &struct ath5k_hw
- * @mask: the bssid_mask, a u8 array of size ETH_ALEN
- *
- * Note that this is a simple filter and does *not* filter out all
- * irrelevant frames. Some non-relevant frames will get through; probability
- * jocks are welcome to compute how many.
- *
- * When handling multiple BSSes (or VAPs) you can get the BSSID mask by
- * computing the set of:
- *
- *     ~ ( MAC XOR BSSID )
- *
- * When you do this you are essentially computing the common bits. Later it
- * is assumed the hardware will "and" (&) the BSSID mask with the MAC address
- * to obtain the relevant bits which should match on the destination frame.
- *
- * Simple example: on your card you have two BSSes you have created with
- * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
- * There is another BSSID-03 but you are not part of it. For simplicity's sake,
- * assuming only 4 bits for a MAC address and for BSSIDs you can then have:
- *
- *                  \
- * MAC:        0001 |
- * BSSID-01:   0100 | --> Belongs to us
- * BSSID-02:   1001 |
- *                  /
- * -------------------
- * BSSID-03:   0110  | --> External
- * -------------------
- *
- * Our bssid_mask would then be:
- *
- *             On loop iteration for BSSID-01:
- *             ~(0001 ^ 0100)  -> ~(0101)
- *                             ->   1010
- *             bssid_mask      =    1010
- *
- *             On loop iteration for BSSID-02:
- *             bssid_mask &= ~(0001   ^   1001)
- *             bssid_mask =   (1010)  & ~(0001 ^ 1001)
- *             bssid_mask =   (1010)  & ~(1001)
- *             bssid_mask =   (1010)  &  (0110)
- *             bssid_mask =   0010
- *
- * A bssid_mask of 0010 means "only pay attention to the second least
- * significant bit". This is because it's the only bit common
- * amongst the MAC and all BSSIDs we support. To find out what the real
- * common bit is we can simply "&" the bssid_mask now with any BSSID we have
- * or our MAC address (we assume the hardware uses the MAC address).
- *
- * Now, suppose there's an incoming frame for BSSID-03:
- *
- * IFRAME-01:  0110
- *
- * An easy eye-inspection of this should already tell you that this frame
- * will not pass our check. This is because the bssid_mask tells the
- * hardware to only look at the second least significant bit and the
- * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
- * as 1, which does not match 0.
- *
- * So with IFRAME-01 we *assume* the hardware will do:
- *
- *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
- *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
- *  --> allow = (0010) == 0000 ? 1 : 0;
- *  --> allow = 0
- *
- *  Let's now test a frame that should work:
- *
- * IFRAME-02:  0001 (we should allow)
- *
- *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
- *  --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
- *  --> allow = (0000) == (0000)
- *  --> allow = 1
- *
- * Other examples:
- *
- * IFRAME-03:  0100 --> allowed
- * IFRAME-04:  1001 --> allowed
- * IFRAME-05:  1101 --> allowed but it's not for us!!!
- *
- */
-int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
-{
-	u32 low_id, high_id;
-	ATH5K_TRACE(ah->ah_sc);
-
-	if (ah->ah_version == AR5K_AR5212) {
-		low_id = AR5K_LOW_ID(mask);
-		high_id = AR5K_HIGH_ID(mask);
-
-		ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
-		ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
-
-		return 0;
-	}
-
-	return -EIO;
-}
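-
-/*
- * A minimal sketch of the ~(MAC ^ BSSID) loop described in the kernel-doc
- * above, assuming a hypothetical caller that keeps its active BSSIDs in a
- * plain array; the result would then be passed to ath5k_hw_set_bssid_mask().
- */
-#if 0
-static void example_compute_bssid_mask(const u8 *mac,
-		const u8 bssids[][ETH_ALEN], unsigned int nbssids, u8 *mask)
-{
-	unsigned int b, i;
-
-	/* Start with "every bit matters" and clear bits that differ */
-	memset(mask, 0xff, ETH_ALEN);
-	for (b = 0; b < nbssids; b++)
-		for (i = 0; i < ETH_ALEN; i++)
-			mask[i] &= ~(mac[i] ^ bssids[b][i]);
-}
-#endif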
-
-/*
- * Receive start/stop functions
- */
-
-/*
- * Start receive on PCU
- */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
-
-	/* TODO: ANI Support */
-}
-
-/*
- * Stop receive on PCU
- */
-void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
-
-	/* TODO: ANI Support */
-}
-
-/*
- * RX Filter functions
- */
-
-/*
- * Set multicast filter
- */
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	/* Set the multicast filter */
-	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
-	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
-}
-
-/*
- * Set multicast filter by index
- */
-int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index)
-{
-
-	ATH5K_TRACE(ah->ah_sc);
-	if (index >= 64)
-		return -EINVAL;
-	else if (index >= 32)
-		AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
-				(1 << (index - 32)));
-	else
-		AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
-	return 0;
-}
-
-/*
- * Clear Multicast filter by index
- */
-int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
-	ATH5K_TRACE(ah->ah_sc);
-	if (index >= 64)
-		return -EINVAL;
-	else if (index >= 32)
-		AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
-				(1 << (index - 32)));
-	else
-		AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
-	return 0;
-}
-
-/*
- * Get current rx filter
- */
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
-{
-	u32 data, filter = 0;
-
-	ATH5K_TRACE(ah->ah_sc);
-	filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
-
-	/*Radar detection for 5212*/
-	if (ah->ah_version == AR5K_AR5212) {
-		data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
-
-		if (data & AR5K_PHY_ERR_FIL_RADAR)
-			filter |= AR5K_RX_FILTER_RADARERR;
-		if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
-			filter |= AR5K_RX_FILTER_PHYERR;
-	}
-
-	return filter;
-}
-
-/*
- * Set rx filter
- */
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
-{
-	u32 data = 0;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* Set PHY error filter register on 5212*/
-	if (ah->ah_version == AR5K_AR5212) {
-		if (filter & AR5K_RX_FILTER_RADARERR)
-			data |= AR5K_PHY_ERR_FIL_RADAR;
-		if (filter & AR5K_RX_FILTER_PHYERR)
-			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
-	}
-
-	/*
-	 * The AR5210 uses promiscuous mode to detect radar activity
-	 */
-	if (ah->ah_version == AR5K_AR5210 &&
-			(filter & AR5K_RX_FILTER_RADARERR)) {
-		filter &= ~AR5K_RX_FILTER_RADARERR;
-		filter |= AR5K_RX_FILTER_PROM;
-	}
-
-	/*Zero length DMA*/
-	if (data)
-		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
-	else
-		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
-
-	/*Write RX Filter register*/
-	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
-
-	/*Write PHY error filter register on 5212*/
-	if (ah->ah_version == AR5K_AR5212)
-		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
-
-}
-
-/*
- * Beacon related functions
- */
-
-/*
- * Get a 32bit TSF
- */
-u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-}
-
-/*
- * Get the full 64bit TSF
- */
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
-{
-	u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
-	ATH5K_TRACE(ah->ah_sc);
-
-	return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
-}
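-
-/*
- * Reading the two TSF halves back to back can race with a carry from the
- * low word. A minimal wrap-safe sketch (an assumption about how one could
- * guard against it, not something the hardware mandates) re-reads the
- * upper half until it is stable:
- */
-#if 0
-static u64 example_get_tsf64_stable(struct ath5k_hw *ah)
-{
-	u32 high, low;
-
-	do {
-		high = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
-		low = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-	} while (high != ath5k_hw_reg_read(ah, AR5K_TSF_U32));
-
-	return ((u64)high << 32) | low;
-}
-#endif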
-
-/*
- * Force a TSF reset
- */
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_RESET_TSF);
-}
-
-/*
- * Initialize beacon timers
- */
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
-{
-	u32 timer1, timer2, timer3;
-
-	ATH5K_TRACE(ah->ah_sc);
-	/*
-	 * Set the additional timers by mode
-	 */
-	switch (ah->ah_op_mode) {
-	case IEEE80211_IF_TYPE_STA:
-		if (ah->ah_version == AR5K_AR5210) {
-			timer1 = 0xffffffff;
-			timer2 = 0xffffffff;
-		} else {
-			timer1 = 0x0000ffff;
-			timer2 = 0x0007ffff;
-		}
-		break;
-
-	default:
-		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
-		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
-	}
-
-	timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);
-
-	/*
-	 * Set the beacon register and enable all timers.
-	 * (next beacon, DMA beacon, software beacon, ATIM window time)
-	 */
-	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
-	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
-	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
-	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
-
-	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
-			AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
-		AR5K_BEACON);
-}
-
-#if 0
-/*
- * Set beacon timers
- */
-int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
-		const struct ath5k_beacon_state *state)
-{
-	u32 cfp_period, next_cfp, dtim, interval, next_beacon;
-
-	/*
-	 * TODO: should be changed through *state
-	 * review struct ath5k_beacon_state struct
-	 *
-	 * XXX: These are used for cfp period below, are they
-	 * ok ? Is it O.K. for tsf here to be 0 or should we use
-	 * get_tsf ?
-	 */
-	u32 dtim_count = 0; /* XXX */
-	u32 cfp_count = 0; /* XXX */
-	u32 tsf = 0; /* XXX */
-
-	ATH5K_TRACE(ah->ah_sc);
-	/* Return on an invalid beacon state */
-	if (state->bs_interval < 1)
-		return -EINVAL;
-
-	interval = state->bs_interval;
-	dtim = state->bs_dtim_period;
-
-	/*
-	 * PCF support?
-	 */
-	if (state->bs_cfp_period > 0) {
-		/*
-		 * Enable PCF mode and set the CFP
-		 * (Contention Free Period) and timer registers
-		 */
-		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
-			state->bs_interval;
-		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
-			state->bs_interval;
-
-		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
-				AR5K_STA_ID1_DEFAULT_ANTENNA |
-				AR5K_STA_ID1_PCF);
-		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
-		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
-				AR5K_CFP_DUR);
-		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
-						next_cfp)) << 3, AR5K_TIMER2);
-	} else {
-		/* Disable PCF mode */
-		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
-				AR5K_STA_ID1_DEFAULT_ANTENNA |
-				AR5K_STA_ID1_PCF);
-	}
-
-	/*
-	 * Enable the beacon timer register
-	 */
-	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
-
-	/*
-	 * Start the beacon timers
-	 */
-	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &~
-		(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
-		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
-		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
-		AR5K_BEACON_PERIOD), AR5K_BEACON);
-
-	/*
-	 * Write new beacon miss threshold, if it appears to be valid
-	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
-	 * and return if it's not in range. We can test this by reading the value
-	 * and setting it to the largest value and seeing which values register.
-	 */
-
-	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
-			state->bs_bmiss_threshold);
-
-	/*
-	 * Set sleep control register
-	 * XXX: Didn't find this in 5210 code but since this register
-	 * exists also in ar5k's 5210 headers I leave it as common code.
-	 */
-	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
-			(state->bs_sleep_duration - 3) << 3);
-
-	/*
-	 * Set enhanced sleep registers on 5212
-	 */
-	if (ah->ah_version == AR5K_AR5212) {
-		if (state->bs_sleep_duration > state->bs_interval &&
-				roundup(state->bs_sleep_duration, interval) ==
-				state->bs_sleep_duration)
-			interval = state->bs_sleep_duration;
-
-		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
-				roundup(state->bs_sleep_duration, dtim) ==
-				state->bs_sleep_duration))
-			dtim = state->bs_sleep_duration;
-
-		if (interval > dtim)
-			return -EINVAL;
-
-		next_beacon = interval == dtim ? state->bs_next_dtim :
-			state->bs_next_beacon;
-
-		ath5k_hw_reg_write(ah,
-			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
-			AR5K_SLEEP0_NEXT_DTIM) |
-			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
-			AR5K_SLEEP0_ENH_SLEEP_EN |
-			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
-
-		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
-			AR5K_SLEEP1_NEXT_TIM) |
-			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
-
-		ath5k_hw_reg_write(ah,
-			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
-			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
-	}
-
-	return 0;
-}
-
-/*
- * Reset beacon timers
- */
-void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	/*
-	 * Disable beacon timer
-	 */
-	ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
-
-	/*
-	 * Disable some beacon register values
-	 */
-	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
-			AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
-	ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
-}
-
-/*
- * Wait for beacon queue to finish
- */
-int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
-{
-	unsigned int i;
-	int ret;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* 5210 doesn't have QCU*/
-	if (ah->ah_version == AR5K_AR5210) {
-		/*
-		 * Wait for the beacon queue to finish by checking
-		 * Control Register and Beacon Status Register.
-		 */
-		for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
-			if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
-					||
-			    !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
-				break;
-			udelay(10);
-		}
-
-		/* Timeout... */
-		if (i <= 0) {
-			/*
-			 * Re-schedule the beacon queue
-			 */
-			ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
-			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
-					AR5K_BCR);
-
-			return -EIO;
-		}
-		ret = 0;
-	} else {
-	/*5211/5212*/
-		ret = ath5k_hw_register_timeout(ah,
-			AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
-			AR5K_QCU_STS_FRMPENDCNT, 0, false);
-
-		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
-			return -EIO;
-	}
-
-	return ret;
-}
-#endif
-
-/*
- * Update mib counters (statistics)
- */
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
-		struct ieee80211_low_level_stats  *stats)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* Read-And-Clear */
-	stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
-	stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
-	stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
-	stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
-
-	/* XXX: Should we use this to track beacon count ?
-	 * -we read it anyway to clear the register */
-	ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
-
-	/* Reset profile count registers on 5212*/
-	if (ah->ah_version == AR5K_AR5212) {
-		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
-		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
-		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
-		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
-	}
-}
-
-/** ath5k_hw_set_ack_bitrate_high - set bitrate for ACKs
- *
- * @ah: the &struct ath5k_hw
- * @high: determines whether to use the low bit rate or not
- */
-void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
-{
-	if (ah->ah_version != AR5K_AR5212)
-		return;
-	else {
-		u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
-		if (high)
-			AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
-		else
-			AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
-	}
-}
-
-
-/*
- * ACK/CTS Timeouts
- */
-
-/*
- * Set ACK timeout on PCU
- */
-int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
-			ah->ah_turbo) <= timeout)
-		return -EINVAL;
-
-	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
-		ath5k_hw_htoclock(timeout, ah->ah_turbo));
-
-	return 0;
-}
-
-/*
- * Read the ACK timeout from PCU
- */
-unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
-			AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
-}
-
-/*
- * Set CTS timeout on PCU
- */
-int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
-			ah->ah_turbo) <= timeout)
-		return -EINVAL;
-
-	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
-			ath5k_hw_htoclock(timeout, ah->ah_turbo));
-
-	return 0;
-}
-
-/*
- * Read CTS timeout from PCU
- */
-unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
-			AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
-}
-
-/*
- * Key table (WEP) functions
- */
-
-int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
-{
-	unsigned int i;
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
-	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
-		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
-
-	/*
-	 * Set NULL encryption on AR5212+
-	 *
-	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
-	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
-	 *
-	 * Note2: Windows driver (ndiswrapper) sets this to
-	 *        0x00000714 instead of 0x00000007
-	 */
-	if (ah->ah_version > AR5K_AR5211)
-		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
-				AR5K_KEYTABLE_TYPE(entry));
-
-	return 0;
-}
-
-int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
-	/* Check the validation flag at the end of the entry */
-	return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
-		AR5K_KEYTABLE_VALID;
-}
-
-int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
-		const struct ieee80211_key_conf *key, const u8 *mac)
-{
-	unsigned int i;
-	__le32 key_v[5] = {};
-	u32 keytype;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/* key->keylen comes in from mac80211 in bytes */
-
-	if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
-		return -EOPNOTSUPP;
-
-	switch (key->keylen) {
-	/* WEP 40-bit   = 40-bit  entered key + 24 bit IV = 64-bit */
-	case 40 / 8:
-		memcpy(&key_v[0], key->key, 5);
-		keytype = AR5K_KEYTABLE_TYPE_40;
-		break;
-
-	/* WEP 104-bit  = 104-bit entered key + 24-bit IV = 128-bit */
-	case 104 / 8:
-		memcpy(&key_v[0], &key->key[0], 6);
-		memcpy(&key_v[2], &key->key[6], 6);
-		memcpy(&key_v[4], &key->key[12], 1);
-		keytype = AR5K_KEYTABLE_TYPE_104;
-		break;
-	/* WEP 128-bit  = 128-bit entered key + 24 bit IV = 152-bit */
-	case 128 / 8:
-		memcpy(&key_v[0], &key->key[0], 6);
-		memcpy(&key_v[2], &key->key[6], 6);
-		memcpy(&key_v[4], &key->key[12], 4);
-		keytype = AR5K_KEYTABLE_TYPE_128;
-		break;
-
-	default:
-		return -EINVAL; /* shouldn't happen */
-	}
-
-	for (i = 0; i < ARRAY_SIZE(key_v); i++)
-		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
-				AR5K_KEYTABLE_OFF(entry, i));
-
-	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
-
-	return ath5k_hw_set_key_lladdr(ah, entry, mac);
-}
-
-int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
-{
-	u32 low_id, high_id;
-
-	ATH5K_TRACE(ah->ah_sc);
-	 /* Invalid entry (key table overflow) */
-	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
-	/* MAC may be NULL if it's a broadcast key. In this case there is no need
-	 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
-	if (unlikely(mac == NULL)) {
-		low_id = 0xffffffff;
-		high_id = 0xffff | AR5K_KEYTABLE_VALID;
-	} else {
-		low_id = AR5K_LOW_ID(mac);
-		high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
-	}
-
-	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
-	ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
-
-	return 0;
-}
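-
-/*
- * A minimal usage sketch for the key table helpers above (assumptions: a
- * WEP key handed down by mac80211 and a broadcast entry, i.e. mac == NULL):
- */
-#if 0
-static int example_install_wep_key(struct ath5k_hw *ah, u16 entry,
-		const struct ieee80211_key_conf *key)
-{
-	int ret;
-
-	/* Clear the slot first, then program the new key */
-	ret = ath5k_hw_reset_key(ah, entry);
-	if (ret)
-		return ret;
-
-	return ath5k_hw_set_key(ah, entry, key, NULL);
-}
-#endif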
-
-
-/********************************************\
-Queue Control Unit, DFS Control Unit Functions
-\********************************************/
-
-/*
- * Initialize a transmit queue
- */
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
-		struct ath5k_txq_info *queue_info)
-{
-	unsigned int queue;
-	int ret;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*
-	 * Get queue by type
-	 */
-	/*5210 only has 2 queues*/
-	if (ah->ah_version == AR5K_AR5210) {
-		switch (queue_type) {
-		case AR5K_TX_QUEUE_DATA:
-			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-		case AR5K_TX_QUEUE_CAB:
-			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else {
-		switch (queue_type) {
-		case AR5K_TX_QUEUE_DATA:
-			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
-				ah->ah_txq[queue].tqi_type !=
-				AR5K_TX_QUEUE_INACTIVE; queue++) {
-
-				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
-					return -EINVAL;
-			}
-			break;
-		case AR5K_TX_QUEUE_UAPSD:
-			queue = AR5K_TX_QUEUE_ID_UAPSD;
-			break;
-		case AR5K_TX_QUEUE_BEACON:
-			queue = AR5K_TX_QUEUE_ID_BEACON;
-			break;
-		case AR5K_TX_QUEUE_CAB:
-			queue = AR5K_TX_QUEUE_ID_CAB;
-			break;
-		case AR5K_TX_QUEUE_XR_DATA:
-			if (ah->ah_version != AR5K_AR5212)
-				ATH5K_ERR(ah->ah_sc,
-					"XR data queues only supported in"
-					" 5212!\n");
-			queue = AR5K_TX_QUEUE_ID_XR_DATA;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * Setup internal queue structure
-	 */
-	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
-	ah->ah_txq[queue].tqi_type = queue_type;
-
-	if (queue_info != NULL) {
-		queue_info->tqi_type = queue_type;
-		ret = ath5k_hw_setup_tx_queueprops(ah, queue, queue_info);
-		if (ret)
-			return ret;
-	}
-	/*
-	 * We use ah_txq_status to hold a temp value for
-	 * the Secondary interrupt mask registers on 5211+
-	 * check out ath5k_hw_reset_tx_queue
-	 */
-	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
-
-	return queue;
-}
-
-/*
- * Setup a transmit queue
- */
-int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue,
-				const struct ath5k_txq_info *queue_info)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return -EIO;
-
-	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
-
-	/*XXX: Is this supported on 5210 ?*/
-	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
-			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
-			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
-			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
-		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
-
-	return 0;
-}
-
-/*
- * Get properties for a specific transmit queue
- */
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
-		struct ath5k_txq_info *queue_info)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
-	return 0;
-}
-
-/*
- * Set a transmit queue inactive
- */
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
-		return;
-
-	/* This queue will be skipped in further operations */
-	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
-	/*For SIMR setup*/
-	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
-}
-
-/*
- * Set DFS params for a transmit queue
- */
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
-{
-	u32 cw_min, cw_max, retry_lg, retry_sh;
-	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
-
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	tq = &ah->ah_txq[queue];
-
-	if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return 0;
-
-	if (ah->ah_version == AR5K_AR5210) {
-		/* Only handle data queues, others will be ignored */
-		if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
-			return 0;
-
-		/* Set Slot time */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
-			AR5K_SLOT_TIME);
-		/* Set ACK_CTS timeout */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
-			AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
-		/* Set Transmit Latency */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			AR5K_INIT_TRANSMIT_LATENCY_TURBO :
-			AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
-		/* Set IFS0 */
-		if (ah->ah_turbo)
-			 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
-				(ah->ah_aifs + tq->tqi_aifs) *
-				AR5K_INIT_SLOT_TIME_TURBO) <<
-				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
-				AR5K_IFS0);
-		else
-			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
-				(ah->ah_aifs + tq->tqi_aifs) *
-				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
-				AR5K_INIT_SIFS, AR5K_IFS0);
-
-		/* Set IFS1 */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
-			AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
-		/* Set AR5K_PHY_SETTLING */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
-			| 0x38 :
-			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
-			| 0x1C,
-			AR5K_PHY_SETTLING);
-		/* Set Frame Control Register */
-		ath5k_hw_reg_write(ah, ah->ah_turbo ?
-			(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
-			AR5K_PHY_TURBO_SHORT | 0x2020) :
-			(AR5K_PHY_FRAME_CTL_INI | 0x1020),
-			AR5K_PHY_FRAME_CTL_5210);
-	}
-
-	/*
-	 * Calculate cwmin/max by channel mode
-	 */
-	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
-	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
-	ah->ah_aifs = AR5K_TUNE_AIFS;
-	/*XR is only supported on 5212*/
-	if (IS_CHAN_XR(ah->ah_current_channel) &&
-			ah->ah_version == AR5K_AR5212) {
-		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
-		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
-		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
-	/*B mode is not supported on 5210*/
-	} else if (IS_CHAN_B(ah->ah_current_channel) &&
-			ah->ah_version != AR5K_AR5210) {
-		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
-		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
-		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
-	}
-
-	cw_min = 1;
-	while (cw_min < ah->ah_cw_min)
-		cw_min = (cw_min << 1) | 1;
-
-	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
-		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
-	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
-		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
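/*
 * Worked example of the contention-window arithmetic above, assuming the
 * usual AR5K_TUNE_CWMIN of 15 (not visible in this hunk): the while loop
 * leaves cw_min at 15 since it is already of the form 2^n - 1. A queue with
 * tqi_cw_min = 2 then gets (15 << 2) + (1 << 2) - 1 = 63 (two exponent
 * steps wider), tqi_cw_min = -1 halves it to 15 >> 1 = 7, and
 * tqi_cw_min = 0 keeps the default of 15.
 */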
-
-	/*
-	 * Calculate and set retry limits
-	 */
-	if (ah->ah_software_retry) {
-		/* XXX Need to test this */
-		retry_lg = ah->ah_limit_tx_retries;
-		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
-			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
-	} else {
-		retry_lg = AR5K_INIT_LG_RETRY;
-		retry_sh = AR5K_INIT_SH_RETRY;
-	}
-
-	/*No QCU/DCU [5210]*/
-	if (ah->ah_version == AR5K_AR5210) {
-		ath5k_hw_reg_write(ah,
-			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
-			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
-			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
-			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
-			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
-			AR5K_NODCU_RETRY_LMT);
-	} else {
-		/*QCU/DCU [5211+]*/
-		ath5k_hw_reg_write(ah,
-			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
-			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
-			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
-			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
-			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
-
-	/*===Rest is also for QCU/DCU only [5211+]===*/
-
-		/*
-		 * Set initial contention window (cw_min/cw_max)
-		 * and arbitrated interframe space (aifs)...
-		 */
-		ath5k_hw_reg_write(ah,
-			AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
-			AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
-			AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
-				AR5K_DCU_LCL_IFS_AIFS),
-			AR5K_QUEUE_DFS_LOCAL_IFS(queue));
-
-		/*
-		 * Set misc registers
-		 */
-		ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
-			AR5K_QUEUE_MISC(queue));
-
-		if (tq->tqi_cbr_period) {
-			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
-				AR5K_QCU_CBRCFG_INTVAL) |
-				AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
-				AR5K_QCU_CBRCFG_ORN_THRES),
-				AR5K_QUEUE_CBRCFG(queue));
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
-				AR5K_QCU_MISC_FRSHED_CBR);
-			if (tq->tqi_cbr_overflow_limit)
-				AR5K_REG_ENABLE_BITS(ah,
-					AR5K_QUEUE_MISC(queue),
-					AR5K_QCU_MISC_CBR_THRES_ENABLE);
-		}
-
-		if (tq->tqi_ready_time)
-			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
-				AR5K_QCU_RDYTIMECFG_INTVAL) |
-				AR5K_QCU_RDYTIMECFG_ENABLE,
-				AR5K_QUEUE_RDYTIMECFG(queue));
-
-		if (tq->tqi_burst_time) {
-			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
-				AR5K_DCU_CHAN_TIME_DUR) |
-				AR5K_DCU_CHAN_TIME_ENABLE,
-				AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
-
-			if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
-				AR5K_REG_ENABLE_BITS(ah,
-					AR5K_QUEUE_MISC(queue),
-					AR5K_QCU_MISC_RDY_VEOL_POLICY);
-		}
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
-			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
-				AR5K_QUEUE_DFS_MISC(queue));
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
-			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
-				AR5K_QUEUE_DFS_MISC(queue));
-
-		/*
-		 * Set registers by queue type
-		 */
-		switch (tq->tqi_type) {
-		case AR5K_TX_QUEUE_BEACON:
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
-				AR5K_QCU_MISC_FRSHED_DBA_GT |
-				AR5K_QCU_MISC_CBREXP_BCN |
-				AR5K_QCU_MISC_BCN_ENABLE);
-
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
-				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
-				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
-				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
-				AR5K_DCU_MISC_BCN_ENABLE);
-
-			ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
-				(AR5K_TUNE_SW_BEACON_RESP -
-				AR5K_TUNE_DMA_BEACON_RESP) -
-				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
-				AR5K_QCU_RDYTIMECFG_ENABLE,
-				AR5K_QUEUE_RDYTIMECFG(queue));
-			break;
-
-		case AR5K_TX_QUEUE_CAB:
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
-				AR5K_QCU_MISC_FRSHED_DBA_GT |
-				AR5K_QCU_MISC_CBREXP |
-				AR5K_QCU_MISC_CBREXP_BCN);
-
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
-				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
-				AR5K_DCU_MISC_ARBLOCK_CTL_S));
-			break;
-
-		case AR5K_TX_QUEUE_UAPSD:
-			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
-				AR5K_QCU_MISC_CBREXP);
-			break;
-
-		case AR5K_TX_QUEUE_DATA:
-		default:
-			break;
-		}
-
-		/*
-		 * Enable interrupts for this tx queue
-		 * in the secondary interrupt mask registers
-		 */
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
-			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
-			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
-			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
-			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
-
-		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
-			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
-
-
-		/* Update secondary interrupt mask registers */
-		ah->ah_txq_imr_txok &= ah->ah_txq_status;
-		ah->ah_txq_imr_txerr &= ah->ah_txq_status;
-		ah->ah_txq_imr_txurn &= ah->ah_txq_status;
-		ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
-		ah->ah_txq_imr_txeol &= ah->ah_txq_status;
-
-		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
-			AR5K_SIMR0_QCU_TXOK) |
-			AR5K_REG_SM(ah->ah_txq_imr_txdesc,
-			AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
-		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
-			AR5K_SIMR1_QCU_TXERR) |
-			AR5K_REG_SM(ah->ah_txq_imr_txeol,
-			AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
-		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
-			AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
-	}
-
-	return 0;
-}
-
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
- */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
-	/* Return if queue is declared inactive */
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
-		return false;
-
-	/* XXX: How about AR5K_CFG_TXCNT ? */
-	if (ah->ah_version == AR5K_AR5210)
-		return false;
-
-	return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
-}
-
-/*
- * Set slot time
- */
-int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
-		return -EINVAL;
-
-	if (ah->ah_version == AR5K_AR5210)
-		ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
-				ah->ah_turbo), AR5K_SLOT_TIME);
-	else
-		ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
-
-	return 0;
-}
-
-/*
- * Get slot time
- */
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (ah->ah_version == AR5K_AR5210)
-		return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
-				AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
-	else
-		return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
-}
-
-
-/******************************\
- Hardware Descriptor Functions
-\******************************/
-
-/*
- * TX Descriptor
- */
-
-/*
- * Initialize the 2-word tx descriptor on 5210/5211
- */
-static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-	unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
-	unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
-	unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
-	unsigned int rtscts_rate, unsigned int rtscts_duration)
-{
-	u32 frame_type;
-	struct ath5k_hw_2w_tx_ctl *tx_ctl;
-	unsigned int frame_len;
-
-	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
-
-	/*
-	 * Validate input
-	 * - Zero retries don't make sense.
-	 * - A zero rate will put the HW into a mode where it continuously sends
-	 *   noise on the channel, so it is important to avoid this.
-	 */
-	if (unlikely(tx_tries0 == 0)) {
-		ATH5K_ERR(ah->ah_sc, "zero retries\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	if (unlikely(tx_rate0 == 0)) {
-		ATH5K_ERR(ah->ah_sc, "zero rate\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	/* Clear descriptor */
-	memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
-
-	/* Setup control descriptor */
-
-	/* Verify and set frame length */
-
-	/* remove padding we might have added before */
-	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
-
-	if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
-		return -EINVAL;
-
-	tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
-
-	/* Verify and set buffer length */
-
-	/* NB: beacon's BufLen must be a multiple of 4 bytes */
-	if (type == AR5K_PKT_TYPE_BEACON)
-		pkt_len = roundup(pkt_len, 4);
-
-	if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
-		return -EINVAL;
-
-	tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
-
-	/*
-	 * Verify and set header length
-	 * XXX: Only found in the 5210 code; does it work on 5211?
-	 */
-	if (ah->ah_version == AR5K_AR5210) {
-		if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
-			return -EINVAL;
-		tx_ctl->tx_control_0 |=
-			AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
-	}
-
-	/*Differences between 5210-5211*/
-	if (ah->ah_version == AR5K_AR5210) {
-		switch (type) {
-		case AR5K_PKT_TYPE_BEACON:
-		case AR5K_PKT_TYPE_PROBE_RESP:
-			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
-		case AR5K_PKT_TYPE_PIFS:
-			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
-		default:
-			frame_type = type /*<< 2 ?*/;
-		}
-
-		tx_ctl->tx_control_0 |=
-			AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
-			AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
-	} else {
-		tx_ctl->tx_control_0 |=
-			AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
-			AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
-		tx_ctl->tx_control_1 |=
-			AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
-	}
-#define _TX_FLAGS(_c, _flag)						\
-	if (flags & AR5K_TXDESC_##_flag)				\
-		tx_ctl->tx_control_##_c |=				\
-			AR5K_2W_TX_DESC_CTL##_c##_##_flag
-
-	_TX_FLAGS(0, CLRDMASK);
-	_TX_FLAGS(0, VEOL);
-	_TX_FLAGS(0, INTREQ);
-	_TX_FLAGS(0, RTSENA);
-	_TX_FLAGS(1, NOACK);
-
-#undef _TX_FLAGS
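/*
 * For reference, the token-pasting helper above expands
 *	_TX_FLAGS(0, VEOL);
 * into
 *	if (flags & AR5K_TXDESC_VEOL)
 *		tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_VEOL;
 * so each request flag maps onto the matching bit of the chosen control word.
 */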
-
-	/*
-	 * WEP crap
-	 */
-	if (key_index != AR5K_TXKEYIX_INVALID) {
-		tx_ctl->tx_control_0 |=
-			AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
-		tx_ctl->tx_control_1 |=
-			AR5K_REG_SM(key_index,
-			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
-	}
-
-	/*
-	 * RTS/CTS Duration [5210 ?]
-	 */
-	if ((ah->ah_version == AR5K_AR5210) &&
-			(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
-		tx_ctl->tx_control_1 |= rtscts_duration &
-				AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
-
-	return 0;
-}
-
-/*
- * Initialize the 4-word tx descriptor on 5212
- */
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
-	struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
-	enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
-	unsigned int tx_tries0, unsigned int key_index,
-	unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate,
-	unsigned int rtscts_duration)
-{
-	struct ath5k_hw_4w_tx_ctl *tx_ctl;
-	unsigned int frame_len;
-
-	ATH5K_TRACE(ah->ah_sc);
-	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
-
-	/*
-	 * Validate input
-	 * - Zero retries don't make sense.
-	 * - A zero rate will put the HW into a mode where it continuously sends
-	 *   noise on the channel, so it is important to avoid this.
-	 */
-	if (unlikely(tx_tries0 == 0)) {
-		ATH5K_ERR(ah->ah_sc, "zero retries\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	if (unlikely(tx_rate0 == 0)) {
-		ATH5K_ERR(ah->ah_sc, "zero rate\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	/* Clear descriptor */
-	memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
-
-	/* Setup control descriptor */
-
-	/* Verify and set frame length */
-
-	/* remove padding we might have added before */
-	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
-
-	if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
-		return -EINVAL;
-
-	tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
-
-	/* Verify and set buffer length */
-
-	/* NB: beacon's BufLen must be a multiple of 4 bytes */
-	if (type == AR5K_PKT_TYPE_BEACON)
-		pkt_len = roundup(pkt_len, 4);
-
-	if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
-		return -EINVAL;
-
-	tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
-
-	tx_ctl->tx_control_0 |=
-		AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
-		AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
-	tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
-					AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
-	tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
-					AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
-	tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
-
-#define _TX_FLAGS(_c, _flag)			\
-	if (flags & AR5K_TXDESC_##_flag)	\
-		tx_ctl->tx_control_##_c |=	\
-			AR5K_4W_TX_DESC_CTL##_c##_##_flag
-
-	_TX_FLAGS(0, CLRDMASK);
-	_TX_FLAGS(0, VEOL);
-	_TX_FLAGS(0, INTREQ);
-	_TX_FLAGS(0, RTSENA);
-	_TX_FLAGS(0, CTSENA);
-	_TX_FLAGS(1, NOACK);
-
-#undef _TX_FLAGS
-
-	/*
-	 * WEP crap
-	 */
-	if (key_index != AR5K_TXKEYIX_INVALID) {
-		tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
-		tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
-				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
-	}
-
-	/*
-	 * RTS/CTS
-	 */
-	if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
-		if ((flags & AR5K_TXDESC_RTSENA) &&
-				(flags & AR5K_TXDESC_CTSENA))
-			return -EINVAL;
-		tx_ctl->tx_control_2 |= rtscts_duration &
-				AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
-		tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
-				AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
-	}
-
-	return 0;
-}
-
-/*
- * Initialize a 4-word multirate tx descriptor on 5212
- */
-static int
-ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
-	unsigned int tx_rate3, u_int tx_tries3)
-{
-	struct ath5k_hw_4w_tx_ctl *tx_ctl;
-
-	/*
-	 * Rates can be 0 as long as the retry count is 0 too.
-	 * A zero rate and nonzero retry count will put the HW into a mode where
-	 * it continuously sends noise on the channel, so it is important to
-	 * avoid this.
-	 */
-	if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
-		     (tx_rate2 == 0 && tx_tries2 != 0) ||
-		     (tx_rate3 == 0 && tx_tries3 != 0))) {
-		ATH5K_ERR(ah->ah_sc, "zero rate\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	if (ah->ah_version == AR5K_AR5212) {
-		tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
-
-#define _XTX_TRIES(_n)							\
-	if (tx_tries##_n) {						\
-		tx_ctl->tx_control_2 |=				\
-		    AR5K_REG_SM(tx_tries##_n,				\
-		    AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n);		\
-		tx_ctl->tx_control_3 |=				\
-		    AR5K_REG_SM(tx_rate##_n,				\
-		    AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n);		\
-	}
-
-		_XTX_TRIES(1);
-		_XTX_TRIES(2);
-		_XTX_TRIES(3);
-
-#undef _XTX_TRIES
-
-		return 1;
-	}
-
-	return 0;
-}
-
-/*
- * Process the tx status descriptor on 5210/5211
- */
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
-{
-	struct ath5k_hw_2w_tx_ctl *tx_ctl;
-	struct ath5k_hw_tx_status *tx_status;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
-	tx_status = &desc->ud.ds_tx5210.tx_stat;
-
-	/* No frame has been sent or an error occurred */
-	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
-		return -EINPROGRESS;
-
-	/*
-	 * Get descriptor status
-	 */
-	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
-	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
-	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
-	/*TODO: ts->ts_virtcol + test*/
-	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
-		AR5K_DESC_TX_STATUS1_SEQ_NUM);
-	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
-		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
-	ts->ts_antenna = 1;
-	ts->ts_status = 0;
-	ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
-		AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
-
-	if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
-		if (tx_status->tx_status_0 &
-				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
-			ts->ts_status |= AR5K_TXERR_XRETRY;
-
-		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
-			ts->ts_status |= AR5K_TXERR_FIFO;
-
-		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
-			ts->ts_status |= AR5K_TXERR_FILT;
-	}
-
-	return 0;
-}
-
-/*
- * Process a tx descriptor on 5212
- */
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
-{
-	struct ath5k_hw_4w_tx_ctl *tx_ctl;
-	struct ath5k_hw_tx_status *tx_status;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
-	tx_status = &desc->ud.ds_tx5212.tx_stat;
-
-	/* No frame has been sent or an error occurred */
-	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
-		return -EINPROGRESS;
-
-	/*
-	 * Get descriptor status
-	 */
-	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
-	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
-	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
-		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
-	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
-		AR5K_DESC_TX_STATUS1_SEQ_NUM);
-	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
-		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
-	ts->ts_antenna = (tx_status->tx_status_1 &
-		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
-	ts->ts_status = 0;
-
-	switch (AR5K_REG_MS(tx_status->tx_status_1,
-			AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
-	case 0:
-		ts->ts_rate = tx_ctl->tx_control_3 &
-			AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
-		break;
-	case 1:
-		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
-			AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
-		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
-			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
-		break;
-	case 2:
-		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
-			AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
-		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
-			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
-		break;
-	case 3:
-		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
-			AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
-		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
-			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
-		break;
-	}
-
-	if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
-		if (tx_status->tx_status_0 &
-				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
-			ts->ts_status |= AR5K_TXERR_XRETRY;
-
-		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
-			ts->ts_status |= AR5K_TXERR_FIFO;
-
-		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
-			ts->ts_status |= AR5K_TXERR_FILT;
-	}
-
-	return 0;
-}
-
-/*
- * RX Descriptor
- */
-
-/*
- * Initialize an rx descriptor
- */
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-			u32 size, unsigned int flags)
-{
-	struct ath5k_hw_rx_ctl *rx_ctl;
-
-	ATH5K_TRACE(ah->ah_sc);
-	rx_ctl = &desc->ud.ds_rx.rx_ctl;
-
-	/*
-	 * Clear the descriptor
-	 * If we don't clean the status descriptor,
-	 * while scanning we get too many results,
-	 * most of them virtual; after a few seconds
-	 * of scanning the system hangs. M.F.
-	*/
-	memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
-
-	/* Setup descriptor */
-	rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
-	if (unlikely(rx_ctl->rx_control_1 != size))
-		return -EINVAL;
-
-	if (flags & AR5K_RXDESC_INTREQ)
-		rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
-
-	return 0;
-}
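/*
 * Hypothetical caller sketch (not taken from the driver): handing a receive
 * buffer to the hardware with the helper above, asking for an interrupt on
 * completion. The skb/descriptor pairing is an assumption for illustration.
 */
static int example_init_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *ds,
				struct sk_buff *skb)
{
	return ath5k_hw_setup_rx_desc(ah, ds, skb_tailroom(skb),
				      AR5K_RXDESC_INTREQ);
}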
-
-/*
- * Process the rx status descriptor on 5210/5211
- */
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
-{
-	struct ath5k_hw_rx_status *rx_status;
-
-	rx_status = &desc->ud.ds_rx.u.rx_stat;
-
-	/* No frame received / not ready */
-	if (unlikely((rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DONE)
-				== 0))
-		return -EINPROGRESS;
-
-	/*
-	 * Frame receive status
-	 */
-	rs->rs_datalen = rx_status->rx_status_0 &
-		AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
-	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
-		AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
-	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
-		AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
-	rs->rs_antenna = rx_status->rx_status_0 &
-		AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
-	rs->rs_more = rx_status->rx_status_0 &
-		AR5K_5210_RX_DESC_STATUS0_MORE;
-	/* TODO: this timestamp is 13 bit, later on we assume 15 bit */
-	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
-		AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
-	rs->rs_status = 0;
-	rs->rs_phyerr = 0;
-
-	/*
-	 * Key table status
-	 */
-	if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
-		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
-			AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
-	else
-		rs->rs_keyix = AR5K_RXKEYIX_INVALID;
-
-	/*
-	 * Receive/descriptor errors
-	 */
-	if ((rx_status->rx_status_1 &
-			AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
-		if (rx_status->rx_status_1 &
-				AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
-			rs->rs_status |= AR5K_RXERR_CRC;
-
-		if (rx_status->rx_status_1 &
-				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
-			rs->rs_status |= AR5K_RXERR_FIFO;
-
-		if (rx_status->rx_status_1 &
-				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
-			rs->rs_status |= AR5K_RXERR_PHY;
-			rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
-					   AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
-		}
-
-		if (rx_status->rx_status_1 &
-				AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
-			rs->rs_status |= AR5K_RXERR_DECRYPT;
-	}
-
-	return 0;
-}
-
-/*
- * Process the rx status descriptor on 5212
- */
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
-{
-	struct ath5k_hw_rx_status *rx_status;
-	struct ath5k_hw_rx_error *rx_err;
-
-	ATH5K_TRACE(ah->ah_sc);
-	rx_status = &desc->ud.ds_rx.u.rx_stat;
-
-	/* Overlay on error */
-	rx_err = &desc->ud.ds_rx.u.rx_err;
-
-	/* No frame received / not ready */
-	if (unlikely((rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_DONE)
-				== 0))
-		return -EINPROGRESS;
-
-	/*
-	 * Frame receive status
-	 */
-	rs->rs_datalen = rx_status->rx_status_0 &
-		AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
-	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
-		AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
-	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
-		AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
-	rs->rs_antenna = rx_status->rx_status_0 &
-		AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
-	rs->rs_more = rx_status->rx_status_0 &
-		AR5K_5212_RX_DESC_STATUS0_MORE;
-	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
-		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
-	rs->rs_status = 0;
-	rs->rs_phyerr = 0;
-
-	/*
-	 * Key table status
-	 */
-	if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
-		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
-				AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
-	else
-		rs->rs_keyix = AR5K_RXKEYIX_INVALID;
-
-	/*
-	 * Receive/descriptor errors
-	 */
-	if ((rx_status->rx_status_1 &
-			AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
-		if (rx_status->rx_status_1 &
-				AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
-			rs->rs_status |= AR5K_RXERR_CRC;
-
-		if (rx_status->rx_status_1 &
-				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
-			rs->rs_status |= AR5K_RXERR_PHY;
-			rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
-					   AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
-		}
-
-		if (rx_status->rx_status_1 &
-				AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
-			rs->rs_status |= AR5K_RXERR_DECRYPT;
-
-		if (rx_status->rx_status_1 &
-				AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
-			rs->rs_status |= AR5K_RXERR_MIC;
-	}
-
-	return 0;
-}
-
-
-/****************\
-  GPIO Functions
-\****************/
-
-/*
- * Set led state
- */
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
-{
-	u32 led;
-	/*5210 has different led mode handling*/
-	u32 led_5210;
-
-	ATH5K_TRACE(ah->ah_sc);
-
-	/*Reset led status*/
-	if (ah->ah_version != AR5K_AR5210)
-		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
-			AR5K_PCICFG_LEDMODE |  AR5K_PCICFG_LED);
-	else
-		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
-
-	/*
-	 * Some blinking values, define at your wish
-	 */
-	switch (state) {
-	case AR5K_LED_SCAN:
-	case AR5K_LED_AUTH:
-		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
-		led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
-		break;
-
-	case AR5K_LED_INIT:
-		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
-		led_5210 = AR5K_PCICFG_LED_PEND;
-		break;
-
-	case AR5K_LED_ASSOC:
-	case AR5K_LED_RUN:
-		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
-		led_5210 = AR5K_PCICFG_LED_ASSOC;
-		break;
-
-	default:
-		led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
-		led_5210 = AR5K_PCICFG_LED_PEND;
-		break;
-	}
-
-	/*Write new status to the register*/
-	if (ah->ah_version != AR5K_AR5210)
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
-	else
-		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
-}
-
-/*
- * Set GPIO outputs
- */
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (gpio > AR5K_NUM_GPIO)
-		return -EINVAL;
-
-	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
-		AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
-
-	return 0;
-}
-
-/*
- * Set GPIO inputs
- */
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (gpio > AR5K_NUM_GPIO)
-		return -EINVAL;
-
-	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
-		AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
-
-	return 0;
-}
-
-/*
- * Get GPIO state
- */
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
-{
-	ATH5K_TRACE(ah->ah_sc);
-	if (gpio > AR5K_NUM_GPIO)
-		return 0xffffffff;
-
-	/* GPIO input magic */
-	return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
-		0x1;
-}
-
-/*
- * Set GPIO state
- */
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
-{
-	u32 data;
-	ATH5K_TRACE(ah->ah_sc);
-
-	if (gpio > AR5K_NUM_GPIO)
-		return -EINVAL;
-
-	/* GPIO output magic */
-	data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
-
-	data &= ~(1 << gpio);
-	data |= (val & 1) << gpio;
-
-	ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
-
-	return 0;
-}
-
-/*
- * Initialize the GPIO interrupt (RFKill switch)
- */
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
-		u32 interrupt_level)
-{
-	u32 data;
-
-	ATH5K_TRACE(ah->ah_sc);
-	if (gpio > AR5K_NUM_GPIO)
-		return;
-
-	/*
-	 * Set the GPIO interrupt
-	 */
-	data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
-		~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
-		AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
-		(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
-
-	ath5k_hw_reg_write(ah, interrupt_level ? data :
-		(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
-
-	ah->ah_imr |= AR5K_IMR_GPIO;
-
-	/* Enable GPIO interrupts */
-	AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
-}
-
-
-
-
-/****************\
-  Misc functions
-\****************/
-
-int ath5k_hw_get_capability(struct ath5k_hw *ah,
-		enum ath5k_capability_type cap_type,
-		u32 capability, u32 *result)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	switch (cap_type) {
-	case AR5K_CAP_NUM_TXQUEUES:
-		if (result) {
-			if (ah->ah_version == AR5K_AR5210)
-				*result = AR5K_NUM_TX_QUEUES_NOQCU;
-			else
-				*result = AR5K_NUM_TX_QUEUES;
-			goto yes;
-		}
-	case AR5K_CAP_VEOL:
-		goto yes;
-	case AR5K_CAP_COMPRESSION:
-		if (ah->ah_version == AR5K_AR5212)
-			goto yes;
-		else
-			goto no;
-	case AR5K_CAP_BURST:
-		goto yes;
-	case AR5K_CAP_TPC:
-		goto yes;
-	case AR5K_CAP_BSSIDMASK:
-		if (ah->ah_version == AR5K_AR5212)
-			goto yes;
-		else
-			goto no;
-	case AR5K_CAP_XR:
-		if (ah->ah_version == AR5K_AR5212)
-			goto yes;
-		else
-			goto no;
-	default:
-		goto no;
-	}
-
-no:
-	return -EINVAL;
-yes:
-	return 0;
-}
-
-static int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
-		u16 assoc_id)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	if (ah->ah_version == AR5K_AR5210) {
-		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
-			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
-		return 0;
-	}
-
-	return -EIO;
-}
-
-static int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
-{
-	ATH5K_TRACE(ah->ah_sc);
-
-	if (ah->ah_version == AR5K_AR5210) {
-		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
-			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
-		return 0;
-	}
-
-	return -EIO;
-}
diff --git a/drivers/net/wireless/ath5k/hw.h b/drivers/net/wireless/ath5k/hw.h
deleted file mode 100644
index 64fca8d..0000000
--- a/drivers/net/wireless/ath5k/hw.h
+++ /dev/null
@@ -1,616 +0,0 @@
-/*
- * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2007 Matthew W. S. Bell  <mentor@madwifi.org>
- * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-
-/*
- * Gain settings
- */
-
-enum ath5k_rfgain {
-	AR5K_RFGAIN_INACTIVE = 0,
-	AR5K_RFGAIN_READ_REQUESTED,
-	AR5K_RFGAIN_NEED_CHANGE,
-};
-
-#define AR5K_GAIN_CRN_FIX_BITS_5111		4
-#define AR5K_GAIN_CRN_FIX_BITS_5112		7
-#define AR5K_GAIN_CRN_MAX_FIX_BITS		AR5K_GAIN_CRN_FIX_BITS_5112
-#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN		15
-#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN		20
-#define AR5K_GAIN_CCK_PROBE_CORR		5
-#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA		15
-#define AR5K_GAIN_STEP_COUNT			10
-#define AR5K_GAIN_PARAM_TX_CLIP			0
-#define AR5K_GAIN_PARAM_PD_90			1
-#define AR5K_GAIN_PARAM_PD_84			2
-#define AR5K_GAIN_PARAM_GAIN_SEL		3
-#define AR5K_GAIN_PARAM_MIX_ORN			0
-#define AR5K_GAIN_PARAM_PD_138			1
-#define AR5K_GAIN_PARAM_PD_137			2
-#define AR5K_GAIN_PARAM_PD_136			3
-#define AR5K_GAIN_PARAM_PD_132			4
-#define AR5K_GAIN_PARAM_PD_131			5
-#define AR5K_GAIN_PARAM_PD_130			6
-#define AR5K_GAIN_CHECK_ADJUST(_g) 		\
-	((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
-
-struct ath5k_gain_opt_step {
-	s16				gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
-	s32				gos_gain;
-};
-
-struct ath5k_gain {
-	u32			g_step_idx;
-	u32			g_current;
-	u32			g_target;
-	u32			g_low;
-	u32			g_high;
-	u32			g_f_corr;
-	u32			g_active;
-	const struct ath5k_gain_opt_step	*g_step;
-};
-
-
-/*
- * HW SPECIFIC STRUCTS
- */
-
-/* Some EEPROM defines */
-#define AR5K_EEPROM_EEP_SCALE		100
-#define AR5K_EEPROM_EEP_DELTA		10
-#define AR5K_EEPROM_N_MODES		3
-#define AR5K_EEPROM_N_5GHZ_CHAN		10
-#define AR5K_EEPROM_N_2GHZ_CHAN		3
-#define AR5K_EEPROM_MAX_CHAN		10
-#define AR5K_EEPROM_N_PCDAC		11
-#define AR5K_EEPROM_N_TEST_FREQ		8
-#define AR5K_EEPROM_N_EDGES		8
-#define AR5K_EEPROM_N_INTERCEPTS	11
-#define AR5K_EEPROM_FREQ_M(_v)		AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
-#define AR5K_EEPROM_PCDAC_M		0x3f
-#define AR5K_EEPROM_PCDAC_START		1
-#define AR5K_EEPROM_PCDAC_STOP		63
-#define AR5K_EEPROM_PCDAC_STEP		1
-#define AR5K_EEPROM_NON_EDGE_M		0x40
-#define AR5K_EEPROM_CHANNEL_POWER	8
-#define AR5K_EEPROM_N_OBDB		4
-#define AR5K_EEPROM_OBDB_DIS		0xffff
-#define AR5K_EEPROM_CHANNEL_DIS		0xff
-#define AR5K_EEPROM_SCALE_OC_DELTA(_x)	(((_x) * 2) / 10)
-#define AR5K_EEPROM_N_CTLS(_v)		AR5K_EEPROM_OFF(_v, 16, 32)
-#define AR5K_EEPROM_MAX_CTLS		32
-#define AR5K_EEPROM_N_XPD_PER_CHANNEL	4
-#define AR5K_EEPROM_N_XPD0_POINTS	4
-#define AR5K_EEPROM_N_XPD3_POINTS	3
-#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ	35
-#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ	55
-#define AR5K_EEPROM_POWER_M		0x3f
-#define AR5K_EEPROM_POWER_MIN		0
-#define AR5K_EEPROM_POWER_MAX		3150
-#define AR5K_EEPROM_POWER_STEP		50
-#define AR5K_EEPROM_POWER_TABLE_SIZE	64
-#define AR5K_EEPROM_N_POWER_LOC_11B	4
-#define AR5K_EEPROM_N_POWER_LOC_11G	6
-#define AR5K_EEPROM_I_GAIN		10
-#define AR5K_EEPROM_CCK_OFDM_DELTA	15
-#define AR5K_EEPROM_N_IQ_CAL		2
-
-/* Struct to hold EEPROM calibration data */
-struct ath5k_eeprom_info {
-	u16	ee_magic;
-	u16	ee_protect;
-	u16	ee_regdomain;
-	u16	ee_version;
-	u16	ee_header;
-	u16	ee_ant_gain;
-	u16	ee_misc0;
-	u16	ee_misc1;
-	u16	ee_cck_ofdm_gain_delta;
-	u16	ee_cck_ofdm_power_delta;
-	u16	ee_scaled_cck_delta;
-
-	/* Used for tx thermal adjustment (eeprom_init, rfregs) */
-	u16	ee_tx_clip;
-	u16	ee_pwd_84;
-	u16	ee_pwd_90;
-	u16	ee_gain_select;
-
-	/* RF Calibration settings (reset, rfregs) */
-	u16	ee_i_cal[AR5K_EEPROM_N_MODES];
-	u16	ee_q_cal[AR5K_EEPROM_N_MODES];
-	u16	ee_fixed_bias[AR5K_EEPROM_N_MODES];
-	u16	ee_turbo_max_power[AR5K_EEPROM_N_MODES];
-	u16	ee_xr_power[AR5K_EEPROM_N_MODES];
-	u16	ee_switch_settling[AR5K_EEPROM_N_MODES];
-	u16	ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
-	u16	ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
-	u16	ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
-	u16	ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
-	u16	ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
-	u16	ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
-	u16	ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
-	u16	ee_thr_62[AR5K_EEPROM_N_MODES];
-	u16	ee_xlna_gain[AR5K_EEPROM_N_MODES];
-	u16	ee_xpd[AR5K_EEPROM_N_MODES];
-	u16	ee_x_gain[AR5K_EEPROM_N_MODES];
-	u16	ee_i_gain[AR5K_EEPROM_N_MODES];
-	u16	ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
-
-	/* Unused */
-	u16	ee_false_detect[AR5K_EEPROM_N_MODES];
-	u16	ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
-	u16	ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/
-
-	/* Conformance test limits (Unused) */
-	u16	ee_ctls;
-	u16	ee_ctl[AR5K_EEPROM_MAX_CTLS];
-
-	/* Noise Floor Calibration settings */
-	s16	ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
-	s8	ee_adc_desired_size[AR5K_EEPROM_N_MODES];
-	s8	ee_pga_desired_size[AR5K_EEPROM_N_MODES];
-};
-
-/*
- * Internal RX/TX descriptor structures
- * (rX: reserved fields possibly used by future versions of the ar5k chipset)
- */
-
-/*
- * common hardware RX control descriptor
- */
-struct ath5k_hw_rx_ctl {
-	u32	rx_control_0; /* RX control word 0 */
-
-#define AR5K_DESC_RX_CTL0			0x00000000
-
-	u32	rx_control_1; /* RX control word 1 */
-
-#define AR5K_DESC_RX_CTL1_BUF_LEN		0x00000fff
-#define AR5K_DESC_RX_CTL1_INTREQ		0x00002000
-} __packed;
-
-/*
- * common hardware RX status descriptor
- * 5210/11 and 5212 differ only in the flags defined below
- */
-struct ath5k_hw_rx_status {
-	u32	rx_status_0; /* RX status word 0 */
-	u32	rx_status_1; /* RX status word 1 */
-} __packed;
-
-/* 5210/5211 */
-#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN		0x00000fff
-#define AR5K_5210_RX_DESC_STATUS0_MORE			0x00001000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE		0x00078000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S	15
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x07f80000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	19
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA	0x38000000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	27
-#define AR5K_5210_RX_DESC_STATUS1_DONE			0x00000001
-#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
-#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR		0x00000004
-#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN		0x00000008
-#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000010
-#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR		0x000000e0
-#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S		5
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX		0x00007e00
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S		9
-#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x0fff8000
-#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	15
-#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS	0x10000000
-
-/* 5212 */
-#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN		0x00000fff
-#define AR5K_5212_RX_DESC_STATUS0_MORE			0x00001000
-#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR	0x00002000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE		0x000f8000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S	15
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x0ff00000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	20
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA	0xf0000000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	28
-#define AR5K_5212_RX_DESC_STATUS1_DONE			0x00000001
-#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
-#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR		0x00000004
-#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000008
-#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR		0x00000010
-#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR		0x00000020
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX		0x0000fe00
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S		9
-#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x7fff0000
-#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	16
-#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS	0x80000000
-
-/*
- * common hardware RX error descriptor
- */
-struct ath5k_hw_rx_error {
-	u32	rx_error_0; /* RX error word 0 */
-
-#define AR5K_RX_DESC_ERROR0			0x00000000
-
-	u32	rx_error_1; /* RX error word 1 */
-
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE	0x0000ff00
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S	8
-} __packed;
-
-#define AR5K_DESC_RX_PHY_ERROR_NONE		0x00
-#define AR5K_DESC_RX_PHY_ERROR_TIMING		0x20
-#define AR5K_DESC_RX_PHY_ERROR_PARITY		0x40
-#define AR5K_DESC_RX_PHY_ERROR_RATE		0x60
-#define AR5K_DESC_RX_PHY_ERROR_LENGTH		0x80
-#define AR5K_DESC_RX_PHY_ERROR_64QAM		0xa0
-#define AR5K_DESC_RX_PHY_ERROR_SERVICE		0xc0
-#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR	0xe0
-
-/*
- * 5210/5211 hardware 2-word TX control descriptor
- */
-struct ath5k_hw_2w_tx_ctl {
-	u32	tx_control_0; /* TX control word 0 */
-
-#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN		0x0003f000 /*[5210 ?]*/
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S	12
-#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE		0x003c0000
-#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S	18
-#define AR5K_2W_TX_DESC_CTL0_RTSENA		0x00400000
-#define AR5K_2W_TX_DESC_CTL0_CLRDMASK		0x01000000
-#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET	0x00800000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_VEOL		0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE		0x1c000000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S	26
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210	0x02000000
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211	0x1e000000
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT	(ah->ah_version == AR5K_AR5210 ? \
-						AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
-						AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
-#define AR5K_2W_TX_DESC_CTL0_INTREQ		0x20000000
-#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
-
-	u32	tx_control_1; /* TX control word 1 */
-
-#define AR5K_2W_TX_DESC_CTL1_BUF_LEN		0x00000fff
-#define AR5K_2W_TX_DESC_CTL1_MORE		0x00001000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210	0x0007e000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211	0x000fe000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX	(ah->ah_version == AR5K_AR5210 ? \
-						AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \
-						AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE		0x00700000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S	20
-#define AR5K_2W_TX_DESC_CTL1_NOACK		0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION	0xfff80000 /*[5210 ?]*/
-} __packed;
-
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL   0x00
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM     0x04
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL   0x08
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS     0x10
-
-/*
- * 5212 hardware 4-word TX control descriptor
- */
-struct ath5k_hw_4w_tx_ctl {
-	u32	tx_control_0; /* TX control word 0 */
-
-#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER		0x003f0000
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S	16
-#define AR5K_4W_TX_DESC_CTL0_RTSENA		0x00400000
-#define AR5K_4W_TX_DESC_CTL0_VEOL		0x00800000
-#define AR5K_4W_TX_DESC_CTL0_CLRDMASK		0x01000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT	0x1e000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
-#define AR5K_4W_TX_DESC_CTL0_INTREQ		0x20000000
-#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
-#define AR5K_4W_TX_DESC_CTL0_CTSENA		0x80000000
-
-	u32	tx_control_1; /* TX control word 1 */
-
-#define AR5K_4W_TX_DESC_CTL1_BUF_LEN		0x00000fff
-#define AR5K_4W_TX_DESC_CTL1_MORE		0x00001000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX	0x000fe000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
-#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE		0x00f00000
-#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S	20
-#define AR5K_4W_TX_DESC_CTL1_NOACK		0x01000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_PROC		0x06000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S	25
-#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN	0x18000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S	27
-#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN	0x60000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S	29
-
-	u32	tx_control_2; /* TX control word 2 */
-
-#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION		0x00007fff
-#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE	0x00008000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0		0x000f0000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S		16
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1		0x00f00000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S		20
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2		0x0f000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S		24
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3		0xf0000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S		28
-
-	u32	tx_control_3; /* TX control word 3 */
-
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0		0x0000001f
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1		0x000003e0
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S	5
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2		0x00007c00
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S	10
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3		0x000f8000
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S	15
-#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE	0x01f00000
-#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S	20
-} __packed;
-
-/*
- * Common TX status descriptor
- */
-struct ath5k_hw_tx_status {
-	u32	tx_status_0; /* TX status word 0 */
-
-#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK	0x00000001
-#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES	0x00000002
-#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN	0x00000004
-#define AR5K_DESC_TX_STATUS0_FILTERED		0x00000008
-/*???
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT	0x000000f0
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S	4
-*/
-#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT	0x000000f0
-#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S	4
-/*???
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT	0x00000f00
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S	8
-*/
-#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT	0x00000f00
-#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S	8
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT	0x0000f000
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S	12
-#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP	0xffff0000
-#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S	16
-
-	u32	tx_status_1; /* TX status word 1 */
-
-#define AR5K_DESC_TX_STATUS1_DONE		0x00000001
-#define AR5K_DESC_TX_STATUS1_SEQ_NUM		0x00001ffe
-#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S		1
-#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH	0x001fe000
-#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S	13
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX	0x00600000
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S	21
-#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS	0x00800000
-#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA	0x01000000
-} __packed;
-
-
-/*
- * 5210/5211 hardware TX descriptor
- */
-struct ath5k_hw_5210_tx_desc {
-	struct ath5k_hw_2w_tx_ctl	tx_ctl;
-	struct ath5k_hw_tx_status	tx_stat;
-} __packed;
-
-/*
- * 5212 hardware TX descriptor
- */
-struct ath5k_hw_5212_tx_desc {
-	struct ath5k_hw_4w_tx_ctl	tx_ctl;
-	struct ath5k_hw_tx_status	tx_stat;
-} __packed;
-
-/*
- * common hardware RX descriptor
- */
-struct ath5k_hw_all_rx_desc {
-	struct ath5k_hw_rx_ctl			rx_ctl;
-	union {
-		struct ath5k_hw_rx_status	rx_stat;
-		struct ath5k_hw_rx_error	rx_err;
-	} u;
-} __packed;
-
-
-/*
- * AR5K REGISTER ACCESS
- */
-
-/*Swap RX/TX Descriptor for big endian archs*/
-#if defined(__BIG_ENDIAN)
-#define AR5K_INIT_CFG	(		\
-	AR5K_CFG_SWTD | AR5K_CFG_SWRD	\
-)
-#else
-#define AR5K_INIT_CFG	0x00000000
-#endif
-
-/*#define AR5K_REG_READ(_reg)	ath5k_hw_reg_read(ah, _reg)
-
-#define AR5K_REG_WRITE(_reg, _val)	ath5k_hw_reg_write(ah, _val, _reg)*/
-
-#define AR5K_REG_SM(_val, _flags)					\
-	(((_val) << _flags##_S) & (_flags))
-
-#define AR5K_REG_MS(_val, _flags)					\
-	(((_val) & (_flags)) >> _flags##_S)
-
-/* Some registers can hold multiple values of interest. For this
- * reason when we want to write to these registers we must first
- * retrieve the values which we do not want to clear (let's call this
- * old_data) and then set the register with this and our new_value:
- * ( old_data | new_value) */
-#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val)			\
-	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
-	    (((_val) << _flags##_S) & (_flags)), _reg)
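/*
 * Worked example: AR5K_REG_SM() shifts a value into a register field and
 * AR5K_REG_MS() extracts it again, relying on every field mask FOO having a
 * companion shift FOO_S. Using the short-retry field defined earlier in this
 * header:
 *
 *	AR5K_REG_SM(3, AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT)
 *		== (3 << 4) & 0x000000f0 == 0x30
 *	AR5K_REG_MS(0x30, AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT)
 *		== (0x30 & 0x000000f0) >> 4 == 3
 *
 * AR5K_REG_WRITE_BITS() combines the preserved bits (old_data) with such a
 * shifted value before writing the register back.
 */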
-
-#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask)			\
-	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) &		\
-			(_mask)) | (_flags), _reg)
-
-#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags)				\
-	ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
-
-#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags)			\
-	ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
-
-#define AR5K_PHY_WRITE(ah, _reg, _val)					\
-	ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
-
-#define AR5K_PHY_READ(ah, _reg)					\
-	ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
-
-#define AR5K_REG_WAIT(_i) do {						\
-	if (_i % 64)							\
-		udelay(1);						\
-} while (0)
-
-#define AR5K_EEPROM_READ(_o, _v) do {					\
-	if ((ret = ath5k_hw_eeprom_read(ah, (_o), &(_v))) != 0)	\
-		return (ret);						\
-} while (0)
-
-#define AR5K_EEPROM_READ_HDR(_o, _v)					\
-	AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v);	\
-
-/* Read status of selected queue */
-#define AR5K_REG_READ_Q(ah, _reg, _queue)				\
-	(ath5k_hw_reg_read(ah, _reg) & (1 << _queue))			\
-
-#define AR5K_REG_WRITE_Q(ah, _reg, _queue)				\
-	ath5k_hw_reg_write(ah, (1 << _queue), _reg)
-
-#define AR5K_Q_ENABLE_BITS(_reg, _queue) do {				\
-	_reg |= 1 << _queue;						\
-} while (0)
-
-#define AR5K_Q_DISABLE_BITS(_reg, _queue) do {				\
-	_reg &= ~(1 << _queue);						\
-} while (0)
-
-#define AR5K_LOW_ID(_a)(				\
-(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24	\
-)
-
-#define AR5K_HIGH_ID(_a)	((_a)[4] | (_a)[5] << 8)
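/*
 * These two macros pack a 6-byte MAC address into the 32-bit "low" and
 * 16-bit "high" halves expected by the hardware, least significant octet
 * first. For 00:11:22:33:44:55:
 *
 *	AR5K_LOW_ID(mac)  == 0x33221100
 *	AR5K_HIGH_ID(mac) == 0x00005544
 *
 * which is what ath5k_hw_set_key_lladdr() earlier in this patch writes into
 * AR5K_KEYTABLE_MAC0/MAC1 (the high half OR'd with AR5K_KEYTABLE_VALID).
 */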
-
-/*
- * Initial register values
- */
-
-/*
- * Common initial register values
- */
-#define AR5K_INIT_MODE				CHANNEL_B
-
-#define AR5K_INIT_TX_LATENCY			502
-#define AR5K_INIT_USEC				39
-#define AR5K_INIT_USEC_TURBO			79
-#define AR5K_INIT_USEC_32			31
-#define AR5K_INIT_CARR_SENSE_EN			1
-#define AR5K_INIT_PROG_IFS			920
-#define AR5K_INIT_PROG_IFS_TURBO		960
-#define AR5K_INIT_EIFS				3440
-#define AR5K_INIT_EIFS_TURBO			6880
-#define AR5K_INIT_SLOT_TIME			396
-#define AR5K_INIT_SLOT_TIME_TURBO		480
-#define AR5K_INIT_ACK_CTS_TIMEOUT		1024
-#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO		0x08000800
-#define AR5K_INIT_SIFS				560
-#define AR5K_INIT_SIFS_TURBO			480
-#define AR5K_INIT_SH_RETRY			10
-#define AR5K_INIT_LG_RETRY			AR5K_INIT_SH_RETRY
-#define AR5K_INIT_SSH_RETRY			32
-#define AR5K_INIT_SLG_RETRY			AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY			10
-#define AR5K_INIT_TOPS				8
-#define AR5K_INIT_RXNOFRM			8
-#define AR5K_INIT_RPGTO				0
-#define AR5K_INIT_TXNOFRM			0
-#define AR5K_INIT_BEACON_PERIOD			65535
-#define AR5K_INIT_TIM_OFFSET			0
-#define AR5K_INIT_BEACON_EN			0
-#define AR5K_INIT_RESET_TSF			0
-
-#define AR5K_INIT_TRANSMIT_LATENCY		(			\
-	(AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) |	\
-	(AR5K_INIT_USEC)						\
-)
-#define AR5K_INIT_TRANSMIT_LATENCY_TURBO	(			\
-	(AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) |	\
-	(AR5K_INIT_USEC_TURBO)						\
-)
-#define AR5K_INIT_PROTO_TIME_CNTRL		(			\
-	(AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) |	\
-	(AR5K_INIT_PROG_IFS)						\
-)
-#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO	(			\
-	(AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
-	(AR5K_INIT_PROG_IFS_TURBO)					\
-)
-#define AR5K_INIT_BEACON_CONTROL		(			\
-	(AR5K_INIT_RESET_TSF << 24) | (AR5K_INIT_BEACON_EN << 23) |	\
-	(AR5K_INIT_TIM_OFFSET << 16) | (AR5K_INIT_BEACON_PERIOD)	\
-)
-
-/*
- * Non-common initial register values which have to be loaded into the
- * card at boot time and after each reset.
- */
-
-/* Register dumps are done per operation mode */
-#define AR5K_INI_RFGAIN_5GHZ		0
-#define AR5K_INI_RFGAIN_2GHZ		1
-
-#define AR5K_INI_VAL_11A		0
-#define AR5K_INI_VAL_11A_TURBO		1
-#define AR5K_INI_VAL_11B		2
-#define AR5K_INI_VAL_11G		3
-#define AR5K_INI_VAL_11G_TURBO		4
-#define AR5K_INI_VAL_XR			0
-#define AR5K_INI_VAL_MAX		5
-
-#define AR5K_RF5111_INI_RF_MAX_BANKS	AR5K_MAX_RF_BANKS
-#define AR5K_RF5112_INI_RF_MAX_BANKS	AR5K_MAX_RF_BANKS
-
-static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
-{
-	u32 retval = 0, bit, i;
-
-	for (i = 0; i < bits; i++) {
-		bit = (val >> i) & 1;
-		retval = (retval << 1) | bit;
-	}
-
-	return retval;
-}
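/*
 * Worked example: ath5k_hw_bitswap() reverses the low 'bits' bits of 'val',
 * e.g. ath5k_hw_bitswap(0xb, 4) == 0xd (binary 1011 -> 1101). The RF-bank
 * helpers elsewhere in the driver use it where radio register values are
 * assembled in reversed bit order.
 */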
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 2806b21..ea2e1a2 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1,9 +1,9 @@
 /*
  * Initial register settings functions
  *
- * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -20,13 +20,9 @@
  */
 
 #include "ath5k.h"
-#include "base.h"
 #include "reg.h"
-
-/*
- * MAC/PHY REGISTERS
- */
-
+#include "debug.h"
+#include "base.h"
 
 /*
  * Mode-independent initial register writes
@@ -65,10 +61,10 @@
 	{ AR5K_TXCFG,		AR5K_DMASIZE_128B },
 	{ AR5K_RXCFG,		AR5K_DMASIZE_128B },
 	{ AR5K_CFG,		AR5K_INIT_CFG },
-	{ AR5K_TOPS,		AR5K_INIT_TOPS },
-	{ AR5K_RXNOFRM,		AR5K_INIT_RXNOFRM },
-	{ AR5K_RPGTO,		AR5K_INIT_RPGTO },
-	{ AR5K_TXNOFRM,		AR5K_INIT_TXNOFRM },
+	{ AR5K_TOPS,		8 },
+	{ AR5K_RXNOFRM,		8 },
+	{ AR5K_RPGTO,		0 },
+	{ AR5K_TXNOFRM,		0 },
 	{ AR5K_SFR,		0 },
 	{ AR5K_MIBC,		0 },
 	{ AR5K_MISC,		0 },
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
new file mode 100644
index 0000000..a47df9a
--- /dev/null
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Matthew W. S. Bell  <mentor@madwifi.org>
+ * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*********************************\
+* Protocol Control Unit Functions *
+\*********************************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*******************\
+* Generic functions *
+\*******************/
+
+/**
+ * ath5k_hw_set_opmode - Set PCU operating mode
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Initialize PCU for the various operating modes (AP/STA etc)
+ *
+ * NOTE: ah->ah_op_mode must be set before calling this.
+ */
+int ath5k_hw_set_opmode(struct ath5k_hw *ah)
+{
+	u32 pcu_reg, beacon_reg, low_id, high_id;
+
+	pcu_reg = 0;
+	beacon_reg = 0;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	switch (ah->ah_op_mode) {
+	case NL80211_IFTYPE_ADHOC:
+		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
+			(ah->ah_version == AR5K_AR5210 ?
+				AR5K_STA_ID1_NO_PSPOLL : 0);
+		beacon_reg |= AR5K_BCR_ADHOC;
+		break;
+
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
+		pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
+			(ah->ah_version == AR5K_AR5210 ?
+				AR5K_STA_ID1_NO_PSPOLL : 0);
+		beacon_reg |= AR5K_BCR_AP;
+		break;
+
+	case NL80211_IFTYPE_STATION:
+		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
+			(ah->ah_version == AR5K_AR5210 ?
+				AR5K_STA_ID1_PWR_SV : 0);
+	case NL80211_IFTYPE_MONITOR:
+		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
+			(ah->ah_version == AR5K_AR5210 ?
+				AR5K_STA_ID1_NO_PSPOLL : 0);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Set PCU registers
+	 */
+	low_id = AR5K_LOW_ID(ah->ah_sta_id);
+	high_id = AR5K_HIGH_ID(ah->ah_sta_id);
+	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
+	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
+
+	/*
+	 * Set Beacon Control Register on 5210
+	 */
+	if (ah->ah_version == AR5K_AR5210)
+		ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
+
+	return 0;
+}
+
+/**
+ * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
+ *
+ * @ah: The &struct ath5k_hw
+ * @stats: The &struct ieee80211_low_level_stats we use to track
+ * statistics in the driver
+ *
+ * Reads MIB counters from PCU and updates sw statistics. Must be
+ * called after a MIB interrupt.
+ */
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
+		struct ieee80211_low_level_stats  *stats)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* Read-And-Clear */
+	stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+	stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+	stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+	stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+
+	/* XXX: Should we use this to track beacon count ?
+	 * -we read it anyway to clear the register */
+	ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
+
+	/* Reset profile count registers on 5212*/
+	if (ah->ah_version == AR5K_AR5212) {
+		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
+		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
+		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
+		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
+	}
+}
+
+/**
+ * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
+ *
+ * @ah: The &struct ath5k_hw
+ * @high: Flag to determine if we want to use a high transmission rate
+ * for ACKs or not
+ *
+ * If the high flag is set, we tell hw to use a set of control rates based on
+ * the current transmission rate (check out the control_rates array inside reset.c).
+ * If not, hw just uses the lowest rate available for the current modulation
+ * scheme being used (1Mbit for CCK and 6Mbit for OFDM).
+ */
+void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
+{
+	if (ah->ah_version != AR5K_AR5212)
+		return;
+	else {
+		u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
+		if (high)
+			AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
+		else
+			AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+	}
+}
+
+
+/******************\
+* ACK/CTS Timeouts *
+\******************/
+
+/**
+ * ath5k_hw_get_ack_timeout - Get ACK timeout from PCU in usec
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+
+	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
+			AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+}
+
+/**
+ * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
+ *
+ * @ah: The &struct ath5k_hw
+ * @timeout: Timeout in usec
+ */
+int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
+			ah->ah_turbo) <= timeout)
+		return -EINVAL;
+
+	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
+		ath5k_hw_htoclock(timeout, ah->ah_turbo));
+
+	return 0;
+}
+
+/**
+ * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
+			AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+}
+
+/**
+ * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
+ *
+ * @ah: The &struct ath5k_hw
+ * @timeout: Timeout in usec
+ */
+int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
+			ah->ah_turbo) <= timeout)
+		return -EINVAL;
+
+	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
+			ath5k_hw_htoclock(timeout, ah->ah_turbo));
+
+	return 0;
+}
+
+
+/****************\
+* BSSID handling *
+\****************/
+
+/**
+ * ath5k_hw_get_lladdr - Get station id
+ *
+ * @ah: The &struct ath5k_hw
+ * @mac: The card's mac address
+ *
+ * Copy the current station id (MAC address) from ah->ah_sta_id
+ * into the buffer provided (just a memcpy).
+ *
+ * TODO: Remove it once we merge ath5k_softc and ath5k_hw
+ */
+void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	memcpy(mac, ah->ah_sta_id, ETH_ALEN);
+}
+
+/**
+ * ath5k_hw_set_lladdr - Set station id
+ *
+ * @ah: The &struct ath5k_hw
+ * @mac: The card's mac address
+ *
+ * Set station id on hw using the provided mac address
+ */
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+{
+	u32 low_id, high_id;
+
+	ATH5K_TRACE(ah->ah_sc);
+	/* Set new station ID */
+	memcpy(ah->ah_sta_id, mac, ETH_ALEN);
+
+	low_id = AR5K_LOW_ID(mac);
+	high_id = AR5K_HIGH_ID(mac);
+
+	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
+	ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
+
+	return 0;
+}
+
+/**
+ * ath5k_hw_set_associd - Set BSSID for association
+ *
+ * @ah: The &struct ath5k_hw
+ * @bssid: BSSID
+ * @assoc_id: Assoc id
+ *
+ * Sets the BSSID which triggers the "SME Join" operation
+ */
+void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
+{
+	u32 low_id, high_id;
+	u16 tim_offset = 0;
+
+	/*
+	 * Set simple BSSID mask on 5212
+	 */
+	if (ah->ah_version == AR5K_AR5212) {
+		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
+		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
+	}
+
+	/*
+	 * Set BSSID which triggers the "SME Join" operation
+	 */
+	low_id = AR5K_LOW_ID(bssid);
+	high_id = AR5K_HIGH_ID(bssid);
+	ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
+	ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
+				AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
+
+	if (assoc_id == 0) {
+		ath5k_hw_disable_pspoll(ah);
+		return;
+	}
+
+	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
+			tim_offset ? tim_offset + 4 : 0);
+
+	ath5k_hw_enable_pspoll(ah, NULL, 0);
+}
+
+/**
+ * ath5k_hw_set_bssid_mask - Set the BSSID mask used to filter received frames
+ *
+ * @ah: the &struct ath5k_hw
+ * @mask: the bssid_mask, a u8 array of size ETH_ALEN
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * NOTE: This is a simple filter and does *not* filter out all
+ * relevant frames. Some frames that are not for us might still get ACKed
+ * by the PCU because they just match the mask.
+ *
+ * When handling multiple BSSes you can get the BSSID mask by computing the
+ * set of ~(MAC XOR BSSID) for all BSSIDs we handle.
+ *
+ * When you do this you are essentially computing the common bits of all your
+ * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
+ * the MAC address to obtain the relevant bits and compare the result with
+ * (frame's BSSID & mask) to see if they match.
+ */
+/*
+ * Simple example: on your card you have two BSSes you have created with
+ * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
+ * There is another BSSID-03 but you are not part of it. For simplicity's sake,
+ * assuming only 4 bits for a MAC address and for BSSIDs, you can then have:
+ *
+ *                  \
+ * MAC:        0001 |
+ * BSSID-01:   0100 | --> Belongs to us
+ * BSSID-02:   1001 |
+ *                  /
+ * -------------------
+ * BSSID-03:   0110  | --> External
+ * -------------------
+ *
+ * Our bssid_mask would then be:
+ *
+ *             On loop iteration for BSSID-01:
+ *             ~(0001 ^ 0100)  -> ~(0101)
+ *                             ->   1010
+ *             bssid_mask      =    1010
+ *
+ *             On loop iteration for BSSID-02:
+ *             bssid_mask &= ~(0001   ^   1001)
+ *             bssid_mask =   (1010)  & ~(0001 ^ 1001)
+ *             bssid_mask =   (1010)  & ~(1000)
+ *             bssid_mask =   (1010)  &  (0111)
+ *             bssid_mask =   0010
+ *
+ * A bssid_mask of 0010 means "only pay attention to the second least
+ * significant bit". This is because it's the only bit common
+ * amongst the MAC and all BSSIDs we support. To find out what the real
+ * common bit is we can simply "&" the bssid_mask now with any BSSID we have
+ * or our MAC address (we assume the hardware uses the MAC address).
+ *
+ * Now, suppose there's an incoming frame for BSSID-03:
+ *
+ * IFRAME-01:  0110
+ *
+ * An easy eye-inspection already should tell you that this frame
+ * will not pass our check. This is because the bssid_mask tells the
+ * hardware to only look at the second least significant bit, and while the
+ * common bit amongst the MAC and BSSIDs is 0, this frame has its 2nd LSB
+ * set to 1, which does not match 0.
+ *
+ * So with IFRAME-01 we *assume* the hardware will do:
+ *
+ *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0010) == 0000 ? 1 : 0;
+ *  --> allow = 0
+ *
+ * Let's now test a frame that should work:
+ *
+ * IFRAME-02:  0001 (we should allow)
+ *
+ * Here the masked frame address and the masked MAC both come out as 0000:
+ *
+ *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0000) == (0000)
+ *  --> allow = 1
+ *
+ * Other examples:
+ *
+ * IFRAME-03:  0100 --> allowed
+ * IFRAME-04:  1001 --> allowed
+ * IFRAME-05:  1101 --> allowed, but it's not for us!
+ *
+ */
+int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+{
+	u32 low_id, high_id;
+	ATH5K_TRACE(ah->ah_sc);
+
+	if (ah->ah_version == AR5K_AR5212) {
+		low_id = AR5K_LOW_ID(mask);
+		high_id = AR5K_HIGH_ID(mask);
+
+		ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
+		ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
+
+		return 0;
+	}
+
+	return -EIO;
+}
+
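
The mask derivation described in the comment above can be written as a small
helper; a minimal sketch follows, assuming a hypothetical
example_compute_bssid_mask() helper (only ath5k_hw_set_bssid_mask() comes from
this file):

static void example_compute_bssid_mask(struct ath5k_hw *ah, const u8 *mac,
				       const u8 bssids[][ETH_ALEN], int nbssids)
{
	u8 mask[ETH_ALEN];
	int b, i;

	/* Start with every bit significant, then keep only the bits that
	 * are common between the MAC address and every BSSID we serve */
	memset(mask, 0xff, ETH_ALEN);
	for (b = 0; b < nbssids; b++)
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] &= ~(mac[i] ^ bssids[b][i]);

	/* Program the resulting mask (returns -EIO on pre-5212 parts) */
	ath5k_hw_set_bssid_mask(ah, mask);
}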
+
+/************\
+* RX Control *
+\************/
+
+/**
+ * ath5k_hw_start_rx_pcu - Start RX engine
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Starts RX engine on PCU so that hw can process RXed frames
+ * (ACK etc).
+ *
+ * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
+ * TODO: Init ANI here
+ */
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/**
+ * ath5k_hw_stop_rx_pcu - Stop RX engine
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Stops RX engine on PCU
+ *
+ * TODO: Detach ANI here
+ */
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/*
+ * Set multicast filter
+ */
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	/* Set the multicast filter */
+	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
+	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
+}
+
+/*
+ * Set multicast filter by index
+ */
+int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
+{
+
+	ATH5K_TRACE(ah->ah_sc);
+	if (index >= 64)
+		return -EINVAL;
+	else if (index >= 32)
+		AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
+				(1 << (index - 32)));
+	else
+		AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
+
+	return 0;
+}
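
A usage sketch for the index mapping above; the index value 37 is arbitrary
and chosen only for illustration:

	/* Index 37 is >= 32, so bit (37 - 32) = 5 of AR5K_MCAST_FILTER1 gets
	 * set; indices 0-31 would set the matching bit in AR5K_MCAST_FILTER0 */
	ath5k_hw_set_mcast_filter_idx(ah, 37);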
+
+/*
+ * Clear Multicast filter by index
+ */
+int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
+{
+
+	ATH5K_TRACE(ah->ah_sc);
+	if (index >= 64)
+		return -EINVAL;
+	else if (index >= 32)
+		AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
+				(1 << (index - 32)));
+	else
+		AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
+
+	return 0;
+}
+
+/**
+ * ath5k_hw_get_rx_filter - Get current rx filter
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Returns the RX filter by reading rx filter and
+ * phy error filter registers. RX filter is used
+ * to set the allowed frame types that PCU will accept
+ * and pass to the driver. For a list of frame types
+ * check out reg.h.
+ */
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+{
+	u32 data, filter = 0;
+
+	ATH5K_TRACE(ah->ah_sc);
+	filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
+
+	/*Radar detection for 5212*/
+	if (ah->ah_version == AR5K_AR5212) {
+		data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
+
+		if (data & AR5K_PHY_ERR_FIL_RADAR)
+			filter |= AR5K_RX_FILTER_RADARERR;
+		if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
+			filter |= AR5K_RX_FILTER_PHYERR;
+	}
+
+	return filter;
+}
+
+/**
+ * ath5k_hw_set_rx_filter - Set rx filter
+ *
+ * @ah: The &struct ath5k_hw
+ * @filter: RX filter mask (see reg.h)
+ *
+ * Sets RX filter register and also handles PHY error filter
+ * register on 5212 and newer chips so that we have proper PHY
+ * error reporting.
+ */
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+{
+	u32 data = 0;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* Set PHY error filter register on 5212*/
+	if (ah->ah_version == AR5K_AR5212) {
+		if (filter & AR5K_RX_FILTER_RADARERR)
+			data |= AR5K_PHY_ERR_FIL_RADAR;
+		if (filter & AR5K_RX_FILTER_PHYERR)
+			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
+	}
+
+	/*
+	 * The AR5210 uses promiscuous mode to detect radar activity
+	 */
+	if (ah->ah_version == AR5K_AR5210 &&
+			(filter & AR5K_RX_FILTER_RADARERR)) {
+		filter &= ~AR5K_RX_FILTER_RADARERR;
+		filter |= AR5K_RX_FILTER_PROM;
+	}
+
+	/*Zero length DMA*/
+	if (data)
+		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
+	else
+		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
+
+	/*Write RX Filter register*/
+	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
+
+	/*Write PHY error filter register on 5212*/
+	if (ah->ah_version == AR5K_AR5212)
+		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
+
+}
+
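
A possible usage sketch for the two functions above; AR5K_RX_FILTER_UCAST,
AR5K_RX_FILTER_BCAST and AR5K_RX_FILTER_BEACON are assumed to come from reg.h,
while only AR5K_RX_FILTER_PHYERR and AR5K_RX_FILTER_RADARERR appear in the
code shown here:

	/* Accept unicast and broadcast frames plus beacons, and ask for PHY
	 * error reporting (routed through AR5K_PHY_ERR_FIL on 5212+) */
	u32 rfilt = AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
		    AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PHYERR;

	ath5k_hw_set_rx_filter(ah, rfilt);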
+
+/****************\
+* Beacon control *
+\****************/
+
+/**
+ * ath5k_hw_get_tsf32 - Get a 32bit TSF
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Returns lower 32 bits of current TSF
+ */
+u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+}
+
+/**
+ * ath5k_hw_get_tsf64 - Get the full 64bit TSF
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Returns the current TSF
+ */
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+{
+	u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+	ATH5K_TRACE(ah->ah_sc);
+
+	return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
+}
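
Reading a 64-bit counter through two 32-bit registers can in principle race
with a carry out of the low word; a common guard, shown here only as a general
sketch (not as what the function above does), is to re-read the high word
until it is stable:

static u64 example_get_tsf64_stable(struct ath5k_hw *ah)
{
	u32 hi, lo;

	/* Retry if the high word changed while the low word was being read */
	do {
		hi = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
		lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
	} while (hi != ath5k_hw_reg_read(ah, AR5K_TSF_U32));

	return ((u64)hi << 32) | lo;
}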
+
+/**
+ * ath5k_hw_reset_tsf - Force a TSF reset
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Forces a TSF reset on PCU
+ */
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+{
+	u32 val;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
+
+	/*
+	 * Each write to the RESET_TSF bit toggles a hardware internal
+	 * signal to reset TSF, but if left high it will cause a TSF reset
+	 * on the next chip reset as well.  Thus we always write the value
+	 * twice to clear the signal.
+	 */
+	ath5k_hw_reg_write(ah, val, AR5K_BEACON);
+	ath5k_hw_reg_write(ah, val, AR5K_BEACON);
+}
+
+/*
+ * Initialize beacon timers
+ */
+void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+{
+	u32 timer1, timer2, timer3;
+
+	ATH5K_TRACE(ah->ah_sc);
+	/*
+	 * Set the additional timers by mode
+	 */
+	switch (ah->ah_op_mode) {
+	case NL80211_IFTYPE_STATION:
+		if (ah->ah_version == AR5K_AR5210) {
+			timer1 = 0xffffffff;
+			timer2 = 0xffffffff;
+		} else {
+			timer1 = 0x0000ffff;
+			timer2 = 0x0007ffff;
+		}
+		break;
+
+	default:
+		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
+		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
+	}
+
+	timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);
+
+	/*
+	 * Set the beacon register and enable all timers.
+	 * (next beacon, DMA beacon, software beacon, ATIM window time)
+	 */
+	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
+	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
+	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
+	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
+
+	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
+			AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
+		AR5K_BEACON);
+}
+
+#if 0
+/*
+ * Set beacon timers
+ */
+int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
+		const struct ath5k_beacon_state *state)
+{
+	u32 cfp_period, next_cfp, dtim, interval, next_beacon;
+
+	/*
+	 * TODO: should be changed through *state
+	 * review struct ath5k_beacon_state struct
+	 *
+	 * XXX: These are used for the cfp period below, are they
+	 * OK? Is it OK for tsf here to be 0 or should we use
+	 * get_tsf ?
+	 */
+	u32 dtim_count = 0; /* XXX */
+	u32 cfp_count = 0; /* XXX */
+	u32 tsf = 0; /* XXX */
+
+	ATH5K_TRACE(ah->ah_sc);
+	/* Return on an invalid beacon state */
+	if (state->bs_interval < 1)
+		return -EINVAL;
+
+	interval = state->bs_interval;
+	dtim = state->bs_dtim_period;
+
+	/*
+	 * PCF support?
+	 */
+	if (state->bs_cfp_period > 0) {
+		/*
+		 * Enable PCF mode and set the CFP
+		 * (Contention Free Period) and timer registers
+		 */
+		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
+			state->bs_interval;
+		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
+			state->bs_interval;
+
+		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
+				AR5K_STA_ID1_DEFAULT_ANTENNA |
+				AR5K_STA_ID1_PCF);
+		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
+		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
+				AR5K_CFP_DUR);
+		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
+						next_cfp)) << 3, AR5K_TIMER2);
+	} else {
+		/* Disable PCF mode */
+		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
+				AR5K_STA_ID1_DEFAULT_ANTENNA |
+				AR5K_STA_ID1_PCF);
+	}
+
+	/*
+	 * Enable the beacon timer register
+	 */
+	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
+
+	/*
+	 * Start the beacon timers
+	 */
+	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
+		~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
+		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
+		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
+		AR5K_BEACON_PERIOD), AR5K_BEACON);
+
+	/*
+	 * Write new beacon miss threshold, if it appears to be valid
+	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
+	 * and return if it's not in range. We can test this by reading the value,
+	 * then writing the largest possible value and seeing which bits stick.
+	 */
+
+	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
+			state->bs_bmiss_threshold);
+
+	/*
+	 * Set sleep control register
+	 * XXX: Didn't find this in the 5210 code but since this register
+	 * also exists in ar5k's 5210 headers, leave it as common code.
+	 */
+	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
+			(state->bs_sleep_duration - 3) << 3);
+
+	/*
+	 * Set enhanced sleep registers on 5212
+	 */
+	if (ah->ah_version == AR5K_AR5212) {
+		if (state->bs_sleep_duration > state->bs_interval &&
+				roundup(state->bs_sleep_duration, interval) ==
+				state->bs_sleep_duration)
+			interval = state->bs_sleep_duration;
+
+		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
+				roundup(state->bs_sleep_duration, dtim) ==
+				state->bs_sleep_duration))
+			dtim = state->bs_sleep_duration;
+
+		if (interval > dtim)
+			return -EINVAL;
+
+		next_beacon = interval == dtim ? state->bs_next_dtim :
+			state->bs_next_beacon;
+
+		ath5k_hw_reg_write(ah,
+			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
+			AR5K_SLEEP0_NEXT_DTIM) |
+			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
+			AR5K_SLEEP0_ENH_SLEEP_EN |
+			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
+
+		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
+			AR5K_SLEEP1_NEXT_TIM) |
+			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
+
+		ath5k_hw_reg_write(ah,
+			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
+			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset beacon timers
+ */
+void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	/*
+	 * Disable beacon timer
+	 */
+	ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
+
+	/*
+	 * Disable some beacon register values
+	 */
+	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
+			AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
+	ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
+}
+
+/*
+ * Wait for beacon queue to finish
+ */
+int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
+{
+	unsigned int i;
+	int ret;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* 5210 doesn't have QCU*/
+	if (ah->ah_version == AR5K_AR5210) {
+		/*
+		 * Wait for beacon queue to finish by checking
+		 * Control Register and Beacon Status Register.
+		 */
+		for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
+			if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
+					||
+			    !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
+				break;
+			udelay(10);
+		}
+
+		/* Timeout... */
+		if (i <= 0) {
+			/*
+			 * Re-schedule the beacon queue
+			 */
+			ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
+			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
+					AR5K_BCR);
+
+			return -EIO;
+		}
+		ret = 0;
+	} else {
+	/*5211/5212*/
+		ret = ath5k_hw_register_timeout(ah,
+			AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
+			AR5K_QCU_STS_FRMPENDCNT, 0, false);
+
+		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
+			return -EIO;
+	}
+
+	return ret;
+}
+#endif
+
+
+/*********************\
+* Key table functions *
+\*********************/
+
+/*
+ * Reset a key entry on the table
+ */
+int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
+{
+	unsigned int i;
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
+
+	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
+		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
+
+	/*
+	 * Set NULL encryption on AR5212+
+	 *
+	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
+	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
+	 *
+	 * Note2: Windows driver (ndiswrapper) sets this to
+	 *        0x00000714 instead of 0x00000007
+	 */
+	if (ah->ah_version > AR5K_AR5211)
+		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
+				AR5K_KEYTABLE_TYPE(entry));
+
+	return 0;
+}
+
+/*
+ * Check if a table entry is valid
+ */
+int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
+
+	/* Check the validation flag at the end of the entry */
+	return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
+		AR5K_KEYTABLE_VALID;
+}
+
+/*
+ * Set a key entry on the table
+ */
+int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
+		const struct ieee80211_key_conf *key, const u8 *mac)
+{
+	unsigned int i;
+	__le32 key_v[5] = {};
+	u32 keytype;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* key->keylen comes in from mac80211 in bytes */
+
+	if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
+		return -EOPNOTSUPP;
+
+	switch (key->keylen) {
+	/* WEP 40-bit   = 40-bit  entered key + 24 bit IV = 64-bit */
+	case 40 / 8:
+		memcpy(&key_v[0], key->key, 5);
+		keytype = AR5K_KEYTABLE_TYPE_40;
+		break;
+
+	/* WEP 104-bit  = 104-bit entered key + 24-bit IV = 128-bit */
+	case 104 / 8:
+		memcpy(&key_v[0], &key->key[0], 6);
+		memcpy(&key_v[2], &key->key[6], 6);
+		memcpy(&key_v[4], &key->key[12], 1);
+		keytype = AR5K_KEYTABLE_TYPE_104;
+		break;
+	/* WEP 128-bit  = 128-bit entered key + 24 bit IV = 152-bit */
+	case 128 / 8:
+		memcpy(&key_v[0], &key->key[0], 6);
+		memcpy(&key_v[2], &key->key[6], 6);
+		memcpy(&key_v[4], &key->key[12], 4);
+		keytype = AR5K_KEYTABLE_TYPE_128;
+		break;
+
+	default:
+		return -EINVAL; /* shouldn't happen */
+	}
+
+	for (i = 0; i < ARRAY_SIZE(key_v); i++)
+		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
+				AR5K_KEYTABLE_OFF(entry, i));
+
+	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
+
+	return ath5k_hw_set_key_lladdr(ah, entry, mac);
+}
+
+int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
+{
+	u32 low_id, high_id;
+
+	ATH5K_TRACE(ah->ah_sc);
+	 /* Invalid entry (key table overflow) */
+	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
+
+	/* MAC may be NULL if it's a broadcast key. In this case there is no need
+	 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
+	if (unlikely(mac == NULL)) {
+		low_id = 0xffffffff;
+		high_id = 0xffff | AR5K_KEYTABLE_VALID;
+	} else {
+		low_id = AR5K_LOW_ID(mac);
+		high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
+	}
+
+	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
+	ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
+
+	return 0;
+}
+
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index fa0d47f..e43f656 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1,9 +1,9 @@
 /*
  * PHY functions
  *
- * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +19,8 @@
  *
  */
 
+#define _ATH5K_PHY
+
 #include <linux/delay.h>
 
 #include "ath5k.h"
@@ -2122,7 +2124,7 @@
 	beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
 	ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
 
-	udelay(2300);
+	mdelay(2);
 
 	/*
 	 * Set the channel (with AGC turned off)
@@ -2501,3 +2503,5 @@
 
 	return ath5k_hw_txpower(ah, channel, power);
 }
+
+#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath5k/qcu.c
new file mode 100644
index 0000000..01bf091
--- /dev/null
+++ b/drivers/net/wireless/ath5k/qcu.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/********************************************\
+Queue Control Unit, DFS Control Unit Functions
+\********************************************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "base.h"
+
+/*
+ * Get properties for a transmit queue
+ */
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+		struct ath5k_txq_info *queue_info)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
+	return 0;
+}
+
+/*
+ * Set properties for a transmit queue
+ */
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+				const struct ath5k_txq_info *queue_info)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return -EIO;
+
+	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
+
+	/*XXX: Is this supported on 5210 ?*/
+	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
+			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
+			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
+			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
+		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+
+	return 0;
+}
+
+/*
+ * Initialize a transmit queue
+ */
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+		struct ath5k_txq_info *queue_info)
+{
+	unsigned int queue;
+	int ret;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/*
+	 * Get queue by type
+	 */
+	/*5210 only has 2 queues*/
+	if (ah->ah_version == AR5K_AR5210) {
+		switch (queue_type) {
+		case AR5K_TX_QUEUE_DATA:
+			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+		case AR5K_TX_QUEUE_CAB:
+			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (queue_type) {
+		case AR5K_TX_QUEUE_DATA:
+			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
+				ah->ah_txq[queue].tqi_type !=
+				AR5K_TX_QUEUE_INACTIVE; queue++) {
+
+				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
+					return -EINVAL;
+			}
+			break;
+		case AR5K_TX_QUEUE_UAPSD:
+			queue = AR5K_TX_QUEUE_ID_UAPSD;
+			break;
+		case AR5K_TX_QUEUE_BEACON:
+			queue = AR5K_TX_QUEUE_ID_BEACON;
+			break;
+		case AR5K_TX_QUEUE_CAB:
+			queue = AR5K_TX_QUEUE_ID_CAB;
+			break;
+		case AR5K_TX_QUEUE_XR_DATA:
+			if (ah->ah_version != AR5K_AR5212)
+				ATH5K_ERR(ah->ah_sc,
+					"XR data queues only supported in"
+					" 5212!\n");
+			queue = AR5K_TX_QUEUE_ID_XR_DATA;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Setup internal queue structure
+	 */
+	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
+	ah->ah_txq[queue].tqi_type = queue_type;
+
+	if (queue_info != NULL) {
+		queue_info->tqi_type = queue_type;
+		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * We use ah_txq_status to hold a temp value for
+	 * the Secondary interrupt mask registers on 5211+;
+	 * check out ath5k_hw_reset_tx_queue.
+	 */
+	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
+
+	return queue;
+}
+
+/*
+ * Get number of pending frames
+ * for a specific queue [5211+]
+ */
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/* Return if queue is declared inactive */
+	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return false;
+
+	/* XXX: How about AR5K_CFG_TXCNT ? */
+	if (ah->ah_version == AR5K_AR5210)
+		return false;
+
+	return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
+}
+
+/*
+ * Set a transmit queue inactive
+ */
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
+		return;
+
+	/* This queue will be skipped in further operations */
+	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
+	/*For SIMR setup*/
+	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+}
+
+/*
+ * Set DFS properties for a transmit queue on DCU
+ */
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+	u32 cw_min, cw_max, retry_lg, retry_sh;
+	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
+
+	ATH5K_TRACE(ah->ah_sc);
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	tq = &ah->ah_txq[queue];
+
+	if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return 0;
+
+	if (ah->ah_version == AR5K_AR5210) {
+		/* Only handle data queues, others will be ignored */
+		if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
+			return 0;
+
+		/* Set Slot time */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
+			AR5K_SLOT_TIME);
+		/* Set ACK_CTS timeout */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
+			AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
+		/* Set Transmit Latency */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			AR5K_INIT_TRANSMIT_LATENCY_TURBO :
+			AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
+
+		/* Set IFS0 */
+		if (ah->ah_turbo) {
+			 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
+				(ah->ah_aifs + tq->tqi_aifs) *
+				AR5K_INIT_SLOT_TIME_TURBO) <<
+				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
+				AR5K_IFS0);
+		} else {
+			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
+				(ah->ah_aifs + tq->tqi_aifs) *
+				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
+				AR5K_INIT_SIFS, AR5K_IFS0);
+		}
+
+		/* Set IFS1 */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
+			AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
+		/* Set AR5K_PHY_SETTLING */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
+			| 0x38 :
+			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
+			| 0x1C,
+			AR5K_PHY_SETTLING);
+		/* Set Frame Control Register */
+		ath5k_hw_reg_write(ah, ah->ah_turbo ?
+			(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
+			AR5K_PHY_TURBO_SHORT | 0x2020) :
+			(AR5K_PHY_FRAME_CTL_INI | 0x1020),
+			AR5K_PHY_FRAME_CTL_5210);
+	}
+
+	/*
+	 * Calculate cwmin/max by channel mode
+	 */
+	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
+	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
+	ah->ah_aifs = AR5K_TUNE_AIFS;
+	/*XR is only supported on 5212*/
+	if (IS_CHAN_XR(ah->ah_current_channel) &&
+			ah->ah_version == AR5K_AR5212) {
+		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
+		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
+		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
+	/*B mode is not supported on 5210*/
+	} else if (IS_CHAN_B(ah->ah_current_channel) &&
+			ah->ah_version != AR5K_AR5210) {
+		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
+		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
+		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
+	}
+
+	cw_min = 1;
+	while (cw_min < ah->ah_cw_min)
+		cw_min = (cw_min << 1) | 1;
+
+	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
+		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
+	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
+		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
+
+	/*
+	 * Calculate and set retry limits
+	 */
+	if (ah->ah_software_retry) {
+		/* XXX Need to test this */
+		retry_lg = ah->ah_limit_tx_retries;
+		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
+			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
+	} else {
+		retry_lg = AR5K_INIT_LG_RETRY;
+		retry_sh = AR5K_INIT_SH_RETRY;
+	}
+
+	/*No QCU/DCU [5210]*/
+	if (ah->ah_version == AR5K_AR5210) {
+		ath5k_hw_reg_write(ah,
+			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
+			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
+				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
+				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
+			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
+			AR5K_NODCU_RETRY_LMT);
+	} else {
+		/*QCU/DCU [5211+]*/
+		ath5k_hw_reg_write(ah,
+			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
+				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
+			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
+				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
+			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
+			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
+			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+
+	/*===Rest is also for QCU/DCU only [5211+]===*/
+
+		/*
+		 * Set initial contention window (cw_min/cw_max)
+		 * and arbitrated interframe space (aifs)...
+		 */
+		ath5k_hw_reg_write(ah,
+			AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
+			AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
+			AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
+				AR5K_DCU_LCL_IFS_AIFS),
+			AR5K_QUEUE_DFS_LOCAL_IFS(queue));
+
+		/*
+		 * Set misc registers
+		 */
+		ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
+			AR5K_QUEUE_MISC(queue));
+
+		if (tq->tqi_cbr_period) {
+			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
+				AR5K_QCU_CBRCFG_INTVAL) |
+				AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
+				AR5K_QCU_CBRCFG_ORN_THRES),
+				AR5K_QUEUE_CBRCFG(queue));
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+				AR5K_QCU_MISC_FRSHED_CBR);
+			if (tq->tqi_cbr_overflow_limit)
+				AR5K_REG_ENABLE_BITS(ah,
+					AR5K_QUEUE_MISC(queue),
+					AR5K_QCU_MISC_CBR_THRES_ENABLE);
+		}
+
+		if (tq->tqi_ready_time)
+			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
+				AR5K_QCU_RDYTIMECFG_INTVAL) |
+				AR5K_QCU_RDYTIMECFG_ENABLE,
+				AR5K_QUEUE_RDYTIMECFG(queue));
+
+		if (tq->tqi_burst_time) {
+			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
+				AR5K_DCU_CHAN_TIME_DUR) |
+				AR5K_DCU_CHAN_TIME_ENABLE,
+				AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
+
+			if (tq->tqi_flags
+			& AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
+				AR5K_REG_ENABLE_BITS(ah,
+					AR5K_QUEUE_MISC(queue),
+					AR5K_QCU_MISC_RDY_VEOL_POLICY);
+		}
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
+			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
+				AR5K_QUEUE_DFS_MISC(queue));
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
+				AR5K_QUEUE_DFS_MISC(queue));
+
+		/*
+		 * Set registers by queue type
+		 */
+		switch (tq->tqi_type) {
+		case AR5K_TX_QUEUE_BEACON:
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+				AR5K_QCU_MISC_FRSHED_DBA_GT |
+				AR5K_QCU_MISC_CBREXP_BCN_DIS |
+				AR5K_QCU_MISC_BCN_ENABLE);
+
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
+				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
+				AR5K_DCU_MISC_BCN_ENABLE);
+
+			ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+				(AR5K_TUNE_SW_BEACON_RESP -
+				AR5K_TUNE_DMA_BEACON_RESP) -
+				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
+				AR5K_QCU_RDYTIMECFG_ENABLE,
+				AR5K_QUEUE_RDYTIMECFG(queue));
+			break;
+
+		case AR5K_TX_QUEUE_CAB:
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+				AR5K_QCU_MISC_FRSHED_DBA_GT |
+				AR5K_QCU_MISC_CBREXP_DIS |
+				AR5K_QCU_MISC_CBREXP_BCN_DIS);
+
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+				AR5K_DCU_MISC_ARBLOCK_CTL_S));
+			break;
+
+		case AR5K_TX_QUEUE_UAPSD:
+			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+				AR5K_QCU_MISC_CBREXP_DIS);
+			break;
+
+		case AR5K_TX_QUEUE_DATA:
+		default:
+			break;
+		}
+
+		/*
+		 * Enable interrupts for this tx queue
+		 * in the secondary interrupt mask registers
+		 */
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
+			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
+			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
+			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
+			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
+
+		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
+			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
+
+
+		/* Update secondary interrupt mask registers */
+		ah->ah_txq_imr_txok &= ah->ah_txq_status;
+		ah->ah_txq_imr_txerr &= ah->ah_txq_status;
+		ah->ah_txq_imr_txurn &= ah->ah_txq_status;
+		ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
+		ah->ah_txq_imr_txeol &= ah->ah_txq_status;
+
+		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
+			AR5K_SIMR0_QCU_TXOK) |
+			AR5K_REG_SM(ah->ah_txq_imr_txdesc,
+			AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
+		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
+			AR5K_SIMR1_QCU_TXERR) |
+			AR5K_REG_SM(ah->ah_txq_imr_txeol,
+			AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
+		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
+			AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
+	}
+
+	return 0;
+}
+
+/*
+ * Get slot time from DCU
+ */
+unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (ah->ah_version == AR5K_AR5210)
+		return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
+				AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+	else
+		return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+}
+
+/*
+ * Set slot time on DCU
+ */
+int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
+{
+	ATH5K_TRACE(ah->ah_sc);
+	if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+		return -EINVAL;
+
+	if (ah->ah_version == AR5K_AR5210)
+		ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
+				ah->ah_turbo), AR5K_SLOT_TIME);
+	else
+		ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+
+	return 0;
+}
+
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 7562bf1..e557fe1 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2007 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2007 Michael Taylor <mike.taylor@apprion.com>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2007-2008 Michael Taylor <mike.taylor@apprion.com>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -29,6 +29,10 @@
  *        http://www.it.iitb.ac.in/~janak/wifire/01222734.pdf
  *
  * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf
+ *
+ * This file also contains register values found on a memory dump of
+ * Atheros's ART program (Atheros Radio Test), on ath9k, on legacy-hal
+ * released by Atheros and on various debug messages found on the net.
  */
 
 
@@ -295,7 +299,7 @@
 #define AR5K_ISR_RXPHY		0x00004000	/* PHY error */
 #define AR5K_ISR_RXKCM		0x00008000	/* RX Key cache miss */
 #define AR5K_ISR_SWBA		0x00010000	/* Software beacon alert */
-#define AR5K_ISR_BRSSI		0x00020000
+#define AR5K_ISR_BRSSI		0x00020000	/* Beacon rssi below threshold (?) */
 #define AR5K_ISR_BMISS		0x00040000	/* Beacon missed */
 #define AR5K_ISR_HIUERR		0x00080000	/* Host Interface Unit error [5211+] */
 #define AR5K_ISR_BNR		0x00100000 	/* Beacon not ready [5211+] */
@@ -303,46 +307,56 @@
 #define AR5K_ISR_RXCHIRP	0x00200000	/* CHIRP Received [5212+] */
 #define AR5K_ISR_SSERR		0x00200000	/* Signaled System Error [5210] */
 #define AR5K_ISR_DPERR		0x00400000	/* Det par Error (?) [5210] */
-#define AR5K_ISR_TIM		0x00800000	/* [5210] */
-#define AR5K_ISR_BCNMISC	0x00800000	/* [5212+] */
-#define AR5K_ISR_GPIO		0x01000000	/* GPIO (rf kill)*/
-#define AR5K_ISR_QCBRORN	0x02000000	/* CBR overrun (?)  [5211+] */
-#define AR5K_ISR_QCBRURN	0x04000000	/* CBR underrun (?) [5211+] */
-#define AR5K_ISR_QTRIG		0x08000000	/* [5211+] */
+#define AR5K_ISR_RXDOPPLER	0x00400000	/* Doppler chirp received [5212+] */
+#define AR5K_ISR_TIM		0x00800000	/* [5211+] */
+#define AR5K_ISR_BCNMISC	0x00800000	/* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+						CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_GPIO		0x01000000	/* GPIO (rf kill) */
+#define AR5K_ISR_QCBRORN	0x02000000	/* QCU CBR overrun [5211+] */
+#define AR5K_ISR_QCBRURN	0x04000000	/* QCU CBR underrun [5211+] */
+#define AR5K_ISR_QTRIG		0x08000000	/* QCU scheduling trigger [5211+] */
 
 /*
  * Secondary status registers [5211+] (0 - 4)
  *
- * I guess from the names that these give the status for each
- * queue, that's why only masks are defined here, haven't got
- * any info about them (couldn't find them anywhere in ar5k code).
+ * These give the status for each QCU, only QCUs 0-9 are
+ * represented.
  */
 #define AR5K_SISR0		0x0084			/* Register Address [5211+] */
 #define AR5K_SISR0_QCU_TXOK	0x000003ff	/* Mask for QCU_TXOK */
+#define AR5K_SISR0_QCU_TXOK_S	0
 #define AR5K_SISR0_QCU_TXDESC	0x03ff0000	/* Mask for QCU_TXDESC */
+#define AR5K_SISR0_QCU_TXDESC_S	16
 
 #define AR5K_SISR1		0x0088			/* Register Address [5211+] */
 #define AR5K_SISR1_QCU_TXERR	0x000003ff	/* Mask for QCU_TXERR */
+#define AR5K_SISR1_QCU_TXERR_S	0
 #define AR5K_SISR1_QCU_TXEOL	0x03ff0000	/* Mask for QCU_TXEOL */
+#define AR5K_SISR1_QCU_TXEOL_S	16
 
 #define AR5K_SISR2		0x008c			/* Register Address [5211+] */
 #define AR5K_SISR2_QCU_TXURN	0x000003ff	/* Mask for QCU_TXURN */
+#define	AR5K_SISR2_QCU_TXURN_S	0
 #define	AR5K_SISR2_MCABT	0x00100000	/* Master Cycle Abort */
 #define	AR5K_SISR2_SSERR	0x00200000	/* Signaled System Error */
-#define	AR5K_SISR2_DPERR	0x00400000	/* Det par Error (?) */
+#define	AR5K_SISR2_DPERR	0x00400000	/* Bus parity error */
 #define	AR5K_SISR2_TIM		0x01000000	/* [5212+] */
 #define	AR5K_SISR2_CAB_END	0x02000000	/* [5212+] */
 #define	AR5K_SISR2_DTIM_SYNC	0x04000000	/* DTIM sync lost [5212+] */
 #define	AR5K_SISR2_BCN_TIMEOUT	0x08000000	/* Beacon Timeout [5212+] */
 #define	AR5K_SISR2_CAB_TIMEOUT	0x10000000	/* CAB Timeout [5212+] */
 #define	AR5K_SISR2_DTIM		0x20000000	/* [5212+] */
+#define	AR5K_SISR2_TSFOOR	0x80000000	/* TSF OOR (?) */
 
 #define AR5K_SISR3		0x0090			/* Register Address [5211+] */
 #define AR5K_SISR3_QCBRORN	0x000003ff	/* Mask for QCBRORN */
+#define AR5K_SISR3_QCBORN_S	0
 #define AR5K_SISR3_QCBRURN	0x03ff0000	/* Mask for QCBRURN */
+#define AR5K_SISR3_QCBRURN_S	16
 
 #define AR5K_SISR4		0x0094			/* Register Address [5211+] */
 #define AR5K_SISR4_QTRIG	0x000003ff	/* Mask for QTRIG */
+#define AR5K_SISR4_QTRIG_S	0
 
 /*
  * Shadow read-and-clear interrupt status registers [5211+]
@@ -379,7 +393,7 @@
 #define AR5K_IMR_RXPHY		0x00004000	/* PHY error*/
 #define AR5K_IMR_RXKCM		0x00008000	/* RX Key cache miss */
 #define AR5K_IMR_SWBA		0x00010000	/* Software beacon alert*/
-#define AR5K_IMR_BRSSI		0x00020000
+#define AR5K_IMR_BRSSI		0x00020000	/* Beacon rssi below threshold (?) */
 #define AR5K_IMR_BMISS		0x00040000	/* Beacon missed*/
 #define AR5K_IMR_HIUERR		0x00080000	/* Host Interface Unit error [5211+] */
 #define AR5K_IMR_BNR		0x00100000 	/* Beacon not ready [5211+] */
@@ -387,12 +401,14 @@
 #define AR5K_IMR_RXCHIRP	0x00200000	/* CHIRP Received [5212+]*/
 #define AR5K_IMR_SSERR		0x00200000	/* Signaled System Error [5210] */
 #define AR5K_IMR_DPERR		0x00400000	/* Det par Error (?) [5210] */
+#define AR5K_IMR_RXDOPPLER	0x00400000	/* Doppler chirp received [5212+] */
 #define AR5K_IMR_TIM		0x00800000	/* [5211+] */
-#define AR5K_IMR_BCNMISC	0x00800000	/* [5212+] */
+#define AR5K_IMR_BCNMISC	0x00800000	/* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+						CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
 #define AR5K_IMR_GPIO		0x01000000	/* GPIO (rf kill)*/
-#define AR5K_IMR_QCBRORN	0x02000000	/* CBR overrun (?) [5211+] */
-#define AR5K_IMR_QCBRURN	0x04000000	/* CBR underrun (?) [5211+] */
-#define AR5K_IMR_QTRIG		0x08000000	/* [5211+] */
+#define AR5K_IMR_QCBRORN	0x02000000	/* QCU CBR overrun (?) [5211+] */
+#define AR5K_IMR_QCBRURN	0x04000000	/* QCU CBR underrun (?) [5211+] */
+#define AR5K_IMR_QTRIG		0x08000000	/* QCU scheduling trigger [5211+] */
 
 /*
  * Secondary interrupt mask registers [5211+] (0 - 4)
@@ -414,13 +430,14 @@
 #define AR5K_SIMR2_QCU_TXURN_S	0
 #define	AR5K_SIMR2_MCABT	0x00100000	/* Master Cycle Abort */
 #define	AR5K_SIMR2_SSERR	0x00200000	/* Signaled System Error */
-#define	AR5K_SIMR2_DPERR	0x00400000	/* Det par Error (?) */
+#define	AR5K_SIMR2_DPERR	0x00400000	/* Bus parity error */
 #define	AR5K_SIMR2_TIM		0x01000000	/* [5212+] */
 #define	AR5K_SIMR2_CAB_END	0x02000000	/* [5212+] */
 #define	AR5K_SIMR2_DTIM_SYNC	0x04000000	/* DTIM Sync lost [5212+] */
 #define	AR5K_SIMR2_BCN_TIMEOUT	0x08000000	/* Beacon Timeout [5212+] */
 #define	AR5K_SIMR2_CAB_TIMEOUT	0x10000000	/* CAB Timeout [5212+] */
 #define	AR5K_SIMR2_DTIM		0x20000000	/* [5212+] */
+#define	AR5K_SIMR2_TSFOOR	0x80000000	/* TSF OOR (?) */
 
 #define AR5K_SIMR3		0x00b0			/* Register Address [5211+] */
 #define AR5K_SIMR3_QCBRORN	0x000003ff	/* Mask for QCBRORN */
@@ -586,15 +603,15 @@
 #define	AR5K_QCU_MISC_FRSHED_M		0x0000000f	/* Frame sheduling mask */
 #define	AR5K_QCU_MISC_FRSHED_ASAP		0	/* ASAP */
 #define	AR5K_QCU_MISC_FRSHED_CBR		1	/* Constant Bit Rate */
-#define	AR5K_QCU_MISC_FRSHED_DBA_GT		2	/* DMA Beacon alert gated (?) */
-#define	AR5K_QCU_MISC_FRSHED_TIM_GT		3	/* Time gated (?) */
-#define	AR5K_QCU_MISC_FRSHED_BCN_SENT_GT	4	/* Beacon sent gated (?) */
+#define	AR5K_QCU_MISC_FRSHED_DBA_GT		2	/* DMA Beacon alert gated */
+#define	AR5K_QCU_MISC_FRSHED_TIM_GT		3	/* TIMT gated */
+#define	AR5K_QCU_MISC_FRSHED_BCN_SENT_GT	4	/* Beacon sent gated */
 #define	AR5K_QCU_MISC_ONESHOT_ENABLE	0x00000010	/* Oneshot enable */
-#define	AR5K_QCU_MISC_CBREXP		0x00000020	/* CBR expired (normal queue) */
-#define	AR5K_QCU_MISC_CBREXP_BCN	0x00000040	/* CBR expired (beacon queue) */
+#define	AR5K_QCU_MISC_CBREXP_DIS	0x00000020	/* Disable CBR expired counter (normal queue) */
+#define	AR5K_QCU_MISC_CBREXP_BCN_DIS	0x00000040	/* Disable CBR expired counter (beacon queue) */
 #define	AR5K_QCU_MISC_BCN_ENABLE	0x00000080	/* Enable Beacon use */
-#define	AR5K_QCU_MISC_CBR_THRES_ENABLE	0x00000100	/* CBR threshold enabled */
-#define	AR5K_QCU_MISC_RDY_VEOL_POLICY	0x00000200	/* TXE reset when RDYTIME enalbed */
+#define	AR5K_QCU_MISC_CBR_THRES_ENABLE	0x00000100	/* CBR expired threshold enabled */
+#define	AR5K_QCU_MISC_RDY_VEOL_POLICY	0x00000200	/* TXE reset when RDYTIME expired or VEOL */
 #define	AR5K_QCU_MISC_CBR_RESET_CNT	0x00000400	/* CBR threshold (counter) reset */
 #define	AR5K_QCU_MISC_DCU_EARLY		0x00000800	/* DCU early termination */
 #define AR5K_QCU_MISC_DCU_CMP_EN	0x00001000	/* Enable frame compression */
@@ -663,6 +680,7 @@
 #define	AR5K_DCU_LCL_IFS_CW_MAX_S	10
 #define	AR5K_DCU_LCL_IFS_AIFS		0x0ff00000	/* Arbitrated Interframe Space */
 #define	AR5K_DCU_LCL_IFS_AIFS_S		20
+#define	AR5K_DCU_LCL_IFS_AIFS_MAX	0xfc		/* Anything above that can cause DCU to hang */
 #define	AR5K_QUEUE_DFS_LOCAL_IFS(_q)	AR5K_QUEUE_REG(AR5K_DCU_LCL_IFS_BASE, _q)
 
 /*
@@ -691,11 +709,7 @@
 /*
  * DCU misc registers [5211+]
  *
- * For some of the registers i couldn't find in the code
- * (only backoff stuff is there realy) i tried to match the
- * names with 802.11e parameters etc, so i guess VIRTCOL here
- * means Virtual Collision and HCFPOLL means Hybrid Coordination
- * factor Poll (CF- Poll). Arbiter lockout control controls the
+ * Note: Arbiter lockout control controls the
  * behaviour on low priority queues when we have multiple queues
  * with pending frames. Intra-frame lockout means we wait until
  * the queue's current frame transmits (with post frame backoff and bursting)
@@ -705,15 +719,20 @@
  * No lockout means there is no special handling.
  */
 #define AR5K_DCU_MISC_BASE		0x1100			/* Register Address -Queue0 DCU_MISC */
-#define	AR5K_DCU_MISC_BACKOFF		0x000007ff	/* Mask for backoff threshold */
+#define	AR5K_DCU_MISC_BACKOFF		0x0000003f	/* Mask for backoff threshold */
+#define	AR5K_DCU_MISC_ETS_RTS_POL	0x00000040	/* End of transmission series
+							station RTS/data failure count
+							reset policy (?) */
+#define AR5K_DCU_MISC_ETS_CW_POL	0x00000080	/* End of transmission series
+							CW reset policy */
+#define	AR5K_DCU_MISC_FRAG_WAIT		0x00000100	/* Wait for next fragment */
 #define AR5K_DCU_MISC_BACKOFF_FRAG	0x00000200	/* Enable backoff while bursting */
 #define	AR5K_DCU_MISC_HCFPOLL_ENABLE	0x00000800	/* CF - Poll enable */
 #define	AR5K_DCU_MISC_BACKOFF_PERSIST	0x00001000	/* Persistent backoff */
 #define	AR5K_DCU_MISC_FRMPRFTCH_ENABLE	0x00002000	/* Enable frame pre-fetch */
 #define	AR5K_DCU_MISC_VIRTCOL		0x0000c000	/* Mask for Virtual Collision (?) */
-#define	AR5K_DCU_MISC_VIRTCOL_NORMAL		0
-#define	AR5K_DCU_MISC_VIRTCOL_MODIFIED		1
-#define	AR5K_DCU_MISC_VIRTCOL_IGNORE		2
+#define	AR5K_DCU_MISC_VIRTCOL_NORMAL	0
+#define	AR5K_DCU_MISC_VIRTCOL_IGNORE	1
 #define	AR5K_DCU_MISC_BCN_ENABLE	0x00010000	/* Enable Beacon use */
 #define	AR5K_DCU_MISC_ARBLOCK_CTL	0x00060000	/* Arbiter lockout control mask */
 #define	AR5K_DCU_MISC_ARBLOCK_CTL_S	17
@@ -768,8 +787,9 @@
 #define	AR5K_DCU_GBL_IFS_MISC_TURBO_MODE	0x00000008	/* Turbo mode */
 #define	AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC	0x000003f0	/* SIFS Duration mask */
 #define	AR5K_DCU_GBL_IFS_MISC_USEC_DUR		0x000ffc00	/* USEC Duration mask */
+#define	AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S	10
 #define	AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY	0x00300000	/* DCU Arbiter delay mask */
-#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST	0x00400000	/* SIFC cnt reset policy (?) */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST	0x00400000	/* SIFS cnt reset policy (?) */
 #define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST	0x00800000	/* AIFS cnt reset policy (?) */
 #define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS	0x01000000	/* Disable random LFSR slice */
 
@@ -820,8 +840,6 @@
 #define AR5K_RESET_CTL_MAC	0x00000004	/* MAC reset (PCU+Baseband ?) [5210] */
 #define AR5K_RESET_CTL_PHY	0x00000008	/* PHY reset [5210] */
 #define AR5K_RESET_CTL_PCI	0x00000010	/* PCI Core reset (interrupts etc) */
-#define AR5K_RESET_CTL_CHIP	(AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA |	\
-				AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY)
 
 /*
  * Sleep control register
@@ -833,9 +851,11 @@
 #define AR5K_SLEEP_CTL_SLE_S		16
 #define AR5K_SLEEP_CTL_SLE_WAKE		0x00000000	/* Force chip awake */
 #define AR5K_SLEEP_CTL_SLE_SLP		0x00010000	/* Force chip sleep */
-#define AR5K_SLEEP_CTL_SLE_ALLOW	0x00020000
+#define AR5K_SLEEP_CTL_SLE_ALLOW	0x00020000	/* Normal sleep policy */
 #define AR5K_SLEEP_CTL_SLE_UNITS	0x00000008	/* [5211+] */
-/* more bits */
+#define AR5K_SLEEP_CTL_DUR_TIM_POL	0x00040000	/* Sleep duration timing policy */
+#define AR5K_SLEEP_CTL_DUR_WRITE_POL	0x00080000	/* Sleep duration write policy */
+#define AR5K_SLEEP_CTL_SLE_POL		0x00100000	/* Sleep policy mode */
 
 /*
  * Interrupt pending register
@@ -851,27 +871,28 @@
 
 /*
  * PCI configuration register
+ * TODO: Fix LED stuff
  */
 #define AR5K_PCICFG			0x4010			/* Register Address */
 #define AR5K_PCICFG_EEAE		0x00000001	/* Eeprom access enable [5210] */
-#define	AR5K_PCICFG_SLEEP_CLOCK_EN	0x00000002	/* Enable sleep clock (?) */
+#define AR5K_PCICFG_SLEEP_CLOCK_EN	0x00000002	/* Enable sleep clock */
 #define AR5K_PCICFG_CLKRUNEN		0x00000004	/* CLKRUN enable [5211+] */
 #define AR5K_PCICFG_EESIZE		0x00000018	/* Mask for EEPROM size [5211+] */
 #define AR5K_PCICFG_EESIZE_S		3
 #define AR5K_PCICFG_EESIZE_4K		0		/* 4K */
 #define AR5K_PCICFG_EESIZE_8K		1		/* 8K */
 #define AR5K_PCICFG_EESIZE_16K		2		/* 16K */
-#define AR5K_PCICFG_EESIZE_FAIL		3		/* Failed to get size (?) [5211+] */
+#define AR5K_PCICFG_EESIZE_FAIL		3		/* Failed to get size [5211+] */
 #define AR5K_PCICFG_LED			0x00000060	/* Led status [5211+] */
 #define AR5K_PCICFG_LED_NONE		0x00000000	/* Default [5211+] */
 #define AR5K_PCICFG_LED_PEND		0x00000020	/* Scan / Auth pending */
 #define AR5K_PCICFG_LED_ASSOC		0x00000040	/* Associated */
 #define	AR5K_PCICFG_BUS_SEL		0x00000380	/* Mask for "bus select" [5211+] (?) */
-#define	AR5K_PCICFG_CBEFIX_DIS		0x00000400	/* Disable CBE fix (?) */
-#define AR5K_PCICFG_SL_INTEN		0x00000800	/* Enable interrupts when asleep (?) */
+#define AR5K_PCICFG_CBEFIX_DIS		0x00000400	/* Disable CBE fix */
+#define AR5K_PCICFG_SL_INTEN		0x00000800	/* Enable interrupts when asleep */
 #define AR5K_PCICFG_LED_BCTL		0x00001000	/* Led blink (?) [5210] */
-#define	AR5K_PCICFG_UNK			0x00001000	/* Passed on some parts durring attach (?) */
-#define AR5K_PCICFG_SL_INPEN		0x00002000	/* Sleep even whith pending interrupts (?) */
+#define AR5K_PCICFG_RETRY_FIX		0x00001000	/* Enable pci core retry fix */
+#define AR5K_PCICFG_SL_INPEN		0x00002000	/* Sleep even with pending interrupts */
 #define AR5K_PCICFG_SPWR_DN		0x00010000	/* Mask for power status */
 #define AR5K_PCICFG_LEDMODE		0x000e0000	/* Ledmode [5211+] */
 #define AR5K_PCICFG_LEDMODE_PROP	0x00000000	/* Blink on standard traffic [5211+] */
@@ -884,7 +905,8 @@
 #define AR5K_PCICFG_LEDSTATE				\
 	(AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE |	\
 	AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
-#define	AR5K_PCICFG_SLEEP_CLOCK_RATE	0x03000000	/* Sleep clock rate (field) */
+#define	AR5K_PCICFG_SLEEP_CLOCK_RATE	0x03000000	/* Sleep clock rate */
+#define	AR5K_PCICFG_SLEEP_CLOCK_RATE_S	24
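Each multi-bit field in this header now carries a matching "_S" shift define next to its mask; the driver's field helpers derive the shift from the mask name by token pasting. A minimal sketch of the idiom, assuming an initialized struct ath5k_hw *ah and the AR5K_REG_WRITE_BITS() macro from ath5k.h (the same macro used in reset.c further below):

/* Sketch only: program the sleep clock rate field defined above.
 * AR5K_REG_WRITE_BITS() appends "_S" to the mask name to find the
 * shift, which is why each field mask needs a companion shift define. */
static void example_set_sleep_clock_rate(struct ath5k_hw *ah, u32 rate)
{
	AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
			AR5K_PCICFG_SLEEP_CLOCK_RATE, rate);
}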
 
 /*
  * "General Purpose Input/Output" (GPIO) control register
@@ -906,8 +928,8 @@
 
 #define AR5K_GPIOCR		0x4014				/* Register Address */
 #define AR5K_GPIOCR_INT_ENA	0x00008000		/* Enable GPIO interrupt */
-#define AR5K_GPIOCR_INT_SELL	0x00000000		/* Generate interrupt when pin is off (?) */
-#define AR5K_GPIOCR_INT_SELH	0x00010000		/* Generate interrupt when pin is on */
+#define AR5K_GPIOCR_INT_SELL	0x00000000		/* Generate interrupt when pin is low */
+#define AR5K_GPIOCR_INT_SELH	0x00010000		/* Generate interrupt when pin is high */
 #define AR5K_GPIOCR_IN(n)	(0 << ((n) * 2))	/* Mode 0 for pin n */
 #define AR5K_GPIOCR_OUT0(n)	(1 << ((n) * 2))	/* Mode 1 for pin n */
 #define AR5K_GPIOCR_OUT1(n)	(2 << ((n) * 2))	/* Mode 2 for pin n */
@@ -925,7 +947,6 @@
 #define AR5K_GPIODI	0x401c
 #define AR5K_GPIODI_M	0x0000002f
 
-
 /*
  * Silicon revision register
  */
@@ -935,7 +956,59 @@
 #define AR5K_SREV_VER		0x000000ff	/* Mask for version */
 #define AR5K_SREV_VER_S		4
 
+/*
+ * TXE write posting register
+ */
+#define	AR5K_TXEPOST	0x4028
 
+/*
+ * QCU sleep mask
+ */
+#define	AR5K_QCU_SLEEP_MASK	0x402c
+
+/* 0x4068 is compression buffer configuration
+ * register on 5414 and pm configuration register
+ * on 5424 and newer pci-e chips. */
+
+/*
+ * Compression buffer configuration
+ * register (enable/disable) [5414]
+ */
+#define AR5K_5414_CBCFG		0x4068
+#define AR5K_5414_CBCFG_BUF_DIS	0x10	/* Disable buffer */
+
+/*
+ * PCI-E Power management configuration
+ * and status register [5424+]
+ */
+#define	AR5K_PCIE_PM_CTL		0x4068			/* Register address */
+/* Only 5424 */
+#define	AR5K_PCIE_PM_CTL_L1_WHEN_D2	0x00000001	/* Enable PCIe core to enter L1
+							when d2_sleep_en is asserted */
+#define	AR5K_PCIE_PM_CTL_L0_L0S_CLEAR	0x00000002	/* Clear L0 and L0S counters */
+#define	AR5K_PCIE_PM_CTL_L0_L0S_EN	0x00000004	/* Start L0 and L0S counters */
+#define	AR5K_PCIE_PM_CTL_LDRESET_EN	0x00000008	/* Enable reset when link goes
+							down */
+/* Wake On Wireless */
+#define	AR5K_PCIE_PM_CTL_PME_EN		0x00000010	/* PME Enable */
+#define	AR5K_PCIE_PM_CTL_AUX_PWR_DET	0x00000020	/* Aux power detect */
+#define	AR5K_PCIE_PM_CTL_PME_CLEAR	0x00000040	/* Clear PME */
+#define	AR5K_PCIE_PM_CTL_PSM_D0		0x00000080
+#define	AR5K_PCIE_PM_CTL_PSM_D1		0x00000100
+#define	AR5K_PCIE_PM_CTL_PSM_D2		0x00000200
+#define	AR5K_PCIE_PM_CTL_PSM_D3		0x00000400
+
+/*
+ * PCI-E Workaround enable register
+ */
+#define	AR5K_PCIE_WAEN	0x407c
+
+/*
+ * PCI-E Serializer/Deserializer
+ * registers
+ */
+#define	AR5K_PCIE_SERDES	0x4080
+#define	AR5K_PCIE_SERDES_RESET	0x4084
 
 /*====EEPROM REGISTERS====*/
 
@@ -977,98 +1050,6 @@
 #define AR5K_EEPROM_BASE	0x6000
 
 /*
- * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
- */
-#define AR5K_EEPROM_MAGIC		0x003d	/* EEPROM Magic number */
-#define AR5K_EEPROM_MAGIC_VALUE		0x5aa5	/* Default - found on EEPROM */
-#define AR5K_EEPROM_MAGIC_5212		0x0000145c /* 5212 */
-#define AR5K_EEPROM_MAGIC_5211		0x0000145b /* 5211 */
-#define AR5K_EEPROM_MAGIC_5210		0x0000145a /* 5210 */
-
-#define AR5K_EEPROM_PROTECT		0x003f	/* EEPROM protect status */
-#define AR5K_EEPROM_PROTECT_RD_0_31	0x0001	/* Read protection bit for offsets 0x0 - 0x1f */
-#define AR5K_EEPROM_PROTECT_WR_0_31	0x0002	/* Write protection bit for offsets 0x0 - 0x1f */
-#define AR5K_EEPROM_PROTECT_RD_32_63	0x0004	/* 0x20 - 0x3f */
-#define AR5K_EEPROM_PROTECT_WR_32_63	0x0008
-#define AR5K_EEPROM_PROTECT_RD_64_127	0x0010	/* 0x40 - 0x7f */
-#define AR5K_EEPROM_PROTECT_WR_64_127	0x0020
-#define AR5K_EEPROM_PROTECT_RD_128_191	0x0040	/* 0x80 - 0xbf (regdom) */
-#define AR5K_EEPROM_PROTECT_WR_128_191	0x0080
-#define AR5K_EEPROM_PROTECT_RD_192_207	0x0100	/* 0xc0 - 0xcf */
-#define AR5K_EEPROM_PROTECT_WR_192_207	0x0200
-#define AR5K_EEPROM_PROTECT_RD_208_223	0x0400	/* 0xd0 - 0xdf */
-#define AR5K_EEPROM_PROTECT_WR_208_223	0x0800
-#define AR5K_EEPROM_PROTECT_RD_224_239	0x1000	/* 0xe0 - 0xef */
-#define AR5K_EEPROM_PROTECT_WR_224_239	0x2000
-#define AR5K_EEPROM_PROTECT_RD_240_255	0x4000	/* 0xf0 - 0xff */
-#define AR5K_EEPROM_PROTECT_WR_240_255	0x8000
-#define AR5K_EEPROM_REG_DOMAIN		0x00bf	/* EEPROM regdom */
-#define AR5K_EEPROM_INFO_BASE		0x00c0	/* EEPROM header */
-#define AR5K_EEPROM_INFO_MAX		(0x400 - AR5K_EEPROM_INFO_BASE)
-#define AR5K_EEPROM_INFO_CKSUM		0xffff
-#define AR5K_EEPROM_INFO(_n)		(AR5K_EEPROM_INFO_BASE + (_n))
-
-#define AR5K_EEPROM_VERSION		AR5K_EEPROM_INFO(1)	/* EEPROM Version */
-#define AR5K_EEPROM_VERSION_3_0		0x3000	/* No idea what's going on before this version */
-#define AR5K_EEPROM_VERSION_3_1		0x3001	/* ob/db values for 2Ghz (ar5211_rfregs) */
-#define AR5K_EEPROM_VERSION_3_2		0x3002	/* different frequency representation (eeprom_bin2freq) */
-#define AR5K_EEPROM_VERSION_3_3		0x3003	/* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
-#define AR5K_EEPROM_VERSION_3_4		0x3004	/* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
-#define AR5K_EEPROM_VERSION_4_0		0x4000	/* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
-#define AR5K_EEPROM_VERSION_4_1		0x4001	/* has ee_margin_tx_rx (eeprom_init) */
-#define AR5K_EEPROM_VERSION_4_2		0x4002	/* has ee_cck_ofdm_gain_delta (eeprom_init) */
-#define AR5K_EEPROM_VERSION_4_3		0x4003
-#define AR5K_EEPROM_VERSION_4_4		0x4004
-#define AR5K_EEPROM_VERSION_4_5		0x4005
-#define AR5K_EEPROM_VERSION_4_6		0x4006	/* has ee_scaled_cck_delta */
-#define AR5K_EEPROM_VERSION_4_7		0x4007
-
-#define AR5K_EEPROM_MODE_11A		0
-#define AR5K_EEPROM_MODE_11B		1
-#define AR5K_EEPROM_MODE_11G		2
-
-#define AR5K_EEPROM_HDR			AR5K_EEPROM_INFO(2)	/* Header that contains the device caps */
-#define AR5K_EEPROM_HDR_11A(_v)		(((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
-#define AR5K_EEPROM_HDR_11B(_v)		(((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
-#define AR5K_EEPROM_HDR_11G(_v)		(((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v)	(((_v) >> 3) & 0x1)	/* Disable turbo for 2Ghz (?) */
-#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v)	(((_v) >> 4) & 0x7f)	/* Max turbo power for a/XR mode (eeprom_init) */
-#define AR5K_EEPROM_HDR_DEVICE(_v)	(((_v) >> 11) & 0x7)
-#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v)	(((_v) >> 15) & 0x1)	/* Disable turbo for 5Ghz (?) */
-#define AR5K_EEPROM_HDR_RFKILL(_v)	(((_v) >> 14) & 0x1)	/* Device has RFKill support */
-
-#define AR5K_EEPROM_RFKILL_GPIO_SEL	0x0000001c
-#define AR5K_EEPROM_RFKILL_GPIO_SEL_S	2
-#define AR5K_EEPROM_RFKILL_POLARITY	0x00000002
-#define AR5K_EEPROM_RFKILL_POLARITY_S	1
-
-/* Newer EEPROMs are using a different offset */
-#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
-	(((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
-
-#define AR5K_EEPROM_ANT_GAIN(_v)	AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
-#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v)	((int8_t)(((_v) >> 8) & 0xff))
-#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v)	((int8_t)((_v) & 0xff))
-
-/* calibration settings */
-#define AR5K_EEPROM_MODES_11A(_v)	AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
-#define AR5K_EEPROM_MODES_11B(_v)	AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
-#define AR5K_EEPROM_MODES_11G(_v)	AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
-#define AR5K_EEPROM_CTL(_v)		AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128)	/* Conformance test limits */
-
-/* [3.1 - 3.3] */
-#define AR5K_EEPROM_OBDB0_2GHZ		0x00ec
-#define AR5K_EEPROM_OBDB1_2GHZ		0x00ed
-
-/* Misc values available since EEPROM 4.0 */
-#define AR5K_EEPROM_MISC0		0x00c4
-#define AR5K_EEPROM_EARSTART(_v)	((_v) & 0xfff)
-#define AR5K_EEPROM_EEMAP(_v)		(((_v) >> 14) & 0x3)
-#define AR5K_EEPROM_MISC1		0x00c5
-#define AR5K_EEPROM_TARGET_PWRSTART(_v)	((_v) & 0xfff)
-#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v)	(((_v) >> 14) & 0x1)
-
-/*
  * EEPROM data register
  */
 #define AR5K_EEPROM_DATA_5211	0x6004
@@ -1100,14 +1081,28 @@
  * EEPROM config register
  */
 #define AR5K_EEPROM_CFG			0x6010			/* Register Addres */
-#define AR5K_EEPROM_CFG_SIZE_OVR	0x00000001
+#define AR5K_EEPROM_CFG_SIZE		0x00000003		/* Size determination override */
+#define AR5K_EEPROM_CFG_SIZE_AUTO	0
+#define AR5K_EEPROM_CFG_SIZE_4KBIT	1
+#define AR5K_EEPROM_CFG_SIZE_8KBIT	2
+#define AR5K_EEPROM_CFG_SIZE_16KBIT	3
 #define AR5K_EEPROM_CFG_WR_WAIT_DIS	0x00000004	/* Disable write wait */
 #define AR5K_EEPROM_CFG_CLK_RATE	0x00000018	/* Clock rate */
-#define AR5K_EEPROM_CFG_PROT_KEY	0x00ffff00	/* Protectio key */
+#define AR5K_EEPROM_CFG_CLK_RATE_S		3
+#define AR5K_EEPROM_CFG_CLK_RATE_156KHZ	0
+#define AR5K_EEPROM_CFG_CLK_RATE_312KHZ	1
+#define AR5K_EEPROM_CFG_CLK_RATE_625KHZ	2
+#define AR5K_EEPROM_CFG_PROT_KEY	0x00ffff00      /* Protection key */
+#define AR5K_EEPROM_CFG_PROT_KEY_S	8
 #define AR5K_EEPROM_CFG_LIND_EN		0x01000000	/* Enable length indicator (?) */
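A hedged illustration of how the new EEPROM_CFG field defines could be consumed, assuming the AR5K_REG_MS() mask/shift helper from ath5k.h and an initialized struct ath5k_hw *ah (the helper name below is made up):

/* Sketch only: read back the protection key field using the new
 * mask/shift pair (AR5K_REG_MS() pairs a mask with its "_S" shift). */
static u16 example_eeprom_prot_key(struct ath5k_hw *ah)
{
	return AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_EEPROM_CFG),
			AR5K_EEPROM_CFG_PROT_KEY);
}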
 
 
 /*
+ * TODO: Wake On Wireless registers
+ * Range 0x7000 - 0x7ce0
+ */
+
+/*
  * Protocol Control Unit (PCU) registers
  */
 /*
@@ -1139,11 +1134,13 @@
 #define AR5K_STA_ID1_DESC_ANTENNA	0x00400000	/* Update antenna from descriptor */
 #define AR5K_STA_ID1_RTS_DEF_ANTENNA	0x00800000	/* Use default antenna for RTS */
 #define AR5K_STA_ID1_ACKCTS_6MB		0x01000000	/* Use 6Mbit/s for ACK/CTS */
-#define AR5K_STA_ID1_BASE_RATE_11B	0x02000000	/* Use 11b base rate (for ACK/CTS ?) [5211+] */
-#define AR5K_STA_ID1_SELF_GEN_SECTORE	0x04000000	/* Self generate sectore (?) */
+#define AR5K_STA_ID1_BASE_RATE_11B	0x02000000	/* Use 11b base rate for ACK/CTS [5211+] */
+#define AR5K_STA_ID1_SELFGEN_DEF_ANT	0x04000000	/* Use def. antenna for self generated frames */
 #define AR5K_STA_ID1_CRYPT_MIC_EN	0x08000000	/* Enable MIC */
-#define AR5K_STA_ID1_KEYSRCH_MODE	0x10000000	/* Keysearch mode (?) */
+#define AR5K_STA_ID1_KEYSRCH_MODE	0x10000000	/* Look up key when key id != 0 */
 #define AR5K_STA_ID1_PRESERVE_SEQ_NUM	0x20000000	/* Preserve sequence number */
+#define AR5K_STA_ID1_CBCIV_ENDIAN	0x40000000	/* ??? */
+#define AR5K_STA_ID1_KEYSRCH_MCAST	0x80000000	/* Do key cache search for mcast frames */
 
 /*
  * First BSSID register (MAC address, lower 32bits)
@@ -1402,16 +1399,16 @@
 #define AR5K_DIAG_SW_LOOP_BACK_5211	0x00000040
 #define AR5K_DIAG_SW_LOOP_BACK		(ah->ah_version == AR5K_AR5210 ? \
 					AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211)
-#define AR5K_DIAG_SW_CORR_FCS_5210	0x00000100
+#define AR5K_DIAG_SW_CORR_FCS_5210	0x00000100	/* Corrupted FCS */
 #define AR5K_DIAG_SW_CORR_FCS_5211	0x00000080
 #define AR5K_DIAG_SW_CORR_FCS		(ah->ah_version == AR5K_AR5210 ? \
 					AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211)
-#define AR5K_DIAG_SW_CHAN_INFO_5210	0x00000200
+#define AR5K_DIAG_SW_CHAN_INFO_5210	0x00000200	/* Dump channel info */
 #define AR5K_DIAG_SW_CHAN_INFO_5211	0x00000100
 #define AR5K_DIAG_SW_CHAN_INFO		(ah->ah_version == AR5K_AR5210 ? \
 					AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
-#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211	0x00000200	/* Enable scrambler seed */
-#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210	0x00000400
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210	0x00000400	/* Enable fixed scrambler seed */
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211	0x00000200
 #define AR5K_DIAG_SW_EN_SCRAM_SEED	(ah->ah_version == AR5K_AR5210 ? \
 					AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
 #define AR5K_DIAG_SW_ECO_ENABLE		0x00000400	/* [5211+] */
@@ -1420,12 +1417,15 @@
 #define AR5K_DIAG_SW_SCRAM_SEED_S	10
 #define AR5K_DIAG_SW_DIS_SEQ_INC	0x00040000	/* Disable seqnum increment (?)[5210] */
 #define AR5K_DIAG_SW_FRAME_NV0_5210	0x00080000
-#define AR5K_DIAG_SW_FRAME_NV0_5211	0x00020000
+#define AR5K_DIAG_SW_FRAME_NV0_5211	0x00020000	/* Accept frames of non-zero protocol number */
 #define	AR5K_DIAG_SW_FRAME_NV0		(ah->ah_version == AR5K_AR5210 ? \
 					AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
-#define AR5K_DIAG_SW_OBSPT_M		0x000c0000
+#define AR5K_DIAG_SW_OBSPT_M		0x000c0000	/* Observation point select (?) */
 #define AR5K_DIAG_SW_OBSPT_S		18
-/* more bits */
+#define AR5K_DIAG_SW_RX_CLEAR_HIGH	0x0010000	/* Force RX Clear high */
+#define AR5K_DIAG_SW_IGNORE_CARR_SENSE	0x0020000	/* Ignore virtual carrier sense */
+#define AR5K_DIAG_SW_CHANEL_IDLE_HIGH	0x0040000	/* Force channel idle high */
+#define AR5K_DIAG_SW_PHEAR_ME		0x0080000	/* ??? */
 
 /*
  * TSF (clock) register (lower 32 bits)
@@ -1636,16 +1636,16 @@
  *
  * XXX: PCDAC steps (0.5dbm) or DBM ?
  *
- * XXX: Mask changes for newer chips to 7f
- *      like tx power table ?
  */
 #define AR5K_TXPC			0x80e8			/* Register Address */
-#define AR5K_TXPC_ACK_M			0x0000003f	/* Mask for ACK tx power */
+#define AR5K_TXPC_ACK_M			0x0000003f	/* ACK tx power */
 #define AR5K_TXPC_ACK_S			0
-#define AR5K_TXPC_CTS_M			0x00003f00	/* Mask for CTS tx power */
+#define AR5K_TXPC_CTS_M			0x00003f00	/* CTS tx power */
 #define AR5K_TXPC_CTS_S			8
-#define AR5K_TXPC_CHIRP_M		0x003f0000	/* Mask for CHIRP tx power */
-#define AR5K_TXPC_CHIRP_S		22
+#define AR5K_TXPC_CHIRP_M		0x003f0000	/* CHIRP tx power */
+#define AR5K_TXPC_CHIRP_S		16
+#define AR5K_TXPC_DOPPLER		0x0f000000	/* Doppler chirp span (?) */
+#define AR5K_TXPC_DOPPLER_S		24
 
 /*
  * Profile count registers
@@ -1656,14 +1656,19 @@
 #define AR5K_PROFCNT_CYCLE		0x80f8	/* Cycle count (?) */
 
 /*
- * Quiet (period) control registers (?)
+ * Quiet period control registers
  */
 #define AR5K_QUIET_CTL1			0x80fc			/* Register Address */
-#define AR5K_QUIET_CTL1_NEXT_QT		0x0000ffff	/* Mask for next quiet (period?) (?) */
-#define AR5K_QUIET_CTL1_QT_EN		0x00010000	/* Enable quiet (period?) */
+#define AR5K_QUIET_CTL1_NEXT_QT_TSF	0x0000ffff	/* Next quiet period TSF (TU) */
+#define AR5K_QUIET_CTL1_NEXT_QT_TSF_S	0
+#define AR5K_QUIET_CTL1_QT_EN		0x00010000	/* Enable quiet period */
+#define AR5K_QUIET_CTL1_ACK_CTS_EN	0x00020000	/* Send ACK/CTS during quiet period */
+
 #define AR5K_QUIET_CTL2			0x8100			/* Register Address */
-#define AR5K_QUIET_CTL2_QT_PER		0x0000ffff	/* Mask for quiet period (?) */
-#define AR5K_QUIET_CTL2_QT_DUR		0xffff0000	/* Mask for quiet duration (?) */
+#define AR5K_QUIET_CTL2_QT_PER		0x0000ffff	/* Mask for quiet period periodicity */
+#define AR5K_QUIET_CTL2_QT_PER_S	0
+#define AR5K_QUIET_CTL2_QT_DUR		0xffff0000	/* Mask for quiet period duration */
+#define AR5K_QUIET_CTL2_QT_DUR_S	16
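For illustration, a quiet period could be scheduled with these fields roughly as sketched below; the helper name, the AR5K_REG_SM() macro from ath5k.h, and the assumption that the caller supplies TSF/period/duration values in TU are all illustrative, and nothing in this patch programs these registers yet:

/* Sketch only: enable a quiet period starting at TSF time next_tu,
 * repeating every period TU and lasting dur TU. */
static void example_set_quiet_period(struct ath5k_hw *ah, u16 next_tu,
		u16 period, u16 dur)
{
	ath5k_hw_reg_write(ah, AR5K_QUIET_CTL1_QT_EN |
		AR5K_REG_SM(next_tu, AR5K_QUIET_CTL1_NEXT_QT_TSF),
		AR5K_QUIET_CTL1);
	ath5k_hw_reg_write(ah, AR5K_REG_SM(period, AR5K_QUIET_CTL2_QT_PER) |
		AR5K_REG_SM(dur, AR5K_QUIET_CTL2_QT_DUR),
		AR5K_QUIET_CTL2);
}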
 
 /*
  * TSF parameter register
@@ -1673,12 +1678,15 @@
 #define AR5K_TSF_PARM_INC_S		0
 
 /*
- * QoS register (?)
+ * QoS NOACK policy
  */
-#define AR5K_QOS			0x8108			/* Register Address */
-#define AR5K_QOS_NOACK_2BIT_VALUES	0x00000000	/* (field) */
-#define AR5K_QOS_NOACK_BIT_OFFSET	0x00000020	/* (field) */
-#define AR5K_QOS_NOACK_BYTE_OFFSET	0x00000080	/* (field) */
+#define AR5K_QOS_NOACK			0x8108			/* Register Address */
+#define AR5K_QOS_NOACK_2BIT_VALUES	0x0000000f	/* ??? */
+#define AR5K_QOS_NOACK_2BIT_VALUES_S	0
+#define AR5K_QOS_NOACK_BIT_OFFSET	0x00000070	/* ??? */
+#define AR5K_QOS_NOACK_BIT_OFFSET_S	4
+#define AR5K_QOS_NOACK_BYTE_OFFSET	0x00000180	/* ??? */
+#define AR5K_QOS_NOACK_BYTE_OFFSET_S	8
 
 /*
  * PHY error filter register
@@ -1702,29 +1710,15 @@
 /*
  * MIC QoS control register (?)
  */
-#define	AR5K_MIC_QOS_CTL	0x8118			/* Register Address */
-#define	AR5K_MIC_QOS_CTL_0	0x00000001	/* MIC QoS control 0 (?) */
-#define	AR5K_MIC_QOS_CTL_1	0x00000004	/* MIC QoS control 1 (?) */
-#define	AR5K_MIC_QOS_CTL_2	0x00000010	/* MIC QoS control 2 (?) */
-#define	AR5K_MIC_QOS_CTL_3	0x00000040	/* MIC QoS control 3 (?) */
-#define	AR5K_MIC_QOS_CTL_4	0x00000100	/* MIC QoS control 4 (?) */
-#define	AR5K_MIC_QOS_CTL_5	0x00000400	/* MIC QoS control 5 (?) */
-#define	AR5K_MIC_QOS_CTL_6	0x00001000	/* MIC QoS control 6 (?) */
-#define	AR5K_MIC_QOS_CTL_7	0x00004000	/* MIC QoS control 7 (?) */
-#define	AR5K_MIC_QOS_CTL_MQ_EN	0x00010000	/* Enable MIC QoS */
+#define	AR5K_MIC_QOS_CTL		0x8118			/* Register Address */
+#define	AR5K_MIC_QOS_CTL_OFF(_n)	(1 << (_n * 2))
+#define	AR5K_MIC_QOS_CTL_MQ_EN		0x00010000	/* Enable MIC QoS */
 
 /*
  * MIC QoS select register (?)
  */
-#define	AR5K_MIC_QOS_SEL	0x811c
-#define	AR5K_MIC_QOS_SEL_0	0x00000001
-#define	AR5K_MIC_QOS_SEL_1	0x00000010
-#define	AR5K_MIC_QOS_SEL_2	0x00000100
-#define	AR5K_MIC_QOS_SEL_3	0x00001000
-#define	AR5K_MIC_QOS_SEL_4	0x00010000
-#define	AR5K_MIC_QOS_SEL_5	0x00100000
-#define	AR5K_MIC_QOS_SEL_6	0x01000000
-#define	AR5K_MIC_QOS_SEL_7	0x10000000
+#define	AR5K_MIC_QOS_SEL		0x811c
+#define	AR5K_MIC_QOS_SEL_OFF(_n)	(1 << (_n * 4))
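The parameterized macros above reproduce the per-index constants they replace; an illustrative compile-time check (BUILD_BUG_ON from linux/kernel.h, helper name made up):

static inline void example_mic_qos_macro_check(void)
{
	/* Index 3 matches the removed _3 defines. */
	BUILD_BUG_ON(AR5K_MIC_QOS_CTL_OFF(3) != 0x00000040);	/* old AR5K_MIC_QOS_CTL_3 */
	BUILD_BUG_ON(AR5K_MIC_QOS_SEL_OFF(3) != 0x00001000);	/* old AR5K_MIC_QOS_SEL_3 */
}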
 
 /*
  * Misc mode control register (?)
@@ -1759,6 +1753,11 @@
 #define	AR5K_TSF_THRES			0x813c
 
 /*
+ * TODO: Wake On Wireless registers
+ * Range: 0x8147 - 0x818c
+ */
+
+/*
  * Rate -> ACK SIFS mapping table (32 entries)
  */
 #define	AR5K_RATE_ACKSIFS_BASE		0x8680			/* Register Address */
@@ -1873,7 +1872,8 @@
  */
 #define	AR5K_PHY_TURBO			0x9804			/* Register Address */
 #define	AR5K_PHY_TURBO_MODE		0x00000001	/* Enable turbo mode */
-#define	AR5K_PHY_TURBO_SHORT		0x00000002	/* Short mode (20Mhz channels) (?) */
+#define	AR5K_PHY_TURBO_SHORT		0x00000002	/* Set short symbols to turbo mode */
+#define	AR5K_PHY_TURBO_MIMO		0x00000004	/* Set turbo for MIMO */
 
 /*
  * PHY agility command register
@@ -1883,6 +1883,11 @@
 #define	AR5K_PHY_TST1			0x9808
 #define	AR5K_PHY_AGC_DISABLE		0x08000000	/* Disable AGC to A2 (?)*/
 #define	AR5K_PHY_TST1_TXHOLD		0x00003800	/* Set tx hold (?) */
+#define	AR5K_PHY_TST1_TXSRC_SRC		0x00000002	/* Used with bit 7 (?) */
+#define	AR5K_PHY_TST1_TXSRC_SRC_S	1
+#define	AR5K_PHY_TST1_TXSRC_ALT		0x00000080	/* Set input to tsdac (?) */
+#define	AR5K_PHY_TST1_TXSRC_ALT_S	7
+
 
 /*
  * PHY timing register 3 [5112+]
@@ -1907,15 +1912,23 @@
 
 /*
  * PHY RF control registers
- * (i think these are delay times,
- * these calibration values exist
- * in EEPROM)
  */
 #define AR5K_PHY_RF_CTL2		0x9824			/* Register Address */
-#define	AR5K_PHY_RF_CTL2_TXF2TXD_START	0x0000000f	/* Mask for TX frame to TX d(esc?) start */
+#define	AR5K_PHY_RF_CTL2_TXF2TXD_START	0x0000000f	/* TX frame to TX data start */
+#define	AR5K_PHY_RF_CTL2_TXF2TXD_START_S	0
 
 #define AR5K_PHY_RF_CTL3		0x9828			/* Register Address */
-#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON	0x0000000f	/* Mask for TX end to XLNA on */
+#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON	0x0000000f	/* TX end to XLNA on */
+#define	AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S	0
+
+#define	AR5K_PHY_ADC_CTL			0x982c
+#define	AR5K_PHY_ADC_CTL_INBUFGAIN_OFF		0x00000003
+#define	AR5K_PHY_ADC_CTL_INBUFGAIN_OFF_S	0
+#define	AR5K_PHY_ADC_CTL_PWD_DAC_OFF		0x00002000
+#define	AR5K_PHY_ADC_CTL_PWD_BAND_GAP_OFF	0x00004000
+#define	AR5K_PHY_ADC_CTL_PWD_ADC_OFF		0x00008000
+#define	AR5K_PHY_ADC_CTL_INBUFGAIN_ON		0x00030000
+#define	AR5K_PHY_ADC_CTL_INBUFGAIN_ON_S		16
 
 #define AR5K_PHY_RF_CTL4		0x9834			/* Register Address */
 #define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON	0x00000001	/* TX frame to XPA A on (field) */
@@ -1937,35 +1950,43 @@
  * PHY settling register
  */
 #define AR5K_PHY_SETTLING		0x9844			/* Register Address */
-#define	AR5K_PHY_SETTLING_AGC		0x0000007f	/* Mask for AGC settling time */
-#define	AR5K_PHY_SETTLING_SWITCH	0x00003f80	/* Mask for Switch settlig time */
+#define	AR5K_PHY_SETTLING_AGC		0x0000007f	/* AGC settling time */
+#define	AR5K_PHY_SETTLING_AGC_S		0
+#define	AR5K_PHY_SETTLING_SWITCH	0x00003f80	/* Switch settling time */
+#define	AR5K_PHY_SETTLINK_SWITCH_S	7
 
 /*
  * PHY Gain registers
  */
 #define AR5K_PHY_GAIN			0x9848			/* Register Address */
-#define	AR5K_PHY_GAIN_TXRX_ATTEN	0x0003f000	/* Mask for TX-RX Attenuation */
+#define	AR5K_PHY_GAIN_TXRX_ATTEN	0x0003f000	/* TX-RX Attenuation */
+#define	AR5K_PHY_GAIN_TXRX_ATTEN_S	12
+#define	AR5K_PHY_GAIN_TXRX_RF_MAX	0x007c0000
+#define	AR5K_PHY_GAIN_TXRX_RF_MAX_S	18
 
 #define	AR5K_PHY_GAIN_OFFSET		0x984c			/* Register Address */
 #define	AR5K_PHY_GAIN_OFFSET_RXTX_FLAG	0x00020000	/* RX-TX flag (?) */
 
 /*
- * Desired size register
+ * Desired ADC/PGA size register
  * (for more infos read ANI patent)
  */
 #define AR5K_PHY_DESIRED_SIZE		0x9850			/* Register Address */
-#define	AR5K_PHY_DESIRED_SIZE_ADC	0x000000ff	/* Mask for ADC desired size */
-#define	AR5K_PHY_DESIRED_SIZE_PGA	0x0000ff00	/* Mask for PGA desired size */
-#define	AR5K_PHY_DESIRED_SIZE_TOT	0x0ff00000	/* Mask for Total desired size (?) */
+#define	AR5K_PHY_DESIRED_SIZE_ADC	0x000000ff	/* ADC desired size */
+#define	AR5K_PHY_DESIRED_SIZE_ADC_S	0
+#define	AR5K_PHY_DESIRED_SIZE_PGA	0x0000ff00	/* PGA desired size */
+#define	AR5K_PHY_DESIRED_SIZE_PGA_S	8
+#define	AR5K_PHY_DESIRED_SIZE_TOT	0x0ff00000	/* Total desired size */
+#define	AR5K_PHY_DESIRED_SIZE_TOT_S	20
 
 /*
  * PHY signal register
  * (for more infos read ANI patent)
  */
 #define	AR5K_PHY_SIG			0x9858			/* Register Address */
-#define	AR5K_PHY_SIG_FIRSTEP		0x0003f000	/* Mask for FIRSTEP */
+#define	AR5K_PHY_SIG_FIRSTEP		0x0003f000	/* FIRSTEP */
 #define	AR5K_PHY_SIG_FIRSTEP_S		12
-#define	AR5K_PHY_SIG_FIRPWR		0x03fc0000	/* Mask for FIPWR */
+#define	AR5K_PHY_SIG_FIRPWR		0x03fc0000	/* FIPWR */
 #define	AR5K_PHY_SIG_FIRPWR_S		18
 
 /*
@@ -1973,9 +1994,9 @@
  * (for more infos read ANI patent)
  */
 #define	AR5K_PHY_AGCCOARSE		0x985c			/* Register Address */
-#define	AR5K_PHY_AGCCOARSE_LO		0x00007f80	/* Mask for AGC Coarse low */
+#define	AR5K_PHY_AGCCOARSE_LO		0x00007f80	/* AGC Coarse low */
 #define	AR5K_PHY_AGCCOARSE_LO_S		7
-#define	AR5K_PHY_AGCCOARSE_HI		0x003f8000	/* Mask for AGC Coarse high */
+#define	AR5K_PHY_AGCCOARSE_HI		0x003f8000	/* AGC Coarse high */
 #define	AR5K_PHY_AGCCOARSE_HI_S		15
 
 /*
@@ -1984,6 +2005,8 @@
 #define	AR5K_PHY_AGCCTL			0x9860			/* Register address */
 #define	AR5K_PHY_AGCCTL_CAL		0x00000001	/* Enable PHY calibration */
 #define	AR5K_PHY_AGCCTL_NF		0x00000002	/* Enable Noise Floor calibration */
+#define	AR5K_PHY_AGCCTL_NF_EN		0x00008000	/* Enable nf calibration to happen (?) */
+#define	AR5K_PHY_AGCCTL_NF_NOUPDATE	0x00020000	/* Don't update nf automatically */
 
 /*
  * PHY noise floor status register
@@ -1994,7 +2017,10 @@
 #define AR5K_PHY_NF_RVAL(_n)		(((_n) >> 19) & AR5K_PHY_NF_M)
 #define AR5K_PHY_NF_AVAL(_n)		(-((_n) ^ AR5K_PHY_NF_M) + 1)
 #define AR5K_PHY_NF_SVAL(_n)		(((_n) & AR5K_PHY_NF_M) | (1 << 9))
-#define	AR5K_PHY_NF_THRESH62		0x00001000	/* Thresh62 -check ANI patent- (field) */
+#define	AR5K_PHY_NF_THRESH62		0x0007f000	/* Thresh62 -check ANI patent- (field) */
+#define	AR5K_PHY_NF_THRESH62_S		12
+#define	AR5K_PHY_NF_MINCCA_PWR		0x0ff80000	/* ??? */
+#define	AR5K_PHY_NF_MINCCA_PWR_S	19
 
 /*
  * PHY ADC saturation register [5110]
@@ -2034,24 +2060,31 @@
  */
 #define AR5K_PHY_SCR			0x9870
 #define AR5K_PHY_SCR_32MHZ		0x0000001f
+
 #define AR5K_PHY_SLMT			0x9874
 #define AR5K_PHY_SLMT_32MHZ		0x0000007f
+
 #define AR5K_PHY_SCAL			0x9878
 #define AR5K_PHY_SCAL_32MHZ		0x0000000e
 
+
 /*
  * PHY PLL (Phase Locked Loop) control register
  */
 #define	AR5K_PHY_PLL			0x987c
-#define	AR5K_PHY_PLL_20MHZ		0x13	/* For half rate (?) [5111+] */
-#define	AR5K_PHY_PLL_40MHZ_5211		0x18	/* For 802.11a */
+#define	AR5K_PHY_PLL_20MHZ		0x00000013	/* For half rate (?) */
+/* 40MHz -> 5GHz band */
+#define	AR5K_PHY_PLL_40MHZ_5211		0x00000018
 #define	AR5K_PHY_PLL_40MHZ_5212		0x000000aa
+#define	AR5K_PHY_PLL_40MHZ_5413		0x00000004
 #define	AR5K_PHY_PLL_40MHZ		(ah->ah_version == AR5K_AR5211 ? \
 					AR5K_PHY_PLL_40MHZ_5211 : AR5K_PHY_PLL_40MHZ_5212)
-#define	AR5K_PHY_PLL_44MHZ_5211		0x19	/* For 802.11b/g */
+/* 44MHz -> 2.4GHz band */
+#define	AR5K_PHY_PLL_44MHZ_5211		0x00000019
 #define	AR5K_PHY_PLL_44MHZ_5212		0x000000ab
 #define	AR5K_PHY_PLL_44MHZ		(ah->ah_version == AR5K_AR5211 ? \
 					AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
+
 #define AR5K_PHY_PLL_RF5111		0x00000000
 #define AR5K_PHY_PLL_RF5112		0x00000040
 #define	AR5K_PHY_PLL_HALF_RATE		0x00000100
@@ -2118,6 +2151,19 @@
 #define AR5K_PHY_RFSTG_DISABLE		0x00000021
 
 /*
+ * BIN masks (?)
+ */
+#define	AR5K_PHY_BIN_MASK_1	0x9900
+#define	AR5K_PHY_BIN_MASK_2	0x9904
+#define	AR5K_PHY_BIN_MASK_3	0x9908
+
+#define	AR5K_PHY_BIN_MASK_CTL		0x990c
+#define	AR5K_PHY_BIN_MASK_CTL_MASK_4	0x00003fff
+#define	AR5K_PHY_BIN_MASK_CTL_MASK_4_S	0
+#define	AR5K_PHY_BIN_MASK_CTL_RATE	0xff000000
+#define	AR5K_PHY_BIN_MASK_CTL_RATE_S	24
+
+/*
  * PHY Antenna control register
  */
 #define AR5K_PHY_ANT_CTL		0x9910			/* Register Address */
@@ -2164,6 +2210,7 @@
 #define	AR5K_PHY_OFDM_SELFCORR			0x9924			/* Register Address */
 #define	AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN	0x00000001	/* Enable cyclic RSSI thr 1 */
 #define	AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1	0x000000fe	/* Mask for Cyclic RSSI threshold 1 */
+#define	AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S	0
 #define	AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3	0x00000100	/* Cyclic RSSI threshold 3 (field) (?) */
 #define	AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN	0x00008000	/* Enable 1A RSSI threshold (?) */
 #define	AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR	0x00010000	/* 1A RSSI threshold (field) (?) */
@@ -2210,7 +2257,6 @@
 #define	AR5K_PHY_PAPD_PROBE_INI_5111	0x00004883	/* [5212+] */
 #define	AR5K_PHY_PAPD_PROBE_INI_5112	0x00004882	/* [5212+] */
 
-
 /*
  * PHY TX rate power registers [5112+]
  */
@@ -2232,6 +2278,8 @@
 #define	AR5K_PHY_FRAME_CTL_TX_CLIP	0x00000038	/* Mask for tx clip (?) */
 #define	AR5K_PHY_FRAME_CTL_TX_CLIP_S	3
 #define	AR5K_PHY_FRAME_CTL_PREP_CHINFO	0x00010000	/* Prepend chan info */
+#define	AR5K_PHY_FRAME_CTL_EMU		0x80000000
+#define	AR5K_PHY_FRAME_CTL_EMU_S	31
 /*---[5110/5111]---*/
 #define	AR5K_PHY_FRAME_CTL_TIMING_ERR	0x01000000	/* PHY timing error */
 #define	AR5K_PHY_FRAME_CTL_PARITY_ERR	0x02000000	/* Parity error */
@@ -2250,48 +2298,36 @@
  * PHY radar detection register [5111+]
  */
 #define	AR5K_PHY_RADAR			0x9954
-
-/* Radar enable 			........ ........ ........ .......1 */
 #define	AR5K_PHY_RADAR_ENABLE		0x00000001
-#define	AR5K_PHY_RADAR_DISABLE          0x00000000
-#define	AR5K_PHY_RADAR_ENABLE_S		0
-
-/* This is the value found on the card  .1.111.1 .1.1.... 111....1 1...1...
-at power on. */
-#define	AR5K_PHY_RADAR_PWONDEF_AR5213	0x5d50e188
-
-/* This is the value found on the card 	.1.1.111 ..11...1 .1...1.1 1...11.1
-after DFS is enabled */
-#define	AR5K_PHY_RADAR_ENABLED_AR5213	0x5731458d
-
-/* Finite Impulse Response (FIR) filter .1111111 ........ ........ ........
- * power out threshold.
- * 7-bits, standard power range {0..127} in 1/2 dBm units. */
-#define AR5K_PHY_RADAR_FIRPWROUTTHR    	0x7f000000
-#define AR5K_PHY_RADAR_FIRPWROUTTHR_S	24
-
-/* Radar RSSI/SNR threshold.		........ 111111.. ........ ........
- * 6-bits, dBm range {0..63} in dBm units. */
-#define AR5K_PHY_RADAR_RADARRSSITHR    	0x00fc0000
-#define AR5K_PHY_RADAR_RADARRSSITHR_S	18
-
-/* Pulse height threshold 		........ ......11 1111.... ........
- * 6-bits, dBm range {0..63} in dBm units. */
-#define AR5K_PHY_RADAR_PULSEHEIGHTTHR   0x0003f000
-#define AR5K_PHY_RADAR_PULSEHEIGHTTHR_S	12
-
-/* Pulse RSSI/SNR threshold		........ ........ ....1111 11......
- * 6-bits, dBm range {0..63} in dBm units. */
-#define AR5K_PHY_RADAR_PULSERSSITHR    	0x00000fc0
-#define AR5K_PHY_RADAR_PULSERSSITHR_S	6
-
-/* Inband threshold  			........ ........ ........ ..11111.
- * 5-bits, units unknown {0..31} (? MHz ?) */
-#define AR5K_PHY_RADAR_INBANDTHR    	0x0000003e
+#define	AR5K_PHY_RADAR_DISABLE		0x00000000
+#define AR5K_PHY_RADAR_INBANDTHR    	0x0000003e	/* Inband threshold
+							5-bits, units unknown {0..31}
+							(? MHz ?) */
 #define AR5K_PHY_RADAR_INBANDTHR_S	1
 
+#define AR5K_PHY_RADAR_PRSSI_THR    	0x00000fc0	/* Pulse RSSI/SNR threshold
+							6-bits, dBm range {0..63}
+							in dBm units. */
+#define AR5K_PHY_RADAR_PRSSI_THR_S	6
+
+#define AR5K_PHY_RADAR_PHEIGHT_THR   	0x0003f000	/* Pulse height threshold
+							6-bits, dBm range {0..63}
+							in dBm units. */
+#define AR5K_PHY_RADAR_PHEIGHT_THR_S	12
+
+#define AR5K_PHY_RADAR_RSSI_THR    	0x00fc0000	/* Radar RSSI/SNR threshold.
+							6-bits, dBm range {0..63}
+							in dBm units. */
+#define AR5K_PHY_RADAR_RSSI_THR_S	18
+
+#define AR5K_PHY_RADAR_FIRPWR_THR	0x7f000000	/* Finite Impulse Response
+							filter power out threshold.
+							7-bits, standard power range
+							{0..127} in 1/2 dBm units. */
+#define AR5K_PHY_RADAR_FIRPWR_THRS	24
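As a rough illustration only, enabling the pulse detector with the renamed threshold fields might look like the sketch below; the threshold values are arbitrary examples, AR5K_REG_SM() is assumed from ath5k.h, and ath5k does not program DFS in this patch:

/* Sketch only: turn on radar detection with example thresholds. */
static void example_enable_radar(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_PHY_RADAR_ENABLE |
		AR5K_REG_SM(24, AR5K_PHY_RADAR_PRSSI_THR) |
		AR5K_REG_SM(24, AR5K_PHY_RADAR_PHEIGHT_THR) |
		AR5K_REG_SM(24, AR5K_PHY_RADAR_RSSI_THR) |
		AR5K_REG_SM(10, AR5K_PHY_RADAR_INBANDTHR),
		AR5K_PHY_RADAR);
}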
+
 /*
- * PHY antenna switch table registers [5110]
+ * PHY antenna switch table registers
  */
 #define AR5K_PHY_ANT_SWITCH_TABLE_0	0x9960
 #define AR5K_PHY_ANT_SWITCH_TABLE_1	0x9964
@@ -2302,25 +2338,65 @@
 #define AR5K_PHY_NFTHRES		0x9968
 
 /*
- * PHY clock sleep registers [5112+]
+ * Sigma Delta register (?) [5213]
  */
-#define AR5K_PHY_SCLOCK			0x99f0
-#define AR5K_PHY_SCLOCK_32MHZ		0x0000000c
-#define AR5K_PHY_SDELAY			0x99f4
-#define AR5K_PHY_SDELAY_32MHZ		0x000000ff
-#define AR5K_PHY_SPENDING		0x99f8
-#define	AR5K_PHY_SPENDING_14		0x00000014
-#define	AR5K_PHY_SPENDING_18		0x00000018
-#define AR5K_PHY_SPENDING_RF5111	0x00000018
-#define AR5K_PHY_SPENDING_RF5112	0x00000014
-/* #define AR5K_PHY_SPENDING_RF5112A	0x0000000e */
-/* #define AR5K_PHY_SPENDING_RF5424	0x00000012 */
-#define	AR5K_PHY_SPENDING_RF5413	0x00000014
-#define	AR5K_PHY_SPENDING_RF2413	0x00000014
-#define AR5K_PHY_SPENDING_RF2425	0x00000018
+#define AR5K_PHY_SIGMA_DELTA		0x996C
+#define AR5K_PHY_SIGMA_DELTA_ADC_SEL	0x00000003
+#define AR5K_PHY_SIGMA_DELTA_ADC_SEL_S	0
+#define AR5K_PHY_SIGMA_DELTA_FILT2	0x000000f8
+#define AR5K_PHY_SIGMA_DELTA_FILT2_S	3
+#define AR5K_PHY_SIGMA_DELTA_FILT1	0x00001f00
+#define AR5K_PHY_SIGMA_DELTA_FILT1_S	8
+#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP	0x01ff3000
+#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S	13
 
 /*
- * Misc PHY/radio registers [5110 - 5111]
+ * RF restart register [5112+] (?)
+ */
+#define AR5K_PHY_RESTART		0x9970		/* restart */
+#define AR5K_PHY_RESTART_DIV_GC		0x001c0000	/* Fast diversity gc_limit (?) */
+#define AR5K_PHY_RESTART_DIV_GC_S	18
+
+/*
+ * RF Bus access request register (for synth-only channel switching)
+ */
+#define AR5K_PHY_RFBUS_REQ		0x997C
+#define AR5K_PHY_RFBUS_REQ_REQUEST	0x00000001
+
+/*
+ * Spur mitigation masks (?)
+ */
+#define AR5K_PHY_TIMING_7		0x9980
+#define AR5K_PHY_TIMING_8		0x9984
+#define AR5K_PHY_TIMING_8_PILOT_MASK_2		0x000fffff
+#define AR5K_PHY_TIMING_8_PILOT_MASK_2_S	0
+
+#define AR5K_PHY_BIN_MASK2_1		0x9988
+#define AR5K_PHY_BIN_MASK2_2		0x998c
+#define AR5K_PHY_BIN_MASK2_3		0x9990
+
+#define AR5K_PHY_BIN_MASK2_4		0x9994
+#define AR5K_PHY_BIN_MASK2_4_MASK_4	0x00003fff
+#define AR5K_PHY_BIN_MASK2_4_MASK_4_S	0
+
+#define AR_PHY_TIMING_9			0x9998
+#define AR_PHY_TIMING_10		0x999c
+#define AR_PHY_TIMING_10_PILOT_MASK_2	0x000fffff
+#define AR_PHY_TIMING_10_PILOT_MASK_2_S	0
+
+/*
+ * Spur mitigation control
+ */
+#define AR_PHY_TIMING_11			0x99a0		/* Register address */
+#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE	0x000fffff	/* Spur delta phase */
+#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE_S	0
+#define AR_PHY_TIMING_11_SPUR_FREQ_SD		0x3ff00000	/* Freq sigma delta */
+#define AR_PHY_TIMING_11_SPUR_FREQ_SD_S	20
+#define AR_PHY_TIMING_11_USE_SPUR_IN_AGC	0x40000000	/* Spur filter in AGC detector */
+#define AR_PHY_TIMING_11_USE_SPUR_IN_SELFCOR	0x80000000	/* Spur filter in OFDM self correlator */
+
+/*
+ * Gain tables
  */
 #define	AR5K_BB_GAIN_BASE		0x9b00	/* BaseBand Amplifier Gain table base address */
 #define AR5K_BB_GAIN(_n)		(AR5K_BB_GAIN_BASE + ((_n) << 2))
@@ -2340,9 +2416,10 @@
 #define	AR5K_PHY_CURRENT_RSSI	0x9c1c
 
 /*
- * PHY RF Bus grant register (?)
+ * PHY RF Bus grant register
  */
 #define	AR5K_PHY_RFBUS_GRANT	0x9c20
+#define	AR5K_PHY_RFBUS_GRANT_OK	0x00000001
 
 /*
  * PHY ADC test register
@@ -2386,6 +2463,31 @@
 #define	AR5K_PHY_CHAN_STATUS_RX_CLR_PAP	0x00000008
 
 /*
+ * Heavy clip enable register
+ */
+#define	AR5K_PHY_HEAVY_CLIP_ENABLE	0x99e0
+
+/*
+ * PHY clock sleep registers [5112+]
+ */
+#define AR5K_PHY_SCLOCK			0x99f0
+#define AR5K_PHY_SCLOCK_32MHZ		0x0000000c
+#define AR5K_PHY_SDELAY			0x99f4
+#define AR5K_PHY_SDELAY_32MHZ		0x000000ff
+#define AR5K_PHY_SPENDING		0x99f8
+#define AR5K_PHY_SPENDING_14		0x00000014
+#define AR5K_PHY_SPENDING_18		0x00000018
+#define AR5K_PHY_SPENDING_RF5111	0x00000018
+#define AR5K_PHY_SPENDING_RF5112	0x00000014
+/* #define AR5K_PHY_SPENDING_RF5112A	0x0000000e */
+/* #define AR5K_PHY_SPENDING_RF5424	0x00000012 */
+#define AR5K_PHY_SPENDING_RF5413	0x00000018
+#define AR5K_PHY_SPENDING_RF2413	0x00000018
+#define AR5K_PHY_SPENDING_RF2316	0x00000018
+#define AR5K_PHY_SPENDING_RF2317	0x00000018
+#define AR5K_PHY_SPENDING_RF2425	0x00000014
+
+/*
  * PHY PAPD I (power?) table (?)
  * (92! entries)
  */
@@ -2436,10 +2538,47 @@
 #define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR	0x0000000f
 #define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S	0
 
+/* Same address is used for antenna diversity activation */
+#define	AR5K_PHY_FAST_ANT_DIV		0xa208
+#define	AR5K_PHY_FAST_ANT_DIV_EN	0x00002000
+
 /*
  * PHY 2GHz gain register [5111+]
  */
-#define	AR5K_PHY_GAIN_2GHZ		0xa20c
-#define	AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX	0x00fc0000
+#define	AR5K_PHY_GAIN_2GHZ			0xa20c
+#define	AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX		0x00fc0000
 #define	AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX_S	18
-#define	AR5K_PHY_GAIN_2GHZ_INI_5111	0x6480416c
+#define	AR5K_PHY_GAIN_2GHZ_INI_5111		0x6480416c
+
+#define	AR5K_PHY_CCK_RX_CTL_4			0xa21c
+#define	AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT	0x01f80000
+#define	AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT_S	19
+
+#define	AR5K_PHY_DAG_CCK_CTL			0xa228
+#define	AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR	0x00000200
+#define	AR5K_PHY_DAG_CCK_CTL_RSSI_THR		0x0001fc00
+#define	AR5K_PHY_DAG_CCK_CTL_RSSI_THR_S		10
+
+#define	AR5K_PHY_FAST_ADC	0xa24c
+
+#define	AR5K_PHY_BLUETOOTH	0xa254
+
+/*
+ * Transmit Power Control register
+ * [2413+]
+ */
+#define	AR5K_PHY_TPC_RG1		0xa258
+#define	AR5K_PHY_TPC_RG1_NUM_PD_GAIN	0x0000c000
+#define	AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S	14
+
+#define	AR5K_PHY_TPC_RG5			0xa26C
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP	0x0000000F
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP_S	0
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1	0x000003F0
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1_S	4
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2	0x0000FC00
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2_S	10
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3	0x003F0000
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S	16
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4	0x0FC00000
+#define	AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S	22
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
new file mode 100644
index 0000000..8f18868
--- /dev/null
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -0,0 +1,931 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#define _ATH5K_RESET
+
+/*****************************\
+  Reset functions and helpers
+\*****************************/
+
+#include <linux/pci.h>
+#include "ath5k.h"
+#include "reg.h"
+#include "base.h"
+#include "debug.h"
+
+/**
+ * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
+ *
+ * @ah: the &struct ath5k_hw
+ * @channel: the currently set channel upon reset
+ *
+ * Write the OFDM timings for the AR5212 upon reset. This is a helper for
+ * ath5k_hw_reset(). This seems to tune the PLL to a specified frequency
+ * depending on the bandwidth of the channel.
+ *
+ */
+static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+	struct ieee80211_channel *channel)
+{
+	/* Get exponent and mantissa and set it */
+	u32 coef_scaled, coef_exp, coef_man,
+		ds_coef_exp, ds_coef_man, clock;
+
+	if (!(ah->ah_version == AR5K_AR5212) ||
+		!(channel->hw_value & CHANNEL_OFDM))
+		BUG();
+
+	/* Seems there are two PLLs, one for baseband sampling and one
+	 * for tuning. The tuning clock is 40 MHz, or 80 MHz when in
+	 * turbo mode. */
+	clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
+	coef_scaled = ((5 * (clock << 24)) / 2) /
+	channel->center_freq;
+
+	for (coef_exp = 31; coef_exp > 0; coef_exp--)
+		if ((coef_scaled >> coef_exp) & 0x1)
+			break;
+
+	if (!coef_exp)
+		return -EINVAL;
+
+	coef_exp = 14 - (coef_exp - 24);
+	coef_man = coef_scaled +
+		(1 << (24 - coef_exp - 1));
+	ds_coef_man = coef_man >> (24 - coef_exp);
+	ds_coef_exp = coef_exp - 16;
+
+	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+		AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
+	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+		AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
+
+	return 0;
+}
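To make the fixed-point arithmetic above concrete, here is a worked example; the channel (a plain OFDM channel at 2412 MHz, so clock = 40) is chosen for illustration only:

/*
 * Worked example (clock = 40, center_freq = 2412):
 *   coef_scaled = ((5 * (40 << 24)) / 2) / 2412 = 695572
 *   highest set bit of 695572 is bit 19, so coef_exp = 14 - (19 - 24) = 19
 *   coef_man    = 695572 + (1 << (24 - 19 - 1)) = 695588
 *   ds_coef_man = 695588 >> (24 - 19) = 21737
 *   ds_coef_exp = 19 - 16 = 3
 * i.e. DSC_MAN = 21737 and DSC_EXP = 3 end up in AR5K_PHY_TIMING_3.
 */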
+
+
+/*
+ * index into rates for control rates, we can set it up like this because
+ * this is only used for AR5212 and we know it supports G mode
+ */
+static int control_rates[] =
+	{ 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
+
+/**
+ * ath5k_hw_write_rate_duration - set rate duration during hw resets
+ *
+ * @ah: the &struct ath5k_hw
+ * @mode: one of enum ath5k_driver_mode
+ *
+ * Write the rate duration table upon hw reset. This is a helper for
+ * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout for
+ * the hardware for the current mode for each rate. The rates which are capable
+ * of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have another
+ * register for the short preamble ACK timeout calculation.
+ */
+static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
+       unsigned int mode)
+{
+	struct ath5k_softc *sc = ah->ah_sc;
+	struct ieee80211_rate *rate;
+	unsigned int i;
+
+	/* Write rate duration table */
+	for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) {
+		u32 reg;
+		u16 tx_time;
+
+		rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]];
+
+		/* Set ACK timeout */
+		reg = AR5K_RATE_DUR(rate->hw_value);
+
+		/* An ACK frame consists of 10 bytes. If you add the FCS,
+		 * which ieee80211_generic_frame_duration() adds,
+		 * it's 14 bytes. Note we use the control rate and not the
+		 * actual rate for this rate. See mac80211 tx.c
+		 * ieee80211_duration() for a brief description of
+		 * what rate we should choose to TX ACKs. */
+		tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
+							sc->vif, 10, rate));
+
+		ath5k_hw_reg_write(ah, tx_time, reg);
+
+		if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
+			continue;
+
+		/*
+		 * We're not distinguishing short preamble here;
+		 * all we'll get is a longer value, which is not
+		 * necessarily bad. We could export and use
+		 * ieee80211_frame_duration(), but that needs to be
+		 * fixed first to be properly usable by mac80211 drivers:
+		 *
+		 *  - remove erp stuff and let the routine figure out
+		 *    ofdm erp rates
+		 *  - remove passing argument ieee80211_local as
+		 *    drivers don't have access to it
+		 *  - move drivers using ieee80211_generic_frame_duration()
+		 *    to this
+		 */
+		ath5k_hw_reg_write(ah, tx_time,
+			reg + (AR5K_SET_SHORT_PREAMBLE << 2));
+	}
+}
+
+/*
+ * Reset chipset
+ */
+static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+{
+	int ret;
+	u32 mask = val ? val : ~0U;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* Read-and-clear RX Descriptor Pointer*/
+	ath5k_hw_reg_read(ah, AR5K_RXDP);
+
+	/*
+	 * Reset the device and wait until success
+	 */
+	ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
+
+	/* Wait at least 128 PCI clocks */
+	udelay(15);
+
+	if (ah->ah_version == AR5K_AR5210) {
+		val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
+			| AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
+		mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
+			| AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
+	} else {
+		val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
+		mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
+	}
+
+	ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
+
+	/*
+	 * Reset configuration register (for hw byte-swap). Note that this
+	 * is only set for big endian. We do the necessary magic in
+	 * AR5K_INIT_CFG.
+	 */
+	if ((val & AR5K_RESET_CTL_PCU) == 0)
+		ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
+
+	return ret;
+}
+
+/*
+ * Sleep control
+ */
+int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+		bool set_chip, u16 sleep_duration)
+{
+	unsigned int i;
+	u32 staid, data;
+
+	ATH5K_TRACE(ah->ah_sc);
+	staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
+
+	switch (mode) {
+	case AR5K_PM_AUTO:
+		staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
+		/* fallthrough */
+	case AR5K_PM_NETWORK_SLEEP:
+		if (set_chip)
+			ath5k_hw_reg_write(ah,
+				AR5K_SLEEP_CTL_SLE_ALLOW |
+				sleep_duration,
+				AR5K_SLEEP_CTL);
+
+		staid |= AR5K_STA_ID1_PWR_SV;
+		break;
+
+	case AR5K_PM_FULL_SLEEP:
+		if (set_chip)
+			ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
+				AR5K_SLEEP_CTL);
+
+		staid |= AR5K_STA_ID1_PWR_SV;
+		break;
+
+	case AR5K_PM_AWAKE:
+
+		staid &= ~AR5K_STA_ID1_PWR_SV;
+
+		if (!set_chip)
+			goto commit;
+
+		/* Preserve sleep duration */
+		data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
+		if (data & 0xffc00000)
+			data = 0;
+		else
+			data = data & 0xfffcffff;
+
+		ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
+		udelay(15);
+
+		for (i = 50; i > 0; i--) {
+			/* Check if the chip did wake up */
+			if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
+					AR5K_PCICFG_SPWR_DN) == 0)
+				break;
+
+			/* Wait a bit and retry */
+			udelay(200);
+			ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
+		}
+
+		/* Fail if the chip didn't wake up */
+		if (i <= 0)
+			return -EIO;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+commit:
+	ah->ah_power_mode = mode;
+	ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
+
+	return 0;
+}
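A hedged usage sketch of the function above; the sleep duration is an arbitrary example value in whatever units the AR5K_SLEEP_CTL duration field expects, and the helper name is made up:

/* Sketch only: let the chip doze, then force it awake again. */
static int example_power_cycle(struct ath5k_hw *ah)
{
	int ret;

	ret = ath5k_hw_set_power(ah, AR5K_PM_NETWORK_SLEEP, true, 8);
	if (ret)
		return ret;

	return ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
}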
+
+/*
+ * Bring up MAC + PHY Chips
+ */
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
+{
+	struct pci_dev *pdev = ah->ah_sc->pdev;
+	u32 turbo, mode, clock, bus_flags;
+	int ret;
+
+	turbo = 0;
+	mode = 0;
+	clock = 0;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	/* Wakeup the device */
+	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+	if (ret) {
+		ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
+		return ret;
+	}
+
+	if (ah->ah_version != AR5K_AR5210) {
+		/*
+		 * Get channel mode flags
+		 */
+
+		if (ah->ah_radio >= AR5K_RF5112) {
+			mode = AR5K_PHY_MODE_RAD_RF5112;
+			clock = AR5K_PHY_PLL_RF5112;
+		} else {
+			mode = AR5K_PHY_MODE_RAD_RF5111;	/*Zero*/
+			clock = AR5K_PHY_PLL_RF5111;		/*Zero*/
+		}
+
+		if (flags & CHANNEL_2GHZ) {
+			mode |= AR5K_PHY_MODE_FREQ_2GHZ;
+			clock |= AR5K_PHY_PLL_44MHZ;
+
+			if (flags & CHANNEL_CCK) {
+				mode |= AR5K_PHY_MODE_MOD_CCK;
+			} else if (flags & CHANNEL_OFDM) {
+				/* XXX Dynamic OFDM/CCK is not supported by the
+				 * AR5211 so we set MOD_OFDM for plain g (no
+				 * CCK headers) operation. We need to test
+				 * this, 5211 might support ofdm-only g after
+				 * all, there are also initial register values
+				 * in the code for g mode (see initvals.c). */
+				if (ah->ah_version == AR5K_AR5211)
+					mode |= AR5K_PHY_MODE_MOD_OFDM;
+				else
+					mode |= AR5K_PHY_MODE_MOD_DYN;
+			} else {
+				ATH5K_ERR(ah->ah_sc,
+					"invalid radio modulation mode\n");
+				return -EINVAL;
+			}
+		} else if (flags & CHANNEL_5GHZ) {
+			mode |= AR5K_PHY_MODE_FREQ_5GHZ;
+			clock |= AR5K_PHY_PLL_40MHZ;
+
+			if (flags & CHANNEL_OFDM)
+				mode |= AR5K_PHY_MODE_MOD_OFDM;
+			else {
+				ATH5K_ERR(ah->ah_sc,
+					"invalid radio modulation mode\n");
+				return -EINVAL;
+			}
+		} else {
+			ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
+			return -EINVAL;
+		}
+
+		if (flags & CHANNEL_TURBO)
+			turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
+	} else { /* Reset the device */
+
+		/* ...enable Atheros turbo mode if requested */
+		if (flags & CHANNEL_TURBO)
+			ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
+					AR5K_PHY_TURBO);
+	}
+
+	/* Resetting the PCI core on PCI-E cards causes the card to hang
+	 * and always return 0xffff..., so we ignore that flag
+	 * for PCI-E cards */
+	bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
+
+	/* Reset chipset */
+	if (ah->ah_version == AR5K_AR5210) {
+		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
+			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
+			mdelay(2);
+	} else {
+		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+			AR5K_RESET_CTL_BASEBAND | bus_flags);
+	}
+	if (ret) {
+		ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
+		return -EIO;
+	}
+
+	/* ...wakeup again!*/
+	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+	if (ret) {
+		ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
+		return ret;
+	}
+
+	/* ...final warm reset */
+	if (ath5k_hw_nic_reset(ah, 0)) {
+		ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
+		return -EIO;
+	}
+
+	if (ah->ah_version != AR5K_AR5210) {
+		/* ...set the PHY operating mode */
+		ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
+		udelay(300);
+
+		ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
+		ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
+	}
+
+	return 0;
+}
+
+/*
+ * Main reset function
+ */
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+	struct ieee80211_channel *channel, bool change_channel)
+{
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	struct pci_dev *pdev = ah->ah_sc->pdev;
+	u32 data, s_seq, s_ant, s_led[3], dma_size;
+	unsigned int i, mode, freq, ee_mode, ant[2];
+	int ret;
+
+	ATH5K_TRACE(ah->ah_sc);
+
+	s_seq = 0;
+	s_ant = 0;
+	ee_mode = 0;
+	freq = 0;
+	mode = 0;
+
+	/*
+	 * Save some registers before a reset
+	 */
+	/*DCU/Antenna selection not available on 5210*/
+	if (ah->ah_version != AR5K_AR5210) {
+		if (change_channel) {
+			/* Seq number for queue 0 -do this for all queues ? */
+			s_seq = ath5k_hw_reg_read(ah,
+					AR5K_QUEUE_DFS_SEQNUM(0));
+			/*Default antenna*/
+			s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
+		}
+	}
+
+	/*GPIOs*/
+	s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
+	s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
+	s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
+
+	if (change_channel && ah->ah_rf_banks != NULL)
+		ath5k_hw_get_rf_gain(ah);
+
+
+	/*Wakeup the device*/
+	ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
+	if (ret)
+		return ret;
+
+	/*
+	 * Initialize operating mode
+	 */
+	ah->ah_op_mode = op_mode;
+
+	/*
+	 * 5111/5112 Settings
+	 * 5210 only comes with RF5110
+	 */
+	if (ah->ah_version != AR5K_AR5210) {
+		if (ah->ah_radio != AR5K_RF5111 &&
+			ah->ah_radio != AR5K_RF5112 &&
+			ah->ah_radio != AR5K_RF5413 &&
+			ah->ah_radio != AR5K_RF2413 &&
+			ah->ah_radio != AR5K_RF2425) {
+			ATH5K_ERR(ah->ah_sc,
+				"invalid phy radio: %u\n", ah->ah_radio);
+			return -EINVAL;
+		}
+
+		switch (channel->hw_value & CHANNEL_MODES) {
+		case CHANNEL_A:
+			mode = AR5K_MODE_11A;
+			freq = AR5K_INI_RFGAIN_5GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11A;
+			break;
+		case CHANNEL_G:
+			mode = AR5K_MODE_11G;
+			freq = AR5K_INI_RFGAIN_2GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11G;
+			break;
+		case CHANNEL_B:
+			mode = AR5K_MODE_11B;
+			freq = AR5K_INI_RFGAIN_2GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11B;
+			break;
+		case CHANNEL_T:
+			mode = AR5K_MODE_11A_TURBO;
+			freq = AR5K_INI_RFGAIN_5GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11A;
+			break;
+		/*Is this ok on 5211 too ?*/
+		case CHANNEL_TG:
+			mode = AR5K_MODE_11G_TURBO;
+			freq = AR5K_INI_RFGAIN_2GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11G;
+			break;
+		case CHANNEL_XR:
+			if (ah->ah_version == AR5K_AR5211) {
+				ATH5K_ERR(ah->ah_sc,
+					"XR mode not available on 5211");
+				return -EINVAL;
+			}
+			mode = AR5K_MODE_XR;
+			freq = AR5K_INI_RFGAIN_5GHZ;
+			ee_mode = AR5K_EEPROM_MODE_11A;
+			break;
+		default:
+			ATH5K_ERR(ah->ah_sc,
+				"invalid channel: %d\n", channel->center_freq);
+			return -EINVAL;
+		}
+
+		/* PHY access enable */
+		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+
+	}
+
+	ret = ath5k_hw_write_initvals(ah, mode, change_channel);
+	if (ret)
+		return ret;
+
+	/*
+	 * 5211/5212 Specific
+	 */
+	if (ah->ah_version != AR5K_AR5210) {
+		/*
+		 * Write initial RF gain settings
+		 * This should work for both 5111/5112
+		 */
+		ret = ath5k_hw_rfgain(ah, freq);
+		if (ret)
+			return ret;
+
+		mdelay(1);
+
+		/*
+		 * Write some more initial register settings
+		 */
+		if (ah->ah_version == AR5K_AR5212) {
+			ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
+
+			if (channel->hw_value == CHANNEL_G)
+				if (ah->ah_mac_srev < AR5K_SREV_AR2413)
+					ath5k_hw_reg_write(ah, 0x00f80d80,
+								0x994c);
+				else if (ah->ah_mac_srev < AR5K_SREV_AR5424)
+					ath5k_hw_reg_write(ah, 0x00380140,
+								0x994c);
+				else if (ah->ah_mac_srev < AR5K_SREV_AR2425)
+					ath5k_hw_reg_write(ah, 0x00fc0ec0,
+								0x994c);
+				else /* 2425 */
+					ath5k_hw_reg_write(ah, 0x00fc0fc0,
+								0x994c);
+			else
+				ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
+
+			/* Some bits are disabled here, we know nothing about
+			 * register 0xa228 yet, most of the times this ends up
+			 * with a value 0x9b5 -haven't seen any dump with
+			 * a different value- */
+			/* Got this from decompiling binary HAL */
+			data = ath5k_hw_reg_read(ah, 0xa228);
+			data &= 0xfffffdff;
+			ath5k_hw_reg_write(ah, data, 0xa228);
+
+			data = ath5k_hw_reg_read(ah, 0xa228);
+			data &= 0xfffe03ff;
+			ath5k_hw_reg_write(ah, data, 0xa228);
+			data = 0;
+
+			/* Just write 0x9b5 ? */
+			/* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
+			ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
+			ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
+			ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
+		}
+
+		/* Fix for first revision of the RF5112 RF chipset */
+		if (ah->ah_radio >= AR5K_RF5112 &&
+				ah->ah_radio_5ghz_revision <
+				AR5K_SREV_RAD_5112A) {
+			ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
+					AR5K_PHY_CCKTXCTL);
+			if (channel->hw_value & CHANNEL_5GHZ)
+				data = 0xffb81020;
+			else
+				data = 0xffb80d20;
+			ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
+			data = 0;
+		}
+
+		/*
+		 * Set TX power (FIXME)
+		 */
+		ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
+		if (ret)
+			return ret;
+
+		/* Write rate duration table only on AR5212 and if
+		 * virtual interface has already been brought up
+		 * XXX: rethink this after new mode changes to
+		 * mac80211 are integrated */
+		if (ah->ah_version == AR5K_AR5212 &&
+			ah->ah_sc->vif != NULL)
+			ath5k_hw_write_rate_duration(ah, mode);
+
+		/*
+		 * Write RF registers
+		 */
+		ret = ath5k_hw_rfregs(ah, channel, mode);
+		if (ret)
+			return ret;
+
+		/*
+		 * Configure additional registers
+		 */
+
+		/* Write OFDM timings on 5212*/
+		if (ah->ah_version == AR5K_AR5212 &&
+			channel->hw_value & CHANNEL_OFDM) {
+			ret = ath5k_hw_write_ofdm_timings(ah, channel);
+			if (ret)
+				return ret;
+		}
+
+		/*Enable/disable 802.11b mode on 5111
+		(enable 2111 frequency converter + CCK)*/
+		if (ah->ah_radio == AR5K_RF5111) {
+			if (mode == AR5K_MODE_11B)
+				AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
+				    AR5K_TXCFG_B_MODE);
+			else
+				AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
+				    AR5K_TXCFG_B_MODE);
+		}
+
+		/*
+		 * Set channel and calibrate the PHY
+		 */
+		ret = ath5k_hw_channel(ah, channel);
+		if (ret)
+			return ret;
+
+		/* Set antenna mode */
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
+			ah->ah_antenna[ee_mode][0], 0xfffffc06);
+
+		/*
+		 * In case a fixed antenna was set as default
+		 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
+		 * registers.
+		 */
+		if (s_ant != 0) {
+			if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
+				ant[0] = ant[1] = AR5K_ANT_FIXED_A;
+			else	/* 2 - Aux */
+				ant[0] = ant[1] = AR5K_ANT_FIXED_B;
+		} else {
+			ant[0] = AR5K_ANT_FIXED_A;
+			ant[1] = AR5K_ANT_FIXED_B;
+		}
+
+		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
+			AR5K_PHY_ANT_SWITCH_TABLE_0);
+		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
+			AR5K_PHY_ANT_SWITCH_TABLE_1);
+
+		/* Commit values from EEPROM */
+		if (ah->ah_radio == AR5K_RF5111)
+			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
+			    AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);
+
+		ath5k_hw_reg_write(ah,
+			AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
+			AR5K_PHY_NFTHRES);
+
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
+			(ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
+			0xffffc07f);
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
+			(ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
+			0xfffc0fff);
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+			(ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
+			((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
+			0xffff0000);
+
+		ath5k_hw_reg_write(ah,
+			(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
+			(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
+			(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
+			(ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
+
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
+			ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
+			(ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
+		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
+
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+		    AR5K_PHY_IQ_CORR_ENABLE |
+		    (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
+		    ee->ee_q_cal[ee_mode]);
+
+		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
+				AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
+				ee->ee_margin_tx_rx[ee_mode]);
+
+	} else {
+		mdelay(1);
+		/* Disable phy and wait */
+		ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
+		mdelay(1);
+	}
+
+	/*
+	 * Restore saved values
+	 */
+	/*DCU/Antenna selection not available on 5210*/
+	if (ah->ah_version != AR5K_AR5210) {
+		ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
+		ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
+	}
+	AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
+	ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
+	ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
+
+	/*
+	 * Misc
+	 */
+	/* XXX: add ah->aid once mac80211 gives this to us */
+	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+
+	ath5k_hw_set_opmode(ah);
+	/*PISR/SISR Not available on 5210*/
+	if (ah->ah_version != AR5K_AR5210) {
+		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
+		/* If we later allow tuning for this, store into sc structure */
+		data = AR5K_TUNE_RSSI_THRES |
+			AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
+		ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
+	}
+
+	/*
+	 * Set Rx/Tx DMA Configuration
+	 *
+	 * Set maximum DMA size (512) except for PCI-E cards since
+	 * it causes rx overruns and tx errors (tested on 5424 but since
+	 * rx overruns also occur on 5416/5418 with madwifi we set 128
+	 * for all PCI-E cards to be safe).
+	 *
+	 * In dumps this is 128 for all chips.
+	 *
+	 * XXX: need to check 5210 for this
+	 * TODO: Check out tx trigger level, it's always 64 on dumps but I
+	 * guess we can tweak it and see how it goes ;-)
+	 */
+	dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
+	if (ah->ah_version != AR5K_AR5210) {
+		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
+			AR5K_TXCFG_SDMAMR, dma_size);
+		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
+			AR5K_RXCFG_SDMAMW, dma_size);
+	}
+
+	/*
+	 * Enable the PHY and wait until completion
+	 */
+	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+
+	/*
+	 * On 5211+ read activation -> rx delay
+	 * and use it.
+	 */
+	if (ah->ah_version != AR5K_AR5210) {
+		data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+			AR5K_PHY_RX_DELAY_M;
+		data = (channel->hw_value & CHANNEL_CCK) ?
+			((data << 2) / 22) : (data / 10);
+
+		udelay(100 + (2 * data));
+		data = 0;
+	} else {
+		mdelay(1);
+	}
+
+	/*
+	 * Perform ADC test (?)
+	 */
+	data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+	for (i = 0; i <= 20; i++) {
+		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+			break;
+		udelay(200);
+	}
+	ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
+	data = 0;
+
+	/*
+	 * Start automatic gain calibration
+	 *
+	 * During AGC calibration the RX path is re-routed to
+	 * a signal detector, so we don't receive anything.
+	 *
+	 * This method calibrates some static offsets that are
+	 * used together with the on-the-fly I/Q calibration (the
+	 * one performed via ath5k_hw_phy_calibrate), which doesn't
+	 * interrupt the rx path.
+	 *
+	 * If we are in a noisy environment, AGC calibration may
+	 * time out.
+	 */
+	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+				AR5K_PHY_AGCCTL_CAL);
+
+	/* At the same time start I/Q calibration for QAM constellation
+	 * -no need for CCK- */
+	ah->ah_calibration = false;
+	if (!(mode == AR5K_MODE_11B)) {
+		ah->ah_calibration = true;
+		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+				AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+				AR5K_PHY_IQ_RUN);
+	}
+
+	/* Wait for gain calibration to finish (we check for I/Q calibration
+	 * during ath5k_phy_calibrate) */
+	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+			AR5K_PHY_AGCCTL_CAL, 0, false)) {
+		ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
+			channel->center_freq);
+		return -EAGAIN;
+	}
+
+	/*
+	 * Start noise floor calibration
+	 *
+	 * If we run NF calibration before AGC, it always times out.
+	 * The binary HAL starts NF and AGC calibration at the same time
+	 * and only waits for AGC to finish. I believe that's wrong because,
+	 * during NF calibration, the rx path is also routed to a detector,
+	 * so if it doesn't finish we won't have RX.
+	 *
+	 * XXX: Find an interval that's OK for all cards...
+	 */
+	ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+	if (ret)
+		return ret;
+
+	/*
+	 * Reset queues and start beacon timers at the end of the reset routine
+	 */
+	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
+		/* No QCU on 5210 */
+		if (ah->ah_version != AR5K_AR5210)
+			AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);
+
+		ret = ath5k_hw_reset_tx_queue(ah, i);
+		if (ret) {
+			ATH5K_ERR(ah->ah_sc,
+				"failed to reset TX queue #%d\n", i);
+			return ret;
+		}
+	}
+
+	/* Pre-enable interrupts on 5211/5212 */
+	if (ah->ah_version != AR5K_AR5210)
+		ath5k_hw_set_imr(ah, AR5K_INT_RX | AR5K_INT_TX |
+				AR5K_INT_FATAL);
+
+	/*
+	 * Set RF kill flags if supported by the device (read from the EEPROM)
+	 * Disable gpio_intr for now since it results in a system hang.
+	 * TODO: Handle this in ath5k_intr
+	 */
+#if 0
+	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
+		ath5k_hw_set_gpio_input(ah, 0);
+		ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
+		if (ah->ah_gpio[0] == 0)
+			ath5k_hw_set_gpio_intr(ah, 0, 1);
+		else
+			ath5k_hw_set_gpio_intr(ah, 0, 0);
+	}
+#endif
+
+	/*
+	 * Set the 32MHz reference clock in the 5212 phy clock sleep register
+	 *
+	 * TODO: Find out how to switch to the external 32kHz clock to save power
+	 */
+	if (ah->ah_version == AR5K_AR5212) {
+		ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
+		ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
+		ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
+		ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
+		ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
+		ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
+
+		data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f;
+		data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
+						0x00000f80 : 0x00001380;
+		ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
+		data = 0;
+	}
+
+	if (ah->ah_version == AR5K_AR5212) {
+		ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
+		ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
+		ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
+		if (ah->ah_mac_srev >= AR5K_SREV_AR2413)
+			ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
+	}
+
+	/*
+	 * Disable beacons and reset the register
+	 */
+	AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
+			AR5K_BEACON_RESET_TSF);
+
+	return 0;
+}
+
+#undef _ATH5K_RESET
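
For context, the gain-calibration wait above (ath5k_hw_register_timeout() on
AR5K_PHY_AGCCTL_CAL) is just a bounded poll-until-clear loop. A minimal sketch
of that pattern follows; the helper name, retry count and delay are
illustrative assumptions, not the driver's exact implementation.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: poll a register until 'bit' self-clears or we give up. */
static int example_wait_bit_clear(struct ath5k_hw *ah, u32 reg, u32 bit)
{
	int i;

	for (i = 0; i < 1000; i++) {		/* ~100 ms worst case */
		if (!(ath5k_hw_reg_read(ah, reg) & bit))
			return 0;		/* e.g. AGC calibration done */
		udelay(100);
	}
	return -EAGAIN;				/* caller may retry or reset */
}
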
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index 9e19dcc..80a6924 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -1,6 +1,9 @@
 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
 	depends on PCI && MAC80211 && WLAN_80211
+	select MAC80211_LEDS
+	select LEDS_CLASS
+	select NEW_LEDS
 	---help---
 	  This module adds support for wireless adapters based on
 	  Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d1b0fba..0e897c2 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -144,6 +144,7 @@
 #define ATH9K_TXDESC_EXT_AND_CTL	0x0080
 #define ATH9K_TXDESC_VMF		0x0100
 #define ATH9K_TXDESC_FRAG_IS_ON 	0x0200
+#define ATH9K_TXDESC_CAB		0x0400
 
 #define ATH9K_RXDESC_INTREQ		0x0020
 
@@ -564,8 +565,6 @@
 #define CTL_5GHT40              8
 
 #define AR_EEPROM_MAC(i)        (0x1d+(i))
-#define EEP_SCALE       100
-#define EEP_DELTA       10
 
 #define AR_EEPROM_RFSILENT_GPIO_SEL     0x001c
 #define AR_EEPROM_RFSILENT_GPIO_SEL_S   2
@@ -606,9 +605,6 @@
 #define REG_CLR_BIT(_a, _r, _f) \
 	REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
 
-#define ATH9K_COMP_BUF_MAX_SIZE   9216
-#define ATH9K_COMP_BUF_ALIGN_SIZE 512
-
 #define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS   0x00000001
 
 #define INIT_AIFS       2
@@ -632,12 +628,6 @@
 				 (IEEE80211_WEP_IVLEN +		\
 				  IEEE80211_WEP_KIDLEN +	\
 				  IEEE80211_WEP_CRCLEN))
-#define IEEE80211_MAX_LEN       (2300 + FCS_LEN +		\
-				 (IEEE80211_WEP_IVLEN +		\
-				  IEEE80211_WEP_KIDLEN +	\
-				  IEEE80211_WEP_CRCLEN))
-
-#define MAX_REG_ADD_COUNT   129
 #define MAX_RATE_POWER 63
 
 enum ath9k_power_mode {
@@ -707,13 +697,6 @@
 };
 #define PHY_CCK PHY_DS
 
-enum start_adhoc_option {
-	START_ADHOC_NO_11A,
-	START_ADHOC_PER_11D,
-	START_ADHOC_IN_11A,
-	START_ADHOC_IN_11B,
-};
-
 enum ath9k_tp_scale {
 	ATH9K_TP_SCALE_MAX = 0,
 	ATH9K_TP_SCALE_50,
@@ -769,14 +752,11 @@
 
 #define ATH9K_RSSI_EP_MULTIPLIER  (1<<7)
 
-enum ath9k_gpio_output_mux_type {
-	ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT,
-	ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
-	ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
-	ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
-	ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
-	ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
-};
+#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT             0
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED     2
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED    5
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED      6
 
 enum {
 	ATH9K_RESET_POWER_ON,
@@ -790,19 +770,20 @@
 	u32 ah_magic;
 	u16 ah_devid;
 	u16 ah_subvendorid;
-	struct ath_softc *ah_sc;
-	void __iomem *ah_sh;
-	u16 ah_countryCode;
 	u32 ah_macVersion;
 	u16 ah_macRev;
 	u16 ah_phyRev;
 	u16 ah_analog5GhzRev;
 	u16 ah_analog2GhzRev;
-	u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE];
-	u32 ah_flags;
+
+	void __iomem *ah_sh;
+	struct ath_softc *ah_sc;
 	enum ath9k_opmode ah_opmode;
 	struct ath9k_ops_config ah_config;
 	struct ath9k_hw_capabilities ah_caps;
+
+	u16 ah_countryCode;
+	u32 ah_flags;
 	int16_t ah_powerLimit;
 	u16 ah_maxPowerLevel;
 	u32 ah_tpScale;
@@ -812,15 +793,17 @@
 	u16 ah_currentRD5G;
 	u16 ah_currentRD2G;
 	char ah_iso[4];
-	enum start_adhoc_option ah_adHocMode;
-	bool ah_commonMode;
+
 	struct ath9k_channel ah_channels[150];
-	u32 ah_nchan;
 	struct ath9k_channel *ah_curchan;
-	u16 ah_rfsilent;
-	bool ah_rfkillEnabled;
+	u32 ah_nchan;
+
 	bool ah_isPciExpress;
 	u16 ah_txTrigLevel;
+	u16 ah_rfsilent;
+	u32 ah_rfkill_gpio;
+	u32 ah_rfkill_polarity;
+
 #ifndef ATH_NF_PER_CHAN
 	struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
 #endif
@@ -853,7 +836,7 @@
 u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
 enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
 				     enum ath9k_int ints);
-bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
+bool ath9k_hw_reset(struct ath_hal *ah,
 		    struct ath9k_channel *chan,
 		    enum ath9k_ht_macmode macmode,
 		    u8 txchainmask, u8 rxchainmask,
@@ -1018,4 +1001,9 @@
 bool ath9k_get_channel_edges(struct ath_hal *ah,
 			     u16 flags, u16 *low,
 			     u16 *high);
+void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
+			u32 ah_signal_type);
+void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 value);
+u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
+void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
 #endif
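
The GPIO accessors declared above are what the new LED and rfkill support
(added elsewhere in this series) builds on. A hedged usage sketch, assuming the
ATH_LED_PIN value from the core.h changes later in this patch and an active-low
LED; the real call sites may differ.

/* Assumed usage only -- the actual LED code lives in the driver core. */
static void example_led_init(struct ath_hal *ah)
{
	/* Route the pin to plain GPIO output and start with the LED off. */
	ath9k_hw_cfg_output(ah, ATH_LED_PIN, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
}

static void example_led_set(struct ath_hal *ah, bool on)
{
	/* Assumed active-low wiring: writing 0 turns the LED on. */
	ath9k_hw_set_gpio(ah, ATH_LED_PIN, on ? 0 : 1);
}
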
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 00a0eaa..eedb465 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -16,7 +16,6 @@
 
  /* Implementation of beacon processing. */
 
-#include <asm/unaligned.h>
 #include "core.h"
 
 /*
@@ -26,14 +25,13 @@
  *  the operating mode of the station (AP or AdHoc).  Parameters are AIFS
  *  settings and channel width min/max
 */
-
 static int ath_beaconq_config(struct ath_softc *sc)
 {
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath9k_tx_queue_info qi;
 
 	ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
-	if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
 		/* Always burst out beacon and CAB traffic. */
 		qi.tqi_aifs = 1;
 		qi.tqi_cwmin = 0;
@@ -63,19 +61,18 @@
  *  up all required antenna switch parameters, rate codes, and channel flags.
  *  Beacons are always sent out at the lowest rate, and are not retried.
 */
-
 static void ath_beacon_setup(struct ath_softc *sc,
-	struct ath_vap *avp, struct ath_buf *bf)
+			     struct ath_vap *avp, struct ath_buf *bf)
 {
 	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath_desc *ds;
-	int flags, antenna;
+	struct ath9k_11n_rate_series series[4];
 	const struct ath9k_rate_table *rt;
+	int flags, antenna;
 	u8 rix, rate;
 	int ctsrate = 0;
 	int ctsduration = 0;
-	struct ath9k_11n_rate_series  series[4];
 
 	DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
 		__func__, skb, skb->len);
@@ -85,7 +82,7 @@
 
 	flags = ATH9K_TXDESC_NOACK;
 
-	if (sc->sc_opmode == ATH9K_M_IBSS &&
+	if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
 	    (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
 		ds->ds_link = bf->bf_daddr; /* self-linked */
 		flags |= ATH9K_TXDESC_VEOL;
@@ -111,24 +108,25 @@
 	rix = 0;
 	rt = sc->sc_currates;
 	rate = rt->info[rix].rateCode;
-	if (sc->sc_flags & ATH_PREAMBLE_SHORT)
+	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
 		rate |= rt->info[rix].shortPreamble;
 
-	ath9k_hw_set11n_txdesc(ah, ds
-			      , skb->len + FCS_LEN /* frame length */
-			      , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */
-			      , avp->av_btxctl.txpower /* txpower XXX */
-			      , ATH9K_TXKEYIX_INVALID /* no encryption */
-			      , ATH9K_KEY_TYPE_CLEAR /* no encryption */
-			      , flags /* no ack, veol for beacons */
+	ath9k_hw_set11n_txdesc(ah, ds,
+			       skb->len + FCS_LEN,     /* frame length */
+			       ATH9K_PKT_TYPE_BEACON,  /* Atheros packet type */
+			       avp->av_btxctl.txpower, /* txpower XXX */
+			       ATH9K_TXKEYIX_INVALID,  /* no encryption */
+			       ATH9K_KEY_TYPE_CLEAR,   /* no encryption */
+			       flags                   /* no ack,
+							  veol for beacons */
 		);
 
 	/* NB: beacon's BufLen must be a multiple of 4 bytes */
-	ath9k_hw_filltxdesc(ah, ds
-			   , roundup(skb->len, 4) /* buffer length */
-			   , true /* first segment */
-			   , true /* last segment */
-			   , ds /* first descriptor */
+	ath9k_hw_filltxdesc(ah, ds,
+			    roundup(skb->len, 4), /* buffer length */
+			    true,                 /* first segment */
+			    true,                 /* last segment */
+			    ds                    /* first descriptor */
 		);
 
 	memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
@@ -140,55 +138,6 @@
 		ctsrate, ctsduration, series, 4, 0);
 }
 
-/* Move everything from the vap's mcast queue to the hardware cab queue.
- * Caller must hold mcasq lock and cabq lock
- * XXX MORE_DATA bit?
- */
-static void empty_mcastq_into_cabq(struct ath_hal *ah,
-	struct ath_txq *mcastq, struct ath_txq *cabq)
-{
-	struct ath_buf *bfmcast;
-
-	BUG_ON(list_empty(&mcastq->axq_q));
-
-	bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
-
-	/* link the descriptors */
-	if (!cabq->axq_link)
-		ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
-	else
-		*cabq->axq_link = bfmcast->bf_daddr;
-
-	/* append the private vap mcast list to  the cabq */
-
-	cabq->axq_depth	+= mcastq->axq_depth;
-	cabq->axq_totalqueued += mcastq->axq_totalqueued;
-	cabq->axq_linkbuf = mcastq->axq_linkbuf;
-	cabq->axq_link = mcastq->axq_link;
-	list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
-	mcastq->axq_depth = 0;
-	mcastq->axq_totalqueued = 0;
-	mcastq->axq_linkbuf = NULL;
-	mcastq->axq_link = NULL;
-}
-
-/* This is only run at DTIM. We move everything from the vap's mcast queue
- * to the hardware cab queue. Caller must hold the mcastq lock. */
-static void trigger_mcastq(struct ath_hal *ah,
-	struct ath_txq *mcastq, struct ath_txq *cabq)
-{
-	spin_lock_bh(&cabq->axq_lock);
-
-	if (!list_empty(&mcastq->axq_q))
-		empty_mcastq_into_cabq(ah, mcastq, cabq);
-
-	/* cabq is gated by beacon so it is safe to start here */
-	if (!list_empty(&cabq->axq_q))
-		ath9k_hw_txstart(ah, cabq->axq_qnum);
-
-	spin_unlock_bh(&cabq->axq_lock);
-}
-
 /*
  *  Generate beacon frame and queue cab data for a vap.
  *
@@ -199,39 +148,36 @@
 */
 static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 {
-	struct ath_hal *ah = sc->sc_ah;
 	struct ath_buf *bf;
 	struct ath_vap *avp;
 	struct sk_buff *skb;
-	int cabq_depth;
-	int mcastq_depth;
-	int is_beacon_dtim = 0;
-	unsigned int curlen;
 	struct ath_txq *cabq;
-	struct ath_txq *mcastq;
 	struct ieee80211_tx_info *info;
+	int cabq_depth;
+
 	avp = sc->sc_vaps[if_id];
-
-	mcastq = &avp->av_mcastq;
-	cabq = sc->sc_cabq;
-
 	ASSERT(avp);
 
+	cabq = sc->sc_cabq;
+
 	if (avp->av_bcbuf == NULL) {
 		DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
 			__func__, avp, avp->av_bcbuf);
 		return NULL;
 	}
-	bf = avp->av_bcbuf;
-	skb = (struct sk_buff *) bf->bf_mpdu;
 
-	/*
-	 * Update dynamic beacon contents.  If this returns
-	 * non-zero then we need to remap the memory because
-	 * the beacon frame changed size (probably because
-	 * of the TIM bitmap).
-	 */
-	curlen = skb->len;
+	bf = avp->av_bcbuf;
+	skb = (struct sk_buff *)bf->bf_mpdu;
+	if (skb) {
+		pci_unmap_single(sc->pdev, bf->bf_dmacontext,
+				 skb_end_pointer(skb) - skb->head,
+				 PCI_DMA_TODEVICE);
+	}
+
+	skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
+	bf->bf_mpdu = skb;
+	if (skb == NULL)
+		return NULL;
 
 	info = IEEE80211_SKB_CB(skb);
 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -239,29 +185,18 @@
 		 * TODO: make sure the seq# gets assigned properly (vs. other
 		 * TX frames)
 		 */
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 		sc->seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
 	}
 
-	/* XXX: spin_lock_bh should not be used here, but sparse bitches
-	 * otherwise. We should fix sparse :) */
-	spin_lock_bh(&mcastq->axq_lock);
-	mcastq_depth = avp->av_mcastq.axq_depth;
+	bf->bf_buf_addr = bf->bf_dmacontext =
+		pci_map_single(sc->pdev, skb->data,
+			       skb_end_pointer(skb) - skb->head,
+			       PCI_DMA_TODEVICE);
 
-	if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
-	    1) {
-		ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
-				     get_dma_mem_context(bf, bf_dmacontext));
-		bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
-			get_dma_mem_context(bf, bf_dmacontext));
-	} else {
-		pci_dma_sync_single_for_cpu(sc->pdev,
-					    bf->bf_buf_addr,
-					    skb_tailroom(skb),
-					    PCI_DMA_TODEVICE);
-	}
+	skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
 
 	/*
 	 * if the CABQ traffic from previous DTIM is pending and the current
@@ -275,9 +210,7 @@
 	cabq_depth = cabq->axq_depth;
 	spin_unlock_bh(&cabq->axq_lock);
 
-	is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
-
-	if (mcastq_depth && is_beacon_dtim && cabq_depth) {
+	if (skb && cabq_depth) {
 		/*
 		 * Unlock the cabq lock as ath_tx_draintxq acquires
 		 * the lock again which is a common function and that
@@ -297,10 +230,11 @@
 	 * Enable the CAB queue before the beacon queue to
 	 * insure cab frames are triggered by this beacon.
 	 */
-	if (is_beacon_dtim)
-		trigger_mcastq(ah, mcastq, cabq);
+	while (skb) {
+		ath_tx_cabq(sc, skb);
+		skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
+	}
 
-	spin_unlock_bh(&mcastq->axq_lock);
 	return bf;
 }
 
@@ -308,7 +242,6 @@
  * Startup beacon transmission for adhoc mode when they are sent entirely
  * by the hardware using the self-linked descriptor + veol trick.
 */
-
 static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
 {
 	struct ath_hal *ah = sc->sc_ah;
@@ -345,7 +278,6 @@
  *  min/max, and enable aifs). The info structure does not need to be
  *  persistant.
 */
-
 int ath_beaconq_setup(struct ath_hal *ah)
 {
 	struct ath9k_tx_queue_info qi;
@@ -366,29 +298,27 @@
  *  the ATH interface.  This routine also calculates the beacon "slot" for
  *  staggared beacons in the mBSSID case.
 */
-
 int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 {
 	struct ath_vap *avp;
-	struct ieee80211_hdr *wh;
+	struct ieee80211_hdr *hdr;
 	struct ath_buf *bf;
 	struct sk_buff *skb;
+	__le64 tstamp;
 
 	avp = sc->sc_vaps[if_id];
 	ASSERT(avp);
 
 	/* Allocate a beacon descriptor if we haven't done so. */
 	if (!avp->av_bcbuf) {
-		/*
-		 * Allocate beacon state for hostap/ibss.  We know
-		 * a buffer is available.
-		 */
+		/* Allocate beacon state for hostap/ibss.  We know
+		 * a buffer is available. */
 
 		avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
-				struct ath_buf, list);
+						 struct ath_buf, list);
 		list_del(&avp->av_bcbuf->list);
 
-		if (sc->sc_opmode == ATH9K_M_HOSTAP ||
+		if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
 		    !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
 			int slot;
 			/*
@@ -421,17 +351,16 @@
 	bf = avp->av_bcbuf;
 	if (bf->bf_mpdu != NULL) {
 		skb = (struct sk_buff *)bf->bf_mpdu;
-		ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
-				     get_dma_mem_context(bf, bf_dmacontext));
+		pci_unmap_single(sc->pdev, bf->bf_dmacontext,
+				 skb_end_pointer(skb) - skb->head,
+				 PCI_DMA_TODEVICE);
 		dev_kfree_skb_any(skb);
 		bf->bf_mpdu = NULL;
 	}
 
 	/*
-	 * NB: the beacon data buffer must be 32-bit aligned;
-	 * we assume the wbuf routines will return us something
-	 * with this alignment (perhaps should assert).
-	 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
+	 * NB: the beacon data buffer must be 32-bit aligned.
+	 * FIXME: Fill avp->av_btxctl.txpower and
 	 * avp->av_btxctl.shortPreamble
 	 */
 	skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
@@ -441,6 +370,9 @@
 		return -ENOMEM;
 	}
 
+	tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+	sc->bc_tstamp = le64_to_cpu(tstamp);
+
 	/*
 	 * Calculate a TSF adjustment factor required for
 	 * staggered beacons.  Note that we assume the format
@@ -452,9 +384,8 @@
 		__le64 val;
 		int intval;
 
-		/* FIXME: Use default value for now: Sujith */
-
-		intval = ATH_DEFAULT_BINTVAL;
+		intval = sc->hw->conf.beacon_int ?
+			sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
 
 		/*
 		 * The beacon interval is in TU's; the TSF in usecs.
@@ -475,12 +406,14 @@
 			__func__, "stagger",
 			avp->av_bslot, intval, (unsigned long long)tsfadjust);
 
-		wh = (struct ieee80211_hdr *)skb->data;
-		memcpy(&wh[1], &val, sizeof(val));
+		hdr = (struct ieee80211_hdr *)skb->data;
+		memcpy(&hdr[1], &val, sizeof(val));
 	}
 
-	bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
-		get_dma_mem_context(bf, bf_dmacontext));
+	bf->bf_buf_addr = bf->bf_dmacontext =
+		pci_map_single(sc->pdev, skb->data,
+			       skb_end_pointer(skb) - skb->head,
+			       PCI_DMA_TODEVICE);
 	bf->bf_mpdu = skb;
 
 	return 0;
@@ -490,9 +423,8 @@
  *  Reclaim beacon resources and return buffer to the pool.
  *
  *  Checks the VAP to put the beacon frame buffer back to the ATH object
- *  queue, and de-allocates any wbuf frames that were sent as CAB traffic.
+ *  queue, and de-allocates any skbs that were sent as CAB traffic.
 */
-
 void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
 {
 	if (avp->av_bcbuf != NULL) {
@@ -506,8 +438,9 @@
 		bf = avp->av_bcbuf;
 		if (bf->bf_mpdu != NULL) {
 			struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
-			ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
-				get_dma_mem_context(bf, bf_dmacontext));
+			pci_unmap_single(sc->pdev, bf->bf_dmacontext,
+					 skb_end_pointer(skb) - skb->head,
+					 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
 		}
@@ -518,44 +451,14 @@
 }
 
 /*
- *  Reclaim beacon resources and return buffer to the pool.
- *
- *  This function will free any wbuf frames that are still attached to the
- *  beacon buffers in the ATH object.  Note that this does not de-allocate
- *  any wbuf objects that are in the transmit queue and have not yet returned
- *  to the ATH object.
-*/
-
-void ath_beacon_free(struct ath_softc *sc)
-{
-	struct ath_buf *bf;
-
-	list_for_each_entry(bf, &sc->sc_bbuf, list) {
-		if (bf->bf_mpdu != NULL) {
-			struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
-			ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
-				get_dma_mem_context(bf, bf_dmacontext));
-			dev_kfree_skb_any(skb);
-			bf->bf_mpdu = NULL;
-		}
-	}
-}
-
-/*
  * Tasklet for Sending Beacons
  *
  * Transmit one or more beacon frames at SWBA.  Dynamic updates to the frame
  * contents are done as needed and the slot time is also adjusted based on
  * current state.
- *
- * This tasklet is not scheduled, it's called in ISR context.
 */
-
 void ath9k_beacon_tasklet(unsigned long data)
 {
-#define TSF_TO_TU(_h,_l)					\
-	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-
 	struct ath_softc *sc = (struct ath_softc *)data;
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath_buf *bf = NULL;
@@ -568,7 +471,7 @@
 	u32 tsftu;
 	u16 intval;
 
-	if (sc->sc_noreset) {
+	if (sc->sc_flags & SC_OP_NO_RESET) {
 		show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
 							    &rx_clear,
 							    &rx_frame,
@@ -581,6 +484,8 @@
 	 * and wait for the next.  Missed beacons indicate
 	 * a problem and should not occur.  If we miss too
 	 * many consecutive beacons reset the device.
+	 *
+	 * FIXME: Clean up this mess !!
 	 */
 	if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
 		sc->sc_bmisscount++;
@@ -590,25 +495,22 @@
 		 *      (in that layer).
 		 */
 		if (sc->sc_bmisscount < BSTUCK_THRESH) {
-			if (sc->sc_noreset) {
+			if (sc->sc_flags & SC_OP_NO_RESET) {
 				DPRINTF(sc, ATH_DBG_BEACON,
 					"%s: missed %u consecutive beacons\n",
 					__func__, sc->sc_bmisscount);
 				if (show_cycles) {
 					/*
-					 * Display cycle counter stats
-					 * from HW to aide in debug of
-					 * stickiness.
+					 * Display cycle counter stats from HW
+					 * to aid in debugging stickiness.
 					 */
-					DPRINTF(sc,
-						ATH_DBG_BEACON,
+					DPRINTF(sc, ATH_DBG_BEACON,
 						"%s: busy times: rx_clear=%d, "
 						"rx_frame=%d, tx_frame=%d\n",
 						__func__, rx_clear, rx_frame,
 						tx_frame);
 				} else {
-					DPRINTF(sc,
-						ATH_DBG_BEACON,
+					DPRINTF(sc, ATH_DBG_BEACON,
 						"%s: unable to obtain "
 						"busy times\n", __func__);
 				}
@@ -618,10 +520,9 @@
 					__func__, sc->sc_bmisscount);
 			}
 		} else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
-			if (sc->sc_noreset) {
+			if (sc->sc_flags & SC_OP_NO_RESET) {
 				if (sc->sc_bmisscount == BSTUCK_THRESH) {
-					DPRINTF(sc,
-						ATH_DBG_BEACON,
+					DPRINTF(sc, ATH_DBG_BEACON,
 						"%s: beacon is officially "
 						"stuck\n", __func__);
 					ath9k_hw_dmaRegDump(ah);
@@ -633,13 +534,12 @@
 				ath_bstuck_process(sc);
 			}
 		}
-
 		return;
 	}
+
 	if (sc->sc_bmisscount != 0) {
-		if (sc->sc_noreset) {
-			DPRINTF(sc,
-				ATH_DBG_BEACON,
+		if (sc->sc_flags & SC_OP_NO_RESET) {
+			DPRINTF(sc, ATH_DBG_BEACON,
 				"%s: resume beacon xmit after %u misses\n",
 				__func__, sc->sc_bmisscount);
 		} else {
@@ -656,17 +556,19 @@
 	 * on the tsf to safeguard against missing an swba.
 	 */
 
-	/* FIXME: Use default value for now - Sujith */
-	intval = ATH_DEFAULT_BINTVAL;
+	intval = sc->hw->conf.beacon_int ?
+		sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
 
 	tsf = ath9k_hw_gettsf64(ah);
 	tsftu = TSF_TO_TU(tsf>>32, tsf);
 	slot = ((tsftu % intval) * ATH_BCBUF) / intval;
 	if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
+
 	DPRINTF(sc, ATH_DBG_BEACON,
-			"%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
-			__func__, slot, (unsigned long long) tsf, tsftu,
-			intval, if_id);
+		"%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
+		__func__, slot, (unsigned long long)tsf, tsftu,
+		intval, if_id);
+
 	bfaddr = 0;
 	if (if_id != ATH_IF_ID_ANY) {
 		bf = ath_beacon_generate(sc, if_id);
@@ -717,22 +619,20 @@
 
 		sc->ast_be_xmit += bc;     /* XXX per-vap? */
 	}
-#undef TSF_TO_TU
 }
 
 /*
  *  Tasklet for Beacon Stuck processing
  *
  *  Processing for Beacon Stuck.
- *  Basically calls the ath_internal_reset function to reset the chip.
+ *  Basically resets the chip.
 */
-
 void ath_bstuck_process(struct ath_softc *sc)
 {
 	DPRINTF(sc, ATH_DBG_BEACON,
 		"%s: stuck beacon; resetting (bmiss count %u)\n",
 		__func__, sc->sc_bmisscount);
-	ath_internal_reset(sc);
+	ath_reset(sc, false);
 }
 
 /*
@@ -750,40 +650,32 @@
  * interrupt when we stop seeing beacons from the AP
  * we've associated with.
  */
-
 void ath_beacon_config(struct ath_softc *sc, int if_id)
 {
-#define TSF_TO_TU(_h,_l)					\
-	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 	struct ath_hal *ah = sc->sc_ah;
-	u32 nexttbtt, intval;
 	struct ath_beacon_config conf;
 	enum ath9k_opmode av_opmode;
+	u32 nexttbtt, intval;
 
 	if (if_id != ATH_IF_ID_ANY)
 		av_opmode = sc->sc_vaps[if_id]->av_opmode;
 	else
-		av_opmode = sc->sc_opmode;
+		av_opmode = sc->sc_ah->ah_opmode;
 
 	memzero(&conf, sizeof(struct ath_beacon_config));
 
-	/* FIXME: Use default values for now - Sujith */
-	/* Query beacon configuration first */
-	/*
-	 * Protocol stack doesn't support dynamic beacon configuration,
-	 * use default configurations.
-	 */
-	conf.beacon_interval = ATH_DEFAULT_BINTVAL;
+	conf.beacon_interval = sc->hw->conf.beacon_int ?
+		sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
 	conf.listen_interval = 1;
 	conf.dtim_period = conf.beacon_interval;
 	conf.dtim_count = 1;
 	conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
 
 	/* extract tstamp from last beacon and convert to TU */
-	nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
-			     get_unaligned_le32(conf.u.last_tstamp));
+	nexttbtt = TSF_TO_TU(sc->bc_tstamp >> 32, sc->bc_tstamp);
+
 	/* XXX conditionalize multi-bss support? */
-	if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
 		/*
 		 * For multi-bss ap support beacons are either staggered
 		 * evenly over N slots or burst together.  For the former
@@ -797,14 +689,16 @@
 		intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
 	}
 
-	if (nexttbtt == 0)      /* e.g. for ap mode */
+	if (nexttbtt == 0)	/* e.g. for ap mode */
 		nexttbtt = intval;
-	else if (intval)        /* NB: can be 0 for monitor mode */
+	else if (intval)	/* NB: can be 0 for monitor mode */
 		nexttbtt = roundup(nexttbtt, intval);
+
 	DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
 		__func__, nexttbtt, intval, conf.beacon_interval);
+
 	/* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
-	if (sc->sc_opmode == ATH9K_M_STA) {
+	if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
 		struct ath9k_beacon_state bs;
 		u64 tsf;
 		u32 tsftu;
@@ -816,19 +710,19 @@
 		 * last beacon we received (which may be none).
 		 */
 		dtimperiod = conf.dtim_period;
-		if (dtimperiod <= 0)        /* NB: 0 if not known */
+		if (dtimperiod <= 0)		/* NB: 0 if not known */
 			dtimperiod = 1;
 		dtimcount = conf.dtim_count;
-		if (dtimcount >= dtimperiod)    /* NB: sanity check */
-			dtimcount = 0;      /* XXX? */
-		cfpperiod = 1;          /* NB: no PCF support yet */
+		if (dtimcount >= dtimperiod)	/* NB: sanity check */
+			dtimcount = 0;
+		cfpperiod = 1;			/* NB: no PCF support yet */
 		cfpcount = 0;
 
 		sleepduration = conf.listen_interval * intval;
 		if (sleepduration <= 0)
 			sleepduration = intval;
 
-#define FUDGE   2
+#define FUDGE 2
 		/*
 		 * Pull nexttbtt forward to reflect the current
 		 * TSF and calculate dtim+cfp state for the result.
@@ -852,6 +746,7 @@
 		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
 		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
 		bs.bs_cfpmaxduration = 0;
+
 		/*
 		 * Calculate the number of consecutive beacons to miss
 		 * before taking a BMISS interrupt.  The configuration
@@ -860,9 +755,8 @@
 		 * result to at most 15 beacons.
 		 */
 		if (sleepduration > intval) {
-			bs.bs_bmissthreshold =
-				conf.listen_interval *
-					ATH_DEFAULT_BMISS_LIMIT / 2;
+			bs.bs_bmissthreshold = conf.listen_interval *
+				ATH_DEFAULT_BMISS_LIMIT / 2;
 		} else {
 			bs.bs_bmissthreshold =
 				DIV_ROUND_UP(conf.bmiss_timeout, intval);
@@ -882,8 +776,8 @@
 		 * XXX fixed at 100ms
 		 */
 
-		bs.bs_sleepduration =
-			roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+		bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100),
+					      sleepduration);
 		if (bs.bs_sleepduration > bs.bs_dtimperiod)
 			bs.bs_sleepduration = bs.bs_dtimperiod;
 
@@ -899,19 +793,19 @@
 			"cfp:period %u "
 			"maxdur %u "
 			"next %u "
-			"timoffset %u\n"
-			, __func__
-			, (unsigned long long)tsf, tsftu
-			, bs.bs_intval
-			, bs.bs_nexttbtt
-			, bs.bs_dtimperiod
-			, bs.bs_nextdtim
-			, bs.bs_bmissthreshold
-			, bs.bs_sleepduration
-			, bs.bs_cfpperiod
-			, bs.bs_cfpmaxduration
-			, bs.bs_cfpnext
-			, bs.bs_timoffset
+			"timoffset %u\n",
+			__func__,
+			(unsigned long long)tsf, tsftu,
+			bs.bs_intval,
+			bs.bs_nexttbtt,
+			bs.bs_dtimperiod,
+			bs.bs_nextdtim,
+			bs.bs_bmissthreshold,
+			bs.bs_sleepduration,
+			bs.bs_cfpperiod,
+			bs.bs_cfpmaxduration,
+			bs.bs_cfpnext,
+			bs.bs_timoffset
 			);
 
 		ath9k_hw_set_interrupts(ah, 0);
@@ -924,12 +818,12 @@
 		ath9k_hw_set_interrupts(ah, 0);
 		if (nexttbtt == intval)
 			intval |= ATH9K_BEACON_RESET_TSF;
-		if (sc->sc_opmode == ATH9K_M_IBSS) {
+		if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
 			/*
 			 * Pull nexttbtt forward to reflect the current
-			 * TSF .
+			 * TSF
 			 */
-#define FUDGE   2
+#define FUDGE 2
 			if (!(intval & ATH9K_BEACON_RESET_TSF)) {
 				tsf = ath9k_hw_gettsf64(ah);
 				tsftu = TSF_TO_TU((u32)(tsf>>32),
@@ -956,7 +850,7 @@
 			if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
 				sc->sc_imask |= ATH9K_INT_SWBA;
 			ath_beaconq_config(sc);
-		} else if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+		} else if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
 			/*
 			 * In AP mode we enable the beacon timers and
 			 * SWBA interrupts to prepare beacon frames.
@@ -972,11 +866,10 @@
 		 * When using a self-linked beacon descriptor in
 		 * ibss mode load it once here.
 		 */
-		if (sc->sc_opmode == ATH9K_M_IBSS &&
+		if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
 		    (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
 			ath_beacon_start_adhoc(sc, 0);
 	}
-#undef TSF_TO_TU
 }
 
 /* Function to collect beacon rssi data and resync beacon if necessary */
@@ -988,5 +881,5 @@
 	 * beacon frame we just received.
 	 */
 	ath_beacon_config(sc, if_id);
-	sc->sc_beacons = 1;
+	sc->sc_flags |= SC_OP_BEACONS;
 }
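
The beacon tasklet above derives the transmit slot purely from the TSF:
TSF_TO_TU() converts microseconds to TU (1 TU = 1024 us) and the offset inside
the beacon interval is mapped onto ATH_BCBUF staggered slots. A standalone
sketch of that arithmetic, with illustrative names:

#include <linux/types.h>

#define EX_TSF_TO_TU(_h, _l) \
	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))

/* Which of 'nslots' staggered beacon slots does this TSF fall into? */
static u32 example_beacon_slot(u64 tsf, u16 intval, u32 nslots)
{
	u32 tsftu = EX_TSF_TO_TU(tsf >> 32, tsf);

	return ((tsftu % intval) * nslots) / intval;
}

Note that the tasklet then indexes sc_bslot[] with (slot + 1) % ATH_BCBUF,
i.e. one slot ahead of the raw value.
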
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index 87e37bc..6c433a4 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -21,9 +21,6 @@
 
 static int ath_outdoor;		/* enable outdoor use */
 
-static const u8 ath_bcast_mac[ETH_ALEN] =
-    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
 static u32 ath_chainmask_sel_up_rssi_thres =
 	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
 static u32 ath_chainmask_sel_down_rssi_thres =
@@ -54,10 +51,8 @@
  *  Set current operating mode
  *
  *  This function initializes and fills the rate table in the ATH object based
- *  on the operating mode.  The blink rates are also set up here, although
- *  they have been superceeded by the ath_led module.
+ *  on the operating mode.
 */
-
 static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
 {
 	const struct ath9k_rate_table *rt;
@@ -235,7 +230,7 @@
  *  Determine mode from channel flags
  *
  *  This routine will provide the enumerated WIRELESSS_MODE value based
- *  on the settings of the channel flags.  If ho valid set of flags
+ *  on the settings of the channel flags.  If no valid set of flags
  *  exist, the lowest mode (11b) is selected.
 */
 
@@ -260,7 +255,8 @@
 	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
 		return ATH9K_MODE_11NG_HT40MINUS;
 
-	/* NB: should not get here */
+	WARN_ON(1); /* should not get here */
+
 	return ATH9K_MODE_11B;
 }
 
@@ -275,14 +271,12 @@
 {
 	struct ath_hal *ah = sc->sc_ah;
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
-		__func__, sc->sc_invalid);
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
+		__func__, sc->sc_flags & SC_OP_INVALID);
 
 	/*
 	 * Shutdown the hardware and driver:
 	 *    stop output from above
-	 *    reset 802.11 state machine
-	 *      (sends station deassoc/deauth frames)
 	 *    turn off timers
 	 *    disable interrupts
 	 *    clear transmit machinery
@@ -294,8 +288,10 @@
 	 * hardware is gone (invalid).
 	 */
 
+	if (!(sc->sc_flags & SC_OP_INVALID))
+		ath9k_hw_set_interrupts(ah, 0);
 	ath_draintxq(sc, false);
-	if (!sc->sc_invalid) {
+	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		ath_stoprecv(sc);
 		ath9k_hw_phy_disable(ah);
 	} else
@@ -305,56 +301,6 @@
 }
 
 /*
- *  Start Scan
- *
- *  This function is called when starting a channel scan.  It will perform
- *  power save wakeup processing, set the filter for the scan, and get the
- *  chip ready to send broadcast packets out during the scan.
-*/
-
-void ath_scan_start(struct ath_softc *sc)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	u32 rfilt;
-	u32 now = (u32) jiffies_to_msecs(get_timestamp());
-
-	sc->sc_scanning = 1;
-	rfilt = ath_calcrxfilter(sc);
-	ath9k_hw_setrxfilter(ah, rfilt);
-	ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
-
-	/* Restore previous power management state. */
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
-		now / 1000, now % 1000, __func__, rfilt);
-}
-
-/*
- *  Scan End
- *
- *  This routine is called by the upper layer when the scan is completed.  This
- *  will set the filters back to normal operating mode, set the BSSID to the
- *  correct value, and restore the power save state.
-*/
-
-void ath_scan_end(struct ath_softc *sc)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	u32 rfilt;
-	u32 now = (u32) jiffies_to_msecs(get_timestamp());
-
-	sc->sc_scanning = 0;
-	/* Request for a full reset due to rx packet filter changes */
-	sc->sc_full_reset = 1;
-	rfilt = ath_calcrxfilter(sc);
-	ath9k_hw_setrxfilter(ah, rfilt);
-	ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
-		now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
-}
-
-/*
  * Set the current channel
  *
  * Set/change channels.  If the channel is really being changed, it's done
@@ -365,25 +311,23 @@
 {
 	struct ath_hal *ah = sc->sc_ah;
 	bool fastcc = true, stopped;
-	enum ath9k_ht_macmode ht_macmode;
 
-	if (sc->sc_invalid)	/* if the device is invalid or removed */
+	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
 		return -EIO;
 
 	DPRINTF(sc, ATH_DBG_CONFIG,
 		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
 		__func__,
-		ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
-				  sc->sc_curchan.channelFlags),
-		sc->sc_curchan.channel,
+		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
+				  sc->sc_ah->ah_curchan->channelFlags),
+		sc->sc_ah->ah_curchan->channel,
 		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
 		hchan->channel, hchan->channelFlags);
 
-	ht_macmode = ath_cwm_macmode(sc);
-
-	if (hchan->channel != sc->sc_curchan.channel ||
-	    hchan->channelFlags != sc->sc_curchan.channelFlags ||
-	    sc->sc_update_chainmask || sc->sc_full_reset) {
+	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
+	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
+	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
+	    (sc->sc_flags & SC_OP_FULL_RESET)) {
 		int status;
 		/*
 		 * This is only performed if the channel settings have
@@ -402,15 +346,16 @@
 		 * to flush data frames already in queue because of
 		 * changing channel. */
 
-		if (!stopped || sc->sc_full_reset)
+		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
 			fastcc = false;
 
 		spin_lock_bh(&sc->sc_resetlock);
-		if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
-					ht_macmode, sc->sc_tx_chainmask,
-					sc->sc_rx_chainmask,
-					sc->sc_ht_extprotspacing,
-					fastcc, &status)) {
+		if (!ath9k_hw_reset(ah, hchan,
+				    sc->sc_ht_info.tx_chan_width,
+				    sc->sc_tx_chainmask,
+				    sc->sc_rx_chainmask,
+				    sc->sc_ht_extprotspacing,
+				    fastcc, &status)) {
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"%s: unable to reset channel %u (%uMhz) "
 				"flags 0x%x hal status %u\n", __func__,
@@ -422,9 +367,8 @@
 		}
 		spin_unlock_bh(&sc->sc_resetlock);
 
-		sc->sc_curchan = *hchan;
-		sc->sc_update_chainmask = 0;
-		sc->sc_full_reset = 0;
+		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
+		sc->sc_flags &= ~SC_OP_FULL_RESET;
 
 		/* Re-enable rx framework */
 		if (ath_startrecv(sc) != 0) {
@@ -535,7 +479,7 @@
 
 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
 {
-	sc->sc_update_chainmask = 1;
+	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
 	if (is_ht) {
 		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
 		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
@@ -552,62 +496,6 @@
 /* VAP management */
 /******************/
 
-/*
- *  VAP in Listen mode
- *
- *  This routine brings the VAP out of the down state into a "listen" state
- *  where it waits for association requests.  This is used in AP and AdHoc
- *  modes.
-*/
-
-int ath_vap_listen(struct ath_softc *sc, int if_id)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_vap *avp;
-	u32 rfilt = 0;
-	DECLARE_MAC_BUF(mac);
-
-	avp = sc->sc_vaps[if_id];
-	if (avp == NULL) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
-			__func__, if_id);
-		return -EINVAL;
-	}
-
-#ifdef CONFIG_SLOW_ANT_DIV
-	ath_slow_ant_div_stop(&sc->sc_antdiv);
-#endif
-
-	/* update ratectrl about the new state */
-	ath_rate_newstate(sc, avp);
-
-	rfilt = ath_calcrxfilter(sc);
-	ath9k_hw_setrxfilter(ah, rfilt);
-
-	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
-		memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
-		ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
-	} else
-		sc->sc_curaid = 0;
-
-	DPRINTF(sc, ATH_DBG_CONFIG,
-		"%s: RX filter 0x%x bssid %s aid 0x%x\n",
-		__func__, rfilt, print_mac(mac,
-			sc->sc_curbssid), sc->sc_curaid);
-
-	/*
-	 * XXXX
-	 * Disable BMISS interrupt when we're not associated
-	 */
-	ath9k_hw_set_interrupts(ah,
-		sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
-	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
-	/* need to reconfigure the beacons when it moves to RUN */
-	sc->sc_beacons = 0;
-
-	return 0;
-}
-
 int ath_vap_attach(struct ath_softc *sc,
 		   int if_id,
 		   struct ieee80211_vif *if_data,
@@ -645,16 +533,14 @@
 	/* Set the VAP opmode */
 	avp->av_opmode = opmode;
 	avp->av_bslot = -1;
-	INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
-	INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
-	spin_lock_init(&avp->av_mcastq.axq_lock);
 
-	ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
+	if (opmode == ATH9K_M_HOSTAP)
+		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
 
 	sc->sc_vaps[if_id] = avp;
 	sc->sc_nvaps++;
 	/* Set the device opmode */
-	sc->sc_opmode = opmode;
+	sc->sc_ah->ah_opmode = opmode;
 
 	/* default VAP configuration */
 	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
@@ -687,9 +573,6 @@
 	ath_stoprecv(sc);	/* stop recv side */
 	ath_flushrecv(sc);	/* flush recv queue */
 
-	/* Reclaim any pending mcast bufs on the vap. */
-	ath_tx_draintxq(sc, &avp->av_mcastq, false);
-
 	kfree(avp);
 	sc->sc_vaps[if_id] = NULL;
 	sc->sc_nvaps--;
@@ -726,9 +609,9 @@
 	struct ath_hal *ah = sc->sc_ah;
 	int status;
 	int error = 0;
-	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
+		__func__, sc->sc_ah->ah_opmode);
 
 	/*
 	 * Stop anything previously setup.  This is safe
@@ -750,16 +633,16 @@
 	 * be followed by initialization of the appropriate bits
 	 * and then setup of the interrupt mask.
 	 */
-	sc->sc_curchan = *initial_chan;
 
 	spin_lock_bh(&sc->sc_resetlock);
-	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
-			   sc->sc_tx_chainmask, sc->sc_rx_chainmask,
-			   sc->sc_ht_extprotspacing, false, &status)) {
+	if (!ath9k_hw_reset(ah, initial_chan,
+			    sc->sc_ht_info.tx_chan_width,
+			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+			    sc->sc_ht_extprotspacing, false, &status)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: unable to reset hardware; hal status %u "
 			"(freq %u flags 0x%x)\n", __func__, status,
-			sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
+			initial_chan->channel, initial_chan->channelFlags);
 		error = -EIO;
 		spin_unlock_bh(&sc->sc_resetlock);
 		goto done;
@@ -806,7 +689,8 @@
 	 * Note we only do this (at the moment) for station mode.
 	 */
 	if (ath9k_hw_phycounters(ah) &&
-	    ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
+	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
+	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
 		sc->sc_imask |= ATH9K_INT_MIB;
 #endif
 	/*
@@ -816,7 +700,7 @@
 	 * enable the TIM interrupt when operating as station.
 	 */
 	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
-	    (sc->sc_opmode == ATH9K_M_STA) &&
+	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
 	    !sc->sc_config.swBeaconProcess)
 		sc->sc_imask |= ATH9K_INT_TIM;
 	/*
@@ -828,34 +712,34 @@
 
 	/* XXX: we must make sure h/w is ready and clear invalid flag
 	 * before turning on interrupt. */
-	sc->sc_invalid = 0;
+	sc->sc_flags &= ~SC_OP_INVALID;
 done:
 	return error;
 }
 
-/*
- * Reset the hardware w/o losing operational state.  This is
- * basically a more efficient way of doing ath_stop, ath_init,
- * followed by state transitions to the current 802.11
- * operational state.  Used to recover from errors rx overrun
- * and to reset the hardware when rf gain settings must be reset.
- */
-
-static int ath_reset_start(struct ath_softc *sc, u32 flag)
+int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
 	struct ath_hal *ah = sc->sc_ah;
+	int status;
+	int error = 0;
 
 	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
-	ath_draintxq(sc, flag & RESET_RETRY_TXQ);	/* stop xmit side */
-	ath_stoprecv(sc);	/* stop recv side */
-	ath_flushrecv(sc);	/* flush recv queue */
+	ath_draintxq(sc, retry_tx);	/* stop xmit */
+	ath_stoprecv(sc);		/* stop recv */
+	ath_flushrecv(sc);		/* flush recv queue */
 
-	return 0;
-}
-
-static int ath_reset_end(struct ath_softc *sc, u32 flag)
-{
-	struct ath_hal *ah = sc->sc_ah;
+	/* Reset chip */
+	spin_lock_bh(&sc->sc_resetlock);
+	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
+			    sc->sc_ht_info.tx_chan_width,
+			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+			    sc->sc_ht_extprotspacing, false, &status)) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: unable to reset hardware; hal status %u\n",
+			__func__, status);
+		error = -EIO;
+	}
+	spin_unlock_bh(&sc->sc_resetlock);
 
 	if (ath_startrecv(sc) != 0)	/* restart recv */
 		DPRINTF(sc, ATH_DBG_FATAL,
@@ -866,16 +750,17 @@
 	 * that changes the channel so update any state that
 	 * might change as a result.
 	 */
-	ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));
+	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
 
-	ath_update_txpow(sc);	/* update tx power state */
+	ath_update_txpow(sc);
 
-	if (sc->sc_beacons)
+	if (sc->sc_flags & SC_OP_BEACONS)
 		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */
+
 	ath9k_hw_set_interrupts(ah, sc->sc_imask);
 
 	/* Restart the txq */
-	if (flag & RESET_RETRY_TXQ) {
+	if (retry_tx) {
 		int i;
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
@@ -885,28 +770,6 @@
 			}
 		}
 	}
-	return 0;
-}
-
-int ath_reset(struct ath_softc *sc)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	int status;
-	int error = 0;
-	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
-
-	/* NB: indicate channel change so we do a full reset */
-	spin_lock_bh(&sc->sc_resetlock);
-	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
-			   ht_macmode,
-			   sc->sc_tx_chainmask, sc->sc_rx_chainmask,
-			   sc->sc_ht_extprotspacing, false, &status)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: unable to reset hardware; hal status %u\n",
-			__func__, status);
-		error = -EIO;
-	}
-	spin_unlock_bh(&sc->sc_resetlock);
 
 	return error;
 }
@@ -916,7 +779,7 @@
 	struct ath_hal *ah = sc->sc_ah;
 
 	/* No I/O if device has been surprise removed */
-	if (sc->sc_invalid)
+	if (sc->sc_flags & SC_OP_INVALID)
 		return -EIO;
 
 	/* Shut off the interrupt before setting sc->sc_invalid to '1' */
@@ -924,7 +787,7 @@
 
 	/* XXX: we must make sure h/w will not generate any interrupt
 	 * before setting the invalid flag. */
-	sc->sc_invalid = 1;
+	sc->sc_flags |= SC_OP_INVALID;
 
 	/* disable HAL and put h/w to sleep */
 	ath9k_hw_disable(sc->sc_ah);
@@ -945,7 +808,7 @@
 	bool sched = false;
 
 	do {
-		if (sc->sc_invalid) {
+		if (sc->sc_flags & SC_OP_INVALID) {
 			/*
 			 * The hardware is not ready/present, don't
 			 * touch anything. Note this can happen early
@@ -1055,7 +918,7 @@
 
 	if (status & ATH9K_INT_FATAL) {
 		/* need a chip reset */
-		ath_internal_reset(sc);
+		ath_reset(sc, false);
 		return;
 	} else {
 
@@ -1098,10 +961,9 @@
 	int status;
 	int error = 0, i;
 	int csz = 0;
-	u32 rd;
 
 	/* XXX: hardware will not be ready until ath_open() being called */
-	sc->sc_invalid = 1;
+	sc->sc_flags |= SC_OP_INVALID;
 
 	sc->sc_debug = DBG_DEFAULT;
 	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
@@ -1131,9 +993,6 @@
 	}
 	sc->sc_ah = ah;
 
-	/* Get the chipset-specific aggr limit. */
-	sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
-
 	/* Get the hardware key cache size. */
 	sc->sc_keymax = ah->ah_caps.keycache_size;
 	if (sc->sc_keymax > ATH_KEYMAX) {
@@ -1167,14 +1026,12 @@
 	 * is resposible for filtering this list based on settings
 	 * like the phy mode.
 	 */
-	rd = ah->ah_currentRD;
-
 	error = ath_setup_channels(sc);
 	if (error)
 		goto bad;
 
 	/* default to STA mode */
-	sc->sc_opmode = ATH9K_M_MONITOR;
+	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;
 
 	/* Setup rate tables */
 
@@ -1245,7 +1102,7 @@
 
 	sc->sc_rc = ath_rate_attach(ah);
 	if (sc->sc_rc == NULL) {
-		error = EIO;
+		error = -EIO;
 		goto bad2;
 	}
 
@@ -1285,20 +1142,13 @@
 
 	/* 11n Capabilities */
 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
-		sc->sc_txaggr = 1;
-		sc->sc_rxaggr = 1;
+		sc->sc_flags |= SC_OP_TXAGGR;
+		sc->sc_flags |= SC_OP_RXAGGR;
 	}
 
 	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
 	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
 
-	/* Configuration for rx chain detection */
-	sc->sc_rxchaindetect_ref = 0;
-	sc->sc_rxchaindetect_thresh5GHz = 35;
-	sc->sc_rxchaindetect_thresh2GHz = 35;
-	sc->sc_rxchaindetect_delta5GHz = 30;
-	sc->sc_rxchaindetect_delta2GHz = 30;
-
 	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
 	sc->sc_defant = ath9k_hw_getdefantenna(ah);
 
@@ -1344,7 +1194,7 @@
 	tasklet_kill(&sc->intr_tq);
 	tasklet_kill(&sc->bcon_tasklet);
 	ath_stop(sc);
-	if (!sc->sc_invalid)
+	if (!(sc->sc_flags & SC_OP_INVALID))
 		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
 	ath_rate_detach(sc->sc_rc);
 	/* cleanup tx queues */
@@ -1471,9 +1321,9 @@
 	/* if station reassociates, tear down the aggregation state. */
 	if (!isnew) {
 		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
-			if (sc->sc_txaggr)
+			if (sc->sc_flags & SC_OP_TXAGGR)
 				ath_tx_aggr_teardown(sc, an, tidno);
-			if (sc->sc_rxaggr)
+			if (sc->sc_flags & SC_OP_RXAGGR)
 				ath_rx_aggr_teardown(sc, an, tidno);
 		}
 	}
@@ -1822,13 +1672,6 @@
 /* Utilities */
 /*************/
 
-void ath_internal_reset(struct ath_softc *sc)
-{
-	ath_reset_start(sc, 0);
-	ath_reset(sc);
-	ath_reset_end(sc, 0);
-}
-
 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
 {
 	int qnum;
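
Much of the churn in core.c above is mechanical: the scattered booleans
(sc_invalid, sc_beacons, sc_txaggr, ...) are folded into a single sc_flags word
tested with SC_OP_* bits, presumably defined in the core.h changes (the
definitions are not visible in this excerpt). A minimal sketch of the pattern,
with illustrative flag values and types:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_SC_OP_INVALID	BIT(0)	/* device gone / not ready */
#define EX_SC_OP_BEACONS	BIT(1)	/* beacon timers programmed */

static inline void example_mark_ready(u32 *sc_flags)
{
	*sc_flags &= ~EX_SC_OP_INVALID;
}

static inline bool example_hw_usable(u32 sc_flags)
{
	return !(sc_flags & EX_SC_OP_INVALID);
}
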
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 2f84093..872f0c5 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -39,6 +39,8 @@
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <net/mac80211.h>
+#include <linux/leds.h>
+#include <linux/rfkill.h>
 
 #include "ath9k.h"
 #include "rc.h"
@@ -79,12 +81,12 @@
 		}				\
 	} while (0)
 
+#define TSF_TO_TU(_h, _l) \
+	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
 /* XXX: remove */
 #define memzero(_buf, _len) memset(_buf, 0, _len)
 
-#define get_dma_mem_context(var, field) (&((var)->field))
-#define copy_dma_mem_context(dst, src)  (*dst = *src)
-
 #define ATH9K_BH_STATUS_INTACT		0
 #define ATH9K_BH_STATUS_CHANGE		1
 
@@ -95,6 +97,8 @@
 	return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
 }
 
+static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
 /*************/
 /* Debugging */
 /*************/
@@ -175,11 +179,6 @@
 /* Descriptor Management */
 /*************************/
 
-/* Number of descriptors per buffer. The only case where we see skbuff
-chains is due to FF aggregation in the driver. */
-#define	ATH_TXDESC	    1
-/* if there's more fragment for this MSDU */
-#define ATH_BF_MORE_MPDU    1
 #define ATH_TXBUF_RESET(_bf) do {				\
 		(_bf)->bf_status = 0;				\
 		(_bf)->bf_lastbf = NULL;			\
@@ -189,28 +188,29 @@
 			    sizeof(struct ath_buf_state));	\
 	} while (0)
 
+enum buffer_type {
+	BUF_DATA		= BIT(0),
+	BUF_AGGR		= BIT(1),
+	BUF_AMPDU		= BIT(2),
+	BUF_HT			= BIT(3),
+	BUF_RETRY		= BIT(4),
+	BUF_XRETRY		= BIT(5),
+	BUF_SHORT_PREAMBLE	= BIT(6),
+	BUF_BAR			= BIT(7),
+	BUF_PSPOLL		= BIT(8),
+	BUF_AGGR_BURST		= BIT(9),
+	BUF_CALC_AIRTIME	= BIT(10),
+};
+
 struct ath_buf_state {
-	int bfs_nframes;	/* # frames in aggregate */
-	u16 bfs_al;		/* length of aggregate */
-	u16 bfs_frmlen;		/* length of frame */
-	int bfs_seqno;		/* sequence number */
-	int bfs_tidno;		/* tid of this frame */
-	int bfs_retries;	/* current retries */
+	int bfs_nframes;			/* # frames in aggregate */
+	u16 bfs_al;				/* length of aggregate */
+	u16 bfs_frmlen;				/* length of frame */
+	int bfs_seqno;				/* sequence number */
+	int bfs_tidno;				/* tid of this frame */
+	int bfs_retries;			/* current retries */
 	struct ath_rc_series bfs_rcs[4];	/* rate series */
-	u8 bfs_isdata:1;	/* is a data frame/aggregate */
-	u8 bfs_isaggr:1;	/* is an aggregate */
-	u8 bfs_isampdu:1;	/* is an a-mpdu, aggregate or not */
-	u8 bfs_ht:1;		/* is an HT frame */
-	u8 bfs_isretried:1;	/* is retried */
-	u8 bfs_isxretried:1;	/* is excessive retried */
-	u8 bfs_shpreamble:1;	/* is short preamble */
-	u8 bfs_isbar:1;		/* is a BAR */
-	u8 bfs_ispspoll:1;	/* is a PS-Poll */
-	u8 bfs_aggrburst:1;	/* is a aggr burst */
-	u8 bfs_calcairtime:1;	/* requests airtime be calculated
-				when set for tx frame */
-	int bfs_rifsburst_elem;	/* RIFS burst/bar */
-	int bfs_nrifsubframes;	/* # of elements in burst */
+	u32 bf_type;				/* BUF_* (enum buffer_type) */
 	/* key type use to encrypt this frame */
 	enum ath9k_key_type bfs_keytype;
 };
@@ -222,26 +222,22 @@
 #define bf_seqno        	bf_state.bfs_seqno
 #define bf_tidno        	bf_state.bfs_tidno
 #define bf_rcs          	bf_state.bfs_rcs
-#define bf_isdata       	bf_state.bfs_isdata
-#define bf_isaggr       	bf_state.bfs_isaggr
-#define bf_isampdu      	bf_state.bfs_isampdu
-#define bf_ht           	bf_state.bfs_ht
-#define bf_isretried    	bf_state.bfs_isretried
-#define bf_isxretried   	bf_state.bfs_isxretried
-#define bf_shpreamble   	bf_state.bfs_shpreamble
-#define bf_rifsburst_elem  	bf_state.bfs_rifsburst_elem
-#define bf_nrifsubframes  	bf_state.bfs_nrifsubframes
 #define bf_keytype      	bf_state.bfs_keytype
-#define bf_isbar        	bf_state.bfs_isbar
-#define bf_ispspoll     	bf_state.bfs_ispspoll
-#define bf_aggrburst    	bf_state.bfs_aggrburst
-#define bf_calcairtime  	bf_state.bfs_calcairtime
+#define bf_isdata(bf)		(bf->bf_state.bf_type & BUF_DATA)
+#define bf_isaggr(bf)		(bf->bf_state.bf_type & BUF_AGGR)
+#define bf_isampdu(bf)		(bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isht(bf)		(bf->bf_state.bf_type & BUF_HT)
+#define bf_isretried(bf)	(bf->bf_state.bf_type & BUF_RETRY)
+#define bf_isxretried(bf)	(bf->bf_state.bf_type & BUF_XRETRY)
+#define bf_isshpreamble(bf)	(bf->bf_state.bf_type & BUF_SHORT_PREAMBLE)
+#define bf_isbar(bf)		(bf->bf_state.bf_type & BUF_BAR)
+#define bf_ispspoll(bf) 	(bf->bf_state.bf_type & BUF_PSPOLL)
+#define bf_isaggrburst(bf)	(bf->bf_state.bf_type & BUF_AGGR_BURST)
 
 /*
  * Abstraction of a contiguous buffer to transmit/receive.  There is only
  * a single hw descriptor encapsulated here.
  */
-
 struct ath_buf {
 	struct list_head list;
 	struct list_head *last;
@@ -391,10 +387,10 @@
 		 struct sk_buff *skb,
 		 struct ath_recv_status *rx_status,
 		 enum ATH_RX_TYPE *status);
-int ath__rx_indicate(struct ath_softc *sc,
-		    struct sk_buff *skb,
-		    struct ath_recv_status *status,
-		    u16 keyix);
+int _ath_rx_indicate(struct ath_softc *sc,
+		     struct sk_buff *skb,
+		     struct ath_recv_status *status,
+		     u16 keyix);
 int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
 		    struct ath_recv_status *status);
 
@@ -402,8 +398,7 @@
 /* TX */
 /******/
 
-#define ATH_FRAG_PER_MSDU       1
-#define ATH_TXBUF               (512/ATH_FRAG_PER_MSDU)
+#define ATH_TXBUF               512
 /* max number of transmit attempts (tries) */
 #define ATH_TXMAXTRY            13
 /* max number of 11n transmit attempts (tries) */
@@ -522,7 +517,6 @@
 	u32 keyix;
 	int min_rate;
 	int mcast_rate;
-	u16 nextfraglen;
 	struct ath_softc *dev;
 	dma_addr_t dmacontext;
 };
@@ -557,10 +551,10 @@
 int ath_tx_setup(struct ath_softc *sc, int haltype);
 void ath_draintxq(struct ath_softc *sc, bool retry_tx);
 void ath_tx_draintxq(struct ath_softc *sc,
-	struct ath_txq *txq, bool retry_tx);
+		     struct ath_txq *txq, bool retry_tx);
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
 void ath_tx_node_cleanup(struct ath_softc *sc,
-	struct ath_node *an, bool bh_flag);
+			 struct ath_node *an, bool bh_flag);
 void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
@@ -575,6 +569,7 @@
 void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
 void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 		     struct ath_xmit_status *tx_status, struct ath_node *an);
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
 
 /**********************/
 /* Node / Aggregation */
@@ -585,7 +580,6 @@
 /* indicates the node is 80211 power save */
 #define ATH_NODE_PWRSAVE        0x2
 
-#define ADDBA_TIMEOUT              200 /* 200 milliseconds */
 #define ADDBA_EXCHANGE_ATTEMPTS    10
 #define ATH_AGGR_DELIM_SZ          4   /* delimiter size   */
 #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
@@ -705,9 +699,6 @@
 #define	ATH_BCBUF               	4   /* number of beacon buffers */
 #define ATH_DEFAULT_BINTVAL     	100 /* default beacon interval in TU */
 #define ATH_DEFAULT_BMISS_LIMIT 	10
-#define	ATH_BEACON_AIFS_DEFAULT		0  /* Default aifs for ap beacon q */
-#define	ATH_BEACON_CWMIN_DEFAULT	0  /* Default cwmin for ap beacon q */
-#define	ATH_BEACON_CWMAX_DEFAULT	0  /* Default cwmax for ap beacon q */
 #define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
 
 /* beacon configuration */
@@ -724,30 +715,16 @@
 	} u; /* last received beacon/probe response timestamp of this BSS. */
 };
 
-/* offsets in a beacon frame for
- * quick acess of beacon content by low-level driver */
-struct ath_beacon_offset {
-	u8 *bo_tim;	/* start of atim/dtim */
-};
-
 void ath9k_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, int if_id);
 int ath_beaconq_setup(struct ath_hal *ah);
 int ath_beacon_alloc(struct ath_softc *sc, int if_id);
 void ath_bstuck_process(struct ath_softc *sc);
-void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
-void ath_beacon_free(struct ath_softc *sc);
 void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
 void ath_beacon_sync(struct ath_softc *sc, int if_id);
-void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
 void ath_get_beaconconfig(struct ath_softc *sc,
 			  int if_id,
 			  struct ath_beacon_config *conf);
-int ath_update_beacon(struct ath_softc *sc,
-		      int if_id,
-		      struct ath_beacon_offset *bo,
-		      struct sk_buff *skb,
-		      int mcast);
 /********/
 /* VAPs */
 /********/
@@ -774,10 +751,8 @@
 	struct ieee80211_vif *av_if_data;
 	enum ath9k_opmode av_opmode;	/* VAP operational mode */
 	struct ath_buf *av_bcbuf;	/* beacon buffer */
-	struct ath_beacon_offset av_boff; /* dynamic update state */
 	struct ath_tx_control av_btxctl;  /* txctl information for beacon */
 	int av_bslot;			/* beacon slot index */
-	struct ath_txq av_mcastq;	/* multicast transmit queue */
 	struct ath_vap_config av_config;/* vap configuration parameters*/
 	struct ath_rate_node *rc_node;
 };
@@ -788,8 +763,7 @@
 		   enum ath9k_opmode opmode);
 int ath_vap_detach(struct ath_softc *sc, int if_id);
 int ath_vap_config(struct ath_softc *sc,
-	int if_id, struct ath_vap_config *if_config);
-int ath_vap_listen(struct ath_softc *sc, int if_id);
+		   int if_id, struct ath_vap_config *if_config);
 
 /*********************/
 /* Antenna diversity */
@@ -830,6 +804,36 @@
 void ath_setdefantenna(void *sc, u32 antenna);
 
 /********************/
+/*   LED Control    */
+/********************/
+
+#define ATH_LED_PIN	1
+
+enum ath_led_type {
+	ATH_LED_RADIO,
+	ATH_LED_ASSOC,
+	ATH_LED_TX,
+	ATH_LED_RX
+};
+
+struct ath_led {
+	struct ath_softc *sc;
+	struct led_classdev led_cdev;
+	enum ath_led_type led_type;
+	char name[32];
+	bool registered;
+};
+
+/* Rfkill */
+#define ATH_RFKILL_POLL_INTERVAL	2000 /* msecs */
+
+struct ath_rfkill {
+	struct rfkill *rfkill;
+	struct delayed_work rfkill_poll;
+	char rfkill_name[32];
+};
+
+/********************/
 /* Main driver core */
 /********************/
 
@@ -841,11 +845,7 @@
 #define	ATH_DEFAULT_NOISE_FLOOR -95
 #define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80  /* % of beacon interval */
-#define ATH_PREAMBLE_SHORT	(1<<0)
-#define ATH_PROTECT_ENABLE	(1<<1)
 #define ATH_MAX_SW_RETRIES      10
-/* Num farmes difference in tx to flip default recv */
-#define	ATH_ANTENNA_DIFF	2
 #define ATH_CHAN_MAX            255
 #define IEEE80211_WEP_NKID      4       /* number of key ids */
 #define IEEE80211_RATE_VAL      0x7f
@@ -859,9 +859,7 @@
  */
 #define	ATH_KEYMAX	        128        /* max key cache size we handle */
 
-#define RESET_RETRY_TXQ         0x00000001
 #define ATH_IF_ID_ANY   	0xff
-
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
 
 #define RSSI_LPF_THRESHOLD         -20
@@ -907,60 +905,64 @@
 	u8 ext_chan_offset;
 };
 
+#define SC_OP_INVALID		BIT(0)
+#define SC_OP_BEACONS		BIT(1)
+#define SC_OP_RXAGGR		BIT(2)
+#define SC_OP_TXAGGR		BIT(3)
+#define SC_OP_CHAINMASK_UPDATE	BIT(4)
+#define SC_OP_FULL_RESET	BIT(5)
+#define SC_OP_NO_RESET		BIT(6)
+#define SC_OP_PREAMBLE_SHORT	BIT(7)
+#define SC_OP_PROTECT_ENABLE	BIT(8)
+#define SC_OP_RXFLUSH		BIT(9)
+#define SC_OP_LED_ASSOCIATED	BIT(10)
+#define SC_OP_RFKILL_REGISTERED	BIT(11)
+#define SC_OP_RFKILL_SW_BLOCKED	BIT(12)
+#define SC_OP_RFKILL_HW_BLOCKED	BIT(13)
+
 struct ath_softc {
 	struct ieee80211_hw *hw;
 	struct pci_dev *pdev;
-	void __iomem *mem;
 	struct tasklet_struct intr_tq;
 	struct tasklet_struct bcon_tasklet;
-	struct ath_config sc_config;	/* load-time parameters */
-	int sc_debug;
+	struct ath_config sc_config;
 	struct ath_hal *sc_ah;
-	struct ath_rate_softc *sc_rc;	/* tx rate control support */
-	u32 sc_intrstatus;
-	enum ath9k_opmode sc_opmode;	/* current operating mode */
+	struct ath_rate_softc *sc_rc;
+	void __iomem *mem;
 
-	u8 sc_invalid;			/* being detached */
-	u8 sc_beacons;			/* beacons running */
-	u8 sc_scanning;			/* scanning active */
-	u8 sc_txaggr;			/* enable 11n tx aggregation */
-	u8 sc_rxaggr;			/* enable 11n rx aggregation */
-	u8 sc_update_chainmask;		/* change chain mask */
-	u8 sc_full_reset;		/* force full reset */
-	enum wireless_mode sc_curmode;	/* current phy mode */
-	u16 sc_curtxpow;
-	u16 sc_curaid;
 	u8 sc_curbssid[ETH_ALEN];
 	u8 sc_myaddr[ETH_ALEN];
-	enum PROT_MODE sc_protmode;
-	u8 sc_mcastantenna;
-	u8 sc_txantenna;		/* data tx antenna (fixed or auto) */
-	u8 sc_nbcnvaps;			/* # of vaps sending beacons */
-	u16 sc_nvaps;			/* # of active virtual ap's */
-	struct ath_vap *sc_vaps[ATH_BCBUF];
-	enum ath9k_int sc_imask;
 	u8 sc_bssidmask[ETH_ALEN];
-	u8 sc_defant;			/* current default antenna */
-	u8 sc_rxotherant;		/* rx's on non-default antenna */
+
+	int sc_debug;
+	u32 sc_intrstatus;
+	u32 sc_flags; /* SC_OP_* */
+	unsigned int rx_filter;
+	u16 sc_curtxpow;
+	u16 sc_curaid;
 	u16 sc_cachelsz;
 	int sc_slotupdate;		/* slot to next advance fsm */
 	int sc_slottime;
-	u8 sc_noreset;
 	int sc_bslot[ATH_BCBUF];
+	u8 sc_tx_chainmask;
+	u8 sc_rx_chainmask;
+	enum ath9k_int sc_imask;
+	enum wireless_mode sc_curmode;	/* current phy mode */
+	enum PROT_MODE sc_protmode;
+
+	u8 sc_nbcnvaps;			/* # of vaps sending beacons */
+	u16 sc_nvaps;			/* # of active virtual ap's */
+	struct ath_vap *sc_vaps[ATH_BCBUF];
+
+	u8 sc_mcastantenna;
+	u8 sc_defant;			/* current default antenna */
+	u8 sc_rxotherant;		/* rx's on non-default antenna */
+
 	struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
 	struct list_head node_list;
 	struct ath_ht_info sc_ht_info;
-	int16_t sc_noise_floor;		/* signal noise floor in dBm */
 	enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
-	u8 sc_tx_chainmask;
-	u8 sc_rx_chainmask;
-	u8 sc_rxchaindetect_ref;
-	u8 sc_rxchaindetect_thresh5GHz;
-	u8 sc_rxchaindetect_thresh2GHz;
-	u8 sc_rxchaindetect_delta5GHz;
-	u8 sc_rxchaindetect_delta2GHz;
-	u32 sc_rtsaggrlimit;		/* Chipset specific aggr limit */
-	u32 sc_flags;
+
 #ifdef CONFIG_SLOW_ANT_DIV
 	struct ath_antdiv sc_antdiv;
 #endif
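The scattered u8 status fields dropped from struct ath_softc are replaced by the SC_OP_* bits collected in the single sc_flags word. A minimal sketch of the resulting idiom (BIT() is defined locally here for self-containment; in the kernel it comes from linux/bitops.h, and the struct below is a reduced stand-in, not the real ath_softc):

#include <stdbool.h>
#include <stdint.h>

#define BIT(n)          (1U << (n))   /* stand-in for the kernel macro */
#define SC_OP_INVALID   BIT(0)        /* values mirror the list above  */
#define SC_OP_BEACONS   BIT(1)
#define SC_OP_RXFLUSH   BIT(9)

struct softc_sketch { uint32_t sc_flags; };

static void beacons_configured(struct softc_sketch *sc)
{
	sc->sc_flags |= SC_OP_BEACONS;            /* was: sc->sc_beacons = 1 */
}

static bool rx_allowed(const struct softc_sketch *sc)
{
	/* was: separate sc_invalid / sc_rxflush byte checks */
	return !(sc->sc_flags & (SC_OP_INVALID | SC_OP_RXFLUSH));
}

The same switch shows up later in this patch: main.c now sets sc->sc_flags |= SC_OP_BEACONS where it previously set sc->sc_beacons = 1.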
@@ -980,8 +982,6 @@
 	struct ath_descdma sc_rxdma;
 	int sc_rxbufsize;	/* rx size based on mtu */
 	u32 *sc_rxlink;		/* link ptr in last RX desc */
-	u32 sc_rxflush;		/* rx flush in progress */
-	u64 sc_lastrx;		/* tsf of last rx'd frame */
 
 	/* TX */
 	struct list_head sc_txbuf;
@@ -990,7 +990,6 @@
 	u32 sc_txqsetup;
 	u32 sc_txintrperiod;	/* tx interrupt batching */
 	int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME	AC -> h/w qnum */
-	u32 sc_ant_tx[8];	/* recent tx frames/antenna */
 	u16 seq_no; /* TX sequence number */
 
 	/* Beacon */
@@ -1001,6 +1000,7 @@
 	u32 sc_bhalq;
 	u32 sc_bmisscount;
 	u32 ast_be_xmit;	/* beacons transmitted */
+	u64 bc_tstamp;
 
 	/* Rate */
 	struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
@@ -1015,7 +1015,6 @@
 	/* Channel, Band */
 	struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
 	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
-	struct ath9k_channel sc_curchan;
 
 	/* Locks */
 	spinlock_t sc_rxflushlock;
@@ -1023,6 +1022,15 @@
 	spinlock_t sc_txbuflock;
 	spinlock_t sc_resetlock;
 	spinlock_t node_lock;
+
+	/* LEDs */
+	struct ath_led radio_led;
+	struct ath_led assoc_led;
+	struct ath_led tx_led;
+	struct ath_led rx_led;
+
+	/* Rfkill */
+	struct ath_rfkill rf_kill;
 };
 
 int ath_init(u16 devid, struct ath_softc *sc);
@@ -1030,14 +1038,8 @@
 int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
 int ath_suspend(struct ath_softc *sc);
 irqreturn_t ath_isr(int irq, void *dev);
-int ath_reset(struct ath_softc *sc);
-void ath_scan_start(struct ath_softc *sc);
-void ath_scan_end(struct ath_softc *sc);
+int ath_reset(struct ath_softc *sc, bool retry_tx);
 int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
-void ath_setup_rate(struct ath_softc *sc,
-		    enum wireless_mode wMode,
-		    enum RATE_TYPE type,
-		    const struct ath9k_rate_table *rt);
 
 /*********************/
 /* Utility Functions */
@@ -1056,17 +1058,5 @@
 void ath_get_currentCountry(struct ath_softc *sc,
 	struct ath9k_country_entry *ctry);
 u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
-void ath_internal_reset(struct ath_softc *sc);
-u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
-dma_addr_t ath_skb_map_single(struct ath_softc *sc,
-			      struct sk_buff *skb,
-			      int direction,
-			      dma_addr_t *pa);
-void ath_skb_unmap_single(struct ath_softc *sc,
-			  struct sk_buff *skb,
-			  int direction,
-			  dma_addr_t *pa);
-void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
-enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
 
 #endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 6dbfed0..272c758 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -85,29 +85,6 @@
 	ath9k_hw_adc_dccal_calibrate
 };
 
-static const struct ath_hal ar5416hal = {
-	AR5416_MAGIC,
-	0,
-	0,
-	NULL,
-	NULL,
-	CTRY_DEFAULT,
-	0,
-	0,
-	0,
-	0,
-	0,
-	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	},
-};
-
 static struct ath9k_rate_table ar5416_11a_table = {
 	8,
 	{0},
@@ -371,7 +348,7 @@
 	ah->ah_config.intr_mitigation = 0;
 }
 
-static inline void ath9k_hw_override_ini(struct ath_hal *ah,
+static void ath9k_hw_override_ini(struct ath_hal *ah,
 					 struct ath9k_channel *chan)
 {
 	if (!AR_SREV_5416_V20_OR_LATER(ah)
@@ -381,8 +358,8 @@
 	REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
 }
 
-static inline void ath9k_hw_init_bb(struct ath_hal *ah,
-				    struct ath9k_channel *chan)
+static void ath9k_hw_init_bb(struct ath_hal *ah,
+			     struct ath9k_channel *chan)
 {
 	u32 synthDelay;
 
@@ -397,8 +374,8 @@
 	udelay(synthDelay + BASE_ACTIVATE_DELAY);
 }
 
-static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
-						 enum ath9k_opmode opmode)
+static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
+					  enum ath9k_opmode opmode)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 
@@ -428,7 +405,7 @@
 	}
 }
 
-static inline void ath9k_hw_init_qos(struct ath_hal *ah)
+static void ath9k_hw_init_qos(struct ath_hal *ah)
 {
 	REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
 	REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -523,7 +500,7 @@
 		return ath9k_hw_eeprom_read(ah, off, data);
 }
 
-static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
+static bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 	struct ar5416_eeprom *eep = &ahp->ah_eeprom;
@@ -790,7 +767,7 @@
 	return true;
 }
 
-static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
+static int ath9k_hw_check_eeprom(struct ath_hal *ah)
 {
 	u32 sum = 0, el;
 	u16 *eepdata;
@@ -1196,11 +1173,12 @@
 
 	ah = &ahp->ah;
 
-	memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
-
 	ah->ah_sc = sc;
 	ah->ah_sh = mem;
 
+	ah->ah_magic = AR5416_MAGIC;
+	ah->ah_countryCode = CTRY_DEFAULT;
+
 	ah->ah_devid = devid;
 	ah->ah_subvendorid = 0;
 
@@ -1294,7 +1272,7 @@
 	}
 }
 
-static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
+static int ath9k_hw_get_radiorev(struct ath_hal *ah)
 {
 	u32 val;
 	int i;
@@ -1307,7 +1285,7 @@
 	return ath9k_hw_reverse_bits(val, 8);
 }
 
-static inline int ath9k_hw_init_macaddr(struct ath_hal *ah)
+static int ath9k_hw_init_macaddr(struct ath_hal *ah)
 {
 	u32 sum;
 	int i;
@@ -1389,7 +1367,7 @@
 	return spur_val;
 }
 
-static inline int ath9k_hw_rfattach(struct ath_hal *ah)
+static int ath9k_hw_rfattach(struct ath_hal *ah)
 {
 	bool rfStatus = false;
 	int ecode = 0;
@@ -1434,8 +1412,8 @@
 	return 0;
 }
 
-static inline void ath9k_hw_init_pll(struct ath_hal *ah,
-				     struct ath9k_channel *chan)
+static void ath9k_hw_init_pll(struct ath_hal *ah,
+			      struct ath9k_channel *chan)
 {
 	u32 pll;
 
@@ -1553,7 +1531,7 @@
 	}
 }
 
-static inline void
+static void
 ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
 {
 	u32 rfMode = 0;
@@ -1623,7 +1601,7 @@
 	return true;
 }
 
-static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
+static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
 {
 	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
 		  AR_RTC_FORCE_WAKE_ON_INT);
@@ -1664,7 +1642,7 @@
 	}
 }
 
-static inline
+static
 struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
 					  struct ath9k_channel *chan)
 {
@@ -2098,7 +2076,7 @@
 		ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
 }
 
-static inline void ath9k_hw_ani_setup(struct ath_hal *ah)
+static void ath9k_hw_ani_setup(struct ath_hal *ah)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 	int i;
@@ -2548,6 +2526,11 @@
 	}
 }
 
+/*
+ * Process a MIB interrupt.  We may potentially be invoked because
+ * any of the MIB counters overflow/trigger so don't assume we're
+ * here because a PHY error counter triggered.
+ */
 void ath9k_hw_procmibevent(struct ath_hal *ah,
 			   const struct ath9k_node_stats *stats)
 {
@@ -2555,18 +2538,20 @@
 	u32 phyCnt1, phyCnt2;
 
 	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
-
+	/* Reset these counters regardless */
 	REG_WRITE(ah, AR_FILT_OFDM, 0);
 	REG_WRITE(ah, AR_FILT_CCK, 0);
 	if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
 		REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
 
+	/* Clear the mib counters and save them in the stats */
 	ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
 	ahp->ah_stats.ast_nodestats = *stats;
 
 	if (!DO_ANI(ah))
 		return;
 
+	/* NB: these are not reset-on-read */
 	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
 	phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
 	if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
@@ -2574,6 +2559,7 @@
 		struct ar5416AniState *aniState = ahp->ah_curani;
 		u32 ofdmPhyErrCnt, cckPhyErrCnt;
 
+		/* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
 		ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
 		ahp->ah_stats.ast_ani_ofdmerrs +=
 			ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
@@ -2584,11 +2570,17 @@
 			cckPhyErrCnt - aniState->cckPhyErrCount;
 		aniState->cckPhyErrCount = cckPhyErrCnt;
 
+		/*
+		 * NB: figure out which counter triggered.  If both
+		 * trigger we'll only deal with one as the processing
+		 * clobbers the error counter so the trigger threshold
+		 * check will never be true.
+		 */
 		if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
 			ath9k_hw_ani_ofdm_err_trigger(ah);
 		if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
 			ath9k_hw_ani_cck_err_trigger(ah);
-
+		/* NB: always restart to ensure the h/w counters are reset */
 		ath9k_ani_restart(ah);
 	}
 }
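The comments added to ath9k_hw_procmibevent() describe reading free-running PHY error registers against a stored base and folding only the new increment into the long-running statistics before restarting ANI. A compact sketch of that bookkeeping, under the assumption of u32 counters that are not reset on read (names are illustrative, not the driver's):

#include <stdint.h>

struct phy_err_sketch {
	uint32_t base;    /* register value when ANI was last restarted */
	uint32_t count;   /* errors observed since that restart         */
};

/* Returns only the errors that are new since the previous call, i.e. the
 * amount that would be added to an accumulated ast_ani_*errs-style total. */
static uint32_t phy_err_update(struct phy_err_sketch *st, uint32_t reg_now)
{
	uint32_t since_restart = reg_now - st->base;   /* u32 wrap-safe */
	uint32_t delta = since_restart - st->count;

	st->count = since_restart;
	return delta;
}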
@@ -2822,32 +2814,11 @@
 	}
 }
 
-static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
-				enum ath9k_gpio_output_mux_type
-				halSignalType)
+void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
+			 u32 ah_signal_type)
 {
-	u32 ah_signal_type;
 	u32 gpio_shift;
 
-	static u32 MuxSignalConversionTable[] = {
-
-		AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
-
-		AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
-
-		AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
-
-		AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
-
-		AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
-	};
-
-	if ((halSignalType >= 0)
-	    && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
-		ah_signal_type = MuxSignalConversionTable[halSignalType];
-	else
-		return false;
-
 	ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
 
 	gpio_shift = 2 * gpio;
@@ -2856,19 +2827,46 @@
 		AR_GPIO_OE_OUT,
 		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
 		(AR_GPIO_OE_OUT_DRV << gpio_shift));
-
-	return true;
 }
 
-static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio,
-			      u32 val)
+void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val)
 {
 	REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
 		AR_GPIO_BIT(gpio));
-	return true;
 }
 
-static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
+/*
+ * Configure GPIO Input lines
+ */
+void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
+{
+	u32 gpio_shift;
+
+	ASSERT(gpio < ah->ah_caps.num_gpio_pins);
+
+	gpio_shift = gpio << 1;
+
+	REG_RMW(ah,
+		AR_GPIO_OE_OUT,
+		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
+		(AR_GPIO_OE_OUT_DRV << gpio_shift));
+}
+
+#ifdef CONFIG_RFKILL
+static void ath9k_enable_rfkill(struct ath_hal *ah)
+{
+	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+		    AR_GPIO_INPUT_MUX2_RFSILENT);
+
+	ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio);
+	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+#endif
+
+u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
 {
 	if (gpio >= ah->ah_caps.num_gpio_pins)
 		return 0xffffffff;
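Both ath9k_hw_cfg_output() and ath9k_hw_cfg_gpio_input() program a two-bit drive field per GPIO pin, which is why the shift is 2 * gpio. A self-contained sketch of that read-modify-write on a shadow register value, with assumed constants standing in for the AR_GPIO_OE_OUT_* definitions:

#include <stdint.h>

#define GPIO_OE_DRV_MASK  0x3u   /* assumed: 2-bit field per pin     */
#define GPIO_OE_DRV_NO    0x0u   /* assumed: pin left as an input    */
#define GPIO_OE_DRV_ALL   0x3u   /* assumed: pin actively driven out */

/* Update the drive field for one pin and return the new register value. */
static uint32_t gpio_oe_set(uint32_t oe_reg, unsigned int gpio, uint32_t drv)
{
	unsigned int shift = gpio * 2;                 /* same as gpio << 1 */

	oe_reg &= ~(GPIO_OE_DRV_MASK << shift);        /* clear old setting */
	oe_reg |= (drv & GPIO_OE_DRV_MASK) << shift;   /* write new setting */
	return oe_reg;
}

REG_RMW() in the driver performs the same clear-then-set in a single call against the live register.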
@@ -2883,7 +2881,7 @@
 	}
 }
 
-static inline int ath9k_hw_post_attach(struct ath_hal *ah)
+static int ath9k_hw_post_attach(struct ath_hal *ah)
 {
 	int ecode;
 
@@ -3081,17 +3079,17 @@
 
 	pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
 
+#ifdef CONFIG_RFKILL
 	ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
 	if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
-		ahp->ah_gpioSelect =
+		ah->ah_rfkill_gpio =
 			MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
-		ahp->ah_polarity =
+		ah->ah_rfkill_polarity =
 			MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
 
-		ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
-				       NULL);
 		pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
 	}
+#endif
 
 	if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
 	    (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
@@ -3595,7 +3593,7 @@
 	return true;
 }
 
-static inline void
+static void
 ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
 				    struct ath9k_channel *chan,
 				    struct cal_data_per_freq *pRawDataSet,
@@ -3777,7 +3775,7 @@
 	return;
 }
 
-static inline bool
+static bool
 ath9k_hw_set_power_cal_table(struct ath_hal *ah,
 			     struct ar5416_eeprom *pEepData,
 			     struct ath9k_channel *chan,
@@ -3980,7 +3978,7 @@
 	}
 }
 
-static inline void
+static void
 ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
 				  struct ath9k_channel *chan,
 				  struct cal_target_power_leg *powInfo,
@@ -4046,7 +4044,7 @@
 	}
 }
 
-static inline void
+static void
 ath9k_hw_get_target_powers(struct ath_hal *ah,
 			   struct ath9k_channel *chan,
 			   struct cal_target_power_ht *powInfo,
@@ -4113,7 +4111,7 @@
 	}
 }
 
-static inline u16
+static u16
 ath9k_hw_get_max_edge_power(u16 freq,
 			    struct cal_ctl_edges *pRdEdgesPower,
 			    bool is2GHz)
@@ -4143,7 +4141,7 @@
 	return twiceMaxEdgePower;
 }
 
-static inline bool
+static bool
 ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
 				  struct ar5416_eeprom *pEepData,
 				  struct ath9k_channel *chan,
@@ -5122,7 +5120,7 @@
 	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
 }
 
-static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah)
+static void ath9k_hw_init_chain_masks(struct ath_hal *ah)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 	int rx_chainmask, tx_chainmask;
@@ -5326,7 +5324,7 @@
 	}
 }
 
-static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
+static void ath9k_hw_init_user_settings(struct ath_hal *ah)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 
@@ -5345,7 +5343,7 @@
 		ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
 }
 
-static inline int
+static int
 ath9k_hw_process_ini(struct ath_hal *ah,
 		     struct ath9k_channel *chan,
 		     enum ath9k_ht_macmode macmode)
@@ -5476,7 +5474,7 @@
 	return 0;
 }
 
-static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
+static void ath9k_hw_setup_calibration(struct ath_hal *ah,
 					      struct hal_cal_list *currCal)
 {
 	REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
@@ -5512,8 +5510,8 @@
 		    AR_PHY_TIMING_CTRL4_DO_CAL);
 }
 
-static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
-					      struct hal_cal_list *currCal)
+static void ath9k_hw_reset_calibration(struct ath_hal *ah,
+				       struct hal_cal_list *currCal)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 	int i;
@@ -5532,7 +5530,7 @@
 	ahp->ah_CalSamples = 0;
 }
 
-static inline void
+static void
 ath9k_hw_per_calibration(struct ath_hal *ah,
 			 struct ath9k_channel *ichan,
 			 u8 rxchainmask,
@@ -5622,7 +5620,7 @@
 	return true;
 }
 
-static inline bool
+static bool
 ath9k_hw_channel_change(struct ath_hal *ah,
 			struct ath9k_channel *chan,
 			enum ath9k_ht_macmode macmode)
@@ -5799,8 +5797,8 @@
 	return retval;
 }
 
-static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
-				     struct ath9k_channel *chan)
+static bool ath9k_hw_init_cal(struct ath_hal *ah,
+			      struct ath9k_channel *chan)
 {
 	struct ath_hal_5416 *ahp = AH5416(ah);
 	struct ath9k_channel *ichan =
@@ -5861,7 +5859,7 @@
 }
 
 
-bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
+bool ath9k_hw_reset(struct ath_hal *ah,
 		    struct ath9k_channel *chan,
 		    enum ath9k_ht_macmode macmode,
 		    u8 txchainmask, u8 rxchainmask,
@@ -5945,7 +5943,7 @@
 			else
 				ath9k_hw_set_gpio(ah, 9, 1);
 		}
-		ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT);
+		ath9k_hw_cfg_output(ah, 9, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 	}
 
 	ecode = ath9k_hw_process_ini(ah, chan, macmode);
@@ -5975,7 +5973,7 @@
 		  | (ah->ah_config.
 		     ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
 		  | ahp->ah_staId1Defaults);
-	ath9k_hw_set_operating_mode(ah, opmode);
+	ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
 
 	REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
 	REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
@@ -6005,13 +6003,15 @@
 	for (i = 0; i < ah->ah_caps.total_queues; i++)
 		ath9k_hw_resettxqueue(ah, i);
 
-	ath9k_hw_init_interrupt_masks(ah, opmode);
+	ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode);
 	ath9k_hw_init_qos(ah);
 
+#ifdef CONFIG_RFKILL
+	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		ath9k_enable_rfkill(ah);
+#endif
 	ath9k_hw_init_user_settings(ah);
 
-	ah->ah_opmode = opmode;
-
 	REG_WRITE(ah, AR_STA_ID1,
 		  REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
 
@@ -6539,31 +6539,6 @@
 	return true;
 }
 
-#ifdef CONFIG_ATH9K_RFKILL
-static void ath9k_enable_rfkill(struct ath_hal *ah)
-{
-	struct ath_hal_5416 *ahp = AH5416(ah);
-
-	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
-		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
-
-	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
-		    AR_GPIO_INPUT_MUX2_RFSILENT);
-
-	ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
-	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
-
-	if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
-
-		ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
-				       !ahp->ah_gpioBit);
-	} else {
-		ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
-				       ahp->ah_gpioBit);
-	}
-}
-#endif
-
 void
 ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
 		       u16 assocId)
@@ -7678,8 +7653,7 @@
 	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
 		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
 		  | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
-		  | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
-		);
+		  | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
 
 	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
 	REG_WRITE(ah, AR_DMISC(q),
@@ -8324,15 +8298,7 @@
 		*error = -ENXIO;
 		break;
 	}
-	if (ah != NULL) {
-		ah->ah_devid = ah->ah_devid;
-		ah->ah_subvendorid = ah->ah_subvendorid;
-		ah->ah_macVersion = ah->ah_macVersion;
-		ah->ah_macRev = ah->ah_macRev;
-		ah->ah_phyRev = ah->ah_phyRev;
-		ah->ah_analog5GhzRev = ah->ah_analog5GhzRev;
-		ah->ah_analog2GhzRev = ah->ah_analog2GhzRev;
-	}
+
 	return ah;
 }
 
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index ae680f2..2113818 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -314,14 +314,11 @@
 #define RXSTATUS_RATE(ah, ads)  (AR_SREV_5416_V20_OR_LATER(ah) ?	\
 				 MS(ads->ds_rxstatus0, AR_RxRate) :	\
 				 (ads->ds_rxstatus3 >> 2) & 0xFF)
-#define RXSTATUS_DUPLICATE(ah, ads)  (AR_SREV_5416_V20_OR_LATER(ah) ?	\
-				      MS(ads->ds_rxstatus3, AR_Parallel40) : \
-				      (ads->ds_rxstatus3 >> 10) & 0x1)
 
-#define set11nTries(_series, _index)				\
+#define set11nTries(_series, _index) \
 	(SM((_series)[_index].Tries, AR_XmitDataTries##_index))
 
-#define set11nRate(_series, _index)				\
+#define set11nRate(_series, _index) \
 	(SM((_series)[_index].Rate, AR_XmitRate##_index))
 
 #define set11nPktDurRTSCTS(_series, _index)				\
@@ -330,11 +327,11 @@
 		AR_RTSCTSQual##_index : 0))
 
 #define set11nRateFlags(_series, _index)				\
-	(((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
-		AR_2040_##_index : 0) \
-	|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
-		AR_GI##_index : 0) \
-	|SM((_series)[_index].ChSel, AR_ChainSel##_index))
+	(((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ?		\
+	  AR_2040_##_index : 0)						\
+	 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ?	\
+	   AR_GI##_index : 0)						\
+	 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
 
 #define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
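The set11nTries(), set11nRate() and set11nRateFlags() macros above rely on token pasting (##_index) to select the per-series register field macros. A reduced illustration of the technique with made-up field names:

#include <stdio.h>

#define FIELD0  0x000000ffu
#define FIELD1  0x0000ff00u

/* series_mask(0) expands to FIELD0, series_mask(1) to FIELD1, and so on. */
#define series_mask(_index)  (FIELD##_index)

int main(void)
{
	printf("0x%08x 0x%08x\n", series_mask(0), series_mask(1));
	return 0;
}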
 
@@ -346,9 +343,6 @@
 #define MAX_TX_FIFO_THRESHOLD   ((4096 / 64) - 1)
 #define INIT_TX_FIFO_THRESHOLD  MIN_TX_FIFO_THRESHOLD
 
-#define NUM_CORNER_FIX_BITS_2133    7
-#define CCK_OFDM_GAIN_DELTA         15
-
 struct ar5416AniState {
 	struct ath9k_channel c;
 	u8 noiseImmunityLevel;
@@ -377,11 +371,8 @@
 };
 
 #define HAL_PROCESS_ANI     0x00000001
-#define HAL_RADAR_EN        0x80000000
-#define HAL_AR_EN           0x40000000
-
 #define DO_ANI(ah) \
-    ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
+	((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
 
 struct ar5416Stats {
 	u32 ast_ani_niup;
@@ -425,7 +416,6 @@
 #define AR5416_EEP_MINOR_VER_7       0x7
 #define AR5416_EEP_MINOR_VER_9       0x9
 
-#define AR5416_EEP_START_LOC            256
 #define AR5416_NUM_5G_CAL_PIERS         8
 #define AR5416_NUM_2G_CAL_PIERS         4
 #define AR5416_NUM_5G_20_TARGET_POWERS  8
@@ -441,25 +431,10 @@
 #define AR5416_EEPROM_MODAL_SPURS       5
 #define AR5416_MAX_RATE_POWER           63
 #define AR5416_NUM_PDADC_VALUES         128
-#define AR5416_NUM_RATES                16
 #define AR5416_BCHAN_UNUSED             0xFF
 #define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
-#define AR5416_EEPMISC_BIG_ENDIAN       0x01
 #define AR5416_MAX_CHAINS               3
-#define AR5416_ANT_16S                  25
-
-#define AR5416_NUM_ANT_CHAIN_FIELDS     7
-#define AR5416_NUM_ANT_COMMON_FIELDS    4
-#define AR5416_SIZE_ANT_CHAIN_FIELD     3
-#define AR5416_SIZE_ANT_COMMON_FIELD    4
-#define AR5416_ANT_CHAIN_MASK           0x7
-#define AR5416_ANT_COMMON_MASK          0xf
-#define AR5416_CHAIN_0_IDX              0
-#define AR5416_CHAIN_1_IDX              1
-#define AR5416_CHAIN_2_IDX              2
-
 #define AR5416_PWR_TABLE_OFFSET         -5
-#define AR5416_LEGACY_CHAINMASK         1
 
 enum eeprom_param {
 	EEP_NFTHRESH_5,
@@ -633,7 +608,7 @@
 };
 
 #define INIT_INI_ARRAY(iniarray, array, rows, columns) do {	\
-		(iniarray)->ia_array = (u32 *)(array);    \
+		(iniarray)->ia_array = (u32 *)(array);		\
 		(iniarray)->ia_rows = (rows);			\
 		(iniarray)->ia_columns = (columns);		\
 	} while (0)
@@ -641,16 +616,16 @@
 #define INI_RA(iniarray, row, column) \
 	(((iniarray)->ia_array)[(row) *	((iniarray)->ia_columns) + (column)])
 
-#define INIT_CAL(_perCal) do { \
-		(_perCal)->calState = CAL_WAITING; \
-		(_perCal)->calNext = NULL; \
+#define INIT_CAL(_perCal) do {				\
+		(_perCal)->calState = CAL_WAITING;	\
+		(_perCal)->calNext = NULL;		\
 	} while (0)
 
 #define INSERT_CAL(_ahp, _perCal)					\
 	do {								\
 		if ((_ahp)->ah_cal_list_last == NULL) {			\
-			(_ahp)->ah_cal_list = \
-				(_ahp)->ah_cal_list_last = (_perCal); \
+			(_ahp)->ah_cal_list =				\
+				(_ahp)->ah_cal_list_last = (_perCal);	\
 			((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
 		} else {						\
 			((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
@@ -696,25 +671,29 @@
 struct ath_hal_5416 {
 	struct ath_hal ah;
 	struct ar5416_eeprom ah_eeprom;
+	struct ar5416Stats ah_stats;
+	struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
+	void __iomem *ah_cal_mem;
+
 	u8 ah_macaddr[ETH_ALEN];
 	u8 ah_bssid[ETH_ALEN];
 	u8 ah_bssidmask[ETH_ALEN];
 	u16 ah_assocId;
+
 	int16_t ah_curchanRadIndex;
 	u32 ah_maskReg;
-	struct ar5416Stats ah_stats;
-	u32 ah_txDescMask;
 	u32 ah_txOkInterruptMask;
 	u32 ah_txErrInterruptMask;
 	u32 ah_txDescInterruptMask;
 	u32 ah_txEolInterruptMask;
 	u32 ah_txUrnInterruptMask;
-	struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
-	enum ath9k_power_mode ah_powerMode;
 	bool ah_chipFullSleep;
 	u32 ah_atimWindow;
-	enum ath9k_ant_setting ah_diversityControl;
 	u16 ah_antennaSwitchSwap;
+	enum ath9k_power_mode ah_powerMode;
+	enum ath9k_ant_setting ah_diversityControl;
+
+	/* Calibration */
 	enum hal_cal_types ah_suppCals;
 	struct hal_cal_list ah_iqCalData;
 	struct hal_cal_list ah_adcGainCalData;
@@ -751,16 +730,16 @@
 		int32_t sign[AR5416_MAX_CHAINS];
 	} ah_Meas3;
 	u16 ah_CalSamples;
-	u32 ah_tx6PowerInHalfDbm;
+
 	u32 ah_staId1Defaults;
 	u32 ah_miscMode;
-	bool ah_tpcEnabled;
-	u32 ah_beaconInterval;
 	enum {
 		AUTO_32KHZ,
 		USE_32KHZ,
 		DONT_USE_32KHZ,
 	} ah_enable32kHzClock;
+
+	/* RF */
 	u32 *ah_analogBank0Data;
 	u32 *ah_analogBank1Data;
 	u32 *ah_analogBank2Data;
@@ -770,8 +749,9 @@
 	u32 *ah_analogBank7Data;
 	u32 *ah_addac5416_21;
 	u32 *ah_bank6Temp;
-	u32 ah_ofdmTxPower;
+
 	int16_t ah_txPowerIndexOffset;
+	u32 ah_beaconInterval;
 	u32 ah_slottime;
 	u32 ah_acktimeout;
 	u32 ah_ctstimeout;
@@ -780,7 +760,8 @@
 	u32 ah_gpioSelect;
 	u32 ah_polarity;
 	u32 ah_gpioBit;
-	bool ah_eepEnabled;
+
+	/* ANI */
 	u32 ah_procPhyErr;
 	bool ah_hasHwPhyCounters;
 	u32 ah_aniPeriod;
@@ -790,18 +771,14 @@
 	int ah_coarseHigh[5];
 	int ah_coarseLow[5];
 	int ah_firpwr[5];
-	u16 ah_ratesArray[16];
+	enum ath9k_ani_cmd ah_ani_function;
+
 	u32 ah_intrTxqs;
 	bool ah_intrMitigation;
-	u32 ah_cycleCount;
-	u32 ah_ctlBusy;
-	u32 ah_extBusy;
 	enum ath9k_ht_extprotspacing ah_extprotspacing;
 	u8 ah_txchainmask;
 	u8 ah_rxchainmask;
-	int ah_hwp;
-	void __iomem *ah_cal_mem;
-	enum ath9k_ani_cmd ah_ani_function;
+
 	struct ar5416IniArray ah_iniModes;
 	struct ar5416IniArray ah_iniCommon;
 	struct ar5416IniArray ah_iniBank0;
@@ -820,10 +797,6 @@
 
 #define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
 
-#define IS_5416_EMU(ah)					\
-	((ah->ah_devid == AR5416_DEVID_EMU) ||		\
-	 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
-
 #define ar5416RfDetach(ah) do {					\
 		if (AH5416(ah)->ah_rfHal.rfDetach != NULL)	\
 			AH5416(ah)->ah_rfHal.rfDetach(ah);	\
@@ -841,8 +814,8 @@
 #define REG_WRITE_ARRAY(iniarray, column, regWr) do {                   \
 		int r;							\
 		for (r = 0; r < ((iniarray)->ia_rows); r++) {		\
-			REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
-				INI_RA((iniarray), r, (column))); \
+			REG_WRITE(ah, INI_RA((iniarray), (r), 0),	\
+				  INI_RA((iniarray), r, (column)));	\
 			DO_DELAY(regWr);				\
 		}							\
 	} while (0)
@@ -852,30 +825,21 @@
 #define COEF_SCALE_S                24
 #define HT40_CHANNEL_CENTER_SHIFT   10
 
-#define ar5416CheckOpMode(_opmode)					\
-	((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) ||	\
-	 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
-
 #define AR5416_EEPROM_MAGIC_OFFSET  0x0
 
 #define AR5416_EEPROM_S             2
 #define AR5416_EEPROM_OFFSET        0x2000
-#define AR5416_EEPROM_START_ADDR			\
+#define AR5416_EEPROM_START_ADDR \
 	(AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
 #define AR5416_EEPROM_MAX           0xae0
-#define ar5416_get_eep_ver(_ahp)				\
+#define ar5416_get_eep_ver(_ahp) \
 	(((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
-#define ar5416_get_eep_rev(_ahp)				\
+#define ar5416_get_eep_rev(_ahp) \
 	(((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
-#define ar5416_get_ntxchains(_txchainmask)				\
+#define ar5416_get_ntxchains(_txchainmask) \
 	(((_txchainmask >> 2) & 1) + \
 		((_txchainmask >> 1) & 1) + (_txchainmask & 1))
 
-#define IS_EEP_MINOR_V3(_ahp) \
-	(ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV)  >= AR5416_EEP_MINOR_VER_3)
-
-#define FIXED_CCA_THRESHOLD 15
-
 #ifdef __BIG_ENDIAN
 #define AR5416_EEPROM_MAGIC 0x5aa5
 #else
@@ -910,8 +874,6 @@
 #define AR_GPIOD_MASK                   0x00001FFF
 #define AR_GPIO_BIT(_gpio)              (1 << (_gpio))
 
-#define MAX_ANALOG_START                319
-
 #define HAL_EP_RND(x, mul) \
 	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
 #define BEACON_RSSI(ahp) \
@@ -923,8 +885,6 @@
 #define AH_TIMEOUT         100000
 #define AH_TIME_QUANTUM        10
 
-#define IS(_c, _f)       (((_c)->channelFlags & _f) || 0)
-
 #define AR_KEYTABLE_SIZE 128
 #define POWER_UP_TIME    200000
 
@@ -964,6 +924,6 @@
 #define OFDM_SYMBOL_TIME_QUARTER    16
 
 u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
-			      enum eeprom_param param);
+			enum eeprom_param param);
 
 #endif
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index acebdf1..2caba44 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -22,8 +22,6 @@
 #define ATH_PCI_VERSION "0.1"
 
 #define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR	13
-#define IEEE80211_ACTION_CAT_HT			7
-#define IEEE80211_ACTION_HT_TXCHWIDTH		0
 
 static char *dev_info = "ath9k";
 
@@ -142,7 +140,7 @@
 	struct ath9k_keyval hk;
 	const u8 *mac = NULL;
 	int ret = 0;
-	enum ieee80211_if_types opmode;
+	enum nl80211_iftype opmode;
 
 	memset(&hk, 0, sizeof(hk));
 
@@ -181,14 +179,14 @@
 	 */
 	if (is_broadcast_ether_addr(addr)) {
 		switch (opmode) {
-		case IEEE80211_IF_TYPE_STA:
+		case NL80211_IFTYPE_STATION:
 			/* default key:  could be group WPA key
 			 * or could be static WEP key */
 			mac = NULL;
 			break;
-		case IEEE80211_IF_TYPE_IBSS:
+		case NL80211_IFTYPE_ADHOC:
 			break;
-		case IEEE80211_IF_TYPE_AP:
+		case NL80211_IFTYPE_AP:
 			break;
 		default:
 			ASSERT(0);
@@ -211,30 +209,25 @@
 
 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
 {
-#define ATH_MAX_NUM_KEYS 4
 	int freeslot;
 
-	freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0;
+	freeslot = (key->keyidx >= 4) ? 1 : 0;
 	ath_key_reset(sc, key->keyidx, freeslot);
-#undef ATH_MAX_NUM_KEYS
 }
 
 static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
 {
-/* Until mac80211 includes these fields */
-
-#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
-#define	IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3   /* 2 ^ 16 */
-#define	IEEE80211_HT_CAP_MPDUDENSITY_8 0x6     	/* 8 usec */
+#define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
+#define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */
 
 	ht_info->ht_supported = 1;
 	ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
-			|(u16)IEEE80211_HT_CAP_MIMO_PS
+			|(u16)IEEE80211_HT_CAP_SM_PS
 			|(u16)IEEE80211_HT_CAP_SGI_40
 			|(u16)IEEE80211_HT_CAP_DSSSCCK40;
 
-	ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536;
-	ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8;
+	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
+	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
 	/* setup supported mcs set */
 	memset(ht_info->supp_mcs_set, 0, 16);
 	ht_info->supp_mcs_set[0] = 0xff;
@@ -330,460 +323,6 @@
 	}
 }
 
-static int ath9k_start(struct ieee80211_hw *hw)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ieee80211_channel *curchan = hw->conf.channel;
-	int error = 0, pos;
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
-		"initial channel: %d MHz\n", __func__, curchan->center_freq);
-
-	/* setup initial channel */
-
-	pos = ath_get_channel(sc, curchan);
-	if (pos == -1) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
-		return -EINVAL;
-	}
-
-	sc->sc_ah->ah_channels[pos].chanmode =
-		(curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
-
-	/* open ath_dev */
-	error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
-	if (error) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: Unable to complete ath_open\n", __func__);
-		return error;
-	}
-
-	ieee80211_wake_queues(hw);
-	return 0;
-}
-
-static int ath9k_tx(struct ieee80211_hw *hw,
-		    struct sk_buff *skb)
-{
-	struct ath_softc *sc = hw->priv;
-	int hdrlen, padsize;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-	/*
-	 * As a temporary workaround, assign seq# here; this will likely need
-	 * to be cleaned up to work better with Beacon transmission and virtual
-	 * BSSes.
-	 */
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
-			sc->seq_no += 0x10;
-		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
-	}
-
-	/* Add the padding after the header if this is not already done */
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-	if (hdrlen & 3) {
-		padsize = hdrlen % 4;
-		if (skb_headroom(skb) < padsize)
-			return -1;
-		skb_push(skb, padsize);
-		memmove(skb->data, skb->data + padsize, hdrlen);
-	}
-
-	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
-		__func__,
-		skb);
-
-	if (ath_tx_start(sc, skb) != 0) {
-		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
-		dev_kfree_skb_any(skb);
-		/* FIXME: Check for proper return value from ATH_DEV */
-		return 0;
-	}
-
-	return 0;
-}
-
-static void ath9k_stop(struct ieee80211_hw *hw)
-{
-	struct ath_softc *sc = hw->priv;
-	int error;
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
-
-	error = ath_suspend(sc);
-	if (error)
-		DPRINTF(sc, ATH_DBG_CONFIG,
-			"%s: Device is no longer present\n", __func__);
-
-	ieee80211_stop_queues(hw);
-}
-
-static int ath9k_add_interface(struct ieee80211_hw *hw,
-			       struct ieee80211_if_init_conf *conf)
-{
-	struct ath_softc *sc = hw->priv;
-	int error, ic_opmode = 0;
-
-	/* Support only vap for now */
-
-	if (sc->sc_nvaps)
-		return -ENOBUFS;
-
-	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
-		ic_opmode = ATH9K_M_STA;
-		break;
-	case IEEE80211_IF_TYPE_IBSS:
-		ic_opmode = ATH9K_M_IBSS;
-		break;
-	default:
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: Only STA and IBSS are supported currently\n",
-			__func__);
-		return -EOPNOTSUPP;
-	}
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
-		__func__,
-		ic_opmode);
-
-	error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
-	if (error) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: Unable to attach vap, error: %d\n",
-			__func__, error);
-		return error;
-	}
-
-	return 0;
-}
-
-static void ath9k_remove_interface(struct ieee80211_hw *hw,
-				   struct ieee80211_if_init_conf *conf)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_vap *avp;
-	int error;
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
-
-	avp = sc->sc_vaps[0];
-	if (avp == NULL) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
-			__func__);
-		return;
-	}
-
-#ifdef CONFIG_SLOW_ANT_DIV
-	ath_slow_ant_div_stop(&sc->sc_antdiv);
-#endif
-
-	/* Update ratectrl */
-	ath_rate_newstate(sc, avp);
-
-	/* Reclaim beacon resources */
-	if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) {
-		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
-		ath_beacon_return(sc, avp);
-	}
-
-	/* Set interrupt mask */
-	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
-	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
-	sc->sc_beacons = 0;
-
-	error = ath_vap_detach(sc, 0);
-	if (error)
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: Unable to detach vap, error: %d\n",
-			__func__, error);
-}
-
-static int ath9k_config(struct ieee80211_hw *hw,
-			struct ieee80211_conf *conf)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ieee80211_channel *curchan = hw->conf.channel;
-	int pos;
-
-	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
-		__func__,
-		curchan->center_freq);
-
-	pos = ath_get_channel(sc, curchan);
-	if (pos == -1) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
-		return -EINVAL;
-	}
-
-	sc->sc_ah->ah_channels[pos].chanmode =
-		(curchan->band == IEEE80211_BAND_2GHZ) ?
-		CHANNEL_G : CHANNEL_A;
-
-	if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
-		sc->sc_ah->ah_channels[pos].chanmode =
-			ath_get_extchanmode(sc, curchan);
-
-	sc->sc_config.txpowlimit = 2 * conf->power_level;
-
-	/* set h/w channel */
-	if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
-			__func__);
-
-	return 0;
-}
-
-static int ath9k_config_interface(struct ieee80211_hw *hw,
-				  struct ieee80211_vif *vif,
-				  struct ieee80211_if_conf *conf)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_vap *avp;
-	u32 rfilt = 0;
-	int error, i;
-	DECLARE_MAC_BUF(mac);
-
-	avp = sc->sc_vaps[0];
-	if (avp == NULL) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	if ((conf->changed & IEEE80211_IFCC_BSSID) &&
-	    !is_zero_ether_addr(conf->bssid)) {
-		switch (vif->type) {
-		case IEEE80211_IF_TYPE_STA:
-		case IEEE80211_IF_TYPE_IBSS:
-			/* Update ratectrl about the new state */
-			ath_rate_newstate(sc, avp);
-
-			/* Set rx filter */
-			rfilt = ath_calcrxfilter(sc);
-			ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
-
-			/* Set BSSID */
-			memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
-			sc->sc_curaid = 0;
-			ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
-					       sc->sc_curaid);
-
-			/* Set aggregation protection mode parameters */
-			sc->sc_config.ath_aggr_prot = 0;
-
-			/*
-			 * Reset our TSF so that its value is lower than the
-			 * beacon that we are trying to catch.
-			 * Only then hw will update its TSF register with the
-			 * new beacon. Reset the TSF before setting the BSSID
-			 * to avoid allowing in any frames that would update
-			 * our TSF only to have us clear it
-			 * immediately thereafter.
-			 */
-			ath9k_hw_reset_tsf(sc->sc_ah);
-
-			/* Disable BMISS interrupt when we're not associated */
-			ath9k_hw_set_interrupts(sc->sc_ah,
-					sc->sc_imask &
-					~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
-			sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
-
-			DPRINTF(sc, ATH_DBG_CONFIG,
-				"%s: RX filter 0x%x bssid %s aid 0x%x\n",
-				__func__, rfilt,
-				print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
-
-			/* need to reconfigure the beacon */
-			sc->sc_beacons = 0;
-
-			break;
-		default:
-			break;
-		}
-	}
-
-	if ((conf->changed & IEEE80211_IFCC_BEACON) &&
-	    (vif->type == IEEE80211_IF_TYPE_IBSS)) {
-		/*
-		 * Allocate and setup the beacon frame.
-		 *
-		 * Stop any previous beacon DMA.  This may be
-		 * necessary, for example, when an ibss merge
-		 * causes reconfiguration; we may be called
-		 * with beacon transmission active.
-		 */
-		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
-
-		error = ath_beacon_alloc(sc, 0);
-		if (error != 0)
-			return error;
-
-		ath_beacon_sync(sc, 0);
-	}
-
-	/* Check for WLAN_CAPABILITY_PRIVACY ? */
-	if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) {
-		for (i = 0; i < IEEE80211_WEP_NKID; i++)
-			if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
-				ath9k_hw_keysetmac(sc->sc_ah,
-						   (u16)i,
-						   sc->sc_curbssid);
-	}
-
-	/* Only legacy IBSS for now */
-	if (vif->type == IEEE80211_IF_TYPE_IBSS)
-		ath_update_chainmask(sc, 0);
-
-	return 0;
-}
-
-#define SUPPORTED_FILTERS			\
-	(FIF_PROMISC_IN_BSS |			\
-	FIF_ALLMULTI |				\
-	FIF_CONTROL |				\
-	FIF_OTHER_BSS |				\
-	FIF_BCN_PRBRESP_PROMISC |		\
-	FIF_FCSFAIL)
-
-/* Accept unicast, bcast and mcast frames */
-
-static void ath9k_configure_filter(struct ieee80211_hw *hw,
-				   unsigned int changed_flags,
-				   unsigned int *total_flags,
-				   int mc_count,
-				   struct dev_mc_list *mclist)
-{
-	struct ath_softc *sc = hw->priv;
-
-	changed_flags &= SUPPORTED_FILTERS;
-	*total_flags &= SUPPORTED_FILTERS;
-
-	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
-			ath_scan_start(sc);
-		else
-			ath_scan_end(sc);
-	}
-}
-
-static void ath9k_sta_notify(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
-			     enum sta_notify_cmd cmd,
-			     const u8 *addr)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_node *an;
-	unsigned long flags;
-	DECLARE_MAC_BUF(mac);
-
-	spin_lock_irqsave(&sc->node_lock, flags);
-	an = ath_node_find(sc, (u8 *) addr);
-	spin_unlock_irqrestore(&sc->node_lock, flags);
-
-	switch (cmd) {
-	case STA_NOTIFY_ADD:
-		spin_lock_irqsave(&sc->node_lock, flags);
-		if (!an) {
-			ath_node_attach(sc, (u8 *)addr, 0);
-			DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
-				__func__,
-				print_mac(mac, addr));
-		} else {
-			ath_node_get(sc, (u8 *)addr);
-		}
-		spin_unlock_irqrestore(&sc->node_lock, flags);
-		break;
-	case STA_NOTIFY_REMOVE:
-		if (!an)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"%s: Removal of a non-existent node\n",
-				__func__);
-		else {
-			ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
-			DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
-				__func__,
-				print_mac(mac, addr));
-		}
-		break;
-	default:
-		break;
-	}
-}
-
-static int ath9k_conf_tx(struct ieee80211_hw *hw,
-			 u16 queue,
-			 const struct ieee80211_tx_queue_params *params)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath9k_tx_queue_info qi;
-	int ret = 0, qnum;
-
-	if (queue >= WME_NUM_AC)
-		return 0;
-
-	qi.tqi_aifs = params->aifs;
-	qi.tqi_cwmin = params->cw_min;
-	qi.tqi_cwmax = params->cw_max;
-	qi.tqi_burstTime = params->txop;
-	qnum = ath_get_hal_qnum(queue, sc);
-
-	DPRINTF(sc, ATH_DBG_CONFIG,
-		"%s: Configure tx [queue/halq] [%d/%d],  "
-		"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
-		__func__,
-		queue,
-		qnum,
-		params->aifs,
-		params->cw_min,
-		params->cw_max,
-		params->txop);
-
-	ret = ath_txq_update(sc, qnum, &qi);
-	if (ret)
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: TXQ Update failed\n", __func__);
-
-	return ret;
-}
-
-static int ath9k_set_key(struct ieee80211_hw *hw,
-			 enum set_key_cmd cmd,
-			 const u8 *local_addr,
-			 const u8 *addr,
-			 struct ieee80211_key_conf *key)
-{
-	struct ath_softc *sc = hw->priv;
-	int ret = 0;
-
-	DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__);
-
-	switch (cmd) {
-	case SET_KEY:
-		ret = ath_key_config(sc, addr, key);
-		if (!ret) {
-			set_bit(key->keyidx, sc->sc_keymap);
-			key->hw_key_idx = key->keyidx;
-			/* push IV and Michael MIC generation to stack */
-			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-			if (key->alg == ALG_TKIP)
-				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		}
-		break;
-	case DISABLE_KEY:
-		ath_key_delete(sc, key);
-		clear_bit(key->keyidx, sc->sc_keymap);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
 static void ath9k_ht_conf(struct ath_softc *sc,
 			  struct ieee80211_bss_conf *bss_conf)
 {
@@ -844,7 +383,7 @@
 
 		/* Configure the beacon */
 		ath_beacon_config(sc, 0);
-		sc->sc_beacons = 1;
+		sc->sc_flags |= SC_OP_BEACONS;
 
 		/* Reset rssi stats */
 		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
@@ -895,145 +434,6 @@
 	}
 }
 
-static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_bss_conf *bss_conf,
-				   u32 changed)
-{
-	struct ath_softc *sc = hw->priv;
-
-	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
-			__func__,
-			bss_conf->use_short_preamble);
-		if (bss_conf->use_short_preamble)
-			sc->sc_flags |= ATH_PREAMBLE_SHORT;
-		else
-			sc->sc_flags &= ~ATH_PREAMBLE_SHORT;
-	}
-
-	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
-			__func__,
-			bss_conf->use_cts_prot);
-		if (bss_conf->use_cts_prot &&
-		    hw->conf.channel->band != IEEE80211_BAND_5GHZ)
-			sc->sc_flags |= ATH_PROTECT_ENABLE;
-		else
-			sc->sc_flags &= ~ATH_PROTECT_ENABLE;
-	}
-
-	if (changed & BSS_CHANGED_HT) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
-			__func__,
-			bss_conf->assoc_ht);
-		ath9k_ht_conf(sc, bss_conf);
-	}
-
-	if (changed & BSS_CHANGED_ASSOC) {
-		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
-			__func__,
-			bss_conf->assoc);
-		ath9k_bss_assoc_info(sc, bss_conf);
-	}
-}
-
-static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
-{
-	u64 tsf;
-	struct ath_softc *sc = hw->priv;
-	struct ath_hal *ah = sc->sc_ah;
-
-	tsf = ath9k_hw_gettsf64(ah);
-
-	return tsf;
-}
-
-static void ath9k_reset_tsf(struct ieee80211_hw *hw)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_hal *ah = sc->sc_ah;
-
-	ath9k_hw_reset_tsf(ah);
-}
-
-static int ath9k_ampdu_action(struct ieee80211_hw *hw,
-		       enum ieee80211_ampdu_mlme_action action,
-		       const u8 *addr,
-		       u16 tid,
-		       u16 *ssn)
-{
-	struct ath_softc *sc = hw->priv;
-	int ret = 0;
-
-	switch (action) {
-	case IEEE80211_AMPDU_RX_START:
-		ret = ath_rx_aggr_start(sc, addr, tid, ssn);
-		if (ret < 0)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"%s: Unable to start RX aggregation\n",
-				__func__);
-		break;
-	case IEEE80211_AMPDU_RX_STOP:
-		ret = ath_rx_aggr_stop(sc, addr, tid);
-		if (ret < 0)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"%s: Unable to stop RX aggregation\n",
-				__func__);
-		break;
-	case IEEE80211_AMPDU_TX_START:
-		ret = ath_tx_aggr_start(sc, addr, tid, ssn);
-		if (ret < 0)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"%s: Unable to start TX aggregation\n",
-				__func__);
-		else
-			ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
-		break;
-	case IEEE80211_AMPDU_TX_STOP:
-		ret = ath_tx_aggr_stop(sc, addr, tid);
-		if (ret < 0)
-			DPRINTF(sc, ATH_DBG_FATAL,
-				"%s: Unable to stop TX aggregation\n",
-				__func__);
-
-		ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
-		break;
-	default:
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: Unknown AMPDU action\n", __func__);
-	}
-
-	return ret;
-}
-
-static struct ieee80211_ops ath9k_ops = {
-	.tx 		    = ath9k_tx,
-	.start 		    = ath9k_start,
-	.stop 		    = ath9k_stop,
-	.add_interface 	    = ath9k_add_interface,
-	.remove_interface   = ath9k_remove_interface,
-	.config 	    = ath9k_config,
-	.config_interface   = ath9k_config_interface,
-	.configure_filter   = ath9k_configure_filter,
-	.get_stats          = NULL,
-	.sta_notify         = ath9k_sta_notify,
-	.conf_tx 	    = ath9k_conf_tx,
-	.get_tx_stats 	    = NULL,
-	.bss_info_changed   = ath9k_bss_info_changed,
-	.set_tim            = NULL,
-	.set_key            = ath9k_set_key,
-	.hw_scan            = NULL,
-	.get_tkip_seq       = NULL,
-	.set_rts_threshold  = NULL,
-	.set_frag_threshold = NULL,
-	.set_retry_limit    = NULL,
-	.get_tsf 	    = ath9k_get_tsf,
-	.reset_tsf 	    = ath9k_reset_tsf,
-	.tx_last_beacon     = NULL,
-	.ampdu_action       = ath9k_ampdu_action
-};
-
 void ath_get_beaconconfig(struct ath_softc *sc,
 			  int if_id,
 			  struct ath_beacon_config *conf)
@@ -1048,15 +448,6 @@
 	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
 }
 
-int ath_update_beacon(struct ath_softc *sc,
-		      int if_id,
-		      struct ath_beacon_offset *bo,
-		      struct sk_buff *skb,
-		      int mcast)
-{
-	return 0;
-}
-
 void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 		     struct ath_xmit_status *tx_status, struct ath_node *an)
 {
@@ -1096,7 +487,7 @@
 		ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
 }
 
-int ath__rx_indicate(struct ath_softc *sc,
+int _ath_rx_indicate(struct ath_softc *sc,
 		     struct sk_buff *skb,
 		     struct ath_recv_status *status,
 		     u16 keyix)
@@ -1116,9 +507,6 @@
 		skb_pull(skb, padsize);
 	}
 
-	/* remove FCS before passing up to protocol stack */
-	skb_trim(skb, (skb->len - FCS_LEN));
-
 	/* Prepare rx status */
 	ath9k_rx_prepare(sc, skb, status, &rx_status);
 
@@ -1167,17 +555,338 @@
 	return 0;
 }
 
-enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
+/********************************/
+/*	 LED functions		*/
+/********************************/
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+			       enum led_brightness brightness)
 {
-	return sc->sc_ht_info.tx_chan_width;
+	struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+	struct ath_softc *sc = led->sc;
+
+	switch (brightness) {
+	case LED_OFF:
+		if (led->led_type == ATH_LED_ASSOC ||
+		    led->led_type == ATH_LED_RADIO)
+			sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+		ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
+				(led->led_type == ATH_LED_RADIO) ? 1 :
+				!!(sc->sc_flags & SC_OP_LED_ASSOCIATED));
+		break;
+	case LED_FULL:
+		if (led->led_type == ATH_LED_ASSOC)
+			sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+		ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
+		break;
+	default:
+		break;
+	}
 }
 
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+			    char *trigger)
+{
+	int ret;
+
+	led->sc = sc;
+	led->led_cdev.name = led->name;
+	led->led_cdev.default_trigger = trigger;
+	led->led_cdev.brightness_set = ath_led_brightness;
+
+	ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+	if (ret)
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"Failed to register led:%s", led->name);
+	else
+		led->registered = 1;
+	return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+	if (led->registered) {
+		led_classdev_unregister(&led->led_cdev);
+		led->registered = 0;
+	}
+}
+
+static void ath_deinit_leds(struct ath_softc *sc)
+{
+	ath_unregister_led(&sc->assoc_led);
+	sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+	ath_unregister_led(&sc->tx_led);
+	ath_unregister_led(&sc->rx_led);
+	ath_unregister_led(&sc->radio_led);
+	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
+}
+
+static void ath_init_leds(struct ath_softc *sc)
+{
+	char *trigger;
+	int ret;
+
+	/* Configure gpio 1 for output */
+	ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
+			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	/* LED off, active low */
+	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
+
+	trigger = ieee80211_get_radio_led_name(sc->hw);
+	snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+		"ath9k-%s:radio", wiphy_name(sc->hw->wiphy));
+	ret = ath_register_led(sc, &sc->radio_led, trigger);
+	sc->radio_led.led_type = ATH_LED_RADIO;
+	if (ret)
+		goto fail;
+
+	trigger = ieee80211_get_assoc_led_name(sc->hw);
+	snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+		"ath9k-%s:assoc", wiphy_name(sc->hw->wiphy));
+	ret = ath_register_led(sc, &sc->assoc_led, trigger);
+	sc->assoc_led.led_type = ATH_LED_ASSOC;
+	if (ret)
+		goto fail;
+
+	trigger = ieee80211_get_tx_led_name(sc->hw);
+	snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+		"ath9k-%s:tx", wiphy_name(sc->hw->wiphy));
+	ret = ath_register_led(sc, &sc->tx_led, trigger);
+	sc->tx_led.led_type = ATH_LED_TX;
+	if (ret)
+		goto fail;
+
+	trigger = ieee80211_get_rx_led_name(sc->hw);
+	snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+		"ath9k-%s:rx", wiphy_name(sc->hw->wiphy));
+	ret = ath_register_led(sc, &sc->rx_led, trigger);
+	sc->rx_led.led_type = ATH_LED_RX;
+	if (ret)
+		goto fail;
+
+	return;
+
+fail:
+	ath_deinit_leds(sc);
+}
+
+#ifdef CONFIG_RFKILL
+/*******************/
+/*	Rfkill	   */
+/*******************/
+
+static void ath_radio_enable(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	int status;
+
+	spin_lock_bh(&sc->sc_resetlock);
+	if (!ath9k_hw_reset(ah, ah->ah_curchan,
+			    sc->sc_ht_info.tx_chan_width,
+			    sc->sc_tx_chainmask,
+			    sc->sc_rx_chainmask,
+			    sc->sc_ht_extprotspacing,
+			    false, &status)) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: unable to reset channel %u (%u MHz) "
+			"flags 0x%x hal status %u\n", __func__,
+			ath9k_hw_mhz2ieee(ah,
+					  ah->ah_curchan->channel,
+					  ah->ah_curchan->channelFlags),
+			ah->ah_curchan->channel,
+			ah->ah_curchan->channelFlags, status);
+	}
+	spin_unlock_bh(&sc->sc_resetlock);
+
+	ath_update_txpow(sc);
+	if (ath_startrecv(sc) != 0) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: unable to restart recv logic\n", __func__);
+		return;
+	}
+
+	if (sc->sc_flags & SC_OP_BEACONS)
+		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */
+
+	/* Re-enable interrupts */
+	ath9k_hw_set_interrupts(ah, sc->sc_imask);
+
+	/* Enable LED */
+	ath9k_hw_cfg_output(ah, ATH_LED_PIN,
+			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
+
+	ieee80211_wake_queues(sc->hw);
+}
+
+static void ath_radio_disable(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	int status;
+
+	ieee80211_stop_queues(sc->hw);
+
+	/* Disable LED */
+	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
+	ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
+
+	/* Disable interrupts */
+	ath9k_hw_set_interrupts(ah, 0);
+
+	ath_draintxq(sc, false);	/* clear pending tx frames */
+	ath_stoprecv(sc);		/* turn off frame recv */
+	ath_flushrecv(sc);		/* flush recv queue */
+
+	spin_lock_bh(&sc->sc_resetlock);
+	if (!ath9k_hw_reset(ah, ah->ah_curchan,
+			    sc->sc_ht_info.tx_chan_width,
+			    sc->sc_tx_chainmask,
+			    sc->sc_rx_chainmask,
+			    sc->sc_ht_extprotspacing,
+			    false, &status)) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: unable to reset channel %u (%uMhz) "
+			"flags 0x%x hal status %u\n", __func__,
+			ath9k_hw_mhz2ieee(ah,
+				ah->ah_curchan->channel,
+				ah->ah_curchan->channelFlags),
+			ah->ah_curchan->channel,
+			ah->ah_curchan->channelFlags, status);
+	}
+	spin_unlock_bh(&sc->sc_resetlock);
+
+	ath9k_hw_phy_disable(ah);
+	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
+}
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+
+	return ath9k_hw_gpio_get(ah, ah->ah_rfkill_gpio) ==
+				  ah->ah_rfkill_polarity;
+}
+
+/* h/w rfkill poll function */
+static void ath_rfkill_poll(struct work_struct *work)
+{
+	struct ath_softc *sc = container_of(work, struct ath_softc,
+					    rf_kill.rfkill_poll.work);
+	bool radio_on;
+
+	if (sc->sc_flags & SC_OP_INVALID)
+		return;
+
+	radio_on = !ath_is_rfkill_set(sc);
+
+	/*
+	 * Enable/disable the radio only when the state of the
+	 * RF switch actually changes.
+	 */
+	if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
+		enum rfkill_state state;
+
+		if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
+			state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
+				: RFKILL_STATE_HARD_BLOCKED;
+		} else if (radio_on) {
+			ath_radio_enable(sc);
+			state = RFKILL_STATE_UNBLOCKED;
+		} else {
+			ath_radio_disable(sc);
+			state = RFKILL_STATE_HARD_BLOCKED;
+		}
+
+		if (state == RFKILL_STATE_HARD_BLOCKED)
+			sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
+		else
+			sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
+
+		rfkill_force_state(sc->rf_kill.rfkill, state);
+	}
+
+	queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
+			   msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
+}
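
As a side note on the poll worker above, here is a minimal pure-function sketch of the state decision it makes. All names are invented for illustration; the real worker additionally calls ath_radio_enable()/ath_radio_disable() and reports the result through rfkill_force_state().

/*
 * Pure-function restatement of the decision above; every identifier
 * here is illustrative only, not part of ath9k or the rfkill core.
 */
#include <stdbool.h>
#include <stdio.h>

enum example_rfkill_state {
	EX_UNBLOCKED,		/* radio may operate */
	EX_SOFT_BLOCKED,	/* blocked by a software request */
	EX_HARD_BLOCKED,	/* blocked by the hardware switch */
};

static enum example_rfkill_state
rfkill_decision(bool radio_on, bool sw_blocked)
{
	/* a software block wins for reporting purposes */
	if (sw_blocked)
		return radio_on ? EX_SOFT_BLOCKED : EX_HARD_BLOCKED;

	/* otherwise the GPIO reading alone decides */
	return radio_on ? EX_UNBLOCKED : EX_HARD_BLOCKED;
}

int main(void)
{
	printf("%d %d\n",
	       rfkill_decision(true, false),	/* EX_UNBLOCKED */
	       rfkill_decision(false, true));	/* EX_HARD_BLOCKED */
	return 0;
}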
+
+/* s/w rfkill handler */
+static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
+{
+	struct ath_softc *sc = data;
+
+	switch (state) {
+	case RFKILL_STATE_SOFT_BLOCKED:
+		if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
+		    SC_OP_RFKILL_SW_BLOCKED)))
+			ath_radio_disable(sc);
+		sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
+		return 0;
+	case RFKILL_STATE_UNBLOCKED:
+		if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
+			sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
+			if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
+				DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
+					"radio as it is disabled by h/w \n");
+				return -EPERM;
+			}
+			ath_radio_enable(sc);
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Init s/w rfkill */
+static int ath_init_sw_rfkill(struct ath_softc *sc)
+{
+	sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
+					     RFKILL_TYPE_WLAN);
+	if (!sc->rf_kill.rfkill) {
+		DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
+		return -ENOMEM;
+	}
+
+	snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
+		"ath9k-%s:rfkill", wiphy_name(sc->hw->wiphy));
+	sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
+	sc->rf_kill.rfkill->data = sc;
+	sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
+	sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
+	sc->rf_kill.rfkill->user_claim_unsupported = 1;
+
+	return 0;
+}
+
+/* Deinitialize rfkill */
+static void ath_deinit_rfkill(struct ath_softc *sc)
+{
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
+
+	if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
+		rfkill_unregister(sc->rf_kill.rfkill);
+		sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
+		sc->rf_kill.rfkill = NULL;
+	}
+}
+#endif /* CONFIG_RFKILL */
+
 static int ath_detach(struct ath_softc *sc)
 {
 	struct ieee80211_hw *hw = sc->hw;
 
 	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
 
+	/* Deinit LED control */
+	ath_deinit_leds(sc);
+
+#ifdef CONFIG_RFKILL
+	/* deinit rfkill */
+	ath_deinit_rfkill(sc);
+#endif
+
 	/* Unregister hw */
 
 	ieee80211_unregister_hw(hw);
@@ -1271,23 +980,677 @@
 		goto bad;
 	}
 
+	/* Initialize LED control */
+	ath_init_leds(sc);
+
+#ifdef CONFIG_RFKILL
+	/* Initialize h/w rfkill */
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
+
+	/* Initialize s/w rfkill */
+	if (ath_init_sw_rfkill(sc))
+		goto detach;
+#endif
+
 	/* initialize tx/rx engine */
 
 	error = ath_tx_init(sc, ATH_TXBUF);
 	if (error != 0)
-		goto bad1;
+		goto detach;
 
 	error = ath_rx_init(sc, ATH_RXBUF);
 	if (error != 0)
-		goto bad1;
+		goto detach;
 
 	return 0;
-bad1:
+detach:
 	ath_detach(sc);
 bad:
 	return error;
 }
 
+static int ath9k_start(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ieee80211_channel *curchan = hw->conf.channel;
+	int error = 0, pos;
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
+		"initial channel: %d MHz\n", __func__, curchan->center_freq);
+
+	/* setup initial channel */
+
+	pos = ath_get_channel(sc, curchan);
+	if (pos == -1) {
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
+		return -EINVAL;
+	}
+
+	sc->sc_ah->ah_channels[pos].chanmode =
+		(curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
+
+	/* open ath_dev */
+	error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
+	if (error) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Unable to complete ath_open\n", __func__);
+		return error;
+	}
+
+#ifdef CONFIG_RFKILL
+	/* Start rfkill polling */
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		queue_delayed_work(sc->hw->workqueue,
+				   &sc->rf_kill.rfkill_poll, 0);
+
+	if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
+		if (rfkill_register(sc->rf_kill.rfkill)) {
+			DPRINTF(sc, ATH_DBG_FATAL,
+					"Unable to register rfkill\n");
+			rfkill_free(sc->rf_kill.rfkill);
+
+			/* Deinitialize the device */
+			if (sc->pdev->irq)
+				free_irq(sc->pdev->irq, sc);
+			ath_detach(sc);
+			pci_iounmap(sc->pdev, sc->mem);
+			pci_release_region(sc->pdev, 0);
+			pci_disable_device(sc->pdev);
+			ieee80211_free_hw(hw);
+			return -EIO;
+		} else {
+			sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
+		}
+	}
+#endif
+
+	ieee80211_wake_queues(hw);
+	return 0;
+}
+
+static int ath9k_tx(struct ieee80211_hw *hw,
+		    struct sk_buff *skb)
+{
+	struct ath_softc *sc = hw->priv;
+	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
+	/* Add the padding after the header if this is not already done */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdrlen & 3) {
+		padsize = hdrlen % 4;
+		if (skb_headroom(skb) < padsize)
+			return -1;
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, hdrlen);
+	}
+
+	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
+		__func__,
+		skb);
+
+	if (ath_tx_start(sc, skb) != 0) {
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
+		dev_kfree_skb_any(skb);
+		/* FIXME: Check for proper return value from ATH_DEV */
+		return 0;
+	}
+
+	return 0;
+}
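
For clarity, the padding step above can be restated on a plain byte buffer: 802.11 headers are not always a multiple of four bytes (a QoS data header is 26 bytes), so the driver grows the frame at the front by hdrlen % 4 and slides the header forward, leaving the pad between header and payload. A hedged sketch with a made-up helper name; the real code works on the skb with skb_push() and memmove().

#include <stddef.h>
#include <string.h>

/*
 * buf[headroom .. headroom+len) holds the frame, header first.
 * Returns the new frame length, or 0 if there is too little headroom.
 */
static size_t pad_80211_header(unsigned char *buf, size_t headroom,
			       size_t len, size_t hdrlen)
{
	size_t padsize = hdrlen % 4;	/* e.g. 2 for a 26-byte QoS header */

	if (!(hdrlen & 3))
		return len;		/* header already 4-byte aligned */
	if (headroom < padsize)
		return 0;

	/* grow the frame at the front, then slide the header forward,
	 * leaving the pad bytes between header and payload */
	memmove(buf + headroom - padsize, buf + headroom, hdrlen);
	return len + padsize;
}

int main(void)
{
	unsigned char frame[64] = { 0 };

	/* 26-byte QoS header + 14-byte payload, 8 bytes of headroom */
	return pad_80211_header(frame, 8, 40, 26) == 42 ? 0 : 1;
}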
+
+static void ath9k_stop(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	int error;
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
+
+	error = ath_suspend(sc);
+	if (error)
+		DPRINTF(sc, ATH_DBG_CONFIG,
+			"%s: Device is no longer present\n", __func__);
+
+	ieee80211_stop_queues(hw);
+
+#ifdef CONFIG_RFKILL
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
+#endif
+}
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+			       struct ieee80211_if_init_conf *conf)
+{
+	struct ath_softc *sc = hw->priv;
+	int error, ic_opmode = 0;
+
+	/* Support only one VAP for now */
+
+	if (sc->sc_nvaps)
+		return -ENOBUFS;
+
+	switch (conf->type) {
+	case NL80211_IFTYPE_STATION:
+		ic_opmode = ATH9K_M_STA;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ic_opmode = ATH9K_M_IBSS;
+		break;
+	case NL80211_IFTYPE_AP:
+		ic_opmode = ATH9K_M_HOSTAP;
+		break;
+	default:
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Interface type %d not yet supported\n",
+			__func__, conf->type);
+		return -EOPNOTSUPP;
+	}
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
+		__func__,
+		ic_opmode);
+
+	error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
+	if (error) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Unable to attach vap, error: %d\n",
+			__func__, error);
+		return error;
+	}
+
+	return 0;
+}
+
+static void ath9k_remove_interface(struct ieee80211_hw *hw,
+				   struct ieee80211_if_init_conf *conf)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_vap *avp;
+	int error;
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
+
+	avp = sc->sc_vaps[0];
+	if (avp == NULL) {
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
+			__func__);
+		return;
+	}
+
+#ifdef CONFIG_SLOW_ANT_DIV
+	ath_slow_ant_div_stop(&sc->sc_antdiv);
+#endif
+
+	/* Update ratectrl */
+	ath_rate_newstate(sc, avp);
+
+	/* Reclaim beacon resources */
+	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
+	    sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+		ath_beacon_return(sc, avp);
+	}
+
+	/* Set interrupt mask */
+	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
+	sc->sc_flags &= ~SC_OP_BEACONS;
+
+	error = ath_vap_detach(sc, 0);
+	if (error)
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Unable to detach vap, error: %d\n",
+			__func__, error);
+}
+
+static int ath9k_config(struct ieee80211_hw *hw,
+			struct ieee80211_conf *conf)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ieee80211_channel *curchan = hw->conf.channel;
+	int pos;
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
+		__func__,
+		curchan->center_freq);
+
+	pos = ath_get_channel(sc, curchan);
+	if (pos == -1) {
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
+		return -EINVAL;
+	}
+
+	sc->sc_ah->ah_channels[pos].chanmode =
+		(curchan->band == IEEE80211_BAND_2GHZ) ?
+		CHANNEL_G : CHANNEL_A;
+
+	if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
+		sc->sc_ah->ah_channels[pos].chanmode =
+			ath_get_extchanmode(sc, curchan);
+
+	sc->sc_config.txpowlimit = 2 * conf->power_level;
+
+	/* set h/w channel */
+	if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
+			__func__);
+
+	return 0;
+}
+
+static int ath9k_config_interface(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_if_conf *conf)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_vap *avp;
+	u32 rfilt = 0;
+	int error, i;
+	DECLARE_MAC_BUF(mac);
+
+	avp = sc->sc_vaps[0];
+	if (avp == NULL) {
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* TODO: Need to decide which hw opmode to use for multi-interface
+	 * cases */
+	if (vif->type == NL80211_IFTYPE_AP &&
+	    ah->ah_opmode != ATH9K_M_HOSTAP) {
+		ah->ah_opmode = ATH9K_M_HOSTAP;
+		ath9k_hw_setopmode(ah);
+		ath9k_hw_write_associd(ah, sc->sc_myaddr, 0);
+		/* Request full reset to get hw opmode changed properly */
+		sc->sc_flags |= SC_OP_FULL_RESET;
+	}
+
+	if ((conf->changed & IEEE80211_IFCC_BSSID) &&
+	    !is_zero_ether_addr(conf->bssid)) {
+		switch (vif->type) {
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_ADHOC:
+			/* Update ratectrl about the new state */
+			ath_rate_newstate(sc, avp);
+
+			/* Set BSSID */
+			memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
+			sc->sc_curaid = 0;
+			ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
+					       sc->sc_curaid);
+
+			/* Set aggregation protection mode parameters */
+			sc->sc_config.ath_aggr_prot = 0;
+
+			/*
+			 * Reset our TSF so that its value is lower than the
+			 * beacon that we are trying to catch.
+			 * Only then hw will update its TSF register with the
+			 * new beacon. Reset the TSF before setting the BSSID
+			 * to avoid allowing in any frames that would update
+			 * our TSF only to have us clear it
+			 * immediately thereafter.
+			 */
+			ath9k_hw_reset_tsf(sc->sc_ah);
+
+			/* Disable BMISS interrupt when we're not associated */
+			ath9k_hw_set_interrupts(sc->sc_ah,
+					sc->sc_imask &
+					~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
+			sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+
+			DPRINTF(sc, ATH_DBG_CONFIG,
+				"%s: RX filter 0x%x bssid %s aid 0x%x\n",
+				__func__, rfilt,
+				print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
+
+			/* need to reconfigure the beacon */
+			sc->sc_flags &= ~SC_OP_BEACONS;
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	if ((conf->changed & IEEE80211_IFCC_BEACON) &&
+	    ((vif->type == NL80211_IFTYPE_ADHOC) ||
+	     (vif->type == NL80211_IFTYPE_AP))) {
+		/*
+		 * Allocate and setup the beacon frame.
+		 *
+		 * Stop any previous beacon DMA.  This may be
+		 * necessary, for example, when an ibss merge
+		 * causes reconfiguration; we may be called
+		 * with beacon transmission active.
+		 */
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+
+		error = ath_beacon_alloc(sc, 0);
+		if (error != 0)
+			return error;
+
+		ath_beacon_sync(sc, 0);
+	}
+
+	/* Check for WLAN_CAPABILITY_PRIVACY ? */
+	if (avp->av_opmode != NL80211_IFTYPE_STATION) {
+		for (i = 0; i < IEEE80211_WEP_NKID; i++)
+			if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
+				ath9k_hw_keysetmac(sc->sc_ah,
+						   (u16)i,
+						   sc->sc_curbssid);
+	}
+
+	/* Only legacy IBSS for now */
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		ath_update_chainmask(sc, 0);
+
+	return 0;
+}
+
+#define SUPPORTED_FILTERS			\
+	(FIF_PROMISC_IN_BSS |			\
+	FIF_ALLMULTI |				\
+	FIF_CONTROL |				\
+	FIF_OTHER_BSS |				\
+	FIF_BCN_PRBRESP_PROMISC |		\
+	FIF_FCSFAIL)
+
+/* FIXME: sc->sc_full_reset ? */
+static void ath9k_configure_filter(struct ieee80211_hw *hw,
+				   unsigned int changed_flags,
+				   unsigned int *total_flags,
+				   int mc_count,
+				   struct dev_mc_list *mclist)
+{
+	struct ath_softc *sc = hw->priv;
+	u32 rfilt;
+
+	changed_flags &= SUPPORTED_FILTERS;
+	*total_flags &= SUPPORTED_FILTERS;
+
+	sc->rx_filter = *total_flags;
+	rfilt = ath_calcrxfilter(sc);
+	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
+
+	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+			ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
+	}
+
+	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set HW RX filter: 0x%x\n",
+		__func__, sc->rx_filter);
+}
+
+static void ath9k_sta_notify(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     enum sta_notify_cmd cmd,
+			     struct ieee80211_sta *sta)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_node *an;
+	unsigned long flags;
+	DECLARE_MAC_BUF(mac);
+
+	spin_lock_irqsave(&sc->node_lock, flags);
+	an = ath_node_find(sc, sta->addr);
+	spin_unlock_irqrestore(&sc->node_lock, flags);
+
+	switch (cmd) {
+	case STA_NOTIFY_ADD:
+		spin_lock_irqsave(&sc->node_lock, flags);
+		if (!an) {
+			ath_node_attach(sc, sta->addr, 0);
+			DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
+				__func__, print_mac(mac, sta->addr));
+		} else {
+			ath_node_get(sc, sta->addr);
+		}
+		spin_unlock_irqrestore(&sc->node_lock, flags);
+		break;
+	case STA_NOTIFY_REMOVE:
+		if (!an)
+			DPRINTF(sc, ATH_DBG_FATAL,
+				"%s: Removal of a non-existent node\n",
+				__func__);
+		else {
+			ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
+			DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
+				__func__,
+				print_mac(mac, sta->addr));
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int ath9k_conf_tx(struct ieee80211_hw *hw,
+			 u16 queue,
+			 const struct ieee80211_tx_queue_params *params)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath9k_tx_queue_info qi;
+	int ret = 0, qnum;
+
+	if (queue >= WME_NUM_AC)
+		return 0;
+
+	qi.tqi_aifs = params->aifs;
+	qi.tqi_cwmin = params->cw_min;
+	qi.tqi_cwmax = params->cw_max;
+	qi.tqi_burstTime = params->txop;
+	qnum = ath_get_hal_qnum(queue, sc);
+
+	DPRINTF(sc, ATH_DBG_CONFIG,
+		"%s: Configure tx [queue/halq] [%d/%d],  "
+		"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+		__func__,
+		queue,
+		qnum,
+		params->aifs,
+		params->cw_min,
+		params->cw_max,
+		params->txop);
+
+	ret = ath_txq_update(sc, qnum, &qi);
+	if (ret)
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: TXQ Update failed\n", __func__);
+
+	return ret;
+}
+
+static int ath9k_set_key(struct ieee80211_hw *hw,
+			 enum set_key_cmd cmd,
+			 const u8 *local_addr,
+			 const u8 *addr,
+			 struct ieee80211_key_conf *key)
+{
+	struct ath_softc *sc = hw->priv;
+	int ret = 0;
+
+	DPRINTF(sc, ATH_DBG_KEYCACHE, "%s: Set HW Key\n", __func__);
+
+	switch (cmd) {
+	case SET_KEY:
+		ret = ath_key_config(sc, addr, key);
+		if (!ret) {
+			set_bit(key->keyidx, sc->sc_keymap);
+			key->hw_key_idx = key->keyidx;
+			/* push IV and Michael MIC generation to stack */
+			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+			if (key->alg == ALG_TKIP)
+				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		}
+		break;
+	case DISABLE_KEY:
+		ath_key_delete(sc, key);
+		clear_bit(key->keyidx, sc->sc_keymap);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
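
The SET_KEY/DISABLE_KEY paths above track which key-cache slots are in use by setting and clearing bits in sc->sc_keymap. A standalone sketch of that bookkeeping with a plain C bitmap; the names and the 128-slot size are illustrative assumptions, not hardware values.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_KEYCACHE_SIZE 128

static uint32_t ex_keymap[EX_KEYCACHE_SIZE / 32];

static void ex_key_mark_used(unsigned int idx)
{
	ex_keymap[idx / 32] |= 1u << (idx % 32);
}

static void ex_key_mark_free(unsigned int idx)
{
	ex_keymap[idx / 32] &= ~(1u << (idx % 32));
}

static bool ex_key_is_used(unsigned int idx)
{
	return ex_keymap[idx / 32] & (1u << (idx % 32));
}

int main(void)
{
	ex_key_mark_used(4);		/* mirrors the SET_KEY path */
	assert(ex_key_is_used(4));
	ex_key_mark_free(4);		/* mirrors the DISABLE_KEY path */
	assert(!ex_key_is_used(4));
	return 0;
}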
+
+static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_bss_conf *bss_conf,
+				   u32 changed)
+{
+	struct ath_softc *sc = hw->priv;
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
+			__func__,
+			bss_conf->use_short_preamble);
+		if (bss_conf->use_short_preamble)
+			sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
+		else
+			sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
+	}
+
+	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
+			__func__,
+			bss_conf->use_cts_prot);
+		if (bss_conf->use_cts_prot &&
+		    hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+			sc->sc_flags |= SC_OP_PROTECT_ENABLE;
+		else
+			sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
+	}
+
+	if (changed & BSS_CHANGED_HT) {
+		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
+			__func__,
+			bss_conf->assoc_ht);
+		ath9k_ht_conf(sc, bss_conf);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
+			__func__,
+			bss_conf->assoc);
+		ath9k_bss_assoc_info(sc, bss_conf);
+	}
+}
+
+static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
+{
+	u64 tsf;
+	struct ath_softc *sc = hw->priv;
+	struct ath_hal *ah = sc->sc_ah;
+
+	tsf = ath9k_hw_gettsf64(ah);
+
+	return tsf;
+}
+
+static void ath9k_reset_tsf(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hal *ah = sc->sc_ah;
+
+	ath9k_hw_reset_tsf(ah);
+}
+
+static int ath9k_ampdu_action(struct ieee80211_hw *hw,
+		       enum ieee80211_ampdu_mlme_action action,
+		       struct ieee80211_sta *sta,
+		       u16 tid, u16 *ssn)
+{
+	struct ath_softc *sc = hw->priv;
+	int ret = 0;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		ret = ath_rx_aggr_start(sc, sta->addr, tid, ssn);
+		if (ret < 0)
+			DPRINTF(sc, ATH_DBG_FATAL,
+				"%s: Unable to start RX aggregation\n",
+				__func__);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		ret = ath_rx_aggr_stop(sc, sta->addr, tid);
+		if (ret < 0)
+			DPRINTF(sc, ATH_DBG_FATAL,
+				"%s: Unable to stop RX aggregation\n",
+				__func__);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		ret = ath_tx_aggr_start(sc, sta->addr, tid, ssn);
+		if (ret < 0)
+			DPRINTF(sc, ATH_DBG_FATAL,
+				"%s: Unable to start TX aggregation\n",
+				__func__);
+		else
+			ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP:
+		ret = ath_tx_aggr_stop(sc, sta->addr, tid);
+		if (ret < 0)
+			DPRINTF(sc, ATH_DBG_FATAL,
+				"%s: Unable to stop TX aggregation\n",
+				__func__);
+
+		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
+		break;
+	default:
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Unknown AMPDU action\n", __func__);
+	}
+
+	return ret;
+}
+
+static struct ieee80211_ops ath9k_ops = {
+	.tx 		    = ath9k_tx,
+	.start 		    = ath9k_start,
+	.stop 		    = ath9k_stop,
+	.add_interface 	    = ath9k_add_interface,
+	.remove_interface   = ath9k_remove_interface,
+	.config 	    = ath9k_config,
+	.config_interface   = ath9k_config_interface,
+	.configure_filter   = ath9k_configure_filter,
+	.get_stats          = NULL,
+	.sta_notify         = ath9k_sta_notify,
+	.conf_tx 	    = ath9k_conf_tx,
+	.get_tx_stats 	    = NULL,
+	.bss_info_changed   = ath9k_bss_info_changed,
+	.set_tim            = NULL,
+	.set_key            = ath9k_set_key,
+	.hw_scan            = NULL,
+	.get_tkip_seq       = NULL,
+	.set_rts_threshold  = NULL,
+	.set_frag_threshold = NULL,
+	.set_retry_limit    = NULL,
+	.get_tsf 	    = ath9k_get_tsf,
+	.reset_tsf 	    = ath9k_reset_tsf,
+	.tx_last_beacon     = NULL,
+	.ampdu_action       = ath9k_ampdu_action
+};
+
 static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	void __iomem *mem;
@@ -1361,9 +1724,16 @@
 		goto bad2;
 	}
 
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+		IEEE80211_HW_SIGNAL_DBM |
 		IEEE80211_HW_NOISE_DBM;
 
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
 	SET_IEEE80211_DEV(hw, &pdev->dev);
 	pci_set_drvdata(pdev, hw);
 
@@ -1417,7 +1787,7 @@
 		ath9k_hw_set_interrupts(sc->sc_ah, 0);
 		/* clear the ISR */
 		ath9k_hw_getisr(sc->sc_ah, &status);
-		sc->sc_invalid = 1;
+		sc->sc_flags |= SC_OP_INVALID;
 		free_irq(pdev->irq, sc);
 	}
 	ath_detach(sc);
@@ -1432,6 +1802,16 @@
 
 static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath_softc *sc = hw->priv;
+
+	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
+
+#ifdef CONFIG_RFKILL
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
+#endif
+
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, 3);
@@ -1441,6 +1821,8 @@
 
 static int ath_pci_resume(struct pci_dev *pdev)
 {
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath_softc *sc = hw->priv;
 	u32 val;
 	int err;
 
@@ -1457,6 +1839,21 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
+	/* Reconfigure LED GPIO as output (LED off, active low) */
+	ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
+			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
+
+#ifdef CONFIG_RFKILL
+	/*
+	 * check the h/w rfkill state on resume
+	 * and start the rfkill poll timer
+	 */
+	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+		queue_delayed_work(sc->hw->workqueue,
+				   &sc->rf_kill.rfkill_poll, 0);
+#endif
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 0cd399a..1470234 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -18,19 +18,19 @@
 #define PHY_H
 
 bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
-					  struct ath9k_channel
-					  *chan);
+				 struct ath9k_channel
+				 *chan);
 bool ath9k_hw_set_channel(struct ath_hal *ah,
-				   struct ath9k_channel *chan);
+			  struct ath9k_channel *chan);
 void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
 			 u32 freqIndex, int regWrites);
 bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
-				   struct ath9k_channel *chan,
-				   u16 modesIndex);
+			  struct ath9k_channel *chan,
+			  u16 modesIndex);
 void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
 				   struct ath9k_channel *chan);
 bool ath9k_hw_init_rf(struct ath_hal *ah,
-			       int *status);
+		      int *status);
 
 #define AR_PHY_BASE     0x9800
 #define AR_PHY(_n)      (AR_PHY_BASE + ((_n)<<2))
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 73c460a..cca2fc5 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -20,6 +20,7 @@
  */
 
 #include "core.h"
+/* FIXME: remove this include! */
 #include "../net/mac80211/rate.h"
 
 static u32 tx_triglevel_max;
@@ -653,8 +654,8 @@
 	rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
 	for (i = 0; i < rate_table->rate_cnt; i++) {
 		valid = (ath_rc_priv->single_stream ?
-				rate_table->info[i].valid_single_stream :
-				rate_table->info[i].valid);
+			 rate_table->info[i].valid_single_stream :
+			 rate_table->info[i].valid);
 		if (valid == TRUE) {
 			u32 phy = rate_table->info[i].phy;
 			u8 valid_rate_count = 0;
@@ -740,14 +741,14 @@
 		for (j = 0; j < rate_table->rate_cnt; j++) {
 			u32 phy = rate_table->info[j].phy;
 			u32 valid = (ath_rc_priv->single_stream ?
-				rate_table->info[j].valid_single_stream :
-				rate_table->info[j].valid);
+				     rate_table->info[j].valid_single_stream :
+				     rate_table->info[j].valid);
 
 			if (((((struct ath_rateset *)
-				mcs_set)->rs_rates[i] & 0x7F) !=
-				(rate_table->info[j].dot11rate & 0x7F)) ||
-					!WLAN_RC_PHY_HT(phy) ||
-					!WLAN_RC_PHY_HT_VALID(valid, capflag))
+			       mcs_set)->rs_rates[i] & 0x7F) !=
+			     (rate_table->info[j].dot11rate & 0x7F)) ||
+			    !WLAN_RC_PHY_HT(phy) ||
+			    !WLAN_RC_PHY_HT_VALID(valid, capflag))
 				continue;
 
 			if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
@@ -847,9 +848,9 @@
 	/* For half and quarter rate channels use different
 	 * rate tables
 	 */
-	if (sc->sc_curchan.channelFlags & CHANNEL_HALF)
+	if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_HALF)
 		ar5416_sethalf_ratetable(asc);
-	else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER)
+	else if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_QUARTER)
 		ar5416_setquarter_ratetable(asc);
 	else /* full rate */
 		ar5416_setfull_ratetable(asc);
@@ -866,10 +867,10 @@
 }
 
 static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
-				   struct ath_rate_node *ath_rc_priv,
-				   const struct ath_rate_table *rate_table,
-				   int probe_allowed, int *is_probing,
-				   int is_retry)
+			     struct ath_rate_node *ath_rc_priv,
+			     const struct ath_rate_table *rate_table,
+			     int probe_allowed, int *is_probing,
+			     int is_retry)
 {
 	u32 dt, best_thruput, this_thruput, now_msec;
 	u8 rate, next_rate, best_rate, maxindex, minindex;
@@ -997,8 +998,8 @@
 		rate = rate_ctrl->rate_table_size - 1;
 
 	ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
-		(rate_table->info[rate].valid_single_stream &&
-			ath_rc_priv->single_stream));
+	       (rate_table->info[rate].valid_single_stream &&
+		ath_rc_priv->single_stream));
 
 	return rate;
 }
@@ -1023,10 +1024,10 @@
 }
 
 static u8 ath_rc_rate_getidx(struct ath_softc *sc,
-				   struct ath_rate_node *ath_rc_priv,
-				   const struct ath_rate_table *rate_table,
-				   u8 rix, u16 stepdown,
-				   u16 min_rate)
+			     struct ath_rate_node *ath_rc_priv,
+			     const struct ath_rate_table *rate_table,
+			     u8 rix, u16 stepdown,
+			     u16 min_rate)
 {
 	u32 j;
 	u8 nextindex;
@@ -1066,8 +1067,8 @@
 	rate_table =
 		(struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
 	rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
-				(rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
-				is_probe, is_retry);
+				 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
+				 is_probe, is_retry);
 	nrix = rix;
 
 	if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
@@ -1099,13 +1100,13 @@
 		try_num = ((i + 1) == num_rates) ?
 			num_tries - (try_per_rate * i) : try_per_rate ;
 		min_rate = (((i + 1) == num_rates) &&
-			(rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
+			    (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
 
 		nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
-			rate_table, nrix, 1, min_rate);
+					  rate_table, nrix, 1, min_rate);
 		/* All other rates in the series have RTS enabled */
 		ath_rc_rate_set_series(rate_table,
-			&series[i], try_num, nrix, TRUE);
+				       &series[i], try_num, nrix, TRUE);
 	}
 
 	/*
@@ -1124,13 +1125,13 @@
 	 * above conditions.
 	 */
 	if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
-			(sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
-			(sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
+	    (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
+	    (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
 		u8  dot11rate = rate_table->info[rix].dot11rate;
 		u8 phy = rate_table->info[rix].phy;
 		if (i == 4 &&
 		    ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
-		    (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
+		     (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
 			series[3].rix = series[2].rix;
 			series[3].flags = series[2].flags;
 			series[3].max_4ms_framelen = series[2].max_4ms_framelen;
@@ -1141,18 +1142,19 @@
 /*
  * Return the Tx rate series.
  */
-void ath_rate_findrate(struct ath_softc *sc,
-		       struct ath_rate_node *ath_rc_priv,
-		       int num_tries,
-		       int num_rates,
-		       unsigned int rcflag,
-		       struct ath_rc_series series[],
-		       int *is_probe,
-		       int is_retry)
+static void ath_rate_findrate(struct ath_softc *sc,
+			      struct ath_rate_node *ath_rc_priv,
+			      int num_tries,
+			      int num_rates,
+			      unsigned int rcflag,
+			      struct ath_rc_series series[],
+			      int *is_probe,
+			      int is_retry)
 {
 	struct ath_vap *avp = ath_rc_priv->avp;
 
-	DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+
 	if (!num_rates || !num_tries)
 		return;
 
@@ -1174,9 +1176,8 @@
 			unsigned int    mcs;
 			u8 series_rix = 0;
 
-			series[idx].tries =
-				IEEE80211_RATE_IDX_ENTRY(
-					avp->av_config.av_fixed_retryset, idx);
+			series[idx].tries = IEEE80211_RATE_IDX_ENTRY(
+				avp->av_config.av_fixed_retryset, idx);
 
 			mcs = IEEE80211_RATE_IDX_ENTRY(
 				avp->av_config.av_fixed_rateset, idx);
@@ -1228,7 +1229,7 @@
 	u32 now_msec = jiffies_to_msecs(jiffies);
 	int state_change = FALSE, rate, count;
 	u8 last_per;
-	struct ath_rate_softc  *asc = (struct ath_rate_softc *)sc->sc_rc;
+	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
 	struct ath_rate_table *rate_table =
 		(struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
 
@@ -1272,14 +1273,14 @@
 		} else {
 			/* xretries == 2 */
 			count = sizeof(nretry_to_per_lookup) /
-					sizeof(nretry_to_per_lookup[0]);
+				sizeof(nretry_to_per_lookup[0]);
 			if (retries >= count)
 				retries = count - 1;
 			/* new_PER = 7/8*old_PER + 1/8*(currentPER) */
 			rate_ctrl->state[tx_rate].per =
 				(u8)(rate_ctrl->state[tx_rate].per -
-				(rate_ctrl->state[tx_rate].per >> 3) +
-				((100) >> 3));
+				     (rate_ctrl->state[tx_rate].per >> 3) +
+				     ((100) >> 3));
 		}
 
 		/* xretries == 1 or 2 */
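
Both branches above update the packet error rate with the same exponentially weighted moving average, new_PER = 7/8 * old_PER + 1/8 * current_PER, implemented in integer arithmetic as per - (per >> 3) + (current >> 3). A small worked sketch; ewma_per() is an invented helper, not a driver function.

#include <stdio.h>

static unsigned int ewma_per(unsigned int old_per, unsigned int cur_per)
{
	return old_per - (old_per >> 3) + (cur_per >> 3);
}

int main(void)
{
	/* previous estimate 40%, all retries failed (current PER 100%):
	 * 40 - 40/8 + 100/8 = 40 - 5 + 12 = 47 (exact value 47.5) */
	printf("new PER = %u%%\n", ewma_per(40, 100));
	return 0;
}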
@@ -1295,8 +1296,7 @@
 		if (retries >= count)
 			retries = count - 1;
 		if (info_priv->n_bad_frames) {
-			/* new_PER = 7/8*old_PER + 1/8*(currentPER)  */
-			/*
+			/* new_PER = 7/8*old_PER + 1/8*(currentPER)
 			 * Assuming that n_frames is not 0.  The current PER
 			 * from the retries is 100 * retries / (retries+1),
 			 * since the first retries attempts failed, and the
@@ -1386,7 +1386,7 @@
 			 * rssi_ack values.
 			 */
 			if (tx_rate == rate_ctrl->rate_max_phy &&
-					rate_ctrl->hw_maxretry_pktcnt < 255) {
+			    rate_ctrl->hw_maxretry_pktcnt < 255) {
 				rate_ctrl->hw_maxretry_pktcnt++;
 			}
 
@@ -1418,7 +1418,7 @@
 					/* Now reduce the current
 					 * rssi threshold. */
 					if ((rssi_ackAvg < rssi_thres + 2) &&
-						(rssi_thres > rssi_ack_vmin)) {
+					    (rssi_thres > rssi_ack_vmin)) {
 						rate_ctrl->state[tx_rate].
 							rssi_thres--;
 					}
@@ -1436,10 +1436,10 @@
 	 * a while (except if we are probing).
 	 */
 	if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
-			rate_table->info[tx_rate].ratekbps <=
-			rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
+	    rate_table->info[tx_rate].ratekbps <=
+	    rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
 		ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
-				(u8) tx_rate, &rate_ctrl->rate_max_phy);
+				 (u8) tx_rate, &rate_ctrl->rate_max_phy);
 
 		/* Don't probe for a little while. */
 		rate_ctrl->probe_time = now_msec;
@@ -1460,43 +1460,43 @@
 				break;
 
 			if (rate_ctrl->state[rate].rssi_thres +
-				rate_table->info[rate].rssi_ack_deltamin >
-					rate_ctrl->state[rate+1].rssi_thres) {
+			    rate_table->info[rate].rssi_ack_deltamin >
+			    rate_ctrl->state[rate+1].rssi_thres) {
 				rate_ctrl->state[rate+1].rssi_thres =
 					rate_ctrl->state[rate].
-						rssi_thres +
+					rssi_thres +
 					rate_table->info[rate].
-						rssi_ack_deltamin;
+					rssi_ack_deltamin;
 			}
 		}
 
 		/* Make sure the rates below this have lower rssi thresholds. */
 		for (rate = tx_rate - 1; rate >= 0; rate--) {
 			if (rate_table->info[rate].phy !=
-				rate_table->info[tx_rate].phy)
+			    rate_table->info[tx_rate].phy)
 				break;
 
 			if (rate_ctrl->state[rate].rssi_thres +
-				rate_table->info[rate].rssi_ack_deltamin >
-					rate_ctrl->state[rate+1].rssi_thres) {
+			    rate_table->info[rate].rssi_ack_deltamin >
+			    rate_ctrl->state[rate+1].rssi_thres) {
 				if (rate_ctrl->state[rate+1].rssi_thres <
-					rate_table->info[rate].
-					rssi_ack_deltamin)
+				    rate_table->info[rate].
+				    rssi_ack_deltamin)
 					rate_ctrl->state[rate].rssi_thres = 0;
 				else {
 					rate_ctrl->state[rate].rssi_thres =
 						rate_ctrl->state[rate+1].
-							rssi_thres -
-							rate_table->info[rate].
-							rssi_ack_deltamin;
+						rssi_thres -
+						rate_table->info[rate].
+						rssi_ack_deltamin;
 				}
 
 				if (rate_ctrl->state[rate].rssi_thres <
-					rate_table->info[rate].
-						rssi_ack_validmin) {
+				    rate_table->info[rate].
+				    rssi_ack_validmin) {
 					rate_ctrl->state[rate].rssi_thres =
 						rate_table->info[rate].
-							rssi_ack_validmin;
+						rssi_ack_validmin;
 				}
 			}
 		}
@@ -1507,11 +1507,11 @@
 	if (rate_ctrl->state[tx_rate].per < last_per) {
 		for (rate = tx_rate - 1; rate >= 0; rate--) {
 			if (rate_table->info[rate].phy !=
-				rate_table->info[tx_rate].phy)
+			    rate_table->info[tx_rate].phy)
 				break;
 
 			if (rate_ctrl->state[rate].per >
-					rate_ctrl->state[rate+1].per) {
+			    rate_ctrl->state[rate+1].per) {
 				rate_ctrl->state[rate].per =
 					rate_ctrl->state[rate+1].per;
 			}
@@ -1528,11 +1528,11 @@
 	/* Every so often, we reduce the thresholds and
 	 * PER (different for CCK and OFDM). */
 	if (now_msec - rate_ctrl->rssi_down_time >=
-		rate_table->rssi_reduce_interval) {
+	    rate_table->rssi_reduce_interval) {
 
 		for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
 			if (rate_ctrl->state[rate].rssi_thres >
-				rate_table->info[rate].rssi_ack_validmin)
+			    rate_table->info[rate].rssi_ack_validmin)
 				rate_ctrl->state[rate].rssi_thres -= 1;
 		}
 		rate_ctrl->rssi_down_time = now_msec;
@@ -1541,7 +1541,7 @@
 	/* Every so often, we reduce the thresholds
 	 * and PER (different for CCK and OFDM). */
 	if (now_msec - rate_ctrl->per_down_time >=
-		rate_table->rssi_reduce_interval) {
+	    rate_table->rssi_reduce_interval) {
 		for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
 			rate_ctrl->state[rate].per =
 				7 * rate_ctrl->state[rate].per / 8;
@@ -1560,7 +1560,7 @@
 			  struct ath_tx_info_priv *info_priv, int final_ts_idx,
 			  int xretries, int long_retry)
 {
-	struct ath_rate_softc  *asc = (struct ath_rate_softc *)sc->sc_rc;
+	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
 	struct ath_rate_table *rate_table;
 	struct ath_tx_ratectrl *rate_ctrl;
 	struct ath_rc_series rcs[4];
@@ -1637,7 +1637,6 @@
 		xretries, long_retry);
 }
 
-
 /*
  * Process a tx descriptor for a completed transmit (success or failure).
  */
@@ -1651,13 +1650,13 @@
 	struct ath_vap *avp;
 
 	avp = rc_priv->avp;
-	if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE)
-			|| info_priv->tx.ts_status & ATH9K_TXERR_FILT)
+	if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) ||
+	    (info_priv->tx.ts_status & ATH9K_TXERR_FILT))
 		return;
 
 	if (info_priv->tx.ts_rssi > 0) {
 		ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
-				info_priv->tx.ts_rssi);
+			     info_priv->tx.ts_rssi);
 	}
 
 	/*
@@ -1682,7 +1681,6 @@
 		      info_priv->tx.ts_longretry);
 }
 
-
 /*
  *  Update the SIB's rate control information
  *
@@ -1701,8 +1699,8 @@
 	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
 	struct ath_rateset *rateset = negotiated_rates;
 	u8 *ht_mcs = (u8 *)negotiated_htrates;
-	struct ath_tx_ratectrl *rate_ctrl  = (struct ath_tx_ratectrl *)
-		(ath_rc_priv);
+	struct ath_tx_ratectrl *rate_ctrl =
+		(struct ath_tx_ratectrl *)ath_rc_priv;
 	u8 i, j, k, hi = 0, hthi = 0;
 
 	rate_table = (struct ath_rate_table *)
@@ -1815,19 +1813,18 @@
 }
 
 
-static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
+static void ath_setup_rates(struct ath_softc *sc,
+			    struct ieee80211_supported_band *sband,
+			    struct ieee80211_sta *sta,
+			    struct ath_rate_node *rc_priv)
 
 {
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_hw *hw = local_to_hw(local);
-	struct ath_softc *sc = hw->priv;
-	struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
 	int i, j = 0;
 
-	DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
-	sband =  local->hw.wiphy->bands[local->hw.conf.channel->band];
+	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+
 	for (i = 0; i < sband->n_bitrates; i++) {
-		if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) {
+		if (sta->supp_rates[sband->band] & BIT(i)) {
 			rc_priv->neg_rates.rs_rates[j]
 				= (sband->bitrates[i].bitrate * 2) / 10;
 			j++;
@@ -1854,19 +1851,17 @@
 }
 
 /* Rate Control callbacks */
-static void ath_tx_status(void *priv, struct net_device *dev,
+static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
+			  struct ieee80211_sta *sta, void *priv_sta,
 			  struct sk_buff *skb)
 {
 	struct ath_softc *sc = priv;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_node *an;
-	struct sta_info *sta;
-	struct ieee80211_local *local;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr;
 	__le16 fc;
 
-	local = hw_to_local(sc->hw);
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = hdr->frame_control;
 	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
@@ -1875,8 +1870,7 @@
 	an = ath_node_find(sc, hdr->addr1);
 	spin_unlock_bh(&sc->node_lock);
 
-	sta = sta_info_get(local, hdr->addr1);
-	if (!an || !sta || !ieee80211_is_data(fc)) {
+	if (!an || !priv_sta || !ieee80211_is_data(fc)) {
 		if (tx_info->driver_data[0] != NULL) {
 			kfree(tx_info->driver_data[0]);
 			tx_info->driver_data[0] = NULL;
@@ -1884,37 +1878,40 @@
 		return;
 	}
 	if (tx_info->driver_data[0] != NULL) {
-		ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv);
+		ath_rate_tx_complete(sc, an, priv_sta, tx_info_priv);
 		kfree(tx_info->driver_data[0]);
 		tx_info->driver_data[0] = NULL;
 	}
 }
 
 static void ath_tx_aggr_resp(struct ath_softc *sc,
-			     struct sta_info *sta,
+			     struct ieee80211_supported_band *sband,
+			     struct ieee80211_sta *sta,
 			     struct ath_node *an,
 			     u8 tidno)
 {
-	struct ieee80211_hw *hw = sc->hw;
-	struct ieee80211_local *local;
 	struct ath_atx_tid *txtid;
-	struct ieee80211_supported_band *sband;
 	u16 buffersize = 0;
 	int state;
-	DECLARE_MAC_BUF(mac);
+	struct sta_info *si;
 
-	if (!sc->sc_txaggr)
+	if (!(sc->sc_flags & SC_OP_TXAGGR))
 		return;
 
 	txtid = ATH_AN_2_TID(an, tidno);
 	if (!txtid->paused)
 		return;
 
-	local = hw_to_local(sc->hw);
-	sband = hw->wiphy->bands[hw->conf.channel->band];
+	/*
+	 * XXX: This is entirely busted, we aren't supposed to
+	 *	access the sta from here because it's internal
+	 *	to mac80211, and looking at the state without
+	 *	locking is wrong too.
+	 */
+	si = container_of(sta, struct sta_info, sta);
 	buffersize = IEEE80211_MIN_AMPDU_BUF <<
 		sband->ht_info.ampdu_factor; /* FIXME */
-	state = sta->ampdu_mlme.tid_state_tx[tidno];
+	state = si->ampdu_mlme.tid_state_tx[tidno];
 
 	if (state & HT_ADDBA_RECEIVED_MSK) {
 		txtid->addba_exchangecomplete = 1;
@@ -1930,21 +1927,18 @@
 	}
 }
 
-static void ath_get_rate(void *priv, struct net_device *dev,
-			 struct ieee80211_supported_band *sband,
-			 struct sk_buff *skb,
-			 struct rate_selection *sel)
+static void ath_get_rate(void *priv, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta,
+			 struct sk_buff *skb, struct rate_selection *sel)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta;
-	struct ath_softc *sc = (struct ath_softc *)priv;
+	struct ath_softc *sc = priv;
 	struct ieee80211_hw *hw = sc->hw;
 	struct ath_tx_info_priv *tx_info_priv;
-	struct ath_rate_node *ath_rc_priv;
+	struct ath_rate_node *ath_rc_priv = priv_sta;
 	struct ath_node *an;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	int is_probe, chk, ret;
+	int is_probe = FALSE, chk, ret;
 	s8 lowest_idx;
 	__le16 fc = hdr->frame_control;
 	u8 *qc, tid;
@@ -1957,18 +1951,15 @@
 	ASSERT(tx_info->driver_data[0] != NULL);
 	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
 
-	sta = sta_info_get(local, hdr->addr1);
-	lowest_idx = rate_lowest_index(local, sband, sta);
+	lowest_idx = rate_lowest_index(sband, sta);
 	tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
 	/* lowest rate for management and multicast/broadcast frames */
 	if (!ieee80211_is_data(fc) ||
-			is_multicast_ether_addr(hdr->addr1) || !sta) {
+	    is_multicast_ether_addr(hdr->addr1) || !sta) {
 		sel->rate_idx = lowest_idx;
 		return;
 	}
 
-	ath_rc_priv = sta->rate_ctrl_priv;
-
 	/* Find tx rate for unicast frames */
 	ath_rate_findrate(sc, ath_rc_priv,
 			  ATH_11N_TXMAXTRY, 4,
@@ -1977,8 +1968,7 @@
 			  &is_probe,
 			  false);
 	if (is_probe)
-		sel->probe_idx = ((struct ath_tx_ratectrl *)
-			sta->rate_ctrl_priv)->probe_rate;
+		sel->probe_idx = ath_rc_priv->tx_ratectrl.probe_rate;
 
 	/* Ratecontrol sometimes returns invalid rate index */
 	if (tx_info_priv->rcs[0].rix != 0xff)
@@ -2022,38 +2012,31 @@
 						__func__,
 						print_mac(mac, hdr->addr1));
 			} else if (chk == AGGR_EXCHANGE_PROGRESS)
-				ath_tx_aggr_resp(sc, sta, an, tid);
+				ath_tx_aggr_resp(sc, sband, sta, an, tid);
 		}
 	}
 }
 
-static void ath_rate_init(void *priv, void *priv_sta,
-			  struct ieee80211_local *local,
-			  struct sta_info *sta)
+static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
+                          struct ieee80211_sta *sta, void *priv_sta)
 {
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_hw *hw = local_to_hw(local);
-	struct ieee80211_conf *conf = &local->hw.conf;
-	struct ath_softc *sc = hw->priv;
+	struct ath_softc *sc = priv;
+	struct ath_rate_node *ath_rc_priv = priv_sta;
 	int i, j = 0;
 
 	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	sta->txrate_idx = rate_lowest_index(local, sband, sta);
-
-	ath_setup_rates(local, sta);
-	if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
+	ath_setup_rates(sc, sband, sta, ath_rc_priv);
+	if (sc->hw->conf.flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
 		for (i = 0; i < MCS_SET_SIZE; i++) {
-			if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
-				((struct ath_rate_node *)
-				priv_sta)->neg_ht_rates.rs_rates[j++] = i;
+			if (sc->hw->conf.ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
+				ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
 			if (j == ATH_RATE_MAX)
 				break;
 		}
-		((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j;
+		ath_rc_priv->neg_ht_rates.rs_nrates = j;
 	}
-	ath_rc_node_update(hw, priv_sta);
+	ath_rc_node_update(sc->hw, priv_sta);
 }
 
 static void ath_rate_clear(void *priv)
@@ -2061,13 +2044,12 @@
 	return;
 }
 
-static void *ath_rate_alloc(struct ieee80211_local *local)
+static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
-	struct ieee80211_hw *hw = local_to_hw(local);
 	struct ath_softc *sc = hw->priv;
 
-	DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
-	return local->hw.priv;
+	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+	return hw->priv;
 }
 
 static void ath_rate_free(void *priv)
@@ -2075,24 +2057,28 @@
 	return;
 }
 
-static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
+static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 {
 	struct ath_softc *sc = priv;
 	struct ath_vap *avp = sc->sc_vaps[0];
 	struct ath_rate_node *rate_priv;
 
-	DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+
 	rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
 	if (!rate_priv) {
-		DPRINTF(sc, ATH_DBG_FATAL, "%s:Unable to allocate"
-				"private rate control structure", __func__);
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: Unable to allocate private rc structure\n",
+			__func__);
 		return NULL;
 	}
 	ath_rc_sib_init(rate_priv);
+
 	return rate_priv;
 }
 
-static void ath_rate_free_sta(void *priv, void *priv_sta)
+static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
+			      void *priv_sta)
 {
 	struct ath_rate_node *rate_priv = priv_sta;
 	struct ath_softc *sc = priv;
@@ -2111,7 +2097,7 @@
 	.alloc = ath_rate_alloc,
 	.free = ath_rate_free,
 	.alloc_sta = ath_rate_alloc_sta,
-	.free_sta = ath_rate_free_sta
+	.free_sta = ath_rate_free_sta,
 };
 
 int ath_rate_control_register(void)
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index 71aef9c..b95b415 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -71,9 +71,6 @@
  */
 #define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
 
-#define SHORT_PRE 1
-#define LONG_PRE 0
-
 #define WLAN_PHY_HT_20_SS       WLAN_RC_PHY_HT_20_SS
 #define WLAN_PHY_HT_20_DS       WLAN_RC_PHY_HT_20_DS
 #define WLAN_PHY_HT_20_DS_HGI   WLAN_RC_PHY_HT_20_DS_HGI
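
The IEEE80211_RATE_IDX_ENTRY() macro kept above treats a u32 as four packed one-byte entries (entry 0 in the low byte) and extracts entry idx by masking and shifting. A usage sketch; the packed value below is made up purely for illustration.

#include <assert.h>
#include <stdint.h>

#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))

int main(void)
{
	uint32_t fixed_rateset = 0x04030201;	/* entries 3, 2, 1, 0 */

	assert(IEEE80211_RATE_IDX_ENTRY(fixed_rateset, 0) == 0x01);
	assert(IEEE80211_RATE_IDX_ENTRY(fixed_rateset, 2) == 0x03);
	return 0;
}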
@@ -102,18 +99,18 @@
 	WLAN_RC_PHY_MAX
 };
 
-#define WLAN_RC_PHY_DS(_phy)   ((_phy == WLAN_RC_PHY_HT_20_DS)           \
-	|| (_phy == WLAN_RC_PHY_HT_40_DS)        \
-	|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI)    \
-	|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_40(_phy)   ((_phy == WLAN_RC_PHY_HT_40_SS)           \
-	|| (_phy == WLAN_RC_PHY_HT_40_DS)        \
-	|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI)    \
-	|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_DS(_phy)   ((_phy == WLAN_RC_PHY_HT_20_DS)		\
+				|| (_phy == WLAN_RC_PHY_HT_40_DS)	\
+				|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI)	\
+				|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_40(_phy)   ((_phy == WLAN_RC_PHY_HT_40_SS)		\
+				|| (_phy == WLAN_RC_PHY_HT_40_DS)	\
+				|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI)	\
+				|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
 #define WLAN_RC_PHY_SGI(_phy)  ((_phy == WLAN_RC_PHY_HT_20_SS_HGI)      \
-	|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI)   \
-	|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI)   \
-	|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+				|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI)   \
+				|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI)   \
+				|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
 
 #define WLAN_RC_PHY_HT(_phy)    (_phy >= WLAN_RC_PHY_HT_20_SS)
 
@@ -135,56 +132,59 @@
 #define WLAN_RC_SGI_FLAG        (0x04)
 #define WLAN_RC_HT_FLAG         (0x08)
 
-/* Index into the rate table */
-#define INIT_RATE_MAX_20	23
-#define INIT_RATE_MAX_40	40
-
 #define RATE_TABLE_SIZE		64
 
-/* XXX: Convert to kdoc */
+/**
+ * struct ath_rate_table - Rate Control table
+ * @valid: valid for use in rate control
+ * @valid_single_stream: valid for use in rate control for
+ * 	single stream operation
+ * @phy: CCK/OFDM
+ * @ratekbps: rate in Kbits per second
+ * @user_ratekbps: user rate in Kbits per second
+ * @ratecode: rate that goes into HW descriptors
+ * @short_preamble: Mask for enabling short preamble in ratecode for CCK
+ * @dot11rate: value that goes into supported
+ * 	rates info element of MLME
+ * @ctrl_rate: Index of next lower basic rate, used for duration computation
+ * @max_4ms_framelen: maximum frame length (in bytes) for 4 ms tx duration
+ * @probe_interval: interval for rate control to probe for other rates
+ * @rssi_reduce_interval: interval for rate control to reduce rssi
+ * @initial_ratemax: initial ratemax value used in ath_rc_sib_update()
+ */
 struct ath_rate_table {
 	int rate_cnt;
 	struct {
-		int valid;            /* Valid for use in rate control */
-		int valid_single_stream;/* Valid for use in rate control
-					for single stream operation */
-		u8 phy;              /* CCK/OFDM/TURBO/XR */
-		u32 ratekbps;         /* Rate in Kbits per second */
-		u32 user_ratekbps;     /* User rate in KBits per second */
-		u8 ratecode;         /* rate that goes into
-					hw descriptors */
-		u8 short_preamble;    /* Mask for enabling short preamble
-						in rate code for CCK */
-		u8 dot11rate;        /* Value that goes into supported
-					rates info element of MLME */
-		u8 ctrl_rate;      /* Index of next lower basic rate,
-					used for duration computation */
-		int8_t rssi_ack_validmin;  /* Rate control related */
-		int8_t rssi_ack_deltamin;  /* Rate control related */
-		u8 base_index;        /* base rate index */
-		u8 cw40index;        /* 40cap rate index */
-		u8 sgi_index;         /* shortgi rate index */
-		u8 ht_index;          /* shortgi rate index */
-		u32 max_4ms_framelen;   /* Maximum frame length(bytes)
-						for 4ms tx duration */
+		int valid;
+		int valid_single_stream;
+		u8 phy;
+		u32 ratekbps;
+		u32 user_ratekbps;
+		u8 ratecode;
+		u8 short_preamble;
+		u8 dot11rate;
+		u8 ctrl_rate;
+		int8_t rssi_ack_validmin;
+		int8_t rssi_ack_deltamin;
+		u8 base_index;
+		u8 cw40index;
+		u8 sgi_index;
+		u8 ht_index;
+		u32 max_4ms_framelen;
 	} info[RATE_TABLE_SIZE];
-	u32 probe_interval;        /* interval for ratectrl to
-					probe for other rates */
-	u32 rssi_reduce_interval;   /* interval for ratectrl
-						to reduce RSSI */
-	u8 initial_ratemax;   /* the initial ratemax value used
-					in ath_rc_sib_update() */
+	u32 probe_interval;
+	u32 rssi_reduce_interval;
+	u8 initial_ratemax;
 };
 
 #define ATH_RC_PROBE_ALLOWED            0x00000001
 #define ATH_RC_MINRATE_LASTRATE         0x00000002
-#define ATH_RC_SHORT_PREAMBLE           0x00000004
 
 struct ath_rc_series {
-	u8    rix;
-	u8    tries;
-	u8    flags;
-	u32   max_4ms_framelen;
+	u8 rix;
+	u8 tries;
+	u8 flags;
+	u32 max_4ms_framelen;
 };
 
 /* rcs_flags definition */
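
To show how the fields documented in the ath_rate_table kdoc above fit together, here is a hedged single-entry initializer (roughly a 1 Mbps CCK-style entry). It assumes this header (rc.h) is included; every numeric value is invented for the example, none comes from the driver's real tables.

static const struct ath_rate_table example_rate_table = {
	.rate_cnt = 1,
	.info[0] = {
		.valid			= 1,
		.valid_single_stream	= 1,
		.phy			= 0,	/* CCK/OFDM code, per the kdoc */
		.ratekbps		= 1000,	/* 1 Mbps */
		.user_ratekbps		= 1000,
		.ratecode		= 0x1b,	/* invented HW rate code */
		.dot11rate		= 2,	/* 1 Mbps in 500 kbps units */
		.ctrl_rate		= 0,	/* lowest rate protects itself */
		.max_4ms_framelen	= 500,	/* ~1 Mbps * 4 ms / 8 bits */
	},
	.probe_interval		= 50,	/* ms, invented for the example */
	.rssi_reduce_interval	= 3000,	/* ms, invented for the example */
	.initial_ratemax	= 0,
};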
@@ -201,42 +201,56 @@
 #define MAX_TX_RATE_PHY         48
 
 struct ath_tx_ratectrl_state {
-	int8_t rssi_thres; /* required rssi for this rate (dB) */
-	u8 per; /* recent estimate of packet error rate (%) */
+	int8_t rssi_thres;	/* required rssi for this rate (dB) */
+	u8 per;			/* recent estimate of packet error rate (%) */
 };
 
+/**
+ * struct ath_tx_ratectrl - TX Rate control Information
+ * @state: RC state
+ * @rssi_last: last ACK rssi
+ * @rssi_last_lookup: last ACK rssi used for lookup
+ * @rssi_last_prev: previous last ACK rssi
+ * @rssi_last_prev2: 2nd previous last ACK rssi
+ * @rssi_sum_cnt: count of rssi_sum for averaging
+ * @rssi_sum_rate: rate that we are averaging
+ * @rssi_sum: running sum of rssi for averaging
+ * @probe_rate: rate we are probing at
+ * @rssi_time: msec timestamp for last ack rssi
+ * @rssi_down_time: msec timestamp for last down step
+ * @probe_time: msec timestamp for last probe
+ * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
+ * @max_valid_rate: maximum number of valid rates
+ * @per_down_time: msec timestamp for last PER down step
+ * @valid_phy_ratecnt: valid rate count
+ * @rate_max_phy: phy index for the max rate
+ * @probe_interval: interval for ratectrl to probe for other rates
+ */
 struct ath_tx_ratectrl {
-	struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */
-	int8_t rssi_last;            /* last ack rssi */
-	int8_t rssi_last_lookup;	/* last ack rssi used for lookup */
-	int8_t rssi_last_prev;	/* previous last ack rssi */
-	int8_t rssi_last_prev2;	/* 2nd previous last ack rssi */
-	int32_t rssi_sum_cnt;        /* count of rssi_sum for averaging */
-	int32_t rssi_sum_rate;       /* rate that we are averaging */
-	int32_t rssi_sum;           /* running sum of rssi for averaging */
-	u32 valid_txrate_mask;   /* mask of valid rates */
-	u8 rate_table_size;      /* rate table size */
-	u8 rate_max;            /* max rate that has recently worked */
-	u8 probe_rate;          /* rate we are probing at */
-	u32 rssi_time;          /* msec timestamp for last ack rssi */
-	u32 rssi_down_time;      /* msec timestamp for last down step */
-	u32 probe_time;         /* msec timestamp for last probe */
-	u8 hw_maxretry_pktcnt;   /* num packets since we got
-					HW max retry error */
-	u8 max_valid_rate;       /* maximum number of valid rate */
-	u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
-	u32 per_down_time;       /* msec timstamp for last
-					PER down step */
+	struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL];
+	int8_t rssi_last;
+	int8_t rssi_last_lookup;
+	int8_t rssi_last_prev;
+	int8_t rssi_last_prev2;
+	int32_t rssi_sum_cnt;
+	int32_t rssi_sum_rate;
+	int32_t rssi_sum;
+	u8 rate_table_size;
+	u8 probe_rate;
+	u32 rssi_time;
+	u32 rssi_down_time;
+	u32 probe_time;
+	u8 hw_maxretry_pktcnt;
+	u8 max_valid_rate;
+	u8 valid_rate_index[MAX_TX_RATE_TBL];
+	u32 per_down_time;
 
 	/* 11n state */
-	u8  valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */
-	u8  valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
-	u8  rc_phy_mode;
-	u8  rate_max_phy;        /* Phy index for the max rate */
-	u32 rate_max_lastused;   /* msec timstamp of when we
-					last used rateMaxPhy */
-	u32 probe_interval;     /* interval for ratectrl to probe
-					for other rates */
+	u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
+	u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
+	u8 rc_phy_mode;
+	u8 rate_max_phy;
+	u32 probe_interval;
 };
 
 struct ath_rateset {
@@ -248,29 +262,32 @@
 struct ath_rate_softc {
 	/* phy tables that contain rate control data */
 	const void *hw_rate_table[ATH9K_MODE_MAX];
-	int fixedrix;	/* -1 or index of fixed rate */
+
+	/* -1 or index of fixed rate */
+	int fixedrix;
 };
 
 /* per-node state */
 struct ath_rate_node {
-	struct ath_tx_ratectrl tx_ratectrl;	/* rate control state proper */
-	u32 prev_data_rix;	/* rate idx of last data frame */
+	struct ath_tx_ratectrl tx_ratectrl;
 
-	/* map of rate ix -> negotiated rate set ix */
-	u8 rixmap[MAX_TX_RATE_TBL];
+	/* rate idx of last data frame */
+	u32 prev_data_rix;
 
-	/* map of ht rate ix -> negotiated rate set ix */
-	u8 ht_rixmap[MAX_TX_RATE_TBL];
+	/* ht capabilities */
+	u8 ht_cap;
 
-	u8 ht_cap;		/* ht capabilities */
-	u8 ant_tx;		/* current transmit antenna */
+	/* When TRUE, only single stream Tx possible */
+	u8 single_stream;
 
-	u8 single_stream;   /* When TRUE, only single
-				stream Tx possible */
-	struct ath_rateset neg_rates;	/* Negotiated rates */
-	struct ath_rateset neg_ht_rates;	/* Negotiated HT rates */
-	struct ath_rate_softc *asc; /* back pointer to atheros softc */
-	struct ath_vap *avp;	/* back pointer to vap */
+	/* Negotiated rates */
+	struct ath_rateset neg_rates;
+
+	/* Negotiated HT rates */
+	struct ath_rateset neg_ht_rates;
+
+	struct ath_rate_softc *asc;
+	struct ath_vap *avp;
 };
 
 /* Driver data of ieee80211_tx_info */
@@ -297,17 +314,10 @@
 void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
 
 /*
- * Return the tx rate series.
- */
-void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
-		       int num_tries, int num_rates,
-		       unsigned int rcflag, struct ath_rc_series[],
-		       int *is_probe, int isretry);
-/*
  * Return rate index for given Dot11 Rate.
  */
 u8 ath_rate_findrateix(struct ath_softc *sc,
-			     u8 dot11_rate);
+		       u8 dot11_rate);
 
 /* Routines to register/unregister rate control algorithm */
 int ath_rate_control_register(void);
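
Note: the slimmed-down struct ath_tx_ratectrl above still exposes the usable-rate set through max_valid_rate and valid_rate_index[]. As a minimal sketch (the helper name is hypothetical, not part of this patch), a lookup over that set could look like:

	/* Hypothetical helper, for illustration only: checks whether a rate
	 * index is in the currently valid set kept by the rate control state. */
	static bool ath_rc_rate_is_valid(const struct ath_tx_ratectrl *rc, u8 rix)
	{
		u8 i;

		for (i = 0; i < rc->max_valid_rate; i++) {
			if (rc->valid_rate_index[i] == rix)
				return true;
		}
		return false;
	}
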
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 20ddb7a..4982563 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -184,7 +184,7 @@
 		tid = qc[0] & 0xf;
 	}
 
-	if (sc->sc_opmode == ATH9K_M_STA) {
+	if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
 		/* Drop the frame not belonging to me. */
 		if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
 			dev_kfree_skb(skb);
@@ -449,17 +449,16 @@
 	int type;
 
 	/* indicate frame to the stack, which will free the old skb. */
-	type = ath__rx_indicate(sc, skb, status, keyix);
+	type = _ath_rx_indicate(sc, skb, status, keyix);
 
 	/* allocate a new skb and queue it to for H/W processing */
 	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
 	if (nskb != NULL) {
 		bf->bf_mpdu = nskb;
-		bf->bf_buf_addr = ath_skb_map_single(sc,
-			nskb,
-			PCI_DMA_FROMDEVICE,
-			/* XXX: Remove get_dma_mem_context() */
-			get_dma_mem_context(bf, bf_dmacontext));
+		bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
+					 skb_end_pointer(nskb) - nskb->head,
+					 PCI_DMA_FROMDEVICE);
+		bf->bf_dmacontext = bf->bf_buf_addr;
 		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
 
 		/* queue the new wbuf to H/W */
@@ -505,7 +504,7 @@
 
 	do {
 		spin_lock_init(&sc->sc_rxflushlock);
-		sc->sc_rxflush = 0;
+		sc->sc_flags &= ~SC_OP_RXFLUSH;
 		spin_lock_init(&sc->sc_rxbuflock);
 
 		/*
@@ -542,9 +541,10 @@
 			}
 
 			bf->bf_mpdu = skb;
-			bf->bf_buf_addr =
-				ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
-				       get_dma_mem_context(bf, bf_dmacontext));
+			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
+					 skb_end_pointer(skb) - skb->head,
+					 PCI_DMA_FROMDEVICE);
+			bf->bf_dmacontext = bf->bf_buf_addr;
 			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
 		}
 		sc->sc_rxlink = NULL;
@@ -598,6 +598,7 @@
 u32 ath_calcrxfilter(struct ath_softc *sc)
 {
 #define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
 	u32 rfilt;
 
 	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
@@ -605,25 +606,29 @@
 		| ATH9K_RX_FILTER_MCAST;
 
 	/* If not a STA, enable processing of Probe Requests */
-	if (sc->sc_opmode != ATH9K_M_STA)
+	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
 		rfilt |= ATH9K_RX_FILTER_PROBEREQ;
 
 	/* Can't set HOSTAP into promiscuous mode */
-	if (sc->sc_opmode == ATH9K_M_MONITOR) {
+	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
+	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
+	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
 		rfilt |= ATH9K_RX_FILTER_PROM;
 		/* ??? To prevent from sending ACK */
 		rfilt &= ~ATH9K_RX_FILTER_UCAST;
 	}
 
-	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
-	    sc->sc_scanning)
+	if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
+	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
+	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
 		rfilt |= ATH9K_RX_FILTER_BEACON;
 
 	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
 	   & beacon frames */
-	if (sc->sc_opmode == ATH9K_M_HOSTAP)
+	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
 		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
 	return rfilt;
+
 #undef RX_FILTER_PRESERVE
 }
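
Note: ath_calcrxfilter() now derives the RX filter from the hardware opmode in sc->sc_ah->ah_opmode plus the mac80211 FIF_* flags cached in sc->rx_filter. A minimal usage sketch, assuming ath9k_hw_setrxfilter() as the HAL call that writes the computed mask to the MAC:

	/* Sketch only: recompute and apply the RX filter, e.g. after the
	 * mac80211 filter flags change. */
	static void example_update_rxfilter(struct ath_softc *sc)
	{
		u32 rfilt = ath_calcrxfilter(sc);

		ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
	}
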
 
@@ -703,11 +708,11 @@
 	 * progress (see references to sc_rxflush)
 	 */
 	spin_lock_bh(&sc->sc_rxflushlock);
-	sc->sc_rxflush = 1;
+	sc->sc_flags |= SC_OP_RXFLUSH;
 
 	ath_rx_tasklet(sc, 1);
 
-	sc->sc_rxflush = 0;
+	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_unlock_bh(&sc->sc_rxflushlock);
 }
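
Note: the per-field booleans (sc_rxflush, sc_rxaggr, sc_txaggr, sc_invalid, ...) are folded throughout this patch into a single sc->sc_flags word tested with SC_OP_* bits. A minimal sketch of the idiom, using a made-up SC_OP_EXAMPLE bit:

	/* Illustration only; SC_OP_EXAMPLE is not a real flag. */
	#define SC_OP_EXAMPLE	BIT(31)

	static inline void example_set_flag(struct ath_softc *sc, bool on)
	{
		if (on)
			sc->sc_flags |= SC_OP_EXAMPLE;
		else
			sc->sc_flags &= ~SC_OP_EXAMPLE;
	}

	static inline bool example_test_flag(struct ath_softc *sc)
	{
		return !!(sc->sc_flags & SC_OP_EXAMPLE);
	}
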
 
@@ -720,7 +725,7 @@
 		 struct ath_recv_status *rx_status,
 		 enum ATH_RX_TYPE *status)
 {
-	if (is_ampdu && sc->sc_rxaggr) {
+	if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
 		*status = ATH_RX_CONSUMED;
 		return ath_ampdu_input(sc, an, skb, rx_status);
 	} else {
@@ -751,7 +756,7 @@
 
 	do {
 		/* If handling rx interrupt and flush is in progress => exit */
-		if (sc->sc_rxflush && (flush == 0))
+		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
 		spin_lock_bh(&sc->sc_rxbuflock);
@@ -901,7 +906,7 @@
 			 * Enable this if you want to see
 			 * error frames in Monitor mode.
 			 */
-			if (sc->sc_opmode != ATH9K_M_MONITOR)
+			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
 				goto rx_next;
 #endif
 			/* fall thru for monitor mode handling... */
@@ -946,7 +951,7 @@
 			 * decryption and MIC failures. For monitor mode,
 			 * we also ignore the CRC error.
 			 */
-			if (sc->sc_opmode == ATH9K_M_MONITOR) {
+			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
 				if (ds->ds_rxstat.rs_status &
 				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
 					ATH9K_RXERR_CRC))
@@ -1090,7 +1095,7 @@
 			"%s: Reset rx chain mask. "
 			"Do internal reset\n", __func__);
 		ASSERT(flush == 0);
-		ath_internal_reset(sc);
+		ath_reset(sc, false);
 	}
 
 	return 0;
@@ -1128,7 +1133,7 @@
 	rxtid = &an->an_aggr.rx.tid[tid];
 
 	spin_lock_bh(&rxtid->tidlock);
-	if (sc->sc_rxaggr) {
+	if (sc->sc_flags & SC_OP_RXAGGR) {
 		/* Allow aggregation reception
 		 * Adjust rx BA window size. Peer might indicate a
 		 * zero buffer size for a _dont_care_ condition.
@@ -1228,7 +1233,7 @@
 
 void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_rxaggr) {
+	if (sc->sc_flags & SC_OP_RXAGGR) {
 		struct ath_arx_tid *rxtid;
 		int tidno;
 
@@ -1260,7 +1265,7 @@
 
 void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_rxaggr) {
+	if (sc->sc_flags & SC_OP_RXAGGR) {
 		struct ath_arx_tid *rxtid;
 		int tidno, i;
 
@@ -1293,27 +1298,3 @@
 {
 	ath_rx_node_cleanup(sc, an);
 }
-
-dma_addr_t ath_skb_map_single(struct ath_softc *sc,
-			      struct sk_buff *skb,
-			      int direction,
-			      dma_addr_t *pa)
-{
-	/*
-	 * NB: do NOT use skb->len, which is 0 on initialization.
-	 * Use skb's entire data area instead.
-	 */
-	*pa = pci_map_single(sc->pdev, skb->data,
-		skb_end_pointer(skb) - skb->head, direction);
-	return *pa;
-}
-
-void ath_skb_unmap_single(struct ath_softc *sc,
-			  struct sk_buff *skb,
-			  int direction,
-			  dma_addr_t *pa)
-{
-	/* Unmap skb's entire data area */
-	pci_unmap_single(sc->pdev, *pa,
-		skb_end_pointer(skb) - skb->head, direction);
-}
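
Note: with ath_skb_map_single()/ath_skb_unmap_single() gone, the RX path open-codes pci_map_single() over the skb's whole data area (skb->len is still 0 for a freshly allocated RX buffer, hence skb_end_pointer(skb) - skb->head as the length). The matching teardown, not shown in these hunks, is assumed to mirror it:

	/* Sketch of the corresponding unmap; surrounding context assumed. */
	pci_unmap_single(sc->pdev, bf->bf_buf_addr,
			 skb_end_pointer(skb) - skb->head,
			 PCI_DMA_FROMDEVICE);
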
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index 42b0890..60617ae 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -899,12 +899,6 @@
 #define AR_GPIO_OUTPUT_MUX2                      0x4064
 #define AR_GPIO_OUTPUT_MUX3                      0x4068
 
-#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT             0
-#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
-#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED     2
-#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED    5
-#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED      6
-
 #define AR_INPUT_STATE                           0x406c
 
 #define AR_EEPROM_STATUS_DATA                    0x407c
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 8b332e1..2592905 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@
 #define IS_HT_RATE(_rate)     ((_rate) & 0x80)
 
 /*
- * Insert a chain of ath_buf (descriptors) on a multicast txq
- * but do NOT start tx DMA on this queue.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
-				struct ath_txq *txq,
-				struct list_head *head)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_buf *bf;
-
-	if (list_empty(head))
-		return;
-
-	/*
-	 * Insert the frame on the outbound list and
-	 * pass it on to the hardware.
-	 */
-	bf = list_first_entry(head, struct ath_buf, list);
-
-	/*
-	 * The CAB queue is started from the SWBA handler since
-	 * frames only go out on DTIM and to avoid possible races.
-	 */
-	ath9k_hw_set_interrupts(ah, 0);
-
-	/*
-	 * If there is anything in the mcastq, we want to set
-	 * the "more data" bit in the last item in the queue to
-	 * indicate that there is "more data". It makes sense to add
-	 * it here since you are *always* going to have
-	 * more data when adding to this queue, no matter where
-	 * you call from.
-	 */
-
-	if (txq->axq_depth) {
-		struct ath_buf *lbf;
-		struct ieee80211_hdr *hdr;
-
-		/*
-		 * Add the "more data flag" to the last frame
-		 */
-
-		lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-		hdr = (struct ieee80211_hdr *)
-			((struct sk_buff *)(lbf->bf_mpdu))->data;
-		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	}
-
-	/*
-	 * Now, concat the frame onto the queue
-	 */
-	list_splice_tail_init(head, &txq->axq_q);
-	txq->axq_depth++;
-	txq->axq_totalqueued++;
-	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-	DPRINTF(sc, ATH_DBG_QUEUE,
-		"%s: txq depth = %d\n", __func__, txq->axq_depth);
-	if (txq->axq_link != NULL) {
-		*txq->axq_link = bf->bf_daddr;
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"%s: link[%u](%p)=%llx (%p)\n",
-			__func__,
-			txq->axq_qnum, txq->axq_link,
-			ito64(bf->bf_daddr), bf->bf_desc);
-	}
-	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-	ath9k_hw_set_interrupts(ah, sc->sc_imask);
-}
-
-/*
  * Insert a chain of ath_buf (descriptors) on a txq and
  * assume the descriptors are already chained together by caller.
  * NB: must be called with txq lock held
@@ -277,8 +204,6 @@
 	__le16 fc;
 	u8 *qc;
 
-	memset(txctl, 0, sizeof(struct ath_tx_control));
-
 	txctl->dev = sc;
 	hdr = (struct ieee80211_hdr *)skb->data;
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -302,7 +227,6 @@
 	}
 
 	txctl->if_id = 0;
-	txctl->nextfraglen = 0;
 	txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
 	txctl->txpower = MAX_RATE_POWER; /* FIXME */
 
@@ -329,12 +253,18 @@
 
 	/* Fill qnum */
 
-	txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[txctl->qnum];
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
+		txctl->qnum = 0;
+		txq = sc->sc_cabq;
+	} else {
+		txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+		txq = &sc->sc_txq[txctl->qnum];
+	}
 	spin_lock_bh(&txq->axq_lock);
 
 	/* Try to avoid running out of descriptors */
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+	if (txq->axq_depth >= (ATH_TXBUF - 20) &&
+	    !(txctl->flags & ATH9K_TXDESC_CAB)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: TX queue: %d is full, depth: %d\n",
 			__func__,
@@ -354,7 +284,7 @@
 
 	/* Fill flags */
 
-	txctl->flags = ATH9K_TXDESC_CLRDMASK;    /* needed for crypto errors */
+	txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
 
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txctl->flags |= ATH9K_TXDESC_NOACK;
@@ -392,7 +322,7 @@
 		 * incremented by the fragmentation routine.
 		 */
 		if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
-			txctl->ht && sc->sc_txaggr) {
+		    txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 			struct ath_atx_tid *tid;
 
 			tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
@@ -413,50 +343,18 @@
 	}
 	rix = rcs[0].rix;
 
-	/*
-	 * Calculate duration.  This logically belongs in the 802.11
-	 * layer but it lacks sufficient information to calculate it.
-	 */
-	if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
-		u16 dur;
+	if (ieee80211_has_morefrags(fc) ||
+	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
 		/*
-		 * XXX not right with fragmentation.
-		 */
-		if (sc->sc_flags & ATH_PREAMBLE_SHORT)
-			dur = rt->info[rix].spAckDuration;
-		else
-			dur = rt->info[rix].lpAckDuration;
-
-		if (le16_to_cpu(hdr->frame_control) &
-				IEEE80211_FCTL_MOREFRAGS) {
-			dur += dur;  /* Add additional 'SIFS + ACK' */
-
-			/*
-			** Compute size of next fragment in order to compute
-			** durations needed to update NAV.
-			** The last fragment uses the ACK duration only.
-			** Add time for next fragment.
-			*/
-			dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
-					txctl->nextfraglen,
-					rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
-		}
-
-		if (ieee80211_has_morefrags(fc) ||
-		     (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
-			/*
-			**  Force hardware to use computed duration for next
-			**  fragment by disabling multi-rate retry, which
-			**  updates duration based on the multi-rate
-			**  duration table.
-			*/
-			rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
-			rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
-			/* reset tries but keep rate index */
-			rcs[0].tries = ATH_TXMAXTRY;
-		}
-
-		hdr->duration_id = cpu_to_le16(dur);
+		**  Force hardware to use computed duration for next
+		**  fragment by disabling multi-rate retry, which
+		**  updates duration based on the multi-rate
+		**  duration table.
+		*/
+		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
+		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
+		/* reset tries but keep rate index */
+		rcs[0].tries = ATH_TXMAXTRY;
 	}
 
 	/*
@@ -484,12 +382,8 @@
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		antenna = sc->sc_mcastantenna + 1;
 		sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
-	} else
-		antenna = sc->sc_txantenna;
+	}
 
-#ifdef USE_LEGACY_HAL
-	txctl->antenna = antenna;
-#endif
 	return 0;
 }
 
@@ -502,7 +396,6 @@
 {
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ath_xmit_status tx_status;
-	dma_addr_t *pa;
 
 	/*
 	 * Set retry information.
@@ -518,13 +411,12 @@
 	if (!txok) {
 		tx_status.flags |= ATH_TX_ERROR;
 
-		if (bf->bf_isxretried)
+		if (bf_isxretried(bf))
 			tx_status.flags |= ATH_TX_XRETRY;
 	}
 	/* Unmap this frame */
-	pa = get_dma_mem_context(bf, bf_dmacontext);
 	pci_unmap_single(sc->pdev,
-			 *pa,
+			 bf->bf_dmacontext,
 			 skb->len,
 			 PCI_DMA_TODEVICE);
 	/* complete this frame */
@@ -629,7 +521,7 @@
 	if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
 		return 0;
 
-	isaggr = bf->bf_isaggr;
+	isaggr = bf_isaggr(bf);
 	if (isaggr) {
 		seq_st = ATH_DS_BA_SEQ(ds);
 		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
@@ -651,7 +543,7 @@
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
 
-	bf->bf_isretried = 1;
+	bf->bf_state.bf_type |= BUF_RETRY;
 	bf->bf_retries++;
 
 	skb = bf->bf_mpdu;
@@ -698,7 +590,7 @@
 	u8 rc;
 	int streams, pktlen;
 
-	pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
+	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
 	rc = rt->info[rix].rateCode;
 
 	/*
@@ -742,7 +634,7 @@
 	int i, flags, rtsctsena = 0, dynamic_mimops = 0;
 	u32 ctsduration = 0;
 	u8 rix = 0, cix, ctsrate = 0;
-	u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
+	u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
 	struct ath_node *an = (struct ath_node *) bf->bf_node;
 
 	/*
@@ -781,7 +673,7 @@
 	 * let rate series flags determine which rates will actually
 	 * use RTS.
 	 */
-	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
+	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
 		BUG_ON(!an);
 		/*
 		 * 802.11g protection not needed, use our default behavior
@@ -793,7 +685,7 @@
 		 * and the second aggregate should have any protection at all.
 		 */
 		if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
-			if (!bf->bf_aggrburst) {
+			if (!bf_isaggrburst(bf)) {
 				flags = ATH9K_TXDESC_RTSENA;
 				dynamic_mimops = 1;
 			} else {
@@ -806,7 +698,7 @@
 	 * Set protection if aggregate protection on
 	 */
 	if (sc->sc_config.ath_aggr_prot &&
-	    (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
+	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
 		flags = ATH9K_TXDESC_RTSENA;
 		cix = rt->info[sc->sc_protrix].controlRate;
 		rtsctsena = 1;
@@ -815,7 +707,7 @@
 	/*
 	 *  For AR5416 - RTS cannot be followed by a frame larger than 8K.
 	 */
-	if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
+	if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
 		/*
 		 * Ensure that in the case of SM Dynamic power save
 		 * while we are bursting the second aggregate the
@@ -832,7 +724,7 @@
 	/* NB: cix is set above where RTS/CTS is enabled */
 	BUG_ON(cix == 0xff);
 	ctsrate = rt->info[cix].rateCode |
-		(bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
+		(bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
 
 	/*
 	 * Setup HAL rate series
@@ -846,7 +738,7 @@
 		rix = bf->bf_rcs[i].rix;
 
 		series[i].Rate = rt->info[rix].rateCode |
-			(bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
+			(bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
 
 		series[i].Tries = bf->bf_rcs[i].tries;
 
@@ -862,7 +754,7 @@
 			sc, rix, bf,
 			(bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
 			(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
-			bf->bf_shpreamble);
+			bf_isshpreamble(bf));
 
 		if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
 		    (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
@@ -875,7 +767,7 @@
 			 */
 			series[i].ChSel = sc->sc_tx_chainmask;
 		} else {
-			if (bf->bf_ht)
+			if (bf_isht(bf))
 				series[i].ChSel =
 					ath_chainmask_sel_logic(sc, an);
 			else
@@ -908,7 +800,7 @@
 		 *     use the precalculated ACK durations.
 		 */
 		if (flags & ATH9K_TXDESC_RTSENA) {    /* SIFS + CTS */
-			ctsduration += bf->bf_shpreamble ?
+			ctsduration += bf_isshpreamble(bf) ?
 				rt->info[cix].spAckDuration :
 				rt->info[cix].lpAckDuration;
 		}
@@ -916,7 +808,7 @@
 		ctsduration += series[0].PktDuration;
 
 		if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
-			ctsduration += bf->bf_shpreamble ?
+			ctsduration += bf_isshpreamble(bf) ?
 				rt->info[rix].spAckDuration :
 				rt->info[rix].lpAckDuration;
 		}
@@ -932,10 +824,10 @@
 	 * set dur_update_en for l-sig computation except for PS-Poll frames
 	 */
 	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
-				    !bf->bf_ispspoll,
-				    ctsrate,
-				    ctsduration,
-				    series, 4, flags);
+				     !bf_ispspoll(bf),
+				     ctsrate,
+				     ctsduration,
+				     series, 4, flags);
 	if (sc->sc_config.ath_aggr_prot && flags)
 		ath9k_hw_set11n_burstduration(ah, ds, 8192);
 }
@@ -958,7 +850,7 @@
 	BUG_ON(list_empty(bf_head));
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
-	bf->bf_isampdu = 0; /* regular HT frame */
+	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
 
 	skb = (struct sk_buff *)bf->bf_mpdu;
 	tx_info = IEEE80211_SKB_CB(skb);
@@ -998,7 +890,7 @@
 
 	while (!list_empty(&tid->buf_q)) {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-		ASSERT(!bf->bf_isretried);
+		ASSERT(!bf_isretried(bf));
 		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
 		ath_tx_send_normal(sc, txq, tid, &bf_head);
 	}
@@ -1025,7 +917,7 @@
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
 	int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
 
-	isaggr = bf->bf_isaggr;
+	isaggr = bf_isaggr(bf);
 	if (isaggr) {
 		if (txok) {
 			if (ATH_DS_TX_BA(ds)) {
@@ -1047,7 +939,7 @@
 				 * when perform internal reset in this routine.
 				 * Only enable reset in STA mode for now.
 				 */
-				if (sc->sc_opmode == ATH9K_M_STA)
+				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
 					needreset = 1;
 			}
 		} else {
@@ -1075,7 +967,7 @@
 					ath_tx_set_retry(sc, bf);
 					txpending = 1;
 				} else {
-					bf->bf_isxretried = 1;
+					bf->bf_state.bf_type |= BUF_XRETRY;
 					txfail = 1;
 					sendbar = 1;
 				}
@@ -1175,11 +1067,8 @@
 						tbf->bf_lastfrm->bf_desc);
 
 					/* copy the DMA context */
-					copy_dma_mem_context(
-						get_dma_mem_context(tbf,
-							bf_dmacontext),
-						get_dma_mem_context(bf_last,
-							bf_dmacontext));
+					tbf->bf_dmacontext =
+						bf_last->bf_dmacontext;
 				}
 				list_add_tail(&tbf->list, &bf_head);
 			} else {
@@ -1188,7 +1077,7 @@
 				 * software retry
 				 */
 				ath9k_hw_cleartxdesc(sc->sc_ah,
-					bf->bf_lastfrm->bf_desc);
+						     bf->bf_lastfrm->bf_desc);
 			}
 
 			/*
@@ -1242,7 +1131,7 @@
 	}
 
 	if (needreset)
-		ath_internal_reset(sc);
+		ath_reset(sc, false);
 
 	return;
 }
@@ -1331,7 +1220,7 @@
 
 		txq->axq_depth--;
 
-		if (bf->bf_isaggr)
+		if (bf_isaggr(bf))
 			txq->axq_aggr_depth--;
 
 		txok = (ds->ds_txstat.ts_status == 0);
@@ -1345,14 +1234,14 @@
 			spin_unlock_bh(&sc->sc_txbuflock);
 		}
 
-		if (!bf->bf_isampdu) {
+		if (!bf_isampdu(bf)) {
 			/*
 			 * This frame is sent out as a single frame.
 			 * Use hardware retry status for this frame.
 			 */
 			bf->bf_retries = ds->ds_txstat.ts_longretry;
 			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
-				bf->bf_isxretried = 1;
+				bf->bf_state.bf_type |= BUF_XRETRY;
 			nbad = 0;
 		} else {
 			nbad = ath_tx_num_badfrms(sc, bf, txok);
@@ -1368,7 +1257,7 @@
 			if (ds->ds_txstat.ts_status == 0)
 				nacked++;
 
-			if (bf->bf_isdata) {
+			if (bf_isdata(bf)) {
 				if (isrifs)
 					tmp_ds = bf->bf_rifslast->bf_desc;
 				else
@@ -1384,7 +1273,7 @@
 		/*
 		 * Complete this transmit unit
 		 */
-		if (bf->bf_isampdu)
+		if (bf_isampdu(bf))
 			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
 		else
 			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
@@ -1406,7 +1295,7 @@
 		/*
 		 * schedule any pending packets if aggregation is enabled
 		 */
-		if (sc->sc_txaggr)
+		if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
 		spin_unlock_bh(&txq->axq_lock);
 	}
@@ -1430,10 +1319,9 @@
 	struct ath_hal *ah = sc->sc_ah;
 	int i;
 	int npend = 0;
-	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
 
 	/* XXX return value */
-	if (!sc->sc_invalid) {
+	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
 				ath_tx_stopdma(sc, &sc->sc_txq[i]);
@@ -1454,10 +1342,11 @@
 			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
 
 		spin_lock_bh(&sc->sc_resetlock);
-		if (!ath9k_hw_reset(ah, sc->sc_opmode,
-			&sc->sc_curchan, ht_macmode,
-			sc->sc_tx_chainmask, sc->sc_rx_chainmask,
-			sc->sc_ht_extprotspacing, true, &status)) {
+		if (!ath9k_hw_reset(ah,
+				    sc->sc_ah->ah_curchan,
+				    sc->sc_ht_info.tx_chan_width,
+				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+				    sc->sc_ht_extprotspacing, true, &status)) {
 
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"%s: unable to reset hardware; hal status %u\n",
@@ -1481,7 +1370,7 @@
 {
 	int index, cindex;
 
-	if (bf->bf_isretried)
+	if (bf_isretried(bf))
 		return;
 
 	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
@@ -1516,7 +1405,7 @@
 	BUG_ON(list_empty(bf_head));
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
-	bf->bf_isampdu = 1;
+	bf->bf_state.bf_type |= BUF_AMPDU;
 	bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
 	bf->bf_tidno = txctl->tidno;
 
@@ -1860,7 +1749,7 @@
 		if (bf->bf_nframes == 1) {
 			ASSERT(bf->bf_lastfrm == bf_last);
 
-			bf->bf_isaggr = 0;
+			bf->bf_state.bf_type &= ~BUF_AGGR;
 			/*
 			 * clear aggr bits for every descriptor
 			 * XXX TODO: is there a way to optimize it?
@@ -1877,7 +1766,7 @@
 		/*
 		 * setup first desc with rate and aggr info
 		 */
-		bf->bf_isaggr  = 1;
+		bf->bf_state.bf_type |= BUF_AGGR;
 		ath_buf_set_rate(sc, bf);
 		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
 
@@ -1925,7 +1814,7 @@
 		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
 
 		/* update baw for software retried frame */
-		if (bf->bf_isretried)
+		if (bf_isretried(bf))
 			ath_tx_update_baw(sc, tid, bf->bf_seqno);
 
 		/*
@@ -1990,13 +1879,18 @@
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	struct ath_hal *ah = sc->sc_ah;
-	struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
+	struct ath_txq *txq;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_rc_series *rcs;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *tx_info =  IEEE80211_SKB_CB(skb);
 	__le16 fc = hdr->frame_control;
 
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
+		txq = sc->sc_cabq;
+	else
+		txq = &sc->sc_txq[txctl->qnum];
+
 	/* For each sglist entry, allocate an ath_buf for DMA */
 	INIT_LIST_HEAD(&bf_head);
 	spin_lock_bh(&sc->sc_txbuflock);
@@ -2014,11 +1908,21 @@
 	/* set up this buffer */
 	ATH_TXBUF_RESET(bf);
 	bf->bf_frmlen = txctl->frmlen;
-	bf->bf_isdata = ieee80211_is_data(fc);
-	bf->bf_isbar = ieee80211_is_back_req(fc);
-	bf->bf_ispspoll = ieee80211_is_pspoll(fc);
+
+	ieee80211_is_data(fc) ?
+		(bf->bf_state.bf_type |= BUF_DATA) :
+		(bf->bf_state.bf_type &= ~BUF_DATA);
+	ieee80211_is_back_req(fc) ?
+		(bf->bf_state.bf_type |= BUF_BAR) :
+		(bf->bf_state.bf_type &= ~BUF_BAR);
+	ieee80211_is_pspoll(fc) ?
+		(bf->bf_state.bf_type |= BUF_PSPOLL) :
+		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
+	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
+		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
+		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
+
 	bf->bf_flags = txctl->flags;
-	bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
 	bf->bf_keytype = txctl->keytype;
 	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
 	rcs = tx_info_priv->rcs;
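
Note: the per-buffer booleans become BUF_* bits in bf->bf_state.bf_type, set and cleared with the conditional expressions above and read back through the bf_is*() accessors. An equivalent, purely illustrative helper (not part of this patch) would be:

	/* Hypothetical helper mirroring the ternary pattern above. */
	static inline void example_bf_set_type(struct ath_buf *bf, int type, bool on)
	{
		if (on)
			bf->bf_state.bf_type |= type;
		else
			bf->bf_state.bf_type &= ~type;
	}

	/* e.g.  example_bf_set_type(bf, BUF_DATA, ieee80211_is_data(fc)); */
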
@@ -2038,8 +1942,7 @@
 	/*
 	 * Save the DMA context in the first ath_buf
 	 */
-	copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
-			     get_dma_mem_context(txctl, dmacontext));
+	bf->bf_dmacontext = txctl->dmacontext;
 
 	/*
 	 * Formulate first tx descriptor with tx controls.
@@ -2060,11 +1963,13 @@
 			    ds);                /* first descriptor */
 
 	bf->bf_lastfrm = bf;
-	bf->bf_ht = txctl->ht;
+	(txctl->ht) ?
+		(bf->bf_state.bf_type |= BUF_HT) :
+		(bf->bf_state.bf_type &= ~BUF_HT);
 
 	spin_lock_bh(&txq->axq_lock);
 
-	if (txctl->ht && sc->sc_txaggr) {
+	if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 		struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
 		if (ath_aggr_query(sc, an, txctl->tidno)) {
 			/*
@@ -2090,27 +1995,7 @@
 			bf->bf_tidno = txctl->tidno;
 		}
 
-		if (is_multicast_ether_addr(hdr->addr1)) {
-			struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
-
-			/*
-			 * When servicing one or more stations in power-save
-			 * mode (or) if there is some mcast data waiting on
-			 * mcast queue (to prevent out of order delivery of
-			 * mcast,bcast packets) multicast frames must be
-			 * buffered until after the beacon. We use the private
-			 * mcast queue for that.
-			 */
-			/* XXX? more bit in 802.11 frame header */
-			spin_lock_bh(&avp->av_mcastq.axq_lock);
-			if (txctl->ps || avp->av_mcastq.axq_depth)
-				ath_tx_mcastqaddbuf(sc,
-					&avp->av_mcastq, &bf_head);
-			else
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			spin_unlock_bh(&avp->av_mcastq.axq_lock);
-		} else
-			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		ath_tx_txqaddbuf(sc, txq, &bf_head);
 	}
 	spin_unlock_bh(&txq->axq_lock);
 	return 0;
@@ -2118,30 +2003,31 @@
 
 static void xmit_map_sg(struct ath_softc *sc,
 			struct sk_buff *skb,
-			dma_addr_t *pa,
 			struct ath_tx_control *txctl)
 {
 	struct ath_xmit_status tx_status;
 	struct ath_atx_tid *tid;
 	struct scatterlist sg;
 
-	*pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
+					   skb->len, PCI_DMA_TODEVICE);
 
 	/* setup S/G list */
 	memset(&sg, 0, sizeof(struct scatterlist));
-	sg_dma_address(&sg) = *pa;
+	sg_dma_address(&sg) = txctl->dmacontext;
 	sg_dma_len(&sg) = skb->len;
 
 	if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
 		/*
 		 *  We have to do drop frame here.
 		 */
-		pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
+		pci_unmap_single(sc->pdev, txctl->dmacontext,
+				 skb->len, PCI_DMA_TODEVICE);
 
 		tx_status.retries = 0;
 		tx_status.flags = ATH_TX_ERROR;
 
-		if (txctl->ht && sc->sc_txaggr) {
+		if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 			/* Reclaim the seqno. */
 			tid = ATH_AN_2_TID((struct ath_node *)
 				txctl->an, txctl->tidno);
@@ -2162,7 +2048,7 @@
 
 		/* Setup tx descriptors */
 		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
-			"tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
+			"tx", nbufs, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"%s: failed to allocate tx descriptors: %d\n",
@@ -2403,6 +2289,7 @@
 	struct ath_tx_control txctl;
 	int error = 0;
 
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	error = ath_tx_prepare(sc, skb, &txctl);
 	if (error == 0)
 		/*
@@ -2410,9 +2297,7 @@
 		 * ath_tx_start_dma() will be called either synchronously
 		 * or asynchronously once DMA is complete.
 		 */
-		xmit_map_sg(sc, skb,
-			    get_dma_mem_context(&txctl, dmacontext),
-			    &txctl);
+		xmit_map_sg(sc, skb, &txctl);
 	else
 		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
 
@@ -2424,8 +2309,7 @@
 
 void ath_tx_tasklet(struct ath_softc *sc)
 {
-	u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
-	int i, nacked = 0;
+	int i;
 	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
 
 	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
@@ -2435,10 +2319,8 @@
 	 */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
-			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
+			ath_tx_processq(sc, &sc->sc_txq[i]);
 	}
-	if (nacked)
-		sc->sc_lastrx = tsf;
 }
 
 void ath_tx_draintxq(struct ath_softc *sc,
@@ -2486,14 +2368,14 @@
 
 		spin_unlock_bh(&txq->axq_lock);
 
-		if (bf->bf_isampdu)
+		if (bf_isampdu(bf))
 			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
 		else
 			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
 	}
 
 	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		if (!retry_tx) {
 			spin_lock_bh(&txq->axq_lock);
 			ath_txq_drain_pending_buffers(sc, txq,
@@ -2509,7 +2391,7 @@
 {
 	/* stop beacon queue. The beacon will be freed when
 	 * we go to INIT state */
-	if (!sc->sc_invalid) {
+	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
 		DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
 			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
@@ -2536,7 +2418,7 @@
 	struct ath_atx_tid *txtid;
 	DECLARE_MAC_BUF(mac);
 
-	if (!sc->sc_txaggr)
+	if (!(sc->sc_flags & SC_OP_TXAGGR))
 		return AGGR_NOT_REQUIRED;
 
 	/* ADDBA exchange must be completed before sending aggregates */
@@ -2583,7 +2465,7 @@
 		return -1;
 	}
 
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		txtid = ATH_AN_2_TID(an, tid);
 		txtid->addba_exchangeinprogress = 1;
 		ath_tx_pause_tid(sc, txtid);
@@ -2647,7 +2529,7 @@
 	spin_lock_bh(&txq->axq_lock);
 	while (!list_empty(&txtid->buf_q)) {
 		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
-		if (!bf->bf_isretried) {
+		if (!bf_isretried(bf)) {
 			/*
 			 * NB: it's based on the assumption that
 			 * software retried frame will always stay
@@ -2743,7 +2625,7 @@
 
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		struct ath_atx_tid *tid;
 		struct ath_atx_ac *ac;
 		int tidno, acno;
@@ -2855,7 +2737,7 @@
 
 void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		struct ath_atx_tid *tid;
 		int tidno, i;
 
@@ -2869,3 +2751,57 @@
 		}
 	}
 }
+
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath_tx_control txctl;
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
+	/* Add the padding after the header if this is not already done */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdrlen & 3) {
+		padsize = hdrlen % 4;
+		if (skb_headroom(skb) < padsize) {
+			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
+				"failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, hdrlen);
+	}
+
+	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
+		__func__,
+		skb);
+
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
+	txctl.flags = ATH9K_TXDESC_CAB;
+	if (ath_tx_prepare(sc, skb, &txctl) == 0) {
+		/*
+		 * Start DMA mapping.
+		 * ath_tx_start_dma() will be called either synchronously
+		 * or asynchronously once DMA is complete.
+		 */
+		xmit_map_sg(sc, skb, &txctl);
+	} else {
+		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
+		dev_kfree_skb_any(skb);
+	}
+}
+
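
Note: ath_tx_cabq() pads the frame so that the payload following the 802.11 header starts on a 4-byte boundary; since 802.11 header lengths are even, hdrlen % 4 is either 0 or 2. A standalone sketch of the arithmetic for a 26-byte QoS data header (user-space check, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned int hdrlen = 26;		/* QoS data header */
		unsigned int padsize = hdrlen % 4;	/* 2 pad bytes pushed */

		assert(hdrlen & 3);			/* padding is needed */
		assert((hdrlen + padsize) % 4 == 0);	/* payload lands 4-byte aligned */
		return 0;
	}
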
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bd65c48..ecb02bd 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2258,7 +2258,7 @@
 
 static int atmel_set_scan(struct net_device *dev,
 			  struct iw_request_info *info,
-			  struct iw_param *vwrq,
+			  struct iw_point *dwrq,
 			  char *extra)
 {
 	struct atmel_private *priv = netdev_priv(dev);
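
Note: the third argument of the SIOCSIWSCAN handler is an iw_point, which may carry an optional struct iw_scan_req in 'extra' for directed scans. A hedged sketch of that pattern (example_set_scan() is illustrative, not the atmel handler):

	static int example_set_scan(struct net_device *dev,
				    struct iw_request_info *info,
				    struct iw_point *dwrq, char *extra)
	{
		/* A directed scan passes a struct iw_scan_req via 'extra'. */
		if (extra && dwrq->length == sizeof(struct iw_scan_req)) {
			struct iw_scan_req *req = (struct iw_scan_req *)extra;

			pr_debug("scan for ESSID of length %u\n", req->essid_len);
		}
		return 0;
	}
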
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 12617cd..d2388e8 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -158,7 +158,7 @@
 	DEBUG(0, "atmel_attach()\n");
 
 	/* Interrupt setup */
-	p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+	p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
 	p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	p_dev->irq.Handler = NULL;
 
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 1fa043d..1f81d36 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -80,6 +80,18 @@
 
 	  SAY N.
 
+config B43_PHY_LP
+	bool "IEEE 802.11g LP-PHY support (BROKEN)"
+	depends on B43 && EXPERIMENTAL && BROKEN
+	---help---
+	  Support for the LP-PHY.
+	  The LP-PHY is an IEEE 802.11g based PHY built into some notebooks
+	  and embedded devices.
+
+	  THIS IS BROKEN AND DOES NOT WORK YET.
+
+	  SAY N.
+
 # This config option automatically enables b43 LEDS support,
 # if it's possible.
 config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 8c52b0b..14a02b3 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,8 +1,11 @@
 b43-y				+= main.o
 b43-y				+= tables.o
 b43-$(CONFIG_B43_NPHY)		+= tables_nphy.o
-b43-y				+= phy.o
-b43-$(CONFIG_B43_NPHY)		+= nphy.o
+b43-y				+= phy_common.o
+b43-y				+= phy_g.o
+b43-y				+= phy_a.o
+b43-$(CONFIG_B43_NPHY)		+= phy_n.o
+b43-$(CONFIG_B43_PHY_LP)	+= phy_lp.o
 b43-y				+= sysfs.o
 b43-y				+= xmit.o
 b43-y				+= lo.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index edcdfa3..427b820 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -12,7 +12,7 @@
 #include "leds.h"
 #include "rfkill.h"
 #include "lo.h"
-#include "phy.h"
+#include "phy_common.h"
 
 
 /* The unique identifier of the firmware that's officially supported by
@@ -173,6 +173,11 @@
 #define B43_SHM_SH_CHAN			0x00A0	/* Current channel (low 8bit only) */
 #define  B43_SHM_SH_CHAN_5GHZ		0x0100	/* Bit set, if 5Ghz channel */
 #define B43_SHM_SH_BCMCFIFOID		0x0108	/* Last posted cookie to the bcast/mcast FIFO */
+/* TSSI information */
+#define B43_SHM_SH_TSSI_CCK		0x0058	/* TSSI for last 4 CCK frames (32bit) */
+#define B43_SHM_SH_TSSI_OFDM_A		0x0068	/* TSSI for last 4 OFDM frames (32bit) */
+#define B43_SHM_SH_TSSI_OFDM_G		0x0070	/* TSSI for last 4 OFDM frames (32bit) */
+#define  B43_TSSI_MAX			0x7F	/* Max value for one TSSI value */
 /* SHM_SHARED TX FIFO variables */
 #define B43_SHM_SH_SIZE01		0x0098	/* TX FIFO size for FIFO 0 (low) and 1 (high) */
 #define B43_SHM_SH_SIZE23		0x009A	/* TX FIFO size for FIFO 2 and 3 */
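
Note: each of the new B43_SHM_SH_TSSI_* words packs the TSSI readings of the last four frames into one 32-bit SHM value, with B43_TSSI_MAX as the per-sample ceiling. A hedged sketch of averaging such a word; the helper name is made up, and b43_shm_read32()/B43_SHM_SHARED are assumed from the existing driver:

	/* Illustration only: average the four packed TSSI samples. */
	static int example_average_tssi(struct b43_wldev *dev, u16 shm_offset)
	{
		u32 tmp = b43_shm_read32(dev, B43_SHM_SHARED, shm_offset);
		int i, sum = 0;

		for (i = 0; i < 4; i++) {
			sum += (tmp & B43_TSSI_MAX);
			tmp >>= 8;
		}
		return sum / 4;
	}
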
@@ -508,122 +513,6 @@
 } __attribute__((__packed__));
 
 
-struct b43_phy {
-	/* Band support flags. */
-	bool supports_2ghz;
-	bool supports_5ghz;
-
-	/* GMODE bit enabled? */
-	bool gmode;
-
-	/* Analog Type */
-	u8 analog;
-	/* B43_PHYTYPE_ */
-	u8 type;
-	/* PHY revision number. */
-	u8 rev;
-
-	/* Radio versioning */
-	u16 radio_manuf;	/* Radio manufacturer */
-	u16 radio_ver;		/* Radio version */
-	u8 radio_rev;		/* Radio revision */
-
-	bool dyn_tssi_tbl;	/* tssi2dbm is kmalloc()ed. */
-
-	/* ACI (adjacent channel interference) flags. */
-	bool aci_enable;
-	bool aci_wlan_automatic;
-	bool aci_hw_rssi;
-
-	/* Radio switched on/off */
-	bool radio_on;
-	struct {
-		/* Values saved when turning the radio off.
-		 * They are needed when turning it on again. */
-		bool valid;
-		u16 rfover;
-		u16 rfoverval;
-	} radio_off_context;
-
-	u16 minlowsig[2];
-	u16 minlowsigpos[2];
-
-	/* TSSI to dBm table in use */
-	const s8 *tssi2dbm;
-	/* Target idle TSSI */
-	int tgt_idle_tssi;
-	/* Current idle TSSI */
-	int cur_idle_tssi;
-
-	/* LocalOscillator control values. */
-	struct b43_txpower_lo_control *lo_control;
-	/* Values from b43_calc_loopback_gain() */
-	s16 max_lb_gain;	/* Maximum Loopback gain in hdB */
-	s16 trsw_rx_gain;	/* TRSW RX gain in hdB */
-	s16 lna_lod_gain;	/* LNA lod */
-	s16 lna_gain;		/* LNA */
-	s16 pga_gain;		/* PGA */
-
-	/* Desired TX power level (in dBm).
-	 * This is set by the user and adjusted in b43_phy_xmitpower(). */
-	u8 power_level;
-	/* A-PHY TX Power control value. */
-	u16 txpwr_offset;
-
-	/* Current TX power level attenuation control values */
-	struct b43_bbatt bbatt;
-	struct b43_rfatt rfatt;
-	u8 tx_control;		/* B43_TXCTL_XXX */
-
-	/* Hardware Power Control enabled? */
-	bool hardware_power_control;
-
-	/* Current Interference Mitigation mode */
-	int interfmode;
-	/* Stack of saved values from the Interference Mitigation code.
-	 * Each value in the stack is layed out as follows:
-	 * bit 0-11:  offset
-	 * bit 12-15: register ID
-	 * bit 16-32: value
-	 * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT
-	 */
-#define B43_INTERFSTACK_SIZE	26
-	u32 interfstack[B43_INTERFSTACK_SIZE];	//FIXME: use a data structure
-
-	/* Saved values from the NRSSI Slope calculation */
-	s16 nrssi[2];
-	s32 nrssislope;
-	/* In memory nrssi lookup table. */
-	s8 nrssi_lt[64];
-
-	/* current channel */
-	u8 channel;
-
-	u16 lofcal;
-
-	u16 initval;		//FIXME rename?
-
-	/* PHY TX errors counter. */
-	atomic_t txerr_cnt;
-
-	/* The device does address auto increment for the OFDM tables.
-	 * We cache the previously used address here and omit the address
-	 * write on the next table access, if possible. */
-	u16 ofdmtab_addr; /* The address currently set in hardware. */
-	enum { /* The last data flow direction. */
-		B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
-		B43_OFDMTAB_DIRECTION_READ,
-		B43_OFDMTAB_DIRECTION_WRITE,
-	} ofdmtab_addr_direction;
-
-#if B43_DEBUG
-	/* Manual TX-power control enabled? */
-	bool manual_txpower_control;
-	/* PHY registers locked by b43_phy_lock()? */
-	bool phy_locked;
-#endif /* B43_DEBUG */
-};
-
 /* Data structures for DMA transmission, per 80211 core. */
 struct b43_dma {
 	struct b43_dmaring *tx_ring_AC_BK; /* Background */
@@ -680,7 +569,7 @@
 #define B43_QOS_VOICE		B43_QOS_PARAMS(3)
 
 /* QOS parameter hardware data structure offsets. */
-#define B43_NR_QOSPARAMS	22
+#define B43_NR_QOSPARAMS	16
 enum {
 	B43_QOSPARAM_TXOP = 0,
 	B43_QOSPARAM_CWMIN,
@@ -696,8 +585,6 @@
 struct b43_qos_params {
 	/* The QOS parameters */
 	struct ieee80211_tx_queue_params p;
-	/* Does this need to get uploaded to hardware? */
-	bool need_hw_update;
 };
 
 struct b43_wldev;
@@ -759,11 +646,13 @@
 	bool beacon_templates_virgin; /* Never wrote the templates? */
 	struct work_struct beacon_update_trigger;
 
-	/* The current QOS parameters for the 4 queues.
-	 * This is protected by the irq_lock. */
+	/* The current QOS parameters for the 4 queues. */
 	struct b43_qos_params qos_params[4];
-	/* Workqueue for updating QOS parameters in hardware. */
-	struct work_struct qos_update_work;
+
+	/* Work for adjustment of the transmission power.
+	 * This is scheduled when we determine that the actual TX output
+	 * power doesn't match what we want. */
+	struct work_struct txpower_adjust_work;
 };
 
 /* In-memory representation of a cached microcode file. */
@@ -908,6 +797,15 @@
 	return (wl->operating && wl->if_type == type);
 }
 
+/**
+ * b43_current_band - Returns the currently used band.
+ * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
+ */
+static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
+{
+	return wl->hw->conf.channel->band;
+}
+
 static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
 {
 	return ssb_read16(dev->dev, offset);
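
Note: b43_current_band() just reflects the band of the channel mac80211 currently has configured for the device. A minimal usage sketch (example_log_band() is hypothetical):

	static void example_log_band(struct b43_wldev *dev)
	{
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			b43dbg(dev->wl, "operating on a 5 GHz channel\n");
		else
			b43dbg(dev->wl, "operating on a 2.4 GHz channel\n");
	}
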
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 29851bc..06a01da 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -443,76 +443,6 @@
 	return count;
 }
 
-static ssize_t txpower_g_read_file(struct b43_wldev *dev,
-				   char *buf, size_t bufsize)
-{
-	ssize_t count = 0;
-
-	if (dev->phy.type != B43_PHYTYPE_G) {
-		fappend("Device is not a G-PHY\n");
-		goto out;
-	}
-	fappend("Control:               %s\n", dev->phy.manual_txpower_control ?
-		"MANUAL" : "AUTOMATIC");
-	fappend("Baseband attenuation:  %u\n", dev->phy.bbatt.att);
-	fappend("Radio attenuation:     %u\n", dev->phy.rfatt.att);
-	fappend("TX Mixer Gain:         %s\n",
-		(dev->phy.tx_control & B43_TXCTL_TXMIX) ? "ON" : "OFF");
-	fappend("PA Gain 2dB:           %s\n",
-		(dev->phy.tx_control & B43_TXCTL_PA2DB) ? "ON" : "OFF");
-	fappend("PA Gain 3dB:           %s\n",
-		(dev->phy.tx_control & B43_TXCTL_PA3DB) ? "ON" : "OFF");
-	fappend("\n\n");
-	fappend("You can write to this file:\n");
-	fappend("Writing \"auto\" enables automatic txpower control.\n");
-	fappend
-	    ("Writing the attenuation values as \"bbatt rfatt txmix pa2db pa3db\" "
-	     "enables manual txpower control.\n");
-	fappend("Example: 5 4 0 0 1\n");
-	fappend("Enables manual control with Baseband attenuation 5, "
-		"Radio attenuation 4, No TX Mixer Gain, "
-		"No PA Gain 2dB, With PA Gain 3dB.\n");
-out:
-	return count;
-}
-
-static int txpower_g_write_file(struct b43_wldev *dev,
-				const char *buf, size_t count)
-{
-	if (dev->phy.type != B43_PHYTYPE_G)
-		return -ENODEV;
-	if ((count >= 4) && (memcmp(buf, "auto", 4) == 0)) {
-		/* Automatic control */
-		dev->phy.manual_txpower_control = 0;
-		b43_phy_xmitpower(dev);
-	} else {
-		int bbatt = 0, rfatt = 0, txmix = 0, pa2db = 0, pa3db = 0;
-		/* Manual control */
-		if (sscanf(buf, "%d %d %d %d %d", &bbatt, &rfatt,
-			   &txmix, &pa2db, &pa3db) != 5)
-			return -EINVAL;
-		b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
-		dev->phy.manual_txpower_control = 1;
-		dev->phy.bbatt.att = bbatt;
-		dev->phy.rfatt.att = rfatt;
-		dev->phy.tx_control = 0;
-		if (txmix)
-			dev->phy.tx_control |= B43_TXCTL_TXMIX;
-		if (pa2db)
-			dev->phy.tx_control |= B43_TXCTL_PA2DB;
-		if (pa3db)
-			dev->phy.tx_control |= B43_TXCTL_PA3DB;
-		b43_phy_lock(dev);
-		b43_radio_lock(dev);
-		b43_set_txpower_g(dev, &dev->phy.bbatt,
-				  &dev->phy.rfatt, dev->phy.tx_control);
-		b43_radio_unlock(dev);
-		b43_phy_unlock(dev);
-	}
-
-	return 0;
-}
-
 /* wl->irq_lock is locked */
 static int restart_write_file(struct b43_wldev *dev,
 			      const char *buf, size_t count)
@@ -560,7 +490,7 @@
 		err = -ENODEV;
 		goto out;
 	}
-	lo = phy->lo_control;
+	lo = phy->g->lo_control;
 	fappend("-- Local Oscillator calibration data --\n\n");
 	fappend("HW-power-control enabled: %d\n",
 		dev->phy.hardware_power_control);
@@ -578,8 +508,8 @@
 	list_for_each_entry(cal, &lo->calib_list, list) {
 		bool active;
 
-		active = (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
-			  b43_compare_rfatt(&cal->rfatt, &phy->rfatt));
+		active = (b43_compare_bbatt(&cal->bbatt, &phy->g->bbatt) &&
+			  b43_compare_rfatt(&cal->rfatt, &phy->g->rfatt));
 		fappend("BB(%d), RF(%d,%d)  ->  I=%d, Q=%d  "
 			"(expires in %lu sec)%s\n",
 			cal->bbatt.att,
@@ -763,7 +693,6 @@
 B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1);
 B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1);
 B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0);
-B43_DEBUGFS_FOPS(txpower_g, txpower_g_read_file, txpower_g_write_file, 0);
 B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1);
 B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0);
 
@@ -877,7 +806,6 @@
 	ADD_FILE(mmio32write, 0200);
 	ADD_FILE(tsf, 0600);
 	ADD_FILE(txstat, 0400);
-	ADD_FILE(txpower_g, 0600);
 	ADD_FILE(restart, 0200);
 	ADD_FILE(loctls, 0400);
 
@@ -907,7 +835,6 @@
 	debugfs_remove(e->file_mmio32write.dentry);
 	debugfs_remove(e->file_tsf.dentry);
 	debugfs_remove(e->file_txstat.dentry);
-	debugfs_remove(e->file_txpower_g.dentry);
 	debugfs_remove(e->file_restart.dentry);
 	debugfs_remove(e->file_loctls.dentry);
 
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 9c854d6..6a18a14 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -29,7 +29,7 @@
 
 #include "b43.h"
 #include "lo.h"
-#include "phy.h"
+#include "phy_g.h"
 #include "main.h"
 
 #include <linux/delay.h>
@@ -174,7 +174,8 @@
 static void lo_measure_txctl_values(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	u16 reg, mask;
 	u16 trsw_rx, pga;
 	u16 radio_pctl_reg;
@@ -195,7 +196,7 @@
 		int lb_gain;	/* Loopback gain (in dB) */
 
 		trsw_rx = 0;
-		lb_gain = phy->max_lb_gain / 2;
+		lb_gain = gphy->max_lb_gain / 2;
 		if (lb_gain > 10) {
 			radio_pctl_reg = 0;
 			pga = abs(10 - lb_gain) / 6;
@@ -226,7 +227,7 @@
 	}
 	b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
 				      & 0xFFF0) | radio_pctl_reg);
-	b43_phy_set_baseband_attenuation(dev, 2);
+	b43_gphy_set_baseband_attenuation(dev, 2);
 
 	reg = lo_txctl_register_table(dev, &mask, NULL);
 	mask = ~mask;
@@ -277,7 +278,8 @@
 static void lo_read_power_vector(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	int i;
 	u64 tmp;
 	u64 power_vector = 0;
@@ -298,6 +300,7 @@
 				   s16 max_rx_gain, int use_trsw_rx)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	u16 tmp;
 
 	if (max_rx_gain < 0)
@@ -308,7 +311,7 @@
 		int trsw_rx_gain;
 
 		if (use_trsw_rx) {
-			trsw_rx_gain = phy->trsw_rx_gain / 2;
+			trsw_rx_gain = gphy->trsw_rx_gain / 2;
 			if (max_rx_gain >= trsw_rx_gain) {
 				trsw_rx_gain = max_rx_gain - trsw_rx_gain;
 				trsw_rx = 0x20;
@@ -316,38 +319,38 @@
 		} else
 			trsw_rx_gain = max_rx_gain;
 		if (trsw_rx_gain < 9) {
-			phy->lna_lod_gain = 0;
+			gphy->lna_lod_gain = 0;
 		} else {
-			phy->lna_lod_gain = 1;
+			gphy->lna_lod_gain = 1;
 			trsw_rx_gain -= 8;
 		}
 		trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D);
-		phy->pga_gain = trsw_rx_gain / 3;
-		if (phy->pga_gain >= 5) {
-			phy->pga_gain -= 5;
-			phy->lna_gain = 2;
+		gphy->pga_gain = trsw_rx_gain / 3;
+		if (gphy->pga_gain >= 5) {
+			gphy->pga_gain -= 5;
+			gphy->lna_gain = 2;
 		} else
-			phy->lna_gain = 0;
+			gphy->lna_gain = 0;
 	} else {
-		phy->lna_gain = 0;
-		phy->trsw_rx_gain = 0x20;
+		gphy->lna_gain = 0;
+		gphy->trsw_rx_gain = 0x20;
 		if (max_rx_gain >= 0x14) {
-			phy->lna_lod_gain = 1;
-			phy->pga_gain = 2;
+			gphy->lna_lod_gain = 1;
+			gphy->pga_gain = 2;
 		} else if (max_rx_gain >= 0x12) {
-			phy->lna_lod_gain = 1;
-			phy->pga_gain = 1;
+			gphy->lna_lod_gain = 1;
+			gphy->pga_gain = 1;
 		} else if (max_rx_gain >= 0xF) {
-			phy->lna_lod_gain = 1;
-			phy->pga_gain = 0;
+			gphy->lna_lod_gain = 1;
+			gphy->pga_gain = 0;
 		} else {
-			phy->lna_lod_gain = 0;
-			phy->pga_gain = 0;
+			gphy->lna_lod_gain = 0;
+			gphy->pga_gain = 0;
 		}
 	}
 
 	tmp = b43_radio_read16(dev, 0x7A);
-	if (phy->lna_lod_gain == 0)
+	if (gphy->lna_lod_gain == 0)
 		tmp &= ~0x0008;
 	else
 		tmp |= 0x0008;
@@ -392,10 +395,11 @@
 {
 	struct ssb_sprom *sprom = &dev->dev->bus->sprom;
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	u16 tmp;
 
-	if (b43_has_hardware_pctl(phy)) {
+	if (b43_has_hardware_pctl(dev)) {
 		sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK);
 		sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01));
 		sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL);
@@ -496,7 +500,7 @@
 		b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802);
 	if (phy->rev >= 2)
 		b43_dummy_transmission(dev);
-	b43_radio_selectchannel(dev, 6, 0);
+	b43_gphy_channel_switch(dev, 6, 0);
 	b43_radio_read16(dev, 0x51);	/* dummy read */
 	if (phy->type == B43_PHYTYPE_G)
 		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0);
@@ -520,18 +524,19 @@
 			       struct lo_g_saved_values *sav)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	u16 tmp;
 
 	if (phy->rev >= 2) {
 		b43_phy_write(dev, B43_PHY_PGACTL, 0xE300);
-		tmp = (phy->pga_gain << 8);
+		tmp = (gphy->pga_gain << 8);
 		b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0);
 		udelay(5);
 		b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2);
 		udelay(2);
 		b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3);
 	} else {
-		tmp = (phy->pga_gain | 0xEFA0);
+		tmp = (gphy->pga_gain | 0xEFA0);
 		b43_phy_write(dev, B43_PHY_PGACTL, tmp);
 	}
 	if (phy->type == B43_PHYTYPE_G) {
@@ -572,7 +577,7 @@
 		b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E);
 		b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0);
 	}
-	if (b43_has_hardware_pctl(phy)) {
+	if (b43_has_hardware_pctl(dev)) {
 		tmp = (sav->phy_lo_mask & 0xBFFF);
 		b43_phy_write(dev, B43_PHY_LO_MASK, tmp);
 		b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01);
@@ -580,7 +585,7 @@
 		b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14);
 		b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl);
 	}
-	b43_radio_selectchannel(dev, sav->old_channel, 1);
+	b43_gphy_channel_switch(dev, sav->old_channel, 1);
 }
 
 struct b43_lo_g_statemachine {
@@ -597,6 +602,7 @@
 				    struct b43_lo_g_statemachine *d)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	struct b43_loctl test_loctl;
 	struct b43_loctl orig_loctl;
 	struct b43_loctl prev_loctl = {
@@ -646,9 +652,9 @@
 		     test_loctl.q != prev_loctl.q) &&
 		    (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) {
 			b43_lo_write(dev, &test_loctl);
-			feedth = lo_measure_feedthrough(dev, phy->lna_gain,
-							phy->pga_gain,
-							phy->trsw_rx_gain);
+			feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
+							gphy->pga_gain,
+							gphy->trsw_rx_gain);
 			if (feedth < d->lowest_feedth) {
 				memcpy(probe_loctl, &test_loctl,
 				       sizeof(struct b43_loctl));
@@ -677,6 +683,7 @@
 					 int *max_rx_gain)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	struct b43_lo_g_statemachine d;
 	u16 feedth;
 	int found_lower;
@@ -693,17 +700,17 @@
 		max_repeat = 4;
 	do {
 		b43_lo_write(dev, &d.min_loctl);
-		feedth = lo_measure_feedthrough(dev, phy->lna_gain,
-						phy->pga_gain,
-						phy->trsw_rx_gain);
+		feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
+						gphy->pga_gain,
+						gphy->trsw_rx_gain);
 		if (feedth < 0x258) {
 			if (feedth >= 0x12C)
 				*max_rx_gain += 6;
 			else
 				*max_rx_gain += 3;
-			feedth = lo_measure_feedthrough(dev, phy->lna_gain,
-							phy->pga_gain,
-							phy->trsw_rx_gain);
+			feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
+							gphy->pga_gain,
+							gphy->trsw_rx_gain);
 		}
 		d.lowest_feedth = feedth;
 
@@ -752,6 +759,7 @@
 					       const struct b43_rfatt *rfatt)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	struct b43_loctl loctl = {
 		.i = 0,
 		.q = 0,
@@ -782,11 +790,11 @@
 	if (rfatt->with_padmix)
 		max_rx_gain -= pad_mix_gain;
 	if (has_loopback_gain(phy))
-		max_rx_gain += phy->max_lb_gain;
+		max_rx_gain += gphy->max_lb_gain;
 	lo_measure_gain_values(dev, max_rx_gain,
 			       has_loopback_gain(phy));
 
-	b43_phy_set_baseband_attenuation(dev, bbatt->att);
+	b43_gphy_set_baseband_attenuation(dev, bbatt->att);
 	lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
 
 	lo_measure_restore(dev, &saved_regs);
@@ -820,7 +828,7 @@
 						const struct b43_bbatt *bbatt,
 						const struct b43_rfatt *rfatt)
 {
-	struct b43_txpower_lo_control *lo = dev->phy.lo_control;
+	struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
 	struct b43_lo_calib *c;
 
 	c = b43_find_lo_calib(lo, bbatt, rfatt);
@@ -839,7 +847,8 @@
 void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	int i;
 	int rf_offset, bb_offset;
 	const struct b43_rfatt *rfatt;
@@ -917,14 +926,14 @@
 
 void b43_lo_g_adjust(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	struct b43_lo_calib *cal;
 	struct b43_rfatt rf;
 
-	memcpy(&rf, &phy->rfatt, sizeof(rf));
+	memcpy(&rf, &gphy->rfatt, sizeof(rf));
 	b43_lo_fixup_rfatt(&rf);
 
-	cal = b43_get_calib_lo_settings(dev, &phy->bbatt, &rf);
+	cal = b43_get_calib_lo_settings(dev, &gphy->bbatt, &rf);
 	if (!cal)
 		return;
 	b43_lo_write(dev, &cal->ctl);
@@ -952,7 +961,8 @@
 void b43_lo_g_maintanance_work(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	unsigned long now;
 	unsigned long expire;
 	struct b43_lo_calib *cal, *tmp;
@@ -962,7 +972,7 @@
 	if (!lo)
 		return;
 	now = jiffies;
-	hwpctl = b43_has_hardware_pctl(phy);
+	hwpctl = b43_has_hardware_pctl(dev);
 
 	if (hwpctl) {
 		/* Read the power vector and update it, if needed. */
@@ -983,8 +993,8 @@
 		if (!time_before(cal->calib_time, expire))
 			continue;
 		/* This item expired. */
-		if (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
-		    b43_compare_rfatt(&cal->rfatt, &phy->rfatt)) {
+		if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
+		    b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
 			B43_WARN_ON(current_item_expired);
 			current_item_expired = 1;
 		}
@@ -1002,7 +1012,7 @@
 		/* Recalibrate currently used LO setting. */
 		if (b43_debug(dev, B43_DBG_LO))
 			b43dbg(dev->wl, "LO: Recalibrating current LO setting\n");
-		cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt);
+		cal = b43_calibrate_lo_setting(dev, &gphy->bbatt, &gphy->rfatt);
 		if (cal) {
 			list_add(&cal->list, &lo->calib_list);
 			b43_lo_write(dev, &cal->ctl);
@@ -1013,7 +1023,7 @@
 
 void b43_lo_g_cleanup(struct b43_wldev *dev)
 {
-	struct b43_txpower_lo_control *lo = dev->phy.lo_control;
+	struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
 	struct b43_lo_calib *cal, *tmp;
 
 	if (!lo)
@@ -1027,9 +1037,7 @@
 /* LO Initialization */
 void b43_lo_g_init(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
-
-	if (b43_has_hardware_pctl(phy)) {
+	if (b43_has_hardware_pctl(dev)) {
 		lo_read_power_vector(dev);
 		b43_gphy_dc_lt_init(dev, 1);
 	}
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 1da321c..3b27e20 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -1,7 +1,9 @@
 #ifndef B43_LO_H_
 #define B43_LO_H_
 
-#include "phy.h"
+/* G-PHY Local Oscillator */
+
+#include "phy_g.h"
 
 struct b43_wldev;
 
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7205a93..3bf74e2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -44,8 +44,9 @@
 #include "b43.h"
 #include "main.h"
 #include "debugfs.h"
-#include "phy.h"
-#include "nphy.h"
+#include "phy_common.h"
+#include "phy_g.h"
+#include "phy_n.h"
 #include "dma.h"
 #include "pio.h"
 #include "sysfs.h"
@@ -814,7 +815,7 @@
 			break;
 		udelay(10);
 	}
-	for (i = 0x00; i < 0x0A; i++) {
+	for (i = 0x00; i < 0x19; i++) {
 		value = b43_read16(dev, 0x0690);
 		if (!(value & 0x0100))
 			break;
@@ -1051,23 +1052,6 @@
 	}
 }
 
-/* Turn the Analog ON/OFF */
-static void b43_switch_analog(struct b43_wldev *dev, int on)
-{
-	switch (dev->phy.type) {
-	case B43_PHYTYPE_A:
-	case B43_PHYTYPE_G:
-		b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
-		break;
-	case B43_PHYTYPE_N:
-		b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
-			      on ? 0 : 0x7FFF);
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-}
-
 void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
 {
 	u32 tmslow;
@@ -1090,8 +1074,12 @@
 	ssb_read32(dev->dev, SSB_TMSLOW);	/* flush */
 	msleep(1);
 
-	/* Turn Analog ON */
-	b43_switch_analog(dev, 1);
+	/* Turn Analog ON, but only if we already know the PHY-type.
+	 * This protects against very early setup where we don't know the
+	 * PHY-type, yet. wireless_core_reset will be called once again later,
+	 * when we know the PHY-type. */
+	if (dev->phy.ops)
+		dev->phy.ops->switch_analog(dev, 1);
 
 	macctl = b43_read32(dev, B43_MMIO_MACCTL);
 	macctl &= ~B43_MACCTL_GMODE;
@@ -1174,6 +1162,8 @@
 {
 	/* Top half of Link Quality calculation. */
 
+	if (dev->phy.type != B43_PHYTYPE_G)
+		return;
 	if (dev->noisecalc.calculation_running)
 		return;
 	dev->noisecalc.calculation_running = 1;
@@ -1184,7 +1174,7 @@
 
 static void handle_irq_noise(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *phy = dev->phy.g;
 	u16 tmp;
 	u8 noise[4];
 	u8 i, j;
@@ -1192,6 +1182,9 @@
 
 	/* Bottom half of Link Quality calculation. */
 
+	if (dev->phy.type != B43_PHYTYPE_G)
+		return;
+
 	/* Possible race condition: It might be possible that the user
 	 * changed to a different channel in the meantime since we
 	 * started the calculation. We ignore that fact, since it's
@@ -1251,13 +1244,13 @@
 
 static void handle_irq_tbtt_indication(struct b43_wldev *dev)
 {
-	if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) {
+	if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
 		///TODO: PS TBTT
 	} else {
 		if (1 /*FIXME: the last PSpoll frame was sent successfully */ )
 			b43_power_saving_ctl_bits(dev, 0);
 	}
-	if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS))
+	if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
 		dev->dfq_valid = 1;
 }
 
@@ -1606,8 +1599,8 @@
 	struct b43_wl *wl = dev->wl;
 	u32 cmd, beacon0_valid, beacon1_valid;
 
-	if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP) &&
-	    !b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
+	if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
+	    !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
 		return;
 
 	/* This is the bottom half of the asynchronous beacon update. */
@@ -2575,10 +2568,10 @@
 	ctl &= ~B43_MACCTL_BEACPROMISC;
 	ctl |= B43_MACCTL_INFRA;
 
-	if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
-	    b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
+	if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
+	    b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
 		ctl |= B43_MACCTL_AP;
-	else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS))
+	else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
 		ctl &= ~B43_MACCTL_INFRA;
 
 	if (wl->filter_flags & FIF_CONTROL)
@@ -2688,9 +2681,8 @@
 /* This is the opposite of b43_chip_init() */
 static void b43_chip_exit(struct b43_wldev *dev)
 {
-	b43_radio_turn_off(dev, 1);
+	b43_phy_exit(dev);
 	b43_gpio_cleanup(dev);
-	b43_lo_g_cleanup(dev);
 	/* firmware is released later */
 }
 
@@ -2700,7 +2692,7 @@
 static int b43_chip_init(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	int err, tmp;
+	int err;
 	u32 value32, macctl;
 	u16 value16;
 
@@ -2725,19 +2717,20 @@
 	err = b43_upload_initvals(dev);
 	if (err)
 		goto err_gpio_clean;
-	b43_radio_turn_on(dev);
 
-	b43_write16(dev, 0x03E6, 0x0000);
+	/* Turn the Analog on and initialize the PHY. */
+	phy->ops->switch_analog(dev, 1);
 	err = b43_phy_init(dev);
 	if (err)
-		goto err_radio_off;
+		goto err_gpio_clean;
 
-	/* Select initial Interference Mitigation. */
-	tmp = phy->interfmode;
-	phy->interfmode = B43_INTERFMODE_NONE;
-	b43_radio_set_interference_mitigation(dev, tmp);
+	/* Disable Interference Mitigation. */
+	if (phy->ops->interf_mitigation)
+		phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
 
-	b43_set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
+	/* Select the antennae */
+	if (phy->ops->set_rx_antenna)
+		phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
 	b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT);
 
 	if (phy->type == B43_PHYTYPE_B) {
@@ -2790,8 +2783,6 @@
 out:
 	return err;
 
-err_radio_off:
-	b43_radio_turn_off(dev, 1);
 err_gpio_clean:
 	b43_gpio_cleanup(dev);
 	return err;
@@ -2799,25 +2790,13 @@
 
 static void b43_periodic_every60sec(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	const struct b43_phy_operations *ops = dev->phy.ops;
 
-	if (phy->type != B43_PHYTYPE_G)
-		return;
-	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
-		b43_mac_suspend(dev);
-		b43_calc_nrssi_slope(dev);
-		if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
-			u8 old_chan = phy->channel;
+	if (ops->pwork_60sec)
+		ops->pwork_60sec(dev);
 
-			/* VCO Calibration */
-			if (old_chan >= 8)
-				b43_radio_selectchannel(dev, 1, 0);
-			else
-				b43_radio_selectchannel(dev, 13, 0);
-			b43_radio_selectchannel(dev, old_chan, 0);
-		}
-		b43_mac_enable(dev);
-	}
+	/* Force check the TX power emission now. */
+	b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME);
 }
 
 static void b43_periodic_every30sec(struct b43_wldev *dev)
@@ -2845,32 +2824,8 @@
 		}
 	}
 
-	if (phy->type == B43_PHYTYPE_G) {
-		//TODO: update_aci_moving_average
-		if (phy->aci_enable && phy->aci_wlan_automatic) {
-			b43_mac_suspend(dev);
-			if (!phy->aci_enable && 1 /*TODO: not scanning? */ ) {
-				if (0 /*TODO: bunch of conditions */ ) {
-					b43_radio_set_interference_mitigation
-					    (dev, B43_INTERFMODE_MANUALWLAN);
-				}
-			} else if (1 /*TODO*/) {
-				/*
-				   if ((aci_average > 1000) && !(b43_radio_aci_scan(dev))) {
-				   b43_radio_set_interference_mitigation(dev,
-				   B43_INTERFMODE_NONE);
-				   }
-				 */
-			}
-			b43_mac_enable(dev);
-		} else if (phy->interfmode == B43_INTERFMODE_NONWLAN &&
-			   phy->rev == 1) {
-			//TODO: implement rev1 workaround
-		}
-	}
-	b43_phy_xmitpower(dev);	//FIXME: unless scanning?
-	b43_lo_g_maintanance_work(dev);
-	//TODO for APHY (temperature?)
+	if (phy->ops->pwork_15sec)
+		phy->ops->pwork_15sec(dev);
 
 	atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
 	wmb();
@@ -3104,36 +3059,31 @@
 	}
 }
 
-/* Update the QOS parameters in hardware. */
-static void b43_qos_update(struct b43_wldev *dev)
+/* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. */
+static const u16 b43_qos_shm_offsets[] = {
+	/* [mac80211-queue-nr] = SHM_OFFSET, */
+	[0] = B43_QOS_VOICE,
+	[1] = B43_QOS_VIDEO,
+	[2] = B43_QOS_BESTEFFORT,
+	[3] = B43_QOS_BACKGROUND,
+};
+
+/* Update all QOS parameters in hardware. */
+static void b43_qos_upload_all(struct b43_wldev *dev)
 {
 	struct b43_wl *wl = dev->wl;
 	struct b43_qos_params *params;
-	unsigned long flags;
 	unsigned int i;
 
-	/* Mapping of mac80211 queues to b43 SHM offsets. */
-	static const u16 qos_shm_offsets[] = {
-		[0] = B43_QOS_VOICE,
-		[1] = B43_QOS_VIDEO,
-		[2] = B43_QOS_BESTEFFORT,
-		[3] = B43_QOS_BACKGROUND,
-	};
-	BUILD_BUG_ON(ARRAY_SIZE(qos_shm_offsets) != ARRAY_SIZE(wl->qos_params));
+	BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
+		     ARRAY_SIZE(wl->qos_params));
 
 	b43_mac_suspend(dev);
-	spin_lock_irqsave(&wl->irq_lock, flags);
-
 	for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
 		params = &(wl->qos_params[i]);
-		if (params->need_hw_update) {
-			b43_qos_params_upload(dev, &(params->p),
-					      qos_shm_offsets[i]);
-			params->need_hw_update = 0;
-		}
+		b43_qos_params_upload(dev, &(params->p),
+				      b43_qos_shm_offsets[i]);
 	}
-
-	spin_unlock_irqrestore(&wl->irq_lock, flags);
 	b43_mac_enable(dev);
 }
 
@@ -3142,25 +3092,50 @@
 	struct b43_qos_params *params;
 	unsigned int i;
 
+	/* Initialize QoS parameters to sane defaults. */
+
+	BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
+		     ARRAY_SIZE(wl->qos_params));
+
 	for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
 		params = &(wl->qos_params[i]);
 
-		memset(&(params->p), 0, sizeof(params->p));
-		params->p.aifs = -1;
-		params->need_hw_update = 1;
+		switch (b43_qos_shm_offsets[i]) {
+		case B43_QOS_VOICE:
+			params->p.txop = 0;
+			params->p.aifs = 2;
+			params->p.cw_min = 0x0001;
+			params->p.cw_max = 0x0001;
+			break;
+		case B43_QOS_VIDEO:
+			params->p.txop = 0;
+			params->p.aifs = 2;
+			params->p.cw_min = 0x0001;
+			params->p.cw_max = 0x0001;
+			break;
+		case B43_QOS_BESTEFFORT:
+			params->p.txop = 0;
+			params->p.aifs = 3;
+			params->p.cw_min = 0x0001;
+			params->p.cw_max = 0x03FF;
+			break;
+		case B43_QOS_BACKGROUND:
+			params->p.txop = 0;
+			params->p.aifs = 7;
+			params->p.cw_min = 0x0001;
+			params->p.cw_max = 0x03FF;
+			break;
+		default:
+			B43_WARN_ON(1);
+		}
 	}
 }
 
 /* Initialize the core's QOS capabilities */
 static void b43_qos_init(struct b43_wldev *dev)
 {
-	struct b43_wl *wl = dev->wl;
-	unsigned int i;
-
 	/* Upload the current QOS parameters. */
-	for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++)
-		wl->qos_params[i].need_hw_update = 1;
-	b43_qos_update(dev);
+	b43_qos_upload_all(dev);
 
 	/* Enable QOS support. */
 	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF);
@@ -3169,25 +3144,13 @@
 		    | B43_MMIO_IFSCTL_USE_EDCF);
 }
 
-static void b43_qos_update_work(struct work_struct *work)
-{
-	struct b43_wl *wl = container_of(work, struct b43_wl, qos_update_work);
-	struct b43_wldev *dev;
-
-	mutex_lock(&wl->mutex);
-	dev = wl->current_dev;
-	if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED)))
-		b43_qos_update(dev);
-	mutex_unlock(&wl->mutex);
-}
-
 static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
 			  const struct ieee80211_tx_queue_params *params)
 {
 	struct b43_wl *wl = hw_to_b43_wl(hw);
-	unsigned long flags;
+	struct b43_wldev *dev;
 	unsigned int queue = (unsigned int)_queue;
-	struct b43_qos_params *p;
+	int err = -ENODEV;
 
 	if (queue >= ARRAY_SIZE(wl->qos_params)) {
 		/* Queue not available or don't support setting
@@ -3195,16 +3158,25 @@
 		 * confuse mac80211. */
 		return 0;
 	}
+	BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
+		     ARRAY_SIZE(wl->qos_params));
 
-	spin_lock_irqsave(&wl->irq_lock, flags);
-	p = &(wl->qos_params[queue]);
-	memcpy(&(p->p), params, sizeof(p->p));
-	p->need_hw_update = 1;
-	spin_unlock_irqrestore(&wl->irq_lock, flags);
+	mutex_lock(&wl->mutex);
+	dev = wl->current_dev;
+	if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED)))
+		goto out_unlock;
 
-	queue_work(hw->workqueue, &wl->qos_update_work);
+	memcpy(&(wl->qos_params[queue].p), params, sizeof(*params));
+	b43_mac_suspend(dev);
+	b43_qos_params_upload(dev, &(wl->qos_params[queue].p),
+			      b43_qos_shm_offsets[queue]);
+	b43_mac_enable(dev);
+	err = 0;
 
-	return 0;
+out_unlock:
+	mutex_unlock(&wl->mutex);
+
+	return err;
 }
 
 static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
@@ -3401,7 +3373,7 @@
 	/* Switch to the requested channel.
 	 * The firmware takes care of races with the TX handler. */
 	if (conf->channel->hw_value != phy->channel)
-		b43_radio_selectchannel(dev, conf->channel->hw_value, 0);
+		b43_switch_channel(dev, conf->channel->hw_value);
 
 	/* Enable/Disable ShortSlot timing. */
 	if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) !=
@@ -3417,26 +3389,30 @@
 
 	/* Adjust the desired TX power level. */
 	if (conf->power_level != 0) {
-		if (conf->power_level != phy->power_level) {
-			phy->power_level = conf->power_level;
-			b43_phy_xmitpower(dev);
+		spin_lock_irqsave(&wl->irq_lock, flags);
+		if (conf->power_level != phy->desired_txpower) {
+			phy->desired_txpower = conf->power_level;
+			b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME |
+						   B43_TXPWR_IGNORE_TSSI);
 		}
+		spin_unlock_irqrestore(&wl->irq_lock, flags);
 	}
 
 	/* Antennas for RX and management frame TX. */
 	antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx);
 	b43_mgmtframe_txantenna(dev, antenna);
 	antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx);
-	b43_set_rx_antenna(dev, antenna);
+	if (phy->ops->set_rx_antenna)
+		phy->ops->set_rx_antenna(dev, antenna);
 
 	/* Update templates for AP/mesh mode. */
-	if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
-	    b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
+	if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
+	    b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
 		b43_set_beacon_int(dev, conf->beacon_int);
 
 	if (!!conf->radio_enabled != phy->radio_on) {
 		if (conf->radio_enabled) {
-			b43_radio_turn_on(dev);
+			b43_software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
 			b43info(dev->wl, "Radio turned on by software\n");
 			if (!dev->radio_hw_enable) {
 				b43info(dev->wl, "The hardware RF-kill button "
@@ -3444,7 +3420,7 @@
 					"Press the button to turn it on.\n");
 			}
 		} else {
-			b43_radio_turn_off(dev, 0);
+			b43_software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
 			b43info(dev->wl, "Radio turned off by software\n");
 		}
 	}
@@ -3619,14 +3595,14 @@
 	else
 		memset(wl->bssid, 0, ETH_ALEN);
 	if (b43_status(dev) >= B43_STAT_INITIALIZED) {
-		if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
-		    b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) {
+		if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
+		    b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) {
 			B43_WARN_ON(vif->type != wl->if_type);
 			if (conf->changed & IEEE80211_IFCC_SSID)
 				b43_set_ssid(dev, conf->ssid, conf->ssid_len);
 			if (conf->changed & IEEE80211_IFCC_BEACON)
 				b43_update_templates(wl);
-		} else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) {
+		} else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
 			if (conf->changed & IEEE80211_IFCC_BEACON)
 				b43_update_templates(wl);
 		}
@@ -3818,48 +3794,10 @@
 static void setup_struct_phy_for_init(struct b43_wldev *dev,
 				      struct b43_phy *phy)
 {
-	struct b43_txpower_lo_control *lo;
-	int i;
-
-	memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
-	memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
-
-	phy->aci_enable = 0;
-	phy->aci_wlan_automatic = 0;
-	phy->aci_hw_rssi = 0;
-
-	phy->radio_off_context.valid = 0;
-
-	lo = phy->lo_control;
-	if (lo) {
-		memset(lo, 0, sizeof(*(phy->lo_control)));
-		lo->tx_bias = 0xFF;
-		INIT_LIST_HEAD(&lo->calib_list);
-	}
-	phy->max_lb_gain = 0;
-	phy->trsw_rx_gain = 0;
-	phy->txpwr_offset = 0;
-
-	/* NRSSI */
-	phy->nrssislope = 0;
-	for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++)
-		phy->nrssi[i] = -1000;
-	for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++)
-		phy->nrssi_lt[i] = i;
-
-	phy->lofcal = 0xFFFF;
-	phy->initval = 0xFFFF;
-
-	phy->interfmode = B43_INTERFMODE_NONE;
-	phy->channel = 0xFF;
-
 	phy->hardware_power_control = !!modparam_hwpctl;
-
+	phy->next_txpwr_check_time = jiffies;
 	/* PHY TX errors counter. */
 	atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
-
-	/* OFDM-table address caching. */
-	phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;
 }
 
 static void setup_struct_wldev_for_init(struct b43_wldev *dev)
@@ -3965,7 +3903,7 @@
 		pu_delay = 3700;
 	else
 		pu_delay = 1050;
-	if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle)
+	if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
 		pu_delay = 500;
 	if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
 		pu_delay = max(pu_delay, (u16)2400);
@@ -3979,7 +3917,7 @@
 	u16 pretbtt;
 
 	/* The time value is in microseconds. */
-	if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) {
+	if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) {
 		pretbtt = 2;
 	} else {
 		if (dev->phy.type == B43_PHYTYPE_A)
@@ -3995,7 +3933,6 @@
 /* Locking: wl->mutex */
 static void b43_wireless_core_exit(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
 	u32 macctl;
 
 	B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED);
@@ -4016,12 +3953,7 @@
 	b43_dma_free(dev);
 	b43_pio_free(dev);
 	b43_chip_exit(dev);
-	b43_radio_turn_off(dev, 1);
-	b43_switch_analog(dev, 0);
-	if (phy->dyn_tssi_tbl)
-		kfree(phy->tssi2dbm);
-	kfree(phy->lo_control);
-	phy->lo_control = NULL;
+	dev->phy.ops->switch_analog(dev, 0);
 	if (dev->wl->current_beacon) {
 		dev_kfree_skb_any(dev->wl->current_beacon);
 		dev->wl->current_beacon = NULL;
@@ -4052,29 +3984,23 @@
 		b43_wireless_core_reset(dev, tmp);
 	}
 
-	if ((phy->type == B43_PHYTYPE_B) || (phy->type == B43_PHYTYPE_G)) {
-		phy->lo_control =
-		    kzalloc(sizeof(*(phy->lo_control)), GFP_KERNEL);
-		if (!phy->lo_control) {
-			err = -ENOMEM;
-			goto err_busdown;
-		}
-	}
+	/* Reset all data structures. */
 	setup_struct_wldev_for_init(dev);
-
-	err = b43_phy_init_tssi2dbm_table(dev);
-	if (err)
-		goto err_kfree_lo_control;
+	phy->ops->prepare_structs(dev);
 
 	/* Enable IRQ routing to this device. */
 	ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev);
 
 	b43_imcfglo_timeouts_workaround(dev);
 	b43_bluetooth_coext_disable(dev);
-	b43_phy_early_init(dev);
+	if (phy->ops->prepare_hardware) {
+		err = phy->ops->prepare_hardware(dev);
+		if (err)
+			goto err_busdown;
+	}
 	err = b43_chip_init(dev);
 	if (err)
-		goto err_kfree_tssitbl;
+		goto err_busdown;
 	b43_shm_write16(dev, B43_SHM_SHARED,
 			B43_SHM_SH_WLCOREREV, dev->dev->id.revision);
 	hf = b43_hf_read(dev);
@@ -4140,15 +4066,9 @@
 out:
 	return err;
 
-      err_chip_exit:
+err_chip_exit:
 	b43_chip_exit(dev);
-      err_kfree_tssitbl:
-	if (phy->dyn_tssi_tbl)
-		kfree(phy->tssi2dbm);
-      err_kfree_lo_control:
-	kfree(phy->lo_control);
-	phy->lo_control = NULL;
-      err_busdown:
+err_busdown:
 	ssb_bus_may_powerdown(bus);
 	B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
 	return err;
@@ -4164,11 +4084,11 @@
 
 	/* TODO: allow WDS/AP devices to coexist */
 
-	if (conf->type != IEEE80211_IF_TYPE_AP &&
-	    conf->type != IEEE80211_IF_TYPE_MESH_POINT &&
-	    conf->type != IEEE80211_IF_TYPE_STA &&
-	    conf->type != IEEE80211_IF_TYPE_WDS &&
-	    conf->type != IEEE80211_IF_TYPE_IBSS)
+	if (conf->type != NL80211_IFTYPE_AP &&
+	    conf->type != NL80211_IFTYPE_MESH_POINT &&
+	    conf->type != NL80211_IFTYPE_STATION &&
+	    conf->type != NL80211_IFTYPE_WDS &&
+	    conf->type != NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&wl->mutex);
@@ -4283,7 +4203,6 @@
 	struct b43_wldev *dev = wl->current_dev;
 
 	b43_rfkill_exit(dev);
-	cancel_work_sync(&(wl->qos_update_work));
 	cancel_work_sync(&(wl->beacon_update_trigger));
 
 	mutex_lock(&wl->mutex);
@@ -4291,6 +4210,8 @@
 		b43_wireless_core_stop(dev);
 	b43_wireless_core_exit(dev);
 	mutex_unlock(&wl->mutex);
+
+	cancel_work_sync(&(wl->txpower_adjust_work));
 }
 
 static int b43_op_set_retry_limit(struct ieee80211_hw *hw,
@@ -4313,7 +4234,8 @@
 	return err;
 }
 
-static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
+static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
+				 struct ieee80211_sta *sta, bool set)
 {
 	struct b43_wl *wl = hw_to_b43_wl(hw);
 	unsigned long flags;
@@ -4328,7 +4250,7 @@
 static void b43_op_sta_notify(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif,
 			      enum sta_notify_cmd notify_cmd,
-			      const u8 *addr)
+			      struct ieee80211_sta *sta)
 {
 	struct b43_wl *wl = hw_to_b43_wl(hw);
 
@@ -4422,6 +4344,7 @@
 	/* We release firmware that late to not be required to re-request
 	 * it all the time when we reinit the core. */
 	b43_release_firmware(dev);
+	b43_phy_free(dev);
 }
 
 static int b43_wireless_core_attach(struct b43_wldev *dev)
@@ -4495,30 +4418,35 @@
 		}
 	}
 
+	err = b43_phy_allocate(dev);
+	if (err)
+		goto err_powerdown;
+
 	dev->phy.gmode = have_2ghz_phy;
 	tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
 	b43_wireless_core_reset(dev, tmp);
 
 	err = b43_validate_chipaccess(dev);
 	if (err)
-		goto err_powerdown;
+		goto err_phy_free;
 	err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy);
 	if (err)
-		goto err_powerdown;
+		goto err_phy_free;
 
 	/* Now set some default "current_dev" */
 	if (!wl->current_dev)
 		wl->current_dev = dev;
 	INIT_WORK(&dev->restart_work, b43_chip_reset);
 
-	b43_radio_turn_off(dev, 1);
-	b43_switch_analog(dev, 0);
+	dev->phy.ops->switch_analog(dev, 0);
 	ssb_device_disable(dev->dev, 0);
 	ssb_bus_may_powerdown(bus);
 
 out:
 	return err;
 
+err_phy_free:
+	b43_phy_free(dev);
 err_powerdown:
 	ssb_bus_may_powerdown(bus);
 	return err;
@@ -4615,9 +4543,11 @@
 		pdev = bus->host_pci;
 		if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) ||
 		    IS_PDEV(pdev, BROADCOM, 0x4320,    DELL, 0x0003) ||
+		    IS_PDEV(pdev, BROADCOM, 0x4320,      HP, 0x12f8) ||
 		    IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) ||
 		    IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) ||
-		    IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013))
+		    IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013) ||
+		    IS_PDEV(pdev, BROADCOM, 0x4320, MOTOROLA, 0x7010))
 			bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST;
 	}
 }
@@ -4650,6 +4580,13 @@
 		    IEEE80211_HW_SIGNAL_DBM |
 		    IEEE80211_HW_NOISE_DBM;
 
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_WDS) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
 	hw->queues = b43_modparam_qos ? 4 : 1;
 	SET_IEEE80211_DEV(hw, dev->dev);
 	if (is_valid_ether_addr(sprom->et1mac))
@@ -4667,8 +4604,8 @@
 	spin_lock_init(&wl->shm_lock);
 	mutex_init(&wl->mutex);
 	INIT_LIST_HEAD(&wl->devlist);
-	INIT_WORK(&wl->qos_update_work, b43_qos_update_work);
 	INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
+	INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
 
 	ssb_set_devtypedata(dev, wl);
 	b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h
deleted file mode 100644
index 4aab109..0000000
--- a/drivers/net/wireless/b43/phy.h
+++ /dev/null
@@ -1,340 +0,0 @@
-#ifndef B43_PHY_H_
-#define B43_PHY_H_
-
-#include <linux/types.h>
-
-struct b43_wldev;
-struct b43_phy;
-
-/*** PHY Registers ***/
-
-/* Routing */
-#define B43_PHYROUTE			0x0C00 /* PHY register routing bits mask */
-#define  B43_PHYROUTE_BASE		0x0000 /* Base registers */
-#define  B43_PHYROUTE_OFDM_GPHY		0x0400 /* OFDM register routing for G-PHYs */
-#define  B43_PHYROUTE_EXT_GPHY		0x0800 /* Extended G-PHY registers */
-#define  B43_PHYROUTE_N_BMODE		0x0C00 /* N-PHY BMODE registers */
-
-/* CCK (B-PHY) registers. */
-#define B43_PHY_CCK(reg)		((reg) | B43_PHYROUTE_BASE)
-/* N-PHY registers. */
-#define B43_PHY_N(reg)			((reg) | B43_PHYROUTE_BASE)
-/* N-PHY BMODE registers. */
-#define B43_PHY_N_BMODE(reg)		((reg) | B43_PHYROUTE_N_BMODE)
-/* OFDM (A-PHY) registers. */
-#define B43_PHY_OFDM(reg)		((reg) | B43_PHYROUTE_OFDM_GPHY)
-/* Extended G-PHY registers. */
-#define B43_PHY_EXTG(reg)		((reg) | B43_PHYROUTE_EXT_GPHY)
-
-/* OFDM (A) PHY Registers */
-#define B43_PHY_VERSION_OFDM		B43_PHY_OFDM(0x00)	/* Versioning register for A-PHY */
-#define B43_PHY_BBANDCFG		B43_PHY_OFDM(0x01)	/* Baseband config */
-#define  B43_PHY_BBANDCFG_RXANT		0x180	/* RX Antenna selection */
-#define  B43_PHY_BBANDCFG_RXANT_SHIFT	7
-#define B43_PHY_PWRDOWN			B43_PHY_OFDM(0x03)	/* Powerdown */
-#define B43_PHY_CRSTHRES1_R1		B43_PHY_OFDM(0x06)	/* CRS Threshold 1 (phy.rev 1 only) */
-#define B43_PHY_LNAHPFCTL		B43_PHY_OFDM(0x1C)	/* LNA/HPF control */
-#define B43_PHY_LPFGAINCTL		B43_PHY_OFDM(0x20)	/* LPF Gain control */
-#define B43_PHY_ADIVRELATED		B43_PHY_OFDM(0x27)	/* FIXME rename */
-#define B43_PHY_CRS0			B43_PHY_OFDM(0x29)
-#define  B43_PHY_CRS0_EN		0x4000
-#define B43_PHY_PEAK_COUNT		B43_PHY_OFDM(0x30)
-#define B43_PHY_ANTDWELL		B43_PHY_OFDM(0x2B)	/* Antenna dwell */
-#define  B43_PHY_ANTDWELL_AUTODIV1	0x0100	/* Automatic RX diversity start antenna */
-#define B43_PHY_ENCORE			B43_PHY_OFDM(0x49)	/* "Encore" (RangeMax / BroadRange) */
-#define  B43_PHY_ENCORE_EN		0x0200	/* Encore enable */
-#define B43_PHY_LMS			B43_PHY_OFDM(0x55)
-#define B43_PHY_OFDM61			B43_PHY_OFDM(0x61)	/* FIXME rename */
-#define  B43_PHY_OFDM61_10		0x0010	/* FIXME rename */
-#define B43_PHY_IQBAL			B43_PHY_OFDM(0x69)	/* I/Q balance */
-#define B43_PHY_BBTXDC_BIAS		B43_PHY_OFDM(0x6B)	/* Baseband TX DC bias */
-#define B43_PHY_OTABLECTL		B43_PHY_OFDM(0x72)	/* OFDM table control (see below) */
-#define  B43_PHY_OTABLEOFF		0x03FF	/* OFDM table offset (see below) */
-#define  B43_PHY_OTABLENR		0xFC00	/* OFDM table number (see below) */
-#define  B43_PHY_OTABLENR_SHIFT		10
-#define B43_PHY_OTABLEI			B43_PHY_OFDM(0x73)	/* OFDM table data I */
-#define B43_PHY_OTABLEQ			B43_PHY_OFDM(0x74)	/* OFDM table data Q */
-#define B43_PHY_HPWR_TSSICTL		B43_PHY_OFDM(0x78)	/* Hardware power TSSI control */
-#define B43_PHY_ADCCTL			B43_PHY_OFDM(0x7A)	/* ADC control */
-#define B43_PHY_IDLE_TSSI		B43_PHY_OFDM(0x7B)
-#define B43_PHY_A_TEMP_SENSE		B43_PHY_OFDM(0x7C)	/* A PHY temperature sense */
-#define B43_PHY_NRSSITHRES		B43_PHY_OFDM(0x8A)	/* NRSSI threshold */
-#define B43_PHY_ANTWRSETT		B43_PHY_OFDM(0x8C)	/* Antenna WR settle */
-#define  B43_PHY_ANTWRSETT_ARXDIV	0x2000	/* Automatic RX diversity enabled */
-#define B43_PHY_CLIPPWRDOWNT		B43_PHY_OFDM(0x93)	/* Clip powerdown threshold */
-#define B43_PHY_OFDM9B			B43_PHY_OFDM(0x9B)	/* FIXME rename */
-#define B43_PHY_N1P1GAIN		B43_PHY_OFDM(0xA0)
-#define B43_PHY_P1P2GAIN		B43_PHY_OFDM(0xA1)
-#define B43_PHY_N1N2GAIN		B43_PHY_OFDM(0xA2)
-#define B43_PHY_CLIPTHRES		B43_PHY_OFDM(0xA3)
-#define B43_PHY_CLIPN1P2THRES		B43_PHY_OFDM(0xA4)
-#define B43_PHY_CCKSHIFTBITS_WA		B43_PHY_OFDM(0xA5)	/* CCK shiftbits workaround, FIXME rename */
-#define B43_PHY_CCKSHIFTBITS		B43_PHY_OFDM(0xA7)	/* FIXME rename */
-#define B43_PHY_DIVSRCHIDX		B43_PHY_OFDM(0xA8)	/* Divider search gain/index */
-#define B43_PHY_CLIPP2THRES		B43_PHY_OFDM(0xA9)
-#define B43_PHY_CLIPP3THRES		B43_PHY_OFDM(0xAA)
-#define B43_PHY_DIVP1P2GAIN		B43_PHY_OFDM(0xAB)
-#define B43_PHY_DIVSRCHGAINBACK		B43_PHY_OFDM(0xAD)	/* Divider search gain back */
-#define B43_PHY_DIVSRCHGAINCHNG		B43_PHY_OFDM(0xAE)	/* Divider search gain change */
-#define B43_PHY_CRSTHRES1		B43_PHY_OFDM(0xC0)	/* CRS Threshold 1 (phy.rev >= 2 only) */
-#define B43_PHY_CRSTHRES2		B43_PHY_OFDM(0xC1)	/* CRS Threshold 2 (phy.rev >= 2 only) */
-#define B43_PHY_TSSIP_LTBASE		B43_PHY_OFDM(0x380)	/* TSSI power lookup table base */
-#define B43_PHY_DC_LTBASE		B43_PHY_OFDM(0x3A0)	/* DC lookup table base */
-#define B43_PHY_GAIN_LTBASE		B43_PHY_OFDM(0x3C0)	/* Gain lookup table base */
-
-/* CCK (B) PHY Registers */
-#define B43_PHY_VERSION_CCK		B43_PHY_CCK(0x00)	/* Versioning register for B-PHY */
-#define B43_PHY_CCKBBANDCFG		B43_PHY_CCK(0x01)	/* Contains antenna 0/1 control bit */
-#define B43_PHY_PGACTL			B43_PHY_CCK(0x15)	/* PGA control */
-#define  B43_PHY_PGACTL_LPF		0x1000	/* Low pass filter (?) */
-#define  B43_PHY_PGACTL_LOWBANDW	0x0040	/* Low bandwidth flag */
-#define  B43_PHY_PGACTL_UNKNOWN		0xEFA0
-#define B43_PHY_FBCTL1			B43_PHY_CCK(0x18)	/* Frequency bandwidth control 1 */
-#define B43_PHY_ITSSI			B43_PHY_CCK(0x29)	/* Idle TSSI */
-#define B43_PHY_LO_LEAKAGE		B43_PHY_CCK(0x2D)	/* Measured LO leakage */
-#define B43_PHY_ENERGY			B43_PHY_CCK(0x33)	/* Energy */
-#define B43_PHY_SYNCCTL			B43_PHY_CCK(0x35)
-#define B43_PHY_FBCTL2			B43_PHY_CCK(0x38)	/* Frequency bandwidth control 2 */
-#define B43_PHY_DACCTL			B43_PHY_CCK(0x60)	/* DAC control */
-#define B43_PHY_RCCALOVER		B43_PHY_CCK(0x78)	/* RC calibration override */
-
-/* Extended G-PHY Registers */
-#define B43_PHY_CLASSCTL		B43_PHY_EXTG(0x02)	/* Classify control */
-#define B43_PHY_GTABCTL			B43_PHY_EXTG(0x03)	/* G-PHY table control (see below) */
-#define  B43_PHY_GTABOFF		0x03FF	/* G-PHY table offset (see below) */
-#define  B43_PHY_GTABNR			0xFC00	/* G-PHY table number (see below) */
-#define  B43_PHY_GTABNR_SHIFT		10
-#define B43_PHY_GTABDATA		B43_PHY_EXTG(0x04)	/* G-PHY table data */
-#define B43_PHY_LO_MASK			B43_PHY_EXTG(0x0F)	/* Local Oscillator control mask */
-#define B43_PHY_LO_CTL			B43_PHY_EXTG(0x10)	/* Local Oscillator control */
-#define B43_PHY_RFOVER			B43_PHY_EXTG(0x11)	/* RF override */
-#define B43_PHY_RFOVERVAL		B43_PHY_EXTG(0x12)	/* RF override value */
-#define  B43_PHY_RFOVERVAL_EXTLNA	0x8000
-#define  B43_PHY_RFOVERVAL_LNA		0x7000
-#define  B43_PHY_RFOVERVAL_LNA_SHIFT	12
-#define  B43_PHY_RFOVERVAL_PGA		0x0F00
-#define  B43_PHY_RFOVERVAL_PGA_SHIFT	8
-#define  B43_PHY_RFOVERVAL_UNK		0x0010	/* Unknown, always set. */
-#define  B43_PHY_RFOVERVAL_TRSWRX	0x00E0
-#define  B43_PHY_RFOVERVAL_BW		0x0003	/* Bandwidth flags */
-#define   B43_PHY_RFOVERVAL_BW_LPF	0x0001	/* Low Pass Filter */
-#define   B43_PHY_RFOVERVAL_BW_LBW	0x0002	/* Low Bandwidth (when set), high when unset */
-#define B43_PHY_ANALOGOVER		B43_PHY_EXTG(0x14)	/* Analog override */
-#define B43_PHY_ANALOGOVERVAL		B43_PHY_EXTG(0x15)	/* Analog override value */
-
-/*** OFDM table numbers ***/
-#define B43_OFDMTAB(number, offset)	(((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
-#define B43_OFDMTAB_AGC1		B43_OFDMTAB(0x00, 0)
-#define B43_OFDMTAB_GAIN0		B43_OFDMTAB(0x00, 0)
-#define B43_OFDMTAB_GAINX		B43_OFDMTAB(0x01, 0)	//TODO rename
-#define B43_OFDMTAB_GAIN1		B43_OFDMTAB(0x01, 4)
-#define B43_OFDMTAB_AGC3		B43_OFDMTAB(0x02, 0)
-#define B43_OFDMTAB_GAIN2		B43_OFDMTAB(0x02, 3)
-#define B43_OFDMTAB_LNAHPFGAIN1		B43_OFDMTAB(0x03, 0)
-#define B43_OFDMTAB_WRSSI		B43_OFDMTAB(0x04, 0)
-#define B43_OFDMTAB_LNAHPFGAIN2		B43_OFDMTAB(0x04, 0)
-#define B43_OFDMTAB_NOISESCALE		B43_OFDMTAB(0x05, 0)
-#define B43_OFDMTAB_AGC2		B43_OFDMTAB(0x06, 0)
-#define B43_OFDMTAB_ROTOR		B43_OFDMTAB(0x08, 0)
-#define B43_OFDMTAB_ADVRETARD		B43_OFDMTAB(0x09, 0)
-#define B43_OFDMTAB_DAC			B43_OFDMTAB(0x0C, 0)
-#define B43_OFDMTAB_DC			B43_OFDMTAB(0x0E, 7)
-#define B43_OFDMTAB_PWRDYN2		B43_OFDMTAB(0x0E, 12)
-#define B43_OFDMTAB_LNAGAIN		B43_OFDMTAB(0x0E, 13)
-#define B43_OFDMTAB_UNKNOWN_0F		B43_OFDMTAB(0x0F, 0)	//TODO rename
-#define B43_OFDMTAB_UNKNOWN_APHY	B43_OFDMTAB(0x0F, 7)	//TODO rename
-#define B43_OFDMTAB_LPFGAIN		B43_OFDMTAB(0x0F, 12)
-#define B43_OFDMTAB_RSSI		B43_OFDMTAB(0x10, 0)
-#define B43_OFDMTAB_UNKNOWN_11		B43_OFDMTAB(0x11, 4)	//TODO rename
-#define B43_OFDMTAB_AGC1_R1		B43_OFDMTAB(0x13, 0)
-#define B43_OFDMTAB_GAINX_R1		B43_OFDMTAB(0x14, 0)	//TODO remove!
-#define B43_OFDMTAB_MINSIGSQ		B43_OFDMTAB(0x14, 0)
-#define B43_OFDMTAB_AGC3_R1		B43_OFDMTAB(0x15, 0)
-#define B43_OFDMTAB_WRSSI_R1		B43_OFDMTAB(0x15, 4)
-#define B43_OFDMTAB_TSSI		B43_OFDMTAB(0x15, 0)
-#define B43_OFDMTAB_DACRFPABB		B43_OFDMTAB(0x16, 0)
-#define B43_OFDMTAB_DACOFF		B43_OFDMTAB(0x17, 0)
-#define B43_OFDMTAB_DCBIAS		B43_OFDMTAB(0x18, 0)
-
-u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
-void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
-			 u16 offset, u16 value);
-u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
-void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
-			 u16 offset, u32 value);
-
-/*** G-PHY table numbers */
-#define B43_GTAB(number, offset)	(((number) << B43_PHY_GTABNR_SHIFT) | (offset))
-#define B43_GTAB_NRSSI			B43_GTAB(0x00, 0)
-#define B43_GTAB_TRFEMW			B43_GTAB(0x0C, 0x120)
-#define B43_GTAB_ORIGTR			B43_GTAB(0x2E, 0x298)
-
-u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset);	//TODO implement
-void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value);	//TODO implement
-
-#define B43_DEFAULT_CHANNEL_A	36
-#define B43_DEFAULT_CHANNEL_BG	6
-
-enum {
-	B43_ANTENNA0,		/* Antenna 0 */
-	B43_ANTENNA1,		/* Antenna 0 */
-	B43_ANTENNA_AUTO1,	/* Automatic, starting with antenna 1 */
-	B43_ANTENNA_AUTO0,	/* Automatic, starting with antenna 0 */
-	B43_ANTENNA2,
-	B43_ANTENNA3 = 8,
-
-	B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
-	B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
-};
-
-enum {
-	B43_INTERFMODE_NONE,
-	B43_INTERFMODE_NONWLAN,
-	B43_INTERFMODE_MANUALWLAN,
-	B43_INTERFMODE_AUTOWLAN,
-};
-
-/* Masks for the different PHY versioning registers. */
-#define B43_PHYVER_ANALOG		0xF000
-#define B43_PHYVER_ANALOG_SHIFT		12
-#define B43_PHYVER_TYPE			0x0F00
-#define B43_PHYVER_TYPE_SHIFT		8
-#define B43_PHYVER_VERSION		0x00FF
-
-void b43_phy_lock(struct b43_wldev *dev);
-void b43_phy_unlock(struct b43_wldev *dev);
-
-
-/* Read a value from a PHY register */
-u16 b43_phy_read(struct b43_wldev *dev, u16 offset);
-/* Write a value to a PHY register */
-void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val);
-/* Mask a PHY register with a mask */
-void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
-/* OR a PHY register with a bitmap */
-void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
-/* Mask and OR a PHY register with a mask and bitmap */
-void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
-
-
-int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev);
-
-void b43_phy_early_init(struct b43_wldev *dev);
-int b43_phy_init(struct b43_wldev *dev);
-
-void b43_set_rx_antenna(struct b43_wldev *dev, int antenna);
-
-void b43_phy_xmitpower(struct b43_wldev *dev);
-
-/* Returns the boolean whether the board has HardwarePowerControl */
-bool b43_has_hardware_pctl(struct b43_phy *phy);
-/* Returns the boolean whether "TX Magnification" is enabled. */
-#define has_tx_magnification(phy) \
-	(((phy)->rev >= 2) &&			\
-	 ((phy)->radio_ver == 0x2050) &&	\
-	 ((phy)->radio_rev == 8))
-/* Card uses the loopback gain stuff */
-#define has_loopback_gain(phy) \
-	(((phy)->rev > 1) || ((phy)->gmode))
-
-/* Radio Attenuation (RF Attenuation) */
-struct b43_rfatt {
-	u8 att;			/* Attenuation value */
-	bool with_padmix;	/* Flag, PAD Mixer enabled. */
-};
-struct b43_rfatt_list {
-	/* Attenuation values list */
-	const struct b43_rfatt *list;
-	u8 len;
-	/* Minimum/Maximum attenuation values */
-	u8 min_val;
-	u8 max_val;
-};
-
-/* Returns true, if the values are the same. */
-static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
-				     const struct b43_rfatt *b)
-{
-	return ((a->att == b->att) &&
-		(a->with_padmix == b->with_padmix));
-}
-
-/* Baseband Attenuation */
-struct b43_bbatt {
-	u8 att;			/* Attenuation value */
-};
-struct b43_bbatt_list {
-	/* Attenuation values list */
-	const struct b43_bbatt *list;
-	u8 len;
-	/* Minimum/Maximum attenuation values */
-	u8 min_val;
-	u8 max_val;
-};
-
-/* Returns true, if the values are the same. */
-static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
-				     const struct b43_bbatt *b)
-{
-	return (a->att == b->att);
-}
-
-/* tx_control bits. */
-#define B43_TXCTL_PA3DB		0x40	/* PA Gain 3dB */
-#define B43_TXCTL_PA2DB		0x20	/* PA Gain 2dB */
-#define B43_TXCTL_TXMIX		0x10	/* TX Mixer Gain */
-
-/* Write BasebandAttenuation value to the device. */
-void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
-				      u16 baseband_attenuation);
-
-extern const u8 b43_radio_channel_codes_bg[];
-
-void b43_radio_lock(struct b43_wldev *dev);
-void b43_radio_unlock(struct b43_wldev *dev);
-
-
-/* Read a value from a 16bit radio register */
-u16 b43_radio_read16(struct b43_wldev *dev, u16 offset);
-/* Write a value to a 16bit radio register */
-void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val);
-/* Mask a 16bit radio register with a mask */
-void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
-/* OR a 16bit radio register with a bitmap */
-void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
-/* Mask and OR a PHY register with a mask and bitmap */
-void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
-
-
-u16 b43_radio_init2050(struct b43_wldev *dev);
-void b43_radio_init2060(struct b43_wldev *dev);
-
-void b43_radio_turn_on(struct b43_wldev *dev);
-void b43_radio_turn_off(struct b43_wldev *dev, bool force);
-
-int b43_radio_selectchannel(struct b43_wldev *dev, u8 channel,
-			    int synthetic_pu_workaround);
-
-u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel);
-u8 b43_radio_aci_scan(struct b43_wldev *dev);
-
-int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode);
-
-void b43_calc_nrssi_slope(struct b43_wldev *dev);
-void b43_calc_nrssi_threshold(struct b43_wldev *dev);
-s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset);
-void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val);
-void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val);
-void b43_nrssi_mem_update(struct b43_wldev *dev);
-
-void b43_radio_set_tx_iq(struct b43_wldev *dev);
-u16 b43_radio_calibrationvalue(struct b43_wldev *dev);
-
-void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
-				     int *_bbatt, int *_rfatt);
-
-void b43_set_txpower_g(struct b43_wldev *dev,
-		       const struct b43_bbatt *bbatt,
-		       const struct b43_rfatt *rfatt, u8 tx_control);
-
-#endif /* B43_PHY_H_ */
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
new file mode 100644
index 0000000..0f1a84c
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -0,0 +1,643 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11a PHY driver
+
+  Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
+  Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
+  Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+  Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
+  Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "phy_a.h"
+#include "phy_common.h"
+#include "wa.h"
+#include "tables.h"
+#include "main.h"
+
+
+/* Get the freq, as it has to be written to the device. */
+static inline u16 channel2freq_a(u8 channel)
+{
+	B43_WARN_ON(channel > 200);
+
+	return (5000 + 5 * channel);
+}
+
+static inline u16 freq_r3A_value(u16 frequency)
+{
+	u16 value;
+
+	if (frequency < 5091)
+		value = 0x0040;
+	else if (frequency < 5321)
+		value = 0x0000;
+	else if (frequency < 5806)
+		value = 0x0080;
+	else
+		value = 0x0040;
+
+	return value;
+}
+
+#if 0
+/* This function converts a TSSI value to dBm in Q5.2 */
+static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_a *aphy = phy->a;
+	s8 dbm = 0;
+	s32 tmp;
+
+	tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi);
+	tmp += 0x80;
+	tmp = clamp_val(tmp, 0x00, 0xFF);
+	dbm = aphy->tssi2dbm[tmp];
+	//TODO: There's a FIXME on the specs
+
+	return dbm;
+}
+#endif
+
+void b43_radio_set_tx_iq(struct b43_wldev *dev)
+{
+	static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
+	static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
+	u16 tmp = b43_radio_read16(dev, 0x001E);
+	int i, j;
+
+	for (i = 0; i < 5; i++) {
+		for (j = 0; j < 5; j++) {
+			if (tmp == (data_high[i] << 4 | data_low[j])) {
+				b43_phy_write(dev, 0x0069,
+					      (i - j) << 8 | 0x00C0);
+				return;
+			}
+		}
+	}
+}
+
+static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
+{
+	u16 freq, r8, tmp;
+
+	freq = channel2freq_a(channel);
+
+	r8 = b43_radio_read16(dev, 0x0008);
+	b43_write16(dev, 0x03F0, freq);
+	b43_radio_write16(dev, 0x0008, r8);
+
+	//TODO: write max channel TX power? to Radio 0x2D
+	tmp = b43_radio_read16(dev, 0x002E);
+	tmp &= 0x0080;
+	//TODO: OR tmp with the Power out estimation for this channel?
+	b43_radio_write16(dev, 0x002E, tmp);
+
+	if (freq >= 4920 && freq <= 5500) {
+		/*
+		 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
+		 *    = (freq * 0.025862069)
+		 */
+		r8 = 3 * freq / 116;	/* is equal to r8 = freq * 0.025862 */
+	}
+	b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
+	b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
+	b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
+	b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022)
+					& 0x000F) | (r8 << 4));
+	b43_radio_write16(dev, 0x002A, (r8 << 4));
+	b43_radio_write16(dev, 0x002B, (r8 << 4));
+	b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008)
+					& 0x00F0) | (r8 << 4));
+	b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029)
+					& 0xFF0F) | 0x00B0);
+	b43_radio_write16(dev, 0x0035, 0x00AA);
+	b43_radio_write16(dev, 0x0036, 0x0085);
+	b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A)
+					& 0xFF20) |
+			  freq_r3A_value(freq));
+	b43_radio_write16(dev, 0x003D,
+			  b43_radio_read16(dev, 0x003D) & 0x00FF);
+	b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081)
+					& 0xFF7F) | 0x0080);
+	b43_radio_write16(dev, 0x0035,
+			  b43_radio_read16(dev, 0x0035) & 0xFFEF);
+	b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035)
+					& 0xFFEF) | 0x0010);
+	b43_radio_set_tx_iq(dev);
+	//TODO: TSSI2dbm workaround
+//FIXME	b43_phy_xmitpower(dev);
+}
+
+void b43_radio_init2060(struct b43_wldev *dev)
+{
+	b43_radio_write16(dev, 0x0004, 0x00C0);
+	b43_radio_write16(dev, 0x0005, 0x0008);
+	b43_radio_write16(dev, 0x0009, 0x0040);
+	b43_radio_write16(dev, 0x0005, 0x00AA);
+	b43_radio_write16(dev, 0x0032, 0x008F);
+	b43_radio_write16(dev, 0x0006, 0x008F);
+	b43_radio_write16(dev, 0x0034, 0x008F);
+	b43_radio_write16(dev, 0x002C, 0x0007);
+	b43_radio_write16(dev, 0x0082, 0x0080);
+	b43_radio_write16(dev, 0x0080, 0x0000);
+	b43_radio_write16(dev, 0x003F, 0x00DA);
+	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
+	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010);
+	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
+	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
+	msleep(1);		/* delay 400usec */
+
+	b43_radio_write16(dev, 0x0081,
+			  (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
+	msleep(1);		/* delay 400usec */
+
+	b43_radio_write16(dev, 0x0005,
+			  (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
+	b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
+	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
+	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
+	b43_radio_write16(dev, 0x0081,
+			  (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
+	b43_radio_write16(dev, 0x0005,
+			  (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
+	b43_phy_write(dev, 0x0063, 0xDDC6);
+	b43_phy_write(dev, 0x0069, 0x07BE);
+	b43_phy_write(dev, 0x006A, 0x0000);
+
+	aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev));
+
+	msleep(1);
+}
+
+static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
+{
+	int i;
+
+	if (dev->phy.rev < 3) {
+		if (enable)
+			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_WRSSI, i, 0xFFF8);
+			}
+		else
+			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
+			}
+	} else {
+		if (enable)
+			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_WRSSI, i, 0x0820);
+		else
+			for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
+				b43_ofdmtab_write16(dev,
+					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
+	}
+}
+
+static void b43_phy_ww(struct b43_wldev *dev)
+{
+	u16 b, curr_s, best_s = 0xFFFF;
+	int i;
+
+	b43_phy_write(dev, B43_PHY_CRS0,
+		b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
+	b43_phy_write(dev, B43_PHY_OFDM(0x1B),
+		b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
+	b43_phy_write(dev, B43_PHY_OFDM(0x82),
+		(b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
+	b43_radio_write16(dev, 0x0009,
+		b43_radio_read16(dev, 0x0009) | 0x0080);
+	b43_radio_write16(dev, 0x0012,
+		(b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
+	b43_wa_initgains(dev);
+	b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
+	b = b43_phy_read(dev, B43_PHY_PWRDOWN);
+	b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
+	b43_radio_write16(dev, 0x0004,
+		b43_radio_read16(dev, 0x0004) | 0x0004);
+	for (i = 0x10; i <= 0x20; i++) {
+		b43_radio_write16(dev, 0x0013, i);
+		curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
+		if (!curr_s) {
+			best_s = 0x0000;
+			break;
+		} else if (curr_s >= 0x0080)
+			curr_s = 0x0100 - curr_s;
+		if (curr_s < best_s)
+			best_s = curr_s;
+	}
+	b43_phy_write(dev, B43_PHY_PWRDOWN, b);
+	b43_radio_write16(dev, 0x0004,
+		b43_radio_read16(dev, 0x0004) & 0xFFFB);
+	b43_radio_write16(dev, 0x0013, best_s);
+	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
+	b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
+	b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
+	b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
+	b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
+	b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
+	b43_phy_write(dev, B43_PHY_OFDM(0xBB),
+		(b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
+	b43_phy_write(dev, B43_PHY_OFDM61,
+		(b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
+	b43_phy_write(dev, B43_PHY_OFDM(0x13),
+		(b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
+	b43_phy_write(dev, B43_PHY_OFDM(0x14),
+		(b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
+	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
+	for (i = 0; i < 6; i++)
+		b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
+	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
+	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
+	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
+	b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
+	b43_phy_write(dev, B43_PHY_CRS0,
+		b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
+}
+
+static void hardware_pctl_init_aphy(struct b43_wldev *dev)
+{
+	//TODO
+}
+
+void b43_phy_inita(struct b43_wldev *dev)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+	struct b43_phy *phy = &dev->phy;
+
+	/* This lowlevel A-PHY init is also called from G-PHY init.
+	 * So we must not access phy->a, if called from G-PHY code.
+	 */
+	B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
+		    (phy->type != B43_PHYTYPE_G));
+
+	might_sleep();
+
+	if (phy->rev >= 6) {
+		if (phy->type == B43_PHYTYPE_A)
+			b43_phy_write(dev, B43_PHY_OFDM(0x1B),
+				b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
+		if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
+			b43_phy_write(dev, B43_PHY_ENCORE,
+				b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
+		else
+			b43_phy_write(dev, B43_PHY_ENCORE,
+				b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
+	}
+
+	b43_wa_all(dev);
+
+	if (phy->type == B43_PHYTYPE_A) {
+		if (phy->gmode && (phy->rev < 3))
+			b43_phy_write(dev, 0x0034,
+				b43_phy_read(dev, 0x0034) | 0x0001);
+		b43_phy_rssiagc(dev, 0);
+
+		b43_phy_write(dev, B43_PHY_CRS0,
+			b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
+
+		b43_radio_init2060(dev);
+
+		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
+		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
+		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
+			; //TODO: A PHY LO
+		}
+
+		if (phy->rev >= 3)
+			b43_phy_ww(dev);
+
+		hardware_pctl_init_aphy(dev);
+
+		//TODO: radar detection
+	}
+
+	if ((phy->type == B43_PHYTYPE_G) &&
+	    (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
+		b43_phy_write(dev, B43_PHY_OFDM(0x6E),
+				  (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
+				   & 0xE000) | 0x3CF);
+	}
+}
+
+/* Initialise the TSSI->dBm lookup table */
+static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_a *aphy = phy->a;
+	s16 pab0, pab1, pab2;
+
+	pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
+	pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
+	pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
+
+	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
+	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
+		/* The pabX values are set in SPROM. Use them. */
+		if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
+		    (s8) dev->dev->bus->sprom.itssi_a != -1)
+			aphy->tgt_idle_tssi =
+			    (s8) (dev->dev->bus->sprom.itssi_a);
+		else
+			aphy->tgt_idle_tssi = 62;
+		aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
+							       pab1, pab2);
+		if (!aphy->tssi2dbm)
+			return -ENOMEM;
+	} else {
+		/* pabX values not set in SPROM,
+		 * but APHY needs a generated table. */
+		aphy->tssi2dbm = NULL;
+		b43err(dev->wl, "Could not generate tssi2dBm "
+		       "table (wrong SPROM info)!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int b43_aphy_op_allocate(struct b43_wldev *dev)
+{
+	struct b43_phy_a *aphy;
+	int err;
+
+	aphy = kzalloc(sizeof(*aphy), GFP_KERNEL);
+	if (!aphy)
+		return -ENOMEM;
+	dev->phy.a = aphy;
+
+	err = b43_aphy_init_tssi2dbm_table(dev);
+	if (err)
+		goto err_free_aphy;
+
+	return 0;
+
+err_free_aphy:
+	kfree(aphy);
+	dev->phy.a = NULL;
+
+	return err;
+}
+
+static void b43_aphy_op_prepare_structs(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_a *aphy = phy->a;
+	const void *tssi2dbm;
+	int tgt_idle_tssi;
+
+	/* tssi2dbm table is constant, so it is initialized at alloc time.
+	 * Save a copy of the pointer. */
+	tssi2dbm = aphy->tssi2dbm;
+	tgt_idle_tssi = aphy->tgt_idle_tssi;
+
+	/* Zero out the whole PHY structure. */
+	memset(aphy, 0, sizeof(*aphy));
+
+	aphy->tssi2dbm = tssi2dbm;
+	aphy->tgt_idle_tssi = tgt_idle_tssi;
+
+	//TODO init struct b43_phy_a
+
+}
+
+static void b43_aphy_op_free(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_a *aphy = phy->a;
+
+	kfree(aphy->tssi2dbm);
+	aphy->tssi2dbm = NULL;
+
+	kfree(aphy);
+	dev->phy.a = NULL;
+}
+
+static int b43_aphy_op_init(struct b43_wldev *dev)
+{
+	b43_phy_inita(dev);
+
+	return 0;
+}
+
+static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
+{
+	/* OFDM registers are base-registers for the A-PHY. */
+	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
+		offset &= ~B43_PHYROUTE;
+		offset |= B43_PHYROUTE_BASE;
+	}
+
+#if B43_DEBUG
+	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
+		/* Ext-G registers are only available on G-PHYs */
+		b43err(dev->wl, "Invalid EXT-G PHY access at "
+		       "0x%04X on A-PHY\n", offset);
+		dump_stack();
+	}
+	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
+		/* N-BMODE registers are only available on N-PHYs */
+		b43err(dev->wl, "Invalid N-BMODE PHY access at "
+		       "0x%04X on A-PHY\n", offset);
+		dump_stack();
+	}
+#endif /* B43_DEBUG */
+
+	return offset;
+}
+
+static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg)
+{
+	reg = adjust_phyreg(dev, reg);
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	reg = adjust_phyreg(dev, reg);
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+	/* A-PHY needs 0x40 for read access */
+	reg |= 0x40;
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
+}
+
+static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
+}
+
+static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
+{
+	return (dev->phy.rev >= 5);
+}
+
+static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
+					enum rfkill_state state)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (state == RFKILL_STATE_UNBLOCKED) {
+		if (phy->radio_on)
+			return;
+		b43_radio_write16(dev, 0x0004, 0x00C0);
+		b43_radio_write16(dev, 0x0005, 0x0008);
+		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
+		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
+		b43_radio_init2060(dev);
+	} else {
+		b43_radio_write16(dev, 0x0004, 0x00FF);
+		b43_radio_write16(dev, 0x0005, 0x00FB);
+		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
+		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
+	}
+}
+
+static int b43_aphy_op_switch_channel(struct b43_wldev *dev,
+				      unsigned int new_channel)
+{
+	if (new_channel > 200)
+		return -EINVAL;
+	aphy_channel_switch(dev, new_channel);
+
+	return 0;
+}
+
+static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev)
+{
+	return 36; /* Default to channel 36 */
+}
+
+static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
+{//TODO
+	struct b43_phy *phy = &dev->phy;
+	u64 hf;
+	u16 tmp;
+	int autodiv = 0;
+
+	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
+		autodiv = 1;
+
+	hf = b43_hf_read(dev);
+	hf &= ~B43_HF_ANTDIVHELP;
+	b43_hf_write(dev, hf);
+
+	tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
+	tmp &= ~B43_PHY_BBANDCFG_RXANT;
+	tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
+	    << B43_PHY_BBANDCFG_RXANT_SHIFT;
+	b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
+
+	if (autodiv) {
+		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
+		if (antenna == B43_ANTENNA_AUTO0)
+			tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
+		else
+			tmp |= B43_PHY_ANTDWELL_AUTODIV1;
+		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
+	}
+	if (phy->rev < 3) {
+		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
+		tmp = (tmp & 0xFF00) | 0x24;
+		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
+	} else {
+		tmp = b43_phy_read(dev, B43_PHY_OFDM61);
+		tmp |= 0x10;
+		b43_phy_write(dev, B43_PHY_OFDM61, tmp);
+		if (phy->analog == 3) {
+			b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
+				      0x1D);
+			b43_phy_write(dev, B43_PHY_ADIVRELATED,
+				      8);
+		} else {
+			b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
+				      0x3A);
+			tmp =
+			    b43_phy_read(dev,
+					 B43_PHY_ADIVRELATED);
+			tmp = (tmp & 0xFF00) | 8;
+			b43_phy_write(dev, B43_PHY_ADIVRELATED,
+				      tmp);
+		}
+	}
+
+	hf |= B43_HF_ANTDIVHELP;
+	b43_hf_write(dev, hf);
+}
+
+static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev)
+{//TODO
+}
+
+static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev,
+							bool ignore_tssi)
+{//TODO
+	return B43_TXPWR_RES_DONE;
+}
+
+static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev)
+{//TODO
+}
+
+static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
+{//TODO
+}
+
+const struct b43_phy_operations b43_phyops_a = {
+	.allocate		= b43_aphy_op_allocate,
+	.free			= b43_aphy_op_free,
+	.prepare_structs	= b43_aphy_op_prepare_structs,
+	.init			= b43_aphy_op_init,
+	.phy_read		= b43_aphy_op_read,
+	.phy_write		= b43_aphy_op_write,
+	.radio_read		= b43_aphy_op_radio_read,
+	.radio_write		= b43_aphy_op_radio_write,
+	.supports_hwpctl	= b43_aphy_op_supports_hwpctl,
+	.software_rfkill	= b43_aphy_op_software_rfkill,
+	.switch_analog		= b43_phyop_switch_analog_generic,
+	.switch_channel		= b43_aphy_op_switch_channel,
+	.get_default_chan	= b43_aphy_op_get_default_chan,
+	.set_rx_antenna		= b43_aphy_op_set_rx_antenna,
+	.recalc_txpower		= b43_aphy_op_recalc_txpower,
+	.adjust_txpower		= b43_aphy_op_adjust_txpower,
+	.pwork_15sec		= b43_aphy_op_pwork_15sec,
+	.pwork_60sec		= b43_aphy_op_pwork_60sec,
+};
diff --git a/drivers/net/wireless/b43/phy_a.h b/drivers/net/wireless/b43/phy_a.h
new file mode 100644
index 0000000..5cfaab7
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.h
@@ -0,0 +1,130 @@
+#ifndef LINUX_B43_PHY_A_H_
+#define LINUX_B43_PHY_A_H_
+
+#include "phy_common.h"
+
+
+/* OFDM (A) PHY Registers */
+#define B43_PHY_VERSION_OFDM		B43_PHY_OFDM(0x00)	/* Versioning register for A-PHY */
+#define B43_PHY_BBANDCFG		B43_PHY_OFDM(0x01)	/* Baseband config */
+#define  B43_PHY_BBANDCFG_RXANT		0x180	/* RX Antenna selection */
+#define  B43_PHY_BBANDCFG_RXANT_SHIFT	7
+#define B43_PHY_PWRDOWN			B43_PHY_OFDM(0x03)	/* Powerdown */
+#define B43_PHY_CRSTHRES1_R1		B43_PHY_OFDM(0x06)	/* CRS Threshold 1 (phy.rev 1 only) */
+#define B43_PHY_LNAHPFCTL		B43_PHY_OFDM(0x1C)	/* LNA/HPF control */
+#define B43_PHY_LPFGAINCTL		B43_PHY_OFDM(0x20)	/* LPF Gain control */
+#define B43_PHY_ADIVRELATED		B43_PHY_OFDM(0x27)	/* FIXME rename */
+#define B43_PHY_CRS0			B43_PHY_OFDM(0x29)
+#define  B43_PHY_CRS0_EN		0x4000
+#define B43_PHY_PEAK_COUNT		B43_PHY_OFDM(0x30)
+#define B43_PHY_ANTDWELL		B43_PHY_OFDM(0x2B)	/* Antenna dwell */
+#define  B43_PHY_ANTDWELL_AUTODIV1	0x0100	/* Automatic RX diversity start antenna */
+#define B43_PHY_ENCORE			B43_PHY_OFDM(0x49)	/* "Encore" (RangeMax / BroadRange) */
+#define  B43_PHY_ENCORE_EN		0x0200	/* Encore enable */
+#define B43_PHY_LMS			B43_PHY_OFDM(0x55)
+#define B43_PHY_OFDM61			B43_PHY_OFDM(0x61)	/* FIXME rename */
+#define  B43_PHY_OFDM61_10		0x0010	/* FIXME rename */
+#define B43_PHY_IQBAL			B43_PHY_OFDM(0x69)	/* I/Q balance */
+#define B43_PHY_BBTXDC_BIAS		B43_PHY_OFDM(0x6B)	/* Baseband TX DC bias */
+#define B43_PHY_OTABLECTL		B43_PHY_OFDM(0x72)	/* OFDM table control (see below) */
+#define  B43_PHY_OTABLEOFF		0x03FF	/* OFDM table offset (see below) */
+#define  B43_PHY_OTABLENR		0xFC00	/* OFDM table number (see below) */
+#define  B43_PHY_OTABLENR_SHIFT		10
+#define B43_PHY_OTABLEI			B43_PHY_OFDM(0x73)	/* OFDM table data I */
+#define B43_PHY_OTABLEQ			B43_PHY_OFDM(0x74)	/* OFDM table data Q */
+#define B43_PHY_HPWR_TSSICTL		B43_PHY_OFDM(0x78)	/* Hardware power TSSI control */
+#define B43_PHY_ADCCTL			B43_PHY_OFDM(0x7A)	/* ADC control */
+#define B43_PHY_IDLE_TSSI		B43_PHY_OFDM(0x7B)
+#define B43_PHY_A_TEMP_SENSE		B43_PHY_OFDM(0x7C)	/* A PHY temperature sense */
+#define B43_PHY_NRSSITHRES		B43_PHY_OFDM(0x8A)	/* NRSSI threshold */
+#define B43_PHY_ANTWRSETT		B43_PHY_OFDM(0x8C)	/* Antenna WR settle */
+#define  B43_PHY_ANTWRSETT_ARXDIV	0x2000	/* Automatic RX diversity enabled */
+#define B43_PHY_CLIPPWRDOWNT		B43_PHY_OFDM(0x93)	/* Clip powerdown threshold */
+#define B43_PHY_OFDM9B			B43_PHY_OFDM(0x9B)	/* FIXME rename */
+#define B43_PHY_N1P1GAIN		B43_PHY_OFDM(0xA0)
+#define B43_PHY_P1P2GAIN		B43_PHY_OFDM(0xA1)
+#define B43_PHY_N1N2GAIN		B43_PHY_OFDM(0xA2)
+#define B43_PHY_CLIPTHRES		B43_PHY_OFDM(0xA3)
+#define B43_PHY_CLIPN1P2THRES		B43_PHY_OFDM(0xA4)
+#define B43_PHY_CCKSHIFTBITS_WA		B43_PHY_OFDM(0xA5)	/* CCK shiftbits workaround, FIXME rename */
+#define B43_PHY_CCKSHIFTBITS		B43_PHY_OFDM(0xA7)	/* FIXME rename */
+#define B43_PHY_DIVSRCHIDX		B43_PHY_OFDM(0xA8)	/* Divider search gain/index */
+#define B43_PHY_CLIPP2THRES		B43_PHY_OFDM(0xA9)
+#define B43_PHY_CLIPP3THRES		B43_PHY_OFDM(0xAA)
+#define B43_PHY_DIVP1P2GAIN		B43_PHY_OFDM(0xAB)
+#define B43_PHY_DIVSRCHGAINBACK		B43_PHY_OFDM(0xAD)	/* Divider search gain back */
+#define B43_PHY_DIVSRCHGAINCHNG		B43_PHY_OFDM(0xAE)	/* Divider search gain change */
+#define B43_PHY_CRSTHRES1		B43_PHY_OFDM(0xC0)	/* CRS Threshold 1 (phy.rev >= 2 only) */
+#define B43_PHY_CRSTHRES2		B43_PHY_OFDM(0xC1)	/* CRS Threshold 2 (phy.rev >= 2 only) */
+#define B43_PHY_TSSIP_LTBASE		B43_PHY_OFDM(0x380)	/* TSSI power lookup table base */
+#define B43_PHY_DC_LTBASE		B43_PHY_OFDM(0x3A0)	/* DC lookup table base */
+#define B43_PHY_GAIN_LTBASE		B43_PHY_OFDM(0x3C0)	/* Gain lookup table base */
+
+/*** OFDM table numbers ***/
+#define B43_OFDMTAB(number, offset)	(((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
+#define B43_OFDMTAB_AGC1		B43_OFDMTAB(0x00, 0)
+#define B43_OFDMTAB_GAIN0		B43_OFDMTAB(0x00, 0)
+#define B43_OFDMTAB_GAINX		B43_OFDMTAB(0x01, 0)	//TODO rename
+#define B43_OFDMTAB_GAIN1		B43_OFDMTAB(0x01, 4)
+#define B43_OFDMTAB_AGC3		B43_OFDMTAB(0x02, 0)
+#define B43_OFDMTAB_GAIN2		B43_OFDMTAB(0x02, 3)
+#define B43_OFDMTAB_LNAHPFGAIN1		B43_OFDMTAB(0x03, 0)
+#define B43_OFDMTAB_WRSSI		B43_OFDMTAB(0x04, 0)
+#define B43_OFDMTAB_LNAHPFGAIN2		B43_OFDMTAB(0x04, 0)
+#define B43_OFDMTAB_NOISESCALE		B43_OFDMTAB(0x05, 0)
+#define B43_OFDMTAB_AGC2		B43_OFDMTAB(0x06, 0)
+#define B43_OFDMTAB_ROTOR		B43_OFDMTAB(0x08, 0)
+#define B43_OFDMTAB_ADVRETARD		B43_OFDMTAB(0x09, 0)
+#define B43_OFDMTAB_DAC			B43_OFDMTAB(0x0C, 0)
+#define B43_OFDMTAB_DC			B43_OFDMTAB(0x0E, 7)
+#define B43_OFDMTAB_PWRDYN2		B43_OFDMTAB(0x0E, 12)
+#define B43_OFDMTAB_LNAGAIN		B43_OFDMTAB(0x0E, 13)
+#define B43_OFDMTAB_UNKNOWN_0F		B43_OFDMTAB(0x0F, 0)	//TODO rename
+#define B43_OFDMTAB_UNKNOWN_APHY	B43_OFDMTAB(0x0F, 7)	//TODO rename
+#define B43_OFDMTAB_LPFGAIN		B43_OFDMTAB(0x0F, 12)
+#define B43_OFDMTAB_RSSI		B43_OFDMTAB(0x10, 0)
+#define B43_OFDMTAB_UNKNOWN_11		B43_OFDMTAB(0x11, 4)	//TODO rename
+#define B43_OFDMTAB_AGC1_R1		B43_OFDMTAB(0x13, 0)
+#define B43_OFDMTAB_GAINX_R1		B43_OFDMTAB(0x14, 0)	//TODO remove!
+#define B43_OFDMTAB_MINSIGSQ		B43_OFDMTAB(0x14, 0)
+#define B43_OFDMTAB_AGC3_R1		B43_OFDMTAB(0x15, 0)
+#define B43_OFDMTAB_WRSSI_R1		B43_OFDMTAB(0x15, 4)
+#define B43_OFDMTAB_TSSI		B43_OFDMTAB(0x15, 0)
+#define B43_OFDMTAB_DACRFPABB		B43_OFDMTAB(0x16, 0)
+#define B43_OFDMTAB_DACOFF		B43_OFDMTAB(0x17, 0)
+#define B43_OFDMTAB_DCBIAS		B43_OFDMTAB(0x18, 0)
+
+u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
+void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
+			 u16 offset, u16 value);
+u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
+void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
+			 u16 offset, u32 value);
+
+
+struct b43_phy_a {
+	/* Pointer to the table used to convert a
+	 * TSSI value to dBm-Q5.2 */
+	const s8 *tssi2dbm;
+	/* Target idle TSSI */
+	int tgt_idle_tssi;
+	/* Current idle TSSI */
+	int cur_idle_tssi;//FIXME value currently not set
+
+	/* A-PHY TX Power control value. */
+	u16 txpwr_offset;
+
+	//TODO lots of missing stuff
+};
+
+/**
+ * b43_phy_inita - Low-level A-PHY init routine.
+ * This is _only_ used by the G-PHY code.
+ */
+void b43_phy_inita(struct b43_wldev *dev);
+
+
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_a;
+
+#endif /* LINUX_B43_PHY_A_H_ */
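
The B43_OFDMTAB(number, offset) macro above packs an OFDM table number and an
entry offset into the single 16-bit value that the b43_ofdmtab_* accessors use
to address a table entry (presumably via B43_PHY_OTABLECTL, per the register
comments above). A minimal standalone sketch of that packing, using only
constants copied from this header; it is an illustration, not part of the patch:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Constants copied from phy_a.h above. */
	#define B43_PHY_OTABLEOFF	0x03FF
	#define B43_PHY_OTABLENR	0xFC00
	#define B43_PHY_OTABLENR_SHIFT	10
	#define B43_OFDMTAB(number, offset) \
		(((number) << B43_PHY_OTABLENR_SHIFT) | (offset))

	int main(void)
	{
		uint16_t id = B43_OFDMTAB(0x0F, 12);	/* B43_OFDMTAB_LPFGAIN */

		/* Split the packed value back into table number and offset. */
		printf("table=0x%02X offset=%u id=0x%04X\n",
		       (unsigned)(id >> B43_PHY_OTABLENR_SHIFT),
		       (unsigned)(id & B43_PHY_OTABLEOFF),
		       (unsigned)id);
		assert((id >> B43_PHY_OTABLENR_SHIFT) == 0x0F);
		assert((id & B43_PHY_OTABLEOFF) == 12);
		return 0;
	}
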
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
new file mode 100644
index 0000000..af37abc
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -0,0 +1,381 @@
+/*
+
+  Broadcom B43 wireless driver
+  Common PHY routines
+
+  Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
+  Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
+  Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+  Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
+  Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "phy_common.h"
+#include "phy_g.h"
+#include "phy_a.h"
+#include "phy_n.h"
+#include "phy_lp.h"
+#include "b43.h"
+#include "main.h"
+
+
+int b43_phy_allocate(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &(dev->phy);
+	int err;
+
+	phy->ops = NULL;
+
+	switch (phy->type) {
+	case B43_PHYTYPE_A:
+		phy->ops = &b43_phyops_a;
+		break;
+	case B43_PHYTYPE_G:
+		phy->ops = &b43_phyops_g;
+		break;
+	case B43_PHYTYPE_N:
+#ifdef CONFIG_B43_NPHY
+		phy->ops = &b43_phyops_n;
+#endif
+		break;
+	case B43_PHYTYPE_LP:
+#ifdef CONFIG_B43_PHY_LP
+		phy->ops = &b43_phyops_lp;
+#endif
+		break;
+	}
+	if (B43_WARN_ON(!phy->ops))
+		return -ENODEV;
+
+	err = phy->ops->allocate(dev);
+	if (err)
+		phy->ops = NULL;
+
+	return err;
+}
+
+void b43_phy_free(struct b43_wldev *dev)
+{
+	dev->phy.ops->free(dev);
+	dev->phy.ops = NULL;
+}
+
+int b43_phy_init(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	const struct b43_phy_operations *ops = phy->ops;
+	int err;
+
+	phy->channel = ops->get_default_chan(dev);
+
+	ops->software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
+	err = ops->init(dev);
+	if (err) {
+		b43err(dev->wl, "PHY init failed\n");
+		goto err_block_rf;
+	}
+	/* Make sure to switch hardware and firmware (SHM) to
+	 * the default channel. */
+	err = b43_switch_channel(dev, ops->get_default_chan(dev));
+	if (err) {
+		b43err(dev->wl, "PHY init: Channel switch to default failed\n");
+		goto err_phy_exit;
+	}
+
+	return 0;
+
+err_phy_exit:
+	if (ops->exit)
+		ops->exit(dev);
+err_block_rf:
+	ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
+
+	return err;
+}
+
+void b43_phy_exit(struct b43_wldev *dev)
+{
+	const struct b43_phy_operations *ops = dev->phy.ops;
+
+	ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
+	if (ops->exit)
+		ops->exit(dev);
+}
+
+bool b43_has_hardware_pctl(struct b43_wldev *dev)
+{
+	if (!dev->phy.hardware_power_control)
+		return 0;
+	if (!dev->phy.ops->supports_hwpctl)
+		return 0;
+	return dev->phy.ops->supports_hwpctl(dev);
+}
+
+void b43_radio_lock(struct b43_wldev *dev)
+{
+	u32 macctl;
+
+	macctl = b43_read32(dev, B43_MMIO_MACCTL);
+	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
+	macctl |= B43_MACCTL_RADIOLOCK;
+	b43_write32(dev, B43_MMIO_MACCTL, macctl);
+	/* Commit the write and wait for the device
+	 * to exit any radio register access. */
+	b43_read32(dev, B43_MMIO_MACCTL);
+	udelay(10);
+}
+
+void b43_radio_unlock(struct b43_wldev *dev)
+{
+	u32 macctl;
+
+	/* Commit any write */
+	b43_read16(dev, B43_MMIO_PHY_VER);
+	/* unlock */
+	macctl = b43_read32(dev, B43_MMIO_MACCTL);
+	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
+	macctl &= ~B43_MACCTL_RADIOLOCK;
+	b43_write32(dev, B43_MMIO_MACCTL, macctl);
+}
+
+void b43_phy_lock(struct b43_wldev *dev)
+{
+#if B43_DEBUG
+	B43_WARN_ON(dev->phy.phy_locked);
+	dev->phy.phy_locked = 1;
+#endif
+	B43_WARN_ON(dev->dev->id.revision < 3);
+
+	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
+		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
+}
+
+void b43_phy_unlock(struct b43_wldev *dev)
+{
+#if B43_DEBUG
+	B43_WARN_ON(!dev->phy.phy_locked);
+	dev->phy.phy_locked = 0;
+#endif
+	B43_WARN_ON(dev->dev->id.revision < 3);
+
+	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
+		b43_power_saving_ctl_bits(dev, 0);
+}
+
+u16 b43_radio_read(struct b43_wldev *dev, u16 reg)
+{
+	return dev->phy.ops->radio_read(dev, reg);
+}
+
+void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	dev->phy.ops->radio_write(dev, reg, value);
+}
+
+void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
+{
+	b43_radio_write16(dev, offset,
+			  b43_radio_read16(dev, offset) & mask);
+}
+
+void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
+{
+	b43_radio_write16(dev, offset,
+			  b43_radio_read16(dev, offset) | set);
+}
+
+void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
+{
+	b43_radio_write16(dev, offset,
+			  (b43_radio_read16(dev, offset) & mask) | set);
+}
+
+u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
+{
+	return dev->phy.ops->phy_read(dev, reg);
+}
+
+void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	dev->phy.ops->phy_write(dev, reg, value);
+}
+
+void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
+{
+	b43_phy_write(dev, offset,
+		      b43_phy_read(dev, offset) & mask);
+}
+
+void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set)
+{
+	b43_phy_write(dev, offset,
+		      b43_phy_read(dev, offset) | set);
+}
+
+void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
+{
+	b43_phy_write(dev, offset,
+		      (b43_phy_read(dev, offset) & mask) | set);
+}
+
+int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
+{
+	struct b43_phy *phy = &(dev->phy);
+	u16 channelcookie, savedcookie;
+	int err;
+
+	if (new_channel == B43_DEFAULT_CHANNEL)
+		new_channel = phy->ops->get_default_chan(dev);
+
+	/* First we set the channel radio code to prevent the
+	 * firmware from sending ghost packets.
+	 */
+	channelcookie = new_channel;
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		channelcookie |= 0x100;
+	//FIXME set 40MHz flag if required
+	savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie);
+
+	/* Now try to switch the PHY hardware channel. */
+	err = phy->ops->switch_channel(dev, new_channel);
+	if (err)
+		goto err_restore_cookie;
+
+	dev->phy.channel = new_channel;
+	/* Wait for the radio to tune to the channel and stabilize. */
+	msleep(8);
+
+	return 0;
+
+err_restore_cookie:
+	b43_shm_write16(dev, B43_SHM_SHARED,
+			B43_SHM_SH_CHAN, savedcookie);
+
+	return err;
+}
+
+void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (state == RFKILL_STATE_HARD_BLOCKED) {
+		/* We cannot hardware-block the device */
+		state = RFKILL_STATE_SOFT_BLOCKED;
+	}
+
+	phy->ops->software_rfkill(dev, state);
+	phy->radio_on = (state == RFKILL_STATE_UNBLOCKED);
+}
+
+/**
+ * b43_phy_txpower_adjust_work - TX power workqueue.
+ *
+ * Workqueue for updating the TX power parameters in hardware.
+ */
+void b43_phy_txpower_adjust_work(struct work_struct *work)
+{
+	struct b43_wl *wl = container_of(work, struct b43_wl,
+					 txpower_adjust_work);
+	struct b43_wldev *dev;
+
+	mutex_lock(&wl->mutex);
+	dev = wl->current_dev;
+
+	if (likely(dev && (b43_status(dev) >= B43_STAT_STARTED)))
+		dev->phy.ops->adjust_txpower(dev);
+
+	mutex_unlock(&wl->mutex);
+}
+
+/* Called with wl->irq_lock locked */
+void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
+{
+	struct b43_phy *phy = &dev->phy;
+	unsigned long now = jiffies;
+	enum b43_txpwr_result result;
+
+	if (!(flags & B43_TXPWR_IGNORE_TIME)) {
+		/* Check if it's time for a TXpower check. */
+		if (time_before(now, phy->next_txpwr_check_time))
+			return; /* Not yet */
+	}
+	/* The next check will be needed in two seconds, or later. */
+	phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));
+
+	if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
+	    (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306))
+		return; /* No software txpower adjustment needed */
+
+	result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
+	if (result == B43_TXPWR_RES_DONE)
+		return; /* We are done. */
+	B43_WARN_ON(result != B43_TXPWR_RES_NEED_ADJUST);
+	B43_WARN_ON(phy->ops->adjust_txpower == NULL);
+
+	/* We must adjust the transmission power in hardware.
+	 * Schedule b43_phy_txpower_adjust_work(). */
+	queue_work(dev->wl->hw->workqueue, &dev->wl->txpower_adjust_work);
+}
+
+int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
+{
+	const bool is_ofdm = (shm_offset != B43_SHM_SH_TSSI_CCK);
+	unsigned int a, b, c, d;
+	unsigned int average;
+	u32 tmp;
+
+	tmp = b43_shm_read32(dev, B43_SHM_SHARED, shm_offset);
+	a = tmp & 0xFF;
+	b = (tmp >> 8) & 0xFF;
+	c = (tmp >> 16) & 0xFF;
+	d = (tmp >> 24) & 0xFF;
+	if (a == 0 || a == B43_TSSI_MAX ||
+	    b == 0 || b == B43_TSSI_MAX ||
+	    c == 0 || c == B43_TSSI_MAX ||
+	    d == 0 || d == B43_TSSI_MAX)
+		return -ENOENT;
+	/* The values are OK. Clear them. */
+	tmp = B43_TSSI_MAX | (B43_TSSI_MAX << 8) |
+	      (B43_TSSI_MAX << 16) | (B43_TSSI_MAX << 24);
+	b43_shm_write32(dev, B43_SHM_SHARED, shm_offset, tmp);
+
+	if (is_ofdm) {
+		a = (a + 32) & 0x3F;
+		b = (b + 32) & 0x3F;
+		c = (c + 32) & 0x3F;
+		d = (d + 32) & 0x3F;
+	}
+
+	/* Get the average of the values with 0.5 added to each value. */
+	average = (a + b + c + d + 2) / 4;
+	if (is_ofdm) {
+		/* Adjust for CCK-boost */
+		if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+		    & B43_HF_CCKBOOST)
+			average = (average >= 13) ? (average - 13) : 0;
+	}
+
+	return average;
+}
+
+void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
+{
+	b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
+}
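
The TSSI helper above (b43_phy_shm_tssi_read) unpacks four 8-bit samples from
one 32-bit SHM word, rejects stale values (0 or B43_TSSI_MAX), applies the OFDM
bias, and averages with rounding: (a + b + c + d + 2) / 4 adds the equivalent
of 0.5 to the average before truncation. A standalone sketch of just that
arithmetic; B43_TSSI_MAX is assumed to be 0x7F, matching the 0x7F7F clear
pattern used elsewhere in the driver, and the CCK-boost correction is omitted:

	#include <stdint.h>
	#include <stdio.h>

	#define B43_TSSI_MAX	0x7F	/* assumed; matches the 0x7F7F clears */

	/* Average four packed TSSI samples the way the OFDM path of
	 * b43_phy_shm_tssi_read() does. Returns -1 if any sample still
	 * holds its reset value. */
	static int tssi_average_ofdm(uint32_t packed)
	{
		unsigned int v, i, sum = 0;

		for (i = 0; i < 4; i++) {
			v = (packed >> (8 * i)) & 0xFF;
			if (v == 0 || v == B43_TSSI_MAX)
				return -1;	/* no fresh measurement */
			sum += (v + 32) & 0x3F;	/* OFDM bias adjustment */
		}
		return (sum + 2) / 4;		/* +2 rounds the /4 to nearest */
	}

	int main(void)
	{
		/* Hypothetical sample word holding four mid-range readings. */
		printf("average TSSI = %d\n", tssi_average_ofdm(0x30323436));
		return 0;
	}
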
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
new file mode 100644
index 0000000..c9f5430
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -0,0 +1,413 @@
+#ifndef LINUX_B43_PHY_COMMON_H_
+#define LINUX_B43_PHY_COMMON_H_
+
+#include <linux/rfkill.h>
+
+struct b43_wldev;
+
+
+/* PHY register routing bits */
+#define B43_PHYROUTE			0x0C00 /* PHY register routing bits mask */
+#define  B43_PHYROUTE_BASE		0x0000 /* Base registers */
+#define  B43_PHYROUTE_OFDM_GPHY		0x0400 /* OFDM register routing for G-PHYs */
+#define  B43_PHYROUTE_EXT_GPHY		0x0800 /* Extended G-PHY registers */
+#define  B43_PHYROUTE_N_BMODE		0x0C00 /* N-PHY BMODE registers */
+
+/* CCK (B-PHY) registers. */
+#define B43_PHY_CCK(reg)		((reg) | B43_PHYROUTE_BASE)
+/* N-PHY registers. */
+#define B43_PHY_N(reg)			((reg) | B43_PHYROUTE_BASE)
+/* N-PHY BMODE registers. */
+#define B43_PHY_N_BMODE(reg)		((reg) | B43_PHYROUTE_N_BMODE)
+/* OFDM (A-PHY) registers. */
+#define B43_PHY_OFDM(reg)		((reg) | B43_PHYROUTE_OFDM_GPHY)
+/* Extended G-PHY registers. */
+#define B43_PHY_EXTG(reg)		((reg) | B43_PHYROUTE_EXT_GPHY)
+
+
+/* Masks for the PHY versioning registers. */
+#define B43_PHYVER_ANALOG		0xF000
+#define B43_PHYVER_ANALOG_SHIFT		12
+#define B43_PHYVER_TYPE			0x0F00
+#define B43_PHYVER_TYPE_SHIFT		8
+#define B43_PHYVER_VERSION		0x00FF
+
+/**
+ * enum b43_interference_mitigation - Interference Mitigation mode
+ *
+ * @B43_INTERFMODE_NONE:	Disabled
+ * @B43_INTERFMODE_NONWLAN:	Non-WLAN Interference Mitigation
+ * @B43_INTERFMODE_MANUALWLAN:	WLAN Interference Mitigation
+ * @B43_INTERFMODE_AUTOWLAN:	Automatic WLAN Interference Mitigation
+ */
+enum b43_interference_mitigation {
+	B43_INTERFMODE_NONE,
+	B43_INTERFMODE_NONWLAN,
+	B43_INTERFMODE_MANUALWLAN,
+	B43_INTERFMODE_AUTOWLAN,
+};
+
+/* Antenna identifiers */
+enum {
+	B43_ANTENNA0,		/* Antenna 0 */
+	B43_ANTENNA1,		/* Antenna 1 */
+	B43_ANTENNA_AUTO1,	/* Automatic, starting with antenna 1 */
+	B43_ANTENNA_AUTO0,	/* Automatic, starting with antenna 0 */
+	B43_ANTENNA2,
+	B43_ANTENNA3 = 8,
+
+	B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
+	B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
+};
+
+/**
+ * enum b43_txpwr_result - Return value for the recalc_txpower PHY op.
+ *
+ * @B43_TXPWR_RES_NEED_ADJUST:	Values changed. Hardware adjustment is needed.
+ * @B43_TXPWR_RES_DONE:		No more work to do. Everything is done.
+ */
+enum b43_txpwr_result {
+	B43_TXPWR_RES_NEED_ADJUST,
+	B43_TXPWR_RES_DONE,
+};
+
+/**
+ * struct b43_phy_operations - Function pointers for PHY ops.
+ *
+ * @allocate:		Allocate and initialise the PHY data structures.
+ * 			Must not be NULL.
+ * @free:		Destroy and free the PHY data structures.
+ * 			Must not be NULL.
+ *
+ * @prepare_structs:	Prepare the PHY data structures.
+ * 			The data structures allocated in @allocate are
+ * 			initialized here.
+ * 			Must not be NULL.
+ * @prepare_hardware:	Prepare the PHY. This is called before b43_chip_init to
+ * 			do some very early PHY hardware init.
+ * 			Can be NULL, if not required.
+ * @init:		Initialize the PHY.
+ * 			Must not be NULL.
+ * @exit:		Shutdown the PHY.
+ * 			Can be NULL, if not required.
+ *
+ * @phy_read:		Read from a PHY register.
+ * 			Must not be NULL.
+ * @phy_write:		Write to a PHY register.
+ * 			Must not be NULL.
+ * @radio_read:		Read from a Radio register.
+ * 			Must not be NULL.
+ * @radio_write:	Write to a Radio register.
+ * 			Must not be NULL.
+ *
+ * @supports_hwpctl:	Returns a boolean indicating whether Hardware
+ * 			Power Control is supported.
+ * 			If NULL, hwpctl is assumed to be never supported.
+ * @software_rfkill:	Turn the radio ON or OFF.
+ * 			Possible state values are
+ * 			RFKILL_STATE_SOFT_BLOCKED or
+ * 			RFKILL_STATE_UNBLOCKED
+ * 			Must not be NULL.
+ * @switch_analog:	Turn the Analog on/off.
+ * 			Must not be NULL.
+ * @switch_channel:	Switch the radio to another channel.
+ * 			Must not be NULL.
+ * @get_default_chan:	Just returns the default channel number.
+ * 			Must not be NULL.
+ * @set_rx_antenna:	Set the antenna used for RX.
+ * 			Can be NULL, if not supported.
+ * @interf_mitigation:	Switch the Interference Mitigation mode.
+ * 			Can be NULL, if not supported.
+ *
+ * @recalc_txpower:	Recalculate the transmission power parameters.
+ * 			This callback has to recalculate the TX power settings,
+ * 			but does not need to write them to the hardware, yet.
+ * 			Returns enum b43_txpwr_result to indicate whether the hardware
+ * 			needs to be adjusted.
+ * 			If B43_TXPWR_RES_NEED_ADJUST is returned, @adjust_txpower
+ * 			will be called later.
+ * 			If the parameter "ignore_tssi" is true, the TSSI values should
+ * 			be ignored and a recalculation of the power settings should be
+ * 			done even if the TSSI values did not change.
+ * 			This callback is called with wl->irq_lock held and must not sleep.
+ * 			Must not be NULL.
+ * @adjust_txpower:	Write the previously calculated TX power settings
+ * 			(from @recalc_txpower) to the hardware.
+ * 			This function may sleep.
+ * 			Can be NULL, if (and ONLY if) @recalc_txpower _always_
+ * 			returns B43_TXPWR_RES_DONE.
+ *
+ * @pwork_15sec:	Periodic work. Called every 15 seconds.
+ * 			Can be NULL, if not required.
+ * @pwork_60sec:	Periodic work. Called every 60 seconds.
+ * 			Can be NULL, if not required.
+ */
+struct b43_phy_operations {
+	/* Initialisation */
+	int (*allocate)(struct b43_wldev *dev);
+	void (*free)(struct b43_wldev *dev);
+	void (*prepare_structs)(struct b43_wldev *dev);
+	int (*prepare_hardware)(struct b43_wldev *dev);
+	int (*init)(struct b43_wldev *dev);
+	void (*exit)(struct b43_wldev *dev);
+
+	/* Register access */
+	u16 (*phy_read)(struct b43_wldev *dev, u16 reg);
+	void (*phy_write)(struct b43_wldev *dev, u16 reg, u16 value);
+	u16 (*radio_read)(struct b43_wldev *dev, u16 reg);
+	void (*radio_write)(struct b43_wldev *dev, u16 reg, u16 value);
+
+	/* Radio */
+	bool (*supports_hwpctl)(struct b43_wldev *dev);
+	void (*software_rfkill)(struct b43_wldev *dev, enum rfkill_state state);
+	void (*switch_analog)(struct b43_wldev *dev, bool on);
+	int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel);
+	unsigned int (*get_default_chan)(struct b43_wldev *dev);
+	void (*set_rx_antenna)(struct b43_wldev *dev, int antenna);
+	int (*interf_mitigation)(struct b43_wldev *dev,
+				 enum b43_interference_mitigation new_mode);
+
+	/* Transmission power adjustment */
+	enum b43_txpwr_result (*recalc_txpower)(struct b43_wldev *dev,
+						bool ignore_tssi);
+	void (*adjust_txpower)(struct b43_wldev *dev);
+
+	/* Misc */
+	void (*pwork_15sec)(struct b43_wldev *dev);
+	void (*pwork_60sec)(struct b43_wldev *dev);
+};
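/*
 * Illustrative sketch (not part of this patch): the dispatch pattern that
 * struct b43_phy_operations establishes, reduced to standalone C. As in
 * b43_phy_allocate() and b43_phy_read() in phy_common.c, one ops table is
 * selected per PHY type and every register access goes through it. All
 * identifiers below are hypothetical.
 */
struct example_phy_ops {
	unsigned short (*phy_read)(void *dev, unsigned short reg);
};

static unsigned short example_phy_read_impl(void *dev, unsigned short reg)
{
	(void)dev;
	(void)reg;
	return 0x1234;	/* stand-in register value */
}

static const struct example_phy_ops example_phyops = {
	.phy_read = example_phy_read_impl,
};

static unsigned short example_read(const struct example_phy_ops *ops,
				   void *dev, unsigned short reg)
{
	return ops->phy_read(dev, reg);	/* same shape as b43_phy_read() */
}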
+
+struct b43_phy_a;
+struct b43_phy_g;
+struct b43_phy_n;
+struct b43_phy_lp;
+
+struct b43_phy {
+	/* Hardware operation callbacks. */
+	const struct b43_phy_operations *ops;
+
+	/* Most hardware context information is stored in the PHY-type-
+	 * specific data structures pointed to by the pointers below.
+	 * Only one of them is valid (the currently enabled PHY). */
+#ifdef CONFIG_B43_DEBUG
+	/* No union for debug build to force NULL derefs in buggy code. */
+	struct {
+#else
+	union {
+#endif
+		/* A-PHY specific information */
+		struct b43_phy_a *a;
+		/* G-PHY specific information */
+		struct b43_phy_g *g;
+		/* N-PHY specific information */
+		struct b43_phy_n *n;
+		/* LP-PHY specific information */
+		struct b43_phy_lp *lp;
+	};
+
+	/* Band support flags. */
+	bool supports_2ghz;
+	bool supports_5ghz;
+
+	/* GMODE bit enabled? */
+	bool gmode;
+
+	/* Analog Type */
+	u8 analog;
+	/* B43_PHYTYPE_ */
+	u8 type;
+	/* PHY revision number. */
+	u8 rev;
+
+	/* Radio versioning */
+	u16 radio_manuf;	/* Radio manufacturer */
+	u16 radio_ver;		/* Radio version */
+	u8 radio_rev;		/* Radio revision */
+
+	/* Software state of the radio */
+	bool radio_on;
+
+	/* Desired TX power level (in dBm).
+	 * This is set by the user and adjusted by the recalc_txpower op. */
+	int desired_txpower;
+
+	/* Hardware Power Control enabled? */
+	bool hardware_power_control;
+
+	/* The time (in absolute jiffies) when the next TX power output
+	 * check is needed. */
+	unsigned long next_txpwr_check_time;
+
+	/* current channel */
+	unsigned int channel;
+
+	/* PHY TX errors counter. */
+	atomic_t txerr_cnt;
+
+#ifdef CONFIG_B43_DEBUG
+	/* PHY registers locked by b43_phy_lock()? */
+	bool phy_locked;
+#endif /* B43_DEBUG */
+};
+
+
+/**
+ * b43_phy_allocate - Allocate PHY structs
+ * Allocate the PHY data structures, based on the current dev->phy.type
+ */
+int b43_phy_allocate(struct b43_wldev *dev);
+
+/**
+ * b43_phy_free - Free PHY structs
+ */
+void b43_phy_free(struct b43_wldev *dev);
+
+/**
+ * b43_phy_init - Initialise the PHY
+ */
+int b43_phy_init(struct b43_wldev *dev);
+
+/**
+ * b43_phy_exit - Cleanup PHY
+ */
+void b43_phy_exit(struct b43_wldev *dev);
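/*
 * Illustrative call order (sketch only, error handling omitted), as
 * implemented by phy_common.c in this patch:
 *
 *	err = b43_phy_allocate(dev);	// select ops by phy->type, ops->allocate()
 *	...
 *	err = b43_phy_init(dev);	// unblock RF, ops->init(), switch to
 *					// ops->get_default_chan(dev)
 *	...
 *	b43_phy_exit(dev);		// soft-block RF, then ops->exit()
 *	b43_phy_free(dev);		// ops->free(), phy->ops = NULL
 */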
+
+/**
+ * b43_has_hardware_pctl - Hardware Power Control supported?
+ * Returns a boolean, whether hardware power control is supported.
+ */
+bool b43_has_hardware_pctl(struct b43_wldev *dev);
+
+/**
+ * b43_phy_read - 16bit PHY register read access
+ */
+u16 b43_phy_read(struct b43_wldev *dev, u16 reg);
+
+/**
+ * b43_phy_write - 16bit PHY register write access
+ */
+void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value);
+
+/**
+ * b43_phy_mask - Mask a PHY register with a mask
+ */
+void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
+
+/**
+ * b43_phy_set - OR a PHY register with a bitmap
+ */
+void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
+
+/**
+ * b43_phy_maskset - Mask and OR a PHY register with a mask and bitmap
+ */
+void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
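/*
 * Usage sketch: the maskset helpers fold the open-coded read/modify/write
 * sequences used elsewhere in this patch into one call. For example, the
 * B43_PHY_ANTDWELL update in phy_a.c,
 *
 *	tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
 *	tmp = (tmp & 0xFF00) | 0x24;
 *	b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
 *
 * is equivalent to:
 *
 *	b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x0024);
 */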
+
+/**
+ * b43_radio_read - 16bit Radio register read access
+ */
+u16 b43_radio_read(struct b43_wldev *dev, u16 reg);
+#define b43_radio_read16	b43_radio_read /* DEPRECATED */
+
+/**
+ * b43_radio_write - 16bit Radio register write access
+ */
+void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value);
+#define b43_radio_write16	b43_radio_write /* DEPRECATED */
+
+/**
+ * b43_radio_mask - Mask a 16bit radio register with a mask
+ */
+void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
+
+/**
+ * b43_radio_set - OR a 16bit radio register with a bitmap
+ */
+void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
+
+/**
+ * b43_radio_maskset - Mask and OR a radio register with a mask and bitmap
+ */
+void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
+
+/**
+ * b43_radio_lock - Lock firmware radio register access
+ */
+void b43_radio_lock(struct b43_wldev *dev);
+
+/**
+ * b43_radio_unlock - Unlock firmware radio register access
+ */
+void b43_radio_unlock(struct b43_wldev *dev);
+
+/**
+ * b43_phy_lock - Lock firmware PHY register access
+ */
+void b43_phy_lock(struct b43_wldev *dev);
+
+/**
+ * b43_phy_unlock - Unlock firmware PHY register access
+ */
+void b43_phy_unlock(struct b43_wldev *dev);
+
+/**
+ * b43_switch_channel - Switch to another channel
+ */
+int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
+/**
+ * B43_DEFAULT_CHANNEL - Pass to b43_switch_channel() to select the default channel.
+ */
+#define B43_DEFAULT_CHANNEL	UINT_MAX
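/*
 * Usage sketch: b43_switch_channel(dev, B43_DEFAULT_CHANNEL) makes
 * b43_switch_channel() substitute ops->get_default_chan(dev), as can be
 * seen in phy_common.c above.
 */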
+
+/**
+ * b43_software_rfkill - Turn the radio ON or OFF in software.
+ */
+void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state);
+
+/**
+ * b43_phy_txpower_check - Check TX power output.
+ *
+ * Compare the current TX power output to the desired power emission
+ * and schedule an adjustment if they do not match.
+ * Requires wl->irq_lock locked.
+ *
+ * @flags:	OR'ed enum b43_phy_txpower_check_flags flags.
+ * 		See the docs below.
+ */
+void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags);
+/**
+ * enum b43_phy_txpower_check_flags - Flags for b43_phy_txpower_check()
+ *
+ * @B43_TXPWR_IGNORE_TIME: Ignore the schedule time and force-redo
+ *                         the check now.
+ * @B43_TXPWR_IGNORE_TSSI: Redo the recalculation, even if the average
+ *                         TSSI did not change.
+ */
+enum b43_phy_txpower_check_flags {
+	B43_TXPWR_IGNORE_TIME		= (1 << 0),
+	B43_TXPWR_IGNORE_TSSI		= (1 << 1),
+};
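/*
 * Usage sketch (assuming the wl->irq_lock documented above is held by the
 * caller): force an immediate recalculation, ignoring both the two-second
 * schedule and unchanged TSSI values:
 *
 *	b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME |
 *				   B43_TXPWR_IGNORE_TSSI);
 */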
+
+struct work_struct;
+void b43_phy_txpower_adjust_work(struct work_struct *work);
+
+/**
+ * b43_phy_shm_tssi_read - Read the average of the last 4 TSSI from SHM.
+ *
+ * @shm_offset:		The SHM address to read the values from.
+ *
+ * Returns the average of the 4 TSSI values, or a negative error code.
+ */
+int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
+
+/**
+ * b43_phyop_switch_analog_generic - Generic PHY operation for switching the Analog.
+ *
+ * It does the switching based on the PHY0 core register.
+ * Do _not_ call this directly. Only use it as a switch_analog callback
+ * for struct b43_phy_operations.
+ */
+void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+
+
+#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy_g.c
similarity index 61%
rename from drivers/net/wireless/b43/phy.c
rename to drivers/net/wireless/b43/phy_g.c
index 305d4cd..232181f 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1,10 +1,11 @@
 /*
 
   Broadcom B43 wireless driver
+  IEEE 802.11g PHY driver
 
   Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
   Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
-  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+  Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
   Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
   Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
 
@@ -25,38 +26,14 @@
 
 */
 
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/types.h>
+#include "b43.h"
+#include "phy_g.h"
+#include "phy_common.h"
+#include "lo.h"
+#include "main.h"
+
 #include <linux/bitrev.h>
 
-#include "b43.h"
-#include "phy.h"
-#include "nphy.h"
-#include "main.h"
-#include "tables.h"
-#include "lo.h"
-#include "wa.h"
-
-
-static const s8 b43_tssi2dbm_b_table[] = {
-	0x4D, 0x4C, 0x4B, 0x4A,
-	0x4A, 0x49, 0x48, 0x47,
-	0x47, 0x46, 0x45, 0x45,
-	0x44, 0x43, 0x42, 0x42,
-	0x41, 0x40, 0x3F, 0x3E,
-	0x3D, 0x3C, 0x3B, 0x3A,
-	0x39, 0x38, 0x37, 0x36,
-	0x35, 0x34, 0x32, 0x31,
-	0x30, 0x2F, 0x2D, 0x2C,
-	0x2B, 0x29, 0x28, 0x26,
-	0x25, 0x23, 0x21, 0x1F,
-	0x1D, 0x1A, 0x17, 0x14,
-	0x10, 0x0C, 0x06, 0x00,
-	-7, -7, -7, -7,
-	-7, -7, -7, -7,
-	-7, -7, -7, -7,
-};
 
 static const s8 b43_tssi2dbm_g_table[] = {
 	77, 77, 77, 76,
@@ -84,8 +61,20 @@
 	72, 84,
 };
 
+
+static void b43_calc_nrssi_threshold(struct b43_wldev *dev);
+
+
 #define bitrev4(tmp) (bitrev8(tmp) >> 4)
-static void b43_phy_initg(struct b43_wldev *dev);
+
+
+/* Get the freq, as it has to be written to the device. */
+static inline u16 channel2freq_bg(u8 channel)
+{
+	B43_WARN_ON(!(channel >= 1 && channel <= 14));
+
+	return b43_radio_channel_codes_bg[channel - 1];
+}
 
 static void generate_rfatt_list(struct b43_wldev *dev,
 				struct b43_rfatt_list *list)
@@ -130,7 +119,7 @@
 		{.att = 9,.with_padmix = 1,},
 	};
 
-	if (!b43_has_hardware_pctl(phy)) {
+	if (!b43_has_hardware_pctl(dev)) {
 		/* Software pctl */
 		list->list = rfatt_0;
 		list->len = ARRAY_SIZE(rfatt_0);
@@ -174,140 +163,55 @@
 	list->max_val = 8;
 }
 
-bool b43_has_hardware_pctl(struct b43_phy *phy)
-{
-	if (!phy->hardware_power_control)
-		return 0;
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		if (phy->rev >= 5)
-			return 1;
-		break;
-	case B43_PHYTYPE_G:
-		if (phy->rev >= 6)
-			return 1;
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-	return 0;
-}
-
 static void b43_shm_clear_tssi(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x0068, 0x7F7F);
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x006a, 0x7F7F);
-		break;
-	case B43_PHYTYPE_B:
-	case B43_PHYTYPE_G:
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
-		b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
-		break;
-	}
+	b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
+	b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
+	b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
+	b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
 }
 
-/* Lock the PHY registers against concurrent access from the microcode.
- * This lock is nonrecursive. */
-void b43_phy_lock(struct b43_wldev *dev)
-{
-#if B43_DEBUG
-	B43_WARN_ON(dev->phy.phy_locked);
-	dev->phy.phy_locked = 1;
-#endif
-	B43_WARN_ON(dev->dev->id.revision < 3);
-
-	if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
-		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
-}
-
-void b43_phy_unlock(struct b43_wldev *dev)
-{
-#if B43_DEBUG
-	B43_WARN_ON(!dev->phy.phy_locked);
-	dev->phy.phy_locked = 0;
-#endif
-	B43_WARN_ON(dev->dev->id.revision < 3);
-
-	if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
-		b43_power_saving_ctl_bits(dev, 0);
-}
-
-/* Different PHYs require different register routing flags.
- * This adjusts (and does sanity checks on) the routing flags.
- */
-static inline u16 adjust_phyreg_for_phytype(struct b43_phy *phy,
-					    u16 offset, struct b43_wldev *dev)
-{
-	if (phy->type == B43_PHYTYPE_A) {
-		/* OFDM registers are base-registers for the A-PHY. */
-		if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
-			offset &= ~B43_PHYROUTE;
-			offset |= B43_PHYROUTE_BASE;
-		}
-	}
-
-#if B43_DEBUG
-	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
-		/* Ext-G registers are only available on G-PHYs */
-		if (phy->type != B43_PHYTYPE_G) {
-			b43err(dev->wl, "Invalid EXT-G PHY access at "
-			       "0x%04X on PHY type %u\n", offset, phy->type);
-			dump_stack();
-		}
-	}
-	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
-		/* N-BMODE registers are only available on N-PHYs */
-		if (phy->type != B43_PHYTYPE_N) {
-			b43err(dev->wl, "Invalid N-BMODE PHY access at "
-			       "0x%04X on PHY type %u\n", offset, phy->type);
-			dump_stack();
-		}
-	}
-#endif /* B43_DEBUG */
-
-	return offset;
-}
-
-u16 b43_phy_read(struct b43_wldev * dev, u16 offset)
+/* Synthetic PU workaround */
+static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
 {
 	struct b43_phy *phy = &dev->phy;
 
-	offset = adjust_phyreg_for_phytype(phy, offset, dev);
-	b43_write16(dev, B43_MMIO_PHY_CONTROL, offset);
-	return b43_read16(dev, B43_MMIO_PHY_DATA);
+	might_sleep();
+
+	if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
+		/* We do not need the workaround. */
+		return;
+	}
+
+	if (channel <= 10) {
+		b43_write16(dev, B43_MMIO_CHANNEL,
+			    channel2freq_bg(channel + 4));
+	} else {
+		b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
+	}
+	msleep(1);
+	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
 }
 
-void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val)
+/* Set the baseband attenuation value on chip. */
+void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
+				       u16 baseband_attenuation)
 {
 	struct b43_phy *phy = &dev->phy;
 
-	offset = adjust_phyreg_for_phytype(phy, offset, dev);
-	b43_write16(dev, B43_MMIO_PHY_CONTROL, offset);
-	b43_write16(dev, B43_MMIO_PHY_DATA, val);
-}
-
-void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
-{
-	b43_phy_write(dev, offset,
-		      b43_phy_read(dev, offset) & mask);
-}
-
-void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set)
-{
-	b43_phy_write(dev, offset,
-		      b43_phy_read(dev, offset) | set);
-}
-
-void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
-{
-	b43_phy_write(dev, offset,
-		      (b43_phy_read(dev, offset) & mask) | set);
+	if (phy->analog == 0) {
+		b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
+						 & 0xFFF0) |
+			    baseband_attenuation);
+	} else if (phy->analog > 1) {
+		b43_phy_write(dev, B43_PHY_DACCTL,
+			      (b43_phy_read(dev, B43_PHY_DACCTL)
+			       & 0xFFC3) | (baseband_attenuation << 2));
+	} else {
+		b43_phy_write(dev, B43_PHY_DACCTL,
+			      (b43_phy_read(dev, B43_PHY_DACCTL)
+			       & 0xFF87) | (baseband_attenuation << 3));
+	}
 }
 
 /* Adjust the transmission power output (G-PHY) */
@@ -316,7 +220,8 @@
 		       const struct b43_rfatt *rfatt, u8 tx_control)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	u16 bb, rf;
 	u16 tx_bias, tx_magn;
 
@@ -327,11 +232,12 @@
 	if (unlikely(tx_bias == 0xFF))
 		tx_bias = 0;
 
-	/* Save the values for later */
-	phy->tx_control = tx_control;
-	memcpy(&phy->rfatt, rfatt, sizeof(*rfatt));
-	phy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
-	memcpy(&phy->bbatt, bbatt, sizeof(*bbatt));
+	/* Save the values for later. Use memmove, because it's valid
+	 * to pass &gphy->rfatt as the rfatt pointer argument. Same for bbatt. */
+	gphy->tx_control = tx_control;
+	memmove(&gphy->rfatt, rfatt, sizeof(*rfatt));
+	gphy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
+	memmove(&gphy->bbatt, bbatt, sizeof(*bbatt));
 
 	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
 		b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), "
@@ -340,7 +246,7 @@
 		       bb, rf, tx_control, tx_bias, tx_magn);
 	}
 
-	b43_phy_set_baseband_attenuation(dev, bb);
+	b43_gphy_set_baseband_attenuation(dev, bb);
 	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf);
 	if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
 		b43_radio_write16(dev, 0x43,
@@ -358,179 +264,23 @@
 		b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52)
 					      & 0xFFF0) | (tx_bias & 0x000F));
 	}
-	if (phy->type == B43_PHYTYPE_G)
-		b43_lo_g_adjust(dev);
-}
-
-static void default_baseband_attenuation(struct b43_wldev *dev,
-					 struct b43_bbatt *bb)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
-		bb->att = 0;
-	else
-		bb->att = 2;
-}
-
-static void default_radio_attenuation(struct b43_wldev *dev,
-				      struct b43_rfatt *rf)
-{
-	struct ssb_bus *bus = dev->dev->bus;
-	struct b43_phy *phy = &dev->phy;
-
-	rf->with_padmix = 0;
-
-	if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
-	    bus->boardinfo.type == SSB_BOARD_BCM4309G) {
-		if (bus->boardinfo.rev < 0x43) {
-			rf->att = 2;
-			return;
-		} else if (bus->boardinfo.rev < 0x51) {
-			rf->att = 3;
-			return;
-		}
-	}
-
-	if (phy->type == B43_PHYTYPE_A) {
-		rf->att = 0x60;
-		return;
-	}
-
-	switch (phy->radio_ver) {
-	case 0x2053:
-		switch (phy->radio_rev) {
-		case 1:
-			rf->att = 6;
-			return;
-		}
-		break;
-	case 0x2050:
-		switch (phy->radio_rev) {
-		case 0:
-			rf->att = 5;
-			return;
-		case 1:
-			if (phy->type == B43_PHYTYPE_G) {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
-					rf->att = 3;
-				else if (bus->boardinfo.vendor ==
-					 SSB_BOARDVENDOR_BCM
-					 && bus->boardinfo.type ==
-					 SSB_BOARD_BU4306)
-					rf->att = 3;
-				else
-					rf->att = 1;
-			} else {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
-					rf->att = 7;
-				else
-					rf->att = 6;
-			}
-			return;
-		case 2:
-			if (phy->type == B43_PHYTYPE_G) {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
-					rf->att = 3;
-				else if (bus->boardinfo.vendor ==
-					 SSB_BOARDVENDOR_BCM
-					 && bus->boardinfo.type ==
-					 SSB_BOARD_BU4306)
-					rf->att = 5;
-				else if (bus->chip_id == 0x4320)
-					rf->att = 4;
-				else
-					rf->att = 3;
-			} else
-				rf->att = 6;
-			return;
-		case 3:
-			rf->att = 5;
-			return;
-		case 4:
-		case 5:
-			rf->att = 1;
-			return;
-		case 6:
-		case 7:
-			rf->att = 5;
-			return;
-		case 8:
-			rf->att = 0xA;
-			rf->with_padmix = 1;
-			return;
-		case 9:
-		default:
-			rf->att = 5;
-			return;
-		}
-	}
-	rf->att = 5;
-}
-
-static u16 default_tx_control(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (phy->radio_ver != 0x2050)
-		return 0;
-	if (phy->radio_rev == 1)
-		return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
-	if (phy->radio_rev < 6)
-		return B43_TXCTL_PA2DB;
-	if (phy->radio_rev == 8)
-		return B43_TXCTL_TXMIX;
-	return 0;
-}
-
-/* This func is called "PHY calibrate" in the specs... */
-void b43_phy_early_init(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
-
-	default_baseband_attenuation(dev, &phy->bbatt);
-	default_radio_attenuation(dev, &phy->rfatt);
-	phy->tx_control = (default_tx_control(dev) << 4);
-
-	/* Commit previous writes */
-	b43_read32(dev, B43_MMIO_MACCTL);
-
-	if (phy->type == B43_PHYTYPE_B || phy->type == B43_PHYTYPE_G) {
-		generate_rfatt_list(dev, &lo->rfatt_list);
-		generate_bbatt_list(dev, &lo->bbatt_list);
-	}
-	if (phy->type == B43_PHYTYPE_G && phy->rev == 1) {
-		/* Workaround: Temporarly disable gmode through the early init
-		 * phase, as the gmode stuff is not needed for phy rev 1 */
-		phy->gmode = 0;
-		b43_wireless_core_reset(dev, 0);
-		b43_phy_initg(dev);
-		phy->gmode = 1;
-		b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
-	}
+	b43_lo_g_adjust(dev);
 }
 
 /* GPHY_TSSI_Power_Lookup_Table_Init */
 static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	int i;
 	u16 value;
 
 	for (i = 0; i < 32; i++)
-		b43_ofdmtab_write16(dev, 0x3C20, i, phy->tssi2dbm[i]);
+		b43_ofdmtab_write16(dev, 0x3C20, i, gphy->tssi2dbm[i]);
 	for (i = 32; i < 64; i++)
-		b43_ofdmtab_write16(dev, 0x3C00, i - 32, phy->tssi2dbm[i]);
+		b43_ofdmtab_write16(dev, 0x3C00, i - 32, gphy->tssi2dbm[i]);
 	for (i = 0; i < 64; i += 2) {
-		value = (u16) phy->tssi2dbm[i];
-		value |= ((u16) phy->tssi2dbm[i + 1]) << 8;
+		value = (u16) gphy->tssi2dbm[i];
+		value |= ((u16) gphy->tssi2dbm[i + 1]) << 8;
 		b43_phy_write(dev, 0x380 + (i / 2), value);
 	}
 }
@@ -539,7 +289,8 @@
 static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_txpower_lo_control *lo = phy->lo_control;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
 	u16 nr_written = 0;
 	u16 tmp;
 	u8 rf, bb;
@@ -561,1509 +312,6 @@
 	}
 }
 
-static void hardware_pctl_init_aphy(struct b43_wldev *dev)
-{
-	//TODO
-}
-
-static void hardware_pctl_init_gphy(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
-		      | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
-	b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
-		      | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
-	b43_gphy_tssi_power_lt_init(dev);
-	b43_gphy_gain_lt_init(dev);
-	b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
-	b43_phy_write(dev, 0x0014, 0x0000);
-
-	B43_WARN_ON(phy->rev < 6);
-	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
-		      | 0x0800);
-	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
-		      & 0xFEFF);
-	b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
-		      & 0xFFBF);
-
-	b43_gphy_dc_lt_init(dev, 1);
-}
-
-/* HardwarePowerControl init for A and G PHY */
-static void b43_hardware_pctl_init(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (!b43_has_hardware_pctl(phy)) {
-		/* No hardware power control */
-		b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
-		return;
-	}
-	/* Init the hwpctl related hardware */
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		hardware_pctl_init_aphy(dev);
-		break;
-	case B43_PHYTYPE_G:
-		hardware_pctl_init_gphy(dev);
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-	/* Enable hardware pctl in firmware. */
-	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
-}
-
-static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (!b43_has_hardware_pctl(phy)) {
-		b43_phy_write(dev, 0x047A, 0xC111);
-		return;
-	}
-
-	b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
-	b43_phy_write(dev, 0x002F, 0x0202);
-	b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
-	b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
-	if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
-		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
-					    & 0xFF0F) | 0x0010);
-		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
-			      | 0x8000);
-		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
-					    & 0xFFC0) | 0x0010);
-		b43_phy_write(dev, 0x002E, 0xC07F);
-		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
-			      | 0x0400);
-	} else {
-		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
-			      | 0x0200);
-		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
-			      | 0x0400);
-		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
-			      & 0x7FFF);
-		b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
-			      & 0xFFFE);
-		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
-					    & 0xFFC0) | 0x0010);
-		b43_phy_write(dev, 0x002E, 0xC07F);
-		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
-					    & 0xFF0F) | 0x0010);
-	}
-}
-
-/* Intialize B/G PHY power control
- * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
- */
-static void b43_phy_init_pctl(struct b43_wldev *dev)
-{
-	struct ssb_bus *bus = dev->dev->bus;
-	struct b43_phy *phy = &dev->phy;
-	struct b43_rfatt old_rfatt;
-	struct b43_bbatt old_bbatt;
-	u8 old_tx_control = 0;
-
-	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-	    (bus->boardinfo.type == SSB_BOARD_BU4306))
-		return;
-
-	b43_phy_write(dev, 0x0028, 0x8018);
-
-	/* This does something with the Analog... */
-	b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
-		    & 0xFFDF);
-
-	if (phy->type == B43_PHYTYPE_G && !phy->gmode)
-		return;
-	b43_hardware_pctl_early_init(dev);
-	if (phy->cur_idle_tssi == 0) {
-		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
-			b43_radio_write16(dev, 0x0076,
-					  (b43_radio_read16(dev, 0x0076)
-					   & 0x00F7) | 0x0084);
-		} else {
-			struct b43_rfatt rfatt;
-			struct b43_bbatt bbatt;
-
-			memcpy(&old_rfatt, &phy->rfatt, sizeof(old_rfatt));
-			memcpy(&old_bbatt, &phy->bbatt, sizeof(old_bbatt));
-			old_tx_control = phy->tx_control;
-
-			bbatt.att = 11;
-			if (phy->radio_rev == 8) {
-				rfatt.att = 15;
-				rfatt.with_padmix = 1;
-			} else {
-				rfatt.att = 9;
-				rfatt.with_padmix = 0;
-			}
-			b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
-		}
-		b43_dummy_transmission(dev);
-		phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
-		if (B43_DEBUG) {
-			/* Current-Idle-TSSI sanity check. */
-			if (abs(phy->cur_idle_tssi - phy->tgt_idle_tssi) >= 20) {
-				b43dbg(dev->wl,
-				       "!WARNING! Idle-TSSI phy->cur_idle_tssi "
-				       "measuring failed. (cur=%d, tgt=%d). Disabling TX power "
-				       "adjustment.\n", phy->cur_idle_tssi,
-				       phy->tgt_idle_tssi);
-				phy->cur_idle_tssi = 0;
-			}
-		}
-		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
-			b43_radio_write16(dev, 0x0076,
-					  b43_radio_read16(dev, 0x0076)
-					  & 0xFF7B);
-		} else {
-			b43_set_txpower_g(dev, &old_bbatt,
-					  &old_rfatt, old_tx_control);
-		}
-	}
-	b43_hardware_pctl_init(dev);
-	b43_shm_clear_tssi(dev);
-}
-
-static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
-{
-	int i;
-
-	if (dev->phy.rev < 3) {
-		if (enable)
-			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_WRSSI, i, 0xFFF8);
-			}
-		else
-			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
-			}
-	} else {
-		if (enable)
-			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_WRSSI, i, 0x0820);
-		else
-			for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
-				b43_ofdmtab_write16(dev,
-					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
-	}
-}
-
-static void b43_phy_ww(struct b43_wldev *dev)
-{
-	u16 b, curr_s, best_s = 0xFFFF;
-	int i;
-
-	b43_phy_write(dev, B43_PHY_CRS0,
-		b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
-	b43_phy_write(dev, B43_PHY_OFDM(0x1B),
-		b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
-	b43_phy_write(dev, B43_PHY_OFDM(0x82),
-		(b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
-	b43_radio_write16(dev, 0x0009,
-		b43_radio_read16(dev, 0x0009) | 0x0080);
-	b43_radio_write16(dev, 0x0012,
-		(b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
-	b43_wa_initgains(dev);
-	b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
-	b = b43_phy_read(dev, B43_PHY_PWRDOWN);
-	b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
-	b43_radio_write16(dev, 0x0004,
-		b43_radio_read16(dev, 0x0004) | 0x0004);
-	for (i = 0x10; i <= 0x20; i++) {
-		b43_radio_write16(dev, 0x0013, i);
-		curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
-		if (!curr_s) {
-			best_s = 0x0000;
-			break;
-		} else if (curr_s >= 0x0080)
-			curr_s = 0x0100 - curr_s;
-		if (curr_s < best_s)
-			best_s = curr_s;
-	}
-	b43_phy_write(dev, B43_PHY_PWRDOWN, b);
-	b43_radio_write16(dev, 0x0004,
-		b43_radio_read16(dev, 0x0004) & 0xFFFB);
-	b43_radio_write16(dev, 0x0013, best_s);
-	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
-	b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
-	b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
-	b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
-	b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
-	b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
-	b43_phy_write(dev, B43_PHY_OFDM(0xBB),
-		(b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
-	b43_phy_write(dev, B43_PHY_OFDM61,
-		(b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
-	b43_phy_write(dev, B43_PHY_OFDM(0x13),
-		(b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
-	b43_phy_write(dev, B43_PHY_OFDM(0x14),
-		(b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
-	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
-	for (i = 0; i < 6; i++)
-		b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
-	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
-	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
-	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
-	b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
-	b43_phy_write(dev, B43_PHY_CRS0,
-		b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
-}
-
-/* Initialize APHY. This is also called for the GPHY in some cases. */
-static void b43_phy_inita(struct b43_wldev *dev)
-{
-	struct ssb_bus *bus = dev->dev->bus;
-	struct b43_phy *phy = &dev->phy;
-
-	might_sleep();
-
-	if (phy->rev >= 6) {
-		if (phy->type == B43_PHYTYPE_A)
-			b43_phy_write(dev, B43_PHY_OFDM(0x1B),
-				b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
-		if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
-			b43_phy_write(dev, B43_PHY_ENCORE,
-				b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
-		else
-			b43_phy_write(dev, B43_PHY_ENCORE,
-				b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
-	}
-
-	b43_wa_all(dev);
-
-	if (phy->type == B43_PHYTYPE_A) {
-		if (phy->gmode && (phy->rev < 3))
-			b43_phy_write(dev, 0x0034,
-				b43_phy_read(dev, 0x0034) | 0x0001);
-		b43_phy_rssiagc(dev, 0);
-
-		b43_phy_write(dev, B43_PHY_CRS0,
-			b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
-
-		b43_radio_init2060(dev);
-
-		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
-		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
-			; //TODO: A PHY LO
-		}
-
-		if (phy->rev >= 3)
-			b43_phy_ww(dev);
-
-		hardware_pctl_init_aphy(dev);
-
-		//TODO: radar detection
-	}
-
-	if ((phy->type == B43_PHYTYPE_G) &&
-	    (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
-		b43_phy_write(dev, B43_PHY_OFDM(0x6E),
-				  (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
-				   & 0xE000) | 0x3CF);
-	}
-}
-
-static void b43_phy_initb5(struct b43_wldev *dev)
-{
-	struct ssb_bus *bus = dev->dev->bus;
-	struct b43_phy *phy = &dev->phy;
-	u16 offset, value;
-	u8 old_channel;
-
-	if (phy->analog == 1) {
-		b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
-				  | 0x0050);
-	}
-	if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
-	    (bus->boardinfo.type != SSB_BOARD_BU4306)) {
-		value = 0x2120;
-		for (offset = 0x00A8; offset < 0x00C7; offset++) {
-			b43_phy_write(dev, offset, value);
-			value += 0x202;
-		}
-	}
-	b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
-		      | 0x0700);
-	if (phy->radio_ver == 0x2050)
-		b43_phy_write(dev, 0x0038, 0x0667);
-
-	if (phy->gmode || phy->rev >= 2) {
-		if (phy->radio_ver == 0x2050) {
-			b43_radio_write16(dev, 0x007A,
-					  b43_radio_read16(dev, 0x007A)
-					  | 0x0020);
-			b43_radio_write16(dev, 0x0051,
-					  b43_radio_read16(dev, 0x0051)
-					  | 0x0004);
-		}
-		b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);
-
-		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
-		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
-
-		b43_phy_write(dev, 0x001C, 0x186A);
-
-		b43_phy_write(dev, 0x0013,
-			      (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
-		b43_phy_write(dev, 0x0035,
-			      (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
-		b43_phy_write(dev, 0x005D,
-			      (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
-	}
-
-	if (dev->bad_frames_preempt) {
-		b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
-			      b43_phy_read(dev,
-					   B43_PHY_RADIO_BITFIELD) | (1 << 11));
-	}
-
-	if (phy->analog == 1) {
-		b43_phy_write(dev, 0x0026, 0xCE00);
-		b43_phy_write(dev, 0x0021, 0x3763);
-		b43_phy_write(dev, 0x0022, 0x1BC3);
-		b43_phy_write(dev, 0x0023, 0x06F9);
-		b43_phy_write(dev, 0x0024, 0x037E);
-	} else
-		b43_phy_write(dev, 0x0026, 0xCC00);
-	b43_phy_write(dev, 0x0030, 0x00C6);
-	b43_write16(dev, 0x03EC, 0x3F22);
-
-	if (phy->analog == 1)
-		b43_phy_write(dev, 0x0020, 0x3E1C);
-	else
-		b43_phy_write(dev, 0x0020, 0x301C);
-
-	if (phy->analog == 0)
-		b43_write16(dev, 0x03E4, 0x3000);
-
-	old_channel = phy->channel;
-	/* Force to channel 7, even if not supported. */
-	b43_radio_selectchannel(dev, 7, 0);
-
-	if (phy->radio_ver != 0x2050) {
-		b43_radio_write16(dev, 0x0075, 0x0080);
-		b43_radio_write16(dev, 0x0079, 0x0081);
-	}
-
-	b43_radio_write16(dev, 0x0050, 0x0020);
-	b43_radio_write16(dev, 0x0050, 0x0023);
-
-	if (phy->radio_ver == 0x2050) {
-		b43_radio_write16(dev, 0x0050, 0x0020);
-		b43_radio_write16(dev, 0x005A, 0x0070);
-	}
-
-	b43_radio_write16(dev, 0x005B, 0x007B);
-	b43_radio_write16(dev, 0x005C, 0x00B0);
-
-	b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);
-
-	b43_radio_selectchannel(dev, old_channel, 0);
-
-	b43_phy_write(dev, 0x0014, 0x0080);
-	b43_phy_write(dev, 0x0032, 0x00CA);
-	b43_phy_write(dev, 0x002A, 0x88A3);
-
-	b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
-
-	if (phy->radio_ver == 0x2050)
-		b43_radio_write16(dev, 0x005D, 0x000D);
-
-	b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
-}
-
-static void b43_phy_initb6(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	u16 offset, val;
-	u8 old_channel;
-
-	b43_phy_write(dev, 0x003E, 0x817A);
-	b43_radio_write16(dev, 0x007A,
-			  (b43_radio_read16(dev, 0x007A) | 0x0058));
-	if (phy->radio_rev == 4 || phy->radio_rev == 5) {
-		b43_radio_write16(dev, 0x51, 0x37);
-		b43_radio_write16(dev, 0x52, 0x70);
-		b43_radio_write16(dev, 0x53, 0xB3);
-		b43_radio_write16(dev, 0x54, 0x9B);
-		b43_radio_write16(dev, 0x5A, 0x88);
-		b43_radio_write16(dev, 0x5B, 0x88);
-		b43_radio_write16(dev, 0x5D, 0x88);
-		b43_radio_write16(dev, 0x5E, 0x88);
-		b43_radio_write16(dev, 0x7D, 0x88);
-		b43_hf_write(dev, b43_hf_read(dev)
-			     | B43_HF_TSSIRPSMW);
-	}
-	B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7);	/* We had code for these revs here... */
-	if (phy->radio_rev == 8) {
-		b43_radio_write16(dev, 0x51, 0);
-		b43_radio_write16(dev, 0x52, 0x40);
-		b43_radio_write16(dev, 0x53, 0xB7);
-		b43_radio_write16(dev, 0x54, 0x98);
-		b43_radio_write16(dev, 0x5A, 0x88);
-		b43_radio_write16(dev, 0x5B, 0x6B);
-		b43_radio_write16(dev, 0x5C, 0x0F);
-		if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
-			b43_radio_write16(dev, 0x5D, 0xFA);
-			b43_radio_write16(dev, 0x5E, 0xD8);
-		} else {
-			b43_radio_write16(dev, 0x5D, 0xF5);
-			b43_radio_write16(dev, 0x5E, 0xB8);
-		}
-		b43_radio_write16(dev, 0x0073, 0x0003);
-		b43_radio_write16(dev, 0x007D, 0x00A8);
-		b43_radio_write16(dev, 0x007C, 0x0001);
-		b43_radio_write16(dev, 0x007E, 0x0008);
-	}
-	val = 0x1E1F;
-	for (offset = 0x0088; offset < 0x0098; offset++) {
-		b43_phy_write(dev, offset, val);
-		val -= 0x0202;
-	}
-	val = 0x3E3F;
-	for (offset = 0x0098; offset < 0x00A8; offset++) {
-		b43_phy_write(dev, offset, val);
-		val -= 0x0202;
-	}
-	val = 0x2120;
-	for (offset = 0x00A8; offset < 0x00C8; offset++) {
-		b43_phy_write(dev, offset, (val & 0x3F3F));
-		val += 0x0202;
-	}
-	if (phy->type == B43_PHYTYPE_G) {
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) | 0x0020);
-		b43_radio_write16(dev, 0x0051,
-				  b43_radio_read16(dev, 0x0051) | 0x0004);
-		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
-		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
-		b43_phy_write(dev, 0x5B, 0);
-		b43_phy_write(dev, 0x5C, 0);
-	}
-
-	old_channel = phy->channel;
-	if (old_channel >= 8)
-		b43_radio_selectchannel(dev, 1, 0);
-	else
-		b43_radio_selectchannel(dev, 13, 0);
-
-	b43_radio_write16(dev, 0x0050, 0x0020);
-	b43_radio_write16(dev, 0x0050, 0x0023);
-	udelay(40);
-	if (phy->radio_rev < 6 || phy->radio_rev == 8) {
-		b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
-					      | 0x0002));
-		b43_radio_write16(dev, 0x50, 0x20);
-	}
-	if (phy->radio_rev <= 2) {
-		b43_radio_write16(dev, 0x7C, 0x20);
-		b43_radio_write16(dev, 0x5A, 0x70);
-		b43_radio_write16(dev, 0x5B, 0x7B);
-		b43_radio_write16(dev, 0x5C, 0xB0);
-	}
-	b43_radio_write16(dev, 0x007A,
-			  (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);
-
-	b43_radio_selectchannel(dev, old_channel, 0);
-
-	b43_phy_write(dev, 0x0014, 0x0200);
-	if (phy->radio_rev >= 6)
-		b43_phy_write(dev, 0x2A, 0x88C2);
-	else
-		b43_phy_write(dev, 0x2A, 0x8AC0);
-	b43_phy_write(dev, 0x0038, 0x0668);
-	b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
-	if (phy->radio_rev <= 5) {
-		b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
-					  & 0xFF80) | 0x0003);
-	}
-	if (phy->radio_rev <= 2)
-		b43_radio_write16(dev, 0x005D, 0x000D);
-
-	if (phy->analog == 4) {
-		b43_write16(dev, 0x3E4, 9);
-		b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
-			      & 0x0FFF);
-	} else {
-		b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
-			      | 0x0004);
-	}
-	if (phy->type == B43_PHYTYPE_B)
-		B43_WARN_ON(1);
-	else if (phy->type == B43_PHYTYPE_G)
-		b43_write16(dev, 0x03E6, 0x0);
-}
-
-static void b43_calc_loopback_gain(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	u16 backup_phy[16] = { 0 };
-	u16 backup_radio[3];
-	u16 backup_bband;
-	u16 i, j, loop_i_max;
-	u16 trsw_rx;
-	u16 loop1_outer_done, loop1_inner_done;
-
-	backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
-	backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
-	backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
-	backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
-	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
-		backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
-		backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
-	}
-	backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
-	backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
-	backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
-	backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
-	backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
-	backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
-	backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
-	backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
-	backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
-	backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
-	backup_bband = phy->bbatt.att;
-	backup_radio[0] = b43_radio_read16(dev, 0x52);
-	backup_radio[1] = b43_radio_read16(dev, 0x43);
-	backup_radio[2] = b43_radio_read16(dev, 0x7A);
-
-	b43_phy_write(dev, B43_PHY_CRS0,
-		      b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
-	b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
-		      b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
-	b43_phy_write(dev, B43_PHY_RFOVER,
-		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL,
-		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
-	b43_phy_write(dev, B43_PHY_RFOVER,
-		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL,
-		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
-	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
-		b43_phy_write(dev, B43_PHY_ANALOGOVER,
-			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
-		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
-			      b43_phy_read(dev,
-					   B43_PHY_ANALOGOVERVAL) & 0xFFFE);
-		b43_phy_write(dev, B43_PHY_ANALOGOVER,
-			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
-		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
-			      b43_phy_read(dev,
-					   B43_PHY_ANALOGOVERVAL) & 0xFFFD);
-	}
-	b43_phy_write(dev, B43_PHY_RFOVER,
-		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL,
-		      b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
-	b43_phy_write(dev, B43_PHY_RFOVER,
-		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL,
-		      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
-		       & 0xFFCF) | 0x10);
-
-	b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
-	b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
-	b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);
-
-	b43_phy_write(dev, B43_PHY_CCK(0x0A),
-		      b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
-	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
-		b43_phy_write(dev, B43_PHY_ANALOGOVER,
-			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
-		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
-			      b43_phy_read(dev,
-					   B43_PHY_ANALOGOVERVAL) & 0xFFFB);
-	}
-	b43_phy_write(dev, B43_PHY_CCK(0x03),
-		      (b43_phy_read(dev, B43_PHY_CCK(0x03))
-		       & 0xFF9F) | 0x40);
-
-	if (phy->radio_rev == 8) {
-		b43_radio_write16(dev, 0x43, 0x000F);
-	} else {
-		b43_radio_write16(dev, 0x52, 0);
-		b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
-					      & 0xFFF0) | 0x9);
-	}
-	b43_phy_set_baseband_attenuation(dev, 11);
-
-	if (phy->rev >= 3)
-		b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
-	else
-		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
-	b43_phy_write(dev, B43_PHY_LO_CTL, 0);
-
-	b43_phy_write(dev, B43_PHY_CCK(0x2B),
-		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
-		       & 0xFFC0) | 0x01);
-	b43_phy_write(dev, B43_PHY_CCK(0x2B),
-		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
-		       & 0xC0FF) | 0x800);
-
-	b43_phy_write(dev, B43_PHY_RFOVER,
-		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL,
-		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);
-
-	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
-		if (phy->rev >= 7) {
-			b43_phy_write(dev, B43_PHY_RFOVER,
-				      b43_phy_read(dev, B43_PHY_RFOVER)
-				      | 0x0800);
-			b43_phy_write(dev, B43_PHY_RFOVERVAL,
-				      b43_phy_read(dev, B43_PHY_RFOVERVAL)
-				      | 0x8000);
-		}
-	}
-	b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
-			  & 0x00F7);
-
-	j = 0;
-	loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
-	for (i = 0; i < loop_i_max; i++) {
-		for (j = 0; j < 16; j++) {
-			b43_radio_write16(dev, 0x43, i);
-			b43_phy_write(dev, B43_PHY_RFOVERVAL,
-				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
-				       & 0xF0FF) | (j << 8));
-			b43_phy_write(dev, B43_PHY_PGACTL,
-				      (b43_phy_read(dev, B43_PHY_PGACTL)
-				       & 0x0FFF) | 0xA000);
-			b43_phy_write(dev, B43_PHY_PGACTL,
-				      b43_phy_read(dev, B43_PHY_PGACTL)
-				      | 0xF000);
-			udelay(20);
-			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
-				goto exit_loop1;
-		}
-	}
-      exit_loop1:
-	loop1_outer_done = i;
-	loop1_inner_done = j;
-	if (j >= 8) {
-		b43_phy_write(dev, B43_PHY_RFOVERVAL,
-			      b43_phy_read(dev, B43_PHY_RFOVERVAL)
-			      | 0x30);
-		trsw_rx = 0x1B;
-		for (j = j - 8; j < 16; j++) {
-			b43_phy_write(dev, B43_PHY_RFOVERVAL,
-				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
-				       & 0xF0FF) | (j << 8));
-			b43_phy_write(dev, B43_PHY_PGACTL,
-				      (b43_phy_read(dev, B43_PHY_PGACTL)
-				       & 0x0FFF) | 0xA000);
-			b43_phy_write(dev, B43_PHY_PGACTL,
-				      b43_phy_read(dev, B43_PHY_PGACTL)
-				      | 0xF000);
-			udelay(20);
-			trsw_rx -= 3;
-			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
-				goto exit_loop2;
-		}
-	} else
-		trsw_rx = 0x18;
-      exit_loop2:
-
-	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
-		b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
-		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
-	}
-	b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
-	b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
-	b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
-	b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
-	b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
-	b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
-	b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
-	b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
-	b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);
-
-	b43_phy_set_baseband_attenuation(dev, backup_bband);
-
-	b43_radio_write16(dev, 0x52, backup_radio[0]);
-	b43_radio_write16(dev, 0x43, backup_radio[1]);
-	b43_radio_write16(dev, 0x7A, backup_radio[2]);
-
-	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
-	udelay(10);
-	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
-	b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
-	b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
-	b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);
-
-	phy->max_lb_gain =
-	    ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
-	phy->trsw_rx_gain = trsw_rx * 2;
-}
-
-static void b43_phy_initg(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	u16 tmp;
-
-	if (phy->rev == 1)
-		b43_phy_initb5(dev);
-	else
-		b43_phy_initb6(dev);
-
-	if (phy->rev >= 2 || phy->gmode)
-		b43_phy_inita(dev);
-
-	if (phy->rev >= 2) {
-		b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
-		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
-	}
-	if (phy->rev == 2) {
-		b43_phy_write(dev, B43_PHY_RFOVER, 0);
-		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
-	}
-	if (phy->rev > 5) {
-		b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
-		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
-	}
-	if (phy->gmode || phy->rev >= 2) {
-		tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
-		tmp &= B43_PHYVER_VERSION;
-		if (tmp == 3 || tmp == 5) {
-			b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
-			b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
-		}
-		if (tmp == 5) {
-			b43_phy_write(dev, B43_PHY_OFDM(0xCC),
-				      (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
-				       & 0x00FF) | 0x1F00);
-		}
-	}
-	if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
-		b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
-	if (phy->radio_rev == 8) {
-		b43_phy_write(dev, B43_PHY_EXTG(0x01),
-			      b43_phy_read(dev, B43_PHY_EXTG(0x01))
-			      | 0x80);
-		b43_phy_write(dev, B43_PHY_OFDM(0x3E),
-			      b43_phy_read(dev, B43_PHY_OFDM(0x3E))
-			      | 0x4);
-	}
-	if (has_loopback_gain(phy))
-		b43_calc_loopback_gain(dev);
-
-	if (phy->radio_rev != 8) {
-		if (phy->initval == 0xFFFF)
-			phy->initval = b43_radio_init2050(dev);
-		else
-			b43_radio_write16(dev, 0x0078, phy->initval);
-	}
-	b43_lo_g_init(dev);
-	if (has_tx_magnification(phy)) {
-		b43_radio_write16(dev, 0x52,
-				  (b43_radio_read16(dev, 0x52) & 0xFF00)
-				  | phy->lo_control->tx_bias | phy->
-				  lo_control->tx_magn);
-	} else {
-		b43_radio_write16(dev, 0x52,
-				  (b43_radio_read16(dev, 0x52) & 0xFFF0)
-				  | phy->lo_control->tx_bias);
-	}
-	if (phy->rev >= 6) {
-		b43_phy_write(dev, B43_PHY_CCK(0x36),
-			      (b43_phy_read(dev, B43_PHY_CCK(0x36))
-			       & 0x0FFF) | (phy->lo_control->
-					    tx_bias << 12));
-	}
-	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
-		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
-	else
-		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
-	if (phy->rev < 2)
-		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
-	else
-		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
-	if (phy->gmode || phy->rev >= 2) {
-		b43_lo_g_adjust(dev);
-		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
-	}
-
-	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
-		/* The specs state to update the NRSSI LT with
-		 * the value 0x7FFFFFFF here. I think that is some weird
-		 * compiler optimization in the original driver.
-		 * Essentially, what we do here is reset all NRSSI LT
-		 * entries to -32 (see the clamp_val() in nrssi_hw_update()).
-		 */
-		b43_nrssi_hw_update(dev, 0xFFFF);	//FIXME?
-		b43_calc_nrssi_threshold(dev);
-	} else if (phy->gmode || phy->rev >= 2) {
-		if (phy->nrssi[0] == -1000) {
-			B43_WARN_ON(phy->nrssi[1] != -1000);
-			b43_calc_nrssi_slope(dev);
-		} else
-			b43_calc_nrssi_threshold(dev);
-	}
-	if (phy->radio_rev == 8)
-		b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
-	b43_phy_init_pctl(dev);
-	/* FIXME: The spec says the 0 in the following 'if' should be replaced
-	   with the condition 'OFDM may not be used in the current locale',
-	   but OFDM is legal everywhere. */
-	if ((dev->dev->bus->chip_id == 0x4306
-	     && dev->dev->bus->chip_package == 2) || 0) {
-		b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
-			      & 0xBFFF);
-		b43_phy_write(dev, B43_PHY_OFDM(0xC3),
-			      b43_phy_read(dev, B43_PHY_OFDM(0xC3))
-			      & 0x7FFF);
-	}
-}
-
-/* Set the baseband attenuation value on chip. */
-void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
-				      u16 baseband_attenuation)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (phy->analog == 0) {
-		b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
-						 & 0xFFF0) |
-			    baseband_attenuation);
-	} else if (phy->analog > 1) {
-		b43_phy_write(dev, B43_PHY_DACCTL,
-			      (b43_phy_read(dev, B43_PHY_DACCTL)
-			       & 0xFFC3) | (baseband_attenuation << 2));
-	} else {
-		b43_phy_write(dev, B43_PHY_DACCTL,
-			      (b43_phy_read(dev, B43_PHY_DACCTL)
-			       & 0xFF87) | (baseband_attenuation << 3));
-	}
-}
-
-/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
- * This function converts a TSSI value to dBm in Q5.2
- */
-static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
-{
-	struct b43_phy *phy = &dev->phy;
-	s8 dbm = 0;
-	s32 tmp;
-
-	tmp = (phy->tgt_idle_tssi - phy->cur_idle_tssi + tssi);
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		tmp += 0x80;
-		tmp = clamp_val(tmp, 0x00, 0xFF);
-		dbm = phy->tssi2dbm[tmp];
-		//TODO: There's a FIXME on the specs
-		break;
-	case B43_PHYTYPE_B:
-	case B43_PHYTYPE_G:
-		tmp = clamp_val(tmp, 0x00, 0x3F);
-		dbm = phy->tssi2dbm[tmp];
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-
-	return dbm;
-}
-
-void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
-				     int *_bbatt, int *_rfatt)
-{
-	int rfatt = *_rfatt;
-	int bbatt = *_bbatt;
-	struct b43_txpower_lo_control *lo = dev->phy.lo_control;
-
-	/* Get baseband and radio attenuation values into their permitted ranges.
-	 * Radio attenuation affects power level 4 times as much as baseband. */
-
-	/* Range constants */
-	const int rf_min = lo->rfatt_list.min_val;
-	const int rf_max = lo->rfatt_list.max_val;
-	const int bb_min = lo->bbatt_list.min_val;
-	const int bb_max = lo->bbatt_list.max_val;
-
-	while (1) {
-		if (rfatt > rf_max && bbatt > bb_max - 4)
-			break;	/* Cannot get it into ranges */
-		if (rfatt < rf_min && bbatt < bb_min + 4)
-			break;	/* Cannot get it into ranges */
-		if (bbatt > bb_max && rfatt > rf_max - 1)
-			break;	/* Cannot get it into ranges */
-		if (bbatt < bb_min && rfatt < rf_min + 1)
-			break;	/* Cannot get it into ranges */
-
-		if (bbatt > bb_max) {
-			bbatt -= 4;
-			rfatt += 1;
-			continue;
-		}
-		if (bbatt < bb_min) {
-			bbatt += 4;
-			rfatt -= 1;
-			continue;
-		}
-		if (rfatt > rf_max) {
-			rfatt -= 1;
-			bbatt += 4;
-			continue;
-		}
-		if (rfatt < rf_min) {
-			rfatt += 1;
-			bbatt -= 4;
-			continue;
-		}
-		break;
-	}
-
-	*_rfatt = clamp_val(rfatt, rf_min, rf_max);
-	*_bbatt = clamp_val(bbatt, bb_min, bb_max);
-}
-
-/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
-void b43_phy_xmitpower(struct b43_wldev *dev)
-{
-	struct ssb_bus *bus = dev->dev->bus;
-	struct b43_phy *phy = &dev->phy;
-
-	if (phy->cur_idle_tssi == 0)
-		return;
-	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-	    (bus->boardinfo.type == SSB_BOARD_BU4306))
-		return;
-#ifdef CONFIG_B43_DEBUG
-	if (phy->manual_txpower_control)
-		return;
-#endif
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:{
-
-			//TODO: Nothing for A PHYs yet :-/
-
-			break;
-		}
-	case B43_PHYTYPE_B:
-	case B43_PHYTYPE_G:{
-			u16 tmp;
-			s8 v0, v1, v2, v3;
-			s8 average;
-			int max_pwr;
-			int desired_pwr, estimated_pwr, pwr_adjust;
-			int rfatt_delta, bbatt_delta;
-			int rfatt, bbatt;
-			u8 tx_control;
-
-			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x0058);
-			v0 = (s8) (tmp & 0x00FF);
-			v1 = (s8) ((tmp & 0xFF00) >> 8);
-			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x005A);
-			v2 = (s8) (tmp & 0x00FF);
-			v3 = (s8) ((tmp & 0xFF00) >> 8);
-			tmp = 0;
-
-			if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
-			    || v3 == 0x7F) {
-				tmp =
-				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0070);
-				v0 = (s8) (tmp & 0x00FF);
-				v1 = (s8) ((tmp & 0xFF00) >> 8);
-				tmp =
-				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0072);
-				v2 = (s8) (tmp & 0x00FF);
-				v3 = (s8) ((tmp & 0xFF00) >> 8);
-				if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
-				    || v3 == 0x7F)
-					return;
-				v0 = (v0 + 0x20) & 0x3F;
-				v1 = (v1 + 0x20) & 0x3F;
-				v2 = (v2 + 0x20) & 0x3F;
-				v3 = (v3 + 0x20) & 0x3F;
-				tmp = 1;
-			}
-			b43_shm_clear_tssi(dev);
-
-			average = (v0 + v1 + v2 + v3 + 2) / 4;
-
-			if (tmp
-			    && (b43_shm_read16(dev, B43_SHM_SHARED, 0x005E) &
-				0x8))
-				average -= 13;
-
-			estimated_pwr =
-			    b43_phy_estimate_power_out(dev, average);
-
-			max_pwr = dev->dev->bus->sprom.maxpwr_bg;
-			if ((dev->dev->bus->sprom.boardflags_lo
-			    & B43_BFL_PACTRL) && (phy->type == B43_PHYTYPE_G))
-				max_pwr -= 0x3;
-			if (unlikely(max_pwr <= 0)) {
-				b43warn(dev->wl,
-					"Invalid max-TX-power value in SPROM.\n");
-				max_pwr = 60;	/* fake it */
-				dev->dev->bus->sprom.maxpwr_bg = max_pwr;
-			}
-
-			/*TODO:
-			   max_pwr = min(REG - dev->dev->bus->sprom.antennagain_bgphy - 0x6, max_pwr)
-			   where REG is the max power as per the regulatory domain
-			 */
-
-			/* Get desired power (in Q5.2) */
-			desired_pwr = INT_TO_Q52(phy->power_level);
-			/* And limit it. max_pwr already is Q5.2 */
-			desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
-			if (b43_debug(dev, B43_DBG_XMITPOWER)) {
-				b43dbg(dev->wl,
-				       "Current TX power output: " Q52_FMT
-				       " dBm, " "Desired TX power output: "
-				       Q52_FMT " dBm\n", Q52_ARG(estimated_pwr),
-				       Q52_ARG(desired_pwr));
-			}
-
-			/* Calculate the adjustment delta. */
-			pwr_adjust = desired_pwr - estimated_pwr;
-
-			/* RF attenuation delta. */
-			rfatt_delta = ((pwr_adjust + 7) / 8);
-			/* Lower attenuation => Bigger power output. Negate it. */
-			rfatt_delta = -rfatt_delta;
-
-			/* Baseband attenuation delta. */
-			bbatt_delta = pwr_adjust / 2;
-			/* Lower attenuation => Bigger power output. Negate it. */
-			bbatt_delta = -bbatt_delta;
-			/* RF att affects power level 4 times as much as
-			 * Baseband attenuation. Subtract it. */
-			bbatt_delta -= 4 * rfatt_delta;
-
-			/* So do we finally need to adjust something? */
-			if ((rfatt_delta == 0) && (bbatt_delta == 0))
-				return;
-
-			/* Calculate the new attenuation values. */
-			bbatt = phy->bbatt.att;
-			bbatt += bbatt_delta;
-			rfatt = phy->rfatt.att;
-			rfatt += rfatt_delta;
-
-			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
-			tx_control = phy->tx_control;
-			if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
-				if (rfatt <= 1) {
-					if (tx_control == 0) {
-						tx_control =
-						    B43_TXCTL_PA2DB |
-						    B43_TXCTL_TXMIX;
-						rfatt += 2;
-						bbatt += 2;
-					} else if (dev->dev->bus->sprom.
-						   boardflags_lo &
-						   B43_BFL_PACTRL) {
-						bbatt += 4 * (rfatt - 2);
-						rfatt = 2;
-					}
-				} else if (rfatt > 4 && tx_control) {
-					tx_control = 0;
-					if (bbatt < 3) {
-						rfatt -= 3;
-						bbatt += 2;
-					} else {
-						rfatt -= 2;
-						bbatt -= 2;
-					}
-				}
-			}
-			/* Save the control values */
-			phy->tx_control = tx_control;
-			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
-			phy->rfatt.att = rfatt;
-			phy->bbatt.att = bbatt;
-
-			/* Adjust the hardware */
-			b43_phy_lock(dev);
-			b43_radio_lock(dev);
-			b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
-					  phy->tx_control);
-			b43_radio_unlock(dev);
-			b43_phy_unlock(dev);
-			break;
-		}
-	case B43_PHYTYPE_N:
-		b43_nphy_xmitpower(dev);
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-}
-
-static inline s32 b43_tssi2dbm_ad(s32 num, s32 den)
-{
-	if (num < 0)
-		return num / den;
-	else
-		return (num + den / 2) / den;
-}
-
-static inline
-    s8 b43_tssi2dbm_entry(s8 entry[], u8 index, s16 pab0, s16 pab1, s16 pab2)
-{
-	s32 m1, m2, f = 256, q, delta;
-	s8 i = 0;
-
-	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
-	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
-	do {
-		if (i > 15)
-			return -EINVAL;
-		q = b43_tssi2dbm_ad(f * 4096 -
-				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
-		delta = abs(q - f);
-		f = q;
-		i++;
-	} while (delta >= 2);
-	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
-	return 0;
-}
-
-/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */
-int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	s16 pab0, pab1, pab2;
-	u8 idx;
-	s8 *dyn_tssi2dbm;
-
-	if (phy->type == B43_PHYTYPE_A) {
-		pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
-		pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
-		pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
-	} else {
-		pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
-		pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
-		pab2 = (s16) (dev->dev->bus->sprom.pa0b2);
-	}
-
-	if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) {
-		phy->tgt_idle_tssi = 0x34;
-		phy->tssi2dbm = b43_tssi2dbm_b_table;
-		return 0;
-	}
-
-	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
-	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
-		/* The pabX values are set in SPROM. Use them. */
-		if (phy->type == B43_PHYTYPE_A) {
-			if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
-			    (s8) dev->dev->bus->sprom.itssi_a != -1)
-				phy->tgt_idle_tssi =
-				    (s8) (dev->dev->bus->sprom.itssi_a);
-			else
-				phy->tgt_idle_tssi = 62;
-		} else {
-			if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
-			    (s8) dev->dev->bus->sprom.itssi_bg != -1)
-				phy->tgt_idle_tssi =
-				    (s8) (dev->dev->bus->sprom.itssi_bg);
-			else
-				phy->tgt_idle_tssi = 62;
-		}
-		dyn_tssi2dbm = kmalloc(64, GFP_KERNEL);
-		if (dyn_tssi2dbm == NULL) {
-			b43err(dev->wl, "Could not allocate memory "
-			       "for tssi2dbm table\n");
-			return -ENOMEM;
-		}
-		for (idx = 0; idx < 64; idx++)
-			if (b43_tssi2dbm_entry
-			    (dyn_tssi2dbm, idx, pab0, pab1, pab2)) {
-				phy->tssi2dbm = NULL;
-				b43err(dev->wl, "Could not generate "
-				       "tssi2dBm table\n");
-				kfree(dyn_tssi2dbm);
-				return -ENODEV;
-			}
-		phy->tssi2dbm = dyn_tssi2dbm;
-		phy->dyn_tssi_tbl = 1;
-	} else {
-		/* pabX values not set in SPROM. */
-		switch (phy->type) {
-		case B43_PHYTYPE_A:
-			/* APHY needs a generated table. */
-			phy->tssi2dbm = NULL;
-			b43err(dev->wl, "Could not generate tssi2dBm "
-			       "table (wrong SPROM info)!\n");
-			return -ENODEV;
-		case B43_PHYTYPE_B:
-			phy->tgt_idle_tssi = 0x34;
-			phy->tssi2dbm = b43_tssi2dbm_b_table;
-			break;
-		case B43_PHYTYPE_G:
-			phy->tgt_idle_tssi = 0x34;
-			phy->tssi2dbm = b43_tssi2dbm_g_table;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-int b43_phy_init(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	bool unsupported = 0;
-	int err = 0;
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		if (phy->rev == 2 || phy->rev == 3)
-			b43_phy_inita(dev);
-		else
-			unsupported = 1;
-		break;
-	case B43_PHYTYPE_G:
-		b43_phy_initg(dev);
-		break;
-	case B43_PHYTYPE_N:
-		err = b43_phy_initn(dev);
-		break;
-	default:
-		unsupported = 1;
-	}
-	if (unsupported)
-		b43err(dev->wl, "Unknown PHYTYPE found\n");
-
-	return err;
-}
-
-void b43_set_rx_antenna(struct b43_wldev *dev, int antenna)
-{
-	struct b43_phy *phy = &dev->phy;
-	u64 hf;
-	u16 tmp;
-	int autodiv = 0;
-
-	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
-		autodiv = 1;
-
-	hf = b43_hf_read(dev);
-	hf &= ~B43_HF_ANTDIVHELP;
-	b43_hf_write(dev, hf);
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-	case B43_PHYTYPE_G:
-		tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
-		tmp &= ~B43_PHY_BBANDCFG_RXANT;
-		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
-		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
-		b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
-
-		if (autodiv) {
-			tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
-			if (antenna == B43_ANTENNA_AUTO0)
-				tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
-			else
-				tmp |= B43_PHY_ANTDWELL_AUTODIV1;
-			b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
-		}
-		if (phy->type == B43_PHYTYPE_G) {
-			tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
-			if (autodiv)
-				tmp |= B43_PHY_ANTWRSETT_ARXDIV;
-			else
-				tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
-			b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
-			if (phy->rev >= 2) {
-				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
-				tmp |= B43_PHY_OFDM61_10;
-				b43_phy_write(dev, B43_PHY_OFDM61, tmp);
-
-				tmp =
-				    b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
-				tmp = (tmp & 0xFF00) | 0x15;
-				b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
-					      tmp);
-
-				if (phy->rev == 2) {
-					b43_phy_write(dev, B43_PHY_ADIVRELATED,
-						      8);
-				} else {
-					tmp =
-					    b43_phy_read(dev,
-							 B43_PHY_ADIVRELATED);
-					tmp = (tmp & 0xFF00) | 8;
-					b43_phy_write(dev, B43_PHY_ADIVRELATED,
-						      tmp);
-				}
-			}
-			if (phy->rev >= 6)
-				b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
-		} else {
-			if (phy->rev < 3) {
-				tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
-				tmp = (tmp & 0xFF00) | 0x24;
-				b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
-			} else {
-				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
-				tmp |= 0x10;
-				b43_phy_write(dev, B43_PHY_OFDM61, tmp);
-				if (phy->analog == 3) {
-					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
-						      0x1D);
-					b43_phy_write(dev, B43_PHY_ADIVRELATED,
-						      8);
-				} else {
-					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
-						      0x3A);
-					tmp =
-					    b43_phy_read(dev,
-							 B43_PHY_ADIVRELATED);
-					tmp = (tmp & 0xFF00) | 8;
-					b43_phy_write(dev, B43_PHY_ADIVRELATED,
-						      tmp);
-				}
-			}
-		}
-		break;
-	case B43_PHYTYPE_B:
-		tmp = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
-		tmp &= ~B43_PHY_BBANDCFG_RXANT;
-		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
-		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
-		b43_phy_write(dev, B43_PHY_CCKBBANDCFG, tmp);
-		break;
-	case B43_PHYTYPE_N:
-		b43_nphy_set_rxantenna(dev, antenna);
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-
-	hf |= B43_HF_ANTDIVHELP;
-	b43_hf_write(dev, hf);
-}
-
-/* Get the freq, as it has to be written to the device. */
-static inline u16 channel2freq_bg(u8 channel)
-{
-	B43_WARN_ON(!(channel >= 1 && channel <= 14));
-
-	return b43_radio_channel_codes_bg[channel - 1];
-}
-
-/* Get the freq, as it has to be written to the device. */
-static inline u16 channel2freq_a(u8 channel)
-{
-	B43_WARN_ON(channel > 200);
-
-	return (5000 + 5 * channel);
-}
-
-void b43_radio_lock(struct b43_wldev *dev)
-{
-	u32 macctl;
-
-	macctl = b43_read32(dev, B43_MMIO_MACCTL);
-	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
-	macctl |= B43_MACCTL_RADIOLOCK;
-	b43_write32(dev, B43_MMIO_MACCTL, macctl);
-	/* Commit the write and wait for the device
-	 * to exit any radio register access. */
-	b43_read32(dev, B43_MMIO_MACCTL);
-	udelay(10);
-}
-
-void b43_radio_unlock(struct b43_wldev *dev)
-{
-	u32 macctl;
-
-	/* Commit any write */
-	b43_read16(dev, B43_MMIO_PHY_VER);
-	/* unlock */
-	macctl = b43_read32(dev, B43_MMIO_MACCTL);
-	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
-	macctl &= ~B43_MACCTL_RADIOLOCK;
-	b43_write32(dev, B43_MMIO_MACCTL, macctl);
-}
-
-u16 b43_radio_read16(struct b43_wldev *dev, u16 offset)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	/* Offset 1 is a 32-bit register. */
-	B43_WARN_ON(offset == 1);
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		offset |= 0x40;
-		break;
-	case B43_PHYTYPE_B:
-		if (phy->radio_ver == 0x2053) {
-			if (offset < 0x70)
-				offset += 0x80;
-			else if (offset < 0x80)
-				offset += 0x70;
-		} else if (phy->radio_ver == 0x2050) {
-			offset |= 0x80;
-		} else
-			B43_WARN_ON(1);
-		break;
-	case B43_PHYTYPE_G:
-		offset |= 0x80;
-		break;
-	case B43_PHYTYPE_N:
-		offset |= 0x100;
-		break;
-	case B43_PHYTYPE_LP:
-		/* No adjustment required. */
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-
-	b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
-	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
-}
-
-void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val)
-{
-	/* Offset 1 is a 32-bit register. */
-	B43_WARN_ON(offset == 1);
-
-	b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
-	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, val);
-}
-
-void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
-{
-	b43_radio_write16(dev, offset,
-			  b43_radio_read16(dev, offset) & mask);
-}
-
-void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
-{
-	b43_radio_write16(dev, offset,
-			  b43_radio_read16(dev, offset) | set);
-}
-
-void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
-{
-	b43_radio_write16(dev, offset,
-			  (b43_radio_read16(dev, offset) & mask) | set);
-}
-
 static void b43_set_all_gains(struct b43_wldev *dev,
 			      s16 first, s16 second, s16 third)
 {
@@ -2134,108 +382,10 @@
 	b43_dummy_transmission(dev);
 }
 
-/* Synthetic PU workaround */
-static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	might_sleep();
-
-	if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
-		/* We do not need the workaround. */
-		return;
-	}
-
-	if (channel <= 10) {
-		b43_write16(dev, B43_MMIO_CHANNEL,
-			    channel2freq_bg(channel + 4));
-	} else {
-		b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
-	}
-	msleep(1);
-	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
-}
-
-u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel)
-{
-	struct b43_phy *phy = &dev->phy;
-	u8 ret = 0;
-	u16 saved, rssi, temp;
-	int i, j = 0;
-
-	saved = b43_phy_read(dev, 0x0403);
-	b43_radio_selectchannel(dev, channel, 0);
-	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
-	if (phy->aci_hw_rssi)
-		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
-	else
-		rssi = saved & 0x3F;
-	/* clamp temp to signed 5bit */
-	if (rssi > 32)
-		rssi -= 64;
-	for (i = 0; i < 100; i++) {
-		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
-		if (temp > 32)
-			temp -= 64;
-		if (temp < rssi)
-			j++;
-		if (j >= 20)
-			ret = 1;
-	}
-	b43_phy_write(dev, 0x0403, saved);
-
-	return ret;
-}
-
-u8 b43_radio_aci_scan(struct b43_wldev * dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	u8 ret[13];
-	unsigned int channel = phy->channel;
-	unsigned int i, j, start, end;
-
-	if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
-		return 0;
-
-	b43_phy_lock(dev);
-	b43_radio_lock(dev);
-	b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
-	b43_phy_write(dev, B43_PHY_G_CRS,
-		      b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
-	b43_set_all_gains(dev, 3, 8, 1);
-
-	start = (channel - 5 > 0) ? channel - 5 : 1;
-	end = (channel + 5 < 14) ? channel + 5 : 13;
-
-	for (i = start; i <= end; i++) {
-		if (abs(channel - i) > 2)
-			ret[i - 1] = b43_radio_aci_detect(dev, i);
-	}
-	b43_radio_selectchannel(dev, channel, 0);
-	b43_phy_write(dev, 0x0802,
-		      (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
-	b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
-	b43_phy_write(dev, B43_PHY_G_CRS,
-		      b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
-	b43_set_original_gains(dev);
-	for (i = 0; i < 13; i++) {
-		if (!ret[i])
-			continue;
-		end = (i + 5 < 13) ? i + 5 : 13;
-		for (j = i; j < end; j++)
-			ret[j] = 1;
-	}
-	b43_radio_unlock(dev);
-	b43_phy_unlock(dev);
-
-	return ret[channel - 1];
-}
-
 /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
 void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
 {
 	b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
-	mmiowb();
 	b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
 }
 
@@ -2267,17 +417,17 @@
 /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
 void b43_nrssi_mem_update(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	s16 i, delta;
 	s32 tmp;
 
-	delta = 0x1F - phy->nrssi[0];
+	delta = 0x1F - gphy->nrssi[0];
 	for (i = 0; i < 64; i++) {
-		tmp = (i - delta) * phy->nrssislope;
+		tmp = (i - delta) * gphy->nrssislope;
 		tmp /= 0x10000;
 		tmp += 0x3A;
 		tmp = clamp_val(tmp, 0, 0x3F);
-		phy->nrssi_lt[i] = tmp;
+		gphy->nrssi_lt[i] = tmp;
 	}
 }
 
@@ -2442,347 +592,230 @@
 void b43_calc_nrssi_slope(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	u16 backup[18] = { 0 };
 	u16 tmp;
 	s16 nrssi0, nrssi1;
 
-	switch (phy->type) {
-	case B43_PHYTYPE_B:
-		backup[0] = b43_radio_read16(dev, 0x007A);
-		backup[1] = b43_radio_read16(dev, 0x0052);
-		backup[2] = b43_radio_read16(dev, 0x0043);
-		backup[3] = b43_phy_read(dev, 0x0030);
-		backup[4] = b43_phy_read(dev, 0x0026);
-		backup[5] = b43_phy_read(dev, 0x0015);
-		backup[6] = b43_phy_read(dev, 0x002A);
-		backup[7] = b43_phy_read(dev, 0x0020);
-		backup[8] = b43_phy_read(dev, 0x005A);
-		backup[9] = b43_phy_read(dev, 0x0059);
-		backup[10] = b43_phy_read(dev, 0x0058);
-		backup[11] = b43_read16(dev, 0x03E2);
-		backup[12] = b43_read16(dev, 0x03E6);
-		backup[13] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
 
-		tmp = b43_radio_read16(dev, 0x007A);
-		tmp &= (phy->rev >= 5) ? 0x007F : 0x000F;
-		b43_radio_write16(dev, 0x007A, tmp);
-		b43_phy_write(dev, 0x0030, 0x00FF);
-		b43_write16(dev, 0x03EC, 0x7F7F);
-		b43_phy_write(dev, 0x0026, 0x0000);
-		b43_phy_write(dev, 0x0015, b43_phy_read(dev, 0x0015) | 0x0020);
-		b43_phy_write(dev, 0x002A, 0x08A3);
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) | 0x0080);
+	if (phy->radio_rev >= 9)
+		return;
+	if (phy->radio_rev == 8)
+		b43_calc_nrssi_offset(dev);
 
-		nrssi0 = (s16) b43_phy_read(dev, 0x0027);
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) & 0x007F);
-		if (phy->rev >= 2) {
-			b43_write16(dev, 0x03E6, 0x0040);
-		} else if (phy->rev == 0) {
-			b43_write16(dev, 0x03E6, 0x0122);
-		} else {
-			b43_write16(dev, B43_MMIO_CHANNEL_EXT,
-				    b43_read16(dev,
-					       B43_MMIO_CHANNEL_EXT) & 0x2000);
-		}
-		b43_phy_write(dev, 0x0020, 0x3F3F);
-		b43_phy_write(dev, 0x0015, 0xF330);
-		b43_radio_write16(dev, 0x005A, 0x0060);
-		b43_radio_write16(dev, 0x0043,
-				  b43_radio_read16(dev, 0x0043) & 0x00F0);
-		b43_phy_write(dev, 0x005A, 0x0480);
-		b43_phy_write(dev, 0x0059, 0x0810);
-		b43_phy_write(dev, 0x0058, 0x000D);
-		udelay(20);
-
-		nrssi1 = (s16) b43_phy_read(dev, 0x0027);
-		b43_phy_write(dev, 0x0030, backup[3]);
-		b43_radio_write16(dev, 0x007A, backup[0]);
-		b43_write16(dev, 0x03E2, backup[11]);
-		b43_phy_write(dev, 0x0026, backup[4]);
-		b43_phy_write(dev, 0x0015, backup[5]);
-		b43_phy_write(dev, 0x002A, backup[6]);
-		b43_synth_pu_workaround(dev, phy->channel);
-		if (phy->rev != 0)
-			b43_write16(dev, 0x03F4, backup[13]);
-
-		b43_phy_write(dev, 0x0020, backup[7]);
-		b43_phy_write(dev, 0x005A, backup[8]);
-		b43_phy_write(dev, 0x0059, backup[9]);
-		b43_phy_write(dev, 0x0058, backup[10]);
-		b43_radio_write16(dev, 0x0052, backup[1]);
-		b43_radio_write16(dev, 0x0043, backup[2]);
-
-		if (nrssi0 == nrssi1)
-			phy->nrssislope = 0x00010000;
-		else
-			phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
-
-		if (nrssi0 <= -4) {
-			phy->nrssi[0] = nrssi0;
-			phy->nrssi[1] = nrssi1;
-		}
-		break;
-	case B43_PHYTYPE_G:
-		if (phy->radio_rev >= 9)
-			return;
-		if (phy->radio_rev == 8)
-			b43_calc_nrssi_offset(dev);
-
-		b43_phy_write(dev, B43_PHY_G_CRS,
-			      b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
-		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
-		backup[7] = b43_read16(dev, 0x03E2);
-		b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000);
-		backup[0] = b43_radio_read16(dev, 0x007A);
-		backup[1] = b43_radio_read16(dev, 0x0052);
-		backup[2] = b43_radio_read16(dev, 0x0043);
-		backup[3] = b43_phy_read(dev, 0x0015);
-		backup[4] = b43_phy_read(dev, 0x005A);
-		backup[5] = b43_phy_read(dev, 0x0059);
-		backup[6] = b43_phy_read(dev, 0x0058);
-		backup[8] = b43_read16(dev, 0x03E6);
-		backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
-		if (phy->rev >= 3) {
-			backup[10] = b43_phy_read(dev, 0x002E);
-			backup[11] = b43_phy_read(dev, 0x002F);
-			backup[12] = b43_phy_read(dev, 0x080F);
-			backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL);
-			backup[14] = b43_phy_read(dev, 0x0801);
-			backup[15] = b43_phy_read(dev, 0x0060);
-			backup[16] = b43_phy_read(dev, 0x0014);
-			backup[17] = b43_phy_read(dev, 0x0478);
-			b43_phy_write(dev, 0x002E, 0);
-			b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0);
-			switch (phy->rev) {
-			case 4:
-			case 6:
-			case 7:
-				b43_phy_write(dev, 0x0478,
-					      b43_phy_read(dev, 0x0478)
-					      | 0x0100);
-				b43_phy_write(dev, 0x0801,
-					      b43_phy_read(dev, 0x0801)
-					      | 0x0040);
-				break;
-			case 3:
-			case 5:
-				b43_phy_write(dev, 0x0801,
-					      b43_phy_read(dev, 0x0801)
-					      & 0xFFBF);
-				break;
-			}
-			b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
+	b43_phy_write(dev, B43_PHY_G_CRS,
+		      b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
+	b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
+	backup[7] = b43_read16(dev, 0x03E2);
+	b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000);
+	backup[0] = b43_radio_read16(dev, 0x007A);
+	backup[1] = b43_radio_read16(dev, 0x0052);
+	backup[2] = b43_radio_read16(dev, 0x0043);
+	backup[3] = b43_phy_read(dev, 0x0015);
+	backup[4] = b43_phy_read(dev, 0x005A);
+	backup[5] = b43_phy_read(dev, 0x0059);
+	backup[6] = b43_phy_read(dev, 0x0058);
+	backup[8] = b43_read16(dev, 0x03E6);
+	backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
+	if (phy->rev >= 3) {
+		backup[10] = b43_phy_read(dev, 0x002E);
+		backup[11] = b43_phy_read(dev, 0x002F);
+		backup[12] = b43_phy_read(dev, 0x080F);
+		backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL);
+		backup[14] = b43_phy_read(dev, 0x0801);
+		backup[15] = b43_phy_read(dev, 0x0060);
+		backup[16] = b43_phy_read(dev, 0x0014);
+		backup[17] = b43_phy_read(dev, 0x0478);
+		b43_phy_write(dev, 0x002E, 0);
+		b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0);
+		switch (phy->rev) {
+		case 4:
+		case 6:
+		case 7:
+			b43_phy_write(dev, 0x0478,
+				      b43_phy_read(dev, 0x0478)
+				      | 0x0100);
+			b43_phy_write(dev, 0x0801,
+				      b43_phy_read(dev, 0x0801)
 				      | 0x0040);
-			b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014)
-				      | 0x0200);
+			break;
+		case 3:
+		case 5:
+			b43_phy_write(dev, 0x0801,
+				      b43_phy_read(dev, 0x0801)
+				      & 0xFFBF);
+			break;
 		}
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) | 0x0070);
-		b43_set_all_gains(dev, 0, 8, 0);
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) & 0x00F7);
-		if (phy->rev >= 2) {
-			b43_phy_write(dev, 0x0811,
-				      (b43_phy_read(dev, 0x0811) & 0xFFCF) |
-				      0x0030);
-			b43_phy_write(dev, 0x0812,
-				      (b43_phy_read(dev, 0x0812) & 0xFFCF) |
-				      0x0010);
-		}
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) | 0x0080);
-		udelay(20);
-
-		nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
-		if (nrssi0 >= 0x0020)
-			nrssi0 -= 0x0040;
-
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) & 0x007F);
-		if (phy->rev >= 2) {
-			b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003)
-						    & 0xFF9F) | 0x0040);
-		}
-
-		b43_write16(dev, B43_MMIO_CHANNEL_EXT,
-			    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
-			    | 0x2000);
-		b43_radio_write16(dev, 0x007A,
-				  b43_radio_read16(dev, 0x007A) | 0x000F);
-		b43_phy_write(dev, 0x0015, 0xF330);
-		if (phy->rev >= 2) {
-			b43_phy_write(dev, 0x0812,
-				      (b43_phy_read(dev, 0x0812) & 0xFFCF) |
-				      0x0020);
-			b43_phy_write(dev, 0x0811,
-				      (b43_phy_read(dev, 0x0811) & 0xFFCF) |
-				      0x0020);
-		}
-
-		b43_set_all_gains(dev, 3, 0, 1);
-		if (phy->radio_rev == 8) {
-			b43_radio_write16(dev, 0x0043, 0x001F);
-		} else {
-			tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F;
-			b43_radio_write16(dev, 0x0052, tmp | 0x0060);
-			tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0;
-			b43_radio_write16(dev, 0x0043, tmp | 0x0009);
-		}
-		b43_phy_write(dev, 0x005A, 0x0480);
-		b43_phy_write(dev, 0x0059, 0x0810);
-		b43_phy_write(dev, 0x0058, 0x000D);
-		udelay(20);
-		nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
-		if (nrssi1 >= 0x0020)
-			nrssi1 -= 0x0040;
-		if (nrssi0 == nrssi1)
-			phy->nrssislope = 0x00010000;
-		else
-			phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
-		if (nrssi0 >= -4) {
-			phy->nrssi[0] = nrssi1;
-			phy->nrssi[1] = nrssi0;
-		}
-		if (phy->rev >= 3) {
-			b43_phy_write(dev, 0x002E, backup[10]);
-			b43_phy_write(dev, 0x002F, backup[11]);
-			b43_phy_write(dev, 0x080F, backup[12]);
-			b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
-		}
-		if (phy->rev >= 2) {
-			b43_phy_write(dev, 0x0812,
-				      b43_phy_read(dev, 0x0812) & 0xFFCF);
-			b43_phy_write(dev, 0x0811,
-				      b43_phy_read(dev, 0x0811) & 0xFFCF);
-		}
-
-		b43_radio_write16(dev, 0x007A, backup[0]);
-		b43_radio_write16(dev, 0x0052, backup[1]);
-		b43_radio_write16(dev, 0x0043, backup[2]);
-		b43_write16(dev, 0x03E2, backup[7]);
-		b43_write16(dev, 0x03E6, backup[8]);
-		b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]);
-		b43_phy_write(dev, 0x0015, backup[3]);
-		b43_phy_write(dev, 0x005A, backup[4]);
-		b43_phy_write(dev, 0x0059, backup[5]);
-		b43_phy_write(dev, 0x0058, backup[6]);
-		b43_synth_pu_workaround(dev, phy->channel);
-		b43_phy_write(dev, 0x0802,
-			      b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002));
-		b43_set_original_gains(dev);
-		b43_phy_write(dev, B43_PHY_G_CRS,
-			      b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
-		if (phy->rev >= 3) {
-			b43_phy_write(dev, 0x0801, backup[14]);
-			b43_phy_write(dev, 0x0060, backup[15]);
-			b43_phy_write(dev, 0x0014, backup[16]);
-			b43_phy_write(dev, 0x0478, backup[17]);
-		}
-		b43_nrssi_mem_update(dev);
-		b43_calc_nrssi_threshold(dev);
-		break;
-	default:
-		B43_WARN_ON(1);
+		b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
+			      | 0x0040);
+		b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014)
+			      | 0x0200);
 	}
+	b43_radio_write16(dev, 0x007A,
+			  b43_radio_read16(dev, 0x007A) | 0x0070);
+	b43_set_all_gains(dev, 0, 8, 0);
+	b43_radio_write16(dev, 0x007A,
+			  b43_radio_read16(dev, 0x007A) & 0x00F7);
+	if (phy->rev >= 2) {
+		b43_phy_write(dev, 0x0811,
+			      (b43_phy_read(dev, 0x0811) & 0xFFCF) |
+			      0x0030);
+		b43_phy_write(dev, 0x0812,
+			      (b43_phy_read(dev, 0x0812) & 0xFFCF) |
+			      0x0010);
+	}
+	b43_radio_write16(dev, 0x007A,
+			  b43_radio_read16(dev, 0x007A) | 0x0080);
+	udelay(20);
+
+	nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
+	if (nrssi0 >= 0x0020)
+		nrssi0 -= 0x0040;
+
+	b43_radio_write16(dev, 0x007A,
+			  b43_radio_read16(dev, 0x007A) & 0x007F);
+	if (phy->rev >= 2) {
+		b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003)
+					    & 0xFF9F) | 0x0040);
+	}
+
+	b43_write16(dev, B43_MMIO_CHANNEL_EXT,
+		    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
+		    | 0x2000);
+	b43_radio_write16(dev, 0x007A,
+			  b43_radio_read16(dev, 0x007A) | 0x000F);
+	b43_phy_write(dev, 0x0015, 0xF330);
+	if (phy->rev >= 2) {
+		b43_phy_write(dev, 0x0812,
+			      (b43_phy_read(dev, 0x0812) & 0xFFCF) |
+			      0x0020);
+		b43_phy_write(dev, 0x0811,
+			      (b43_phy_read(dev, 0x0811) & 0xFFCF) |
+			      0x0020);
+	}
+
+	b43_set_all_gains(dev, 3, 0, 1);
+	if (phy->radio_rev == 8) {
+		b43_radio_write16(dev, 0x0043, 0x001F);
+	} else {
+		tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F;
+		b43_radio_write16(dev, 0x0052, tmp | 0x0060);
+		tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0;
+		b43_radio_write16(dev, 0x0043, tmp | 0x0009);
+	}
+	b43_phy_write(dev, 0x005A, 0x0480);
+	b43_phy_write(dev, 0x0059, 0x0810);
+	b43_phy_write(dev, 0x0058, 0x000D);
+	udelay(20);
+	nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
+	if (nrssi1 >= 0x0020)
+		nrssi1 -= 0x0040;
+	if (nrssi0 == nrssi1)
+		gphy->nrssislope = 0x00010000;
+	else
+		gphy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
+	if (nrssi0 >= -4) {
+		gphy->nrssi[0] = nrssi1;
+		gphy->nrssi[1] = nrssi0;
+	}
+	if (phy->rev >= 3) {
+		b43_phy_write(dev, 0x002E, backup[10]);
+		b43_phy_write(dev, 0x002F, backup[11]);
+		b43_phy_write(dev, 0x080F, backup[12]);
+		b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
+	}
+	if (phy->rev >= 2) {
+		b43_phy_write(dev, 0x0812,
+			      b43_phy_read(dev, 0x0812) & 0xFFCF);
+		b43_phy_write(dev, 0x0811,
+			      b43_phy_read(dev, 0x0811) & 0xFFCF);
+	}
+
+	b43_radio_write16(dev, 0x007A, backup[0]);
+	b43_radio_write16(dev, 0x0052, backup[1]);
+	b43_radio_write16(dev, 0x0043, backup[2]);
+	b43_write16(dev, 0x03E2, backup[7]);
+	b43_write16(dev, 0x03E6, backup[8]);
+	b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]);
+	b43_phy_write(dev, 0x0015, backup[3]);
+	b43_phy_write(dev, 0x005A, backup[4]);
+	b43_phy_write(dev, 0x0059, backup[5]);
+	b43_phy_write(dev, 0x0058, backup[6]);
+	b43_synth_pu_workaround(dev, phy->channel);
+	b43_phy_write(dev, 0x0802,
+		      b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002));
+	b43_set_original_gains(dev);
+	b43_phy_write(dev, B43_PHY_G_CRS,
+		      b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
+	if (phy->rev >= 3) {
+		b43_phy_write(dev, 0x0801, backup[14]);
+		b43_phy_write(dev, 0x0060, backup[15]);
+		b43_phy_write(dev, 0x0014, backup[16]);
+		b43_phy_write(dev, 0x0478, backup[17]);
+	}
+	b43_nrssi_mem_update(dev);
+	b43_calc_nrssi_threshold(dev);
 }
 
-void b43_calc_nrssi_threshold(struct b43_wldev *dev)
+static void b43_calc_nrssi_threshold(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	s32 threshold;
+	struct b43_phy_g *gphy = phy->g;
 	s32 a, b;
 	s16 tmp16;
 	u16 tmp_u16;
 
-	switch (phy->type) {
-	case B43_PHYTYPE_B:{
-			if (phy->radio_ver != 0x2050)
-				return;
-			if (!
-			    (dev->dev->bus->sprom.
-			     boardflags_lo & B43_BFL_RSSI))
-				return;
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
 
-			if (phy->radio_rev >= 6) {
-				threshold =
-				    (phy->nrssi[1] - phy->nrssi[0]) * 32;
-				threshold += 20 * (phy->nrssi[0] + 1);
-				threshold /= 40;
-			} else
-				threshold = phy->nrssi[1] - 5;
-
-			threshold = clamp_val(threshold, 0, 0x3E);
-			b43_phy_read(dev, 0x0020);	/* dummy read */
-			b43_phy_write(dev, 0x0020,
-				      (((u16) threshold) << 8) | 0x001C);
-
-			if (phy->radio_rev >= 6) {
-				b43_phy_write(dev, 0x0087, 0x0E0D);
-				b43_phy_write(dev, 0x0086, 0x0C0B);
-				b43_phy_write(dev, 0x0085, 0x0A09);
-				b43_phy_write(dev, 0x0084, 0x0808);
-				b43_phy_write(dev, 0x0083, 0x0808);
-				b43_phy_write(dev, 0x0082, 0x0604);
-				b43_phy_write(dev, 0x0081, 0x0302);
-				b43_phy_write(dev, 0x0080, 0x0100);
-			}
-			break;
-		}
-	case B43_PHYTYPE_G:
-		if (!phy->gmode ||
-		    !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
-			tmp16 = b43_nrssi_hw_read(dev, 0x20);
-			if (tmp16 >= 0x20)
-				tmp16 -= 0x40;
-			if (tmp16 < 3) {
-				b43_phy_write(dev, 0x048A,
-					      (b43_phy_read(dev, 0x048A)
-					       & 0xF000) | 0x09EB);
-			} else {
-				b43_phy_write(dev, 0x048A,
-					      (b43_phy_read(dev, 0x048A)
-					       & 0xF000) | 0x0AED);
-			}
+	if (!phy->gmode ||
+	    !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+		tmp16 = b43_nrssi_hw_read(dev, 0x20);
+		if (tmp16 >= 0x20)
+			tmp16 -= 0x40;
+		if (tmp16 < 3) {
+			b43_phy_write(dev, 0x048A,
+				      (b43_phy_read(dev, 0x048A)
+				       & 0xF000) | 0x09EB);
 		} else {
-			if (phy->interfmode == B43_INTERFMODE_NONWLAN) {
-				a = 0xE;
-				b = 0xA;
-			} else if (!phy->aci_wlan_automatic && phy->aci_enable) {
-				a = 0x13;
-				b = 0x12;
-			} else {
-				a = 0xE;
-				b = 0x11;
-			}
-
-			a = a * (phy->nrssi[1] - phy->nrssi[0]);
-			a += (phy->nrssi[0] << 6);
-			if (a < 32)
-				a += 31;
-			else
-				a += 32;
-			a = a >> 6;
-			a = clamp_val(a, -31, 31);
-
-			b = b * (phy->nrssi[1] - phy->nrssi[0]);
-			b += (phy->nrssi[0] << 6);
-			if (b < 32)
-				b += 31;
-			else
-				b += 32;
-			b = b >> 6;
-			b = clamp_val(b, -31, 31);
-
-			tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
-			tmp_u16 |= ((u32) b & 0x0000003F);
-			tmp_u16 |= (((u32) a & 0x0000003F) << 6);
-			b43_phy_write(dev, 0x048A, tmp_u16);
+			b43_phy_write(dev, 0x048A,
+				      (b43_phy_read(dev, 0x048A)
+				       & 0xF000) | 0x0AED);
 		}
-		break;
-	default:
-		B43_WARN_ON(1);
+	} else {
+		if (gphy->interfmode == B43_INTERFMODE_NONWLAN) {
+			a = 0xE;
+			b = 0xA;
+		} else if (!gphy->aci_wlan_automatic && gphy->aci_enable) {
+			a = 0x13;
+			b = 0x12;
+		} else {
+			a = 0xE;
+			b = 0x11;
+		}
+
+		a = a * (gphy->nrssi[1] - gphy->nrssi[0]);
+		a += (gphy->nrssi[0] << 6);
+		if (a < 32)
+			a += 31;
+		else
+			a += 32;
+		a = a >> 6;
+		a = clamp_val(a, -31, 31);
+
+		b = b * (gphy->nrssi[1] - gphy->nrssi[0]);
+		b += (gphy->nrssi[0] << 6);
+		if (b < 32)
+			b += 31;
+		else
+			b += 32;
+		b = b >> 6;
+		b = clamp_val(b, -31, 31);
+
+		tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
+		tmp_u16 |= ((u32) b & 0x0000003F);
+		tmp_u16 |= (((u32) a & 0x0000003F) << 6);
+		b43_phy_write(dev, 0x048A, tmp_u16);
 	}
 }
 
@@ -2860,9 +893,10 @@
 b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	u16 tmp, flipped;
 	size_t stackidx = 0;
-	u32 *stack = phy->interfstack;
+	u32 *stack = gphy->interfstack;
 
 	switch (mode) {
 	case B43_INTERFMODE_NONWLAN:
@@ -2928,7 +962,7 @@
 		if (b43_phy_read(dev, 0x0033) & 0x0800)
 			break;
 
-		phy->aci_enable = 1;
+		gphy->aci_enable = 1;
 
 		phy_stacksave(B43_PHY_RADIO_BITFIELD);
 		phy_stacksave(B43_PHY_G_CRS);
@@ -3064,7 +1098,8 @@
 b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
 {
 	struct b43_phy *phy = &dev->phy;
-	u32 *stack = phy->interfstack;
+	struct b43_phy_g *gphy = phy->g;
+	u32 *stack = gphy->interfstack;
 
 	switch (mode) {
 	case B43_INTERFMODE_NONWLAN:
@@ -3103,7 +1138,7 @@
 		if (!(b43_phy_read(dev, 0x0033) & 0x0800))
 			break;
 
-		phy->aci_enable = 0;
+		gphy->aci_enable = 0;
 
 		phy_stackrestore(B43_PHY_RADIO_BITFIELD);
 		phy_stackrestore(B43_PHY_G_CRS);
@@ -3153,47 +1188,6 @@
 #undef ofdmtab_stacksave
 #undef ofdmtab_stackrestore
 
-int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode)
-{
-	struct b43_phy *phy = &dev->phy;
-	int currentmode;
-
-	if ((phy->type != B43_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode))
-		return -ENODEV;
-
-	phy->aci_wlan_automatic = 0;
-	switch (mode) {
-	case B43_INTERFMODE_AUTOWLAN:
-		phy->aci_wlan_automatic = 1;
-		if (phy->aci_enable)
-			mode = B43_INTERFMODE_MANUALWLAN;
-		else
-			mode = B43_INTERFMODE_NONE;
-		break;
-	case B43_INTERFMODE_NONE:
-	case B43_INTERFMODE_NONWLAN:
-	case B43_INTERFMODE_MANUALWLAN:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	currentmode = phy->interfmode;
-	if (currentmode == mode)
-		return 0;
-	if (currentmode != B43_INTERFMODE_NONE)
-		b43_radio_interference_mitigation_disable(dev, currentmode);
-
-	if (mode == B43_INTERFMODE_NONE) {
-		phy->aci_enable = 0;
-		phy->aci_hw_rssi = 0;
-	} else
-		b43_radio_interference_mitigation_enable(dev, mode);
-	phy->interfmode = mode;
-
-	return 0;
-}
-
 static u16 b43_radio_core_calibration_value(struct b43_wldev *dev)
 {
 	u16 reg, index, ret;
@@ -3219,13 +1213,14 @@
 				u16 phy_register, unsigned int lpd)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
 
 	if (!phy->gmode)
 		return 0;
 
 	if (has_loopback_gain(phy)) {
-		int max_lb_gain = phy->max_lb_gain;
+		int max_lb_gain = gphy->max_lb_gain;
 		u16 extlna;
 		u16 i;
 
@@ -3606,301 +1601,1682 @@
 	return ret;
 }
 
-void b43_radio_init2060(struct b43_wldev *dev)
+static void b43_phy_initb5(struct b43_wldev *dev)
 {
-	int err;
+	struct ssb_bus *bus = dev->dev->bus;
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	u16 offset, value;
+	u8 old_channel;
 
-	b43_radio_write16(dev, 0x0004, 0x00C0);
-	b43_radio_write16(dev, 0x0005, 0x0008);
-	b43_radio_write16(dev, 0x0009, 0x0040);
-	b43_radio_write16(dev, 0x0005, 0x00AA);
-	b43_radio_write16(dev, 0x0032, 0x008F);
-	b43_radio_write16(dev, 0x0006, 0x008F);
-	b43_radio_write16(dev, 0x0034, 0x008F);
-	b43_radio_write16(dev, 0x002C, 0x0007);
-	b43_radio_write16(dev, 0x0082, 0x0080);
-	b43_radio_write16(dev, 0x0080, 0x0000);
-	b43_radio_write16(dev, 0x003F, 0x00DA);
-	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
-	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010);
-	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
-	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
-	msleep(1);		/* delay 400usec */
-
-	b43_radio_write16(dev, 0x0081,
-			  (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
-	msleep(1);		/* delay 400usec */
-
-	b43_radio_write16(dev, 0x0005,
-			  (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
-	b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
-	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
-	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
-	b43_radio_write16(dev, 0x0081,
-			  (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
-	b43_radio_write16(dev, 0x0005,
-			  (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
-	b43_phy_write(dev, 0x0063, 0xDDC6);
-	b43_phy_write(dev, 0x0069, 0x07BE);
-	b43_phy_write(dev, 0x006A, 0x0000);
-
-	err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_A, 0);
-	B43_WARN_ON(err);
-
-	msleep(1);
-}
-
-static inline u16 freq_r3A_value(u16 frequency)
-{
-	u16 value;
-
-	if (frequency < 5091)
-		value = 0x0040;
-	else if (frequency < 5321)
-		value = 0x0000;
-	else if (frequency < 5806)
-		value = 0x0080;
-	else
-		value = 0x0040;
-
-	return value;
-}
-
-void b43_radio_set_tx_iq(struct b43_wldev *dev)
-{
-	static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
-	static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
-	u16 tmp = b43_radio_read16(dev, 0x001E);
-	int i, j;
-
-	for (i = 0; i < 5; i++) {
-		for (j = 0; j < 5; j++) {
-			if (tmp == (data_high[i] << 4 | data_low[j])) {
-				b43_phy_write(dev, 0x0069,
-					      (i - j) << 8 | 0x00C0);
-				return;
-			}
+	if (phy->analog == 1) {
+		b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
+				  | 0x0050);
+	}
+	if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
+	    (bus->boardinfo.type != SSB_BOARD_BU4306)) {
+		value = 0x2120;
+		for (offset = 0x00A8; offset < 0x00C7; offset++) {
+			b43_phy_write(dev, offset, value);
+			value += 0x202;
 		}
 	}
+	b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
+		      | 0x0700);
+	if (phy->radio_ver == 0x2050)
+		b43_phy_write(dev, 0x0038, 0x0667);
+
+	if (phy->gmode || phy->rev >= 2) {
+		if (phy->radio_ver == 0x2050) {
+			b43_radio_write16(dev, 0x007A,
+					  b43_radio_read16(dev, 0x007A)
+					  | 0x0020);
+			b43_radio_write16(dev, 0x0051,
+					  b43_radio_read16(dev, 0x0051)
+					  | 0x0004);
+		}
+		b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);
+
+		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
+		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
+
+		b43_phy_write(dev, 0x001C, 0x186A);
+
+		b43_phy_write(dev, 0x0013,
+			      (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
+		b43_phy_write(dev, 0x0035,
+			      (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
+		b43_phy_write(dev, 0x005D,
+			      (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
+	}
+
+	if (dev->bad_frames_preempt) {
+		b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
+			      b43_phy_read(dev,
+					   B43_PHY_RADIO_BITFIELD) | (1 << 11));
+	}
+
+	if (phy->analog == 1) {
+		b43_phy_write(dev, 0x0026, 0xCE00);
+		b43_phy_write(dev, 0x0021, 0x3763);
+		b43_phy_write(dev, 0x0022, 0x1BC3);
+		b43_phy_write(dev, 0x0023, 0x06F9);
+		b43_phy_write(dev, 0x0024, 0x037E);
+	} else
+		b43_phy_write(dev, 0x0026, 0xCC00);
+	b43_phy_write(dev, 0x0030, 0x00C6);
+	b43_write16(dev, 0x03EC, 0x3F22);
+
+	if (phy->analog == 1)
+		b43_phy_write(dev, 0x0020, 0x3E1C);
+	else
+		b43_phy_write(dev, 0x0020, 0x301C);
+
+	if (phy->analog == 0)
+		b43_write16(dev, 0x03E4, 0x3000);
+
+	old_channel = phy->channel;
+	/* Force to channel 7, even if not supported. */
+	b43_gphy_channel_switch(dev, 7, 0);
+
+	if (phy->radio_ver != 0x2050) {
+		b43_radio_write16(dev, 0x0075, 0x0080);
+		b43_radio_write16(dev, 0x0079, 0x0081);
+	}
+
+	b43_radio_write16(dev, 0x0050, 0x0020);
+	b43_radio_write16(dev, 0x0050, 0x0023);
+
+	if (phy->radio_ver == 0x2050) {
+		b43_radio_write16(dev, 0x0050, 0x0020);
+		b43_radio_write16(dev, 0x005A, 0x0070);
+	}
+
+	b43_radio_write16(dev, 0x005B, 0x007B);
+	b43_radio_write16(dev, 0x005C, 0x00B0);
+
+	b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);
+
+	b43_gphy_channel_switch(dev, old_channel, 0);
+
+	b43_phy_write(dev, 0x0014, 0x0080);
+	b43_phy_write(dev, 0x0032, 0x00CA);
+	b43_phy_write(dev, 0x002A, 0x88A3);
+
+	b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
+
+	if (phy->radio_ver == 0x2050)
+		b43_radio_write16(dev, 0x005D, 0x000D);
+
+	b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
 }
 
-int b43_radio_selectchannel(struct b43_wldev *dev,
-			    u8 channel, int synthetic_pu_workaround)
+static void b43_phy_initb6(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	u16 r8, tmp;
-	u16 freq;
-	u16 channelcookie, savedcookie;
-	int err = 0;
+	struct b43_phy_g *gphy = phy->g;
+	u16 offset, val;
+	u8 old_channel;
 
-	if (channel == 0xFF) {
-		switch (phy->type) {
-		case B43_PHYTYPE_A:
-			channel = B43_DEFAULT_CHANNEL_A;
-			break;
-		case B43_PHYTYPE_B:
-		case B43_PHYTYPE_G:
-			channel = B43_DEFAULT_CHANNEL_BG;
-			break;
-		case B43_PHYTYPE_N:
-			//FIXME check if we are on 2.4GHz or 5GHz and set a default channel.
-			channel = 1;
-			break;
-		default:
-			B43_WARN_ON(1);
-		}
+	b43_phy_write(dev, 0x003E, 0x817A);
+	b43_radio_write16(dev, 0x007A,
+			  (b43_radio_read16(dev, 0x007A) | 0x0058));
+	if (phy->radio_rev == 4 || phy->radio_rev == 5) {
+		b43_radio_write16(dev, 0x51, 0x37);
+		b43_radio_write16(dev, 0x52, 0x70);
+		b43_radio_write16(dev, 0x53, 0xB3);
+		b43_radio_write16(dev, 0x54, 0x9B);
+		b43_radio_write16(dev, 0x5A, 0x88);
+		b43_radio_write16(dev, 0x5B, 0x88);
+		b43_radio_write16(dev, 0x5D, 0x88);
+		b43_radio_write16(dev, 0x5E, 0x88);
+		b43_radio_write16(dev, 0x7D, 0x88);
+		b43_hf_write(dev, b43_hf_read(dev)
+			     | B43_HF_TSSIRPSMW);
 	}
-
-	/* First we set the channel radio code to prevent the
-	 * firmware from sending ghost packets.
-	 */
-	channelcookie = channel;
-	if (0 /*FIXME on 5Ghz */)
-		channelcookie |= 0x100;
-	//FIXME set 40Mhz flag if required
-	savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie);
-
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		if (channel > 200) {
-			err = -EINVAL;
-			goto out;
-		}
-		freq = channel2freq_a(channel);
-
-		r8 = b43_radio_read16(dev, 0x0008);
-		b43_write16(dev, 0x03F0, freq);
-		b43_radio_write16(dev, 0x0008, r8);
-
-		//TODO: write max channel TX power? to Radio 0x2D
-		tmp = b43_radio_read16(dev, 0x002E);
-		tmp &= 0x0080;
-		//TODO: OR tmp with the Power out estimation for this channel?
-		b43_radio_write16(dev, 0x002E, tmp);
-
-		if (freq >= 4920 && freq <= 5500) {
-			/*
-			 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
-			 *    = (freq * 0.025862069
-			 */
-			r8 = 3 * freq / 116;	/* is equal to r8 = freq * 0.025862 */
-		}
-		b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
-		b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
-		b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
-		b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022)
-						& 0x000F) | (r8 << 4));
-		b43_radio_write16(dev, 0x002A, (r8 << 4));
-		b43_radio_write16(dev, 0x002B, (r8 << 4));
-		b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008)
-						& 0x00F0) | (r8 << 4));
-		b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029)
-						& 0xFF0F) | 0x00B0);
-		b43_radio_write16(dev, 0x0035, 0x00AA);
-		b43_radio_write16(dev, 0x0036, 0x0085);
-		b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A)
-						& 0xFF20) |
-				  freq_r3A_value(freq));
-		b43_radio_write16(dev, 0x003D,
-				  b43_radio_read16(dev, 0x003D) & 0x00FF);
-		b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081)
-						& 0xFF7F) | 0x0080);
-		b43_radio_write16(dev, 0x0035,
-				  b43_radio_read16(dev, 0x0035) & 0xFFEF);
-		b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035)
-						& 0xFFEF) | 0x0010);
-		b43_radio_set_tx_iq(dev);
-		//TODO: TSSI2dbm workaround
-		b43_phy_xmitpower(dev);	//FIXME correct?
-		break;
-	case B43_PHYTYPE_G:
-		if ((channel < 1) || (channel > 14)) {
-			err = -EINVAL;
-			goto out;
-		}
-
-		if (synthetic_pu_workaround)
-			b43_synth_pu_workaround(dev, channel);
-
-		b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
-
-		if (channel == 14) {
-			if (dev->dev->bus->sprom.country_code ==
-			    SSB_SPROM1CCODE_JAPAN)
-				b43_hf_write(dev,
-					     b43_hf_read(dev) & ~B43_HF_ACPR);
-			else
-				b43_hf_write(dev,
-					     b43_hf_read(dev) | B43_HF_ACPR);
-			b43_write16(dev, B43_MMIO_CHANNEL_EXT,
-				    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
-				    | (1 << 11));
+	B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7);	/* We had code for these revs here... */
+	if (phy->radio_rev == 8) {
+		b43_radio_write16(dev, 0x51, 0);
+		b43_radio_write16(dev, 0x52, 0x40);
+		b43_radio_write16(dev, 0x53, 0xB7);
+		b43_radio_write16(dev, 0x54, 0x98);
+		b43_radio_write16(dev, 0x5A, 0x88);
+		b43_radio_write16(dev, 0x5B, 0x6B);
+		b43_radio_write16(dev, 0x5C, 0x0F);
+		if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
+			b43_radio_write16(dev, 0x5D, 0xFA);
+			b43_radio_write16(dev, 0x5E, 0xD8);
 		} else {
-			b43_write16(dev, B43_MMIO_CHANNEL_EXT,
-				    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
-				    & 0xF7BF);
+			b43_radio_write16(dev, 0x5D, 0xF5);
+			b43_radio_write16(dev, 0x5E, 0xB8);
 		}
-		break;
-	case B43_PHYTYPE_N:
-		err = b43_nphy_selectchannel(dev, channel);
-		if (err)
-			goto out;
-		break;
-	default:
-		B43_WARN_ON(1);
+		b43_radio_write16(dev, 0x0073, 0x0003);
+		b43_radio_write16(dev, 0x007D, 0x00A8);
+		b43_radio_write16(dev, 0x007C, 0x0001);
+		b43_radio_write16(dev, 0x007E, 0x0008);
+	}
+	val = 0x1E1F;
+	for (offset = 0x0088; offset < 0x0098; offset++) {
+		b43_phy_write(dev, offset, val);
+		val -= 0x0202;
+	}
+	val = 0x3E3F;
+	for (offset = 0x0098; offset < 0x00A8; offset++) {
+		b43_phy_write(dev, offset, val);
+		val -= 0x0202;
+	}
+	val = 0x2120;
+	for (offset = 0x00A8; offset < 0x00C8; offset++) {
+		b43_phy_write(dev, offset, (val & 0x3F3F));
+		val += 0x0202;
+	}
+	if (phy->type == B43_PHYTYPE_G) {
+		b43_radio_write16(dev, 0x007A,
+				  b43_radio_read16(dev, 0x007A) | 0x0020);
+		b43_radio_write16(dev, 0x0051,
+				  b43_radio_read16(dev, 0x0051) | 0x0004);
+		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
+		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
+		b43_phy_write(dev, 0x5B, 0);
+		b43_phy_write(dev, 0x5C, 0);
 	}
 
-	phy->channel = channel;
-	/* Wait for the radio to tune to the channel and stabilize. */
-	msleep(8);
-out:
-	if (err) {
-		b43_shm_write16(dev, B43_SHM_SHARED,
-				B43_SHM_SH_CHAN, savedcookie);
+	old_channel = phy->channel;
+	if (old_channel >= 8)
+		b43_gphy_channel_switch(dev, 1, 0);
+	else
+		b43_gphy_channel_switch(dev, 13, 0);
+
+	b43_radio_write16(dev, 0x0050, 0x0020);
+	b43_radio_write16(dev, 0x0050, 0x0023);
+	udelay(40);
+	if (phy->radio_rev < 6 || phy->radio_rev == 8) {
+		b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
+					      | 0x0002));
+		b43_radio_write16(dev, 0x50, 0x20);
 	}
+	if (phy->radio_rev <= 2) {
+		b43_radio_write16(dev, 0x7C, 0x20);
+		b43_radio_write16(dev, 0x5A, 0x70);
+		b43_radio_write16(dev, 0x5B, 0x7B);
+		b43_radio_write16(dev, 0x5C, 0xB0);
+	}
+	b43_radio_write16(dev, 0x007A,
+			  (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);
+
+	b43_gphy_channel_switch(dev, old_channel, 0);
+
+	b43_phy_write(dev, 0x0014, 0x0200);
+	if (phy->radio_rev >= 6)
+		b43_phy_write(dev, 0x2A, 0x88C2);
+	else
+		b43_phy_write(dev, 0x2A, 0x8AC0);
+	b43_phy_write(dev, 0x0038, 0x0668);
+	b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
+	if (phy->radio_rev <= 5) {
+		b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
+					  & 0xFF80) | 0x0003);
+	}
+	if (phy->radio_rev <= 2)
+		b43_radio_write16(dev, 0x005D, 0x000D);
+
+	if (phy->analog == 4) {
+		b43_write16(dev, 0x3E4, 9);
+		b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
+			      & 0x0FFF);
+	} else {
+		b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
+			      | 0x0004);
+	}
+	if (phy->type == B43_PHYTYPE_B)
+		B43_WARN_ON(1);
+	else if (phy->type == B43_PHYTYPE_G)
+		b43_write16(dev, 0x03E6, 0x0);
+}
+
+static void b43_calc_loopback_gain(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	u16 backup_phy[16] = { 0 };
+	u16 backup_radio[3];
+	u16 backup_bband;
+	u16 i, j, loop_i_max;
+	u16 trsw_rx;
+	u16 loop1_outer_done, loop1_inner_done;
+
+	backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
+	backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
+	backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
+	backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
+	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
+		backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
+		backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
+	}
+	backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
+	backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
+	backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
+	backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
+	backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
+	backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
+	backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
+	backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
+	backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
+	backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
+	backup_bband = gphy->bbatt.att;
+	backup_radio[0] = b43_radio_read16(dev, 0x52);
+	backup_radio[1] = b43_radio_read16(dev, 0x43);
+	backup_radio[2] = b43_radio_read16(dev, 0x7A);
+
+	b43_phy_write(dev, B43_PHY_CRS0,
+		      b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
+	b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
+		      b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
+	b43_phy_write(dev, B43_PHY_RFOVER,
+		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL,
+		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
+	b43_phy_write(dev, B43_PHY_RFOVER,
+		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL,
+		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
+	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
+		b43_phy_write(dev, B43_PHY_ANALOGOVER,
+			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
+		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
+			      b43_phy_read(dev,
+					   B43_PHY_ANALOGOVERVAL) & 0xFFFE);
+		b43_phy_write(dev, B43_PHY_ANALOGOVER,
+			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
+		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
+			      b43_phy_read(dev,
+					   B43_PHY_ANALOGOVERVAL) & 0xFFFD);
+	}
+	b43_phy_write(dev, B43_PHY_RFOVER,
+		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL,
+		      b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
+	b43_phy_write(dev, B43_PHY_RFOVER,
+		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL,
+		      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
+		       & 0xFFCF) | 0x10);
+
+	b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
+	b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
+	b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);
+
+	b43_phy_write(dev, B43_PHY_CCK(0x0A),
+		      b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
+	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
+		b43_phy_write(dev, B43_PHY_ANALOGOVER,
+			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
+		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
+			      b43_phy_read(dev,
+					   B43_PHY_ANALOGOVERVAL) & 0xFFFB);
+	}
+	b43_phy_write(dev, B43_PHY_CCK(0x03),
+		      (b43_phy_read(dev, B43_PHY_CCK(0x03))
+		       & 0xFF9F) | 0x40);
+
+	if (phy->radio_rev == 8) {
+		b43_radio_write16(dev, 0x43, 0x000F);
+	} else {
+		b43_radio_write16(dev, 0x52, 0);
+		b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
+					      & 0xFFF0) | 0x9);
+	}
+	b43_gphy_set_baseband_attenuation(dev, 11);
+
+	if (phy->rev >= 3)
+		b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
+	else
+		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
+	b43_phy_write(dev, B43_PHY_LO_CTL, 0);
+
+	b43_phy_write(dev, B43_PHY_CCK(0x2B),
+		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
+		       & 0xFFC0) | 0x01);
+	b43_phy_write(dev, B43_PHY_CCK(0x2B),
+		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
+		       & 0xC0FF) | 0x800);
+
+	b43_phy_write(dev, B43_PHY_RFOVER,
+		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL,
+		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);
+
+	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
+		if (phy->rev >= 7) {
+			b43_phy_write(dev, B43_PHY_RFOVER,
+				      b43_phy_read(dev, B43_PHY_RFOVER)
+				      | 0x0800);
+			b43_phy_write(dev, B43_PHY_RFOVERVAL,
+				      b43_phy_read(dev, B43_PHY_RFOVERVAL)
+				      | 0x8000);
+		}
+	}
+	b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
+			  & 0x00F7);
+
+	j = 0;
+	loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
+	for (i = 0; i < loop_i_max; i++) {
+		for (j = 0; j < 16; j++) {
+			b43_radio_write16(dev, 0x43, i);
+			b43_phy_write(dev, B43_PHY_RFOVERVAL,
+				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
+				       & 0xF0FF) | (j << 8));
+			b43_phy_write(dev, B43_PHY_PGACTL,
+				      (b43_phy_read(dev, B43_PHY_PGACTL)
+				       & 0x0FFF) | 0xA000);
+			b43_phy_write(dev, B43_PHY_PGACTL,
+				      b43_phy_read(dev, B43_PHY_PGACTL)
+				      | 0xF000);
+			udelay(20);
+			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
+				goto exit_loop1;
+		}
+	}
+      exit_loop1:
+	loop1_outer_done = i;
+	loop1_inner_done = j;
+	if (j >= 8) {
+		b43_phy_write(dev, B43_PHY_RFOVERVAL,
+			      b43_phy_read(dev, B43_PHY_RFOVERVAL)
+			      | 0x30);
+		trsw_rx = 0x1B;
+		for (j = j - 8; j < 16; j++) {
+			b43_phy_write(dev, B43_PHY_RFOVERVAL,
+				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
+				       & 0xF0FF) | (j << 8));
+			b43_phy_write(dev, B43_PHY_PGACTL,
+				      (b43_phy_read(dev, B43_PHY_PGACTL)
+				       & 0x0FFF) | 0xA000);
+			b43_phy_write(dev, B43_PHY_PGACTL,
+				      b43_phy_read(dev, B43_PHY_PGACTL)
+				      | 0xF000);
+			udelay(20);
+			trsw_rx -= 3;
+			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
+				goto exit_loop2;
+		}
+	} else
+		trsw_rx = 0x18;
+      exit_loop2:
+
+	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
+		b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
+		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
+	}
+	b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
+	b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
+	b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
+	b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
+	b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
+	b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
+	b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
+	b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
+	b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);
+
+	b43_gphy_set_baseband_attenuation(dev, backup_bband);
+
+	b43_radio_write16(dev, 0x52, backup_radio[0]);
+	b43_radio_write16(dev, 0x43, backup_radio[1]);
+	b43_radio_write16(dev, 0x7A, backup_radio[2]);
+
+	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
+	udelay(10);
+	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
+	b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
+	b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
+	b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);
+
+	gphy->max_lb_gain =
+	    ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
+	gphy->trsw_rx_gain = trsw_rx * 2;
+}
+
+static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (!b43_has_hardware_pctl(dev)) {
+		b43_phy_write(dev, 0x047A, 0xC111);
+		return;
+	}
+
+	b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
+	b43_phy_write(dev, 0x002F, 0x0202);
+	b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
+	b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
+	if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
+		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
+					    & 0xFF0F) | 0x0010);
+		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
+			      | 0x8000);
+		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
+					    & 0xFFC0) | 0x0010);
+		b43_phy_write(dev, 0x002E, 0xC07F);
+		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+			      | 0x0400);
+	} else {
+		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+			      | 0x0200);
+		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+			      | 0x0400);
+		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
+			      & 0x7FFF);
+		b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
+			      & 0xFFFE);
+		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
+					    & 0xFFC0) | 0x0010);
+		b43_phy_write(dev, 0x002E, 0xC07F);
+		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
+					    & 0xFF0F) | 0x0010);
+	}
+}
+
+/* Hardware power control for G-PHY */
+static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+
+	if (!b43_has_hardware_pctl(dev)) {
+		/* No hardware power control */
+		b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
+		return;
+	}
+
+	b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
+		      | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
+	b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
+		      | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
+	b43_gphy_tssi_power_lt_init(dev);
+	b43_gphy_gain_lt_init(dev);
+	b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
+	b43_phy_write(dev, 0x0014, 0x0000);
+
+	B43_WARN_ON(phy->rev < 6);
+	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
+		      | 0x0800);
+	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
+		      & 0xFEFF);
+	b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
+		      & 0xFFBF);
+
+	b43_gphy_dc_lt_init(dev, 1);
+
+	/* Enable hardware pctl in firmware. */
+	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
+}
+
+/* Initialize B/G PHY power control */
+static void b43_phy_init_pctl(struct b43_wldev *dev)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_rfatt old_rfatt;
+	struct b43_bbatt old_bbatt;
+	u8 old_tx_control = 0;
+
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
+
+	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
+	    (bus->boardinfo.type == SSB_BOARD_BU4306))
+		return;
+
+	b43_phy_write(dev, 0x0028, 0x8018);
+
+	/* This does something with the Analog... */
+	b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
+		    & 0xFFDF);
+
+	if (!phy->gmode)
+		return;
+	b43_hardware_pctl_early_init(dev);
+	if (gphy->cur_idle_tssi == 0) {
+		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
+			b43_radio_write16(dev, 0x0076,
+					  (b43_radio_read16(dev, 0x0076)
+					   & 0x00F7) | 0x0084);
+		} else {
+			struct b43_rfatt rfatt;
+			struct b43_bbatt bbatt;
+
+			memcpy(&old_rfatt, &gphy->rfatt, sizeof(old_rfatt));
+			memcpy(&old_bbatt, &gphy->bbatt, sizeof(old_bbatt));
+			old_tx_control = gphy->tx_control;
+
+			bbatt.att = 11;
+			if (phy->radio_rev == 8) {
+				rfatt.att = 15;
+				rfatt.with_padmix = 1;
+			} else {
+				rfatt.att = 9;
+				rfatt.with_padmix = 0;
+			}
+			b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
+		}
+		b43_dummy_transmission(dev);
+		gphy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
+		if (B43_DEBUG) {
+			/* Current-Idle-TSSI sanity check. */
+			if (abs(gphy->cur_idle_tssi - gphy->tgt_idle_tssi) >= 20) {
+				b43dbg(dev->wl,
+				       "!WARNING! Idle-TSSI measurement failed "
+				       "(cur=%d, tgt=%d). Disabling TX power "
+				       "adjustment.\n", gphy->cur_idle_tssi,
+				       gphy->tgt_idle_tssi);
+				gphy->cur_idle_tssi = 0;
+			}
+		}
+		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
+			b43_radio_write16(dev, 0x0076,
+					  b43_radio_read16(dev, 0x0076)
+					  & 0xFF7B);
+		} else {
+			b43_set_txpower_g(dev, &old_bbatt,
+					  &old_rfatt, old_tx_control);
+		}
+	}
+	b43_hardware_pctl_init_gphy(dev);
+	b43_shm_clear_tssi(dev);
+}
+
+static void b43_phy_initg(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	u16 tmp;
+
+	if (phy->rev == 1)
+		b43_phy_initb5(dev);
+	else
+		b43_phy_initb6(dev);
+
+	if (phy->rev >= 2 || phy->gmode)
+		b43_phy_inita(dev);
+
+	if (phy->rev >= 2) {
+		b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
+		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
+	}
+	if (phy->rev == 2) {
+		b43_phy_write(dev, B43_PHY_RFOVER, 0);
+		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
+	}
+	if (phy->rev > 5) {
+		b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
+		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
+	}
+	if (phy->gmode || phy->rev >= 2) {
+		tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
+		tmp &= B43_PHYVER_VERSION;
+		if (tmp == 3 || tmp == 5) {
+			b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
+			b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
+		}
+		if (tmp == 5) {
+			b43_phy_write(dev, B43_PHY_OFDM(0xCC),
+				      (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
+				       & 0x00FF) | 0x1F00);
+		}
+	}
+	if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
+		b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
+	if (phy->radio_rev == 8) {
+		b43_phy_write(dev, B43_PHY_EXTG(0x01),
+			      b43_phy_read(dev, B43_PHY_EXTG(0x01))
+			      | 0x80);
+		b43_phy_write(dev, B43_PHY_OFDM(0x3E),
+			      b43_phy_read(dev, B43_PHY_OFDM(0x3E))
+			      | 0x4);
+	}
+	if (has_loopback_gain(phy))
+		b43_calc_loopback_gain(dev);
+
+	if (phy->radio_rev != 8) {
+		if (gphy->initval == 0xFFFF)
+			gphy->initval = b43_radio_init2050(dev);
+		else
+			b43_radio_write16(dev, 0x0078, gphy->initval);
+	}
+	b43_lo_g_init(dev);
+	if (has_tx_magnification(phy)) {
+		b43_radio_write16(dev, 0x52,
+				  (b43_radio_read16(dev, 0x52) & 0xFF00)
+				  | gphy->lo_control->tx_bias | gphy->
+				  lo_control->tx_magn);
+	} else {
+		b43_radio_write16(dev, 0x52,
+				  (b43_radio_read16(dev, 0x52) & 0xFFF0)
+				  | gphy->lo_control->tx_bias);
+	}
+	if (phy->rev >= 6) {
+		b43_phy_write(dev, B43_PHY_CCK(0x36),
+			      (b43_phy_read(dev, B43_PHY_CCK(0x36))
+			       & 0x0FFF) | (gphy->lo_control->
+					    tx_bias << 12));
+	}
+	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
+	else
+		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
+	if (phy->rev < 2)
+		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
+	else
+		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
+	if (phy->gmode || phy->rev >= 2) {
+		b43_lo_g_adjust(dev);
+		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
+	}
+
+	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+		/* The specs state to update the NRSSI LT with
+		 * the value 0x7FFFFFFF here. I think that is some weird
+		 * compiler optimization in the original driver.
+		 * Essentially, what we do here is resetting all NRSSI LT
+		 * entries to -32 (see the clamp_val() in nrssi_hw_update())
+		 */
+		b43_nrssi_hw_update(dev, 0xFFFF);	//FIXME?
+		b43_calc_nrssi_threshold(dev);
+	} else if (phy->gmode || phy->rev >= 2) {
+		if (gphy->nrssi[0] == -1000) {
+			B43_WARN_ON(gphy->nrssi[1] != -1000);
+			b43_calc_nrssi_slope(dev);
+		} else
+			b43_calc_nrssi_threshold(dev);
+	}
+	if (phy->radio_rev == 8)
+		b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
+	b43_phy_init_pctl(dev);
+	/* FIXME: The spec says the 0 in the following if-condition should be
+	   replaced with 'OFDM may not be used in the current locale',
+	   but OFDM is legal everywhere. */
+	if ((dev->dev->bus->chip_id == 0x4306
+	     && dev->dev->bus->chip_package == 2) || 0) {
+		b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
+			      & 0xBFFF);
+		b43_phy_write(dev, B43_PHY_OFDM(0xC3),
+			      b43_phy_read(dev, B43_PHY_OFDM(0xC3))
+			      & 0x7FFF);
+	}
+}
+
+void b43_gphy_channel_switch(struct b43_wldev *dev,
+			     unsigned int channel,
+			     bool synthetic_pu_workaround)
+{
+	if (synthetic_pu_workaround)
+		b43_synth_pu_workaround(dev, channel);
+
+	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
+
+	if (channel == 14) {
+		if (dev->dev->bus->sprom.country_code ==
+		    SSB_SPROM1CCODE_JAPAN)
+			b43_hf_write(dev,
+				     b43_hf_read(dev) & ~B43_HF_ACPR);
+		else
+			b43_hf_write(dev,
+				     b43_hf_read(dev) | B43_HF_ACPR);
+		b43_write16(dev, B43_MMIO_CHANNEL_EXT,
+			    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
+			    | (1 << 11));
+	} else {
+		b43_write16(dev, B43_MMIO_CHANNEL_EXT,
+			    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
+			    & 0xF7BF);
+	}
+}
+
+static void default_baseband_attenuation(struct b43_wldev *dev,
+					 struct b43_bbatt *bb)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
+		bb->att = 0;
+	else
+		bb->att = 2;
+}
+
+static void default_radio_attenuation(struct b43_wldev *dev,
+				      struct b43_rfatt *rf)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+	struct b43_phy *phy = &dev->phy;
+
+	rf->with_padmix = 0;
+
+	if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
+	    bus->boardinfo.type == SSB_BOARD_BCM4309G) {
+		if (bus->boardinfo.rev < 0x43) {
+			rf->att = 2;
+			return;
+		} else if (bus->boardinfo.rev < 0x51) {
+			rf->att = 3;
+			return;
+		}
+	}
+
+	if (phy->type == B43_PHYTYPE_A) {
+		rf->att = 0x60;
+		return;
+	}
+
+	switch (phy->radio_ver) {
+	case 0x2053:
+		switch (phy->radio_rev) {
+		case 1:
+			rf->att = 6;
+			return;
+		}
+		break;
+	case 0x2050:
+		switch (phy->radio_rev) {
+		case 0:
+			rf->att = 5;
+			return;
+		case 1:
+			if (phy->type == B43_PHYTYPE_G) {
+				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
+				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
+				    && bus->boardinfo.rev >= 30)
+					rf->att = 3;
+				else if (bus->boardinfo.vendor ==
+					 SSB_BOARDVENDOR_BCM
+					 && bus->boardinfo.type ==
+					 SSB_BOARD_BU4306)
+					rf->att = 3;
+				else
+					rf->att = 1;
+			} else {
+				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
+				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
+				    && bus->boardinfo.rev >= 30)
+					rf->att = 7;
+				else
+					rf->att = 6;
+			}
+			return;
+		case 2:
+			if (phy->type == B43_PHYTYPE_G) {
+				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
+				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
+				    && bus->boardinfo.rev >= 30)
+					rf->att = 3;
+				else if (bus->boardinfo.vendor ==
+					 SSB_BOARDVENDOR_BCM
+					 && bus->boardinfo.type ==
+					 SSB_BOARD_BU4306)
+					rf->att = 5;
+				else if (bus->chip_id == 0x4320)
+					rf->att = 4;
+				else
+					rf->att = 3;
+			} else
+				rf->att = 6;
+			return;
+		case 3:
+			rf->att = 5;
+			return;
+		case 4:
+		case 5:
+			rf->att = 1;
+			return;
+		case 6:
+		case 7:
+			rf->att = 5;
+			return;
+		case 8:
+			rf->att = 0xA;
+			rf->with_padmix = 1;
+			return;
+		case 9:
+		default:
+			rf->att = 5;
+			return;
+		}
+	}
+	rf->att = 5;
+}
+
+static u16 default_tx_control(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (phy->radio_ver != 0x2050)
+		return 0;
+	if (phy->radio_rev == 1)
+		return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
+	if (phy->radio_rev < 6)
+		return B43_TXCTL_PA2DB;
+	if (phy->radio_rev == 8)
+		return B43_TXCTL_TXMIX;
+	return 0;
+}
+
+static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	u8 ret = 0;
+	u16 saved, rssi, temp;
+	int i, j = 0;
+
+	saved = b43_phy_read(dev, 0x0403);
+	b43_switch_channel(dev, channel);
+	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
+	if (gphy->aci_hw_rssi)
+		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
+	else
+		rssi = saved & 0x3F;
+	/* clamp temp to signed 5bit */
+	if (rssi > 32)
+		rssi -= 64;
+	for (i = 0; i < 100; i++) {
+		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
+		if (temp > 32)
+			temp -= 64;
+		if (temp < rssi)
+			j++;
+		if (j >= 20)
+			ret = 1;
+	}
+	b43_phy_write(dev, 0x0403, saved);
+
+	return ret;
+}
+
+static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	u8 ret[13];
+	unsigned int channel = phy->channel;
+	unsigned int i, j, start, end;
+
+	if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
+		return 0;
+
+	b43_phy_lock(dev);
+	b43_radio_lock(dev);
+	b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
+	b43_phy_write(dev, B43_PHY_G_CRS,
+		      b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
+	b43_set_all_gains(dev, 3, 8, 1);
+
+	start = (channel - 5 > 0) ? channel - 5 : 1;
+	end = (channel + 5 < 14) ? channel + 5 : 13;
+
+	for (i = start; i <= end; i++) {
+		if (abs(channel - i) > 2)
+			ret[i - 1] = b43_gphy_aci_detect(dev, i);
+	}
+	b43_switch_channel(dev, channel);
+	b43_phy_write(dev, 0x0802,
+		      (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
+	b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
+	b43_phy_write(dev, B43_PHY_G_CRS,
+		      b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
+	b43_set_original_gains(dev);
+	for (i = 0; i < 13; i++) {
+		if (!ret[i])
+			continue;
+		end = (i + 5 < 13) ? i + 5 : 13;
+		for (j = i; j < end; j++)
+			ret[j] = 1;
+	}
+	b43_radio_unlock(dev);
+	b43_phy_unlock(dev);
+
+	return ret[channel - 1];
+}
+
+static s32 b43_tssi2dbm_ad(s32 num, s32 den)
+{
+	if (num < 0)
+		return num / den;
+	else
+		return (num + den / 2) / den;
+}
+
+static s8 b43_tssi2dbm_entry(s8 entry[], u8 index,
+			     s16 pab0, s16 pab1, s16 pab2)
+{
+	s32 m1, m2, f = 256, q, delta;
+	s8 i = 0;
+
+	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
+	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
+	do {
+		if (i > 15)
+			return -EINVAL;
+		q = b43_tssi2dbm_ad(f * 4096 -
+				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
+		delta = abs(q - f);
+		f = q;
+		i++;
+	} while (delta >= 2);
+	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
+	return 0;
+}
+
+u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
+				   s16 pab0, s16 pab1, s16 pab2)
+{
+	unsigned int i;
+	u8 *tab;
+	int err;
+
+	tab = kmalloc(64, GFP_KERNEL);
+	if (!tab) {
+		b43err(dev->wl, "Could not allocate memory "
+		       "for tssi2dbm table\n");
+		return NULL;
+	}
+	for (i = 0; i < 64; i++) {
+		err = b43_tssi2dbm_entry(tab, i, pab0, pab1, pab2);
+		if (err) {
+			b43err(dev->wl, "Could not generate "
+			       "tssi2dBm table\n");
+			kfree(tab);
+			return NULL;
+		}
+	}
+
+	return tab;
+}
+
+/* Initialise the TSSI->dBm lookup table */
+static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	s16 pab0, pab1, pab2;
+
+	pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
+	pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
+	pab2 = (s16) (dev->dev->bus->sprom.pa0b2);
+
+	B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) &&
+		    (phy->radio_ver != 0x2050)); /* Not supported anymore */
+
+	gphy->dyn_tssi_tbl = 0;
+
+	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
+	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
+		/* The pabX values are set in SPROM. Use them. */
+		if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
+		    (s8) dev->dev->bus->sprom.itssi_bg != -1) {
+			gphy->tgt_idle_tssi =
+				(s8) (dev->dev->bus->sprom.itssi_bg);
+		} else
+			gphy->tgt_idle_tssi = 62;
+		gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
+							       pab1, pab2);
+		if (!gphy->tssi2dbm)
+			return -ENOMEM;
+		gphy->dyn_tssi_tbl = 1;
+	} else {
+		/* pabX values not set in SPROM. */
+		gphy->tgt_idle_tssi = 52;
+		gphy->tssi2dbm = b43_tssi2dbm_g_table;
+	}
+
+	return 0;
+}
+
+static int b43_gphy_op_allocate(struct b43_wldev *dev)
+{
+	struct b43_phy_g *gphy;
+	struct b43_txpower_lo_control *lo;
+	int err;
+
+	gphy = kzalloc(sizeof(*gphy), GFP_KERNEL);
+	if (!gphy) {
+		err = -ENOMEM;
+		goto error;
+	}
+	dev->phy.g = gphy;
+
+	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
+	if (!lo) {
+		err = -ENOMEM;
+		goto err_free_gphy;
+	}
+	gphy->lo_control = lo;
+
+	err = b43_gphy_init_tssi2dbm_table(dev);
+	if (err)
+		goto err_free_lo;
+
+	return 0;
+
+err_free_lo:
+	kfree(lo);
+err_free_gphy:
+	kfree(gphy);
+error:
 	return err;
 }
 
-void b43_radio_turn_on(struct b43_wldev *dev)
+static void b43_gphy_op_prepare_structs(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	int err;
-	u8 channel;
+	struct b43_phy_g *gphy = phy->g;
+	const void *tssi2dbm;
+	int tgt_idle_tssi;
+	struct b43_txpower_lo_control *lo;
+	unsigned int i;
+
+	/* tssi2dbm table is constant, so it is initialized at alloc time.
+	 * Save a copy of the pointer. */
+	tssi2dbm = gphy->tssi2dbm;
+	tgt_idle_tssi = gphy->tgt_idle_tssi;
+	/* Save the LO pointer. */
+	lo = gphy->lo_control;
+
+	/* Zero out the whole PHY structure. */
+	memset(gphy, 0, sizeof(*gphy));
+
+	/* Restore pointers. */
+	gphy->tssi2dbm = tssi2dbm;
+	gphy->tgt_idle_tssi = tgt_idle_tssi;
+	gphy->lo_control = lo;
+
+	memset(gphy->minlowsig, 0xFF, sizeof(gphy->minlowsig));
+
+	/* NRSSI */
+	for (i = 0; i < ARRAY_SIZE(gphy->nrssi); i++)
+		gphy->nrssi[i] = -1000;
+	for (i = 0; i < ARRAY_SIZE(gphy->nrssi_lt); i++)
+		gphy->nrssi_lt[i] = i;
+
+	gphy->lofcal = 0xFFFF;
+	gphy->initval = 0xFFFF;
+
+	gphy->interfmode = B43_INTERFMODE_NONE;
+
+	/* OFDM-table address caching. */
+	gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;
+
+	gphy->average_tssi = 0xFF;
+
+	/* Local Oscillator structure */
+	lo->tx_bias = 0xFF;
+	INIT_LIST_HEAD(&lo->calib_list);
+}
+
+static void b43_gphy_op_free(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+
+	kfree(gphy->lo_control);
+
+	if (gphy->dyn_tssi_tbl)
+		kfree(gphy->tssi2dbm);
+	gphy->dyn_tssi_tbl = 0;
+	gphy->tssi2dbm = NULL;
+
+	kfree(gphy);
+	dev->phy.g = NULL;
+}
+
+static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	struct b43_txpower_lo_control *lo = gphy->lo_control;
+
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
+
+	default_baseband_attenuation(dev, &gphy->bbatt);
+	default_radio_attenuation(dev, &gphy->rfatt);
+	gphy->tx_control = (default_tx_control(dev) << 4);
+	generate_rfatt_list(dev, &lo->rfatt_list);
+	generate_bbatt_list(dev, &lo->bbatt_list);
+
+	/* Commit previous writes */
+	b43_read32(dev, B43_MMIO_MACCTL);
+
+	if (phy->rev == 1) {
+		/* Workaround: Temporarily disable gmode through the early init
+		 * phase, as the gmode stuff is not needed for PHY rev 1 */
+		phy->gmode = 0;
+		b43_wireless_core_reset(dev, 0);
+		b43_phy_initg(dev);
+		phy->gmode = 1;
+		b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
+	}
+
+	return 0;
+}
+
+static int b43_gphy_op_init(struct b43_wldev *dev)
+{
+	b43_phy_initg(dev);
+
+	return 0;
+}
+
+static void b43_gphy_op_exit(struct b43_wldev *dev)
+{
+	b43_lo_g_cleanup(dev);
+}
+
+static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg)
+{
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+	/* G-PHY needs 0x80 for read access. */
+	reg |= 0x80;
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
+}
+
+static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
+}
+
+static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
+{
+	return (dev->phy.rev >= 6);
+}
+
+static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
+					enum rfkill_state state)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	unsigned int channel;
 
 	might_sleep();
 
-	if (phy->radio_on)
-		return;
+	if (state == RFKILL_STATE_UNBLOCKED) {
+		/* Turn radio ON */
+		if (phy->radio_on)
+			return;
 
-	switch (phy->type) {
-	case B43_PHYTYPE_A:
-		b43_radio_write16(dev, 0x0004, 0x00C0);
-		b43_radio_write16(dev, 0x0005, 0x0008);
-		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
-		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
-		b43_radio_init2060(dev);
-		break;
-	case B43_PHYTYPE_B:
-	case B43_PHYTYPE_G:
 		b43_phy_write(dev, 0x0015, 0x8000);
 		b43_phy_write(dev, 0x0015, 0xCC00);
 		b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000));
-		if (phy->radio_off_context.valid) {
+		if (gphy->radio_off_context.valid) {
 			/* Restore the RFover values. */
 			b43_phy_write(dev, B43_PHY_RFOVER,
-				      phy->radio_off_context.rfover);
+				      gphy->radio_off_context.rfover);
 			b43_phy_write(dev, B43_PHY_RFOVERVAL,
-				      phy->radio_off_context.rfoverval);
-			phy->radio_off_context.valid = 0;
+				      gphy->radio_off_context.rfoverval);
+			gphy->radio_off_context.valid = 0;
 		}
 		channel = phy->channel;
-		err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_BG, 1);
-		err |= b43_radio_selectchannel(dev, channel, 0);
-		B43_WARN_ON(err);
-		break;
-	case B43_PHYTYPE_N:
-		b43_nphy_radio_turn_on(dev);
-		break;
-	default:
-		B43_WARN_ON(1);
-	}
-	phy->radio_on = 1;
-}
-
-void b43_radio_turn_off(struct b43_wldev *dev, bool force)
-{
-	struct b43_phy *phy = &dev->phy;
-
-	if (!phy->radio_on && !force)
-		return;
-
-	switch (phy->type) {
-	case B43_PHYTYPE_N:
-		b43_nphy_radio_turn_off(dev);
-		break;
-	case B43_PHYTYPE_A:
-		b43_radio_write16(dev, 0x0004, 0x00FF);
-		b43_radio_write16(dev, 0x0005, 0x00FB);
-		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
-		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
-		break;
-	case B43_PHYTYPE_G: {
+		b43_gphy_channel_switch(dev, 6, 1);
+		b43_gphy_channel_switch(dev, channel, 0);
+	} else {
+		/* Turn radio OFF */
 		u16 rfover, rfoverval;
 
 		rfover = b43_phy_read(dev, B43_PHY_RFOVER);
 		rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
-		if (!force) {
-			phy->radio_off_context.rfover = rfover;
-			phy->radio_off_context.rfoverval = rfoverval;
-			phy->radio_off_context.valid = 1;
-		}
+		gphy->radio_off_context.rfover = rfover;
+		gphy->radio_off_context.rfoverval = rfoverval;
+		gphy->radio_off_context.valid = 1;
 		b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
 		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
+	}
+}
+
+static int b43_gphy_op_switch_channel(struct b43_wldev *dev,
+				      unsigned int new_channel)
+{
+	if ((new_channel < 1) || (new_channel > 14))
+		return -EINVAL;
+	b43_gphy_channel_switch(dev, new_channel, 0);
+
+	return 0;
+}
+
+static unsigned int b43_gphy_op_get_default_chan(struct b43_wldev *dev)
+{
+	return 1; /* Default to channel 1 */
+}
+
+static void b43_gphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
+{
+	struct b43_phy *phy = &dev->phy;
+	u64 hf;
+	u16 tmp;
+	int autodiv = 0;
+
+	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
+		autodiv = 1;
+
+	hf = b43_hf_read(dev);
+	hf &= ~B43_HF_ANTDIVHELP;
+	b43_hf_write(dev, hf);
+
+	tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
+	tmp &= ~B43_PHY_BBANDCFG_RXANT;
+	tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
+			<< B43_PHY_BBANDCFG_RXANT_SHIFT;
+	b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
+
+	if (autodiv) {
+		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
+		if (antenna == B43_ANTENNA_AUTO0)
+			tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
+		else
+			tmp |= B43_PHY_ANTDWELL_AUTODIV1;
+		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
+	}
+	tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
+	if (autodiv)
+		tmp |= B43_PHY_ANTWRSETT_ARXDIV;
+	else
+		tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
+	b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
+	if (phy->rev >= 2) {
+		tmp = b43_phy_read(dev, B43_PHY_OFDM61);
+		tmp |= B43_PHY_OFDM61_10;
+		b43_phy_write(dev, B43_PHY_OFDM61, tmp);
+
+		tmp =
+		    b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
+		tmp = (tmp & 0xFF00) | 0x15;
+		b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
+			      tmp);
+
+		if (phy->rev == 2) {
+			b43_phy_write(dev, B43_PHY_ADIVRELATED,
+				      8);
+		} else {
+			tmp =
+			    b43_phy_read(dev,
+					 B43_PHY_ADIVRELATED);
+			tmp = (tmp & 0xFF00) | 8;
+			b43_phy_write(dev, B43_PHY_ADIVRELATED,
+				      tmp);
+		}
+	}
+	if (phy->rev >= 6)
+		b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
+
+	hf |= B43_HF_ANTDIVHELP;
+	b43_hf_write(dev, hf);
+}
+
+static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
+					 enum b43_interference_mitigation mode)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	int currentmode;
+
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
+	if ((phy->rev == 0) || (!phy->gmode))
+		return -ENODEV;
+
+	gphy->aci_wlan_automatic = 0;
+	switch (mode) {
+	case B43_INTERFMODE_AUTOWLAN:
+		gphy->aci_wlan_automatic = 1;
+		if (gphy->aci_enable)
+			mode = B43_INTERFMODE_MANUALWLAN;
+		else
+			mode = B43_INTERFMODE_NONE;
+		break;
+	case B43_INTERFMODE_NONE:
+	case B43_INTERFMODE_NONWLAN:
+	case B43_INTERFMODE_MANUALWLAN:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	currentmode = gphy->interfmode;
+	if (currentmode == mode)
+		return 0;
+	if (currentmode != B43_INTERFMODE_NONE)
+		b43_radio_interference_mitigation_disable(dev, currentmode);
+
+	if (mode == B43_INTERFMODE_NONE) {
+		gphy->aci_enable = 0;
+		gphy->aci_hw_rssi = 0;
+	} else
+		b43_radio_interference_mitigation_enable(dev, mode);
+	gphy->interfmode = mode;
+
+	return 0;
+}
+
+/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
+ * This function converts a TSSI value to dBm in Q5.2
+ */
+static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
+{
+	struct b43_phy_g *gphy = dev->phy.g;
+	s8 dbm;
+	s32 tmp;
+
+	tmp = (gphy->tgt_idle_tssi - gphy->cur_idle_tssi + tssi);
+	tmp = clamp_val(tmp, 0x00, 0x3F);
+	dbm = gphy->tssi2dbm[tmp];
+
+	return dbm;
+}
+
+static void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
+					    int *_bbatt, int *_rfatt)
+{
+	int rfatt = *_rfatt;
+	int bbatt = *_bbatt;
+	struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
+
+	/* Get baseband and radio attenuation values into their permitted ranges.
+	 * Radio attenuation affects power level 4 times as much as baseband. */
+
+	/* Range constants */
+	const int rf_min = lo->rfatt_list.min_val;
+	const int rf_max = lo->rfatt_list.max_val;
+	const int bb_min = lo->bbatt_list.min_val;
+	const int bb_max = lo->bbatt_list.max_val;
+
+	while (1) {
+		if (rfatt > rf_max && bbatt > bb_max - 4)
+			break;	/* Can not get it into ranges */
+		if (rfatt < rf_min && bbatt < bb_min + 4)
+			break;	/* Can not get it into ranges */
+		if (bbatt > bb_max && rfatt > rf_max - 1)
+			break;	/* Can not get it into ranges */
+		if (bbatt < bb_min && rfatt < rf_min + 1)
+			break;	/* Can not get it into ranges */
+
+		if (bbatt > bb_max) {
+			bbatt -= 4;
+			rfatt += 1;
+			continue;
+		}
+		if (bbatt < bb_min) {
+			bbatt += 4;
+			rfatt -= 1;
+			continue;
+		}
+		if (rfatt > rf_max) {
+			rfatt -= 1;
+			bbatt += 4;
+			continue;
+		}
+		if (rfatt < rf_min) {
+			rfatt += 1;
+			bbatt -= 4;
+			continue;
+		}
 		break;
 	}
-	default:
-		B43_WARN_ON(1);
-	}
-	phy->radio_on = 0;
+
+	*_rfatt = clamp_val(rfatt, rf_min, rf_max);
+	*_bbatt = clamp_val(bbatt, bb_min, bb_max);
 }
+
+static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	int rfatt, bbatt;
+	u8 tx_control;
+
+	spin_lock_irq(&dev->wl->irq_lock);
+
+	/* Calculate the new attenuation values. */
+	bbatt = gphy->bbatt.att;
+	bbatt += gphy->bbatt_delta;
+	rfatt = gphy->rfatt.att;
+	rfatt += gphy->rfatt_delta;
+
+	b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
+	tx_control = gphy->tx_control;
+	if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
+		if (rfatt <= 1) {
+			if (tx_control == 0) {
+				tx_control =
+				    B43_TXCTL_PA2DB |
+				    B43_TXCTL_TXMIX;
+				rfatt += 2;
+				bbatt += 2;
+			} else if (dev->dev->bus->sprom.
+				   boardflags_lo &
+				   B43_BFL_PACTRL) {
+				bbatt += 4 * (rfatt - 2);
+				rfatt = 2;
+			}
+		} else if (rfatt > 4 && tx_control) {
+			tx_control = 0;
+			if (bbatt < 3) {
+				rfatt -= 3;
+				bbatt += 2;
+			} else {
+				rfatt -= 2;
+				bbatt -= 2;
+			}
+		}
+	}
+	/* Save the control values */
+	gphy->tx_control = tx_control;
+	b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
+	gphy->rfatt.att = rfatt;
+	gphy->bbatt.att = bbatt;
+
+	/* We drop the lock early, so we can sleep during hardware
+	 * adjustment. Possible races with op_recalc_txpower are harmless,
+	 * as we will be called once again in case we raced. */
+	spin_unlock_irq(&dev->wl->irq_lock);
+
+	if (b43_debug(dev, B43_DBG_XMITPOWER))
+		b43dbg(dev->wl, "Adjusting TX power\n");
+
+	/* Adjust the hardware */
+	b43_phy_lock(dev);
+	b43_radio_lock(dev);
+	b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt,
+			  gphy->tx_control);
+	b43_radio_unlock(dev);
+	b43_phy_unlock(dev);
+}
+
+static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
+							bool ignore_tssi)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+	unsigned int average_tssi;
+	int cck_result, ofdm_result;
+	int estimated_pwr, desired_pwr, pwr_adjust;
+	int rfatt_delta, bbatt_delta;
+	unsigned int max_pwr;
+
+	/* First get the average TSSI */
+	cck_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_CCK);
+	ofdm_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_OFDM_G);
+	if ((cck_result < 0) && (ofdm_result < 0)) {
+		/* No TSSI information available */
+		if (!ignore_tssi)
+			goto no_adjustment_needed;
+		cck_result = 0;
+		ofdm_result = 0;
+	}
+	if (cck_result < 0)
+		average_tssi = ofdm_result;
+	else if (ofdm_result < 0)
+		average_tssi = cck_result;
+	else
+		average_tssi = (cck_result + ofdm_result) / 2;
+	/* Merge the average with the stored value. */
+	if (likely(gphy->average_tssi != 0xFF))
+		average_tssi = (average_tssi + gphy->average_tssi) / 2;
+	gphy->average_tssi = average_tssi;
+	B43_WARN_ON(average_tssi >= B43_TSSI_MAX);
+
+	/* Estimate the TX power emission based on the TSSI */
+	estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);
+
+	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
+	max_pwr = dev->dev->bus->sprom.maxpwr_bg;
+	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+		max_pwr -= 3; /* minus 0.75 */
+	if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
+		b43warn(dev->wl,
+			"Invalid max-TX-power value in SPROM.\n");
+		max_pwr = INT_TO_Q52(20); /* fake it */
+		dev->dev->bus->sprom.maxpwr_bg = max_pwr;
+	}
+
+	/* Get desired power (in Q5.2) */
+	if (phy->desired_txpower < 0)
+		desired_pwr = INT_TO_Q52(0);
+	else
+		desired_pwr = INT_TO_Q52(phy->desired_txpower);
+	/* And limit it. max_pwr already is Q5.2 */
+	desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
+	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
+		b43dbg(dev->wl,
+		       "[TX power]  current = " Q52_FMT
+		       " dBm,  desired = " Q52_FMT
+		       " dBm,  max = " Q52_FMT "\n",
+		       Q52_ARG(estimated_pwr),
+		       Q52_ARG(desired_pwr),
+		       Q52_ARG(max_pwr));
+	}
+
+	/* Calculate the adjustment delta. */
+	pwr_adjust = desired_pwr - estimated_pwr;
+	if (pwr_adjust == 0)
+		goto no_adjustment_needed;
+
+	/* RF attenuation delta. */
+	rfatt_delta = ((pwr_adjust + 7) / 8);
+	/* Lower attenuation => Bigger power output. Negate it. */
+	rfatt_delta = -rfatt_delta;
+
+	/* Baseband attenuation delta. */
+	bbatt_delta = pwr_adjust / 2;
+	/* Lower attenuation => Bigger power output. Negate it. */
+	bbatt_delta = -bbatt_delta;
+	/* RF attenuation affects the power level 4 times as much as
+	 * baseband attenuation. Subtract it. */
+	bbatt_delta -= 4 * rfatt_delta;
+
+	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
+		int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust;
+		b43dbg(dev->wl,
+		       "[TX power deltas]  %s" Q52_FMT " dBm   =>   "
+		       "bbatt-delta = %d,  rfatt-delta = %d\n",
+		       (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm),
+		       bbatt_delta, rfatt_delta);
+	}
+	/* So do we finally need to adjust something in hardware? */
+	if ((rfatt_delta == 0) && (bbatt_delta == 0))
+		goto no_adjustment_needed;
+
+	/* Save the deltas for later when we adjust the power. */
+	gphy->bbatt_delta = bbatt_delta;
+	gphy->rfatt_delta = rfatt_delta;
+
+	/* We need to adjust the TX power on the device. */
+	return B43_TXPWR_RES_NEED_ADJUST;
+
+no_adjustment_needed:
+	return B43_TXPWR_RES_DONE;
+}
+
+static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
+
+	//TODO: update_aci_moving_average
+	if (gphy->aci_enable && gphy->aci_wlan_automatic) {
+		b43_mac_suspend(dev);
+		if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) {
+			if (0 /*TODO: bunch of conditions */ ) {
+				phy->ops->interf_mitigation(dev,
+					B43_INTERFMODE_MANUALWLAN);
+			}
+		} else if (0 /*TODO*/) {
+			   if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev))
+				phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
+		}
+		b43_mac_enable(dev);
+	} else if (gphy->interfmode == B43_INTERFMODE_NONWLAN &&
+		   phy->rev == 1) {
+		//TODO: implement rev1 workaround
+	}
+	b43_lo_g_maintanance_work(dev);
+}
+
+static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
+		return;
+
+	b43_mac_suspend(dev);
+	b43_calc_nrssi_slope(dev);
+	if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
+		u8 old_chan = phy->channel;
+
+		/* VCO Calibration */
+		if (old_chan >= 8)
+			b43_switch_channel(dev, 1);
+		else
+			b43_switch_channel(dev, 13);
+		b43_switch_channel(dev, old_chan);
+	}
+	b43_mac_enable(dev);
+}
+
+const struct b43_phy_operations b43_phyops_g = {
+	.allocate		= b43_gphy_op_allocate,
+	.free			= b43_gphy_op_free,
+	.prepare_structs	= b43_gphy_op_prepare_structs,
+	.prepare_hardware	= b43_gphy_op_prepare_hardware,
+	.init			= b43_gphy_op_init,
+	.exit			= b43_gphy_op_exit,
+	.phy_read		= b43_gphy_op_read,
+	.phy_write		= b43_gphy_op_write,
+	.radio_read		= b43_gphy_op_radio_read,
+	.radio_write		= b43_gphy_op_radio_write,
+	.supports_hwpctl	= b43_gphy_op_supports_hwpctl,
+	.software_rfkill	= b43_gphy_op_software_rfkill,
+	.switch_analog		= b43_phyop_switch_analog_generic,
+	.switch_channel		= b43_gphy_op_switch_channel,
+	.get_default_chan	= b43_gphy_op_get_default_chan,
+	.set_rx_antenna		= b43_gphy_op_set_rx_antenna,
+	.interf_mitigation	= b43_gphy_op_interf_mitigation,
+	.recalc_txpower		= b43_gphy_op_recalc_txpower,
+	.adjust_txpower		= b43_gphy_op_adjust_txpower,
+	.pwork_15sec		= b43_gphy_op_pwork_15sec,
+	.pwork_60sec		= b43_gphy_op_pwork_60sec,
+};
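
The quarter-dB bookkeeping in b43_gphy_op_recalc_txpower() above is easier to
follow with concrete numbers. Below is a standalone sketch (not part of the
patch; the helper name and the sample power levels are invented) of the Q5.2
delta split, assuming one Q5.2 unit equals 0.25 dB:

	/*
	 * Illustration only: mirrors the rfatt/bbatt delta arithmetic from
	 * b43_gphy_op_recalc_txpower() with made-up input values.
	 */
	#include <stdio.h>

	static int q52_from_int(int dbm)
	{
		return dbm << 2;		/* 1 dBm == 4 units in Q5.2 */
	}

	int main(void)
	{
		int desired = q52_from_int(18);		/* 18.00 dBm */
		int estimated = q52_from_int(14) + 1;	/* 14.25 dBm */
		int pwr_adjust = desired - estimated;	/* +15 units == +3.75 dB */

		/* One RF attenuation step is worth roughly 2 dB (8 units),
		 * one baseband step roughly 0.5 dB (2 units); lowering
		 * attenuation raises the output power, hence the negation. */
		int rfatt_delta = -((pwr_adjust + 7) / 8);
		int bbatt_delta = -(pwr_adjust / 2) - 4 * rfatt_delta;

		printf("pwr_adjust=%d.%02d dB rfatt_delta=%d bbatt_delta=%d\n",
		       pwr_adjust / 4, (pwr_adjust & 3) * 25,
		       rfatt_delta, bbatt_delta);
		return 0;
	}

With these inputs the 3.75 dB shortfall becomes two fewer RF attenuation steps
and one extra baseband step, consistent with the 4:1 weighting noted in the
comments above.
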
diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/b43/phy_g.h
new file mode 100644
index 0000000..718947f
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_g.h
@@ -0,0 +1,209 @@
+#ifndef LINUX_B43_PHY_G_H_
+#define LINUX_B43_PHY_G_H_
+
+/* OFDM PHY registers are defined in the A-PHY header. */
+#include "phy_a.h"
+
+/* CCK (B) PHY Registers */
+#define B43_PHY_VERSION_CCK		B43_PHY_CCK(0x00)	/* Versioning register for B-PHY */
+#define B43_PHY_CCKBBANDCFG		B43_PHY_CCK(0x01)	/* Contains antenna 0/1 control bit */
+#define B43_PHY_PGACTL			B43_PHY_CCK(0x15)	/* PGA control */
+#define  B43_PHY_PGACTL_LPF		0x1000	/* Low pass filter (?) */
+#define  B43_PHY_PGACTL_LOWBANDW	0x0040	/* Low bandwidth flag */
+#define  B43_PHY_PGACTL_UNKNOWN		0xEFA0
+#define B43_PHY_FBCTL1			B43_PHY_CCK(0x18)	/* Frequency bandwidth control 1 */
+#define B43_PHY_ITSSI			B43_PHY_CCK(0x29)	/* Idle TSSI */
+#define B43_PHY_LO_LEAKAGE		B43_PHY_CCK(0x2D)	/* Measured LO leakage */
+#define B43_PHY_ENERGY			B43_PHY_CCK(0x33)	/* Energy */
+#define B43_PHY_SYNCCTL			B43_PHY_CCK(0x35)
+#define B43_PHY_FBCTL2			B43_PHY_CCK(0x38)	/* Frequency bandwidth control 2 */
+#define B43_PHY_DACCTL			B43_PHY_CCK(0x60)	/* DAC control */
+#define B43_PHY_RCCALOVER		B43_PHY_CCK(0x78)	/* RC calibration override */
+
+/* Extended G-PHY Registers */
+#define B43_PHY_CLASSCTL		B43_PHY_EXTG(0x02)	/* Classify control */
+#define B43_PHY_GTABCTL			B43_PHY_EXTG(0x03)	/* G-PHY table control (see below) */
+#define  B43_PHY_GTABOFF		0x03FF	/* G-PHY table offset (see below) */
+#define  B43_PHY_GTABNR			0xFC00	/* G-PHY table number (see below) */
+#define  B43_PHY_GTABNR_SHIFT		10
+#define B43_PHY_GTABDATA		B43_PHY_EXTG(0x04)	/* G-PHY table data */
+#define B43_PHY_LO_MASK			B43_PHY_EXTG(0x0F)	/* Local Oscillator control mask */
+#define B43_PHY_LO_CTL			B43_PHY_EXTG(0x10)	/* Local Oscillator control */
+#define B43_PHY_RFOVER			B43_PHY_EXTG(0x11)	/* RF override */
+#define B43_PHY_RFOVERVAL		B43_PHY_EXTG(0x12)	/* RF override value */
+#define  B43_PHY_RFOVERVAL_EXTLNA	0x8000
+#define  B43_PHY_RFOVERVAL_LNA		0x7000
+#define  B43_PHY_RFOVERVAL_LNA_SHIFT	12
+#define  B43_PHY_RFOVERVAL_PGA		0x0F00
+#define  B43_PHY_RFOVERVAL_PGA_SHIFT	8
+#define  B43_PHY_RFOVERVAL_UNK		0x0010	/* Unknown, always set. */
+#define  B43_PHY_RFOVERVAL_TRSWRX	0x00E0
+#define  B43_PHY_RFOVERVAL_BW		0x0003	/* Bandwidth flags */
+#define   B43_PHY_RFOVERVAL_BW_LPF	0x0001	/* Low Pass Filter */
+#define   B43_PHY_RFOVERVAL_BW_LBW	0x0002	/* Low Bandwidth (when set), high when unset */
+#define B43_PHY_ANALOGOVER		B43_PHY_EXTG(0x14)	/* Analog override */
+#define B43_PHY_ANALOGOVERVAL		B43_PHY_EXTG(0x15)	/* Analog override value */
+
+
+/*** G-PHY table numbers */
+#define B43_GTAB(number, offset)	(((number) << B43_PHY_GTABNR_SHIFT) | (offset))
+#define B43_GTAB_NRSSI			B43_GTAB(0x00, 0)
+#define B43_GTAB_TRFEMW			B43_GTAB(0x0C, 0x120)
+#define B43_GTAB_ORIGTR			B43_GTAB(0x2E, 0x298)
+
+u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset);
+void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value);
+
+
+/* Returns whether "TX Magnification" is enabled. */
+#define has_tx_magnification(phy) \
+	(((phy)->rev >= 2) &&			\
+	 ((phy)->radio_ver == 0x2050) &&	\
+	 ((phy)->radio_rev == 8))
+/* Card uses the loopback gain stuff */
+#define has_loopback_gain(phy) \
+	(((phy)->rev > 1) || ((phy)->gmode))
+
+/* Radio Attenuation (RF Attenuation) */
+struct b43_rfatt {
+	u8 att;			/* Attenuation value */
+	bool with_padmix;	/* Flag, PAD Mixer enabled. */
+};
+struct b43_rfatt_list {
+	/* Attenuation values list */
+	const struct b43_rfatt *list;
+	u8 len;
+	/* Minimum/Maximum attenuation values */
+	u8 min_val;
+	u8 max_val;
+};
+
+/* Returns true if the values are the same. */
+static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
+				     const struct b43_rfatt *b)
+{
+	return ((a->att == b->att) &&
+		(a->with_padmix == b->with_padmix));
+}
+
+/* Baseband Attenuation */
+struct b43_bbatt {
+	u8 att;			/* Attenuation value */
+};
+struct b43_bbatt_list {
+	/* Attenuation values list */
+	const struct b43_bbatt *list;
+	u8 len;
+	/* Minimum/Maximum attenuation values */
+	u8 min_val;
+	u8 max_val;
+};
+
+/* Returns true if the values are the same. */
+static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
+				     const struct b43_bbatt *b)
+{
+	return (a->att == b->att);
+}
+
+/* tx_control bits. */
+#define B43_TXCTL_PA3DB		0x40	/* PA Gain 3dB */
+#define B43_TXCTL_PA2DB		0x20	/* PA Gain 2dB */
+#define B43_TXCTL_TXMIX		0x10	/* TX Mixer Gain */
+
+struct b43_txpower_lo_control;
+
+struct b43_phy_g {
+	/* ACI (adjacent channel interference) flags. */
+	bool aci_enable;
+	bool aci_wlan_automatic;
+	bool aci_hw_rssi;
+
+	/* Radio switched on/off */
+	bool radio_on;
+	struct {
+		/* Values saved when turning the radio off.
+		 * They are needed when turning it on again. */
+		bool valid;
+		u16 rfover;
+		u16 rfoverval;
+	} radio_off_context;
+
+	u16 minlowsig[2];
+	u16 minlowsigpos[2];
+
+	/* Pointer to the table used to convert a
+	 * TSSI value to dBm-Q5.2 */
+	const s8 *tssi2dbm;
+	/* tssi2dbm is kmalloc()ed. Only used for free()ing. */
+	bool dyn_tssi_tbl;
+	/* Target idle TSSI */
+	int tgt_idle_tssi;
+	/* Current idle TSSI */
+	int cur_idle_tssi;
+	/* The current average TSSI.
+	 * Needs irq_lock, as it's updated in the IRQ path. */
+	u8 average_tssi;
+	/* Current TX power level attenuation control values */
+	struct b43_bbatt bbatt;
+	struct b43_rfatt rfatt;
+	u8 tx_control;		/* B43_TXCTL_XXX */
+	/* The calculated attenuation deltas that are used later
+	 * when adjusting the actual power output. */
+	int bbatt_delta;
+	int rfatt_delta;
+
+	/* Local Oscillator control values. */
+	struct b43_txpower_lo_control *lo_control;
+	/* Values from b43_calc_loopback_gain() */
+	s16 max_lb_gain;	/* Maximum Loopback gain in hdB */
+	s16 trsw_rx_gain;	/* TRSW RX gain in hdB */
+	s16 lna_lod_gain;	/* LNA lod */
+	s16 lna_gain;		/* LNA */
+	s16 pga_gain;		/* PGA */
+
+	/* Current Interference Mitigation mode */
+	int interfmode;
+	/* Stack of saved values from the Interference Mitigation code;
+	 * a pack/unpack sketch follows this structure. Each value in the
+	 * stack is laid out as follows:
+	 * bit 0-11:  offset
+	 * bit 12-15: register ID (0x1 PHY, 0x2 Radio, 0x3 ILT)
+	 * bit 16-31: value
+	 */
+#define B43_INTERFSTACK_SIZE	26
+	u32 interfstack[B43_INTERFSTACK_SIZE];	//FIXME: use a data structure
+
+	/* Saved values from the NRSSI Slope calculation */
+	s16 nrssi[2];
+	s32 nrssislope;
+	/* In memory nrssi lookup table. */
+	s8 nrssi_lt[64];
+
+	u16 lofcal;
+
+	u16 initval;		//FIXME rename?
+
+	/* The device does address auto increment for the OFDM tables.
+	 * We cache the previously used address here and omit the address
+	 * write on the next table access, if possible. */
+	u16 ofdmtab_addr; /* The address currently set in hardware. */
+	enum { /* The last data flow direction. */
+		B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
+		B43_OFDMTAB_DIRECTION_READ,
+		B43_OFDMTAB_DIRECTION_WRITE,
+	} ofdmtab_addr_direction;
+};
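/*
 * Editor sketch, not part of the patch: how an interfstack entry packs
 * and unpacks under the bit layout documented in struct b43_phy_g above.
 * The helper names are hypothetical and exist only for this illustration.
 */
static inline u32 example_interfstack_pack(u16 offset, u8 reg_id, u16 value)
{
	return ((u32)value << 16) | ((u32)(reg_id & 0xF) << 12) |
	       ((u32)offset & 0xFFF);
}

static inline void example_interfstack_unpack(u32 entry, u16 *offset,
					      u8 *reg_id, u16 *value)
{
	*offset = entry & 0xFFF;	/* bits 0-11 */
	*reg_id = (entry >> 12) & 0xF;	/* bits 12-15 */
	*value = entry >> 16;		/* bits 16-31 */
}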
+
+void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
+				       u16 baseband_attenuation);
+void b43_gphy_channel_switch(struct b43_wldev *dev,
+			     unsigned int channel,
+			     bool synthetic_pu_workaround);
+u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
+				   s16 pab0, s16 pab1, s16 pab2);
+
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_g;
+
+#endif /* LINUX_B43_PHY_G_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
new file mode 100644
index 0000000..c5d9dc3
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -0,0 +1,155 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11g LP-PHY driver
+
+  Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "phy_lp.h"
+#include "phy_common.h"
+
+
+static int b43_lpphy_op_allocate(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy;
+
+	lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL);
+	if (!lpphy)
+		return -ENOMEM;
+	dev->phy.lp = lpphy;
+
+	return 0;
+}
+
+static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_lp *lpphy = phy->lp;
+
+	memset(lpphy, 0, sizeof(*lpphy));
+
+	//TODO
+}
+
+static void b43_lpphy_op_free(struct b43_wldev *dev)
+{
+	struct b43_phy_lp *lpphy = dev->phy.lp;
+
+	kfree(lpphy);
+	dev->phy.lp = NULL;
+}
+
+static int b43_lpphy_op_init(struct b43_wldev *dev)
+{
+	//TODO
+
+	return 0;
+}
+
+static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
+{
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+	/* LP-PHY needs a special bit set for read access */
+	if (dev->phy.rev < 2) {
+		if (reg != 0x4001)
+			reg |= 0x100;
+	} else
+		reg |= 0x200;
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
+}
+
+static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
+}
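/*
 * Editor sketch, not part of the patch: probing the LP radio ID through
 * the read op above. Mapping PHY revision 0/1 to the 2062 radio and
 * revision 2+ to the 2063 is an assumption made for this illustration;
 * the register constants come from phy_lp.h.
 */
static u16 example_lpphy_read_radio_id(struct b43_wldev *dev)
{
	u16 reg;

	if (dev->phy.rev < 2)
		reg = B2062_S_RADIO_ID_CODE;	/* 2062 radio, south bank */
	else
		reg = B2063_RADIO_ID_CODE;	/* 2063 radio */

	/* The op adds the rev-dependent read-access bit by itself. */
	return b43_lpphy_op_radio_read(dev, reg);
}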
+
+static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
+					 enum rfkill_state state)
+{
+	//TODO
+}
+
+static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
+				       unsigned int new_channel)
+{
+	//TODO
+	return 0;
+}
+
+static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
+{
+	return 1; /* Default to channel 1 */
+}
+
+static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
+{
+	//TODO
+}
+
+static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
+{
+	//TODO
+}
+
+static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
+							 bool ignore_tssi)
+{
+	//TODO
+	return B43_TXPWR_RES_DONE;
+}
+
+
+const struct b43_phy_operations b43_phyops_lp = {
+	.allocate		= b43_lpphy_op_allocate,
+	.free			= b43_lpphy_op_free,
+	.prepare_structs	= b43_lpphy_op_prepare_structs,
+	.init			= b43_lpphy_op_init,
+	.phy_read		= b43_lpphy_op_read,
+	.phy_write		= b43_lpphy_op_write,
+	.radio_read		= b43_lpphy_op_radio_read,
+	.radio_write		= b43_lpphy_op_radio_write,
+	.software_rfkill	= b43_lpphy_op_software_rfkill,
+	.switch_analog		= b43_phyop_switch_analog_generic,
+	.switch_channel		= b43_lpphy_op_switch_channel,
+	.get_default_chan	= b43_lpphy_op_get_default_chan,
+	.set_rx_antenna		= b43_lpphy_op_set_rx_antenna,
+	.recalc_txpower		= b43_lpphy_op_recalc_txpower,
+	.adjust_txpower		= b43_lpphy_op_adjust_txpower,
+};
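/*
 * Editor sketch, not part of the patch: once this ops table is registered,
 * the core is expected to reach the callbacks through dev->phy.ops rather
 * than calling them directly. The dispatch helpers themselves live in
 * phy_common.c, outside this hunk; the function below is illustrative only.
 */
static u16 example_lpphy_read_via_ops(struct b43_wldev *dev, u16 reg)
{
	B43_WARN_ON(!dev->phy.ops || !dev->phy.ops->phy_read);
	return dev->phy.ops->phy_read(dev, reg);
}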
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
new file mode 100644
index 0000000..b0b5357
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -0,0 +1,540 @@
+#ifndef LINUX_B43_PHY_LP_H_
+#define LINUX_B43_PHY_LP_H_
+
+/* Definitions for the LP-PHY */
+
+
+
+
+#define B43_LP_RADIO(radio_reg)			(radio_reg)
+#define B43_LP_NORTH(radio_reg)			B43_LP_RADIO(radio_reg)
+#define B43_LP_SOUTH(radio_reg)			B43_LP_RADIO((radio_reg) | 0x4000)
+
+
+/*** Broadcom 2062 NORTH radio registers ***/
+#define B2062_N_COMM1				B43_LP_NORTH(0x000) /* Common 01 (north) */
+#define B2062_N_COMM2				B43_LP_NORTH(0x002) /* Common 02 (north) */
+#define B2062_N_COMM3				B43_LP_NORTH(0x003) /* Common 03 (north) */
+#define B2062_N_COMM4				B43_LP_NORTH(0x004) /* Common 04 (north) */
+#define B2062_N_COMM5				B43_LP_NORTH(0x005) /* Common 05 (north) */
+#define B2062_N_COMM6				B43_LP_NORTH(0x006) /* Common 06 (north) */
+#define B2062_N_COMM7				B43_LP_NORTH(0x007) /* Common 07 (north) */
+#define B2062_N_COMM8				B43_LP_NORTH(0x008) /* Common 08 (north) */
+#define B2062_N_COMM9				B43_LP_NORTH(0x009) /* Common 09 (north) */
+#define B2062_N_COMM10				B43_LP_NORTH(0x00A) /* Common 10 (north) */
+#define B2062_N_COMM11				B43_LP_NORTH(0x00B) /* Common 11 (north) */
+#define B2062_N_COMM12				B43_LP_NORTH(0x00C) /* Common 12 (north) */
+#define B2062_N_COMM13				B43_LP_NORTH(0x00D) /* Common 13 (north) */
+#define B2062_N_COMM14				B43_LP_NORTH(0x00E) /* Common 14 (north) */
+#define B2062_N_COMM15				B43_LP_NORTH(0x00F) /* Common 15 (north) */
+#define B2062_N_PDN_CTL0			B43_LP_NORTH(0x010) /* PDN Control 0 (north) */
+#define B2062_N_PDN_CTL1			B43_LP_NORTH(0x011) /* PDN Control 1 (north) */
+#define B2062_N_PDN_CTL2			B43_LP_NORTH(0x012) /* PDN Control 2 (north) */
+#define B2062_N_PDN_CTL3			B43_LP_NORTH(0x013) /* PDN Control 3 (north) */
+#define B2062_N_PDN_CTL4			B43_LP_NORTH(0x014) /* PDN Control 4 (north) */
+#define B2062_N_GEN_CTL0			B43_LP_NORTH(0x015) /* GEN Control 0 (north) */
+#define B2062_N_IQ_CALIB			B43_LP_NORTH(0x016) /* IQ Calibration (north) */
+#define B2062_N_LGENC				B43_LP_NORTH(0x017) /* LGENC (north) */
+#define B2062_N_LGENA_LPF			B43_LP_NORTH(0x018) /* LGENA LPF (north) */
+#define B2062_N_LGENA_BIAS0			B43_LP_NORTH(0x019) /* LGENA Bias 0 (north) */
+#define B2062_N_LGNEA_BIAS1			B43_LP_NORTH(0x01A) /* LGNEA Bias 1 (north) */
+#define B2062_N_LGENA_CTL0			B43_LP_NORTH(0x01B) /* LGENA Control 0 (north) */
+#define B2062_N_LGENA_CTL1			B43_LP_NORTH(0x01C) /* LGENA Control 1 (north) */
+#define B2062_N_LGENA_CTL2			B43_LP_NORTH(0x01D) /* LGENA Control 2 (north) */
+#define B2062_N_LGENA_TUNE0			B43_LP_NORTH(0x01E) /* LGENA Tune 0 (north) */
+#define B2062_N_LGENA_TUNE1			B43_LP_NORTH(0x01F) /* LGENA Tune 1 (north) */
+#define B2062_N_LGENA_TUNE2			B43_LP_NORTH(0x020) /* LGENA Tune 2 (north) */
+#define B2062_N_LGENA_TUNE3			B43_LP_NORTH(0x021) /* LGENA Tune 3 (north) */
+#define B2062_N_LGENA_CTL3			B43_LP_NORTH(0x022) /* LGENA Control 3 (north) */
+#define B2062_N_LGENA_CTL4			B43_LP_NORTH(0x023) /* LGENA Control 4 (north) */
+#define B2062_N_LGENA_CTL5			B43_LP_NORTH(0x024) /* LGENA Control 5 (north) */
+#define B2062_N_LGENA_CTL6			B43_LP_NORTH(0x025) /* LGENA Control 6 (north) */
+#define B2062_N_LGENA_CTL7			B43_LP_NORTH(0x026) /* LGENA Control 7 (north) */
+#define B2062_N_RXA_CTL0			B43_LP_NORTH(0x027) /* RXA Control 0 (north) */
+#define B2062_N_RXA_CTL1			B43_LP_NORTH(0x028) /* RXA Control 1 (north) */
+#define B2062_N_RXA_CTL2			B43_LP_NORTH(0x029) /* RXA Control 2 (north) */
+#define B2062_N_RXA_CTL3			B43_LP_NORTH(0x02A) /* RXA Control 3 (north) */
+#define B2062_N_RXA_CTL4			B43_LP_NORTH(0x02B) /* RXA Control 4 (north) */
+#define B2062_N_RXA_CTL5			B43_LP_NORTH(0x02C) /* RXA Control 5 (north) */
+#define B2062_N_RXA_CTL6			B43_LP_NORTH(0x02D) /* RXA Control 6 (north) */
+#define B2062_N_RXA_CTL7			B43_LP_NORTH(0x02E) /* RXA Control 7 (north) */
+#define B2062_N_RXBB_CTL0			B43_LP_NORTH(0x02F) /* RXBB Control 0 (north) */
+#define B2062_N_RXBB_CTL1			B43_LP_NORTH(0x030) /* RXBB Control 1 (north) */
+#define B2062_N_RXBB_CTL2			B43_LP_NORTH(0x031) /* RXBB Control 2 (north) */
+#define B2062_N_RXBB_GAIN0			B43_LP_NORTH(0x032) /* RXBB Gain 0 (north) */
+#define B2062_N_RXBB_GAIN1			B43_LP_NORTH(0x033) /* RXBB Gain 1 (north) */
+#define B2062_N_RXBB_GAIN2			B43_LP_NORTH(0x034) /* RXBB Gain 2 (north) */
+#define B2062_N_RXBB_GAIN3			B43_LP_NORTH(0x035) /* RXBB Gain 3 (north) */
+#define B2062_N_RXBB_RSSI0			B43_LP_NORTH(0x036) /* RXBB RSSI 0 (north) */
+#define B2062_N_RXBB_RSSI1			B43_LP_NORTH(0x037) /* RXBB RSSI 1 (north) */
+#define B2062_N_RXBB_CALIB0			B43_LP_NORTH(0x038) /* RXBB Calibration0 (north) */
+#define B2062_N_RXBB_CALIB1			B43_LP_NORTH(0x039) /* RXBB Calibration1 (north) */
+#define B2062_N_RXBB_CALIB2			B43_LP_NORTH(0x03A) /* RXBB Calibration2 (north) */
+#define B2062_N_RXBB_BIAS0			B43_LP_NORTH(0x03B) /* RXBB Bias 0 (north) */
+#define B2062_N_RXBB_BIAS1			B43_LP_NORTH(0x03C) /* RXBB Bias 1 (north) */
+#define B2062_N_RXBB_BIAS2			B43_LP_NORTH(0x03D) /* RXBB Bias 2 (north) */
+#define B2062_N_RXBB_BIAS3			B43_LP_NORTH(0x03E) /* RXBB Bias 3 (north) */
+#define B2062_N_RXBB_BIAS4			B43_LP_NORTH(0x03F) /* RXBB Bias 4 (north) */
+#define B2062_N_RXBB_BIAS5			B43_LP_NORTH(0x040) /* RXBB Bias 5 (north) */
+#define B2062_N_RXBB_RSSI2			B43_LP_NORTH(0x041) /* RXBB RSSI 2 (north) */
+#define B2062_N_RXBB_RSSI3			B43_LP_NORTH(0x042) /* RXBB RSSI 3 (north) */
+#define B2062_N_RXBB_RSSI4			B43_LP_NORTH(0x043) /* RXBB RSSI 4 (north) */
+#define B2062_N_RXBB_RSSI5			B43_LP_NORTH(0x044) /* RXBB RSSI 5 (north) */
+#define B2062_N_TX_CTL0				B43_LP_NORTH(0x045) /* TX Control 0 (north) */
+#define B2062_N_TX_CTL1				B43_LP_NORTH(0x046) /* TX Control 1 (north) */
+#define B2062_N_TX_CTL2				B43_LP_NORTH(0x047) /* TX Control 2 (north) */
+#define B2062_N_TX_CTL3				B43_LP_NORTH(0x048) /* TX Control 3 (north) */
+#define B2062_N_TX_CTL4				B43_LP_NORTH(0x049) /* TX Control 4 (north) */
+#define B2062_N_TX_CTL5				B43_LP_NORTH(0x04A) /* TX Control 5 (north) */
+#define B2062_N_TX_CTL6				B43_LP_NORTH(0x04B) /* TX Control 6 (north) */
+#define B2062_N_TX_CTL7				B43_LP_NORTH(0x04C) /* TX Control 7 (north) */
+#define B2062_N_TX_CTL8				B43_LP_NORTH(0x04D) /* TX Control 8 (north) */
+#define B2062_N_TX_CTL9				B43_LP_NORTH(0x04E) /* TX Control 9 (north) */
+#define B2062_N_TX_CTL_A			B43_LP_NORTH(0x04F) /* TX Control A (north) */
+#define B2062_N_TX_GC2G				B43_LP_NORTH(0x050) /* TX GC2G (north) */
+#define B2062_N_TX_GC5G				B43_LP_NORTH(0x051) /* TX GC5G (north) */
+#define B2062_N_TX_TUNE				B43_LP_NORTH(0x052) /* TX Tune (north) */
+#define B2062_N_TX_PAD				B43_LP_NORTH(0x053) /* TX PAD (north) */
+#define B2062_N_TX_PGA				B43_LP_NORTH(0x054) /* TX PGA (north) */
+#define B2062_N_TX_PADAUX			B43_LP_NORTH(0x055) /* TX PADAUX (north) */
+#define B2062_N_TX_PGAAUX			B43_LP_NORTH(0x056) /* TX PGAAUX (north) */
+#define B2062_N_TSSI_CTL0			B43_LP_NORTH(0x057) /* TSSI Control 0 (north) */
+#define B2062_N_TSSI_CTL1			B43_LP_NORTH(0x058) /* TSSI Control 1 (north) */
+#define B2062_N_TSSI_CTL2			B43_LP_NORTH(0x059) /* TSSI Control 2 (north) */
+#define B2062_N_IQ_CALIB_CTL0			B43_LP_NORTH(0x05A) /* IQ Calibration Control 0 (north) */
+#define B2062_N_IQ_CALIB_CTL1			B43_LP_NORTH(0x05B) /* IQ Calibration Control 1 (north) */
+#define B2062_N_IQ_CALIB_CTL2			B43_LP_NORTH(0x05C) /* IQ Calibration Control 2 (north) */
+#define B2062_N_CALIB_TS			B43_LP_NORTH(0x05D) /* Calibration TS (north) */
+#define B2062_N_CALIB_CTL0			B43_LP_NORTH(0x05E) /* Calibration Control 0 (north) */
+#define B2062_N_CALIB_CTL1			B43_LP_NORTH(0x05F) /* Calibration Control 1 (north) */
+#define B2062_N_CALIB_CTL2			B43_LP_NORTH(0x060) /* Calibration Control 2 (north) */
+#define B2062_N_CALIB_CTL3			B43_LP_NORTH(0x061) /* Calibration Control 3 (north) */
+#define B2062_N_CALIB_CTL4			B43_LP_NORTH(0x062) /* Calibration Control 4 (north) */
+#define B2062_N_CALIB_DBG0			B43_LP_NORTH(0x063) /* Calibration Debug 0 (north) */
+#define B2062_N_CALIB_DBG1			B43_LP_NORTH(0x064) /* Calibration Debug 1 (north) */
+#define B2062_N_CALIB_DBG2			B43_LP_NORTH(0x065) /* Calibration Debug 2 (north) */
+#define B2062_N_CALIB_DBG3			B43_LP_NORTH(0x066) /* Calibration Debug 3 (north) */
+#define B2062_N_PSENSE_CTL0			B43_LP_NORTH(0x069) /* PSENSE Control 0 (north) */
+#define B2062_N_PSENSE_CTL1			B43_LP_NORTH(0x06A) /* PSENSE Control 1 (north) */
+#define B2062_N_PSENSE_CTL2			B43_LP_NORTH(0x06B) /* PSENSE Control 2 (north) */
+#define B2062_N_TEST_BUF0			B43_LP_NORTH(0x06C) /* TEST BUF0 (north) */
+
+/*** Broadcom 2062 SOUTH radio registers ***/
+#define B2062_S_COMM1				B43_LP_SOUTH(0x000) /* Common 01 (south) */
+#define B2062_S_RADIO_ID_CODE			B43_LP_SOUTH(0x001) /* Radio ID code (south) */
+#define B2062_S_COMM2				B43_LP_SOUTH(0x002) /* Common 02 (south) */
+#define B2062_S_COMM3				B43_LP_SOUTH(0x003) /* Common 03 (south) */
+#define B2062_S_COMM4				B43_LP_SOUTH(0x004) /* Common 04 (south) */
+#define B2062_S_COMM5				B43_LP_SOUTH(0x005) /* Common 05 (south) */
+#define B2062_S_COMM6				B43_LP_SOUTH(0x006) /* Common 06 (south) */
+#define B2062_S_COMM7				B43_LP_SOUTH(0x007) /* Common 07 (south) */
+#define B2062_S_COMM8				B43_LP_SOUTH(0x008) /* Common 08 (south) */
+#define B2062_S_COMM9				B43_LP_SOUTH(0x009) /* Common 09 (south) */
+#define B2062_S_COMM10				B43_LP_SOUTH(0x00A) /* Common 10 (south) */
+#define B2062_S_COMM11				B43_LP_SOUTH(0x00B) /* Common 11 (south) */
+#define B2062_S_COMM12				B43_LP_SOUTH(0x00C) /* Common 12 (south) */
+#define B2062_S_COMM13				B43_LP_SOUTH(0x00D) /* Common 13 (south) */
+#define B2062_S_COMM14				B43_LP_SOUTH(0x00E) /* Common 14 (south) */
+#define B2062_S_COMM15				B43_LP_SOUTH(0x00F) /* Common 15 (south) */
+#define B2062_S_PDS_CTL0			B43_LP_SOUTH(0x010) /* PDS Control 0 (south) */
+#define B2062_S_PDS_CTL1			B43_LP_SOUTH(0x011) /* PDS Control 1 (south) */
+#define B2062_S_PDS_CTL2			B43_LP_SOUTH(0x012) /* PDS Control 2 (south) */
+#define B2062_S_PDS_CTL3			B43_LP_SOUTH(0x013) /* PDS Control 3 (south) */
+#define B2062_S_BG_CTL0				B43_LP_SOUTH(0x014) /* BG Control 0 (south) */
+#define B2062_S_BG_CTL1				B43_LP_SOUTH(0x015) /* BG Control 1 (south) */
+#define B2062_S_BG_CTL2				B43_LP_SOUTH(0x016) /* BG Control 2 (south) */
+#define B2062_S_LGENG_CTL0			B43_LP_SOUTH(0x017) /* LGENG Control 00 (south) */
+#define B2062_S_LGENG_CTL1			B43_LP_SOUTH(0x018) /* LGENG Control 01 (south) */
+#define B2062_S_LGENG_CTL2			B43_LP_SOUTH(0x019) /* LGENG Control 02 (south) */
+#define B2062_S_LGENG_CTL3			B43_LP_SOUTH(0x01A) /* LGENG Control 03 (south) */
+#define B2062_S_LGENG_CTL4			B43_LP_SOUTH(0x01B) /* LGENG Control 04 (south) */
+#define B2062_S_LGENG_CTL5			B43_LP_SOUTH(0x01C) /* LGENG Control 05 (south) */
+#define B2062_S_LGENG_CTL6			B43_LP_SOUTH(0x01D) /* LGENG Control 06 (south) */
+#define B2062_S_LGENG_CTL7			B43_LP_SOUTH(0x01E) /* LGENG Control 07 (south) */
+#define B2062_S_LGENG_CTL8			B43_LP_SOUTH(0x01F) /* LGENG Control 08 (south) */
+#define B2062_S_LGENG_CTL9			B43_LP_SOUTH(0x020) /* LGENG Control 09 (south) */
+#define B2062_S_LGENG_CTL10			B43_LP_SOUTH(0x021) /* LGENG Control 10 (south) */
+#define B2062_S_LGENG_CTL11			B43_LP_SOUTH(0x022) /* LGENG Control 11 (south) */
+#define B2062_S_REFPLL_CTL0			B43_LP_SOUTH(0x023) /* REFPLL Control 00 (south) */
+#define B2062_S_REFPLL_CTL1			B43_LP_SOUTH(0x024) /* REFPLL Control 01 (south) */
+#define B2062_S_REFPLL_CTL2			B43_LP_SOUTH(0x025) /* REFPLL Control 02 (south) */
+#define B2062_S_REFPLL_CTL3			B43_LP_SOUTH(0x026) /* REFPLL Control 03 (south) */
+#define B2062_S_REFPLL_CTL4			B43_LP_SOUTH(0x027) /* REFPLL Control 04 (south) */
+#define B2062_S_REFPLL_CTL5			B43_LP_SOUTH(0x028) /* REFPLL Control 05 (south) */
+#define B2062_S_REFPLL_CTL6			B43_LP_SOUTH(0x029) /* REFPLL Control 06 (south) */
+#define B2062_S_REFPLL_CTL7			B43_LP_SOUTH(0x02A) /* REFPLL Control 07 (south) */
+#define B2062_S_REFPLL_CTL8			B43_LP_SOUTH(0x02B) /* REFPLL Control 08 (south) */
+#define B2062_S_REFPLL_CTL9			B43_LP_SOUTH(0x02C) /* REFPLL Control 09 (south) */
+#define B2062_S_REFPLL_CTL10			B43_LP_SOUTH(0x02D) /* REFPLL Control 10 (south) */
+#define B2062_S_REFPLL_CTL11			B43_LP_SOUTH(0x02E) /* REFPLL Control 11 (south) */
+#define B2062_S_REFPLL_CTL12			B43_LP_SOUTH(0x02F) /* REFPLL Control 12 (south) */
+#define B2062_S_REFPLL_CTL13			B43_LP_SOUTH(0x030) /* REFPLL Control 13 (south) */
+#define B2062_S_REFPLL_CTL14			B43_LP_SOUTH(0x031) /* REFPLL Control 14 (south) */
+#define B2062_S_REFPLL_CTL15			B43_LP_SOUTH(0x032) /* REFPLL Control 15 (south) */
+#define B2062_S_REFPLL_CTL16			B43_LP_SOUTH(0x033) /* REFPLL Control 16 (south) */
+#define B2062_S_RFPLL_CTL0			B43_LP_SOUTH(0x034) /* RFPLL Control 00 (south) */
+#define B2062_S_RFPLL_CTL1			B43_LP_SOUTH(0x035) /* RFPLL Control 01 (south) */
+#define B2062_S_RFPLL_CTL2			B43_LP_SOUTH(0x036) /* RFPLL Control 02 (south) */
+#define B2062_S_RFPLL_CTL3			B43_LP_SOUTH(0x037) /* RFPLL Control 03 (south) */
+#define B2062_S_RFPLL_CTL4			B43_LP_SOUTH(0x038) /* RFPLL Control 04 (south) */
+#define B2062_S_RFPLL_CTL5			B43_LP_SOUTH(0x039) /* RFPLL Control 05 (south) */
+#define B2062_S_RFPLL_CTL6			B43_LP_SOUTH(0x03A) /* RFPLL Control 06 (south) */
+#define B2062_S_RFPLL_CTL7			B43_LP_SOUTH(0x03B) /* RFPLL Control 07 (south) */
+#define B2062_S_RFPLL_CTL8			B43_LP_SOUTH(0x03C) /* RFPLL Control 08 (south) */
+#define B2062_S_RFPLL_CTL9			B43_LP_SOUTH(0x03D) /* RFPLL Control 09 (south) */
+#define B2062_S_RFPLL_CTL10			B43_LP_SOUTH(0x03E) /* RFPLL Control 10 (south) */
+#define B2062_S_RFPLL_CTL11			B43_LP_SOUTH(0x03F) /* RFPLL Control 11 (south) */
+#define B2062_S_RFPLL_CTL12			B43_LP_SOUTH(0x040) /* RFPLL Control 12 (south) */
+#define B2062_S_RFPLL_CTL13			B43_LP_SOUTH(0x041) /* RFPLL Control 13 (south) */
+#define B2062_S_RFPLL_CTL14			B43_LP_SOUTH(0x042) /* RFPLL Control 14 (south) */
+#define B2062_S_RFPLL_CTL15			B43_LP_SOUTH(0x043) /* RFPLL Control 15 (south) */
+#define B2062_S_RFPLL_CTL16			B43_LP_SOUTH(0x044) /* RFPLL Control 16 (south) */
+#define B2062_S_RFPLL_CTL17			B43_LP_SOUTH(0x045) /* RFPLL Control 17 (south) */
+#define B2062_S_RFPLL_CTL18			B43_LP_SOUTH(0x046) /* RFPLL Control 18 (south) */
+#define B2062_S_RFPLL_CTL19			B43_LP_SOUTH(0x047) /* RFPLL Control 19 (south) */
+#define B2062_S_RFPLL_CTL20			B43_LP_SOUTH(0x048) /* RFPLL Control 20 (south) */
+#define B2062_S_RFPLL_CTL21			B43_LP_SOUTH(0x049) /* RFPLL Control 21 (south) */
+#define B2062_S_RFPLL_CTL22			B43_LP_SOUTH(0x04A) /* RFPLL Control 22 (south) */
+#define B2062_S_RFPLL_CTL23			B43_LP_SOUTH(0x04B) /* RFPLL Control 23 (south) */
+#define B2062_S_RFPLL_CTL24			B43_LP_SOUTH(0x04C) /* RFPLL Control 24 (south) */
+#define B2062_S_RFPLL_CTL25			B43_LP_SOUTH(0x04D) /* RFPLL Control 25 (south) */
+#define B2062_S_RFPLL_CTL26			B43_LP_SOUTH(0x04E) /* RFPLL Control 26 (south) */
+#define B2062_S_RFPLL_CTL27			B43_LP_SOUTH(0x04F) /* RFPLL Control 27 (south) */
+#define B2062_S_RFPLL_CTL28			B43_LP_SOUTH(0x050) /* RFPLL Control 28 (south) */
+#define B2062_S_RFPLL_CTL29			B43_LP_SOUTH(0x051) /* RFPLL Control 29 (south) */
+#define B2062_S_RFPLL_CTL30			B43_LP_SOUTH(0x052) /* RFPLL Control 30 (south) */
+#define B2062_S_RFPLL_CTL31			B43_LP_SOUTH(0x053) /* RFPLL Control 31 (south) */
+#define B2062_S_RFPLL_CTL32			B43_LP_SOUTH(0x054) /* RFPLL Control 32 (south) */
+#define B2062_S_RFPLL_CTL33			B43_LP_SOUTH(0x055) /* RFPLL Control 33 (south) */
+#define B2062_S_RFPLL_CTL34			B43_LP_SOUTH(0x056) /* RFPLL Control 34 (south) */
+#define B2062_S_RXG_CNT0			B43_LP_SOUTH(0x057) /* RXG Counter 00 (south) */
+#define B2062_S_RXG_CNT1			B43_LP_SOUTH(0x058) /* RXG Counter 01 (south) */
+#define B2062_S_RXG_CNT2			B43_LP_SOUTH(0x059) /* RXG Counter 02 (south) */
+#define B2062_S_RXG_CNT3			B43_LP_SOUTH(0x05A) /* RXG Counter 03 (south) */
+#define B2062_S_RXG_CNT4			B43_LP_SOUTH(0x05B) /* RXG Counter 04 (south) */
+#define B2062_S_RXG_CNT5			B43_LP_SOUTH(0x05C) /* RXG Counter 05 (south) */
+#define B2062_S_RXG_CNT6			B43_LP_SOUTH(0x05D) /* RXG Counter 06 (south) */
+#define B2062_S_RXG_CNT7			B43_LP_SOUTH(0x05E) /* RXG Counter 07 (south) */
+#define B2062_S_RXG_CNT8			B43_LP_SOUTH(0x05F) /* RXG Counter 08 (south) */
+#define B2062_S_RXG_CNT9			B43_LP_SOUTH(0x060) /* RXG Counter 09 (south) */
+#define B2062_S_RXG_CNT10			B43_LP_SOUTH(0x061) /* RXG Counter 10 (south) */
+#define B2062_S_RXG_CNT11			B43_LP_SOUTH(0x062) /* RXG Counter 11 (south) */
+#define B2062_S_RXG_CNT12			B43_LP_SOUTH(0x063) /* RXG Counter 12 (south) */
+#define B2062_S_RXG_CNT13			B43_LP_SOUTH(0x064) /* RXG Counter 13 (south) */
+#define B2062_S_RXG_CNT14			B43_LP_SOUTH(0x065) /* RXG Counter 14 (south) */
+#define B2062_S_RXG_CNT15			B43_LP_SOUTH(0x066) /* RXG Counter 15 (south) */
+#define B2062_S_RXG_CNT16			B43_LP_SOUTH(0x067) /* RXG Counter 16 (south) */
+#define B2062_S_RXG_CNT17			B43_LP_SOUTH(0x068) /* RXG Counter 17 (south) */
+
+
+
+/*** Broadcom 2063 radio registers ***/
+#define B2063_RADIO_ID_CODE			B43_LP_RADIO(0x001) /* Radio ID code */
+#define B2063_COMM1				B43_LP_RADIO(0x000) /* Common 01 */
+#define B2063_COMM2				B43_LP_RADIO(0x002) /* Common 02 */
+#define B2063_COMM3				B43_LP_RADIO(0x003) /* Common 03 */
+#define B2063_COMM4				B43_LP_RADIO(0x004) /* Common 04 */
+#define B2063_COMM5				B43_LP_RADIO(0x005) /* Common 05 */
+#define B2063_COMM6				B43_LP_RADIO(0x006) /* Common 06 */
+#define B2063_COMM7				B43_LP_RADIO(0x007) /* Common 07 */
+#define B2063_COMM8				B43_LP_RADIO(0x008) /* Common 08 */
+#define B2063_COMM9				B43_LP_RADIO(0x009) /* Common 09 */
+#define B2063_COMM10				B43_LP_RADIO(0x00A) /* Common 10 */
+#define B2063_COMM11				B43_LP_RADIO(0x00B) /* Common 11 */
+#define B2063_COMM12				B43_LP_RADIO(0x00C) /* Common 12 */
+#define B2063_COMM13				B43_LP_RADIO(0x00D) /* Common 13 */
+#define B2063_COMM14				B43_LP_RADIO(0x00E) /* Common 14 */
+#define B2063_COMM15				B43_LP_RADIO(0x00F) /* Common 15 */
+#define B2063_COMM16				B43_LP_RADIO(0x010) /* Common 16 */
+#define B2063_COMM17				B43_LP_RADIO(0x011) /* Common 17 */
+#define B2063_COMM18				B43_LP_RADIO(0x012) /* Common 18 */
+#define B2063_COMM19				B43_LP_RADIO(0x013) /* Common 19 */
+#define B2063_COMM20				B43_LP_RADIO(0x014) /* Common 20 */
+#define B2063_COMM21				B43_LP_RADIO(0x015) /* Common 21 */
+#define B2063_COMM22				B43_LP_RADIO(0x016) /* Common 22 */
+#define B2063_COMM23				B43_LP_RADIO(0x017) /* Common 23 */
+#define B2063_COMM24				B43_LP_RADIO(0x018) /* Common 24 */
+#define B2063_PWR_SWITCH_CTL			B43_LP_RADIO(0x019) /* POWER SWITCH Control */
+#define B2063_PLL_SP1				B43_LP_RADIO(0x01A) /* PLL SP 1 */
+#define B2063_PLL_SP2				B43_LP_RADIO(0x01B) /* PLL SP 2 */
+#define B2063_LOGEN_SP1				B43_LP_RADIO(0x01C) /* LOGEN SP 1 */
+#define B2063_LOGEN_SP2				B43_LP_RADIO(0x01D) /* LOGEN SP 2 */
+#define B2063_LOGEN_SP3				B43_LP_RADIO(0x01E) /* LOGEN SP 3 */
+#define B2063_LOGEN_SP4				B43_LP_RADIO(0x01F) /* LOGEN SP 4 */
+#define B2063_LOGEN_SP5				B43_LP_RADIO(0x020) /* LOGEN SP 5 */
+#define B2063_G_RX_SP1				B43_LP_RADIO(0x021) /* G RX SP 1 */
+#define B2063_G_RX_SP2				B43_LP_RADIO(0x022) /* G RX SP 2 */
+#define B2063_G_RX_SP3				B43_LP_RADIO(0x023) /* G RX SP 3 */
+#define B2063_G_RX_SP4				B43_LP_RADIO(0x024) /* G RX SP 4 */
+#define B2063_G_RX_SP5				B43_LP_RADIO(0x025) /* G RX SP 5 */
+#define B2063_G_RX_SP6				B43_LP_RADIO(0x026) /* G RX SP 6 */
+#define B2063_G_RX_SP7				B43_LP_RADIO(0x027) /* G RX SP 7 */
+#define B2063_G_RX_SP8				B43_LP_RADIO(0x028) /* G RX SP 8 */
+#define B2063_G_RX_SP9				B43_LP_RADIO(0x029) /* G RX SP 9 */
+#define B2063_G_RX_SP10				B43_LP_RADIO(0x02A) /* G RX SP 10 */
+#define B2063_G_RX_SP11				B43_LP_RADIO(0x02B) /* G RX SP 11 */
+#define B2063_A_RX_SP1				B43_LP_RADIO(0x02C) /* A RX SP 1 */
+#define B2063_A_RX_SP2				B43_LP_RADIO(0x02D) /* A RX SP 2 */
+#define B2063_A_RX_SP3				B43_LP_RADIO(0x02E) /* A RX SP 3 */
+#define B2063_A_RX_SP4				B43_LP_RADIO(0x02F) /* A RX SP 4 */
+#define B2063_A_RX_SP5				B43_LP_RADIO(0x030) /* A RX SP 5 */
+#define B2063_A_RX_SP6				B43_LP_RADIO(0x031) /* A RX SP 6 */
+#define B2063_A_RX_SP7				B43_LP_RADIO(0x032) /* A RX SP 7 */
+#define B2063_RX_BB_SP1				B43_LP_RADIO(0x033) /* RX BB SP 1 */
+#define B2063_RX_BB_SP2				B43_LP_RADIO(0x034) /* RX BB SP 2 */
+#define B2063_RX_BB_SP3				B43_LP_RADIO(0x035) /* RX BB SP 3 */
+#define B2063_RX_BB_SP4				B43_LP_RADIO(0x036) /* RX BB SP 4 */
+#define B2063_RX_BB_SP5				B43_LP_RADIO(0x037) /* RX BB SP 5 */
+#define B2063_RX_BB_SP6				B43_LP_RADIO(0x038) /* RX BB SP 6 */
+#define B2063_RX_BB_SP7				B43_LP_RADIO(0x039) /* RX BB SP 7 */
+#define B2063_RX_BB_SP8				B43_LP_RADIO(0x03A) /* RX BB SP 8 */
+#define B2063_TX_RF_SP1				B43_LP_RADIO(0x03B) /* TX RF SP 1 */
+#define B2063_TX_RF_SP2				B43_LP_RADIO(0x03C) /* TX RF SP 2 */
+#define B2063_TX_RF_SP3				B43_LP_RADIO(0x03D) /* TX RF SP 3 */
+#define B2063_TX_RF_SP4				B43_LP_RADIO(0x03E) /* TX RF SP 4 */
+#define B2063_TX_RF_SP5				B43_LP_RADIO(0x03F) /* TX RF SP 5 */
+#define B2063_TX_RF_SP6				B43_LP_RADIO(0x040) /* TX RF SP 6 */
+#define B2063_TX_RF_SP7				B43_LP_RADIO(0x041) /* TX RF SP 7 */
+#define B2063_TX_RF_SP8				B43_LP_RADIO(0x042) /* TX RF SP 8 */
+#define B2063_TX_RF_SP9				B43_LP_RADIO(0x043) /* TX RF SP 9 */
+#define B2063_TX_RF_SP10			B43_LP_RADIO(0x044) /* TX RF SP 10 */
+#define B2063_TX_RF_SP11			B43_LP_RADIO(0x045) /* TX RF SP 11 */
+#define B2063_TX_RF_SP12			B43_LP_RADIO(0x046) /* TX RF SP 12 */
+#define B2063_TX_RF_SP13			B43_LP_RADIO(0x047) /* TX RF SP 13 */
+#define B2063_TX_RF_SP14			B43_LP_RADIO(0x048) /* TX RF SP 14 */
+#define B2063_TX_RF_SP15			B43_LP_RADIO(0x049) /* TX RF SP 15 */
+#define B2063_TX_RF_SP16			B43_LP_RADIO(0x04A) /* TX RF SP 16 */
+#define B2063_TX_RF_SP17			B43_LP_RADIO(0x04B) /* TX RF SP 17 */
+#define B2063_PA_SP1				B43_LP_RADIO(0x04C) /* PA SP 1 */
+#define B2063_PA_SP2				B43_LP_RADIO(0x04D) /* PA SP 2 */
+#define B2063_PA_SP3				B43_LP_RADIO(0x04E) /* PA SP 3 */
+#define B2063_PA_SP4				B43_LP_RADIO(0x04F) /* PA SP 4 */
+#define B2063_PA_SP5				B43_LP_RADIO(0x050) /* PA SP 5 */
+#define B2063_PA_SP6				B43_LP_RADIO(0x051) /* PA SP 6 */
+#define B2063_PA_SP7				B43_LP_RADIO(0x052) /* PA SP 7 */
+#define B2063_TX_BB_SP1				B43_LP_RADIO(0x053) /* TX BB SP 1 */
+#define B2063_TX_BB_SP2				B43_LP_RADIO(0x054) /* TX BB SP 2 */
+#define B2063_TX_BB_SP3				B43_LP_RADIO(0x055) /* TX BB SP 3 */
+#define B2063_REG_SP1				B43_LP_RADIO(0x056) /* REG SP 1 */
+#define B2063_BANDGAP_CTL1			B43_LP_RADIO(0x057) /* BANDGAP Control 1 */
+#define B2063_BANDGAP_CTL2			B43_LP_RADIO(0x058) /* BANDGAP Control 2 */
+#define B2063_LPO_CTL1				B43_LP_RADIO(0x059) /* LPO Control 1 */
+#define B2063_RC_CALIB_CTL1			B43_LP_RADIO(0x05A) /* RC Calibration Control 1 */
+#define B2063_RC_CALIB_CTL2			B43_LP_RADIO(0x05B) /* RC Calibration Control 2 */
+#define B2063_RC_CALIB_CTL3			B43_LP_RADIO(0x05C) /* RC Calibration Control 3 */
+#define B2063_RC_CALIB_CTL4			B43_LP_RADIO(0x05D) /* RC Calibration Control 4 */
+#define B2063_RC_CALIB_CTL5			B43_LP_RADIO(0x05E) /* RC Calibration Control 5 */
+#define B2063_RC_CALIB_CTL6			B43_LP_RADIO(0x05F) /* RC Calibration Control 6 */
+#define B2063_RC_CALIB_CTL7			B43_LP_RADIO(0x060) /* RC Calibration Control 7 */
+#define B2063_RC_CALIB_CTL8			B43_LP_RADIO(0x061) /* RC Calibration Control 8 */
+#define B2063_RC_CALIB_CTL9			B43_LP_RADIO(0x062) /* RC Calibration Control 9 */
+#define B2063_RC_CALIB_CTL10			B43_LP_RADIO(0x063) /* RC Calibration Control 10 */
+#define B2063_PLL_JTAG_CALNRST			B43_LP_RADIO(0x064) /* PLL JTAG CALNRST */
+#define B2063_PLL_JTAG_IN_PLL1			B43_LP_RADIO(0x065) /* PLL JTAG IN PLL 1 */
+#define B2063_PLL_JTAG_IN_PLL2			B43_LP_RADIO(0x066) /* PLL JTAG IN PLL 2 */
+#define B2063_PLL_JTAG_PLL_CP1			B43_LP_RADIO(0x067) /* PLL JTAG PLL CP 1 */
+#define B2063_PLL_JTAG_PLL_CP2			B43_LP_RADIO(0x068) /* PLL JTAG PLL CP 2 */
+#define B2063_PLL_JTAG_PLL_CP3			B43_LP_RADIO(0x069) /* PLL JTAG PLL CP 3 */
+#define B2063_PLL_JTAG_PLL_CP4			B43_LP_RADIO(0x06A) /* PLL JTAG PLL CP 4 */
+#define B2063_PLL_JTAG_PLL_CTL1			B43_LP_RADIO(0x06B) /* PLL JTAG PLL Control 1 */
+#define B2063_PLL_JTAG_PLL_LF1			B43_LP_RADIO(0x06C) /* PLL JTAG PLL LF 1 */
+#define B2063_PLL_JTAG_PLL_LF2			B43_LP_RADIO(0x06D) /* PLL JTAG PLL LF 2 */
+#define B2063_PLL_JTAG_PLL_LF3			B43_LP_RADIO(0x06E) /* PLL JTAG PLL LF 3 */
+#define B2063_PLL_JTAG_PLL_LF4			B43_LP_RADIO(0x06F) /* PLL JTAG PLL LF 4 */
+#define B2063_PLL_JTAG_PLL_SG1			B43_LP_RADIO(0x070) /* PLL JTAG PLL SG 1 */
+#define B2063_PLL_JTAG_PLL_SG2			B43_LP_RADIO(0x071) /* PLL JTAG PLL SG 2 */
+#define B2063_PLL_JTAG_PLL_SG3			B43_LP_RADIO(0x072) /* PLL JTAG PLL SG 3 */
+#define B2063_PLL_JTAG_PLL_SG4			B43_LP_RADIO(0x073) /* PLL JTAG PLL SG 4 */
+#define B2063_PLL_JTAG_PLL_SG5			B43_LP_RADIO(0x074) /* PLL JTAG PLL SG 5 */
+#define B2063_PLL_JTAG_PLL_VCO1			B43_LP_RADIO(0x075) /* PLL JTAG PLL VCO 1 */
+#define B2063_PLL_JTAG_PLL_VCO2			B43_LP_RADIO(0x076) /* PLL JTAG PLL VCO 2 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB1		B43_LP_RADIO(0x077) /* PLL JTAG PLL VCO Calibration 1 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB2		B43_LP_RADIO(0x078) /* PLL JTAG PLL VCO Calibration 2 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB3		B43_LP_RADIO(0x079) /* PLL JTAG PLL VCO Calibration 3 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB4		B43_LP_RADIO(0x07A) /* PLL JTAG PLL VCO Calibration 4 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB5		B43_LP_RADIO(0x07B) /* PLL JTAG PLL VCO Calibration 5 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB6		B43_LP_RADIO(0x07C) /* PLL JTAG PLL VCO Calibration 6 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB7		B43_LP_RADIO(0x07D) /* PLL JTAG PLL VCO Calibration 7 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB8		B43_LP_RADIO(0x07E) /* PLL JTAG PLL VCO Calibration 8 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB9		B43_LP_RADIO(0x07F) /* PLL JTAG PLL VCO Calibration 9 */
+#define B2063_PLL_JTAG_PLL_VCO_CALIB10		B43_LP_RADIO(0x080) /* PLL JTAG PLL VCO Calibration 10 */
+#define B2063_PLL_JTAG_PLL_XTAL_12		B43_LP_RADIO(0x081) /* PLL JTAG PLL XTAL 1 2 */
+#define B2063_PLL_JTAG_PLL_XTAL3		B43_LP_RADIO(0x082) /* PLL JTAG PLL XTAL 3 */
+#define B2063_LOGEN_ACL1			B43_LP_RADIO(0x083) /* LOGEN ACL 1 */
+#define B2063_LOGEN_ACL2			B43_LP_RADIO(0x084) /* LOGEN ACL 2 */
+#define B2063_LOGEN_ACL3			B43_LP_RADIO(0x085) /* LOGEN ACL 3 */
+#define B2063_LOGEN_ACL4			B43_LP_RADIO(0x086) /* LOGEN ACL 4 */
+#define B2063_LOGEN_ACL5			B43_LP_RADIO(0x087) /* LOGEN ACL 5 */
+#define B2063_LO_CALIB_INPUTS			B43_LP_RADIO(0x088) /* LO Calibration INPUTS */
+#define B2063_LO_CALIB_CTL1			B43_LP_RADIO(0x089) /* LO Calibration Control 1 */
+#define B2063_LO_CALIB_CTL2			B43_LP_RADIO(0x08A) /* LO Calibration Control 2 */
+#define B2063_LO_CALIB_CTL3			B43_LP_RADIO(0x08B) /* LO Calibration Control 3 */
+#define B2063_LO_CALIB_WAITCNT			B43_LP_RADIO(0x08C) /* LO Calibration WAITCNT */
+#define B2063_LO_CALIB_OVR1			B43_LP_RADIO(0x08D) /* LO Calibration OVR 1 */
+#define B2063_LO_CALIB_OVR2			B43_LP_RADIO(0x08E) /* LO Calibration OVR 2 */
+#define B2063_LO_CALIB_OVAL1			B43_LP_RADIO(0x08F) /* LO Calibration OVAL 1 */
+#define B2063_LO_CALIB_OVAL2			B43_LP_RADIO(0x090) /* LO Calibration OVAL 2 */
+#define B2063_LO_CALIB_OVAL3			B43_LP_RADIO(0x091) /* LO Calibration OVAL 3 */
+#define B2063_LO_CALIB_OVAL4			B43_LP_RADIO(0x092) /* LO Calibration OVAL 4 */
+#define B2063_LO_CALIB_OVAL5			B43_LP_RADIO(0x093) /* LO Calibration OVAL 5 */
+#define B2063_LO_CALIB_OVAL6			B43_LP_RADIO(0x094) /* LO Calibration OVAL 6 */
+#define B2063_LO_CALIB_OVAL7			B43_LP_RADIO(0x095) /* LO Calibration OVAL 7 */
+#define B2063_LO_CALIB_CALVLD1			B43_LP_RADIO(0x096) /* LO Calibration CALVLD 1 */
+#define B2063_LO_CALIB_CALVLD2			B43_LP_RADIO(0x097) /* LO Calibration CALVLD 2 */
+#define B2063_LO_CALIB_CVAL1			B43_LP_RADIO(0x098) /* LO Calibration CVAL 1 */
+#define B2063_LO_CALIB_CVAL2			B43_LP_RADIO(0x099) /* LO Calibration CVAL 2 */
+#define B2063_LO_CALIB_CVAL3			B43_LP_RADIO(0x09A) /* LO Calibration CVAL 3 */
+#define B2063_LO_CALIB_CVAL4			B43_LP_RADIO(0x09B) /* LO Calibration CVAL 4 */
+#define B2063_LO_CALIB_CVAL5			B43_LP_RADIO(0x09C) /* LO Calibration CVAL 5 */
+#define B2063_LO_CALIB_CVAL6			B43_LP_RADIO(0x09D) /* LO Calibration CVAL 6 */
+#define B2063_LO_CALIB_CVAL7			B43_LP_RADIO(0x09E) /* LO Calibration CVAL 7 */
+#define B2063_LOGEN_CALIB_EN			B43_LP_RADIO(0x09F) /* LOGEN Calibration EN */
+#define B2063_LOGEN_PEAKDET1			B43_LP_RADIO(0x0A0) /* LOGEN PEAKDET 1 */
+#define B2063_LOGEN_RCCR1			B43_LP_RADIO(0x0A1) /* LOGEN RCCR 1 */
+#define B2063_LOGEN_VCOBUF1			B43_LP_RADIO(0x0A2) /* LOGEN VCOBUF 1 */
+#define B2063_LOGEN_MIXER1			B43_LP_RADIO(0x0A3) /* LOGEN MIXER 1 */
+#define B2063_LOGEN_MIXER2			B43_LP_RADIO(0x0A4) /* LOGEN MIXER 2 */
+#define B2063_LOGEN_BUF1			B43_LP_RADIO(0x0A5) /* LOGEN BUF 1 */
+#define B2063_LOGEN_BUF2			B43_LP_RADIO(0x0A6) /* LOGEN BUF 2 */
+#define B2063_LOGEN_DIV1			B43_LP_RADIO(0x0A7) /* LOGEN DIV 1 */
+#define B2063_LOGEN_DIV2			B43_LP_RADIO(0x0A8) /* LOGEN DIV 2 */
+#define B2063_LOGEN_DIV3			B43_LP_RADIO(0x0A9) /* LOGEN DIV 3 */
+#define B2063_LOGEN_CBUFRX1			B43_LP_RADIO(0x0AA) /* LOGEN CBUFRX 1 */
+#define B2063_LOGEN_CBUFRX2			B43_LP_RADIO(0x0AB) /* LOGEN CBUFRX 2 */
+#define B2063_LOGEN_CBUFTX1			B43_LP_RADIO(0x0AC) /* LOGEN CBUFTX 1 */
+#define B2063_LOGEN_CBUFTX2			B43_LP_RADIO(0x0AD) /* LOGEN CBUFTX 2 */
+#define B2063_LOGEN_IDAC1			B43_LP_RADIO(0x0AE) /* LOGEN IDAC 1 */
+#define B2063_LOGEN_SPARE1			B43_LP_RADIO(0x0AF) /* LOGEN SPARE 1 */
+#define B2063_LOGEN_SPARE2			B43_LP_RADIO(0x0B0) /* LOGEN SPARE 2 */
+#define B2063_LOGEN_SPARE3			B43_LP_RADIO(0x0B1) /* LOGEN SPARE 3 */
+#define B2063_G_RX_1ST1				B43_LP_RADIO(0x0B2) /* G RX 1ST 1 */
+#define B2063_G_RX_1ST2				B43_LP_RADIO(0x0B3) /* G RX 1ST 2 */
+#define B2063_G_RX_1ST3				B43_LP_RADIO(0x0B4) /* G RX 1ST 3 */
+#define B2063_G_RX_2ND1				B43_LP_RADIO(0x0B5) /* G RX 2ND 1 */
+#define B2063_G_RX_2ND2				B43_LP_RADIO(0x0B6) /* G RX 2ND 2 */
+#define B2063_G_RX_2ND3				B43_LP_RADIO(0x0B7) /* G RX 2ND 3 */
+#define B2063_G_RX_2ND4				B43_LP_RADIO(0x0B8) /* G RX 2ND 4 */
+#define B2063_G_RX_2ND5				B43_LP_RADIO(0x0B9) /* G RX 2ND 5 */
+#define B2063_G_RX_2ND6				B43_LP_RADIO(0x0BA) /* G RX 2ND 6 */
+#define B2063_G_RX_2ND7				B43_LP_RADIO(0x0BB) /* G RX 2ND 7 */
+#define B2063_G_RX_2ND8				B43_LP_RADIO(0x0BC) /* G RX 2ND 8 */
+#define B2063_G_RX_PS1				B43_LP_RADIO(0x0BD) /* G RX PS 1 */
+#define B2063_G_RX_PS2				B43_LP_RADIO(0x0BE) /* G RX PS 2 */
+#define B2063_G_RX_PS3				B43_LP_RADIO(0x0BF) /* G RX PS 3 */
+#define B2063_G_RX_PS4				B43_LP_RADIO(0x0C0) /* G RX PS 4 */
+#define B2063_G_RX_PS5				B43_LP_RADIO(0x0C1) /* G RX PS 5 */
+#define B2063_G_RX_MIX1				B43_LP_RADIO(0x0C2) /* G RX MIX 1 */
+#define B2063_G_RX_MIX2				B43_LP_RADIO(0x0C3) /* G RX MIX 2 */
+#define B2063_G_RX_MIX3				B43_LP_RADIO(0x0C4) /* G RX MIX 3 */
+#define B2063_G_RX_MIX4				B43_LP_RADIO(0x0C5) /* G RX MIX 4 */
+#define B2063_G_RX_MIX5				B43_LP_RADIO(0x0C6) /* G RX MIX 5 */
+#define B2063_G_RX_MIX6				B43_LP_RADIO(0x0C7) /* G RX MIX 6 */
+#define B2063_G_RX_MIX7				B43_LP_RADIO(0x0C8) /* G RX MIX 7 */
+#define B2063_G_RX_MIX8				B43_LP_RADIO(0x0C9) /* G RX MIX 8 */
+#define B2063_G_RX_PDET1			B43_LP_RADIO(0x0CA) /* G RX PDET 1 */
+#define B2063_G_RX_SPARES1			B43_LP_RADIO(0x0CB) /* G RX SPARES 1 */
+#define B2063_G_RX_SPARES2			B43_LP_RADIO(0x0CC) /* G RX SPARES 2 */
+#define B2063_G_RX_SPARES3			B43_LP_RADIO(0x0CD) /* G RX SPARES 3 */
+#define B2063_A_RX_1ST1				B43_LP_RADIO(0x0CE) /* A RX 1ST 1 */
+#define B2063_A_RX_1ST2				B43_LP_RADIO(0x0CF) /* A RX 1ST 2 */
+#define B2063_A_RX_1ST3				B43_LP_RADIO(0x0D0) /* A RX 1ST 3 */
+#define B2063_A_RX_1ST4				B43_LP_RADIO(0x0D1) /* A RX 1ST 4 */
+#define B2063_A_RX_1ST5				B43_LP_RADIO(0x0D2) /* A RX 1ST 5 */
+#define B2063_A_RX_2ND1				B43_LP_RADIO(0x0D3) /* A RX 2ND 1 */
+#define B2063_A_RX_2ND2				B43_LP_RADIO(0x0D4) /* A RX 2ND 2 */
+#define B2063_A_RX_2ND3				B43_LP_RADIO(0x0D5) /* A RX 2ND 3 */
+#define B2063_A_RX_2ND4				B43_LP_RADIO(0x0D6) /* A RX 2ND 4 */
+#define B2063_A_RX_2ND5				B43_LP_RADIO(0x0D7) /* A RX 2ND 5 */
+#define B2063_A_RX_2ND6				B43_LP_RADIO(0x0D8) /* A RX 2ND 6 */
+#define B2063_A_RX_2ND7				B43_LP_RADIO(0x0D9) /* A RX 2ND 7 */
+#define B2063_A_RX_PS1				B43_LP_RADIO(0x0DA) /* A RX PS 1 */
+#define B2063_A_RX_PS2				B43_LP_RADIO(0x0DB) /* A RX PS 2 */
+#define B2063_A_RX_PS3				B43_LP_RADIO(0x0DC) /* A RX PS 3 */
+#define B2063_A_RX_PS4				B43_LP_RADIO(0x0DD) /* A RX PS 4 */
+#define B2063_A_RX_PS5				B43_LP_RADIO(0x0DE) /* A RX PS 5 */
+#define B2063_A_RX_PS6				B43_LP_RADIO(0x0DF) /* A RX PS 6 */
+#define B2063_A_RX_MIX1				B43_LP_RADIO(0x0E0) /* A RX MIX 1 */
+#define B2063_A_RX_MIX2				B43_LP_RADIO(0x0E1) /* A RX MIX 2 */
+#define B2063_A_RX_MIX3				B43_LP_RADIO(0x0E2) /* A RX MIX 3 */
+#define B2063_A_RX_MIX4				B43_LP_RADIO(0x0E3) /* A RX MIX 4 */
+#define B2063_A_RX_MIX5				B43_LP_RADIO(0x0E4) /* A RX MIX 5 */
+#define B2063_A_RX_MIX6				B43_LP_RADIO(0x0E5) /* A RX MIX 6 */
+#define B2063_A_RX_MIX7				B43_LP_RADIO(0x0E6) /* A RX MIX 7 */
+#define B2063_A_RX_MIX8				B43_LP_RADIO(0x0E7) /* A RX MIX 8 */
+#define B2063_A_RX_PWRDET1			B43_LP_RADIO(0x0E8) /* A RX PWRDET 1 */
+#define B2063_A_RX_SPARE1			B43_LP_RADIO(0x0E9) /* A RX SPARE 1 */
+#define B2063_A_RX_SPARE2			B43_LP_RADIO(0x0EA) /* A RX SPARE 2 */
+#define B2063_A_RX_SPARE3			B43_LP_RADIO(0x0EB) /* A RX SPARE 3 */
+#define B2063_RX_TIA_CTL1			B43_LP_RADIO(0x0EC) /* RX TIA Control 1 */
+#define B2063_RX_TIA_CTL2			B43_LP_RADIO(0x0ED) /* RX TIA Control 2 */
+#define B2063_RX_TIA_CTL3			B43_LP_RADIO(0x0EE) /* RX TIA Control 3 */
+#define B2063_RX_TIA_CTL4			B43_LP_RADIO(0x0EF) /* RX TIA Control 4 */
+#define B2063_RX_TIA_CTL5			B43_LP_RADIO(0x0F0) /* RX TIA Control 5 */
+#define B2063_RX_TIA_CTL6			B43_LP_RADIO(0x0F1) /* RX TIA Control 6 */
+#define B2063_RX_BB_CTL1			B43_LP_RADIO(0x0F2) /* RX BB Control 1 */
+#define B2063_RX_BB_CTL2			B43_LP_RADIO(0x0F3) /* RX BB Control 2 */
+#define B2063_RX_BB_CTL3			B43_LP_RADIO(0x0F4) /* RX BB Control 3 */
+#define B2063_RX_BB_CTL4			B43_LP_RADIO(0x0F5) /* RX BB Control 4 */
+#define B2063_RX_BB_CTL5			B43_LP_RADIO(0x0F6) /* RX BB Control 5 */
+#define B2063_RX_BB_CTL6			B43_LP_RADIO(0x0F7) /* RX BB Control 6 */
+#define B2063_RX_BB_CTL7			B43_LP_RADIO(0x0F8) /* RX BB Control 7 */
+#define B2063_RX_BB_CTL8			B43_LP_RADIO(0x0F9) /* RX BB Control 8 */
+#define B2063_RX_BB_CTL9			B43_LP_RADIO(0x0FA) /* RX BB Control 9 */
+#define B2063_TX_RF_CTL1			B43_LP_RADIO(0x0FB) /* TX RF Control 1 */
+#define B2063_TX_RF_IDAC_LO_RF_I		B43_LP_RADIO(0x0FC) /* TX RF IDAC LO RF I */
+#define B2063_TX_RF_IDAC_LO_RF_Q		B43_LP_RADIO(0x0FD) /* TX RF IDAC LO RF Q */
+#define B2063_TX_RF_IDAC_LO_BB_I		B43_LP_RADIO(0x0FE) /* TX RF IDAC LO BB I */
+#define B2063_TX_RF_IDAC_LO_BB_Q		B43_LP_RADIO(0x0FF) /* TX RF IDAC LO BB Q */
+#define B2063_TX_RF_CTL2			B43_LP_RADIO(0x100) /* TX RF Control 2 */
+#define B2063_TX_RF_CTL3			B43_LP_RADIO(0x101) /* TX RF Control 3 */
+#define B2063_TX_RF_CTL4			B43_LP_RADIO(0x102) /* TX RF Control 4 */
+#define B2063_TX_RF_CTL5			B43_LP_RADIO(0x103) /* TX RF Control 5 */
+#define B2063_TX_RF_CTL6			B43_LP_RADIO(0x104) /* TX RF Control 6 */
+#define B2063_TX_RF_CTL7			B43_LP_RADIO(0x105) /* TX RF Control 7 */
+#define B2063_TX_RF_CTL8			B43_LP_RADIO(0x106) /* TX RF Control 8 */
+#define B2063_TX_RF_CTL9			B43_LP_RADIO(0x107) /* TX RF Control 9 */
+#define B2063_TX_RF_CTL10			B43_LP_RADIO(0x108) /* TX RF Control 10 */
+#define B2063_TX_RF_CTL14			B43_LP_RADIO(0x109) /* TX RF Control 14 */
+#define B2063_TX_RF_CTL15			B43_LP_RADIO(0x10A) /* TX RF Control 15 */
+#define B2063_PA_CTL1				B43_LP_RADIO(0x10B) /* PA Control 1 */
+#define B2063_PA_CTL2				B43_LP_RADIO(0x10C) /* PA Control 2 */
+#define B2063_PA_CTL3				B43_LP_RADIO(0x10D) /* PA Control 3 */
+#define B2063_PA_CTL4				B43_LP_RADIO(0x10E) /* PA Control 4 */
+#define B2063_PA_CTL5				B43_LP_RADIO(0x10F) /* PA Control 5 */
+#define B2063_PA_CTL6				B43_LP_RADIO(0x110) /* PA Control 6 */
+#define B2063_PA_CTL7				B43_LP_RADIO(0x111) /* PA Control 7 */
+#define B2063_PA_CTL8				B43_LP_RADIO(0x112) /* PA Control 8 */
+#define B2063_PA_CTL9				B43_LP_RADIO(0x113) /* PA Control 9 */
+#define B2063_PA_CTL10				B43_LP_RADIO(0x114) /* PA Control 10 */
+#define B2063_PA_CTL11				B43_LP_RADIO(0x115) /* PA Control 11 */
+#define B2063_PA_CTL12				B43_LP_RADIO(0x116) /* PA Control 12 */
+#define B2063_PA_CTL13				B43_LP_RADIO(0x117) /* PA Control 13 */
+#define B2063_TX_BB_CTL1			B43_LP_RADIO(0x118) /* TX BB Control 1 */
+#define B2063_TX_BB_CTL2			B43_LP_RADIO(0x119) /* TX BB Control 2 */
+#define B2063_TX_BB_CTL3			B43_LP_RADIO(0x11A) /* TX BB Control 3 */
+#define B2063_TX_BB_CTL4			B43_LP_RADIO(0x11B) /* TX BB Control 4 */
+#define B2063_GPIO_CTL1				B43_LP_RADIO(0x11C) /* GPIO Control 1 */
+#define B2063_VREG_CTL1				B43_LP_RADIO(0x11D) /* VREG Control 1 */
+#define B2063_AMUX_CTL1				B43_LP_RADIO(0x11E) /* AMUX Control 1 */
+#define B2063_IQ_CALIB_GVAR			B43_LP_RADIO(0x11F) /* IQ Calibration GVAR */
+#define B2063_IQ_CALIB_CTL1			B43_LP_RADIO(0x120) /* IQ Calibration Control 1 */
+#define B2063_IQ_CALIB_CTL2			B43_LP_RADIO(0x121) /* IQ Calibration Control 2 */
+#define B2063_TEMPSENSE_CTL1			B43_LP_RADIO(0x122) /* TEMPSENSE Control 1 */
+#define B2063_TEMPSENSE_CTL2			B43_LP_RADIO(0x123) /* TEMPSENSE Control 2 */
+#define B2063_TX_RX_LOOPBACK1			B43_LP_RADIO(0x124) /* TX/RX LOOPBACK 1 */
+#define B2063_TX_RX_LOOPBACK2			B43_LP_RADIO(0x125) /* TX/RX LOOPBACK 2 */
+#define B2063_EXT_TSSI_CTL1			B43_LP_RADIO(0x126) /* EXT TSSI Control 1 */
+#define B2063_EXT_TSSI_CTL2			B43_LP_RADIO(0x127) /* EXT TSSI Control 2 */
+#define B2063_AFE_CTL				B43_LP_RADIO(0x128) /* AFE Control */
+
+
+
+struct b43_phy_lp {
+	//TODO
+};
+
+
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_lp;
+
+#endif /* LINUX_B43_PHY_LP_H_ */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/phy_n.c
similarity index 79%
rename from drivers/net/wireless/b43/nphy.c
rename to drivers/net/wireless/b43/phy_n.c
index 644eed9..8bcfda5 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -26,7 +26,7 @@
 #include <linux/types.h>
 
 #include "b43.h"
-#include "nphy.h"
+#include "phy_n.h"
 #include "tables_nphy.h"
 
 
@@ -34,10 +34,16 @@
 {//TODO
 }
 
-void b43_nphy_xmitpower(struct b43_wldev *dev)
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
 {//TODO
 }
 
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+							bool ignore_tssi)
+{//TODO
+	return B43_TXPWR_RES_DONE;
+}
+
 static void b43_chantab_radio_upload(struct b43_wldev *dev,
 				     const struct b43_nphy_channeltab_entry *e)
 {
@@ -81,9 +87,8 @@
 	//TODO
 }
 
-/* Tune the hardware to a new channel. Don't call this directly.
- * Use b43_radio_selectchannel() */
-int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
+/* Tune the hardware to a new channel. */
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
 {
 	const struct b43_nphy_channeltab_entry *tabent;
 
@@ -162,7 +167,7 @@
 	msleep(1);
 	b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
 	msleep(1);
-	b43_radio_selectchannel(dev, dev->phy.channel, 0);
+	nphy_channel_switch(dev, dev->phy.channel);
 	b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
 	b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
 	b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
@@ -484,3 +489,140 @@
 	b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
 	return 0;
 }
+
+static int b43_nphy_op_allocate(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy;
+
+	nphy = kzalloc(sizeof(*nphy), GFP_KERNEL);
+	if (!nphy)
+		return -ENOMEM;
+	dev->phy.n = nphy;
+
+	return 0;
+}
+
+static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = phy->n;
+
+	memset(nphy, 0, sizeof(*nphy));
+
+	//TODO init struct b43_phy_n
+}
+
+static void b43_nphy_op_free(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = phy->n;
+
+	kfree(nphy);
+	phy->n = NULL;
+}
+
+static int b43_nphy_op_init(struct b43_wldev *dev)
+{
+	return b43_phy_initn(dev);
+}
+
+static inline void check_phyreg(struct b43_wldev *dev, u16 offset)
+{
+#if B43_DEBUG
+	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
+		/* OFDM registers are only available on A/G-PHYs */
+		b43err(dev->wl, "Invalid OFDM PHY access at "
+		       "0x%04X on N-PHY\n", offset);
+		dump_stack();
+	}
+	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
+		/* Ext-G registers are only available on G-PHYs */
+		b43err(dev->wl, "Invalid EXT-G PHY access at "
+		       "0x%04X on N-PHY\n", offset);
+		dump_stack();
+	}
+#endif /* B43_DEBUG */
+}
+
+static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg)
+{
+	check_phyreg(dev, reg);
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	check_phyreg(dev, reg);
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+	/* N-PHY needs 0x100 for read access */
+	reg |= 0x100;
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
+}
+
+static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+	/* Register 1 is a 32-bit register. */
+	B43_WARN_ON(reg == 1);
+
+	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
+}
+
+static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
+					enum rfkill_state state)
+{//TODO
+}
+
+static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
+{
+	b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
+		      on ? 0 : 0x7FFF);
+}
+
+static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
+				      unsigned int new_channel)
+{
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if ((new_channel < 1) || (new_channel > 14))
+			return -EINVAL;
+	} else {
+		if (new_channel > 200)
+			return -EINVAL;
+	}
+
+	return nphy_channel_switch(dev, new_channel);
+}
+
+static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
+{
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		return 1;
+	return 36;
+}
+
+const struct b43_phy_operations b43_phyops_n = {
+	.allocate		= b43_nphy_op_allocate,
+	.free			= b43_nphy_op_free,
+	.prepare_structs	= b43_nphy_op_prepare_structs,
+	.init			= b43_nphy_op_init,
+	.phy_read		= b43_nphy_op_read,
+	.phy_write		= b43_nphy_op_write,
+	.radio_read		= b43_nphy_op_radio_read,
+	.radio_write		= b43_nphy_op_radio_write,
+	.software_rfkill	= b43_nphy_op_software_rfkill,
+	.switch_analog		= b43_nphy_op_switch_analog,
+	.switch_channel		= b43_nphy_op_switch_channel,
+	.get_default_chan	= b43_nphy_op_get_default_chan,
+	.recalc_txpower		= b43_nphy_op_recalc_txpower,
+	.adjust_txpower		= b43_nphy_op_adjust_txpower,
+};
diff --git a/drivers/net/wireless/b43/nphy.h b/drivers/net/wireless/b43/phy_n.h
similarity index 98%
rename from drivers/net/wireless/b43/nphy.h
rename to drivers/net/wireless/b43/phy_n.h
index faf46b9..1749aef 100644
--- a/drivers/net/wireless/b43/nphy.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -1,7 +1,7 @@
 #ifndef B43_NPHY_H_
 #define B43_NPHY_H_
 
-#include "phy.h"
+#include "phy_common.h"
 
 
 /* N-PHY registers. */
@@ -919,54 +919,12 @@
 
 struct b43_wldev;
 
-
-#ifdef CONFIG_B43_NPHY
-/* N-PHY support enabled */
-
-int b43_phy_initn(struct b43_wldev *dev);
-
-void b43_nphy_radio_turn_on(struct b43_wldev *dev);
-void b43_nphy_radio_turn_off(struct b43_wldev *dev);
-
-int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel);
-
-void b43_nphy_xmitpower(struct b43_wldev *dev);
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna);
+struct b43_phy_n {
+	//TODO lots of missing stuff
+};
 
 
-#else /* CONFIG_B43_NPHY */
-/* N-PHY support disabled */
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_n;
 
-
-static inline
-int b43_phy_initn(struct b43_wldev *dev)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline
-void b43_nphy_radio_turn_on(struct b43_wldev *dev)
-{
-}
-static inline
-void b43_nphy_radio_turn_off(struct b43_wldev *dev)
-{
-}
-
-static inline
-int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
-{
-	return -ENOSYS;
-}
-
-static inline
-void b43_nphy_xmitpower(struct b43_wldev *dev)
-{
-}
-static inline
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
-{
-}
-
-#endif /* CONFIG_B43_NPHY */
 #endif /* B43_NPHY_H_ */
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 34ae125..7137537 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -24,6 +24,7 @@
 
 #include "rfkill.h"
 #include "b43.h"
+#include "phy_common.h"
 
 #include <linux/kmod.h>
 
@@ -96,11 +97,11 @@
 			goto out_unlock;
 		}
 		if (!dev->phy.radio_on)
-			b43_radio_turn_on(dev);
+			b43_software_rfkill(dev, state);
 		break;
 	case RFKILL_STATE_SOFT_BLOCKED:
 		if (dev->phy.radio_on)
-			b43_radio_turn_off(dev, 0);
+			b43_software_rfkill(dev, state);
 		break;
 	default:
 		b43warn(wl, "Received unexpected rfkill state %d.\n", state);
@@ -169,6 +170,11 @@
 			"The built-in radio LED will not work.\n");
 #endif /* CONFIG_RFKILL_INPUT */
 
+#if !defined(CONFIG_RFKILL_INPUT) && !defined(CONFIG_RFKILL_INPUT_MODULE)
+	b43warn(wl, "The rfkill-input subsystem is not available. "
+		"The built-in radio LED will not work.\n");
+#endif
+
 	err = input_register_polled_device(rfk->poll_dev);
 	if (err)
 		goto err_unreg_rfk;
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 275095b..5adaa36 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -29,7 +29,7 @@
 #include "b43.h"
 #include "sysfs.h"
 #include "main.h"
-#include "phy.h"
+#include "phy_common.h"
 
 #define GENERIC_FILESIZE	64
 
@@ -59,7 +59,12 @@
 
 	mutex_lock(&wldev->wl->mutex);
 
-	switch (wldev->phy.interfmode) {
+	if (wldev->phy.type != B43_PHYTYPE_G) {
+		mutex_unlock(&wldev->wl->mutex);
+		return -ENOSYS;
+	}
+
+	switch (wldev->phy.g->interfmode) {
 	case B43_INTERFMODE_NONE:
 		count =
 		    snprintf(buf, PAGE_SIZE,
@@ -117,11 +122,15 @@
 	mutex_lock(&wldev->wl->mutex);
 	spin_lock_irqsave(&wldev->wl->irq_lock, flags);
 
-	err = b43_radio_set_interference_mitigation(wldev, mode);
-	if (err) {
-		b43err(wldev->wl, "Interference Mitigation not "
-		       "supported by device\n");
-	}
+	if (wldev->phy.ops->interf_mitigation) {
+		err = wldev->phy.ops->interf_mitigation(wldev, mode);
+		if (err) {
+			b43err(wldev->wl, "Interference Mitigation not "
+			       "supported by device\n");
+		}
+	} else
+		err = -ENOSYS;
+
 	mmiowb();
 	spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
 	mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c
index 3f5ea06..1ef9a64 100644
--- a/drivers/net/wireless/b43/tables.c
+++ b/drivers/net/wireless/b43/tables.c
@@ -27,7 +27,8 @@
 
 #include "b43.h"
 #include "tables.h"
-#include "phy.h"
+#include "phy_g.h"
+
 
 const u32 b43_tab_rotor[] = {
 	0xFEB93FFD, 0xFEC63FFD,	/* 0 */
@@ -377,17 +378,17 @@
 
 u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	u16 addr;
 
 	addr = table + offset;
-	if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
-	    (addr - 1 != phy->ofdmtab_addr)) {
+	if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
+	    (addr - 1 != gphy->ofdmtab_addr)) {
 		/* The hardware has a different address in memory. Update it. */
 		b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
-		phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
+		gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
 	}
-	phy->ofdmtab_addr = addr;
+	gphy->ofdmtab_addr = addr;
 
 	return b43_phy_read(dev, B43_PHY_OTABLEI);
 
@@ -398,34 +399,34 @@
 void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
 			 u16 offset, u16 value)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	u16 addr;
 
 	addr = table + offset;
-	if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
-	    (addr - 1 != phy->ofdmtab_addr)) {
+	if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
+	    (addr - 1 != gphy->ofdmtab_addr)) {
 		/* The hardware has a different address in memory. Update it. */
 		b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
-		phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
+		gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
 	}
-	phy->ofdmtab_addr = addr;
+	gphy->ofdmtab_addr = addr;
 	b43_phy_write(dev, B43_PHY_OTABLEI, value);
 }
 
 u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	u32 ret;
 	u16 addr;
 
 	addr = table + offset;
-	if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
-	    (addr - 1 != phy->ofdmtab_addr)) {
+	if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
+	    (addr - 1 != gphy->ofdmtab_addr)) {
 		/* The hardware has a different address in memory. Update it. */
 		b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
-		phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
+		gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
 	}
-	phy->ofdmtab_addr = addr;
+	gphy->ofdmtab_addr = addr;
 	ret = b43_phy_read(dev, B43_PHY_OTABLEQ);
 	ret <<= 16;
 	ret |= b43_phy_read(dev, B43_PHY_OTABLEI);
@@ -436,17 +437,17 @@
 void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
 			 u16 offset, u32 value)
 {
-	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = dev->phy.g;
 	u16 addr;
 
 	addr = table + offset;
-	if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
-	    (addr - 1 != phy->ofdmtab_addr)) {
+	if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
+	    (addr - 1 != gphy->ofdmtab_addr)) {
 		/* The hardware has a different address in memory. Update it. */
 		b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
-		phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
+		gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
 	}
-	phy->ofdmtab_addr = addr;
+	gphy->ofdmtab_addr = addr;
 
 	b43_phy_write(dev, B43_PHY_OTABLEI, value);
 	b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16));
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 2aa5755..4e23363 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -24,8 +24,8 @@
 
 #include "b43.h"
 #include "tables_nphy.h"
-#include "phy.h"
-#include "nphy.h"
+#include "phy_common.h"
+#include "phy_n.h"
 
 
 struct b2055_inittab_entry {
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index daa9421..0c0fb15 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -27,7 +27,7 @@
 #include "b43.h"
 #include "main.h"
 #include "tables.h"
-#include "phy.h"
+#include "phy_common.h"
 #include "wa.h"
 
 static void b43_wa_papd(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 9dda816..5e0b71c 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -28,7 +28,7 @@
 */
 
 #include "xmit.h"
-#include "phy.h"
+#include "phy_common.h"
 #include "dma.h"
 #include "pio.h"
 
@@ -431,6 +431,7 @@
 			       int adjust_2053, int adjust_2050)
 {
 	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_g *gphy = phy->g;
 	s32 tmp;
 
 	switch (phy->radio_ver) {
@@ -450,7 +451,8 @@
 			    boardflags_lo & B43_BFL_RSSI) {
 				if (in_rssi > 63)
 					in_rssi = 63;
-				tmp = phy->nrssi_lt[in_rssi];
+				B43_WARN_ON(phy->type != B43_PHYTYPE_G);
+				tmp = gphy->nrssi_lt[in_rssi];
 				tmp = 31 - tmp;
 				tmp *= -131;
 				tmp /= 128;
@@ -678,6 +680,8 @@
 		b43_pio_handle_txstatus(dev, status);
 	else
 		b43_dma_handle_txstatus(dev, status);
+
+	b43_phy_txpower_check(dev, 0);
 }
 
 /* Fill out the mac80211 TXstatus report based on the b43-specific
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1cb77db..9fb1421 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -888,13 +888,13 @@
 
 static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
 {
-	if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) {
+	if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
 		/* TODO: PS TBTT */
 	} else {
 		if (1/*FIXME: the last PSpoll frame was sent successfully */)
 			b43legacy_power_saving_ctl_bits(dev, -1, -1);
 	}
-	if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS))
+	if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
 		dev->dfq_valid = 1;
 }
 
@@ -1201,7 +1201,7 @@
 	struct b43legacy_wl *wl = dev->wl;
 	u32 cmd;
 
-	if (!b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP))
+	if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
 		return;
 
 	/* This is the bottom half of the asynchronous beacon update. */
@@ -1936,9 +1936,9 @@
 	ctl &= ~B43legacy_MACCTL_BEACPROMISC;
 	ctl |= B43legacy_MACCTL_INFRA;
 
-	if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP))
+	if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
 		ctl |= B43legacy_MACCTL_AP;
-	else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS))
+	else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))
 		ctl &= ~B43legacy_MACCTL_INFRA;
 
 	if (wl->filter_flags & FIF_CONTROL)
@@ -2646,7 +2646,7 @@
 	b43legacy_mgmtframe_txantenna(dev, antenna_tx);
 
 	/* Update templates for AP mode. */
-	if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP))
+	if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
 		b43legacy_set_beacon_int(dev, conf->beacon_int);
 
 
@@ -2733,12 +2733,12 @@
 	else
 		memset(wl->bssid, 0, ETH_ALEN);
 	if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
-		if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) {
-			B43legacy_WARN_ON(vif->type != IEEE80211_IF_TYPE_AP);
+		if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) {
+			B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP);
 			b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len);
 			if (conf->changed & IEEE80211_IFCC_BEACON)
 				b43legacy_update_templates(wl);
-		} else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) {
+		} else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
 			if (conf->changed & IEEE80211_IFCC_BEACON)
 				b43legacy_update_templates(wl);
 		}
@@ -3020,7 +3020,7 @@
 					  bool idle) {
 	u16 pu_delay = 1050;
 
-	if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle)
+	if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
 		pu_delay = 500;
 	if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
 		pu_delay = max(pu_delay, (u16)2400);
@@ -3035,7 +3035,7 @@
 	u16 pretbtt;
 
 	/* The time value is in microseconds. */
-	if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS))
+	if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
 		pretbtt = 2;
 	else
 		pretbtt = 250;
@@ -3259,10 +3259,10 @@
 
 	/* TODO: allow WDS/AP devices to coexist */
 
-	if (conf->type != IEEE80211_IF_TYPE_AP &&
-	    conf->type != IEEE80211_IF_TYPE_STA &&
-	    conf->type != IEEE80211_IF_TYPE_WDS &&
-	    conf->type != IEEE80211_IF_TYPE_IBSS)
+	if (conf->type != NL80211_IFTYPE_AP &&
+	    conf->type != NL80211_IFTYPE_STATION &&
+	    conf->type != NL80211_IFTYPE_WDS &&
+	    conf->type != NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&wl->mutex);
@@ -3403,7 +3403,7 @@
 }
 
 static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
-				       int aid, int set)
+				       struct ieee80211_sta *sta, bool set)
 {
 	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
 	unsigned long flags;
@@ -3704,6 +3704,11 @@
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
 		    IEEE80211_HW_SIGNAL_DBM |
 		    IEEE80211_HW_NOISE_DBM;
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_WDS) |
+		BIT(NL80211_IFTYPE_ADHOC);
 	hw->queues = 1; /* FIXME: hardware has more queues */
 	SET_IEEE80211_DEV(hw, dev->dev);
 	if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 768cccb..4c9442b 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -103,7 +103,7 @@
 	if (dev->dev->id.revision < 3) {
 		b43legacy_mac_suspend(dev);
 	} else {
-		if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
+		if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
 			b43legacy_power_saving_ctl_bits(dev, -1, 1);
 	}
 }
@@ -118,7 +118,7 @@
 	if (dev->dev->id.revision < 3) {
 		b43legacy_mac_enable(dev);
 	} else {
-		if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
+		if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
 			b43legacy_power_saving_ctl_bits(dev, -1, -1);
 	}
 }
@@ -595,12 +595,14 @@
 				    0x0035) & 0xFFC0) | 0x0064);
 		b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
 				    0x005D) & 0xFF80) | 0x000A);
+		b43legacy_phy_write(dev, 0x5B, 0x0000);
+		b43legacy_phy_write(dev, 0x5C, 0x0000);
 	}
 
 	if (dev->bad_frames_preempt)
 		b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD,
 				    b43legacy_phy_read(dev,
-				    B43legacy_PHY_RADIO_BITFIELD) | (1 << 11));
+				    B43legacy_PHY_RADIO_BITFIELD) | (1 << 12));
 
 	if (phy->analog == 1) {
 		b43legacy_phy_write(dev, 0x0026, 0xCE00);
@@ -753,7 +755,7 @@
 		b43legacy_radio_write16(dev, 0x0050, 0x0020);
 	}
 	if (phy->radio_rev <= 2) {
-		b43legacy_radio_write16(dev, 0x007C, 0x0020);
+		b43legacy_radio_write16(dev, 0x0050, 0x0020);
 		b43legacy_radio_write16(dev, 0x005A, 0x0070);
 		b43legacy_radio_write16(dev, 0x005B, 0x007B);
 		b43legacy_radio_write16(dev, 0x005C, 0x00B0);
@@ -771,7 +773,7 @@
 		b43legacy_phy_write(dev, 0x002A, 0x8AC0);
 	b43legacy_phy_write(dev, 0x0038, 0x0668);
 	b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF);
-	if (phy->radio_rev <= 5)
+	if (phy->radio_rev == 4 || phy->radio_rev == 5)
 		b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
 				    0x005D) & 0xFF80) | 0x0003);
 	if (phy->radio_rev <= 2)
@@ -1010,7 +1012,7 @@
 		b43legacy_phy_initb5(dev);
 	else
 		b43legacy_phy_initb6(dev);
-	if (phy->rev >= 2 || phy->gmode)
+	if (phy->rev >= 2 && phy->gmode)
 		b43legacy_phy_inita(dev);
 
 	if (phy->rev >= 2) {
@@ -1025,18 +1027,22 @@
 		b43legacy_phy_write(dev, 0x0811, 0x0400);
 		b43legacy_phy_write(dev, 0x0015, 0x00C0);
 	}
-	if (phy->rev >= 2 || phy->gmode) {
+	if (phy->gmode) {
 		tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF;
-		if (tmp == 3 || tmp == 5) {
+		if (tmp == 3) {
+			b43legacy_phy_write(dev, 0x04C2, 0x1816);
+			b43legacy_phy_write(dev, 0x04C3, 0x8606);
+		}
+		if (tmp == 4 || tmp == 5) {
 			b43legacy_phy_write(dev, 0x04C2, 0x1816);
 			b43legacy_phy_write(dev, 0x04C3, 0x8006);
-			if (tmp == 5)
-				b43legacy_phy_write(dev, 0x04CC,
-						    (b43legacy_phy_read(dev,
-						     0x04CC) & 0x00FF) |
-						     0x1F00);
+			b43legacy_phy_write(dev, 0x04CC,
+					    (b43legacy_phy_read(dev,
+					     0x04CC) & 0x00FF) |
+					     0x1F00);
 		}
-		b43legacy_phy_write(dev, 0x047E, 0x0078);
+		if (phy->rev >= 2)
+			b43legacy_phy_write(dev, 0x047E, 0x0078);
 	}
 	if (phy->radio_rev == 8) {
 		b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801)
@@ -1078,7 +1084,7 @@
 		else
 			b43legacy_phy_write(dev, 0x002F, 0x0202);
 	}
-	if (phy->gmode || phy->rev >= 2) {
+	if (phy->gmode) {
 		b43legacy_phy_lo_adjust(dev, 0);
 		b43legacy_phy_write(dev, 0x080F, 0x8078);
 	}
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 68e1f8c..6835064 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -193,7 +193,6 @@
 {
 	const struct ieee80211_hdr *wlhdr;
 	int use_encryption = !!info->control.hw_key;
-	u16 fctl;
 	u8 rate;
 	struct ieee80211_rate *rate_fb;
 	int rate_ofdm;
@@ -204,7 +203,6 @@
 	struct ieee80211_rate *tx_rate;
 
 	wlhdr = (const struct ieee80211_hdr *)fragment_data;
-	fctl = le16_to_cpu(wlhdr->frame_control);
 
 	memset(txhdr, 0, sizeof(*txhdr));
 
@@ -253,7 +251,7 @@
 			mac_ctl |= (key->algorithm <<
 				   B43legacy_TX4_MAC_KEYALG_SHIFT) &
 				   B43legacy_TX4_MAC_KEYALG;
-			wlhdr_len = ieee80211_get_hdrlen(fctl);
+			wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
 			iv_len = min((size_t)info->control.iv_len,
 				     ARRAY_SIZE(txhdr->iv));
 			memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
@@ -626,7 +624,7 @@
 	tmp = hw->count;
 	status.frame_count = (tmp >> 4);
 	status.rts_count = (tmp & 0x0F);
-	tmp = hw->flags;
+	tmp = hw->flags << 1;
 	status.supp_reason = ((tmp & 0x1C) >> 2);
 	status.pm_indicated = !!(tmp & 0x80);
 	status.intermediate = !!(tmp & 0x40);
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 29d3910..bfa3753 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -87,7 +87,8 @@
 
    Callable from any context.
 */
-static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
+static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
+			    u16 param1, u16 param2)
 {
 	int k = CMD_BUSY_TIMEOUT;
 	u16 reg;
@@ -103,8 +104,8 @@
 		return -EBUSY;
 	}
 
-	hermes_write_regn(hw, PARAM2, 0);
-	hermes_write_regn(hw, PARAM1, 0);
+	hermes_write_regn(hw, PARAM2, param2);
+	hermes_write_regn(hw, PARAM1, param1);
 	hermes_write_regn(hw, PARAM0, param0);
 	hermes_write_regn(hw, CMD, cmd);
 	
@@ -115,16 +116,72 @@
  * Function definitions
  */
 
+/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
+int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+		       u16 parm0, u16 parm1, u16 parm2,
+		       struct hermes_response *resp)
+{
+	int err = 0;
+	int k;
+	u16 status, reg;
+
+	err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2);
+	if (err)
+		return err;
+
+	reg = hermes_read_regn(hw, EVSTAT);
+	k = CMD_INIT_TIMEOUT;
+	while ((!(reg & HERMES_EV_CMD)) && k) {
+		k--;
+		udelay(10);
+		reg = hermes_read_regn(hw, EVSTAT);
+	}
+
+	hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+
+	if (!hermes_present(hw)) {
+		DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
+		       hw->iobase);
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (!(reg & HERMES_EV_CMD)) {
+		printk(KERN_ERR "hermes @ %p: "
+		       "Timeout waiting for card to reset (reg=0x%04x)!\n",
+		       hw->iobase, reg);
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	status = hermes_read_regn(hw, STATUS);
+	if (resp) {
+		resp->status = status;
+		resp->resp0 = hermes_read_regn(hw, RESP0);
+		resp->resp1 = hermes_read_regn(hw, RESP1);
+		resp->resp2 = hermes_read_regn(hw, RESP2);
+	}
+
+	hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+	if (status & HERMES_STATUS_RESULT)
+		err = -EIO;
+out:
+	return err;
+}
+EXPORT_SYMBOL(hermes_doicmd_wait);
+
 void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
 {
 	hw->iobase = address;
 	hw->reg_spacing = reg_spacing;
 	hw->inten = 0x0;
 }
+EXPORT_SYMBOL(hermes_struct_init);
 
 int hermes_init(hermes_t *hw)
 {
-	u16 status, reg;
+	u16 reg;
 	int err = 0;
 	int k;
 
@@ -162,45 +219,11 @@
 
 	/* We don't use hermes_docmd_wait here, because the reset wipes
 	   the magic constant in SWSUPPORT0 away, and it gets confused */
-	err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0);
-	if (err)
-		return err;
+	err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL);
 
-	reg = hermes_read_regn(hw, EVSTAT);
-	k = CMD_INIT_TIMEOUT;
-	while ( (! (reg & HERMES_EV_CMD)) && k) {
-		k--;
-		udelay(10);
-		reg = hermes_read_regn(hw, EVSTAT);
-	}
-
-	hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
-
-	if (! hermes_present(hw)) {
-		DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
-		       hw->iobase);
-		err = -ENODEV;
-		goto out;
-	}
-		
-	if (! (reg & HERMES_EV_CMD)) {
-		printk(KERN_ERR "hermes @ %p: " 
-		       "Timeout waiting for card to reset (reg=0x%04x)!\n",
-		       hw->iobase, reg);
-		err = -ETIMEDOUT;
-		goto out;
-	}
-
-	status = hermes_read_regn(hw, STATUS);
-
-	hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
-
-	if (status & HERMES_STATUS_RESULT)
-		err = -EIO;
-
- out:
 	return err;
 }
+EXPORT_SYMBOL(hermes_init);
 
 /* Issue a command to the chip, and (busy!) wait for it to
  * complete.
@@ -216,7 +239,7 @@
 	u16 reg;
 	u16 status;
 
-	err = hermes_issue_cmd(hw, cmd, parm0);
+	err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
 	if (err) {
 		if (! hermes_present(hw)) {
 			if (net_ratelimit())
@@ -271,6 +294,7 @@
  out:
 	return err;
 }
+EXPORT_SYMBOL(hermes_docmd_wait);
 
 int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
 {
@@ -313,7 +337,7 @@
 	
 	return 0;
 }
-
+EXPORT_SYMBOL(hermes_allocate);
 
 /* Set up a BAP to read a particular chunk of data from card's internal buffer.
  *
@@ -397,6 +421,7 @@
  out:
 	return err;
 }
+EXPORT_SYMBOL(hermes_bap_pread);
 
 /* Write a block of data to the chip's buffer, via the
  * BAP. Synchronization/serialization is the caller's problem.
@@ -422,6 +447,7 @@
  out:	
 	return err;
 }
+EXPORT_SYMBOL(hermes_bap_pwrite);
 
 /* Read a Length-Type-Value record from the card.
  *
@@ -463,7 +489,7 @@
 	if (rtype != rid)
 		printk(KERN_WARNING "hermes @ %p: %s(): "
 		       "rid (0x%04x) does not match type (0x%04x)\n",
-		       hw->iobase, __FUNCTION__, rid, rtype);
+		       hw->iobase, __func__, rid, rtype);
 	if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
 		printk(KERN_WARNING "hermes @ %p: "
 		       "Truncating LTV record from %d to %d bytes. "
@@ -475,6 +501,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(hermes_read_ltv);
 
 int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 
 		     u16 length, const void *value)
@@ -497,20 +524,11 @@
 
 	hermes_write_bytes(hw, dreg, value, count << 1);
 
-	err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, 
+	err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
 				rid, NULL);
 
 	return err;
 }
-
-EXPORT_SYMBOL(hermes_struct_init);
-EXPORT_SYMBOL(hermes_init);
-EXPORT_SYMBOL(hermes_docmd_wait);
-EXPORT_SYMBOL(hermes_allocate);
-
-EXPORT_SYMBOL(hermes_bap_pread);
-EXPORT_SYMBOL(hermes_bap_pwrite);
-EXPORT_SYMBOL(hermes_read_ltv);
 EXPORT_SYMBOL(hermes_write_ltv);
 
 static int __init init_hermes(void)
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 8e3f0e3..8b13c8f 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -179,17 +179,23 @@
 #define HERMES_802_11_OFFSET		(14)
 #define HERMES_802_3_OFFSET		(14+32)
 #define HERMES_802_2_OFFSET		(14+32+14)
+#define HERMES_TXCNTL2_OFFSET		(HERMES_802_3_OFFSET - 2)
 
 #define HERMES_RXSTAT_ERR		(0x0003)
 #define	HERMES_RXSTAT_BADCRC		(0x0001)
 #define	HERMES_RXSTAT_UNDECRYPTABLE	(0x0002)
+#define	HERMES_RXSTAT_MIC		(0x0010)	/* Frame contains MIC */
 #define	HERMES_RXSTAT_MACPORT		(0x0700)
 #define HERMES_RXSTAT_PCF		(0x1000)	/* Frame was received in CF period */
+#define	HERMES_RXSTAT_MIC_KEY_ID	(0x1800)	/* MIC key used */
 #define	HERMES_RXSTAT_MSGTYPE		(0xE000)
 #define	HERMES_RXSTAT_1042		(0x2000)	/* RFC-1042 frame */
 #define	HERMES_RXSTAT_TUNNEL		(0x4000)	/* bridge-tunnel encoded frame */
 #define	HERMES_RXSTAT_WMP		(0x6000)	/* Wavelan-II Management Protocol frame */
 
+/* Shift amount for key ID in RXSTAT and TXCTRL */
+#define	HERMES_MIC_KEY_ID_SHIFT		11
+
 struct hermes_tx_descriptor {
 	__le16 status;
 	__le16 reserved1;
@@ -208,6 +214,8 @@
 #define HERMES_TXCTRL_TX_OK		(0x0002)	/* ?? interrupt on Tx complete */
 #define HERMES_TXCTRL_TX_EX		(0x0004)	/* ?? interrupt on Tx exception */
 #define HERMES_TXCTRL_802_11		(0x0008)	/* We supply 802.11 header */
+#define HERMES_TXCTRL_MIC		(0x0010)	/* 802.3 + TKIP */
+#define HERMES_TXCTRL_MIC_KEY_ID	(0x1800)	/* MIC Key ID mask */
 #define HERMES_TXCTRL_ALT_RTRY		(0x0020)
 
 /* Inquiry constants and data types */
@@ -302,6 +310,40 @@
 	struct symbol_scan_apinfo	s;
 };
 
+/* Extended scan struct for HERMES_INQ_CHANNELINFO.
+ * wl_lkm calls this an ACS scan (Automatic Channel Select).
+ * Keep out of union hermes_scan_info because it is much bigger than
+ * the older scan structures. */
+struct agere_ext_scan_info {
+	__le16	reserved0;
+
+	u8	noise;
+	u8	level;
+	u8	rx_flow;
+	u8	rate;
+	__le16	reserved1[2];
+
+	__le16	frame_control;
+	__le16	dur_id;
+	u8	addr1[ETH_ALEN];
+	u8	addr2[ETH_ALEN];
+	u8	bssid[ETH_ALEN];
+	__le16	sequence;
+	u8	addr4[ETH_ALEN];
+
+	__le16	data_length;
+
+	/* Next 3 fields do not get filled in. */
+	u8	daddr[ETH_ALEN];
+	u8	saddr[ETH_ALEN];
+	__le16	len_type;
+
+	__le64	timestamp;
+	__le16	beacon_interval;
+	__le16	capabilities;
+	u8	data[316];
+} __attribute__ ((packed));
+
 #define HERMES_LINKSTATUS_NOT_CONNECTED   (0x0000)  
 #define HERMES_LINKSTATUS_CONNECTED       (0x0001)
 #define HERMES_LINKSTATUS_DISCONNECTED    (0x0002)
@@ -353,6 +395,9 @@
 int hermes_init(hermes_t *hw);
 int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
 		      struct hermes_response *resp);
+int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+		       u16 parm0, u16 parm1, u16 parm2,
+		       struct hermes_response *resp);
 int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
 
 int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
diff --git a/drivers/net/wireless/hermes_dld.c b/drivers/net/wireless/hermes_dld.c
new file mode 100644
index 0000000..d8c626e
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.c
@@ -0,0 +1,730 @@
+/*
+ * Hermes download helper driver.
+ *
+ * This could be entirely merged into hermes.c.
+ *
+ * I'm keeping it separate to minimise the amount of merging between
+ * kernel upgrades. It also keeps the memory overhead low for drivers
+ * that don't need firmware download.
+ *
+ * This driver:
+ *  - is capable of writing to the volatile area of the hermes device
+ *  - is currently not capable of writing to non-volatile areas
+ *  - provides helpers to identify and update plug data
+ *  - is not capable of interpreting a fw image directly. That is up to
+ *    the main card driver.
+ *  - deals with Hermes I devices. It can probably be modified to deal
+ *    with Hermes II devices
+ *
+ * Copyright (C) 2007, David Kilroy
+ *
+ * Plug data code slightly modified from spectrum_cs driver
+ *    Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
+ * Portions based on information in wl_lkm_718 Agere driver
+ *    COPYRIGHT (C) 2001-2004 by Agere Systems Inc. All Rights Reserved
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above.  If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL.  If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "hermes.h"
+#include "hermes_dld.h"
+
+MODULE_DESCRIPTION("Download helper for Lucent Hermes chipset");
+MODULE_AUTHOR("David Kilroy <kilroyd@gmail.com>");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define PFX "hermes_dld: "
+
+/*
+ * AUX port access.  To unlock the AUX port write the access keys to the
+ * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
+ * register.  Then read it and make sure it's HERMES_AUX_ENABLED.
+ */
+#define HERMES_AUX_ENABLE	0x8000	/* Enable auxiliary port access */
+#define HERMES_AUX_DISABLE	0x4000	/* Disable auxiliary port access */
+#define HERMES_AUX_ENABLED	0xC000	/* Auxiliary port is open */
+#define HERMES_AUX_DISABLED	0x0000	/* Auxiliary port is closed */
+
+#define HERMES_AUX_PW0	0xFE01
+#define HERMES_AUX_PW1	0xDC23
+#define HERMES_AUX_PW2	0xBA45
+
+/* HERMES_CMD_DOWNLD */
+#define HERMES_PROGRAM_DISABLE             (0x0000 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_VOLATILE     (0x0100 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_NON_VOLATILE        (0x0300 | HERMES_CMD_DOWNLD)
+
+/* End markers used in dblocks */
+#define PDI_END		0x00000000	/* End of PDA */
+#define BLOCK_END	0xFFFFFFFF	/* Last image block */
+#define TEXT_END	0x1A		/* End of text header */
+
+/*
+ * PDA == Production Data Area
+ *
+ * In principle, the max. size of the PDA is 4096 words. Currently,
+ * however, only about 500 bytes of this area are used.
+ *
+ * Some USB implementations can't handle sizes in excess of 1016. Note
+ * that PDA is not actually used in those USB environments, but may be
+ * retrieved by common code.
+ */
+#define MAX_PDA_SIZE	1000
+
+/* Limit the amount we try to download in a single shot.
+ * Size is in bytes.
+ */
+#define MAX_DL_SIZE 1024
+#define LIMIT_PROGRAM_SIZE 0
+
+/*
+ * The following structures have little-endian fields denoted by
+ * the leading underscore.  Don't access them directly - use inline
+ * functions defined below.
+ */
+
+/*
+ * The binary image to be downloaded consists of series of data blocks.
+ * Each block has the following structure.
+ */
+struct dblock {
+	__le32 addr;		/* adapter address where to write the block */
+	__le16 len;		/* length of the data only, in bytes */
+	char data[0];		/* data to be written */
+} __attribute__ ((packed));
+
+/*
+ * Plug Data References are located in the image after the last data
+ * block.  They refer to areas in the adapter memory where the plug data
+ * items with matching ID should be written.
+ */
+struct pdr {
+	__le32 id;		/* record ID */
+	__le32 addr;		/* adapter address where to write the data */
+	__le32 len;		/* expected length of the data, in bytes */
+	char next[0];		/* next PDR starts here */
+} __attribute__ ((packed));
+
+/*
+ * Plug Data Items are located in the EEPROM read from the adapter by
+ * primary firmware.  They refer to the device-specific data that should
+ * be plugged into the secondary firmware.
+ */
+struct pdi {
+	__le16 len;		/* length of ID and data, in words */
+	__le16 id;		/* record ID */
+	char data[0];		/* plug data */
+} __attribute__ ((packed));
+
+/*** FW data block access functions ***/
+
+static inline u32
+dblock_addr(const struct dblock *blk)
+{
+	return le32_to_cpu(blk->addr);
+}
+
+static inline u32
+dblock_len(const struct dblock *blk)
+{
+	return le16_to_cpu(blk->len);
+}
+
+/*** PDR Access functions ***/
+
+static inline u32
+pdr_id(const struct pdr *pdr)
+{
+	return le32_to_cpu(pdr->id);
+}
+
+static inline u32
+pdr_addr(const struct pdr *pdr)
+{
+	return le32_to_cpu(pdr->addr);
+}
+
+static inline u32
+pdr_len(const struct pdr *pdr)
+{
+	return le32_to_cpu(pdr->len);
+}
+
+/*** PDI Access functions ***/
+
+static inline u32
+pdi_id(const struct pdi *pdi)
+{
+	return le16_to_cpu(pdi->id);
+}
+
+/* Return length of the data only, in bytes */
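+/* (pdi->len counts 16-bit words and includes the record ID word but not
+ *  itself, hence the "- 1" and the factor of two below.) */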
+static inline u32
+pdi_len(const struct pdi *pdi)
+{
+	return 2 * (le16_to_cpu(pdi->len) - 1);
+}
+
+/*** Hermes AUX control ***/
+
+static inline void
+hermes_aux_setaddr(hermes_t *hw, u32 addr)
+{
+	hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
+	hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
+}
+
+static inline int
+hermes_aux_control(hermes_t *hw, int enabled)
+{
+	int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
+	int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
+	int i;
+
+	/* Already open? */
+	if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
+		return 0;
+
+	hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
+	hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
+	hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
+	hermes_write_reg(hw, HERMES_CONTROL, action);
+
+	for (i = 0; i < 20; i++) {
+		udelay(10);
+		if (hermes_read_reg(hw, HERMES_CONTROL) ==
+		    desired_state)
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
+/*** Plug Data Functions ***/
+
+/*
+ * Scan PDR for the record with the specified RECORD_ID.
+ * If it's not found, return NULL.
+ */
+static struct pdr *
+hermes_find_pdr(struct pdr *first_pdr, u32 record_id)
+{
+	struct pdr *pdr = first_pdr;
+	void *end = (void *)first_pdr + MAX_PDA_SIZE;
+
+	while (((void *)pdr < end) &&
+	       (pdr_id(pdr) != PDI_END)) {
+		/*
+		 * PDR area is currently not terminated by PDI_END.
+		 * It's followed by CRC records, which have the type
+		 * field where PDR has length.  The type can be 0 or 1.
+		 */
+		if (pdr_len(pdr) < 2)
+			return NULL;
+
+		/* If the record ID matches, we are done */
+		if (pdr_id(pdr) == record_id)
+			return pdr;
+
+		pdr = (struct pdr *) pdr->next;
+	}
+	return NULL;
+}
+
+/* Scan production data items for a particular entry */
+static struct pdi *
+hermes_find_pdi(struct pdi *first_pdi, u32 record_id)
+{
+	struct pdi *pdi = first_pdi;
+
+	while (pdi_id(pdi) != PDI_END) {
+
+		/* If the record ID matches, we are done */
+		if (pdi_id(pdi) == record_id)
+			return pdi;
+
+		pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
+	}
+	return NULL;
+}
+
+/* Process one Plug Data Item - find corresponding PDR and plug it */
+static int
+hermes_plug_pdi(hermes_t *hw, struct pdr *first_pdr, const struct pdi *pdi)
+{
+	struct pdr *pdr;
+
+	/* Find the PDR corresponding to this PDI */
+	pdr = hermes_find_pdr(first_pdr, pdi_id(pdi));
+
+	/* No match found, safe to ignore */
+	if (!pdr)
+		return 0;
+
+	/* Lengths of the data in PDI and PDR must match */
+	if (pdi_len(pdi) != pdr_len(pdr))
+		return -EINVAL;
+
+	/* do the actual plugging */
+	hermes_aux_setaddr(hw, pdr_addr(pdr));
+	hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
+
+	return 0;
+}
+
+/* Read PDA from the adapter */
+int hermes_read_pda(hermes_t *hw,
+		    __le16 *pda,
+		    u32 pda_addr,
+		    u16 pda_len,
+		    int use_eeprom) /* can we get this into hw? */
+{
+	int ret;
+	u16 pda_size;
+	u16 data_len = pda_len;
+	__le16 *data = pda;
+
+	if (use_eeprom) {
+		/* PDA of spectrum symbol is in eeprom */
+
+		/* Issue command to read EEPROM */
+		ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
+		if (ret)
+			return ret;
+	} else {
+		/* wl_lkm does not include PDA size in the PDA area.
+		 * We will prepend the information to pda, so other routines
+		 * don't have to be modified */
+		pda[0] = cpu_to_le16(pda_len - 2);
+			/* Includes CFG_PROD_DATA but not itself */
+		pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+		data_len = pda_len - 4;
+		data = pda + 2;
+	}
+
+	/* Open auxiliary port */
+	ret = hermes_aux_control(hw, 1);
+	printk(KERN_DEBUG PFX "AUX enable returned %d\n", ret);
+	if (ret)
+		return ret;
+
+	/* read PDA from EEPROM */
+	hermes_aux_setaddr(hw, pda_addr);
+	hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
+
+	/* Close aux port */
+	ret = hermes_aux_control(hw, 0);
+	printk(KERN_DEBUG PFX "AUX disable returned %d\n", ret);
+
+	/* Check PDA length */
+	pda_size = le16_to_cpu(pda[0]);
+	printk(KERN_DEBUG PFX "Actual PDA length %d, Max allowed %d\n",
+	       pda_size, pda_len);
+	if (pda_size > pda_len)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(hermes_read_pda);
+
+/* Parse PDA and write the records into the adapter
+ *
+ * Attempt to write every record that is in the specified pda
+ * which also has a valid production data record for the firmware.
+ */
+int hermes_apply_pda(hermes_t *hw,
+		     const char *first_pdr,
+		     const __le16 *pda)
+{
+	int ret;
+	const struct pdi *pdi;
+	struct pdr *pdr;
+
+	pdr = (struct pdr *) first_pdr;
+
+	/* Go through every PDI and plug them into the adapter */
+	pdi = (const struct pdi *) (pda + 2);
+	while (pdi_id(pdi) != PDI_END) {
+		ret = hermes_plug_pdi(hw, pdr, pdi);
+		if (ret)
+			return ret;
+
+		/* Increment to the next PDI */
+		pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)];
+	}
+	return 0;
+}
+EXPORT_SYMBOL(hermes_apply_pda);
+
+/* Identify the total number of bytes in all blocks
+ * including the header data.
+ */
+size_t
+hermes_blocks_length(const char *first_block)
+{
+	const struct dblock *blk = (const struct dblock *) first_block;
+	int total_len = 0;
+	int len;
+
+	/* Skip all blocks to locate Plug Data References
+	 * (Spectrum CS) */
+	while (dblock_addr(blk) != BLOCK_END) {
+		len = dblock_len(blk);
+		total_len += sizeof(*blk) + len;
+		blk = (struct dblock *) &blk->data[len];
+	}
+
+	return total_len;
+}
+EXPORT_SYMBOL(hermes_blocks_length);
+
+/*** Hermes programming ***/
+
+/* About to start programming data (Hermes I)
+ * offset is the entry point
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+int hermesi_program_init(hermes_t *hw, u32 offset)
+{
+	int err;
+
+	/* Disable interrupts?*/
+	/*hw->inten = 0x0;*/
+	/*hermes_write_regn(hw, INTEN, 0);*/
+	/*hermes_set_irqmask(hw, 0);*/
+
+	/* Acknowledge any outstanding command */
+	hermes_write_regn(hw, EVACK, 0xFFFF);
+
+	/* Using doicmd_wait rather than docmd_wait */
+	err = hermes_doicmd_wait(hw,
+				 0x0100 | HERMES_CMD_INIT,
+				 0, 0, 0, NULL);
+	if (err)
+		return err;
+
+	err = hermes_doicmd_wait(hw,
+				 0x0000 | HERMES_CMD_INIT,
+				 0, 0, 0, NULL);
+	if (err)
+		return err;
+
+	err = hermes_aux_control(hw, 1);
+	printk(KERN_DEBUG PFX "AUX enable returned %d\n", err);
+
+	if (err)
+		return err;
+
+	printk(KERN_DEBUG PFX "Enabling volatile, EP 0x%08x\n", offset);
+	err = hermes_doicmd_wait(hw,
+				 HERMES_PROGRAM_ENABLE_VOLATILE,
+				 offset & 0xFFFFu,
+				 offset >> 16,
+				 0,
+				 NULL);
+	printk(KERN_DEBUG PFX "PROGRAM_ENABLE returned %d\n",
+	       err);
+
+	return err;
+}
+EXPORT_SYMBOL(hermesi_program_init);
+
+/* Done programming data (Hermes I)
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+int hermesi_program_end(hermes_t *hw)
+{
+	struct hermes_response resp;
+	int rc = 0;
+	int err;
+
+	rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
+
+	printk(KERN_DEBUG PFX "PROGRAM_DISABLE returned %d, "
+	       "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
+	       rc, resp.resp0, resp.resp1, resp.resp2);
+
+	if ((rc == 0) &&
+	    ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
+		rc = -EIO;
+
+	err = hermes_aux_control(hw, 0);
+	printk(KERN_DEBUG PFX "AUX disable returned %d\n", err);
+
+	/* Acknowledge any outstanding command */
+	hermes_write_regn(hw, EVACK, 0xFFFF);
+
+	/* Reinitialise, ignoring return */
+	(void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
+				  0, 0, 0, NULL);
+
+	return rc ? rc : err;
+}
+EXPORT_SYMBOL(hermesi_program_end);
+
+/* Program the data blocks */
+int hermes_program(hermes_t *hw, const char *first_block, const char *end)
+{
+	const struct dblock *blk;
+	u32 blkaddr;
+	u32 blklen;
+#if LIMIT_PROGRAM_SIZE
+	u32 addr;
+	u32 len;
+#endif
+
+	blk = (const struct dblock *) first_block;
+
+	if ((const char *) blk > (end - sizeof(*blk)))
+		return -EIO;
+
+	blkaddr = dblock_addr(blk);
+	blklen = dblock_len(blk);
+
+	while ((blkaddr != BLOCK_END) &&
+	       (((const char *) blk + blklen) <= end)) {
+		printk(KERN_DEBUG PFX
+		       "Programming block of length %d to address 0x%08x\n",
+		       blklen, blkaddr);
+
+#if !LIMIT_PROGRAM_SIZE
+		/* wl_lkm driver splits this into writes of 2000 bytes */
+		hermes_aux_setaddr(hw, blkaddr);
+		hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
+				   blklen);
+#else
+		len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
+		addr = blkaddr;
+
+		while (addr < (blkaddr + blklen)) {
+			printk(KERN_DEBUG PFX
+			       "Programming subblock of length %d "
+			       "to address 0x%08x. Data @ %p\n",
+			       len, addr, &blk->data[addr - blkaddr]);
+
+			hermes_aux_setaddr(hw, addr);
+			hermes_write_bytes(hw, HERMES_AUXDATA,
+					   &blk->data[addr - blkaddr],
+					   len);
+
+			addr += len;
+			len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
+				(blkaddr + blklen - addr) : MAX_DL_SIZE;
+		}
+#endif
+		blk = (const struct dblock *) &blk->data[blklen];
+
+		if ((const char *) blk > (end - sizeof(*blk)))
+			return -EIO;
+
+		blkaddr = dblock_addr(blk);
+		blklen = dblock_len(blk);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(hermes_program);
+
+static int __init init_hermes_dld(void)
+{
+	return 0;
+}
+
+static void __exit exit_hermes_dld(void)
+{
+}
+
+module_init(init_hermes_dld);
+module_exit(exit_hermes_dld);
+
+/*** Default plugging data for Hermes I ***/
+/* Values from wl_lkm_718/hcf/dhf.c */
+
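+/* Each default record below is laid out like a struct pdi: the leading
+ * __le16 holds the number of 16-bit words in the id and val fields (the
+ * length word itself is not counted), which is why the plugging code in
+ * hermes_apply_pda_with_defaults() can cast DEFAULT_PDR(x) to a pdi. */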
+#define DEFINE_DEFAULT_PDR(pid, length, data)				\
+static const struct {							\
+	__le16 len;							\
+	__le16 id;							\
+	u8 val[length];							\
+} __attribute__ ((packed)) default_pdr_data_##pid = {			\
+	__constant_cpu_to_le16((sizeof(default_pdr_data_##pid)/		\
+				sizeof(__le16)) - 1),			\
+	__constant_cpu_to_le16(pid),					\
+	data								\
+}
+
+#define DEFAULT_PDR(pid) default_pdr_data_##pid
+
+/*  HWIF Compatibility */
+DEFINE_DEFAULT_PDR(0x0005, 10, "\x00\x00\x06\x00\x01\x00\x01\x00\x01\x00");
+
+/* PPPPSign */
+DEFINE_DEFAULT_PDR(0x0108, 4, "\x00\x00\x00\x00");
+
+/* PPPPProf */
+DEFINE_DEFAULT_PDR(0x0109, 10, "\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00");
+
+/* Antenna diversity */
+DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F");
+
+/* Modem VCO band Set-up */
+DEFINE_DEFAULT_PDR(0x0160, 28,
+		   "\x00\x00\x00\x00\x00\x00\x00\x00"
+		   "\x00\x00\x00\x00\x00\x00\x00\x00"
+		   "\x00\x00\x00\x00\x00\x00\x00\x00"
+		   "\x00\x00\x00\x00");
+
+/* Modem Rx Gain Table Values */
+DEFINE_DEFAULT_PDR(0x0161, 256,
+		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
+		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
+		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
+		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
+		   "\x3F\x01\x3E\01\x3E\x01\x3D\x01"
+		   "\x3D\x01\x3C\01\x3C\x01\x3B\x01"
+		   "\x3B\x01\x3A\01\x3A\x01\x39\x01"
+		   "\x39\x01\x38\01\x38\x01\x37\x01"
+		   "\x37\x01\x36\01\x36\x01\x35\x01"
+		   "\x35\x01\x34\01\x34\x01\x33\x01"
+		   "\x33\x01\x32\x01\x32\x01\x31\x01"
+		   "\x31\x01\x30\x01\x30\x01\x7B\x01"
+		   "\x7B\x01\x7A\x01\x7A\x01\x79\x01"
+		   "\x79\x01\x78\x01\x78\x01\x77\x01"
+		   "\x77\x01\x76\x01\x76\x01\x75\x01"
+		   "\x75\x01\x74\x01\x74\x01\x73\x01"
+		   "\x73\x01\x72\x01\x72\x01\x71\x01"
+		   "\x71\x01\x70\x01\x70\x01\x68\x01"
+		   "\x68\x01\x67\x01\x67\x01\x66\x01"
+		   "\x66\x01\x65\x01\x65\x01\x57\x01"
+		   "\x57\x01\x56\x01\x56\x01\x55\x01"
+		   "\x55\x01\x54\x01\x54\x01\x53\x01"
+		   "\x53\x01\x52\x01\x52\x01\x51\x01"
+		   "\x51\x01\x50\x01\x50\x01\x48\x01"
+		   "\x48\x01\x47\x01\x47\x01\x46\x01"
+		   "\x46\x01\x45\x01\x45\x01\x44\x01"
+		   "\x44\x01\x43\x01\x43\x01\x42\x01"
+		   "\x42\x01\x41\x01\x41\x01\x40\x01"
+		   "\x40\x01\x40\x01\x40\x01\x40\x01"
+		   "\x40\x01\x40\x01\x40\x01\x40\x01"
+		   "\x40\x01\x40\x01\x40\x01\x40\x01"
+		   "\x40\x01\x40\x01\x40\x01\x40\x01");
+
+/* Write PDA according to certain rules.
+ *
+ * For every production data record, look for a previous setting in
+ * the pda, and use that.
+ *
+ * For certain records, use defaults if they are not found in pda.
+ */
+int hermes_apply_pda_with_defaults(hermes_t *hw,
+				   const char *first_pdr,
+				   const __le16 *pda)
+{
+	const struct pdr *pdr = (const struct pdr *) first_pdr;
+	struct pdi *first_pdi = (struct pdi *) &pda[2];
+	struct pdi *pdi;
+	struct pdi *default_pdi = NULL;
+	struct pdi *outdoor_pdi;
+	void *end = (void *)first_pdr + MAX_PDA_SIZE;
+	int record_id;
+
+	while (((void *)pdr < end) &&
+	       (pdr_id(pdr) != PDI_END)) {
+		/*
+		 * For spectrum_cs firmwares,
+		 * PDR area is currently not terminated by PDI_END.
+		 * It's followed by CRC records, which have the type
+		 * field where PDR has length.  The type can be 0 or 1.
+		 */
+		if (pdr_len(pdr) < 2)
+			break;
+		record_id = pdr_id(pdr);
+
+		pdi = hermes_find_pdi(first_pdi, record_id);
+		if (pdi)
+			printk(KERN_DEBUG PFX "Found record 0x%04x at %p\n",
+			       record_id, pdi);
+
+		switch (record_id) {
+		case 0x110: /* Modem REFDAC values */
+		case 0x120: /* Modem VGDAC values */
+			outdoor_pdi = hermes_find_pdi(first_pdi, record_id + 1);
+			default_pdi = NULL;
+			if (outdoor_pdi) {
+				pdi = outdoor_pdi;
+				printk(KERN_DEBUG PFX
+				       "Using outdoor record 0x%04x at %p\n",
+				       record_id + 1, pdi);
+			}
+			break;
+		case 0x5: /*  HWIF Compatibility */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0005);
+			break;
+		case 0x108: /* PPPPSign */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0108);
+			break;
+		case 0x109: /* PPPPProf */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0109);
+			break;
+		case 0x150: /* Antenna diversity */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0150);
+			break;
+		case 0x160: /* Modem VCO band Set-up */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0160);
+			break;
+		case 0x161: /* Modem Rx Gain Table Values */
+			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0161);
+			break;
+		default:
+			default_pdi = NULL;
+			break;
+		}
+		if (!pdi && default_pdi) {
+			/* Use default */
+			pdi = default_pdi;
+			printk(KERN_DEBUG PFX
+			       "Using default record 0x%04x at %p\n",
+			       record_id, pdi);
+		}
+
+		if (pdi) {
+			/* Lengths of the data in PDI and PDR must match */
+			if (pdi_len(pdi) == pdr_len(pdr)) {
+				/* do the actual plugging */
+				hermes_aux_setaddr(hw, pdr_addr(pdr));
+				hermes_write_bytes(hw, HERMES_AUXDATA,
+						   pdi->data, pdi_len(pdi));
+			}
+		}
+
+		pdr++;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(hermes_apply_pda_with_defaults);
diff --git a/drivers/net/wireless/hermes_dld.h b/drivers/net/wireless/hermes_dld.h
new file mode 100644
index 0000000..6fcb262
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2007, David Kilroy
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above.  If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL.  If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+#ifndef _HERMES_DLD_H
+#define _HERMES_DLD_H
+
+#include "hermes.h"
+
+int hermesi_program_init(hermes_t *hw, u32 offset);
+int hermesi_program_end(hermes_t *hw);
+int hermes_program(hermes_t *hw, const char *first_block, const char *end);
+
+int hermes_read_pda(hermes_t *hw,
+		    __le16 *pda,
+		    u32 pda_addr,
+		    u16 pda_len,
+		    int use_eeprom);
+int hermes_apply_pda(hermes_t *hw,
+		     const char *first_pdr,
+		     const __le16 *pda);
+int hermes_apply_pda_with_defaults(hermes_t *hw,
+				   const char *first_pdr,
+				   const __le16 *pda);
+
+size_t hermes_blocks_length(const char *first_block);
+
+#endif /* _HERMES_DLD_H */
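
For orientation, the call sequence a card driver is expected to follow with these helpers can be sketched as below. This is illustrative only and not code from this patch: the function name example_volatile_download and its parameters (image, image_end, first_pdr, entry_point, pda_addr, pda_len) are hypothetical caller-supplied values, error handling is minimal, and it assumes an Agere-style volatile download (PDA read from the adapter rather than EEPROM, PDRs following the last data block, as described in hermes_dld.c).

/* Illustrative sketch -- not code from this patch.  The helper calls and
 * their signatures match hermes_dld.h above; everything else (names,
 * parameter values, the 1 KiB on-stack PDA buffer) is hypothetical. */
static int example_volatile_download(hermes_t *hw,
				     const char *image, const char *image_end,
				     const char *first_pdr,
				     u32 entry_point,
				     u32 pda_addr, u16 pda_len)
{
	__le16 pda[512];	/* scratch space; must hold pda_len bytes */
	int err;

	/* Fetch the Production Data Area from the adapter; the final 0
	 * selects the non-EEPROM (Agere/wl_lkm) path of hermes_read_pda(). */
	err = hermes_read_pda(hw, pda, pda_addr, pda_len, 0);
	if (err)
		return err;

	/* Open the AUX port and enable volatile programming at the
	 * firmware entry point. */
	err = hermesi_program_init(hw, entry_point);
	if (err)
		return err;

	/* Write the image's data blocks into adapter RAM. */
	err = hermes_program(hw, image, image_end);
	if (err)
		goto out;

	/* Plug adapter-specific data into the image's PDR slots, using the
	 * built-in defaults where the PDA has no matching record.
	 * first_pdr points at the PDR area, which follows the last data
	 * block of the image. */
	err = hermes_apply_pda_with_defaults(hw, first_pdr, pda);

out:
	/* Leave programming mode and reinitialise the firmware. */
	return err ? err : hermesi_program_end(hw);
}

Per the comments in hermes_dld.c, a Spectrum/Symbol-style download would differ mainly in passing use_eeprom = 1 to hermes_read_pda() and in not needing the hermesi_program_init()/hermesi_program_end() calls.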
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/hermes_rid.h
index 4f46b48..42eb67d 100644
--- a/drivers/net/wireless/hermes_rid.h
+++ b/drivers/net/wireless/hermes_rid.h
@@ -30,6 +30,7 @@
 #define HERMES_RID_CNFWEPENABLED_AGERE		0xFC20
 #define HERMES_RID_CNFAUTHENTICATION_AGERE	0xFC21
 #define HERMES_RID_CNFMANDATORYBSSID_SYMBOL	0xFC21
+#define HERMES_RID_CNFDROPUNENCRYPTED		0xFC22
 #define HERMES_RID_CNFWEPDEFAULTKEYID		0xFC23
 #define HERMES_RID_CNFDEFAULTKEY0		0xFC24
 #define HERMES_RID_CNFDEFAULTKEY1		0xFC25
@@ -85,6 +86,16 @@
 #define HERMES_RID_CNFSCANSSID_AGERE		0xFCB2
 #define HERMES_RID_CNFBASICRATES		0xFCB3
 #define HERMES_RID_CNFSUPPORTEDRATES		0xFCB4
+#define HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE	0xFCB4
+#define HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE	0xFCB5
+#define HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE	0xFCB6
+#define HERMES_RID_CNFADDMAPPEDTKIPKEY_AGERE	0xFCB7
+#define HERMES_RID_CNFREMMAPPEDTKIPKEY_AGERE	0xFCB8
+#define HERMES_RID_CNFSETWPACAPABILITIES_AGERE	0xFCB9
+#define HERMES_RID_CNFCACHEDPMKADDRESS		0xFCBA
+#define HERMES_RID_CNFREMOVEPMKADDRESS		0xFCBB
+#define HERMES_RID_CNFSCANCHANNELS2GHZ		0xFCC2
+#define HERMES_RID_CNFDISASSOCIATE		0xFCC8
 #define HERMES_RID_CNFTICKTIME			0xFCE0
 #define HERMES_RID_CNFSCANREQUEST		0xFCE1
 #define HERMES_RID_CNFJOINREQUEST		0xFCE2
@@ -137,6 +148,12 @@
 #define HERMES_RID_CURRENTTXRATE6		0xFD85
 #define HERMES_RID_OWNMACADDR			0xFD86
 #define HERMES_RID_SCANRESULTSTABLE		0xFD88
+#define HERMES_RID_CURRENT_COUNTRY_INFO		0xFD89
+#define HERMES_RID_CURRENT_WPA_IE		0xFD8A
+#define HERMES_RID_CURRENT_TKIP_IV		0xFD8B
+#define HERMES_RID_CURRENT_ASSOC_REQ_INFO	0xFD8C
+#define HERMES_RID_CURRENT_ASSOC_RESP_INFO	0xFD8D
+#define HERMES_RID_TXQUEUEEMPTY			0xFD91
 #define HERMES_RID_PHYTYPE			0xFDC0
 #define HERMES_RID_CURRENTCHANNEL		0xFDC1
 #define HERMES_RID_CURRENTPOWERSTATE		0xFDC2
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 19a401c..bca7481 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -211,7 +211,7 @@
 do { \
 	if (ipw2100_debug_level & (level)) { \
 		printk(KERN_DEBUG "ipw2100: %c %s ", \
-                       in_interrupt() ? 'I' : 'U',  __FUNCTION__); \
+                       in_interrupt() ? 'I' : 'U',  __func__); \
 		printk(message); \
 	} \
 } while (0)
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index d4ab28b..0bad1ec 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1394,13 +1394,13 @@
 #define IPW_DEBUG(level, fmt, args...) \
 do { if (ipw_debug_level & (level)) \
   printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
-         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 
 #ifdef CONFIG_IPW2200_DEBUG
 #define IPW_LL_DEBUG(level, fmt, args...) \
 do { if (ipw_debug_level & (level)) \
   printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
-         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 #else
 #define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
 #endif				/* CONFIG_IPW2200_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
index f1d002f..33016fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
@@ -34,12 +34,12 @@
 #define IWL_DEBUG(level, fmt, args...) \
 do { if (iwl3945_debug_level & (level)) \
   printk(KERN_ERR DRV_NAME": %c %s " fmt, \
-	 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+	 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 
 #define IWL_DEBUG_LIMIT(level, fmt, args...) \
 do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \
   printk(KERN_ERR DRV_NAME": %c %s " fmt, \
-	 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+	 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 
 static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
index 0b94751..b3fe48d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
@@ -59,7 +59,7 @@
  *
  */
 
-#define _iwl3945_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs))
+#define _iwl3945_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
 #ifdef CONFIG_IWL3945_DEBUG
 static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv,
 				 u32 ofs, u32 val)
@@ -73,14 +73,14 @@
 #define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val)
 #endif
 
-#define _iwl3945_read32(priv, ofs) readl((priv)->hw_base + (ofs))
+#define _iwl3945_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
 #ifdef CONFIG_IWL3945_DEBUG
 static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs)
 {
 	IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
 	return _iwl3945_read32(priv, ofs);
 }
-#define iwl3945_read32(priv, ofs) __iwl3945_read32(__FILE__, __LINE__, priv, ofs)
+#define iwl3945_read32(priv, ofs)__iwl3945_read32(__FILE__, __LINE__, priv, ofs)
 #else
 #define iwl3945_read32(p, o) _iwl3945_read32(p, o)
 #endif
@@ -153,28 +153,10 @@
 static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv)
 {
 	int ret;
-	u32 gp_ctl;
-
 #ifdef CONFIG_IWL3945_DEBUG
 	if (atomic_read(&priv->restrict_refcnt))
 		return 0;
 #endif
-	if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
-	    test_bit(STATUS_RF_KILL_SW, &priv->status)) {
-		IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
-			"wakes up NIC\n");
-
-		/* 10 msec allows time for NIC to complete its data save */
-		gp_ctl = _iwl3945_read32(priv, CSR_GP_CNTRL);
-		if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
-			IWL_DEBUG_RF_KILL("Wait for complete power-down, "
-				"gpctl = 0x%08x\n", gp_ctl);
-			mdelay(10);
-		} else
-			IWL_DEBUG_RF_KILL("power-down complete, "
-					  "gpctl = 0x%08x\n", gp_ctl);
-	}
-
 	/* this bit wakes up the NIC */
 	_iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 10c64bd..6fc5e73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -36,8 +36,6 @@
 
 #include <linux/workqueue.h>
 
-#include "../net/mac80211/rate.h"
-
 #include "iwl-3945.h"
 
 #define RS_NAME "iwl-3945-rs"
@@ -65,6 +63,9 @@
 	u8 ibss_sta_added;
 	struct timer_list rate_scale_flush;
 	struct iwl3945_rate_scale_data win[IWL_RATE_COUNT];
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
 };
 
 static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = {
@@ -316,9 +317,10 @@
 	}
 }
 
-static void rs_rate_init(void *priv_rate, void *priv_sta,
-			 struct ieee80211_local *local, struct sta_info *sta)
+static void rs_rate_init(void *priv, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta)
 {
+	struct iwl3945_rs_sta *rs_sta = priv_sta;
 	int i;
 
 	IWL_DEBUG_RATE("enter\n");
@@ -329,24 +331,22 @@
 	 * after assoc.. */
 
 	for (i = IWL_RATE_COUNT - 1; i >= 0; i--) {
-		if (sta->supp_rates[local->hw.conf.channel->band] & (1 << i)) {
-			sta->txrate_idx = i;
+		if (sta->supp_rates[sband->band] & (1 << i)) {
+			rs_sta->last_txrate_idx = i;
 			break;
 		}
 	}
 
-	sta->last_txrate_idx = sta->txrate_idx;
-
 	/* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
-	if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
-		sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+	if (sband->band == IEEE80211_BAND_5GHZ)
+		rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
 
 	IWL_DEBUG_RATE("leave\n");
 }
 
-static void *rs_alloc(struct ieee80211_local *local)
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
-	return local->hw.priv;
+	return hw->priv;
 }
 
 /* rate scale requires free function to be implemented */
@@ -354,17 +354,24 @@
 {
 	return;
 }
+
 static void rs_clear(void *priv)
 {
 	return;
 }
 
 
-static void *rs_alloc_sta(void *priv, gfp_t gfp)
+static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 {
 	struct iwl3945_rs_sta *rs_sta;
+	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
 	int i;
 
+	/*
+	 * XXX: If it's using sta->drv_priv anyway, it might
+	 *	as well just put all the information there.
+	 */
+
 	IWL_DEBUG_RATE("enter\n");
 
 	rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp);
@@ -373,6 +380,8 @@
 		return NULL;
 	}
 
+	psta->rs_sta = rs_sta;
+
 	spin_lock_init(&rs_sta->lock);
 
 	rs_sta->start_rate = IWL_RATE_INVALID;
@@ -398,10 +407,14 @@
 	return rs_sta;
 }
 
-static void rs_free_sta(void *priv, void *priv_sta)
+static void rs_free_sta(void *priv, struct ieee80211_sta *sta,
+			void *priv_sta)
 {
+	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
 	struct iwl3945_rs_sta *rs_sta = priv_sta;
 
+	psta->rs_sta = NULL;
+
 	IWL_DEBUG_RATE("enter\n");
 	del_timer_sync(&rs_sta->rate_scale_flush);
 	kfree(rs_sta);
@@ -443,26 +456,19 @@
  * NOTE: Uses iwl3945_priv->retry_rate for the # of retries attempted by
  * the hardware for each rate.
  */
-static void rs_tx_status(void *priv_rate,
-			 struct net_device *dev,
+static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta,
 			 struct sk_buff *skb)
 {
 	u8 retries, current_count;
 	int scale_rate_index, first_index, last_index;
 	unsigned long flags;
-	struct sta_info *sta;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct iwl3945_rs_sta *rs_sta;
-	struct ieee80211_supported_band *sband;
+	struct iwl3945_rs_sta *rs_sta = priv_sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	IWL_DEBUG_RATE("enter\n");
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
-
 	retries = info->status.retry_count;
 	first_index = sband->bitrates[info->tx_rate_idx].hw_value;
 	if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
@@ -470,17 +476,11 @@
 		return;
 	}
 
-	rcu_read_lock();
-
-	sta = sta_info_get(local, hdr->addr1);
-	if (!sta || !sta->rate_ctrl_priv) {
-		rcu_read_unlock();
+	if (!priv_sta) {
 		IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
 		return;
 	}
 
-	rs_sta = (void *)sta->rate_ctrl_priv;
-
 	rs_sta->tx_packets++;
 
 	scale_rate_index = first_index;
@@ -547,8 +547,6 @@
 
 	spin_unlock_irqrestore(&rs_sta->lock, flags);
 
-	rcu_read_unlock();
-
 	IWL_DEBUG_RATE("leave\n");
 
 	return;
@@ -632,16 +630,15 @@
  * rate table and must reference the driver allocated rate table
  *
  */
-static void rs_get_rate(void *priv_rate, struct net_device *dev,
-			struct ieee80211_supported_band *sband,
-			struct sk_buff *skb,
-			struct rate_selection *sel)
+static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
+			struct ieee80211_sta *sta, void *priv_sta,
+			struct sk_buff *skb, struct rate_selection *sel)
 {
 	u8 low = IWL_RATE_INVALID;
 	u8 high = IWL_RATE_INVALID;
 	u16 high_low;
 	int index;
-	struct iwl3945_rs_sta *rs_sta;
+	struct iwl3945_rs_sta *rs_sta = priv_sta;
 	struct iwl3945_rate_scale_data *window = NULL;
 	int current_tpt = IWL_INV_TPT;
 	int low_tpt = IWL_INV_TPT;
@@ -649,40 +646,31 @@
 	u32 fail_count;
 	s8 scale_action = 0;
 	unsigned long flags;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct sta_info *sta;
 	u16 fc, rate_mask;
-	struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate;
+	struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
 	DECLARE_MAC_BUF(mac);
 
 	IWL_DEBUG_RATE("enter\n");
 
-	rcu_read_lock();
-
-	sta = sta_info_get(local, hdr->addr1);
-
 	/* Send management frames and broadcast/multicast data using lowest
 	 * rate. */
 	fc = le16_to_cpu(hdr->frame_control);
 	if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
 	    is_multicast_ether_addr(hdr->addr1) ||
-	    !sta || !sta->rate_ctrl_priv) {
+	    !sta || !priv_sta) {
 		IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
-		sel->rate_idx = rate_lowest_index(local, sband, sta);
-		rcu_read_unlock();
+		sel->rate_idx = rate_lowest_index(sband, sta);
 		return;
 	}
 
 	rate_mask = sta->supp_rates[sband->band];
-	index = min(sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
+	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
 
 	if (sband->band == IEEE80211_BAND_5GHZ)
 		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
 
-	rs_sta = (void *)sta->rate_ctrl_priv;
-
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !rs_sta->ibss_sta_added) {
 		u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
 
@@ -803,17 +791,13 @@
 
  out:
 
-	sta->last_txrate_idx = index;
+	rs_sta->last_txrate_idx = index;
 	if (sband->band == IEEE80211_BAND_5GHZ)
-		sta->txrate_idx = sta->last_txrate_idx - IWL_FIRST_OFDM_RATE;
+		sel->rate_idx = rs_sta->last_txrate_idx - IWL_FIRST_OFDM_RATE;
 	else
-		sta->txrate_idx = sta->last_txrate_idx;
-
-	rcu_read_unlock();
+		sel->rate_idx = rs_sta->last_txrate_idx;
 
 	IWL_DEBUG_RATE("leave: %d\n", index);
-
-	sel->rate_idx = sta->txrate_idx;
 }
 
 static struct rate_control_ops rs_ops = {
@@ -829,114 +813,28 @@
 	.free_sta = rs_free_sta,
 };
 
-int iwl3945_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct iwl3945_priv *priv = hw->priv;
-	struct iwl3945_rs_sta *rs_sta;
-	struct sta_info *sta;
-	unsigned long flags;
-	int count = 0, i;
-	u32 samples = 0, success = 0, good = 0;
-	unsigned long now = jiffies;
-	u32 max_time = 0;
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
-	if (!sta || !sta->rate_ctrl_priv) {
-		if (sta)
-			IWL_DEBUG_RATE("leave - no private rate data!\n");
-		else
-			IWL_DEBUG_RATE("leave - no station!\n");
-		rcu_read_unlock();
-		return sprintf(buf, "station %d not found\n", sta_id);
-	}
-
-	rs_sta = (void *)sta->rate_ctrl_priv;
-	spin_lock_irqsave(&rs_sta->lock, flags);
-	i = IWL_RATE_54M_INDEX;
-	while (1) {
-		u64 mask;
-		int j;
-
-		count +=
-		    sprintf(&buf[count], " %2dMbs: ", iwl3945_rates[i].ieee / 2);
-
-		mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
-		for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
-			buf[count++] =
-			    (rs_sta->win[i].data & mask) ? '1' : '0';
-
-		samples += rs_sta->win[i].counter;
-		good += rs_sta->win[i].success_counter;
-		success += rs_sta->win[i].success_counter *
-						iwl3945_rates[i].ieee;
-
-		if (rs_sta->win[i].stamp) {
-			int delta =
-			    jiffies_to_msecs(now - rs_sta->win[i].stamp);
-
-			if (delta > max_time)
-				max_time = delta;
-
-			count += sprintf(&buf[count], "%5dms\n", delta);
-		} else
-			buf[count++] = '\n';
-
-		j = iwl3945_get_prev_ieee_rate(i);
-		if (j == i)
-			break;
-		i = j;
-	}
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-	rcu_read_unlock();
-
-	/* Display the average rate of all samples taken.
-	 *
-	 * NOTE:  We multiple # of samples by 2 since the IEEE measurement
-	 * added from iwl3945_rates is actually 2X the rate */
-	if (samples)
-		count += sprintf(
-			&buf[count],
-			"\nAverage rate is %3d.%02dMbs over last %4dms\n"
-			"%3d%% success (%d good packets over %d tries)\n",
-			success / (2 * samples), (success * 5 / samples) % 10,
-			max_time, good * 100 / samples, good, samples);
-	else
-		count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n");
-
-	return count;
-}
-
 void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
 {
 	struct iwl3945_priv *priv = hw->priv;
 	s32 rssi = 0;
 	unsigned long flags;
-	struct ieee80211_local *local = hw_to_local(hw);
 	struct iwl3945_rs_sta *rs_sta;
-	struct sta_info *sta;
+	struct ieee80211_sta *sta;
+	struct iwl3945_sta_priv *psta;
 
 	IWL_DEBUG_RATE("enter\n");
 
-	if (!local->rate_ctrl->ops->name ||
-	    strcmp(local->rate_ctrl->ops->name, RS_NAME)) {
-		IWL_WARNING("iwl-3945-rs not selected as rate control algo!\n");
-		IWL_DEBUG_RATE("leave - mac80211 picked the wrong RC algo.\n");
-		return;
-	}
-
 	rcu_read_lock();
 
-	sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
-	if (!sta || !sta->rate_ctrl_priv) {
+	sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
+	psta = (void *) sta->drv_priv;
+	if (!sta || !psta) {
 		IWL_DEBUG_RATE("leave - no private rate data!\n");
 		rcu_read_unlock();
 		return;
 	}
 
-	rs_sta = (void *)sta->rate_ctrl_priv;
+	rs_sta = psta->rs_sta;
 
 	spin_lock_irqsave(&rs_sta->lock, flags);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
index f085d33..98b17ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -176,15 +176,6 @@
 }
 
 /**
- * iwl3945_fill_rs_info - Fill an output text buffer with the rate representation
- *
- * NOTE:  This is provided as a quick mechanism for a user to visualize
- * the performance of the rate control algorithm and is not meant to be
- * parsed software.
- */
-extern int iwl3945_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
-
-/**
  * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
  *
  * The specific throughput table used is based on the type of network
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 3f51f36..7ca5627 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -520,10 +520,10 @@
 	/* Filter incoming packets to determine if they are targeted toward
 	 * this network, discarding packets coming from ourselves */
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source    | BSSID */
+	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
 		/* packets to our IBSS update information */
 		return !compare_ether_addr(header->addr3, priv->bssid);
-	case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
+	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
 		/* packets to our IBSS update information */
 		return !compare_ether_addr(header->addr2, priv->bssid);
 	default:
@@ -531,99 +531,6 @@
 	}
 }
 
-static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
-				 struct sk_buff *skb,
-				 struct iwl3945_rx_frame_hdr *rx_hdr,
-				 struct ieee80211_rx_status *stats)
-{
-	/* First cache any information we need before we overwrite
-	 * the information provided in the skb from the hardware */
-	s8 signal = stats->signal;
-	s8 noise = 0;
-	int rate = stats->rate_idx;
-	u64 tsf = stats->mactime;
-	__le16 phy_flags_hw = rx_hdr->phy_flags, antenna;
-
-	struct iwl3945_rt_rx_hdr {
-		struct ieee80211_radiotap_header rt_hdr;
-		__le64 rt_tsf;		/* TSF */
-		u8 rt_flags;		/* radiotap packet flags */
-		u8 rt_rate;		/* rate in 500kb/s */
-		__le16 rt_channelMHz;	/* channel in MHz */
-		__le16 rt_chbitmask;	/* channel bitfield */
-		s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
-		s8 rt_dbmnoise;
-		u8 rt_antenna;		/* antenna number */
-	} __attribute__ ((packed)) *iwl3945_rt;
-
-	if (skb_headroom(skb) < sizeof(*iwl3945_rt)) {
-		if (net_ratelimit())
-			printk(KERN_ERR "not enough headroom [%d] for "
-			       "radiotap head [%zd]\n",
-			       skb_headroom(skb), sizeof(*iwl3945_rt));
-		return;
-	}
-
-	/* put radiotap header in front of 802.11 header and data */
-	iwl3945_rt = (void *)skb_push(skb, sizeof(*iwl3945_rt));
-
-	/* initialise radiotap header */
-	iwl3945_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
-	iwl3945_rt->rt_hdr.it_pad = 0;
-
-	/* total header + data */
-	put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
-
-	/* Indicate all the fields we add to the radiotap header */
-	put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
-			   (1 << IEEE80211_RADIOTAP_FLAGS) |
-			   (1 << IEEE80211_RADIOTAP_RATE) |
-			   (1 << IEEE80211_RADIOTAP_CHANNEL) |
-			   (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
-			   (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
-			   (1 << IEEE80211_RADIOTAP_ANTENNA),
-			&iwl3945_rt->rt_hdr.it_present);
-
-	/* Zero the flags, we'll add to them as we go */
-	iwl3945_rt->rt_flags = 0;
-
-	put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
-
-	iwl3945_rt->rt_dbmsignal = signal;
-	iwl3945_rt->rt_dbmnoise = noise;
-
-	/* Convert the channel frequency and set the flags */
-	put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
-	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
-		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
-			      &iwl3945_rt->rt_chbitmask);
-	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
-		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
-			      &iwl3945_rt->rt_chbitmask);
-	else	/* 802.11g */
-		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
-			      &iwl3945_rt->rt_chbitmask);
-
-	if (rate == -1)
-		iwl3945_rt->rt_rate = 0;
-	else {
-		if (stats->band == IEEE80211_BAND_5GHZ)
-			rate += IWL_FIRST_OFDM_RATE;
-
-		iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
-	}
-
-	/* antenna number */
-	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
-	iwl3945_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
-
-	/* set the preamble flag if we have it */
-	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-		iwl3945_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
-
-	stats->flag |= RX_FLAG_RADIOTAP;
-}
-
 static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
 				   struct iwl3945_rx_mem_buffer *rxb,
 				   struct ieee80211_rx_status *stats)
@@ -657,9 +564,6 @@
 		iwl3945_set_decrypted_flag(priv, rxb->skb,
 				       le32_to_cpu(rx_end->status), stats);
 
-	if (priv->add_radiotap)
-		iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats);
-
 #ifdef CONFIG_IWL3945_LEDS
 	if (ieee80211_is_data(hdr->frame_control))
 		priv->rxtxpackets += len;
@@ -684,7 +588,6 @@
 	u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
 	u8 network_packet;
 
-	rx_status.antenna = 0;
 	rx_status.flag = 0;
 	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
 	rx_status.freq =
@@ -696,6 +599,13 @@
 	if (rx_status.band == IEEE80211_BAND_5GHZ)
 		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
 
+	rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags &
+					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
+
+	/* set the preamble flag if appropriate */
+	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.flag |= RX_FLAG_SHORTPRE;
+
 	if ((unlikely(rx_stats->phy_count > 20))) {
 		IWL_DEBUG_DROP
 		    ("dsp size out of range [0,20]: "
@@ -771,100 +681,7 @@
 		priv->last_rx_noise = rx_status.noise;
 	}
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
-		iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-		return;
-	}
-
-	switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
-	case IEEE80211_FTYPE_MGMT:
-		switch (le16_to_cpu(header->frame_control) &
-			IEEE80211_FCTL_STYPE) {
-		case IEEE80211_STYPE_PROBE_RESP:
-		case IEEE80211_STYPE_BEACON:{
-				/* If this is a beacon or probe response for
-				 * our network then cache the beacon
-				 * timestamp */
-				if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA)
-				      && !compare_ether_addr(header->addr2,
-							     priv->bssid)) ||
-				     ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
-				      && !compare_ether_addr(header->addr3,
-							     priv->bssid)))) {
-					struct ieee80211_mgmt *mgmt =
-					    (struct ieee80211_mgmt *)header;
-					__le32 *pos;
-					pos = (__le32 *)&mgmt->u.beacon.
-					    timestamp;
-					priv->timestamp0 = le32_to_cpu(pos[0]);
-					priv->timestamp1 = le32_to_cpu(pos[1]);
-					priv->beacon_int = le16_to_cpu(
-					    mgmt->u.beacon.beacon_int);
-					if (priv->call_post_assoc_from_beacon &&
-					    (priv->iw_mode ==
-						IEEE80211_IF_TYPE_STA))
-						queue_work(priv->workqueue,
-						    &priv->post_associate.work);
-
-					priv->call_post_assoc_from_beacon = 0;
-				}
-
-				break;
-			}
-
-		case IEEE80211_STYPE_ACTION:
-			/* TODO: Parse 802.11h frames for CSA... */
-			break;
-
-			/*
-			 * TODO: Use the new callback function from
-			 * mac80211 instead of sniffing these packets.
-			 */
-		case IEEE80211_STYPE_ASSOC_RESP:
-		case IEEE80211_STYPE_REASSOC_RESP:{
-				struct ieee80211_mgmt *mgnt =
-				    (struct ieee80211_mgmt *)header;
-
-				/* We have just associated, give some
-				 * time for the 4-way handshake if
-				 * any. Don't start scan too early. */
-				priv->next_scan_jiffies = jiffies +
-					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
-
-				priv->assoc_id = (~((1 << 15) | (1 << 14)) &
-						  le16_to_cpu(mgnt->u.
-							      assoc_resp.aid));
-				priv->assoc_capability =
-				    le16_to_cpu(mgnt->u.assoc_resp.capab_info);
-				if (priv->beacon_int)
-					queue_work(priv->workqueue,
-					    &priv->post_associate.work);
-				else
-					priv->call_post_assoc_from_beacon = 1;
-				break;
-			}
-
-		case IEEE80211_STYPE_PROBE_REQ:{
-				DECLARE_MAC_BUF(mac1);
-				DECLARE_MAC_BUF(mac2);
-				DECLARE_MAC_BUF(mac3);
-				if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
-					IWL_DEBUG_DROP
-					    ("Dropping (non network): %s"
-					     ", %s, %s\n",
-					     print_mac(mac1, header->addr1),
-					     print_mac(mac2, header->addr2),
-					     print_mac(mac3, header->addr3));
-				return;
-			}
-		}
-
-	case IEEE80211_FTYPE_DATA:
-		/* fall through */
-	default:
-		iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-		break;
-	}
+	iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
 }
 
 int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr,
@@ -990,7 +807,7 @@
 
 	priv->stations[sta_id].current_rate.rate_n_flags = rate;
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    (sta_id != priv->hw_setting.bcast_sta_id) &&
 		(sta_id != IWL_MULTICAST_ID))
 		priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index fa81ba1..bdd3247 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -73,6 +73,10 @@
 extern int iwl3945_param_hwcrypto;
 extern int iwl3945_param_queues_num;
 
+struct iwl3945_sta_priv {
+	struct iwl3945_rs_sta *rs_sta;
+};
+
 enum iwl3945_antenna {
 	IWL_ANTENNA_DIVERSITY,
 	IWL_ANTENNA_MAIN,
@@ -707,7 +711,6 @@
 
 	enum ieee80211_band band;
 	int alloc_rxb_skb;
-	bool add_radiotap;
 
 	void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv,
 				       struct iwl3945_rx_mem_buffer *rxb);
@@ -852,7 +855,7 @@
 	/* eeprom */
 	struct iwl3945_eeprom eeprom;
 
-	enum ieee80211_if_types iw_mode;
+	enum nl80211_iftype iw_mode;
 
 	struct sk_buff *ibss_beacon;
 
@@ -895,7 +898,6 @@
 	struct delayed_work thermal_periodic;
 	struct delayed_work gather_stats;
 	struct delayed_work scan_check;
-	struct delayed_work post_associate;
 
 #define IWL_DEFAULT_TX_POWER 0x0F
 	s8 user_txpower_limit;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index fce950f..f4793a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -98,16 +98,17 @@
 #define IWL_RSSI_OFFSET	44
 
 
-#include "iwl-commands.h"
 
 /* PCI registers */
-#define PCI_LINK_CTRL      0x0F0	/* 1 byte */
-#define PCI_POWER_SOURCE   0x0C8
-#define PCI_REG_WUM8       0x0E8
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+#define PCI_CFG_POWER_SOURCE	0x0C8
+#define PCI_REG_WUM8		0x0E8
+#define PCI_CFG_LINK_CTRL	0x0F0
 
 /* PCI register values */
-#define PCI_LINK_VAL_L0S_EN	0x01
-#define PCI_LINK_VAL_L1_EN	0x02
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
+#define PCI_CFG_CMD_REG_INT_DIS_MSK	0x04
 #define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT         (0x80000000)
 
 #define TFD_QUEUE_SIZE_MAX      (256)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 23fed32..9838de5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -399,7 +399,7 @@
 	unsigned long flags;
 	u32 val;
 	u16 radio_cfg;
-	u8 val_link;
+	u16 link;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -410,10 +410,10 @@
 				       val & ~(1 << 11));
 	}
 
-	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
+	pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
 
 	/* L1 is enabled by BIOS */
-	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
+	if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
 		/* disable L0S, L1A enabled */
 		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 	else
@@ -1607,8 +1607,8 @@
 	return ret;
 }
 
-
-int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+#ifdef IEEE80211_CONF_CHANNEL_SWITCH
+static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 {
 	int rc;
 	u8 band = 0;
@@ -1648,6 +1648,7 @@
 	rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 	return rc;
 }
+#endif
 
 static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 17d4f31..c479ee2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -129,6 +129,13 @@
 	__le32 padding2;
 } __attribute__ ((packed));
 
+/* calibrations defined for 5000 */
+/* defines the order in which results should be sent to the runtime uCode */
+enum iwl5000_calib {
+	IWL5000_CALIB_LO,
+	IWL5000_CALIB_TX_IQ,
+	IWL5000_CALIB_TX_IQ_PERD,
+};
 
 #endif /* __iwl_5000_hw_h__ */
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index b08036a9..f6003e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -209,14 +209,14 @@
 {
 	unsigned long flags;
 	u16 radio_cfg;
-	u8 val_link;
+	u16 link;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
+	pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
 
 	/* L1 is enabled by BIOS */
-	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
+	if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
 		/* disable L0S, L1A enabled */
 		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 	else
@@ -445,48 +445,6 @@
 				sizeof(cal_cmd), &cal_cmd);
 }
 
-static int iwl5000_send_calib_results(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	struct iwl_host_cmd hcmd = {
-		.id = REPLY_PHY_CALIBRATION_CMD,
-		.meta.flags = CMD_SIZE_HUGE,
-	};
-
-	if (priv->calib_results.lo_res) {
-		hcmd.len = priv->calib_results.lo_res_len;
-		hcmd.data = priv->calib_results.lo_res;
-		ret = iwl_send_cmd_sync(priv, &hcmd);
-
-		if (ret)
-			goto err;
-	}
-
-	if (priv->calib_results.tx_iq_res) {
-		hcmd.len = priv->calib_results.tx_iq_res_len;
-		hcmd.data = priv->calib_results.tx_iq_res;
-		ret = iwl_send_cmd_sync(priv, &hcmd);
-
-		if (ret)
-			goto err;
-	}
-
-	if (priv->calib_results.tx_iq_perd_res) {
-		hcmd.len = priv->calib_results.tx_iq_perd_res_len;
-		hcmd.data = priv->calib_results.tx_iq_perd_res;
-		ret = iwl_send_cmd_sync(priv, &hcmd);
-
-		if (ret)
-			goto err;
-	}
-
-	return 0;
-err:
-	IWL_ERROR("Error %d\n", ret);
-	return ret;
-}
-
 static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
 {
 	struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
@@ -511,33 +469,30 @@
 	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
 	struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
 	int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
-
-	iwl_free_calib_results(priv);
+	int index;
 
 	/* reduce the size of the length field itself */
 	len -= 4;
 
+	/* Define the order in which the results will be sent to the runtime
+	 * uCode. iwl_send_calib_results sends them sequentially, in order of
+	 * their index; we sort them here. */
 	switch (hdr->op_code) {
 	case IWL5000_PHY_CALIBRATE_LO_CMD:
-		priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
-		priv->calib_results.lo_res_len = len;
-		memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
+		index = IWL5000_CALIB_LO;
 		break;
 	case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
-		priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
-		priv->calib_results.tx_iq_res_len = len;
-		memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
+		index = IWL5000_CALIB_TX_IQ;
 		break;
 	case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
-		priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
-		priv->calib_results.tx_iq_perd_res_len = len;
-		memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
+		index = IWL5000_CALIB_TX_IQ_PERD;
 		break;
 	default:
 		IWL_ERROR("Unknown calibration notification %d\n",
 			  hdr->op_code);
 		return;
 	}
+	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
 }
 
 static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
@@ -832,7 +787,7 @@
 	iwl5000_send_Xtal_calib(priv);
 
 	if (priv->ucode_type == UCODE_RT)
-		iwl5000_send_calib_results(priv);
+		iwl_send_calib_results(priv);
 
 	return 0;
 }
@@ -1614,6 +1569,8 @@
 	.mod_params = &iwl50_mod_params,
 };
 
+MODULE_FIRMWARE("iwlwifi-5000" IWL5000_UCODE_API ".ucode");
+
 module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
 MODULE_PARM_DESC(disable50,
 		  "manually disable the 50XX radio (default 0 [radio on])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 90a2b6d..93944de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -35,8 +35,6 @@
 
 #include <linux/workqueue.h>
 
-#include "../net/mac80211/rate.h"
-
 #include "iwl-dev.h"
 #include "iwl-sta.h"
 #include "iwl-core.h"
@@ -163,12 +161,15 @@
 	u32 dbg_fixed_rate;
 #endif
 	struct iwl_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
 };
 
 static void rs_rate_scale_perform(struct iwl_priv *priv,
-				   struct net_device *dev,
 				   struct ieee80211_hdr *hdr,
-				   struct sta_info *sta);
+				   struct ieee80211_sta *sta,
+				   struct iwl_lq_sta *lq_sta);
 static void rs_fill_link_cmd(const struct iwl_priv *priv,
 			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
 
@@ -354,17 +355,11 @@
 
 static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
 				      struct iwl_lq_sta *lq_data, u8 tid,
-				      struct sta_info *sta)
+				      struct ieee80211_sta *sta)
 {
-	unsigned long state;
 	DECLARE_MAC_BUF(mac);
 
-	spin_lock_bh(&sta->lock);
-	state = sta->ampdu_mlme.tid_state_tx[tid];
-	spin_unlock_bh(&sta->lock);
-
-	if (state == HT_AGG_STATE_IDLE &&
-	    rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
+	if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
 		IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n",
 				print_mac(mac, sta->addr), tid);
 		ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
@@ -373,7 +368,7 @@
 
 static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
 			      struct iwl_lq_sta *lq_data,
-			      struct sta_info *sta)
+			      struct ieee80211_sta *sta)
 {
 	if ((tid < TID_MAX_LOAD_COUNT))
 		rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
@@ -436,7 +431,7 @@
 		/* Shift bitmap by one frame (throw away oldest history),
 		 * OR in "1", and increment "success" if this
 		 * frame was successful. */
-		window->data <<= 1;;
+		window->data <<= 1;
 		if (successes > 0) {
 			window->success_counter++;
 			window->data |= 0x1;
@@ -773,7 +768,8 @@
 /*
  * mac80211 sends us Tx status
  */
-static void rs_tx_status(void *priv_rate, struct net_device *dev,
+static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta,
 			 struct sk_buff *skb)
 {
 	int status;
@@ -781,11 +777,9 @@
 	int rs_index, index = 0;
 	struct iwl_lq_sta *lq_sta;
 	struct iwl_link_quality_cmd *table;
-	struct sta_info *sta;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = local_to_hw(local);
+	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+	struct ieee80211_hw *hw = priv->hw;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_rate_scale_data *window = NULL;
 	struct iwl_rate_scale_data *search_win = NULL;
@@ -811,17 +805,9 @@
 	if (retries > 15)
 		retries = 15;
 
-	rcu_read_lock();
+	lq_sta = (struct iwl_lq_sta *)priv_sta;
 
-	sta = sta_info_get(local, hdr->addr1);
-
-	if (!sta || !sta->rate_ctrl_priv)
-		goto out;
-
-
-	lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
-
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !lq_sta->ibss_sta_added)
 		goto out;
 
@@ -965,9 +951,8 @@
 	}
 
 	/* See if there's a better rate or modulation mode to try. */
-	rs_rate_scale_perform(priv, dev, hdr, sta);
+	rs_rate_scale_perform(priv, hdr, sta, lq_sta);
 out:
-	rcu_read_unlock();
 	return;
 }
 
@@ -1128,6 +1113,7 @@
 
 			/* Higher rate not available, use the original */
 			} else {
+				new_rate = rate;
 				break;
 			}
 		}
@@ -1142,7 +1128,7 @@
 static int rs_switch_to_mimo2(struct iwl_priv *priv,
 			     struct iwl_lq_sta *lq_sta,
 			     struct ieee80211_conf *conf,
-			     struct sta_info *sta,
+			     struct ieee80211_sta *sta,
 			     struct iwl_scale_tbl_info *tbl, int index)
 {
 	u16 rate_mask;
@@ -1153,8 +1139,8 @@
 	    !sta->ht_info.ht_supported)
 		return -1;
 
-	if (((sta->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS) >> 2)
-						== IWL_MIMO_PS_STATIC)
+	if (((sta->ht_info.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+						== WLAN_HT_CAP_SM_PS_STATIC)
 		return -1;
 
 	/* Need both Tx chains/antennas to support MIMO */
@@ -1210,7 +1196,7 @@
 static int rs_switch_to_siso(struct iwl_priv *priv,
 			     struct iwl_lq_sta *lq_sta,
 			     struct ieee80211_conf *conf,
-			     struct sta_info *sta,
+			     struct ieee80211_sta *sta,
 			     struct iwl_scale_tbl_info *tbl, int index)
 {
 	u16 rate_mask;
@@ -1270,7 +1256,7 @@
 static int rs_move_legacy_other(struct iwl_priv *priv,
 				struct iwl_lq_sta *lq_sta,
 				struct ieee80211_conf *conf,
-				struct sta_info *sta,
+				struct ieee80211_sta *sta,
 				int index)
 {
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1281,15 +1267,23 @@
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action = tbl->action;
 	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	int ret = 0;
 
 	for (; ;) {
 		switch (tbl->action) {
-		case IWL_LEGACY_SWITCH_ANTENNA:
+		case IWL_LEGACY_SWITCH_ANTENNA1:
+		case IWL_LEGACY_SWITCH_ANTENNA2:
 			IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
 
 			lq_sta->action_counter++;
 
+			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+							tx_chains_num <= 1) ||
+			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+							tx_chains_num <= 2))
+				break;
+
 			/* Don't change antenna if success has been great */
 			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
 				break;
@@ -1299,7 +1293,7 @@
 
 			if (rs_toggle_antenna(valid_tx_ant,
 				&search_tbl->current_rate, search_tbl)) {
-				lq_sta->search_better_tbl = 1;
+				rs_set_expected_tpt_table(lq_sta, search_tbl);
 				goto out;
 			}
 			break;
@@ -1312,43 +1306,54 @@
 			ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
 						 search_tbl, index);
 			if (!ret) {
-				lq_sta->search_better_tbl = 1;
 				lq_sta->action_counter = 0;
 				goto out;
 			}
 
 			break;
-		case IWL_LEGACY_SWITCH_MIMO2:
+		case IWL_LEGACY_SWITCH_MIMO2_AB:
+		case IWL_LEGACY_SWITCH_MIMO2_AC:
+		case IWL_LEGACY_SWITCH_MIMO2_BC:
 			IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
 
 			/* Set up search table to try MIMO */
 			memcpy(search_tbl, tbl, sz);
 			search_tbl->is_SGI = 0;
-			search_tbl->ant_type = ANT_AB;/*FIXME:RS*/
-				/*FIXME:RS:need to check ant validity*/
+
+			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
 			ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
 						 search_tbl, index);
 			if (!ret) {
-				lq_sta->search_better_tbl = 1;
 				lq_sta->action_counter = 0;
 				goto out;
 			}
 			break;
 		}
 		tbl->action++;
-		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-			tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
+		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 
 		if (tbl->action == start_action)
 			break;
 
 	}
+	search_tbl->lq_type = LQ_NONE;
 	return 0;
 
- out:
+out:
+	lq_sta->search_better_tbl = 1;
 	tbl->action++;
-	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-		tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
+	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 	return 0;
 
 }
@@ -1359,7 +1364,7 @@
 static int rs_move_siso_to_other(struct iwl_priv *priv,
 				 struct iwl_lq_sta *lq_sta,
 				 struct ieee80211_conf *conf,
-				 struct sta_info *sta, int index)
+				 struct ieee80211_sta *sta, int index)
 {
 	u8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1370,34 +1375,51 @@
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action = tbl->action;
 	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	int ret;
 
 	for (;;) {
 		lq_sta->action_counter++;
 		switch (tbl->action) {
-		case IWL_SISO_SWITCH_ANTENNA:
+		case IWL_SISO_SWITCH_ANTENNA1:
+		case IWL_SISO_SWITCH_ANTENNA2:
 			IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
+
+			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+							tx_chains_num <= 1) ||
+			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+							tx_chains_num <= 2))
+				break;
+
 			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
 				break;
 
 			memcpy(search_tbl, tbl, sz);
 			if (rs_toggle_antenna(valid_tx_ant,
-				       &search_tbl->current_rate, search_tbl)) {
-				lq_sta->search_better_tbl = 1;
+				       &search_tbl->current_rate, search_tbl))
 				goto out;
-			}
 			break;
-		case IWL_SISO_SWITCH_MIMO2:
+		case IWL_SISO_SWITCH_MIMO2_AB:
+		case IWL_SISO_SWITCH_MIMO2_AC:
+		case IWL_SISO_SWITCH_MIMO2_BC:
 			IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
 			memcpy(search_tbl, tbl, sz);
 			search_tbl->is_SGI = 0;
-			search_tbl->ant_type = ANT_AB; /*FIXME:RS*/
+
+			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
 			ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
 						 search_tbl, index);
-			if (!ret) {
-				lq_sta->search_better_tbl = 1;
+			if (!ret)
 				goto out;
-			}
 			break;
 		case IWL_SISO_SWITCH_GI:
 			if (!tbl->is_fat &&
@@ -1427,22 +1449,23 @@
 			}
 			search_tbl->current_rate = rate_n_flags_from_tbl(
 						search_tbl, index, is_green);
-			lq_sta->search_better_tbl = 1;
 			goto out;
 		}
 		tbl->action++;
 		if (tbl->action > IWL_SISO_SWITCH_GI)
-			tbl->action = IWL_SISO_SWITCH_ANTENNA;
+			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 
 		if (tbl->action == start_action)
 			break;
 	}
+	search_tbl->lq_type = LQ_NONE;
 	return 0;
 
  out:
+	lq_sta->search_better_tbl = 1;
 	tbl->action++;
 	if (tbl->action > IWL_SISO_SWITCH_GI)
-		tbl->action = IWL_SISO_SWITCH_ANTENNA;
+		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 	return 0;
 }
 
@@ -1452,43 +1475,64 @@
 static int rs_move_mimo_to_other(struct iwl_priv *priv,
 				 struct iwl_lq_sta *lq_sta,
 				 struct ieee80211_conf *conf,
-				 struct sta_info *sta, int index)
+				 struct ieee80211_sta *sta, int index)
 {
 	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct iwl_scale_tbl_info *search_tbl =
 				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct iwl_rate_scale_data *window = &(tbl->win[index]);
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action = tbl->action;
-	/*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
+	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	int ret;
 
 	for (;;) {
 		lq_sta->action_counter++;
 		switch (tbl->action) {
-		case IWL_MIMO_SWITCH_ANTENNA_A:
-		case IWL_MIMO_SWITCH_ANTENNA_B:
+		case IWL_MIMO2_SWITCH_ANTENNA1:
+		case IWL_MIMO2_SWITCH_ANTENNA2:
+			IWL_DEBUG_RATE("LQ: MIMO toggle Antennas\n");
+
+			if (tx_chains_num <= 2)
+				break;
+
+			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (rs_toggle_antenna(valid_tx_ant,
+				       &search_tbl->current_rate, search_tbl))
+				goto out;
+			break;
+		case IWL_MIMO2_SWITCH_SISO_A:
+		case IWL_MIMO2_SWITCH_SISO_B:
+		case IWL_MIMO2_SWITCH_SISO_C:
 			IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
 
 			/* Set up new search table for SISO */
 			memcpy(search_tbl, tbl, sz);
 
-			/*FIXME:RS:need to check ant validity + C*/
-			if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
+			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
 				search_tbl->ant_type = ANT_A;
-			else
+			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
 				search_tbl->ant_type = ANT_B;
+			else
+				search_tbl->ant_type = ANT_C;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
 
 			ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
 						 search_tbl, index);
-			if (!ret) {
-				lq_sta->search_better_tbl = 1;
+			if (!ret)
 				goto out;
-			}
+
 			break;
 
-		case IWL_MIMO_SWITCH_GI:
+		case IWL_MIMO2_SWITCH_GI:
 			if (!tbl->is_fat &&
 				!(priv->current_ht_config.sgf &
 						HT_SHORT_GI_20MHZ))
@@ -1517,23 +1561,23 @@
 			}
 			search_tbl->current_rate = rate_n_flags_from_tbl(
 						search_tbl, index, is_green);
-			lq_sta->search_better_tbl = 1;
 			goto out;
 
 		}
 		tbl->action++;
-		if (tbl->action > IWL_MIMO_SWITCH_GI)
-			tbl->action = IWL_MIMO_SWITCH_ANTENNA_A;
+		if (tbl->action > IWL_MIMO2_SWITCH_GI)
+			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
 
 		if (tbl->action == start_action)
 			break;
 	}
-
+	search_tbl->lq_type = LQ_NONE;
 	return 0;
  out:
+	lq_sta->search_better_tbl = 1;
 	tbl->action++;
-	if (tbl->action > IWL_MIMO_SWITCH_GI)
-		tbl->action = IWL_MIMO_SWITCH_ANTENNA_A;
+	if (tbl->action > IWL_MIMO2_SWITCH_GI)
+		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
 	return 0;
 
 }
@@ -1624,12 +1668,11 @@
  * Do rate scaling and search for new modulation mode.
  */
 static void rs_rate_scale_perform(struct iwl_priv *priv,
-				  struct net_device *dev,
 				  struct ieee80211_hdr *hdr,
-				  struct sta_info *sta)
+				  struct ieee80211_sta *sta,
+				  struct iwl_lq_sta *lq_sta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = local_to_hw(local);
+	struct ieee80211_hw *hw = priv->hw;
 	struct ieee80211_conf *conf = &hw->conf;
 	int low = IWL_RATE_INVALID;
 	int high = IWL_RATE_INVALID;
@@ -1644,7 +1687,6 @@
 	__le16 fc;
 	u16 rate_mask;
 	u8 update_lq = 0;
-	struct iwl_lq_sta *lq_sta;
 	struct iwl_scale_tbl_info *tbl, *tbl1;
 	u16 rate_scale_index_msk = 0;
 	u32 rate;
@@ -1665,10 +1707,10 @@
 		return;
 	}
 
-	if (!sta || !sta->rate_ctrl_priv)
+	if (!sta || !lq_sta)
 		return;
 
-	lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
+	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
 
 	tid = rs_tl_add_packet(lq_sta, hdr);
 
@@ -1686,7 +1728,7 @@
 	is_green = lq_sta->is_green;
 
 	/* current tx rate */
-	index = sta->last_txrate_idx;
+	index = lq_sta->last_txrate_idx;
 
 	IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index,
 		       tbl->lq_type);
@@ -1747,19 +1789,13 @@
 		rs_stay_in_table(lq_sta);
 
 		goto out;
+	}
 
 	/* Else we have enough samples; calculate estimate of
 	 * actual average throughput */
-	} else {
-		/*FIXME:RS remove this else if we don't get this error*/
-		if (window->average_tpt != ((window->success_ratio *
-				tbl->expected_tpt[index] + 64) / 128)) {
-			IWL_ERROR("expected_tpt should have been calculated"
-								" by now\n");
-			window->average_tpt = ((window->success_ratio *
-					tbl->expected_tpt[index] + 64) / 128);
-		}
-	}
+
+	BUG_ON(window->average_tpt != ((window->success_ratio *
+			tbl->expected_tpt[index] + 64) / 128));
 
 	/* If we are searching for better modulation mode, check success. */
 	if (lq_sta->search_better_tbl) {
@@ -1769,7 +1805,7 @@
 		 * continuing to use the setup that we've been trying. */
 		if (window->average_tpt > lq_sta->last_tpt) {
 
-			IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE "
+			IWL_DEBUG_RATE("LQ: SWITCHING TO NEW TABLE "
 					"suc=%d cur-tpt=%d old-tpt=%d\n",
 					window->success_ratio,
 					window->average_tpt,
@@ -2005,15 +2041,7 @@
 out:
 	tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
 	i = index;
-	sta->last_txrate_idx = i;
-
-	/* sta->txrate_idx is an index to A mode rates which start
-	 * at IWL_FIRST_OFDM_RATE
-	 */
-	if (lq_sta->band == IEEE80211_BAND_5GHZ)
-		sta->txrate_idx = i - IWL_FIRST_OFDM_RATE;
-	else
-		sta->txrate_idx = i;
+	lq_sta->last_txrate_idx = i;
 
 	return;
 }
@@ -2021,9 +2049,9 @@
 
 static void rs_initialize_lq(struct iwl_priv *priv,
 			     struct ieee80211_conf *conf,
-			     struct sta_info *sta)
+			     struct ieee80211_sta *sta,
+			     struct iwl_lq_sta *lq_sta)
 {
-	struct iwl_lq_sta *lq_sta;
 	struct iwl_scale_tbl_info *tbl;
 	int rate_idx;
 	int i;
@@ -2032,14 +2060,13 @@
 	u8 active_tbl = 0;
 	u8 valid_tx_ant;
 
-	if (!sta || !sta->rate_ctrl_priv)
+	if (!sta || !lq_sta)
 		goto out;
 
-	lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
-	i = sta->last_txrate_idx;
+	i = lq_sta->last_txrate_idx;
 
 	if ((lq_sta->lq.sta_id == 0xff) &&
-	    (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
+	    (priv->iw_mode == NL80211_IFTYPE_ADHOC))
 		goto out;
 
 	valid_tx_ant = priv->hw_params.valid_tx_ant;
@@ -2076,40 +2103,33 @@
 	return;
 }
 
-static void rs_get_rate(void *priv_rate, struct net_device *dev,
-			struct ieee80211_supported_band *sband,
-			struct sk_buff *skb,
-			struct rate_selection *sel)
+static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
+			struct ieee80211_sta *sta, void *priv_sta,
+			struct sk_buff *skb, struct rate_selection *sel)
 {
 
 	int i;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_conf *conf = &local->hw.conf;
+	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+	struct ieee80211_conf *conf = &priv->hw->conf;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct sta_info *sta;
 	__le16 fc;
-	struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
 	struct iwl_lq_sta *lq_sta;
 
 	IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
 
-	rcu_read_lock();
-
-	sta = sta_info_get(local, hdr->addr1);
-
 	/* Send management frames and broadcast/multicast data using lowest
 	 * rate. */
 	fc = hdr->frame_control;
 	if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
-	    !sta || !sta->rate_ctrl_priv) {
-		sel->rate_idx = rate_lowest_index(local, sband, sta);
-		goto out;
+	    !sta || !priv_sta) {
+		sel->rate_idx = rate_lowest_index(sband, sta);
+		return;
 	}
 
-	lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
-	i = sta->last_txrate_idx;
+	lq_sta = (struct iwl_lq_sta *)priv_sta;
+	i = lq_sta->last_txrate_idx;
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !lq_sta->ibss_sta_added) {
 		u8 sta_id = iwl_find_station(priv, hdr->addr1);
 		DECLARE_MAC_BUF(mac);
@@ -2124,23 +2144,22 @@
 			lq_sta->lq.sta_id = sta_id;
 			lq_sta->lq.rs_table[0].rate_n_flags = 0;
 			lq_sta->ibss_sta_added = 1;
-			rs_initialize_lq(priv, conf, sta);
+			rs_initialize_lq(priv, conf, sta, lq_sta);
 		}
 	}
 
 	if ((i < 0) || (i > IWL_RATE_COUNT)) {
-		sel->rate_idx = rate_lowest_index(local, sband, sta);
-		goto out;
+		sel->rate_idx = rate_lowest_index(sband, sta);
+		return;
 	}
 
 	if (sband->band == IEEE80211_BAND_5GHZ)
 		i -= IWL_FIRST_OFDM_RATE;
 	sel->rate_idx = i;
-out:
-	rcu_read_unlock();
 }
 
-static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
+static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+			  gfp_t gfp)
 {
 	struct iwl_lq_sta *lq_sta;
 	struct iwl_priv *priv;
@@ -2163,33 +2182,28 @@
 	return lq_sta;
 }
 
-static void rs_rate_init(void *priv_rate, void *priv_sta,
-			 struct ieee80211_local *local,
-			 struct sta_info *sta)
+static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta)
 {
 	int i, j;
-	struct ieee80211_conf *conf = &local->hw.conf;
-	struct ieee80211_supported_band *sband;
-	struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
+	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+	struct ieee80211_conf *conf = &priv->hw->conf;
 	struct iwl_lq_sta *lq_sta = priv_sta;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
 	lq_sta->flush_timer = 0;
 	lq_sta->supp_rates = sta->supp_rates[sband->band];
-	sta->txrate_idx = 3;
 	for (j = 0; j < LQ_SIZE; j++)
 		for (i = 0; i < IWL_RATE_COUNT; i++)
 			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
 
-	IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
+	IWL_DEBUG_RATE("LQ: *** rate scale station global init ***\n");
 	/* TODO: what is a good starting rate for STA? About middle? Maybe not
 	 * the lowest or the highest rate.. Could consider using RSSI from
 	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
 	 * after assoc.. */
 
 	lq_sta->ibss_sta_added = 0;
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		u8 sta_id = iwl_find_station(priv, sta->addr);
 		DECLARE_MAC_BUF(mac);
 
@@ -2212,15 +2226,14 @@
 	}
 
 	/* Find highest tx rate supported by hardware and destination station */
+	lq_sta->last_txrate_idx = 3;
 	for (i = 0; i < sband->n_bitrates; i++)
 		if (sta->supp_rates[sband->band] & BIT(i))
-			sta->txrate_idx = i;
+			lq_sta->last_txrate_idx = i;
 
-	sta->last_txrate_idx = sta->txrate_idx;
-	/* WTF is with this bogus comment? A doesn't have cck rates */
-	/* For MODE_IEEE80211A, cck rates are at end of rate table */
-	if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
-		sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+	/* For MODE_IEEE80211A, skip over cck rates in global rate table */
+	if (sband->band == IEEE80211_BAND_5GHZ)
+		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
 
 	lq_sta->is_dup = 0;
 	lq_sta->is_green = rs_use_green(priv, conf);
@@ -2260,7 +2273,7 @@
 	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
 	lq_sta->drv = priv;
 
-	rs_initialize_lq(priv, conf, sta);
+	rs_initialize_lq(priv, conf, sta, lq_sta);
 }
 
 static void rs_fill_link_cmd(const struct iwl_priv *priv,
@@ -2382,9 +2395,9 @@
 	lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
 }
 
-static void *rs_alloc(struct ieee80211_local *local)
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
-	return local->hw.priv;
+	return hw->priv;
 }
 /* rate scale requires free function to be implemented */
 static void rs_free(void *priv_rate)
@@ -2405,12 +2418,12 @@
 #endif /* CONFIG_IWLWIFI_DEBUG */
 }
 
-static void rs_free_sta(void *priv_rate, void *priv_sta)
+static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+			void *priv_sta)
 {
 	struct iwl_lq_sta *lq_sta = priv_sta;
-	struct iwl_priv *priv;
+	struct iwl_priv *priv = priv_r;
 
-	priv = (struct iwl_priv *)priv_rate;
 	IWL_DEBUG_RATE("enter\n");
 	kfree(lq_sta);
 	IWL_DEBUG_RATE("leave\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 84d4d1e..d148d73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -206,21 +206,28 @@
 #define IWL_RATE_DECREASE_TH		1920	/*  15% */
 
 /* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA	0
-#define IWL_LEGACY_SWITCH_SISO		1
-#define IWL_LEGACY_SWITCH_MIMO2		2
+#define IWL_LEGACY_SWITCH_ANTENNA1      0
+#define IWL_LEGACY_SWITCH_ANTENNA2      1
+#define IWL_LEGACY_SWITCH_SISO          2
+#define IWL_LEGACY_SWITCH_MIMO2_AB      3
+#define IWL_LEGACY_SWITCH_MIMO2_AC      4
+#define IWL_LEGACY_SWITCH_MIMO2_BC      5
 
 /* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA		0
-#define IWL_SISO_SWITCH_MIMO2		1
-#define IWL_SISO_SWITCH_GI		2
+#define IWL_SISO_SWITCH_ANTENNA1        0
+#define IWL_SISO_SWITCH_ANTENNA2        1
+#define IWL_SISO_SWITCH_MIMO2_AB        2
+#define IWL_SISO_SWITCH_MIMO2_AC        3
+#define IWL_SISO_SWITCH_MIMO2_BC        4
+#define IWL_SISO_SWITCH_GI              5
 
 /* possible actions when in mimo mode */
-#define IWL_MIMO_SWITCH_ANTENNA_A	0
-#define IWL_MIMO_SWITCH_ANTENNA_B	1
-#define IWL_MIMO_SWITCH_GI		2
-
-/*FIXME:RS:separate MIMO2/3 transitions*/
+#define IWL_MIMO2_SWITCH_ANTENNA1       0
+#define IWL_MIMO2_SWITCH_ANTENNA2       1
+#define IWL_MIMO2_SWITCH_SISO_A         2
+#define IWL_MIMO2_SWITCH_SISO_B         3
+#define IWL_MIMO2_SWITCH_SISO_C         4
+#define IWL_MIMO2_SWITCH_GI             5
 
 /*FIXME:RS:add possible actions for MIMO3*/
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e01f048..204abab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -337,7 +337,7 @@
 	/* If we have set the ASSOC_MSK and we are in BSS mode then
 	 * add the IWL_AP_ID to the station rate table */
 	if (new_assoc) {
-		if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
+		if (priv->iw_mode == NL80211_IFTYPE_STATION) {
 			ret = iwl_rxon_add_station(priv,
 					   priv->active_rxon.bssid_addr, 1);
 			if (ret == IWL_INVALID_STATION) {
@@ -448,8 +448,8 @@
 					  const u8 *dest, int left)
 {
 	if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
-	    ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
-	     (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
+	    ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
+	     (priv->iw_mode != NL80211_IFTYPE_AP)))
 		return 0;
 
 	if (priv->ibss_beacon->len > left)
@@ -485,7 +485,7 @@
 		return IWL_RATE_6M_PLCP;
 }
 
-unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
 				       struct iwl_frame *frame, u8 rate)
 {
 	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
@@ -564,8 +564,6 @@
 	if (!iwl_conf->is_ht)
 		return;
 
-	priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
-
 	if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
 		iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
 	if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
@@ -586,6 +584,8 @@
 		iwl_conf->supported_chan_width = 0;
 	}
 
+	iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
+
 	memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
 
 	iwl_conf->control_channel = ht_bss_conf->primary_channel;
@@ -672,7 +672,7 @@
 	beacon_int = priv->beacon_int;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
+	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
 		if (beacon_int == 0) {
 			priv->rxon_timing.beacon_interval = cpu_to_le16(100);
 			priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -721,7 +721,7 @@
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
 		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,23 +740,23 @@
 	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
 
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
 		break;
 
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
 		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
 		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
 		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
 						  RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
 		priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -785,7 +785,7 @@
 	 * in some case A channels are all non IBSS
 	 * in this case force B/G channel
 	 */
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !(is_channel_ibss(ch_info)))
 		ch_info = &priv->channel_info[0];
 
@@ -1182,7 +1182,7 @@
 		le32_to_cpu(beacon->low_tsf), rate);
 #endif
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
 	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
 		queue_work(priv->workqueue, &priv->beacon_update);
 }
@@ -1270,7 +1270,7 @@
 
 	if (src == IWL_PWR_SRC_VAUX) {
 		u32 val;
-		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
+		ret = pci_read_config_dword(priv->pci_dev, PCI_CFG_POWER_SOURCE,
 					    &val);
 
 		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
@@ -2388,7 +2388,7 @@
 
 	mutex_lock(&priv->mutex);
 
-	ret = iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR);
+	ret = iwl4965_set_mode(priv, NL80211_IFTYPE_MONITOR);
 
 	if (ret) {
 		if (ret == -EAGAIN)
@@ -2469,7 +2469,7 @@
 	DECLARE_MAC_BUF(mac);
 	unsigned long flags;
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		IWL_ERROR("%s Should not be called in AP mode\n", __func__);
 		return;
 	}
@@ -2486,6 +2486,7 @@
 	if (!priv->vif || !priv->is_open)
 		return;
 
+	iwl_power_cancel_timeout(priv);
 	iwl_scan_cancel_timeout(priv, 200);
 
 	conf = ieee80211_get_hw_conf(priv->hw);
@@ -2503,8 +2504,7 @@
 
 	priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
 
-	if (priv->current_ht_config.is_ht)
-		iwl_set_rxon_ht(priv, &priv->current_ht_config);
+	iwl_set_rxon_ht(priv, &priv->current_ht_config);
 
 	iwl_set_rxon_chain(priv);
 	priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -2523,7 +2523,7 @@
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
 	}
@@ -2531,10 +2531,10 @@
 	iwl4965_commit_rxon(priv);
 
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		break;
 
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 
 		/* assume default assoc id */
 		priv->assoc_id = 1;
@@ -2550,20 +2550,23 @@
 		break;
 	}
 
-	/* Enable Rx differential gain and sensitivity calibrations */
-	iwl_chain_noise_reset(priv);
-	priv->start_calib = 1;
-
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 		priv->assoc_station_added = 1;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	iwl_activate_qos(priv, 0);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	iwl_power_update_mode(priv, 0);
-	/* we have just associated, don't start scan too early */
-	priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
+	/* the chain noise calibration will enable PM upon completion.
+	 * If chain noise has already been run, then we need to enable
+	 * power management here */
+	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+		iwl_power_enable_management(priv);
+
+	/* Enable Rx differential gain and sensitivity calibrations */
+	iwl_chain_noise_reset(priv);
+	priv->start_calib = 1;
+
 }
 
 static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
@@ -2728,12 +2731,6 @@
 
 	IWL_DEBUG_MACDUMP("enter\n");
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
-		IWL_DEBUG_MAC80211("leave - monitor\n");
-		dev_kfree_skb_any(skb);
-		return 0;
-	}
-
 	IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
@@ -2798,8 +2795,6 @@
 	mutex_lock(&priv->mutex);
 	IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
 
-	priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
-
 	if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
 		IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n");
 		goto out;
@@ -2830,7 +2825,7 @@
 		goto out;
 	}
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
 	    !is_channel_ibss(ch_info)) {
 		IWL_ERROR("channel %d in band %d not IBSS channel\n",
 			conf->channel->hw_value, conf->channel->band);
@@ -2851,7 +2846,7 @@
 	)
 		priv->staging_rxon.flags = 0;
 
-	iwl_set_rxon_channel(priv, conf->channel->band, channel);
+	iwl_set_rxon_channel(priv, conf->channel);
 
 	iwl_set_flags_for_band(priv, conf->channel->band);
 
@@ -2880,6 +2875,13 @@
 		goto out;
 	}
 
+	if (conf->flags & IEEE80211_CONF_PS)
+		ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
+	else
+		ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
+	if (ret)
+		IWL_DEBUG_MAC80211("Error setting power level\n");
+
 	IWL_DEBUG_MAC80211("TX Power old=%d new=%d\n",
 			   priv->tx_power_user_lmt, conf->power_level);
 
@@ -2945,7 +2947,7 @@
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
 
-			if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+			if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
 		}
@@ -2984,7 +2986,7 @@
 		return 0;
 	}
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
 	    conf->changed & IEEE80211_IFCC_BEACON) {
 		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
 		if (!beacon)
@@ -2994,7 +2996,7 @@
 			return rc;
 	}
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
 	    (!conf->ssid_len)) {
 		IWL_DEBUG_MAC80211
 		    ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -3017,7 +3019,7 @@
 	    !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
  */
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		if (!conf->bssid) {
 			conf->bssid = priv->mac_addr;
 			memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -3052,11 +3054,11 @@
 		 * to verify) - jpk */
 		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
+		if (priv->iw_mode == NL80211_IFTYPE_AP)
 			iwl4965_config_ap(priv);
 		else {
 			rc = iwl4965_commit_rxon(priv);
-			if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
+			if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
 				iwl_rxon_add_station(
 					priv, priv->active_rxon.bssid_addr, 1);
 		}
@@ -3092,7 +3094,7 @@
 
 	if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
 		IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
-				   IEEE80211_IF_TYPE_MNTR,
+				   NL80211_IFTYPE_MONITOR,
 				   changed_flags, *total_flags);
 		/* queue work 'cuz mac80211 is holding a lock which
 		 * prevents us from issuing (synchronous) f/w cmds */
@@ -3173,6 +3175,10 @@
 			priv->power_data.dtim_period = bss_conf->dtim_period;
 			priv->timestamp = bss_conf->timestamp;
 			priv->assoc_capability = bss_conf->assoc_capability;
+
+			/* we have just associated, don't start scan too early
+			 * leave time for EAPOL exchange to complete
+			 */
 			priv->next_scan_jiffies = jiffies +
 					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
 			mutex_lock(&priv->mutex);
@@ -3189,11 +3195,11 @@
 
 }
 
-static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
+static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
 {
-	int rc = 0;
 	unsigned long flags;
 	struct iwl_priv *priv = hw->priv;
+	int ret;
 
 	IWL_DEBUG_MAC80211("enter\n");
 
@@ -3201,41 +3207,47 @@
 	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!iwl_is_ready_rf(priv)) {
-		rc = -EIO;
+		ret = -EIO;
 		IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
 		goto out_unlock;
 	}
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {	/* APs don't scan */
-		rc = -EIO;
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {	/* APs don't scan */
+		ret = -EIO;
 		IWL_ERROR("ERROR: APs don't scan\n");
 		goto out_unlock;
 	}
 
-	/* we don't schedule scan within next_scan_jiffies period */
+	/* We don't schedule scan within next_scan_jiffies period.
+	 * Avoid scanning during possible EAPOL exchange, return
+	 * success immediately.
+	 */
 	if (priv->next_scan_jiffies &&
-			time_after(priv->next_scan_jiffies, jiffies)) {
-		rc = -EAGAIN;
+	    time_after(priv->next_scan_jiffies, jiffies)) {
+		IWL_DEBUG_SCAN("scan rejected: within next scan period\n");
+		queue_work(priv->workqueue, &priv->scan_completed);
+		ret = 0;
 		goto out_unlock;
 	}
+
 	/* if we just finished scan ask for delay */
-	if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
-				IWL_DELAY_NEXT_SCAN, jiffies)) {
-		rc = -EAGAIN;
+	if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
+	    time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
+		IWL_DEBUG_SCAN("scan rejected: within previous scan period\n");
+		queue_work(priv->workqueue, &priv->scan_completed);
+		ret = 0;
 		goto out_unlock;
 	}
-	if (len) {
-		IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
-			       iwl_escape_essid(ssid, len), (int)len);
 
+	if (ssid_len) {
 		priv->one_direct_scan = 1;
-		priv->direct_ssid_len = (u8)
-		    min((u8) len, (u8) IW_ESSID_MAX_SIZE);
+		priv->direct_ssid_len =  min_t(u8, ssid_len, IW_ESSID_MAX_SIZE);
 		memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
-	} else
+	} else {
 		priv->one_direct_scan = 0;
+	}
 
-	rc = iwl_scan_initiate(priv);
+	ret = iwl_scan_initiate(priv);
 
 	IWL_DEBUG_MAC80211("leave\n");
 
@@ -3243,7 +3255,7 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 	mutex_unlock(&priv->mutex);
 
-	return rc;
+	return ret;
 }
 
 static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3332,7 +3344,7 @@
 	 * in 1X mode.
 	 * In legacy wep mode, we use another host command to the uCode */
 	if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
-		priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+		priv->iw_mode != NL80211_IFTYPE_AP) {
 		if (cmd == SET_KEY)
 			is_default_wep_key = !priv->key_mapping_key;
 		else
@@ -3403,7 +3415,7 @@
 	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
 	priv->qos_data.qos_active = 1;
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
+	if (priv->iw_mode == NL80211_IFTYPE_AP)
 		iwl_activate_qos(priv, 1);
 	else if (priv->assoc_id && iwl_is_associated(priv))
 		iwl_activate_qos(priv, 0);
@@ -3416,13 +3428,13 @@
 
 static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
 			     enum ieee80211_ampdu_mlme_action action,
-			     const u8 *addr, u16 tid, u16 *ssn)
+			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
 	struct iwl_priv *priv = hw->priv;
 	DECLARE_MAC_BUF(mac);
 
 	IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
-		     print_mac(mac, addr), tid);
+		     print_mac(mac, sta->addr), tid);
 
 	if (!(priv->cfg->sku & IWL_SKU_N))
 		return -EACCES;
@@ -3430,16 +3442,16 @@
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
 		IWL_DEBUG_HT("start Rx\n");
-		return iwl_rx_agg_start(priv, addr, tid, *ssn);
+		return iwl_rx_agg_start(priv, sta->addr, tid, *ssn);
 	case IEEE80211_AMPDU_RX_STOP:
 		IWL_DEBUG_HT("stop Rx\n");
-		return iwl_rx_agg_stop(priv, addr, tid);
+		return iwl_rx_agg_stop(priv, sta->addr, tid);
 	case IEEE80211_AMPDU_TX_START:
 		IWL_DEBUG_HT("start Tx\n");
-		return iwl_tx_agg_start(priv, addr, tid, ssn);
+		return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
 	case IEEE80211_AMPDU_TX_STOP:
 		IWL_DEBUG_HT("stop Tx\n");
-		return iwl_tx_agg_stop(priv, addr, tid);
+		return iwl_tx_agg_stop(priv, sta->addr, tid);
 	default:
 		IWL_DEBUG_HT("unknown\n");
 		return -EINVAL;
@@ -3521,7 +3533,7 @@
 
 	priv->beacon_int = priv->hw->conf.beacon_int;
 	priv->timestamp = 0;
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
+	if ((priv->iw_mode == NL80211_IFTYPE_STATION))
 		priv->beacon_int = 0;
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -3535,7 +3547,7 @@
 	/* we are restarting association process
 	 * clear RXON_FILTER_ASSOC_MSK bit
 	 */
-	if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode != NL80211_IFTYPE_AP) {
 		iwl_scan_cancel_timeout(priv, 100);
 		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 		iwl4965_commit_rxon(priv);
@@ -3544,7 +3556,17 @@
 	iwl_power_update_mode(priv, 0);
 
 	/* Per mac80211.h: This is only used in IBSS mode... */
-	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
+
+		/* Switch to CAM during the association period.
+		 * The uCode will block any association/authentication
+		 * frames during the association period if it cannot hear
+		 * the AP because of PM. The timer re-enables PM if
+		 * association does not complete.
+		 */
+		if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
+						     IEEE80211_CHAN_RADAR))
+				iwl_power_disable_management(priv, 3000);
 
 		IWL_DEBUG_MAC80211("leave - not in IBSS\n");
 		mutex_unlock(&priv->mutex);
@@ -3573,7 +3595,7 @@
 		return -EIO;
 	}
 
-	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
 		IWL_DEBUG_MAC80211("leave - not IBSS\n");
 		mutex_unlock(&priv->mutex);
 		return -EIO;
@@ -3630,11 +3652,11 @@
 				 const char *buf, size_t count)
 {
 	struct iwl_priv *priv = d->driver_data;
-	char *p = (char *)buf;
-	u32 val;
+	unsigned long val;
+	int ret;
 
-	val = simple_strtoul(p, &p, 0);
-	if (p == buf)
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret)
 		printk(KERN_INFO DRV_NAME
 		       ": %s is not in hex or decimal form.\n", buf);
 	else
@@ -3706,11 +3728,11 @@
 			      const char *buf, size_t count)
 {
 	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
-	char *p = (char *)buf;
-	u32 val;
+	unsigned long val;
+	int ret;
 
-	val = simple_strtoul(p, &p, 10);
-	if (p == buf)
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
 		printk(KERN_INFO DRV_NAME
 		       ": %s is not in decimal form.\n", buf);
 	else
@@ -3734,7 +3756,12 @@
 			   const char *buf, size_t count)
 {
 	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
-	u32 flags = simple_strtoul(buf, NULL, 0);
+	unsigned long val;
+	u32 flags;
+	int ret = strict_strtoul(buf, 0, &val);
+	if (ret)
+		return ret;
+	flags = (u32)val;
 
 	mutex_lock(&priv->mutex);
 	if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
@@ -3742,8 +3769,7 @@
 		if (iwl_scan_cancel_timeout(priv, 100))
 			IWL_WARNING("Could not cancel scan.\n");
 		else {
-			IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
-				       flags);
+			IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags);
 			priv->staging_rxon.flags = cpu_to_le32(flags);
 			iwl4965_commit_rxon(priv);
 		}
@@ -3769,7 +3795,12 @@
 				  const char *buf, size_t count)
 {
 	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
-	u32 filter_flags = simple_strtoul(buf, NULL, 0);
+	unsigned long val;
+	u32 filter_flags;
+	int ret = strict_strtoul(buf, 0, &val);
+	if (ret)
+		return ret;
+	filter_flags = (u32)val;
 
 	mutex_lock(&priv->mutex);
 	if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
@@ -3870,10 +3901,12 @@
 				const char *buf, size_t count)
 {
 	struct iwl_priv *priv = dev_get_drvdata(d);
+	long val;
+	int ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
 
-	priv->retry_rate = simple_strtoul(buf, NULL, 0);
-	if (priv->retry_rate <= 0)
-		priv->retry_rate = 1;
+	priv->retry_rate = (val > 0) ? val : 1;
 
 	return count;
 }
@@ -3894,9 +3927,9 @@
 {
 	struct iwl_priv *priv = dev_get_drvdata(d);
 	int ret;
-	int mode;
+	unsigned long mode;
 
-	mode = simple_strtoul(buf, NULL, 0);
+
 	mutex_lock(&priv->mutex);
 
 	if (!iwl_is_ready(priv)) {
@@ -3904,6 +3937,10 @@
 		goto out;
 	}
 
+	ret = strict_strtoul(buf, 10, &mode);
+	if (ret)
+		goto out;
+
 	ret = iwl_power_set_user_mode(priv, mode);
 	if (ret) {
 		IWL_DEBUG_MAC80211("failed setting power mode.\n");
@@ -4083,6 +4120,7 @@
 	/* FIXME : remove when resolved PENDING */
 	INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
 	iwl_setup_scan_deferred_work(priv);
+	iwl_setup_power_deferred_work(priv);
 
 	if (priv->cfg->ops->lib->setup_deferred_work)
 		priv->cfg->ops->lib->setup_deferred_work(priv);
@@ -4102,6 +4140,7 @@
 
 	cancel_delayed_work_sync(&priv->init_alive_start);
 	cancel_delayed_work(&priv->scan_check);
+	cancel_delayed_work_sync(&priv->set_power_save);
 	cancel_delayed_work(&priv->alive_start);
 	cancel_work_sync(&priv->beacon_update);
 	del_timer_sync(&priv->statistics_periodic);
@@ -4150,7 +4189,7 @@
 	.reset_tsf = iwl4965_mac_reset_tsf,
 	.bss_info_changed = iwl4965_bss_info_changed,
 	.ampdu_action = iwl4965_mac_ampdu_action,
-	.hw_scan = iwl4965_mac_hw_scan
+	.hw_scan = iwl_mac_hw_scan
 };
 
 static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -4204,13 +4243,13 @@
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
 	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (!err)
-			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		/* both attempts failed: */
 		if (err) {
 			printk(KERN_WARNING "%s: No suitable DMA available.\n",
@@ -4225,9 +4264,6 @@
 
 	pci_set_drvdata(pdev, priv);
 
-	/* We disable the RETRY_TIMEOUT register (0x41) to keep
-	 * PCI Tx retries from interfering with C3 CPU state */
-	pci_write_config_byte(pdev, 0x41, 0x00);
 
 	/***********************
 	 * 3. Read REV register
@@ -4247,6 +4283,10 @@
 		": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
 		priv->cfg->name, priv->hw_rev);
 
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
 	/* amp init */
 	err = priv->cfg->ops->lib->apm_ops.init(priv);
 	if (err < 0) {
@@ -4481,7 +4521,10 @@
 	{IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
 	{IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
 	{IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
-	{IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)},
+/* 5350 WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
+	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
+	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
 #endif /* CONFIG_IWL5000 */
 	{0}
 };
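
Several sysfs store handlers in this file are switched from simple_strtoul() to strict_strtoul(), which returns an error instead of silently accepting malformed input. A rough user-space sketch of that parse-then-validate pattern follows; parse_ulong() is a made-up helper, not a kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned value, rejecting empty input and trailing garbage --
 * roughly the guarantees strict_strtoul() gives over simple_strtoul(). */
static int parse_ulong(const char *buf, int base, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(buf, &end, base);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long flags;

	if (parse_ulong("0x8405\n", 0, &flags))	/* sysfs input ends in '\n' */
		printf("rejected\n");
	else
		printf("flags = 0x%04lX\n", flags);
	return 0;
}
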
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index ef49440..72fbf47 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -66,6 +66,66 @@
 #include "iwl-core.h"
 #include "iwl-calib.h"
 
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+int iwl_send_calib_results(struct iwl_priv *priv)
+{
+	int ret = 0;
+	int i = 0;
+
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_PHY_CALIBRATION_CMD,
+		.meta.flags = CMD_SIZE_HUGE,
+	};
+
+	for (i = 0; i < IWL_CALIB_MAX; i++)
+		if (priv->calib_results[i].buf) {
+			hcmd.len = priv->calib_results[i].buf_len;
+			hcmd.data = priv->calib_results[i].buf;
+			ret = iwl_send_cmd_sync(priv, &hcmd);
+			if (ret)
+				goto err;
+		}
+
+	return 0;
+err:
+	IWL_ERROR("Error %d iteration %d\n", ret, i);
+	return ret;
+}
+EXPORT_SYMBOL(iwl_send_calib_results);
+
+int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+{
+	if (res->buf_len != len) {
+		kfree(res->buf);
+		res->buf = kzalloc(len, GFP_ATOMIC);
+	}
+	if (unlikely(res->buf == NULL))
+		return -ENOMEM;
+
+	res->buf_len = len;
+	memcpy(res->buf, buf, len);
+	return 0;
+}
+EXPORT_SYMBOL(iwl_calib_set);
+
+void iwl_calib_free_results(struct iwl_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < IWL_CALIB_MAX; i++) {
+		kfree(priv->calib_results[i].buf);
+		priv->calib_results[i].buf = NULL;
+		priv->calib_results[i].buf_len = 0;
+	}
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
 /* "false alarms" are signals that our DSP tries to lock onto,
  *   but then determines that they are either noise, or transmissions
  *   from a distant wireless network (also "noise", really) that get
@@ -748,13 +808,11 @@
 		}
 	}
 
+	/* Save for use within RXON, TX, SCAN commands, etc. */
+	priv->chain_noise_data.active_chains = active_chains;
 	IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
 			active_chains);
 
-	/* Save for use within RXON, TX, SCAN commands, etc. */
-	/*priv->valid_antenna = active_chains;*/
-	/*FIXME: should be reflected in RX chains in RXON */
-
 	/* Analyze noise for rx balance */
 	average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
 	average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
@@ -779,6 +837,15 @@
 
 	priv->cfg->ops->utils->gain_computation(priv, average_noise,
 		min_average_noise_antenna_i, min_average_noise);
+
+	/* Some power changes may have been made during the calibration.
+	 * Update and commit the RXON
+	 */
+	if (priv->cfg->ops->lib->update_chain_flags)
+		priv->cfg->ops->lib->update_chain_flags(priv);
+
+	data->state = IWL_CHAIN_NOISE_DONE;
+	iwl_power_enable_management(priv);
 }
 EXPORT_SYMBOL(iwl_chain_noise_calibration);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 28b5b09..8d04e96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -163,6 +163,13 @@
 /* iwl_cmd_header flags value */
 #define IWL_CMD_FAILED_MSK 0x40
 
+#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s)	((s) & 0xff)
+#define INDEX_TO_SEQ(i)	((i) & 0xff)
+#define SEQ_HUGE_FRAME	__constant_cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME	__constant_cpu_to_le16(0x8000)
+
 /**
  * struct iwl_cmd_header
  *
@@ -171,7 +178,7 @@
  */
 struct iwl_cmd_header {
 	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
-	u8 flags;	/* IWL_CMD_* */
+	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
 	/*
 	 * The driver sets up the sequence number to values of its chosing.
 	 * uCode does not use this value, but passes it back to the driver
@@ -187,11 +194,12 @@
 	 *
 	 * The Linux driver uses the following format:
 	 *
-	 *  0:7    index/position within Tx queue
-	 *  8:13   Tx queue selection
-	 * 14:14   driver sets this to indicate command is in the 'huge'
-	 *         storage at the end of the command buffers, i.e. scan cmd
-	 * 15:15   uCode sets this in uCode-originated response/notification
+	 *  0:7		tfd index - position within TX queue
+	 *  8:12	TX queue id
+	 *  13		reserved
+	 *  14		huge - driver sets this to indicate command is in the
+	 *  		'huge' storage at the end of the command buffers
+	 *  15		unsolicited RX or uCode-originated notification
 	 */
 	__le16 sequence;
 
@@ -2026,8 +2034,8 @@
  *   bit 2 - '0' PM have to walk up every DTIM
  *           '1' PM could sleep over DTIM till listen Interval.
  * PCI power managed
- *   bit 3 - '0' (PCI_LINK_CTRL & 0x1)
- *           '1' !(PCI_LINK_CTRL & 0x1)
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
  * Force sleep Modes
  *   bit 31/30- '00' use both mac/xtal sleeps
  *              '01' force Mac sleep
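
The SEQ_TO_QUEUE()/QUEUE_TO_SEQ()/SEQ_TO_INDEX() macros moved into iwl-commands.h pack a 5-bit TX queue id and an 8-bit TFD index into the command sequence field described in the comment above. A small stand-alone check of that packing, with the macro bodies copied from this hunk:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(200);

	/* The queue id and TFD index round-trip through the sequence field. */
	assert(SEQ_TO_QUEUE(seq) == 4);
	assert(SEQ_TO_INDEX(seq) == 200);
	printf("sequence = 0x%04x\n", (unsigned)seq);
	return 0;
}
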
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 80f2f84..4c312c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -306,14 +306,14 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->qos_data.qos_active = 0;
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
 		if (priv->qos_data.qos_enable)
 			priv->qos_data.qos_active = 1;
 		if (!(priv->active_rate & 0xfff0)) {
 			cw_min = 31;
 			is_legacy = 1;
 		}
-	} else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	} else if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		if (priv->qos_data.qos_enable)
 			priv->qos_data.qos_active = 1;
 	} else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -399,8 +399,8 @@
 
 	ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
 	ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
-	ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
-			     (IWL_MIMO_PS_NONE << 2));
+	ht_info->cap |= (u16)(IEEE80211_HT_CAP_SM_PS &
+			     (WLAN_HT_CAP_SM_PS_DISABLED << 2));
 
 	max_bit_rate = MAX_BIT_RATE_20_MHZ;
 	if (priv->hw_params.fat_channel & BIT(band)) {
@@ -646,8 +646,14 @@
 	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 	u32 val;
 
-	if (!ht_info->is_ht)
+	if (!ht_info->is_ht) {
+		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
+			RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
+			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+			RXON_FLG_FAT_PROT_MSK |
+			RXON_FLG_HT_PROT_MSK);
 		return;
+	}
 
 	/* Set up channel bandwidth:  20 MHz only, or 20/40 mixed if fat ok */
 	if (iwl_is_fat_tx_allowed(priv, NULL))
@@ -697,8 +703,12 @@
 }
 EXPORT_SYMBOL(iwl_set_rxon_ht);
 
-/*
- * Determine how many receiver/antenna chains to use.
+#define IWL_NUM_RX_CHAINS_MULTIPLE	3
+#define IWL_NUM_RX_CHAINS_SINGLE	2
+#define IWL_NUM_IDLE_CHAINS_DUAL	2
+#define IWL_NUM_IDLE_CHAINS_SINGLE	1
+
+/* Determine how many receiver/antenna chains to use.
  * More provides better reception via diversity.  Fewer saves power.
  * MIMO (dual stream) requires at least 2, but works better with 3.
  * This does not determine *which* chains to use, just how many.
@@ -709,10 +719,11 @@
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
 
 	/* # of Rx chains to use when expecting MIMO. */
-	if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
-		return 2;
+	if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
+						 WLAN_HT_CAP_SM_PS_STATIC)))
+		return IWL_NUM_RX_CHAINS_SINGLE;
 	else
-		return 3;
+		return IWL_NUM_RX_CHAINS_MULTIPLE;
 }
 
 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
@@ -720,17 +731,19 @@
 	int idle_cnt;
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
 	/* # Rx chains when idling and maybe trying to save power */
-	switch (priv->ps_mode) {
-	case IWL_MIMO_PS_STATIC:
-	case IWL_MIMO_PS_DYNAMIC:
-		idle_cnt = (is_cam) ? 2 : 1;
+	switch (priv->current_ht_config.sm_ps) {
+	case WLAN_HT_CAP_SM_PS_STATIC:
+	case WLAN_HT_CAP_SM_PS_DYNAMIC:
+		idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
+					IWL_NUM_IDLE_CHAINS_SINGLE;
 		break;
-	case IWL_MIMO_PS_NONE:
-		idle_cnt = (is_cam) ? active_cnt : 1;
+	case WLAN_HT_CAP_SM_PS_DISABLED:
+		idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
 		break;
-	case IWL_MIMO_PS_INVALID:
+	case WLAN_HT_CAP_SM_PS_INVALID:
 	default:
-		IWL_ERROR("invalide mimo ps mode %d\n", priv->ps_mode);
+		IWL_ERROR("invalid mimo ps mode %d\n",
+			   priv->current_ht_config.sm_ps);
 		WARN_ON(1);
 		idle_cnt = -1;
 		break;
@@ -738,6 +751,17 @@
 	return idle_cnt;
 }
 
+/* up to 4 chains */
+static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
+{
+	u8 res;
+	res = (chain_bitmap & BIT(0)) >> 0;
+	res += (chain_bitmap & BIT(1)) >> 1;
+	res += (chain_bitmap & BIT(2)) >> 2;
+	res += (chain_bitmap & BIT(4)) >> 4;
+	return res;
+}
+
 /**
  * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
  *
@@ -748,37 +772,47 @@
 {
 	bool is_single = is_single_rx_stream(priv);
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-	u8 idle_rx_cnt, active_rx_cnt;
+	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+	u32 active_chains;
 	u16 rx_chain;
 
 	/* Tell uCode which antennas are actually connected.
 	 * Before first association, we assume all antennas are connected.
 	 * Just after first association, iwl_chain_noise_calibration()
 	 *    checks which antennas actually *are* connected. */
-	rx_chain = priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+	if (priv->chain_noise_data.active_chains)
+		active_chains = priv->chain_noise_data.active_chains;
+	else
+		active_chains = priv->hw_params.valid_rx_ant;
+
+	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
 
 	/* How many receivers should we use? */
 	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
 	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
 
-	/* correct rx chain count accoridng hw settings */
-	if (priv->hw_params.rx_chains_num < active_rx_cnt)
-		active_rx_cnt = priv->hw_params.rx_chains_num;
 
-	if (priv->hw_params.rx_chains_num < idle_rx_cnt)
-		idle_rx_cnt = priv->hw_params.rx_chains_num;
+	/* correct rx chain count according to hw settings
+	 * and chain noise calibration
+	 */
+	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
+	if (valid_rx_cnt < active_rx_cnt)
+		active_rx_cnt = valid_rx_cnt;
+
+	if (valid_rx_cnt < idle_rx_cnt)
+		idle_rx_cnt = valid_rx_cnt;
 
 	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
 	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
 
 	priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
 
-	if (!is_single && (active_rx_cnt >= 2) && is_cam)
+	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
 		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
 	else
 		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
 
-	IWL_DEBUG_ASSOC("rx_chain=0x%Xi active=%d idle=%d\n",
+	IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
 			priv->staging_rxon.rx_chain,
 			active_rx_cnt, idle_rx_cnt);
 
@@ -788,7 +822,7 @@
 EXPORT_SYMBOL(iwl_set_rxon_chain);
 
 /**
- * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON
+ * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
  * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
  * @channel: Any channel valid for the requested phymode
 
@@ -797,10 +831,11 @@
  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
  * in the staging RXON flag structure based on the phymode
  */
-int iwl_set_rxon_channel(struct iwl_priv *priv,
-				enum ieee80211_band band,
-				u16 channel)
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
 {
+	enum ieee80211_band band = ch->band;
+	u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
+
 	if (!iwl_get_channel_info(priv, band, channel)) {
 		IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
 			       channel, band);
@@ -834,6 +869,10 @@
 	/* Tell mac80211 our characteristics */
 	hw->flags = IEEE80211_HW_SIGNAL_DBM |
 		    IEEE80211_HW_NOISE_DBM;
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
 	/* Default value; 4 EDCA QOS priorities */
 	hw->queues = 4;
 	/* queues to support 11n aggregation */
@@ -891,7 +930,6 @@
 	spin_lock_init(&priv->power_data.lock);
 	spin_lock_init(&priv->sta_lock);
 	spin_lock_init(&priv->hcmd_lock);
-	spin_lock_init(&priv->lq_mngr.lock);
 
 	INIT_LIST_HEAD(&priv->free_frames);
 
@@ -905,10 +943,10 @@
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
 
-	priv->iw_mode = IEEE80211_IF_TYPE_STA;
+	priv->iw_mode = NL80211_IFTYPE_STATION;
 
 	priv->use_ant_b_for_management_frame = 1; /* start with ant B */
-	priv->ps_mode = IWL_MIMO_PS_NONE;
+	priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
 
 	/* Choose which receivers/antennas to use */
 	iwl_set_rxon_chain(priv);
@@ -922,8 +960,6 @@
 	priv->qos_data.qos_active = 0;
 	priv->qos_data.qos_cap.val = 0;
 
-	iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
-
 	priv->rates_mask = IWL_RATES_MASK;
 	/* If power management is turned on, default to AC mode */
 	priv->power_mode = IWL_POWER_AC;
@@ -950,22 +986,6 @@
 }
 EXPORT_SYMBOL(iwl_init_drv);
 
-void iwl_free_calib_results(struct iwl_priv *priv)
-{
-	kfree(priv->calib_results.lo_res);
-	priv->calib_results.lo_res = NULL;
-	priv->calib_results.lo_res_len = 0;
-
-	kfree(priv->calib_results.tx_iq_res);
-	priv->calib_results.tx_iq_res = NULL;
-	priv->calib_results.tx_iq_res_len = 0;
-
-	kfree(priv->calib_results.tx_iq_perd_res);
-	priv->calib_results.tx_iq_perd_res = NULL;
-	priv->calib_results.tx_iq_perd_res_len = 0;
-}
-EXPORT_SYMBOL(iwl_free_calib_results);
-
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
 	int ret = 0;
@@ -993,10 +1013,9 @@
 }
 EXPORT_SYMBOL(iwl_set_tx_power);
 
-
 void iwl_uninit_drv(struct iwl_priv *priv)
 {
-	iwl_free_calib_results(priv);
+	iwl_calib_free_results(priv);
 	iwlcore_free_geos(priv);
 	iwl_free_channel_map(priv);
 	kfree(priv->scan);
@@ -1150,7 +1169,6 @@
 }
 EXPORT_SYMBOL(iwl_verify_ucode);
 
-
 static const char *desc_lookup(int i)
 {
 	switch (i) {
@@ -1231,9 +1249,9 @@
 /**
  * iwl_print_event_log - Dump error event log to syslog
  *
- * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
+ * NOTE: Must be called with iwl_grab_nic_access() already obtained!
  */
-void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 				u32 num_events, u32 mode)
 {
 	u32 i;
@@ -1274,8 +1292,6 @@
 		}
 	}
 }
-EXPORT_SYMBOL(iwl_print_event_log);
-
 
 void iwl_dump_nic_event_log(struct iwl_priv *priv)
 {
@@ -1391,7 +1407,7 @@
 
 	iwl_scan_cancel(priv);
 	/* FIXME: This is a workaround for AP */
-	if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode != NL80211_IFTYPE_AP) {
 		spin_lock_irqsave(&priv->lock, flags);
 		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
 			    CSR_UCODE_SW_BIT_RFKILL);
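
iwl_set_rxon_chain() now clamps the requested active and idle RX chain counts to the antennas chain noise calibration actually found connected, using iwl_count_chain_bitmap(). A user-space sketch of that count-and-clamp step; the bitmap and counts are example values, not hardware data.

#include <stdint.h>
#include <stdio.h>

/* Count the chains flagged in a 4-bit antenna bitmap. */
static uint8_t count_chain_bitmap(uint32_t chain_bitmap)
{
	uint8_t res = 0;
	int i;

	for (i = 0; i < 4; i++)
		res += (chain_bitmap >> i) & 1;
	return res;
}

int main(void)
{
	uint32_t active_chains = 0x5;		/* chains A and C connected */
	unsigned int active_rx_cnt = 3, idle_rx_cnt = 2;
	unsigned int valid_rx_cnt = count_chain_bitmap(active_chains);

	/* Never ask for more receivers than are actually connected. */
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;
	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	printf("valid=%u active=%u idle=%u\n",
	       valid_rx_cnt, active_rx_cnt, idle_rx_cnt);
	return 0;
}
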
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64f139e..55a4b58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -184,14 +184,10 @@
 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
 		struct ieee80211_ops *hw_ops);
 void iwl_hw_detect(struct iwl_priv *priv);
-
 void iwl_clear_stations_table(struct iwl_priv *priv);
-void iwl_free_calib_results(struct iwl_priv *priv);
 void iwl_reset_qos(struct iwl_priv *priv);
 void iwl_set_rxon_chain(struct iwl_priv *priv);
-int iwl_set_rxon_channel(struct iwl_priv *priv,
-				enum ieee80211_band band,
-				u16 channel);
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
 u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
 			 struct ieee80211_ht_info *sta_ht_inf);
@@ -218,7 +214,6 @@
 int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn);
 int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
-/* FIXME: remove when TX is moved to iwl core */
 int iwl_rx_queue_restock(struct iwl_priv *priv);
 int iwl_rx_queue_space(const struct iwl_rx_queue *q);
 void iwl_rx_allocate(struct iwl_priv *priv);
@@ -237,11 +232,7 @@
 ******************************************************/
 int iwl_txq_ctx_reset(struct iwl_priv *priv);
 int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-/* FIXME: remove when free Tx is fully merged into iwlcore */
-int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
-					dma_addr_t addr, u16 len);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
@@ -256,6 +247,7 @@
  * RF -Kill - here and not in iwl-rfkill.h to be available when
  * RF-kill subsystem is not compiled.
  ****************************************************/
+void iwl_rf_kill(struct iwl_priv *priv);
 void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
 int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
 
@@ -286,11 +278,17 @@
 void iwl_init_scan_params(struct iwl_priv *priv);
 int iwl_scan_cancel(struct iwl_priv *priv);
 int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-const char *iwl_escape_essid(const char *essid, u8 essid_len);
 int iwl_scan_initiate(struct iwl_priv *priv);
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
 void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
 
+/*******************************************************************************
+ * Calibrations - implemented in iwl-calib.c
+ ******************************************************************************/
+int iwl_send_calib_results(struct iwl_priv *priv);
+int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
+void iwl_calib_free_results(struct iwl_priv *priv);
+
 /*****************************************************
  *   S e n d i n g     H o s t     C o m m a n d s   *
  *****************************************************/
@@ -312,8 +310,6 @@
 /*****************************************************
 *  Error Handling Debugging
 ******************************************************/
-void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
-			 u32 num_events, u32 mode);
 void iwl_dump_nic_error_log(struct iwl_priv *priv);
 void iwl_dump_nic_event_log(struct iwl_priv *priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 52629fb..662edf4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -64,7 +64,7 @@
 #define CSR_BASE    (0x000)
 
 #define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT_COALESCING     (CSR_BASE+0x004) /* accum ints, 32-usec units */
 #define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
 #define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
 #define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d2daa17..e548d67 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -110,11 +110,12 @@
  *
  */
 
-#define IWL_DL_INFO          (1 << 0)
-#define IWL_DL_MAC80211      (1 << 1)
-#define IWL_DL_HOST_COMMAND  (1 << 2)
-#define IWL_DL_STATE         (1 << 3)
+#define IWL_DL_INFO		(1 << 0)
+#define IWL_DL_MAC80211		(1 << 1)
+#define IWL_DL_HCMD		(1 << 2)
+#define IWL_DL_STATE		(1 << 3)
 #define IWL_DL_MACDUMP		(1 << 4)
+#define IWL_DL_HCMD_DUMP	(1 << 5)
 #define IWL_DL_RADIO         (1 << 7)
 #define IWL_DL_POWER         (1 << 8)
 #define IWL_DL_TEMP          (1 << 9)
@@ -162,7 +163,8 @@
 #define IWL_DEBUG_ISR(f, a...)    IWL_DEBUG(IWL_DL_ISR, f, ## a)
 #define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a)
 #define IWL_DEBUG_WEP(f, a...)    IWL_DEBUG(IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a)
+#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_HC_DUMP(f, a...) IWL_DEBUG(IWL_DL_HCMD_DUMP, f, ## a)
 #define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a)
 #define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a)
 #define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a)
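
The new IWL_DL_HCMD_DUMP bit lets the verbose host-command dumps be enabled independently of ordinary IWL_DL_HCMD tracing. A minimal sketch of such a bitmask-gated debug macro, using printf in place of printk and illustrative bit values:

#include <stdio.h>

/* Illustrative bit assignments in the spirit of IWL_DL_HCMD/IWL_DL_HCMD_DUMP */
#define DL_HCMD		(1 << 2)
#define DL_HCMD_DUMP	(1 << 5)

static unsigned int debug_level = DL_HCMD;	/* e.g. from a module parameter */

#define DBG(level, fmt, ...)						\
	do {								\
		if (debug_level & (level))				\
			printf("iwl: " fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	DBG(DL_HCMD, "back from %s (0x%08X)\n", "REPLY_RXON", 0u);
	DBG(DL_HCMD_DUMP, "verbose dump suppressed at this debug level\n");
	return 0;
}
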
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index cdfb343..c018121 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -225,12 +225,6 @@
 	struct list_head list;
 };
 
-#define SEQ_TO_QUEUE(x)  ((x >> 8) & 0xbf)
-#define QUEUE_TO_SEQ(x)  ((x & 0xbf) << 8)
-#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
-#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
-#define SEQ_HUGE_FRAME  (0x4000)
-#define SEQ_RX_FRAME    __constant_cpu_to_le16(0x8000)
 #define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
 #define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
 #define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
@@ -412,6 +406,7 @@
 	/* self configuration data */
 	u8 is_ht;
 	u8 supported_chan_width;
+	u8 sm_ps;
 	u8 is_green_field;
 	u8 sgf;			/* HT_SHORT_GI_* short guard interval */
 	u8 max_amsdu_size;
@@ -570,50 +565,31 @@
 #define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
 #define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
 
-
 /******************************************************************************
  *
- * Functions implemented in iwl-base.c which are forward declared here
- * for use by iwl-*.c
+ * Functions implemented in the core module which are forward declared here
+ * for use by iwl-[4-5].c
  *
- *****************************************************************************/
-struct iwl_addsta_cmd;
-extern int iwl_send_add_sta(struct iwl_priv *priv,
-			    struct iwl_addsta_cmd *sta, u8 flags);
-u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
-			 u8 flags, struct ieee80211_ht_info *ht_info);
-extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
-					struct ieee80211_hdr *hdr,
-					const u8 *dest, int left);
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
-extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
-
-extern const u8 iwl_bcast_addr[ETH_ALEN];
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl-base.c
- *
- * NOTE:  The implementation of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
+ * NOTE:  The implementation of these functions is not hardware specific,
+ * which is why they are in the core module files.
  *
  * Naming convention --
- * iwl4965_         <-- Its part of iwlwifi (should be changed to iwl4965_)
- * iwl4965_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwl_         <-- Is part of iwlwifi
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  * iwl4965_bg_      <-- Called from work queue context
  * iwl4965_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
+struct iwl_addsta_cmd;
+extern int iwl_send_add_sta(struct iwl_priv *priv,
+			    struct iwl_addsta_cmd *sta, u8 flags);
+extern u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
+			int is_ap, u8 flags, struct ieee80211_ht_info *ht_info);
+extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
+extern int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
+extern const u8 iwl_bcast_addr[ETH_ALEN];
 extern int iwl_rxq_stop(struct iwl_priv *priv);
 extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
-extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
-				 struct iwl_frame *frame, u8 rate);
-extern void iwl4965_disable_events(struct iwl_priv *priv);
-
-extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
 extern int iwl_queue_space(const struct iwl_queue *q);
 static inline int iwl_queue_used(const struct iwl_queue *q, int i)
 {
@@ -636,12 +612,6 @@
 
 struct iwl_priv;
 
-/*
- * Forward declare iwl-4965.c functions for iwl-base.c
- */
-extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
-int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
-					u8 tid, int txq_id);
 
 /* Structures, enum, and defines specific to the 4965 */
 
@@ -656,11 +626,6 @@
 #define IWL_CHANNEL_WIDTH_20MHZ   0
 #define IWL_CHANNEL_WIDTH_40MHZ   1
 
-#define IWL_MIMO_PS_STATIC        0
-#define IWL_MIMO_PS_NONE          3
-#define IWL_MIMO_PS_DYNAMIC       1
-#define IWL_MIMO_PS_INVALID       2
-
 #define IWL_OPERATION_MODE_AUTO     0
 #define IWL_OPERATION_MODE_HT_ONLY  1
 #define IWL_OPERATION_MODE_MIXED    2
@@ -671,18 +636,6 @@
 
 #define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
 
-struct iwl4965_lq_mngr {
-	spinlock_t lock;
-	s32 max_window_size;
-	s32 *expected_tpt;
-	u8 *next_higher_rate;
-	u8 *next_lower_rate;
-	unsigned long stamp;
-	unsigned long stamp_last;
-	u32 flush_time;
-	u32 tx_packets;
-};
-
 /* Sensitivity and chain noise calibration */
 #define INTERFERENCE_DATA_AVAILABLE	__constant_cpu_to_le32(1)
 #define INITIALIZATION_VALUE		0xFFFF
@@ -727,8 +680,9 @@
 
 enum iwl4965_chain_noise_state {
 	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
-	IWL_CHAIN_NOISE_ACCUMULATE = 1,
-	IWL_CHAIN_NOISE_CALIBRATED = 2,
+	IWL_CHAIN_NOISE_ACCUMULATE,
+	IWL_CHAIN_NOISE_CALIBRATED,
+	IWL_CHAIN_NOISE_DONE,
 };
 
 enum iwl4965_calib_enabled_state {
@@ -745,13 +699,10 @@
 	u32 beacon_energy_c;
 };
 
-struct iwl_calib_results {
-	void *tx_iq_res;
-	void *tx_iq_perd_res;
-	void *lo_res;
-	u32 tx_iq_res_len;
-	u32 tx_iq_perd_res_len;
-	u32 lo_res_len;
+/* Opaque calibration results */
+struct iwl_calib_result {
+	void *buf;
+	size_t buf_len;
 };
 
 enum ucode_type {
@@ -789,17 +740,18 @@
 
 /* Chain noise (differential Rx gain) calib data */
 struct iwl_chain_noise_data {
-	u8 state;
-	u16 beacon_count;
+	u32 active_chains;
 	u32 chain_noise_a;
 	u32 chain_noise_b;
 	u32 chain_noise_c;
 	u32 chain_signal_a;
 	u32 chain_signal_b;
 	u32 chain_signal_c;
+	u16 beacon_count;
 	u8 disconn_array[NUM_RX_CHAINS];
 	u8 delta_gain_code[NUM_RX_CHAINS];
 	u8 radio_write;
+	u8 state;
 };
 
 #define	EEPROM_SEM_TIMEOUT 10		/* milliseconds */
@@ -813,6 +765,7 @@
 
 
 #define IWL_MAX_NUM_QUEUES	20 /* FIXME: do dynamic allocation */
+#define IWL_CALIB_MAX  3
 
 struct iwl_priv {
 
@@ -828,7 +781,6 @@
 
 	enum ieee80211_band band;
 	int alloc_rxb_skb;
-	bool add_radiotap;
 
 	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
 				       struct iwl_rx_mem_buffer *rxb);
@@ -857,7 +809,7 @@
 	s32 last_temperature;
 
 	/* init calibration results */
-	struct iwl_calib_results calib_results;
+	struct iwl_calib_result calib_results[IWL_CALIB_MAX];
 
 	/* Scan related variables */
 	unsigned long last_scan_jiffies;
@@ -939,9 +891,6 @@
 	u8 last_phy_res[100];
 
 	/* Rate scaling data */
-	struct iwl4965_lq_mngr lq_mngr;
-
-	/* Rate scaling data */
 	s8 data_retry_limit;
 	u8 retry_rate;
 
@@ -1005,7 +954,7 @@
 	u8 *eeprom;
 	struct iwl_eeprom_calib_info *calib_info;
 
-	enum ieee80211_if_types iw_mode;
+	enum nl80211_iftype iw_mode;
 
 	struct sk_buff *ibss_beacon;
 
@@ -1025,7 +974,6 @@
 	 * hardware */
 	u16 assoc_id;
 	u16 assoc_capability;
-	u8 ps_mode;
 
 	struct iwl_qos_info qos_data;
 
@@ -1047,6 +995,7 @@
 
 	struct tasklet_struct irq_tasklet;
 
+	struct delayed_work set_power_save;
 	struct delayed_work init_alive_start;
 	struct delayed_work alive_start;
 	struct delayed_work scan_check;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index cd11c0c..a72efdf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -247,8 +247,8 @@
 #define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
 #define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
 
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT	(20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT	(4)
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
 #define RX_RB_TIMEOUT	(0x10)
 
 #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
@@ -260,8 +260,9 @@
 #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
 #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
 
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL       (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL     (0x00001000)
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
 
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 2eb03ee..8300f3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -120,8 +120,18 @@
 		return 1;
 	}
 
-	IWL_DEBUG_HC("back from %s (0x%08X)\n",
-			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+#ifdef CONFIG_IWLWIFI_DEBUG
+	switch (cmd->hdr.cmd) {
+	case REPLY_TX_LINK_QUALITY_CMD:
+	case SENSITIVITY_CMD:
+		IWL_DEBUG_HC_DUMP("back from %s (0x%08X)\n",
+				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+		break;
+	default:
+		IWL_DEBUG_HC("back from %s (0x%08X)\n",
+				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+	}
+#endif
 
 	/* Let iwl_tx_complete free the response skb */
 	return 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 5bc3df4..9740fcc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -61,7 +61,7 @@
  *
  */
 
-#define _iwl_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs))
+#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
 #ifdef CONFIG_IWLWIFI_DEBUG
 static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
 				 u32 ofs, u32 val)
@@ -75,7 +75,7 @@
 #define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
 #endif
 
-#define _iwl_read32(priv, ofs) readl((priv)->hw_base + (ofs))
+#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
 #ifdef CONFIG_IWLWIFI_DEBUG
 static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
 {
@@ -155,28 +155,10 @@
 static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
 {
 	int ret;
-	u32 gp_ctl;
-
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (atomic_read(&priv->restrict_refcnt))
 		return 0;
 #endif
-	if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
-	    test_bit(STATUS_RF_KILL_SW, &priv->status)) {
-		IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
-			"wakes up NIC\n");
-
-		/* 10 msec allows time for NIC to complete its data save */
-		gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL);
-		if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
-			IWL_DEBUG_RF_KILL("Wait for complete power-down, "
-				"gpctl = 0x%08x\n", gp_ctl);
-			mdelay(10);
-		} else
-			IWL_DEBUG_RF_KILL("power-down complete, "
-					  "gpctl = 0x%08x\n", gp_ctl);
-	}
-
 	/* this bit wakes up the NIC */
 	_iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index a099c9e..60a03d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -152,9 +152,10 @@
 /* initialize to default */
 static int iwl_power_init_handle(struct iwl_priv *priv)
 {
-	int ret = 0, i;
 	struct iwl_power_mgr *pow_data;
 	int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
+	struct iwl_powertable_cmd *cmd;
+	int i;
 	u16 pci_pm;
 
 	IWL_DEBUG_POWER("Initialize power \n");
@@ -167,25 +168,19 @@
 	memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
 	memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
 
-	ret = pci_read_config_word(priv->pci_dev,
-				  PCI_LINK_CTRL, &pci_pm);
-	if (ret != 0)
-		return 0;
-	else {
-		struct iwl_powertable_cmd *cmd;
+	pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &pci_pm);
 
-		IWL_DEBUG_POWER("adjust power command flags\n");
+	IWL_DEBUG_POWER("adjust power command flags\n");
 
-		for (i = 0; i < IWL_POWER_MAX; i++) {
-			cmd = &pow_data->pwr_range_0[i].cmd;
+	for (i = 0; i < IWL_POWER_MAX; i++) {
+		cmd = &pow_data->pwr_range_0[i].cmd;
 
-			if (pci_pm & 0x1)
-				cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
-			else
-				cmd->flags |= IWL_POWER_PCI_PM_MSK;
-		}
+		if (pci_pm & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
+			cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
+		else
+			cmd->flags |= IWL_POWER_PCI_PM_MSK;
 	}
-	return ret;
+	return 0;
 }
 
 /* adjust power command according to dtim period and power level*/
@@ -255,17 +250,26 @@
 
 
 /*
- * calucaute the final power mode index
+ * compute the final power mode index
  */
-int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
+int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 {
 	struct iwl_power_mgr *setting = &(priv->power_data);
 	int ret = 0;
 	u16 uninitialized_var(final_mode);
 
-       /* If on battery, set to 3,
-	* if plugged into AC power, set to CAM ("continuously aware mode"),
-	* else user level */
+	/* Don't update power settings while chain noise calibration is running */
+	if (priv->chain_noise_data.state != IWL_CHAIN_NOISE_DONE &&
+	    priv->chain_noise_data.state != IWL_CHAIN_NOISE_ALIVE) {
+		IWL_DEBUG_POWER("Cannot update the power, chain noise "
+			"calibration running: %d\n",
+			priv->chain_noise_data.state);
+		return -EAGAIN;
+	}
+
+	/* If on battery, set to 3,
+	 * if plugged into AC power, set to CAM ("continuously aware mode"),
+	 * else user level */
 
 	switch (setting->system_power_setting) {
 	case IWL_POWER_SYS_AUTO:
@@ -286,11 +290,11 @@
 		final_mode = setting->critical_power_setting;
 
 	/* driver only support CAM for non STA network */
-	if (priv->iw_mode != IEEE80211_IF_TYPE_STA)
+	if (priv->iw_mode != NL80211_IFTYPE_STATION)
 		final_mode = IWL_POWER_MODE_CAM;
 
 	if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
-	    ((setting->power_mode != final_mode) || refresh)) {
+	    ((setting->power_mode != final_mode) || force)) {
 		struct iwl_powertable_cmd cmd;
 
 		if (final_mode != IWL_POWER_MODE_CAM)
@@ -324,7 +328,7 @@
  * this will be usefull for rate scale to disable PM during heavy
  * Tx/Rx activities
  */
-int iwl_power_disable_management(struct iwl_priv *priv)
+int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
 {
 	u16 prev_mode;
 	int ret = 0;
@@ -337,6 +341,11 @@
 	ret = iwl_power_update_mode(priv, 0);
 	priv->power_data.power_disabled = 1;
 	priv->power_data.user_power_setting = prev_mode;
+	cancel_delayed_work(&priv->set_power_save);
+	if (ms)
+		queue_delayed_work(priv->workqueue, &priv->set_power_save,
+				   msecs_to_jiffies(ms));
+
 
 	return ret;
 }
@@ -359,35 +368,26 @@
 /* set user_power_setting */
 int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
 {
-	int ret = 0;
-
 	if (mode > IWL_POWER_LIMIT)
 		return -EINVAL;
 
 	priv->power_data.user_power_setting = mode;
 
-	ret = iwl_power_update_mode(priv, 0);
-
-	return ret;
+	return iwl_power_update_mode(priv, 0);
 }
 EXPORT_SYMBOL(iwl_power_set_user_mode);
 
-
 /* set system_power_setting. This should be set by over all
  * PM application.
  */
 int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
 {
-	int ret = 0;
-
 	if (mode > IWL_POWER_LIMIT)
 		return -EINVAL;
 
 	priv->power_data.system_power_setting = mode;
 
-	ret = iwl_power_update_mode(priv, 0);
-
-	return ret;
+	return iwl_power_update_mode(priv, 0);
 }
 EXPORT_SYMBOL(iwl_power_set_system_mode);
 
@@ -431,3 +431,35 @@
 	return ret;
 }
 EXPORT_SYMBOL(iwl_power_temperature_change);
+
+static void iwl_bg_set_power_save(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work,
+				struct iwl_priv, set_power_save.work);
+	IWL_DEBUG(IWL_DL_STATE, "update power\n");
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	mutex_lock(&priv->mutex);
+
+	/* On starting association we disable power management
+	 * until association completes; if association fails, this
+	 * timer will expire and re-enable PM.
+	 */
+	if (!iwl_is_associated(priv))
+		iwl_power_enable_management(priv);
+
+	mutex_unlock(&priv->mutex);
+}
+void iwl_setup_power_deferred_work(struct iwl_priv *priv)
+{
+	INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
+}
+EXPORT_SYMBOL(iwl_setup_power_deferred_work);
+
+void iwl_power_cancel_timeout(struct iwl_priv *priv)
+{
+	cancel_delayed_work(&priv->set_power_save);
+}
+EXPORT_SYMBOL(iwl_power_cancel_timeout);
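
iwl_power_disable_management() now takes a timeout and arms the set_power_save delayed work, which re-enables power management if association has not completed by the deadline. A deliberately simplified, timestamp-based sketch of that disable-with-deadline idea; it stands in for, and is not, the driver's workqueue machinery.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct power_state {
	bool pm_disabled;
	time_t reenable_at;	/* 0 means no re-enable pending */
};

/* Disable power saving for at most 'seconds' (0 = indefinitely). */
static void power_disable(struct power_state *ps, unsigned int seconds)
{
	ps->pm_disabled = true;
	ps->reenable_at = seconds ? time(NULL) + (time_t)seconds : 0;
}

/* Poll the deadline; the driver uses a delayed work item instead. */
static void power_poll(struct power_state *ps, bool associated)
{
	if (ps->pm_disabled && ps->reenable_at && !associated &&
	    time(NULL) >= ps->reenable_at) {
		ps->pm_disabled = false;
		ps->reenable_at = 0;
		printf("association timed out, power saving re-enabled\n");
	}
}

int main(void)
{
	struct power_state ps = { false, 0 };

	power_disable(&ps, 3);	/* ~3000 ms, as in the passive/radar case */
	power_poll(&ps, false);
	printf("power saving disabled: %s\n", ps.pm_disabled ? "yes" : "no");
	return 0;
}
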
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index abcbbf9..df484a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -72,14 +72,16 @@
 	/* final power level that used to calculate final power command */
 	u8 power_mode;
 	u8 user_power_setting; /* set by user through mac80211 or sysfs */
-	u8 system_power_setting; /* set by kernel syatem tools */
+	u8 system_power_setting; /* set by kernel system tools */
 	u8 critical_power_setting; /* set if driver over heated */
 	u8 is_battery_active; /* DC/AC power */
 	u8 power_disabled; /* flag to disable using power saving level */
 };
 
-int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
-int iwl_power_disable_management(struct iwl_priv *priv);
+void iwl_setup_power_deferred_work(struct iwl_priv *priv);
+void iwl_power_cancel_timeout(struct iwl_priv *priv);
+int iwl_power_update_mode(struct iwl_priv *priv, bool force);
+int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
 int iwl_power_enable_management(struct iwl_priv *priv);
 int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
 int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e81bfc4..7cde9d7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -376,7 +376,9 @@
 {
 	int ret;
 	unsigned long flags;
-	unsigned int rb_size;
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+	const u32 rb_timeout = 0; /* FIXME: find out why RX_RB_TIMEOUT stalls RX */
 
 	spin_lock_irqsave(&priv->lock, flags);
 	ret = iwl_grab_nic_access(priv);
@@ -398,26 +400,32 @@
 
 	/* Tell device where to find RBD circular buffer in DRAM */
 	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   rxq->dma_addr >> 8);
+			   (u32)(rxq->dma_addr >> 8));
 
 	/* Tell device where in DRAM to update its Rx status */
 	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
 			   (priv->shared_phys + priv->rb_closed_offset) >> 4);
 
-	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
+	 *      the credit mechanism of the 5000 HW RX FIFO
+	 * Direct rx interrupts to the host
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
 	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
 			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
 			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   rb_size |
-			     /* 0x10 << 4 | */
-			   (RX_QUEUE_SIZE_LOG <<
-			      FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
-
-	/*
-	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
-	 */
+			   rb_size |
+			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
 
 	iwl_release_nic_access(priv);
+
+	iwl_write32(priv, CSR_INT_COALESCING, 0x40);
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -789,107 +797,6 @@
 }
 #endif
 
-static void iwl_add_radiotap(struct iwl_priv *priv,
-				 struct sk_buff *skb,
-				 struct iwl_rx_phy_res *rx_start,
-				 struct ieee80211_rx_status *stats,
-				 u32 ampdu_status)
-{
-	s8 signal = stats->signal;
-	s8 noise = 0;
-	int rate = stats->rate_idx;
-	u64 tsf = stats->mactime;
-	__le16 antenna;
-	__le16 phy_flags_hw = rx_start->phy_flags;
-	struct iwl4965_rt_rx_hdr {
-		struct ieee80211_radiotap_header rt_hdr;
-		__le64 rt_tsf;		/* TSF */
-		u8 rt_flags;		/* radiotap packet flags */
-		u8 rt_rate;		/* rate in 500kb/s */
-		__le16 rt_channelMHz;	/* channel in MHz */
-		__le16 rt_chbitmask;	/* channel bitfield */
-		s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
-		s8 rt_dbmnoise;
-		u8 rt_antenna;		/* antenna number */
-	} __attribute__ ((packed)) *iwl4965_rt;
-
-	/* TODO: We won't have enough headroom for HT frames. Fix it later. */
-	if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
-		if (net_ratelimit())
-			printk(KERN_ERR "not enough headroom [%d] for "
-			       "radiotap head [%zd]\n",
-			       skb_headroom(skb), sizeof(*iwl4965_rt));
-		return;
-	}
-
-	/* put radiotap header in front of 802.11 header and data */
-	iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
-
-	/* initialise radiotap header */
-	iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
-	iwl4965_rt->rt_hdr.it_pad = 0;
-
-	/* total header + data */
-	put_unaligned_le16(sizeof(*iwl4965_rt), &iwl4965_rt->rt_hdr.it_len);
-
-	/* Indicate all the fields we add to the radiotap header */
-	put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
-			   (1 << IEEE80211_RADIOTAP_FLAGS) |
-			   (1 << IEEE80211_RADIOTAP_RATE) |
-			   (1 << IEEE80211_RADIOTAP_CHANNEL) |
-			   (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
-			   (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
-			   (1 << IEEE80211_RADIOTAP_ANTENNA),
-			   &(iwl4965_rt->rt_hdr.it_present));
-
-	/* Zero the flags, we'll add to them as we go */
-	iwl4965_rt->rt_flags = 0;
-
-	put_unaligned_le64(tsf, &iwl4965_rt->rt_tsf);
-
-	iwl4965_rt->rt_dbmsignal = signal;
-	iwl4965_rt->rt_dbmnoise = noise;
-
-	/* Convert the channel frequency and set the flags */
-	put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
-	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
-		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
-				   &iwl4965_rt->rt_chbitmask);
-	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
-		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
-				   &iwl4965_rt->rt_chbitmask);
-	else	/* 802.11g */
-		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
-				   &iwl4965_rt->rt_chbitmask);
-
-	if (rate == -1)
-		iwl4965_rt->rt_rate = 0;
-	else
-		iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
-
-	/*
-	 * "antenna number"
-	 *
-	 * It seems that the antenna field in the phy flags value
-	 * is actually a bitfield. This is undefined by radiotap,
-	 * it wants an actual antenna number but I always get "7"
-	 * for most legacy frames I receive indicating that the
-	 * same frame was received on all three RX chains.
-	 *
-	 * I think this field should be removed in favour of a
-	 * new 802.11n radiotap field "RX chains" that is defined
-	 * as a bitmask.
-	 */
-	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
-	iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
-
-	/* set the preamble flag if appropriate */
-	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-		iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
-
-	stats->flag |= RX_FLAG_RADIOTAP;
-}
-
 static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
 {
 	/* 0 - mgmt, 1 - cnt, 2 - data */
@@ -1074,9 +981,6 @@
 	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	if (priv->add_radiotap)
-		iwl_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
-
 	iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
 	ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
 	priv->alloc_rxb_skb--;
@@ -1130,10 +1034,10 @@
 	/* Filter incoming packets to determine if they are targeted toward
 	 * this network, discarding packets coming from ourselves */
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source    | BSSID */
+	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
 		/* packets to our IBSS update information */
 		return !compare_ether_addr(header->addr3, priv->bssid);
-	case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
+	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
 		/* packets to our IBSS update information */
 		return !compare_ether_addr(header->addr2, priv->bssid);
 	default:
@@ -1171,7 +1075,6 @@
 	if (rx_status.band == IEEE80211_BAND_5GHZ)
 		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
 
-	rx_status.antenna = 0;
 	rx_status.flag = 0;
 
 	/* TSF isn't reliable. In order to allow smooth user experience,
@@ -1253,8 +1156,28 @@
 		rx_status.signal, rx_status.noise, rx_status.signal,
 		(unsigned long long)rx_status.mactime);
 
+	/*
+	 * "antenna number"
+	 *
+	 * It seems that the antenna field in the phy flags value
+	 * is actually a bitfield. This is undefined by radiotap,
+	 * it wants an actual antenna number but I always get "7"
+	 * for most legacy frames I receive indicating that the
+	 * same frame was received on all three RX chains.
+	 *
+	 * I think this field should be removed in favour of a
+	 * new 802.11n radiotap field "RX chains" that is defined
+	 * as a bitmask.
+	 */
+	rx_status.antenna = le16_to_cpu(rx_start->phy_flags &
+					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
+
+	/* set the preamble flag if appropriate */
+	if (rx_start->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.flag |= RX_FLAG_SHORTPRE;
+
 	/* Take shortcut when only in monitor mode */
-	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
+	if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
 		iwl_pass_packet_to_mac80211(priv, include_phy,
 						 rxb, &rx_status);
 		return;
@@ -1271,7 +1194,7 @@
 	switch (fc & IEEE80211_FCTL_FTYPE) {
 	case IEEE80211_FTYPE_MGMT:
 	case IEEE80211_FTYPE_DATA:
-		if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
+		if (priv->iw_mode == NL80211_IFTYPE_AP)
 			iwl_update_ps_mode(priv, fc  & IEEE80211_FCTL_PM,
 						header->addr2);
 		/* fall through */
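
With the driver-built radiotap header removed, the RX path fills rx_status.antenna and the short-preamble flag straight from the uCode PHY flags word. A small sketch of extracting those two fields; the mask and flag values are placeholders for the RX_RES_PHY_FLAGS_* and mac80211 RX_FLAG_* constants.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values standing in for the driver's RX_RES_PHY_FLAGS_* and
 * mac80211 RX_FLAG_* constants. */
#define PHY_FLAGS_ANTENNA_MSK		0x00f0
#define PHY_FLAGS_SHORT_PREAMBLE_MSK	0x0004
#define RX_FLAG_SHORTPRE		(1 << 3)

int main(void)
{
	uint16_t phy_flags = 0x0074;	/* example value from the uCode */
	unsigned int antenna = (phy_flags & PHY_FLAGS_ANTENNA_MSK) >> 4;
	uint32_t flag = 0;

	if (phy_flags & PHY_FLAGS_SHORT_PREAMBLE_MSK)
		flag |= RX_FLAG_SHORTPRE;

	/* An antenna bitmap of 0x7 means all three RX chains saw the frame. */
	printf("antenna bitmap = 0x%x, short preamble = %s\n",
	       antenna, (flag & RX_FLAG_SHORTPRE) ? "yes" : "no");
	return 0;
}
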
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 6c8ac3a..bf855c3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -88,7 +88,7 @@
 
 
 
-const char *iwl_escape_essid(const char *essid, u8 essid_len)
+static const char *iwl_escape_essid(const char *essid, u8 essid_len)
 {
 	static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
 	const char *s = essid;
@@ -111,7 +111,6 @@
 	*d = '\0';
 	return escaped;
 }
-EXPORT_SYMBOL(iwl_escape_essid);
 
 /**
  * iwl_scan_cancel - Cancel any currently executing HW scan
@@ -464,11 +463,6 @@
 
 int iwl_scan_initiate(struct iwl_priv *priv)
 {
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
-		IWL_ERROR("APs don't scan.\n");
-		return 0;
-	}
-
 	if (!iwl_is_ready_rf(priv)) {
 		IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
 		return -EIO;
@@ -480,8 +474,7 @@
 	}
 
 	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-		IWL_DEBUG_SCAN("Scan request while abort pending.  "
-			       "Queuing.\n");
+		IWL_DEBUG_SCAN("Scan request while abort pending\n");
 		return -EAGAIN;
 	}
 
@@ -869,7 +862,7 @@
 
 	scan->tx_cmd.len = cpu_to_le16(cmd_len);
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
+	if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
 		scan->filter_flags = RXON_FILTER_PROMISC_MSK;
 
 	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6283a3a..61797f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -47,8 +47,8 @@
 	unsigned long flags;
 	DECLARE_MAC_BUF(mac);
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
-	    (priv->iw_mode == IEEE80211_IF_TYPE_AP))
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
+	    (priv->iw_mode == NL80211_IFTYPE_AP))
 		start = IWL_STA_ID;
 
 	if (is_broadcast_ether_addr(addr))
@@ -74,7 +74,7 @@
 
 int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
 {
-	if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
+	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
 		return IWL_AP_ID;
 	} else {
 		u8 *da = ieee80211_get_DA(hdr);
@@ -191,20 +191,20 @@
 	if (!sta_ht_inf || !sta_ht_inf->ht_supported)
 		goto done;
 
-	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
+	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
 
 	sta_flags = priv->stations[index].sta.station_flags;
 
 	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
 
 	switch (mimo_ps_mode) {
-	case WLAN_HT_CAP_MIMO_PS_STATIC:
+	case WLAN_HT_CAP_SM_PS_STATIC:
 		sta_flags |= STA_FLG_MIMO_DIS_MSK;
 		break;
-	case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
+	case WLAN_HT_CAP_SM_PS_DYNAMIC:
 		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
 		break;
-	case WLAN_HT_CAP_MIMO_PS_DISABLED:
+	case WLAN_HT_CAP_SM_PS_DISABLED:
 		break;
 	default:
 		IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
@@ -286,7 +286,7 @@
 
 	/* BCAST station and IBSS stations do not work in HT mode */
 	if (sta_id != priv->hw_params.bcast_sta_id &&
-	    priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
+	    priv->iw_mode != NL80211_IFTYPE_ADHOC)
 		iwl_set_ht_add_station(priv, sta_id, ht_info);
 
 	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -817,7 +817,7 @@
 	};
 
 	if ((lq->sta_id == 0xFF) &&
-	    (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
+	    (priv->iw_mode == NL80211_IFTYPE_ADHOC))
 		return -EINVAL;
 
 	if (lq->sta_id == 0xFF)
@@ -904,7 +904,7 @@
 
 	if ((is_ap) &&
 	    (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
-	    (priv->iw_mode == IEEE80211_IF_TYPE_STA))
+	    (priv->iw_mode == NL80211_IFTYPE_STATION))
 		sta_id = iwl_add_station_flags(priv, addr, is_ap,
 						   0, cur_ht_config);
 	else
@@ -938,11 +938,11 @@
 
 	/* If we are a client station in a BSS network, use the special
 	 * AP station entry (that's the only station we communicate with) */
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		return IWL_AP_ID;
 
 	/* If we are an AP, then find the station, or use BCAST */
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		sta_id = iwl_find_station(priv, hdr->addr1);
 		if (sta_id != IWL_INVALID_STATION)
 			return sta_id;
@@ -950,7 +950,7 @@
 
 	/* If this frame is going out to an IBSS network, find the station,
 	 * or create a new station table entry */
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		sta_id = iwl_find_station(priv, hdr->addr1);
 		if (sta_id != IWL_INVALID_STATION)
 			return sta_id;
@@ -968,6 +968,11 @@
 		iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
 		return priv->hw_params.bcast_sta_id;
 
+	/* If we are in monitor mode, use BCAST. This is required for
+	 * packet injection. */
+	case NL80211_IFTYPE_MONITOR:
+		return priv->hw_params.bcast_sta_id;
+
 	default:
 		IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
 		return priv->hw_params.bcast_sta_id;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 78b1a7a..907a53e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -63,7 +63,7 @@
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
 	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
 	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
@@ -115,10 +115,8 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
 
-
-int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
+static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
 				 dma_addr_t addr, u16 len)
 {
 	int index, is_odd;
@@ -126,7 +124,7 @@
 	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
 
 	/* Each TFD can point to a maximum of 20 Tx buffers */
-	if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
+	if (num_tbs >= MAX_NUM_OF_TBS) {
 		IWL_ERROR("Error can not send more than %d chunks\n",
 			  MAX_NUM_OF_TBS);
 		return -EINVAL;
@@ -151,7 +149,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
 
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
@@ -478,7 +475,6 @@
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
-
 /**
  * iwl_txq_ctx_reset - Reset TX queue context
  * Destroys all DMA structures and initialises them again
@@ -545,6 +541,7 @@
  error_kw:
 	return ret;
 }
+
 /**
  * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
  */
@@ -796,11 +793,6 @@
 		goto drop_unlock;
 	}
 
-	if (!priv->vif) {
-		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
-		goto drop_unlock;
-	}
-
 	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
 	     IWL_INVALID_RATE) {
 		IWL_ERROR("ERROR: No TX rate available.\n");
@@ -822,16 +814,18 @@
 
 	/* drop all data frames if we are not associated */
 	if (ieee80211_is_data(fc) &&
-	   (!iwl_is_associated(priv) ||
-	    ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
-	    !priv->assoc_station_added)) {
+	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
+	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
+	    (!iwl_is_associated(priv) ||
+	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
+	     !priv->assoc_station_added)) {
 		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
 		goto drop_unlock;
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));
+	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find (or create) index into station table for destination station */
 	sta_id = iwl_get_sta_id(priv, hdr);
@@ -849,7 +843,7 @@
 	txq_id = swq_id;
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & 0xf;
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = hdr->seq_ctrl &
@@ -1064,7 +1058,7 @@
 	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
 			INDEX_TO_SEQ(q->write_ptr));
 	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
-		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
+		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
 	len = (idx == TFD_CMD_SLOTS) ?
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
 	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
@@ -1072,12 +1066,26 @@
 	phys_addr += offsetof(struct iwl_cmd, hdr);
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
-	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
-		     "%d bytes at %d[%d]:%d\n",
-		     get_cmd_string(out_cmd->hdr.cmd),
-		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
-		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
-
+#ifdef CONFIG_IWLWIFI_DEBUG
+	switch (out_cmd->hdr.cmd) {
+	case REPLY_TX_LINK_QUALITY_CMD:
+	case SENSITIVITY_CMD:
+		IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+				"%d bytes at %d[%d]:%d\n",
+				get_cmd_string(out_cmd->hdr.cmd),
+				out_cmd->hdr.cmd,
+				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+		break;
+	default:
+		IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
+				"%d bytes at %d[%d]:%d\n",
+				get_cmd_string(out_cmd->hdr.cmd),
+				out_cmd->hdr.cmd,
+				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+	}
+#endif
 	txq->need_update = 1;
 
 	/* Set up entry in queue's byte count circular buffer */
@@ -1185,17 +1193,16 @@
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
-	int huge = sequence & SEQ_HUGE_FRAME;
 	int cmd_index;
+	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_cmd *cmd;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue, then a command routing bug has been introduced
 	 * in the queue management code. */
-	if (txq_id != IWL_CMD_QUEUE_NUM)
-		IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
-			  txq_id, pkt->hdr.cmd);
-	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
+	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
+		 "wrong command queue %d, command id 0x%X\n", txq_id, pkt->hdr.cmd))
+		return;
 
 	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
 	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b775d5b..d15a2c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1160,7 +1160,7 @@
 	/* If we have set the ASSOC_MSK and we are in BSS mode then
 	 * add the IWL_AP_ID to the station rate table */
 	if (iwl3945_is_associated(priv) &&
-	    (priv->iw_mode == IEEE80211_IF_TYPE_STA))
+	    (priv->iw_mode == NL80211_IFTYPE_STATION))
 		if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0)
 		    == IWL_INVALID_STATION) {
 			IWL_ERROR("Error adding AP address for transmit.\n");
@@ -1447,8 +1447,8 @@
 {
 
 	if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
-	    ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
-	     (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
+	    ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
+	     (priv->iw_mode != NL80211_IFTYPE_AP)))
 		return 0;
 
 	if (priv->ibss_beacon->len > left)
@@ -1746,14 +1746,14 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->qos_data.qos_active = 0;
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
 		if (priv->qos_data.qos_enable)
 			priv->qos_data.qos_active = 1;
 		if (!(priv->active_rate & 0xfff0)) {
 			cw_min = 31;
 			is_legacy = 1;
 		}
-	} else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	} else if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		if (priv->qos_data.qos_enable)
 			priv->qos_data.qos_active = 1;
 	} else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -2120,7 +2120,7 @@
 	beacon_int = priv->beacon_int;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
+	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
 		if (beacon_int == 0) {
 			priv->rxon_timing.beacon_interval = cpu_to_le16(100);
 			priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -2156,7 +2156,7 @@
 
 static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
 {
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		IWL_ERROR("APs don't scan.\n");
 		return 0;
 	}
@@ -2218,7 +2218,7 @@
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
 		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -2237,23 +2237,23 @@
 	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
 
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
 		break;
 
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
 		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
 		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
 		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
 						  RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
 		priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -2282,7 +2282,7 @@
 	 * in some case A channels are all non IBSS
 	 * in this case force B/G channel
 	 */
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !(is_channel_ibss(ch_info)))
 		ch_info = &priv->channel_info[0];
 
@@ -2302,7 +2302,7 @@
 
 static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
 {
-	if (mode == IEEE80211_IF_TYPE_IBSS) {
+	if (mode == NL80211_IFTYPE_ADHOC) {
 		const struct iwl3945_channel_info *ch_info;
 
 		ch_info = iwl3945_get_channel_info(priv,
@@ -2469,11 +2469,11 @@
 
 	/* If we are a client station in a BSS network, use the special
 	 * AP station entry (that's the only station we communicate with) */
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		return IWL_AP_ID;
 
 	/* If we are an AP, then find the station, or use BCAST */
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
 		if (sta_id != IWL_INVALID_STATION)
 			return sta_id;
@@ -2481,7 +2481,7 @@
 
 	/* If this frame is going out to an IBSS network, find the station,
 	 * or create a new station table entry */
-	case IEEE80211_IF_TYPE_IBSS: {
+	case NL80211_IFTYPE_ADHOC: {
 		DECLARE_MAC_BUF(mac);
 
 		/* Create new station table entry */
@@ -2502,7 +2502,7 @@
 	}
 	/* If we are in monitor mode, use BCAST. This is required for
 	 * packet injection. */
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		return priv->hw_setting.bcast_sta_id;
 
 	default:
@@ -2565,16 +2565,16 @@
 
 	/* drop all data frames if we are not associated */
 	if (ieee80211_is_data(fc) &&
-	    (priv->iw_mode != IEEE80211_IF_TYPE_MNTR) && /* packet injection */
+	    (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
 	    (!iwl3945_is_associated(priv) ||
-	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id))) {
+	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
 		IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
 		goto drop_unlock;
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));
+	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find (or create) index into station table for destination station */
 	sta_id = iwl3945_get_sta_id(priv, hdr);
@@ -2590,7 +2590,7 @@
 
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & 0xf;
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 		seq_number = priv->stations[sta_id].tid[tid].seq_number &
 				IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2709,7 +2709,7 @@
 			   sizeof(out_cmd->cmd.tx));
 
 	iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
-			   ieee80211_get_hdrlen(le16_to_cpu(fc)));
+			   ieee80211_hdrlen(fc));
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -2806,7 +2806,7 @@
 	if (disable_radio) {
 		iwl3945_scan_cancel(priv);
 		/* FIXME: This is a workaround for AP */
-		if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+		if (priv->iw_mode != NL80211_IFTYPE_AP) {
 			spin_lock_irqsave(&priv->lock, flags);
 			iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET,
 				    CSR_UCODE_SW_BIT_RFKILL);
@@ -3161,7 +3161,7 @@
 		le32_to_cpu(beacon->low_tsf), rate);
 #endif
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
 	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
 		queue_work(priv->workqueue, &priv->beacon_update);
 }
@@ -4782,8 +4782,11 @@
 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
  * sending probe req.  This should be set long enough to hear probe responses
  * from more than one AP.  */
-#define IWL_ACTIVE_DWELL_TIME_24    (20)	/* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52    (10)
+#define IWL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
 
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -4792,7 +4795,7 @@
  * no other traffic).
  * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
 #define IWL_PLCP_QUIET_THRESH       __constant_cpu_to_le16(1)	/* packets */
-#define IWL_ACTIVE_QUIET_TIME       __constant_cpu_to_le16(5)	/* msec */
+#define IWL_ACTIVE_QUIET_TIME       __constant_cpu_to_le16(10)	/* msec */
 
 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
  * Must be set longer than active dwell time.
@@ -4802,19 +4805,23 @@
 #define IWL_PASSIVE_DWELL_BASE      (100)
 #define IWL_CHANNEL_TUNE_TIME       5
 
+#define IWL_SCAN_PROBE_MASK(n)	 cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+
 static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
-						enum ieee80211_band band)
+						enum ieee80211_band band,
+						u8 n_probes)
 {
 	if (band == IEEE80211_BAND_5GHZ)
-		return IWL_ACTIVE_DWELL_TIME_52;
+		return IWL_ACTIVE_DWELL_TIME_52 +
+			IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
 	else
-		return IWL_ACTIVE_DWELL_TIME_24;
+		return IWL_ACTIVE_DWELL_TIME_24 +
+			IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
 }
 
 static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
 					  enum ieee80211_band band)
 {
-	u16 active = iwl3945_get_active_dwell_time(priv, band);
 	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
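The reworked active-dwell helper above now scales the per-channel listen time with the number of direct probe SSIDs. A small standalone sketch of the arithmetic, reusing the 2.4 GHz constants introduced in this hunk (base 30 ms, factor 3; the exact values are whatever the #defines above say):

#include <stdio.h>

#define ACTIVE_DWELL_TIME_24	30	/* ms, mirrors IWL_ACTIVE_DWELL_TIME_24 */
#define ACTIVE_DWELL_FACTOR_24	3	/* mirrors IWL_ACTIVE_DWELL_FACTOR_24GHZ */

/* Active dwell grows with the number of direct probe requests sent */
static unsigned int active_dwell_24(unsigned int n_probes)
{
	return ACTIVE_DWELL_TIME_24 + ACTIVE_DWELL_FACTOR_24 * (n_probes + 1);
}

int main(void)
{
	/* two direct SSIDs: 30 + 3 * (2 + 1) = 39 ms per 2.4 GHz channel */
	printf("%u ms\n", active_dwell_24(2));
	return 0;
}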
@@ -4829,15 +4836,12 @@
 		passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
 	}
 
-	if (passive <= active)
-		passive = active + 1;
-
 	return passive;
 }
 
 static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
 					 enum ieee80211_band band,
-				     u8 is_active, u8 direct_mask,
+				     u8 is_active, u8 n_probes,
 				     struct iwl3945_scan_channel *scan_ch)
 {
 	const struct ieee80211_channel *channels = NULL;
@@ -4853,9 +4857,12 @@
 
 	channels = sband->channels;
 
-	active_dwell = iwl3945_get_active_dwell_time(priv, band);
+	active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
 	passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
 
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
 	for (i = 0, added = 0; i < sband->n_channels; i++) {
 		if (channels[i].flags & IEEE80211_CHAN_DISABLED)
 			continue;
@@ -4875,8 +4882,8 @@
 		else
 			scan_ch->type = 1;	/* active */
 
-		if (scan_ch->type & 1)
-			scan_ch->type |= (direct_mask << 1);
+		if ((scan_ch->type & 1) && n_probes)
+			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
 
 		scan_ch->active_dwell = cpu_to_le16(active_dwell);
 		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
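The channel entries above now carry a probe mask instead of a shifted direct_mask. A quick standalone expansion of the BIT(n) | (BIT(n) - BIT(1)) pattern used by IWL_SCAN_PROBE_MASK(), with the cpu_to_le32() conversion left out of the sketch:

#include <stdio.h>

#define BIT(n)			(1UL << (n))
/* Sets bits 1..n, i.e. one bit per direct probe SSID */
#define SCAN_PROBE_MASK(n)	(BIT(n) | (BIT(n) - BIT(1)))

int main(void)
{
	/* n = 3: 0x8 | (0x8 - 0x2) = 0x8 | 0x6 = 0xe -> bits 1, 2 and 3 set */
	printf("SCAN_PROBE_MASK(3) = 0x%lx\n", SCAN_PROBE_MASK(3));
	return 0;
}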
@@ -6052,7 +6059,7 @@
 	if (!iwl3945_is_ready(priv))
 		IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
 	else
-		if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
+		if (iwl3945_set_mode(priv, NL80211_IFTYPE_MONITOR) != 0)
 			IWL_ERROR("iwl3945_set_mode() failed\n");
 
 	mutex_unlock(&priv->mutex);
@@ -6093,7 +6100,7 @@
 	int rc = 0;
 	struct iwl3945_scan_cmd *scan;
 	struct ieee80211_conf *conf = NULL;
-	u8 direct_mask;
+	u8 n_probes = 2;
 	enum ieee80211_band band;
 
 	conf = ieee80211_get_hw_conf(priv->hw);
@@ -6201,7 +6208,7 @@
 		scan->direct_scan[0].len = priv->direct_ssid_len;
 		memcpy(scan->direct_scan[0].ssid,
 		       priv->direct_ssid, priv->direct_ssid_len);
-		direct_mask = 1;
+		n_probes++;
 	} else if (!iwl3945_is_associated(priv) && priv->essid_len) {
 		IWL_DEBUG_SCAN
 		  ("Kicking off one direct scan for '%s' when not associated\n",
@@ -6209,11 +6216,9 @@
 		scan->direct_scan[0].id = WLAN_EID_SSID;
 		scan->direct_scan[0].len = priv->essid_len;
 		memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
-		direct_mask = 1;
-	} else {
+		n_probes++;
+	} else
 		IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
-		direct_mask = 0;
-	}
 
 	/* We don't build a direct scan probe request; the uCode will do
 	 * that based on the direct_mask added to each channel entry */
@@ -6243,21 +6248,13 @@
 	/* select Rx antennas */
 	scan->flags |= iwl3945_get_antenna_flags(priv);
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
+	if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
 		scan->filter_flags = RXON_FILTER_PROMISC_MSK;
 
-	if (direct_mask)
-		scan->channel_count =
-			iwl3945_get_channels_for_scan(
-				priv, band, 1, /* active */
-				direct_mask,
-				(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
-	else
-		scan->channel_count =
-			iwl3945_get_channels_for_scan(
-				priv, band, 0, /* passive */
-				direct_mask,
-				(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+	scan->channel_count =
+		iwl3945_get_channels_for_scan(priv, band, 1, /* active */
+					      n_probes,
+			(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
 
 	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
 	    scan->channel_count * sizeof(struct iwl3945_scan_channel);
@@ -6320,16 +6317,13 @@
 
 #define IWL_DELAY_NEXT_SCAN (HZ*2)
 
-static void iwl3945_bg_post_associate(struct work_struct *data)
+static void iwl3945_post_associate(struct iwl3945_priv *priv)
 {
-	struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv,
-					     post_associate.work);
-
 	int rc = 0;
 	struct ieee80211_conf *conf = NULL;
 	DECLARE_MAC_BUF(mac);
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		IWL_ERROR("%s Should not be called in AP mode\n", __func__);
 		return;
 	}
@@ -6342,12 +6336,9 @@
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	mutex_lock(&priv->mutex);
-
-	if (!priv->vif || !priv->is_open) {
-		mutex_unlock(&priv->mutex);
+	if (!priv->vif || !priv->is_open)
 		return;
-	}
+
 	iwl3945_scan_cancel_timeout(priv, 200);
 
 	conf = ieee80211_get_hw_conf(priv->hw);
@@ -6381,7 +6372,7 @@
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
 	}
@@ -6389,11 +6380,11 @@
 	iwl3945_commit_rxon(priv);
 
 	switch (priv->iw_mode) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
 		break;
 
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 
 		/* clear out the station table */
 		iwl3945_clear_stations_table(priv);
@@ -6419,7 +6410,6 @@
 
 	/* we have just associated, don't start scan too early */
 	priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
-	mutex_unlock(&priv->mutex);
 }
 
 static void iwl3945_bg_abort_scan(struct work_struct *work)
@@ -6567,7 +6557,6 @@
 		 */
 		mutex_lock(&priv->mutex);
 		iwl3945_scan_cancel_timeout(priv, 100);
-		cancel_delayed_work(&priv->post_associate);
 		mutex_unlock(&priv->mutex);
 	}
 
@@ -6650,8 +6639,6 @@
 	mutex_lock(&priv->mutex);
 	IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
 
-	priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
-
 	if (!iwl3945_is_ready(priv)) {
 		IWL_DEBUG_MAC80211("leave - not ready\n");
 		ret = -EIO;
@@ -6767,7 +6754,7 @@
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
 
-			if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
+			if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
 		}
@@ -6804,7 +6791,7 @@
 	}
 
 	/* handle this temporarily here */
-	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
+	if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
 	    conf->changed & IEEE80211_IFCC_BEACON) {
 		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
 		if (!beacon)
@@ -6816,7 +6803,7 @@
 
 	/* XXX: this MUST use conf->mac_addr */
 
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
+	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
 	    (!conf->ssid_len)) {
 		IWL_DEBUG_MAC80211
 		    ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -6839,7 +6826,7 @@
 	    !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
  */
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {
 		if (!conf->bssid) {
 			conf->bssid = priv->mac_addr;
 			memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -6874,11 +6861,11 @@
 		 * to verify) - jpk */
 		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
 
-		if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
+		if (priv->iw_mode == NL80211_IFTYPE_AP)
 			iwl3945_config_ap(priv);
 		else {
 			rc = iwl3945_commit_rxon(priv);
-			if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
+			if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
 				iwl3945_add_station(priv,
 					priv->active_rxon.bssid_addr, 1, 0);
 		}
@@ -6914,7 +6901,7 @@
 
 	if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
 		IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
-				   IEEE80211_IF_TYPE_MNTR,
+				   NL80211_IFTYPE_MONITOR,
 				   changed_flags, *total_flags);
 		/* queue work 'cuz mac80211 is holding a lock which
 		 * prevents us from issuing (synchronous) f/w cmds */
@@ -6935,7 +6922,6 @@
 
 	if (iwl3945_is_ready_rf(priv)) {
 		iwl3945_scan_cancel_timeout(priv, 100);
-		cancel_delayed_work(&priv->post_associate);
 		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 		iwl3945_commit_rxon(priv);
 	}
@@ -6950,6 +6936,63 @@
 	IWL_DEBUG_MAC80211("leave\n");
 }
 
+#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_bss_conf *bss_conf,
+				     u32 changes)
+{
+	struct iwl3945_priv *priv = hw->priv;
+
+	IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
+
+	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+		IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
+				   bss_conf->use_short_preamble);
+		if (bss_conf->use_short_preamble)
+			priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	}
+
+	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+		IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+			priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
+		else
+			priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+	}
+
+	if (changes & BSS_CHANGED_ASSOC) {
+		IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
+		/* This should never happen as this function should
+		 * never be called from interrupt context. */
+		if (WARN_ON_ONCE(in_interrupt()))
+			return;
+		if (bss_conf->assoc) {
+			priv->assoc_id = bss_conf->aid;
+			priv->beacon_int = bss_conf->beacon_int;
+			priv->timestamp0 = bss_conf->timestamp & 0xFFFFFFFF;
+			priv->timestamp1 = (bss_conf->timestamp >> 32) &
+					     0xFFFFFFFF;
+			priv->assoc_capability = bss_conf->assoc_capability;
+			priv->next_scan_jiffies = jiffies +
+					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
+			mutex_lock(&priv->mutex);
+			iwl3945_post_associate(priv);
+			mutex_unlock(&priv->mutex);
+		} else {
+			priv->assoc_id = 0;
+			IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
+		}
+	} else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
+			IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
+			iwl3945_send_rxon_assoc(priv);
+	}
+
+}
+
 static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
 {
 	int rc = 0;
@@ -6967,7 +7010,7 @@
 		goto out_unlock;
 	}
 
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {	/* APs don't scan */
+	if (priv->iw_mode == NL80211_IFTYPE_AP) {	/* APs don't scan */
 		rc = -EIO;
 		IWL_ERROR("ERROR: APs don't scan\n");
 		goto out_unlock;
@@ -7109,7 +7152,7 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	mutex_lock(&priv->mutex);
-	if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
+	if (priv->iw_mode == NL80211_IFTYPE_AP)
 		iwl3945_activate_qos(priv, 1);
 	else if (priv->assoc_id && iwl3945_is_associated(priv))
 		iwl3945_activate_qos(priv, 0);
@@ -7182,8 +7225,6 @@
 
 	iwl3945_reset_qos(priv);
 
-	cancel_delayed_work(&priv->post_associate);
-
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->assoc_id = 0;
 	priv->assoc_capability = 0;
@@ -7198,7 +7239,7 @@
 	priv->beacon_int = priv->hw->conf.beacon_int;
 	priv->timestamp1 = 0;
 	priv->timestamp0 = 0;
-	if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
+	if (priv->iw_mode == NL80211_IFTYPE_STATION)
 		priv->beacon_int = 0;
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -7212,14 +7253,14 @@
 	/* we are restarting association process
 	 * clear RXON_FILTER_ASSOC_MSK bit
 	*/
-	if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+	if (priv->iw_mode != NL80211_IFTYPE_AP) {
 		iwl3945_scan_cancel_timeout(priv, 100);
 		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 		iwl3945_commit_rxon(priv);
 	}
 
 	/* Per mac80211.h: This is only used in IBSS mode... */
-	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
 
 		IWL_DEBUG_MAC80211("leave - not in IBSS\n");
 		mutex_unlock(&priv->mutex);
@@ -7248,7 +7289,7 @@
 		return -EIO;
 	}
 
-	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
 		IWL_DEBUG_MAC80211("leave - not IBSS\n");
 		mutex_unlock(&priv->mutex);
 		return -EIO;
@@ -7268,7 +7309,7 @@
 
 	iwl3945_reset_qos(priv);
 
-	queue_work(priv->workqueue, &priv->post_associate.work);
+	iwl3945_post_associate(priv);
 
 	mutex_unlock(&priv->mutex);
 
@@ -7329,15 +7370,6 @@
 
 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
 
-static ssize_t show_rs_window(struct device *d,
-			      struct device_attribute *attr,
-			      char *buf)
-{
-	struct iwl3945_priv *priv = d->driver_data;
-	return iwl3945_fill_rs_info(priv->hw, buf, IWL_AP_ID);
-}
-static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
-
 static ssize_t show_tx_power(struct device *d,
 			     struct device_attribute *attr, char *buf)
 {
@@ -7767,7 +7799,6 @@
 	INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
 	INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
 	INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
-	INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
 	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
 	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
 	INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
@@ -7785,7 +7816,6 @@
 	cancel_delayed_work_sync(&priv->init_alive_start);
 	cancel_delayed_work(&priv->scan_check);
 	cancel_delayed_work(&priv->alive_start);
-	cancel_delayed_work(&priv->post_associate);
 	cancel_work_sync(&priv->beacon_update);
 }
 
@@ -7801,7 +7831,6 @@
 #endif
 	&dev_attr_power_level.attr,
 	&dev_attr_retry_rate.attr,
-	&dev_attr_rs_window.attr,
 	&dev_attr_statistics.attr,
 	&dev_attr_status.attr,
 	&dev_attr_temperature.attr,
@@ -7830,6 +7859,7 @@
 	.conf_tx = iwl3945_mac_conf_tx,
 	.get_tsf = iwl3945_mac_get_tsf,
 	.reset_tsf = iwl3945_mac_reset_tsf,
+	.bss_info_changed = iwl3945_bss_info_changed,
 	.hw_scan = iwl3945_mac_hw_scan
 };
 
@@ -7868,6 +7898,7 @@
 	SET_IEEE80211_DEV(hw, &pdev->dev);
 
 	hw->rate_control_algorithm = "iwl-3945-rs";
+	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
 
 	IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
 	priv = hw->priv;
@@ -7890,6 +7921,11 @@
 	hw->flags = IEEE80211_HW_SIGNAL_DBM |
 		    IEEE80211_HW_NOISE_DBM;
 
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
 	/* 4 EDCA QOS priorities */
 	hw->queues = 4;
 
@@ -7951,7 +7987,7 @@
 		IWL_DEBUG_INFO("Radio disabled.\n");
 	}
 
-	priv->iw_mode = IEEE80211_IF_TYPE_STA;
+	priv->iw_mode = NL80211_IFTYPE_STATION;
 
 	printk(KERN_INFO DRV_NAME
 		": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
@@ -8331,6 +8367,8 @@
 	iwl3945_rate_control_unregister();
 }
 
+MODULE_FIRMWARE("iwlwifi-3945" IWL3945_UCODE_API ".ucode");
+
 module_param_named(antenna, iwl3945_param_antenna, int, 0444);
 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
 module_param_named(disable, iwl3945_param_disable, int, 0444);
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index a267d6e..92be604 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -8,6 +8,7 @@
 #include "scan.h"
 #include "cmd.h"
 
+static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp);
 
 static const u8 bssid_any[ETH_ALEN]  __attribute__ ((aligned (2))) =
 	{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -20,12 +21,88 @@
 #define CAPINFO_MASK	(~(0xda00))
 
 
+/**
+ *  @brief This function finds common rates between rates and card rates.
+ *
+ * It fills the common rates back into the rates buffer as output, if any
+ * are found.
+ *
+ * NOTE: Setting the MSB of the basic rates needs to be taken care of,
+ *   either before or after calling this function
+ *
+ *  @param priv        A pointer to struct lbs_private structure
+ *  @param rates       the buffer holding the input rates; filled with the common rates on return
+ *  @param rates_size  the size of the rates buffer; updated to the new size on return
+ *
+ *  @return            0 on success, or -1 on error
+ */
+static int get_common_rates(struct lbs_private *priv,
+	u8 *rates,
+	u16 *rates_size)
+{
+	u8 *card_rates = lbs_bg_rates;
+	size_t num_card_rates = sizeof(lbs_bg_rates);
+	int ret = 0, i, j;
+	u8 tmp[30];
+	size_t tmp_size = 0;
+
+	/* For each rate in card_rates that exists in rate1, copy to tmp */
+	for (i = 0; (i < num_card_rates) && card_rates[i]; i++) {
+		for (j = 0; (j < *rates_size) && rates[j]; j++) {
+			if (rates[j] == card_rates[i])
+				tmp[tmp_size++] = card_rates[i];
+		}
+	}
+
+	lbs_deb_hex(LBS_DEB_JOIN, "AP rates    ", rates, *rates_size);
+	lbs_deb_hex(LBS_DEB_JOIN, "card rates  ", card_rates, num_card_rates);
+	lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
+	lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
+
+	if (!priv->enablehwauto) {
+		for (i = 0; i < tmp_size; i++) {
+			if (tmp[i] == priv->cur_rate)
+				goto done;
+		}
+		lbs_pr_alert("Previously set fixed data rate %#x isn't "
+		       "compatible with the network.\n", priv->cur_rate);
+		ret = -1;
+		goto done;
+	}
+	ret = 0;
+
+done:
+	memset(rates, 0, *rates_size);
+	*rates_size = min_t(int, tmp_size, *rates_size);
+	memcpy(rates, tmp, *rates_size);
+	return ret;
+}
+
+
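get_common_rates() above intersects the AP's advertised rates with the card's B/G rate table. A minimal userspace sketch of that intersection, with made-up example rate values (500 kb/s units) and without the fixed-rate check the real helper performs when hardware auto-rate is disabled:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static size_t common_rates(const uint8_t *card, size_t n_card,
			   uint8_t *rates, size_t n_rates)
{
	uint8_t tmp[30];
	size_t n = 0, i, j;

	/* keep every card rate that also appears in the AP's rate set */
	for (i = 0; i < n_card && card[i]; i++)
		for (j = 0; j < n_rates && rates[j]; j++)
			if (rates[j] == card[i] && n < sizeof(tmp))
				tmp[n++] = card[i];

	memset(rates, 0, n_rates);
	n = n < n_rates ? n : n_rates;
	memcpy(rates, tmp, n);
	return n;
}

int main(void)
{
	const uint8_t card[] = { 0x02, 0x04, 0x0b, 0x16, 0x24, 0x30, 0x48, 0 };
	uint8_t ap[16] = { 0x02, 0x04, 0x0b, 0x16, 0x6c, 0 };
	size_t i, n = common_rates(card, sizeof(card), ap, sizeof(ap));

	for (i = 0; i < n; i++)
		printf("0x%02x ", ap[i]);	/* prints 0x02 0x04 0x0b 0x16 */
	printf("\n");
	return 0;
}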
+/**
+ *  @brief Sets the MSB on basic rates as the firmware requires
+ *
+ * Scan through an array and set the MSB for basic data rates.
+ *
+ *  @param rates     buffer of data rates
+ *  @param len       size of buffer
+ */
+static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (rates[i] == 0x02 || rates[i] == 0x04 ||
+		    rates[i] == 0x0b || rates[i] == 0x16)
+			rates[i] |= 0x80;
+	}
+}
+
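And a quick worked example of what lbs_set_basic_rate_flags() above produces: the 11b basic rates (1/2/5.5/11 Mb/s, i.e. 0x02/0x04/0x0b/0x16 in 500 kb/s units) get the MSB set, while an OFDM rate is left alone. The helper is repeated here only so the sketch runs standalone:

#include <stdio.h>
#include <stdint.h>

static void set_basic_rate_flags(uint8_t *rates, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (rates[i] == 0x02 || rates[i] == 0x04 ||
		    rates[i] == 0x0b || rates[i] == 0x16)
			rates[i] |= 0x80;	/* mark as a basic rate */
}

int main(void)
{
	uint8_t rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x24 };
	size_t i;

	set_basic_rate_flags(rates, sizeof(rates));
	for (i = 0; i < sizeof(rates); i++)
		printf("0x%02x ", rates[i]);	/* 0x82 0x84 0x8b 0x96 0x24 */
	printf("\n");
	return 0;
}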
 
 /**
  *  @brief Associate to a specific BSS discovered in a scan
  *
  *  @param priv      A pointer to struct lbs_private structure
- *  @param pbssdesc  Pointer to the BSS descriptor to associate with.
+ *  @param assoc_req The association request describing the BSS to associate with
  *
  *  @return          0-success, otherwise fail
  */
@@ -33,29 +110,29 @@
 	struct assoc_request *assoc_req)
 {
 	int ret;
+	u8 preamble = RADIO_PREAMBLE_LONG;
 
 	lbs_deb_enter(LBS_DEB_ASSOC);
 
 	ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
 				    0, CMD_OPTION_WAITFORRSP,
 				    0, assoc_req->bss.bssid);
-
 	if (ret)
-		goto done;
+		goto out;
 
-	/* set preamble to firmware */
+	/* Use short preamble only when both the BSS and firmware support it */
 	if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
 	    (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
-		priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
-	else
-		priv->preamble = CMD_TYPE_LONG_PREAMBLE;
+		preamble = RADIO_PREAMBLE_SHORT;
 
-	lbs_set_radio_control(priv);
+	ret = lbs_set_radio(priv, preamble, 1);
+	if (ret)
+		goto out;
 
 	ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
 				    0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
 
-done:
+out:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
 	return ret;
 }
@@ -64,17 +141,22 @@
  *  @brief Join an adhoc network found in a previous scan
  *
  *  @param priv         A pointer to struct lbs_private structure
- *  @param pbssdesc     Pointer to a BSS descriptor found in a previous scan
- *                      to attempt to join
+ *  @param assoc_req    The association request describing the BSS to join
  *
- *  @return             0--success, -1--fail
+ *  @return             0 on success, error on failure
  */
-static int lbs_join_adhoc_network(struct lbs_private *priv,
+static int lbs_adhoc_join(struct lbs_private *priv,
 	struct assoc_request *assoc_req)
 {
+	struct cmd_ds_802_11_ad_hoc_join cmd;
 	struct bss_descriptor *bss = &assoc_req->bss;
+	u8 preamble = RADIO_PREAMBLE_LONG;
+	DECLARE_MAC_BUF(mac);
+	u16 ratesize = 0;
 	int ret = 0;
 
+	lbs_deb_enter(LBS_DEB_ASSOC);
+
 	lbs_deb_join("current SSID '%s', ssid length %u\n",
 		escape_essid(priv->curbssparams.ssid,
 		priv->curbssparams.ssid_len),
@@ -106,29 +188,106 @@
 		goto out;
 	}
 
-	/* Use shortpreamble only when both creator and card supports
-	   short preamble */
-	if (!(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) ||
-	    !(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
-		lbs_deb_join("AdhocJoin: Long preamble\n");
-		priv->preamble = CMD_TYPE_LONG_PREAMBLE;
-	} else {
+	/* Use short preamble only when both the BSS and firmware support it */
+	if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
+	    (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
 		lbs_deb_join("AdhocJoin: Short preamble\n");
-		priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
+		preamble = RADIO_PREAMBLE_SHORT;
 	}
 
-	lbs_set_radio_control(priv);
+	ret = lbs_set_radio(priv, preamble, 1);
+	if (ret)
+		goto out;
 
 	lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
 	lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
 
 	priv->adhoccreate = 0;
+	priv->curbssparams.channel = bss->channel;
 
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN,
-				    0, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_SSID, assoc_req);
+	/* Build the join command */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	cmd.bss.type = CMD_BSS_TYPE_IBSS;
+	cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
+
+	memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
+	memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
+
+	memcpy(&cmd.bss.phyparamset, &bss->phyparamset,
+	       sizeof(union ieeetypes_phyparamset));
+
+	memcpy(&cmd.bss.ssparamset, &bss->ssparamset,
+	       sizeof(union IEEEtypes_ssparamset));
+
+	cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
+	lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
+	       bss->capability, CAPINFO_MASK);
+
+	/* information on BSSID descriptor passed to FW */
+	lbs_deb_join("ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
+			print_mac(mac, cmd.bss.bssid), cmd.bss.ssid);
+
+	/* Only v8 and below support setting these */
+	if (priv->fwrelease < 0x09000000) {
+		/* failtimeout */
+		cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
+		/* probedelay */
+		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+	}
+
+	/* Copy Data rates from the rates recorded in scan response */
+	memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
+	ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
+	memcpy(cmd.bss.rates, bss->rates, ratesize);
+	if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
+		lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
+		ret = -1;
+		goto out;
+	}
+
+	/* Copy the ad-hoc creation rates into Current BSS state structure */
+	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
+	memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
+
+	/* Set MSB on basic rates as the firmware requires, but _after_
+	 * copying to current bss rates.
+	 */
+	lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
+
+	cmd.bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow);
+
+	if (assoc_req->secinfo.wep_enabled) {
+		u16 tmp = le16_to_cpu(cmd.bss.capability);
+		tmp |= WLAN_CAPABILITY_PRIVACY;
+		cmd.bss.capability = cpu_to_le16(tmp);
+	}
+
+	if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
+		__le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
+
+		/* wake up first */
+		ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
+						   CMD_ACT_SET, 0, 0,
+						   &local_ps_mode);
+		if (ret) {
+			ret = -1;
+			goto out;
+		}
+	}
+
+	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
+		ret = -1;
+		goto out;
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
+	if (ret == 0)
+		ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
 
 out:
+	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
 	return ret;
 }
 
@@ -136,39 +295,131 @@
  *  @brief Start an Adhoc Network
  *
  *  @param priv         A pointer to struct lbs_private structure
- *  @param adhocssid    The ssid of the Adhoc Network
- *  @return             0--success, -1--fail
+ *  @param assoc_req    The association request describing the BSS to start
+ *
+ *  @return             0 on success, error on failure
  */
-static int lbs_start_adhoc_network(struct lbs_private *priv,
+static int lbs_adhoc_start(struct lbs_private *priv,
 	struct assoc_request *assoc_req)
 {
+	struct cmd_ds_802_11_ad_hoc_start cmd;
+	u8 preamble = RADIO_PREAMBLE_LONG;
+	size_t ratesize = 0;
+	u16 tmpcap = 0;
 	int ret = 0;
 
-	priv->adhoccreate = 1;
+	lbs_deb_enter(LBS_DEB_ASSOC);
 
 	if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
-		lbs_deb_join("AdhocStart: Short preamble\n");
-		priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
-	} else {
-		lbs_deb_join("AdhocStart: Long preamble\n");
-		priv->preamble = CMD_TYPE_LONG_PREAMBLE;
+		lbs_deb_join("ADHOC_START: Will use short preamble\n");
+		preamble = RADIO_PREAMBLE_SHORT;
 	}
 
-	lbs_set_radio_control(priv);
+	ret = lbs_set_radio(priv, preamble, 1);
+	if (ret)
+		goto out;
 
-	lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel);
-	lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band);
+	/* Build the start command */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
 
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START,
-				    0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
+	memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
 
+	lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
+		escape_essid(assoc_req->ssid, assoc_req->ssid_len),
+		assoc_req->ssid_len);
+
+	cmd.bsstype = CMD_BSS_TYPE_IBSS;
+
+	if (priv->beacon_period == 0)
+		priv->beacon_period = MRVDRV_BEACON_INTERVAL;
+	cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
+
+	WARN_ON(!assoc_req->channel);
+
+	/* set Physical parameter set */
+	cmd.phyparamset.dsparamset.elementid = MFIE_TYPE_DS_SET;
+	cmd.phyparamset.dsparamset.len = 1;
+	cmd.phyparamset.dsparamset.currentchan = assoc_req->channel;
+
+	/* set IBSS parameter set */
+	cmd.ssparamset.ibssparamset.elementid = MFIE_TYPE_IBSS_SET;
+	cmd.ssparamset.ibssparamset.len = 2;
+	cmd.ssparamset.ibssparamset.atimwindow = 0;
+
+	/* set capability info */
+	tmpcap = WLAN_CAPABILITY_IBSS;
+	if (assoc_req->secinfo.wep_enabled) {
+		lbs_deb_join("ADHOC_START: WEP enabled, setting privacy on\n");
+		tmpcap |= WLAN_CAPABILITY_PRIVACY;
+	} else {
+		lbs_deb_join("ADHOC_START: WEP disabled, setting privacy off\n");
+	}
+
+	cmd.capability = cpu_to_le16(tmpcap);
+
+	/* Only v8 and below support setting probe delay */
+	if (priv->fwrelease < 0x09000000)
+		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+
+	ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
+	memcpy(cmd.rates, lbs_bg_rates, ratesize);
+
+	/* Copy the ad-hoc creation rates into Current BSS state structure */
+	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
+	memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
+
+	/* Set MSB on basic rates as the firmware requires, but _after_
+	 * copying to current bss rates.
+	 */
+	lbs_set_basic_rate_flags(cmd.rates, ratesize);
+
+	lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
+	       cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
+
+	if (lbs_create_dnld_countryinfo_11d(priv)) {
+		lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
+		ret = -1;
+		goto out;
+	}
+
+	lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
+		     assoc_req->channel, assoc_req->band);
+
+	priv->adhoccreate = 1;
+	priv->mode = IW_MODE_ADHOC;
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
+	if (ret == 0)
+		ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
+
+out:
+	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
 	return ret;
 }
 
-int lbs_stop_adhoc_network(struct lbs_private *priv)
+/**
+ *  @brief Stop and Ad-Hoc network and exit Ad-Hoc mode
+ *
+ *  @param priv         A pointer to struct lbs_private structure
+ *  @return             0 on success, or an error
+ */
+int lbs_adhoc_stop(struct lbs_private *priv)
 {
-	return lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP,
-				     0, CMD_OPTION_WAITFORRSP, 0, NULL);
+	struct cmd_ds_802_11_ad_hoc_stop cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_JOIN);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
+
+	/* Clean up everything even if there was an error */
+	lbs_mac_event_disconnected(priv);
+
+	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
+	return ret;
 }
 
 static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
@@ -480,14 +731,14 @@
 		if (bss != NULL) {
 			lbs_deb_assoc("SSID found, will join\n");
 			memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
-			lbs_join_adhoc_network(priv, assoc_req);
+			lbs_adhoc_join(priv, assoc_req);
 		} else {
 			/* else send START command */
 			lbs_deb_assoc("SSID not found, creating adhoc network\n");
 			memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
 				IW_ESSID_MAX_SIZE);
 			assoc_req->bss.ssid_len = assoc_req->ssid_len;
-			lbs_start_adhoc_network(priv, assoc_req);
+			lbs_adhoc_start(priv, assoc_req);
 		}
 	}
 
@@ -520,7 +771,7 @@
 		ret = lbs_associate(priv, assoc_req);
 		lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret);
 	} else if (assoc_req->mode == IW_MODE_ADHOC) {
-		lbs_join_adhoc_network(priv, assoc_req);
+		lbs_adhoc_join(priv, assoc_req);
 	}
 
 out:
@@ -572,11 +823,7 @@
 	}
 
 	priv->mode = assoc_req->mode;
-	ret = lbs_prepare_and_send_command(priv,
-				    CMD_802_11_SNMP_MIB,
-				    0, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_INFRASTRUCTURE_MODE,
-		/* Shoot me now */  (void *) (size_t) assoc_req->mode);
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1029,7 +1276,9 @@
 	 */
 	if (priv->mode == IW_MODE_INFRA) {
 		if (should_deauth_infrastructure(priv, assoc_req)) {
-			ret = lbs_send_deauthentication(priv);
+			ret = lbs_cmd_80211_deauthenticate(priv,
+							   priv->curbssparams.bssid,
+							   WLAN_REASON_DEAUTH_LEAVING);
 			if (ret) {
 				lbs_deb_assoc("Deauthentication due to new "
 					"configuration request failed: %d\n",
@@ -1038,7 +1287,7 @@
 		}
 	} else if (priv->mode == IW_MODE_ADHOC) {
 		if (should_stop_adhoc(priv, assoc_req)) {
-			ret = lbs_stop_adhoc_network(priv);
+			ret = lbs_adhoc_stop(priv);
 			if (ret) {
 				lbs_deb_assoc("Teardown of AdHoc network due to "
 					"new configuration request failed: %d\n",
@@ -1214,94 +1463,6 @@
 
 
 /**
- *  @brief This function finds common rates between rate1 and card rates.
- *
- * It will fill common rates in rate1 as output if found.
- *
- * NOTE: Setting the MSB of the basic rates need to be taken
- *   care, either before or after calling this function
- *
- *  @param priv     A pointer to struct lbs_private structure
- *  @param rate1       the buffer which keeps input and output
- *  @param rate1_size  the size of rate1 buffer; new size of buffer on return
- *
- *  @return            0 or -1
- */
-static int get_common_rates(struct lbs_private *priv,
-	u8 *rates,
-	u16 *rates_size)
-{
-	u8 *card_rates = lbs_bg_rates;
-	size_t num_card_rates = sizeof(lbs_bg_rates);
-	int ret = 0, i, j;
-	u8 tmp[30];
-	size_t tmp_size = 0;
-
-	/* For each rate in card_rates that exists in rate1, copy to tmp */
-	for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
-		for (j = 0; rates[j] && (j < *rates_size); j++) {
-			if (rates[j] == card_rates[i])
-				tmp[tmp_size++] = card_rates[i];
-		}
-	}
-
-	lbs_deb_hex(LBS_DEB_JOIN, "AP rates    ", rates, *rates_size);
-	lbs_deb_hex(LBS_DEB_JOIN, "card rates  ", card_rates, num_card_rates);
-	lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
-	lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
-
-	if (!priv->enablehwauto) {
-		for (i = 0; i < tmp_size; i++) {
-			if (tmp[i] == priv->cur_rate)
-				goto done;
-		}
-		lbs_pr_alert("Previously set fixed data rate %#x isn't "
-		       "compatible with the network.\n", priv->cur_rate);
-		ret = -1;
-		goto done;
-	}
-	ret = 0;
-
-done:
-	memset(rates, 0, *rates_size);
-	*rates_size = min_t(int, tmp_size, *rates_size);
-	memcpy(rates, tmp, *rates_size);
-	return ret;
-}
-
-
-/**
- *  @brief Sets the MSB on basic rates as the firmware requires
- *
- * Scan through an array and set the MSB for basic data rates.
- *
- *  @param rates     buffer of data rates
- *  @param len       size of buffer
- */
-static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		if (rates[i] == 0x02 || rates[i] == 0x04 ||
-		    rates[i] == 0x0b || rates[i] == 0x16)
-			rates[i] |= 0x80;
-	}
-}
-
-/**
- *  @brief Send Deauthentication Request
- *
- *  @param priv      A pointer to struct lbs_private structure
- *  @return          0--success, -1--fail
- */
-int lbs_send_deauthentication(struct lbs_private *priv)
-{
-	return lbs_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE,
-				     0, CMD_OPTION_WAITFORRSP, 0, NULL);
-}
-
-/**
  *  @brief This function prepares command of authenticate.
  *
  *  @param priv      A pointer to struct lbs_private structure
@@ -1353,26 +1514,37 @@
 	return ret;
 }
 
-int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
-				   struct cmd_ds_command *cmd)
+/**
+ *  @brief Deauthenticate from a specific BSS
+ *
+ *  @param priv        A pointer to struct lbs_private structure
+ *  @param bssid       The specific BSS to deauthenticate from
+ *  @param reason      The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
+ *
+ *  @return            0 on success, error on failure
+ */
+int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
+				 u16 reason)
 {
-	struct cmd_ds_802_11_deauthenticate *dauth = &cmd->params.deauth;
+	struct cmd_ds_802_11_deauthenticate cmd;
+	int ret;
 
 	lbs_deb_enter(LBS_DEB_JOIN);
 
-	cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) +
-			     S_DS_GEN);
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
+	cmd.reasoncode = cpu_to_le16(reason);
 
-	/* set AP MAC address */
-	memmove(dauth->macaddr, priv->curbssparams.bssid, ETH_ALEN);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
 
-	/* Reason code 3 = Station is leaving */
-#define REASON_CODE_STA_LEAVING 3
-	dauth->reasoncode = cpu_to_le16(REASON_CODE_STA_LEAVING);
+	/* Clean up everything even if there was an error; can't assume that
+	 * we're still authenticated to the AP after trying to deauth.
+	 */
+	lbs_mac_event_disconnected(priv);
 
 	lbs_deb_leave(LBS_DEB_JOIN);
-	return 0;
+	return ret;
 }
 
 int lbs_cmd_80211_associate(struct lbs_private *priv,
@@ -1489,231 +1661,6 @@
 	return ret;
 }
 
-int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
-				 struct cmd_ds_command *cmd, void *pdata_buf)
-{
-	struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads;
-	int ret = 0;
-	int cmdappendsize = 0;
-	struct assoc_request *assoc_req = pdata_buf;
-	u16 tmpcap = 0;
-	size_t ratesize = 0;
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	if (!priv) {
-		ret = -1;
-		goto done;
-	}
-
-	cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START);
-
-	/*
-	 * Fill in the parameters for 2 data structures:
-	 *   1. cmd_ds_802_11_ad_hoc_start command
-	 *   2. priv->scantable[i]
-	 *
-	 * Driver will fill up SSID, bsstype,IBSS param, Physical Param,
-	 *   probe delay, and cap info.
-	 *
-	 * Firmware will fill up beacon period, DTIM, Basic rates
-	 *   and operational rates.
-	 */
-
-	memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE);
-	memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len);
-
-	lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n",
-		escape_essid(assoc_req->ssid, assoc_req->ssid_len),
-		assoc_req->ssid_len);
-
-	/* set the BSS type */
-	adhs->bsstype = CMD_BSS_TYPE_IBSS;
-	priv->mode = IW_MODE_ADHOC;
-	if (priv->beacon_period == 0)
-		priv->beacon_period = MRVDRV_BEACON_INTERVAL;
-	adhs->beaconperiod = cpu_to_le16(priv->beacon_period);
-
-	/* set Physical param set */
-#define DS_PARA_IE_ID   3
-#define DS_PARA_IE_LEN  1
-
-	adhs->phyparamset.dsparamset.elementid = DS_PARA_IE_ID;
-	adhs->phyparamset.dsparamset.len = DS_PARA_IE_LEN;
-
-	WARN_ON(!assoc_req->channel);
-
-	lbs_deb_join("ADHOC_S_CMD: Creating ADHOC on channel %d\n",
-		     assoc_req->channel);
-
-	adhs->phyparamset.dsparamset.currentchan = assoc_req->channel;
-
-	/* set IBSS param set */
-#define IBSS_PARA_IE_ID   6
-#define IBSS_PARA_IE_LEN  2
-
-	adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID;
-	adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN;
-	adhs->ssparamset.ibssparamset.atimwindow = 0;
-
-	/* set capability info */
-	tmpcap = WLAN_CAPABILITY_IBSS;
-	if (assoc_req->secinfo.wep_enabled) {
-		lbs_deb_join("ADHOC_S_CMD: WEP enabled, "
-			"setting privacy on\n");
-		tmpcap |= WLAN_CAPABILITY_PRIVACY;
-	} else {
-		lbs_deb_join("ADHOC_S_CMD: WEP disabled, "
-			"setting privacy off\n");
-	}
-	adhs->capability = cpu_to_le16(tmpcap);
-
-	/* probedelay */
-	adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
-
-	memset(adhs->rates, 0, sizeof(adhs->rates));
-	ratesize = min(sizeof(adhs->rates), sizeof(lbs_bg_rates));
-	memcpy(adhs->rates, lbs_bg_rates, ratesize);
-
-	/* Copy the ad-hoc creating rates into Current BSS state structure */
-	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
-	memcpy(&priv->curbssparams.rates, &adhs->rates, ratesize);
-
-	/* Set MSB on basic rates as the firmware requires, but _after_
-	 * copying to current bss rates.
-	 */
-	lbs_set_basic_rate_flags(adhs->rates, ratesize);
-
-	lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n",
-	       adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]);
-
-	lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n");
-
-	if (lbs_create_dnld_countryinfo_11d(priv)) {
-		lbs_deb_join("ADHOC_S_CMD: dnld_countryinfo_11d failed\n");
-		ret = -1;
-		goto done;
-	}
-
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_start) +
-				S_DS_GEN + cmdappendsize);
-
-	ret = 0;
-done:
-	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
-	return ret;
-}
-
-int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd)
-{
-	cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP);
-	cmd->size = cpu_to_le16(S_DS_GEN);
-
-	return 0;
-}
-
-int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
-				struct cmd_ds_command *cmd, void *pdata_buf)
-{
-	struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj;
-	struct assoc_request *assoc_req = pdata_buf;
-	struct bss_descriptor *bss = &assoc_req->bss;
-	int cmdappendsize = 0;
-	int ret = 0;
-	u16 ratesize = 0;
-	DECLARE_MAC_BUF(mac);
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN);
-
-	join_cmd->bss.type = CMD_BSS_TYPE_IBSS;
-	join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
-
-	memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN);
-	memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len);
-
-	memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset,
-	       sizeof(union ieeetypes_phyparamset));
-
-	memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset,
-	       sizeof(union IEEEtypes_ssparamset));
-
-	join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
-	lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
-	       bss->capability, CAPINFO_MASK);
-
-	/* information on BSSID descriptor passed to FW */
-	lbs_deb_join(
-	       "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
-	       print_mac(mac, join_cmd->bss.bssid),
-	       join_cmd->bss.ssid);
-
-	/* failtimeout */
-	join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
-
-	/* probedelay */
-	join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
-
-	priv->curbssparams.channel = bss->channel;
-
-	/* Copy Data rates from the rates recorded in scan response */
-	memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates));
-	ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES);
-	memcpy(join_cmd->bss.rates, bss->rates, ratesize);
-	if (get_common_rates(priv, join_cmd->bss.rates, &ratesize)) {
-		lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n");
-		ret = -1;
-		goto done;
-	}
-
-	/* Copy the ad-hoc creating rates into Current BSS state structure */
-	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
-	memcpy(&priv->curbssparams.rates, join_cmd->bss.rates, ratesize);
-
-	/* Set MSB on basic rates as the firmware requires, but _after_
-	 * copying to current bss rates.
-	 */
-	lbs_set_basic_rate_flags(join_cmd->bss.rates, ratesize);
-
-	join_cmd->bss.ssparamset.ibssparamset.atimwindow =
-	    cpu_to_le16(bss->atimwindow);
-
-	if (assoc_req->secinfo.wep_enabled) {
-		u16 tmp = le16_to_cpu(join_cmd->bss.capability);
-		tmp |= WLAN_CAPABILITY_PRIVACY;
-		join_cmd->bss.capability = cpu_to_le16(tmp);
-	}
-
-	if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
-		/* wake up first */
-		__le32 Localpsmode;
-
-		Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
-		ret = lbs_prepare_and_send_command(priv,
-					    CMD_802_11_PS_MODE,
-					    CMD_ACT_SET,
-					    0, 0, &Localpsmode);
-
-		if (ret) {
-			ret = -1;
-			goto done;
-		}
-	}
-
-	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
-		ret = -1;
-		goto done;
-	}
-
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_join) +
-				S_DS_GEN + cmdappendsize);
-
-done:
-	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
-	return ret;
-}
-
 int lbs_ret_80211_associate(struct lbs_private *priv,
 			      struct cmd_ds_command *resp)
 {
@@ -1815,34 +1762,19 @@
 	return ret;
 }
 
-int lbs_ret_80211_disassociate(struct lbs_private *priv)
-{
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	lbs_mac_event_disconnected(priv);
-
-	lbs_deb_leave(LBS_DEB_JOIN);
-	return 0;
-}
-
-int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
-				 struct cmd_ds_command *resp)
+static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
 {
 	int ret = 0;
 	u16 command = le16_to_cpu(resp->command);
 	u16 result = le16_to_cpu(resp->result);
-	struct cmd_ds_802_11_ad_hoc_result *padhocresult;
+	struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
 	union iwreq_data wrqu;
 	struct bss_descriptor *bss;
 	DECLARE_MAC_BUF(mac);
 
 	lbs_deb_enter(LBS_DEB_JOIN);
 
-	padhocresult = &resp->params.result;
-
-	lbs_deb_join("ADHOC_RESP: size = %d\n", le16_to_cpu(resp->size));
-	lbs_deb_join("ADHOC_RESP: command = %x\n", command);
-	lbs_deb_join("ADHOC_RESP: result = %x\n", result);
+	adhoc_resp = (struct cmd_ds_802_11_ad_hoc_result *) resp;
 
 	if (!priv->in_progress_assoc_req) {
 		lbs_deb_join("ADHOC_RESP: no in-progress association "
@@ -1856,26 +1788,19 @@
 	 * Join result code 0 --> SUCCESS
 	 */
 	if (result) {
-		lbs_deb_join("ADHOC_RESP: failed\n");
+		lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
 		if (priv->connect_status == LBS_CONNECTED)
 			lbs_mac_event_disconnected(priv);
 		ret = -1;
 		goto done;
 	}
 
-	/*
-	 * Now the join cmd should be successful
-	 * If BSSID has changed use SSID to compare instead of BSSID
-	 */
-	lbs_deb_join("ADHOC_RESP: associated to '%s'\n",
-		escape_essid(bss->ssid, bss->ssid_len));
-
 	/* Send a Media Connected event, according to the Spec */
 	priv->connect_status = LBS_CONNECTED;
 
 	if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
 		/* Update the created network descriptor with the new BSSID */
-		memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN);
+		memcpy(bss->bssid, adhoc_resp->bssid, ETH_ALEN);
 	}
 
 	/* Set the BSSID from the joined/started descriptor */
@@ -1894,22 +1819,13 @@
 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
 
-	lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n");
-	lbs_deb_join("ADHOC_RESP: channel = %d\n", priv->curbssparams.channel);
-	lbs_deb_join("ADHOC_RESP: BSSID = %s\n",
-		     print_mac(mac, padhocresult->bssid));
+	lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %s, channel %d\n",
+		     escape_essid(bss->ssid, bss->ssid_len),
+		     print_mac(mac, priv->curbssparams.bssid),
+		     priv->curbssparams.channel);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
 	return ret;
 }
 
-int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv)
-{
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	lbs_mac_event_disconnected(priv);
-
-	lbs_deb_leave(LBS_DEB_JOIN);
-	return 0;
-}
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index c516fbe..8b7336d 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -12,28 +12,18 @@
 int lbs_cmd_80211_authenticate(struct lbs_private *priv,
 					struct cmd_ds_command *cmd,
 					void *pdata_buf);
-int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
-				       struct cmd_ds_command *cmd,
-				       void *pdata_buf);
-int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd);
-int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
-					struct cmd_ds_command *cmd,
-					void *pdata_buf);
+
+int lbs_adhoc_stop(struct lbs_private *priv);
+
 int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
-					  struct cmd_ds_command *cmd);
+				 u8 bssid[ETH_ALEN], u16 reason);
 int lbs_cmd_80211_associate(struct lbs_private *priv,
 				     struct cmd_ds_command *cmd,
 				     void *pdata_buf);
 
 int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
 					struct cmd_ds_command *resp);
-int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv);
-int lbs_ret_80211_disassociate(struct lbs_private *priv);
 int lbs_ret_80211_associate(struct lbs_private *priv,
 				     struct cmd_ds_command *resp);
 
-int lbs_stop_adhoc_network(struct lbs_private *priv);
-
-int lbs_send_deauthentication(struct lbs_private *priv);
-
 #endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 75427e6..a912fb6 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -480,181 +480,166 @@
 	return ret;
 }
 
-static int lbs_cmd_802_11_reset(struct cmd_ds_command *cmd, int cmd_action)
+/**
+ *  @brief Set an SNMP MIB value
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param oid  	The OID to set in the firmware
+ *  @param val  	Value to set the OID to
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
 {
-	struct cmd_ds_802_11_reset *reset = &cmd->params.reset;
+	struct cmd_ds_802_11_snmp_mib cmd;
+	int ret;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
-	cmd->command = cpu_to_le16(CMD_802_11_RESET);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN);
-	reset->action = cpu_to_le16(cmd_action);
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.oid = cpu_to_le16((u16) oid);
 
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv,
-				    struct cmd_ds_command *cmd,
-				    int cmd_action,
-				    int cmd_oid, void *pdata_buf)
-{
-	struct cmd_ds_802_11_snmp_mib *pSNMPMIB = &cmd->params.smib;
-	u8 ucTemp;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	lbs_deb_cmd("SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
-
-	cmd->command = cpu_to_le16(CMD_802_11_SNMP_MIB);
-	cmd->size = cpu_to_le16(sizeof(*pSNMPMIB) + S_DS_GEN);
-
-	switch (cmd_oid) {
-	case OID_802_11_INFRASTRUCTURE_MODE:
-	{
-		u8 mode = (u8) (size_t) pdata_buf;
-		pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
-		pSNMPMIB->oid = cpu_to_le16((u16) DESIRED_BSSTYPE_I);
-		pSNMPMIB->bufsize = cpu_to_le16(sizeof(u8));
-		if (mode == IW_MODE_ADHOC) {
-			ucTemp = SNMP_MIB_VALUE_ADHOC;
-		} else {
-			/* Infra and Auto modes */
-			ucTemp = SNMP_MIB_VALUE_INFRA;
-		}
-
-		memmove(pSNMPMIB->value, &ucTemp, sizeof(u8));
-
+	switch (oid) {
+	case SNMP_MIB_OID_BSS_TYPE:
+		cmd.bufsize = cpu_to_le16(sizeof(u8));
+		cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1;
 		break;
-	}
-
-	case OID_802_11D_ENABLE:
-		{
-			u32 ulTemp;
-
-			pSNMPMIB->oid = cpu_to_le16((u16) DOT11D_I);
-
-			if (cmd_action == CMD_ACT_SET) {
-				pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
-				pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
-				ulTemp = *(u32 *)pdata_buf;
-				*((__le16 *)(pSNMPMIB->value)) =
-				    cpu_to_le16((u16) ulTemp);
-			}
-			break;
-		}
-
-	case OID_802_11_FRAGMENTATION_THRESHOLD:
-		{
-			u32 ulTemp;
-
-			pSNMPMIB->oid = cpu_to_le16((u16) FRAGTHRESH_I);
-
-			if (cmd_action == CMD_ACT_GET) {
-				pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
-			} else if (cmd_action == CMD_ACT_SET) {
-				pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
-				pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
-				ulTemp = *((u32 *) pdata_buf);
-				*((__le16 *)(pSNMPMIB->value)) =
-				    cpu_to_le16((u16) ulTemp);
-
-			}
-
-			break;
-		}
-
-	case OID_802_11_RTS_THRESHOLD:
-		{
-
-			u32 ulTemp;
-			pSNMPMIB->oid = cpu_to_le16(RTSTHRESH_I);
-
-			if (cmd_action == CMD_ACT_GET) {
-				pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
-			} else if (cmd_action == CMD_ACT_SET) {
-				pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
-				pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
-				ulTemp = *((u32 *)pdata_buf);
-				*(__le16 *)(pSNMPMIB->value) =
-				    cpu_to_le16((u16) ulTemp);
-
-			}
-			break;
-		}
-	case OID_802_11_TX_RETRYCOUNT:
-		pSNMPMIB->oid = cpu_to_le16((u16) SHORT_RETRYLIM_I);
-
-		if (cmd_action == CMD_ACT_GET) {
-			pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
-		} else if (cmd_action == CMD_ACT_SET) {
-			pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
-			pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
-			*((__le16 *)(pSNMPMIB->value)) =
-			    cpu_to_le16((u16) priv->txretrycount);
-		}
-
+	case SNMP_MIB_OID_11D_ENABLE:
+	case SNMP_MIB_OID_FRAG_THRESHOLD:
+	case SNMP_MIB_OID_RTS_THRESHOLD:
+	case SNMP_MIB_OID_SHORT_RETRY_LIMIT:
+	case SNMP_MIB_OID_LONG_RETRY_LIMIT:
+		cmd.bufsize = cpu_to_le16(sizeof(u16));
+		*((__le16 *)(&cmd.value)) = cpu_to_le16(val);
 		break;
 	default:
-		break;
+		lbs_deb_cmd("SNMP_CMD: (set) unhandled OID 0x%x\n", oid);
+		ret = -EINVAL;
+		goto out;
 	}
 
-	lbs_deb_cmd(
-	       "SNMP_CMD: command=0x%x, size=0x%x, seqnum=0x%x, result=0x%x\n",
-	       le16_to_cpu(cmd->command), le16_to_cpu(cmd->size),
-	       le16_to_cpu(cmd->seqnum), le16_to_cpu(cmd->result));
+	lbs_deb_cmd("SNMP_CMD: (set) oid 0x%x, oid size 0x%x, value 0x%x\n",
+		    le16_to_cpu(cmd.oid), le16_to_cpu(cmd.bufsize), val);
 
-	lbs_deb_cmd(
-	       "SNMP_CMD: action 0x%x, oid 0x%x, oidsize 0x%x, value 0x%x\n",
-	       le16_to_cpu(pSNMPMIB->querytype), le16_to_cpu(pSNMPMIB->oid),
-	       le16_to_cpu(pSNMPMIB->bufsize),
-	       le16_to_cpu(*(__le16 *) pSNMPMIB->value));
+	ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
 
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
+out:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
 }
 
-static int lbs_cmd_802_11_rf_tx_power(struct cmd_ds_command *cmd,
-				       u16 cmd_action, void *pdata_buf)
+/**
+ *  @brief Get an SNMP MIB value
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param oid  	The OID to retrieve from the firmware
+ *  @param out_val  	Location for the returned value
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
 {
-
-	struct cmd_ds_802_11_rf_tx_power *prtp = &cmd->params.txp;
+	struct cmd_ds_802_11_snmp_mib cmd;
+	int ret;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
-	cmd->size =
-	    cpu_to_le16((sizeof(struct cmd_ds_802_11_rf_tx_power)) + S_DS_GEN);
-	cmd->command = cpu_to_le16(CMD_802_11_RF_TX_POWER);
-	prtp->action = cpu_to_le16(cmd_action);
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_GET);
+	cmd.oid = cpu_to_le16(oid);
 
-	lbs_deb_cmd("RF_TX_POWER_CMD: size:%d cmd:0x%x Act:%d\n",
-		    le16_to_cpu(cmd->size), le16_to_cpu(cmd->command),
-		    le16_to_cpu(prtp->action));
+	ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
+	if (ret)
+		goto out;
 
-	switch (cmd_action) {
-	case CMD_ACT_TX_POWER_OPT_GET:
-		prtp->action = cpu_to_le16(CMD_ACT_GET);
-		prtp->currentlevel = 0;
+	switch (le16_to_cpu(cmd.bufsize)) {
+	case sizeof(u8):
+		if (oid == SNMP_MIB_OID_BSS_TYPE) {
+			if (cmd.value[0] == 2)
+				*out_val = IW_MODE_ADHOC;
+			else
+				*out_val = IW_MODE_INFRA;
+		} else
+			*out_val = cmd.value[0];
 		break;
-
-	case CMD_ACT_TX_POWER_OPT_SET_HIGH:
-		prtp->action = cpu_to_le16(CMD_ACT_SET);
-		prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_HIGH);
+	case sizeof(u16):
+		*out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
 		break;
-
-	case CMD_ACT_TX_POWER_OPT_SET_MID:
-		prtp->action = cpu_to_le16(CMD_ACT_SET);
-		prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_MID);
-		break;
-
-	case CMD_ACT_TX_POWER_OPT_SET_LOW:
-		prtp->action = cpu_to_le16(CMD_ACT_SET);
-		prtp->currentlevel = cpu_to_le16(*((u16 *) pdata_buf));
+	default:
+		lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n",
+		            oid, le16_to_cpu(cmd.bufsize));
 		break;
 	}
 
+out:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+/**
+ *  @brief Get the min, max, and current TX power
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param curlevel  	Current power level in dBm
+ *  @param minlevel  	Minimum supported power level in dBm (optional)
+ *  @param maxlevel  	Maximum supported power level in dBm (optional)
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
+		     s16 *maxlevel)
+{
+	struct cmd_ds_802_11_rf_tx_power cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_GET);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
+	if (ret == 0) {
+		*curlevel = le16_to_cpu(cmd.curlevel);
+		if (minlevel)
+			*minlevel = le16_to_cpu(cmd.minlevel);
+		if (maxlevel)
+			*maxlevel = le16_to_cpu(cmd.maxlevel);
+	}
+
 	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
+	return ret;
+}
+
+/**
+ *  @brief Set the TX power
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param dbm  	The desired power level in dBm
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
+{
+	struct cmd_ds_802_11_rf_tx_power cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.curlevel = cpu_to_le16(dbm);
+
+	lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return ret;
 }
 
 static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
@@ -1033,9 +1018,9 @@
 	return ret;
 }
 
-int lbs_mesh_config_send(struct lbs_private *priv,
-			 struct cmd_ds_mesh_config *cmd,
-			 uint16_t action, uint16_t type)
+static int __lbs_mesh_config_send(struct lbs_private *priv,
+				  struct cmd_ds_mesh_config *cmd,
+				  uint16_t action, uint16_t type)
 {
 	int ret;
 
@@ -1054,6 +1039,19 @@
 	return ret;
 }
 
+int lbs_mesh_config_send(struct lbs_private *priv,
+			 struct cmd_ds_mesh_config *cmd,
+			 uint16_t action, uint16_t type)
+{
+	int ret;
+
+	if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
+		return -EOPNOTSUPP;
+
+	ret = __lbs_mesh_config_send(priv, cmd, action, type);
+	return ret;
+}
+
 /* This function is the CMD_MESH_CONFIG legacy function.  It only handles the
  * START and STOP actions.  The extended actions supported by CMD_MESH_CONFIG
  * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
@@ -1095,7 +1093,7 @@
 		    action, priv->mesh_tlv, chan,
 		    escape_essid(priv->mesh_ssid, priv->mesh_ssid_len));
 
-	return lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
+	return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
 }
 
 static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
@@ -1256,41 +1254,47 @@
 	priv->cur_cmd = NULL;
 }
 
-int lbs_set_radio_control(struct lbs_private *priv)
+int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
 {
-	int ret = 0;
 	struct cmd_ds_802_11_radio_control cmd;
+	int ret = -EINVAL;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
 	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
 	cmd.action = cpu_to_le16(CMD_ACT_SET);
 
-	switch (priv->preamble) {
-	case CMD_TYPE_SHORT_PREAMBLE:
-		cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE);
-		break;
-
-	case CMD_TYPE_LONG_PREAMBLE:
-		cmd.control = cpu_to_le16(SET_LONG_PREAMBLE);
-		break;
-
-	case CMD_TYPE_AUTO_PREAMBLE:
-	default:
-		cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE);
-		break;
+	/* Only v8 and below support setting the preamble */
+	if (priv->fwrelease < 0x09000000) {
+		switch (preamble) {
+		case RADIO_PREAMBLE_SHORT:
+			if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
+				goto out;
+			/* Fall through */
+		case RADIO_PREAMBLE_AUTO:
+		case RADIO_PREAMBLE_LONG:
+			cmd.control = cpu_to_le16(preamble);
+			break;
+		default:
+			goto out;
+		}
 	}
 
-	if (priv->radioon)
-		cmd.control |= cpu_to_le16(TURN_ON_RF);
-	else
-		cmd.control &= cpu_to_le16(~TURN_ON_RF);
+	if (radio_on)
+		cmd.control |= cpu_to_le16(0x1);
+	else {
+		cmd.control &= cpu_to_le16(~0x1);
+		priv->txpower_cur = 0;
+	}
 
-	lbs_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
-		    priv->preamble);
+	lbs_deb_cmd("RADIO_CONTROL: radio %s, preamble %d\n",
+		    radio_on ? "ON" : "OFF", preamble);
+
+	priv->radio_on = radio_on;
 
 	ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
 
+out:
 	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
 	return ret;
 }
@@ -1380,55 +1384,25 @@
 		ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf);
 		break;
 
-	case CMD_802_11_DEAUTHENTICATE:
-		ret = lbs_cmd_80211_deauthenticate(priv, cmdptr);
-		break;
-
-	case CMD_802_11_AD_HOC_START:
-		ret = lbs_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf);
-		break;
-
-	case CMD_802_11_RESET:
-		ret = lbs_cmd_802_11_reset(cmdptr, cmd_action);
-		break;
-
 	case CMD_802_11_AUTHENTICATE:
 		ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
 		break;
 
-	case CMD_802_11_SNMP_MIB:
-		ret = lbs_cmd_802_11_snmp_mib(priv, cmdptr,
-					       cmd_action, cmd_oid, pdata_buf);
-		break;
-
 	case CMD_MAC_REG_ACCESS:
 	case CMD_BBP_REG_ACCESS:
 	case CMD_RF_REG_ACCESS:
 		ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf);
 		break;
 
-	case CMD_802_11_RF_TX_POWER:
-		ret = lbs_cmd_802_11_rf_tx_power(cmdptr,
-						 cmd_action, pdata_buf);
-		break;
-
 	case CMD_802_11_MONITOR_MODE:
 		ret = lbs_cmd_802_11_monitor_mode(cmdptr,
 				          cmd_action, pdata_buf);
 		break;
 
-	case CMD_802_11_AD_HOC_JOIN:
-		ret = lbs_cmd_80211_ad_hoc_join(priv, cmdptr, pdata_buf);
-		break;
-
 	case CMD_802_11_RSSI:
 		ret = lbs_cmd_802_11_rssi(priv, cmdptr);
 		break;
 
-	case CMD_802_11_AD_HOC_STOP:
-		ret = lbs_cmd_80211_ad_hoc_stop(cmdptr);
-		break;
-
 	case CMD_802_11_SET_AFC:
 	case CMD_802_11_GET_AFC:
 
@@ -1953,6 +1927,70 @@
 }
 
 
+/**
+ * @brief Configures the transmission power control functionality.
+ *
+ * @param priv		A pointer to struct lbs_private structure
+ * @param enable	Transmission power control enable
+ * @param p0		Power level when link quality is good (dBm).
+ * @param p1		Power level when link quality is fair (dBm).
+ * @param p2		Power level when link quality is poor (dBm).
+ * @param usesnr	Use Signal to Noise Ratio in TPC
+ *
+ * @return 0 on success
+ */
+int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
+		int8_t p2, int usesnr)
+{
+	struct cmd_ds_802_11_tpc_cfg cmd;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.enable = !!enable;
+	cmd.usesnr = !!usesnr;
+	cmd.P0 = p0;
+	cmd.P1 = p1;
+	cmd.P2 = p2;
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd);
+
+	return ret;
+}
+
+/**
+ * @brief Configures the power adaptation settings.
+ *
+ * @param priv		A pointer to struct lbs_private structure
+ * @param enable	Power adaptation enable
+ * @param p0		Power level for 1, 2, 5.5 and 11 Mbps (dBm).
+ * @param p1		Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
+ * @param p2		Power level for 48 and 54 Mbps (dBm).
+ *
+ * @return 0 on success
+ */
+
+int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
+		int8_t p1, int8_t p2)
+{
+	struct cmd_ds_802_11_pa_cfg cmd;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.enable = !!enable;
+	cmd.P0 = p0;
+	cmd.P1 = p1;
+	cmd.P2 = p2;
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd);
+
+	return ret;
+}
+
+
 static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
 	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
 	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
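
A minimal usage sketch, not taken from the patch itself: the cmd.c hunks above replace the old prepare-and-send SNMP MIB and RF TX power paths with direct commands. The sketch below assumes only the prototypes added to cmd.h and the SNMP_MIB_OID_* constants added to host.h; example_set_rts() is a hypothetical caller, not driver code.

/* Sketch: set and read back the RTS threshold with the new direct
 * commands.  Assumes the libertas headers (dev.h, cmd.h, host.h).
 */
static int example_set_rts(struct lbs_private *priv, u16 rts)
{
	u16 readback = 0;
	int ret;

	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, rts);
	if (ret)
		return ret;

	/* lbs_get_snmp_mib() fills *out_val from the firmware response */
	ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &readback);
	if (ret)
		return ret;

	return (readback == rts) ? 0 : -EIO;
}
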
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index a53b51f..36be4c9 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -26,6 +26,12 @@
 	      int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
 	      unsigned long callback_arg);
 
+int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
+		int8_t p1, int8_t p2);
+
+int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
+		int8_t p2, int usesnr);
+
 int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
 		     struct cmd_header *resp);
 
@@ -61,4 +67,14 @@
 int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
 				struct assoc_request *assoc);
 
+int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
+		     s16 *maxlevel);
+int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
+
+int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
+
+int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
+
+int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
+
 #endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 24de3c3..bcf2a97 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -146,63 +146,6 @@
 	return ret;
 }
 
-static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv,
-				    struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
-	u16 oid = le16_to_cpu(smib->oid);
-	u16 querytype = le16_to_cpu(smib->querytype);
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	lbs_deb_cmd("SNMP_RESP: oid 0x%x, querytype 0x%x\n", oid,
-	       querytype);
-	lbs_deb_cmd("SNMP_RESP: Buf size %d\n", le16_to_cpu(smib->bufsize));
-
-	if (querytype == CMD_ACT_GET) {
-		switch (oid) {
-		case FRAGTHRESH_I:
-			priv->fragthsd =
-				le16_to_cpu(*((__le16 *)(smib->value)));
-			lbs_deb_cmd("SNMP_RESP: frag threshold %u\n",
-				    priv->fragthsd);
-			break;
-		case RTSTHRESH_I:
-			priv->rtsthsd =
-				le16_to_cpu(*((__le16 *)(smib->value)));
-			lbs_deb_cmd("SNMP_RESP: rts threshold %u\n",
-				    priv->rtsthsd);
-			break;
-		case SHORT_RETRYLIM_I:
-			priv->txretrycount =
-				le16_to_cpu(*((__le16 *)(smib->value)));
-			lbs_deb_cmd("SNMP_RESP: tx retry count %u\n",
-				    priv->rtsthsd);
-			break;
-		default:
-			break;
-		}
-	}
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	return 0;
-}
-
-static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
-				       struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_rf_tx_power *rtp = &resp->params.txp;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	priv->txpowerlevel = le16_to_cpu(rtp->currentlevel);
-
-	lbs_deb_cmd("TX power currently %d\n", priv->txpowerlevel);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
 static int lbs_ret_802_11_rssi(struct lbs_private *priv,
 				struct cmd_ds_command *resp)
 {
@@ -273,24 +216,6 @@
 		ret = lbs_ret_80211_associate(priv, resp);
 		break;
 
-	case CMD_RET(CMD_802_11_DISASSOCIATE):
-	case CMD_RET(CMD_802_11_DEAUTHENTICATE):
-		ret = lbs_ret_80211_disassociate(priv);
-		break;
-
-	case CMD_RET(CMD_802_11_AD_HOC_START):
-	case CMD_RET(CMD_802_11_AD_HOC_JOIN):
-		ret = lbs_ret_80211_ad_hoc_start(priv, resp);
-		break;
-
-	case CMD_RET(CMD_802_11_SNMP_MIB):
-		ret = lbs_ret_802_11_snmp_mib(priv, resp);
-		break;
-
-	case CMD_RET(CMD_802_11_RF_TX_POWER):
-		ret = lbs_ret_802_11_rf_tx_power(priv, resp);
-		break;
-
 	case CMD_RET(CMD_802_11_SET_AFC):
 	case CMD_RET(CMD_802_11_GET_AFC):
 		spin_lock_irqsave(&priv->driver_lock, flags);
@@ -300,7 +225,6 @@
 
 		break;
 
-	case CMD_RET(CMD_802_11_RESET):
 	case CMD_RET(CMD_802_11_AUTHENTICATE):
 	case CMD_RET(CMD_802_11_BEACON_STOP):
 		break;
@@ -309,10 +233,6 @@
 		ret = lbs_ret_802_11_rssi(priv, resp);
 		break;
 
-	case CMD_RET(CMD_802_11_AD_HOC_STOP):
-		ret = lbs_ret_80211_ad_hoc_stop(priv);
-		break;
-
 	case CMD_RET(CMD_802_11D_DOMAIN_INFO):
 		ret = lbs_ret_802_11d_domain_info(resp);
 		break;
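
The response handlers deleted above are no longer needed because the direct-command helpers in cmd.c receive the firmware's reply in the same structure they sent. A hedged sketch of the resulting caller-side pattern for TX power; example_report_txpower() is hypothetical, while lbs_get_tx_power() and lbs_deb_cmd() come from the hunks above.

/* Sketch: read current/min/max TX power straight from the direct
 * command instead of going through a cmdresp.c handler.
 */
static void example_report_txpower(struct lbs_private *priv)
{
	s16 cur = 0, min = 0, max = 0;

	if (lbs_get_tx_power(priv, &cur, &min, &max) == 0)
		lbs_deb_cmd("TX power %d dBm (supported range %d to %d dBm)\n",
			    cur, min, max);
}
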
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index a8ac974..1a8888c 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -34,7 +34,6 @@
 void lbs_queue_event(struct lbs_private *priv, u32 event);
 void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
 
-int lbs_set_radio_control(struct lbs_private *priv);
 u32 lbs_fw_index_to_data_rate(u8 index);
 u8 lbs_data_rate_to_fw_index(u32 rate);
 
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 12e6875..076a636 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -189,6 +189,14 @@
 #define MRVDRV_CMD_UPLD_RDY		0x0008
 #define MRVDRV_CARDEVENT		0x0010
 
+/* Automatic TX control default levels */
+#define POW_ADAPT_DEFAULT_P0 13
+#define POW_ADAPT_DEFAULT_P1 15
+#define POW_ADAPT_DEFAULT_P2 18
+#define TPC_DEFAULT_P0 5
+#define TPC_DEFAULT_P1 10
+#define TPC_DEFAULT_P2 13
+
 /** TxPD status */
 
 /*	Station firmware use TxPD status field to report final Tx transmit
@@ -243,6 +251,9 @@
 
 #define	CMD_F_HOSTCMD		(1 << 0)
 #define FW_CAPINFO_WPA  	(1 << 0)
+#define FW_CAPINFO_FIRMWARE_UPGRADE	(1 << 13)
+#define FW_CAPINFO_BOOT2_UPGRADE	(1<<14)
+#define FW_CAPINFO_PERSISTENT_CONFIG	(1<<15)
 
 #define KEY_LEN_WPA_AES			16
 #define KEY_LEN_WPA_TKIP		32
@@ -316,7 +327,8 @@
 enum DNLD_STATE {
 	DNLD_RES_RECEIVED,
 	DNLD_DATA_SENT,
-	DNLD_CMD_SENT
+	DNLD_CMD_SENT,
+	DNLD_BOOTCMD_SENT,
 };
 
 /** LBS_MEDIA_STATE */
@@ -339,27 +351,6 @@
 	MVMS_EVENT
 };
 
-/** SNMP_MIB_INDEX_e */
-enum SNMP_MIB_INDEX_e {
-	DESIRED_BSSTYPE_I = 0,
-	OP_RATESET_I,
-	BCNPERIOD_I,
-	DTIMPERIOD_I,
-	ASSOCRSP_TIMEOUT_I,
-	RTSTHRESH_I,
-	SHORT_RETRYLIM_I,
-	LONG_RETRYLIM_I,
-	FRAGTHRESH_I,
-	DOT11D_I,
-	DOT11H_I,
-	MANUFID_I,
-	PRODID_I,
-	MANUF_OUI_I,
-	MANUF_NAME_I,
-	MANUF_PRODNAME_I,
-	MANUF_PRODVER_I,
-};
-
 /** KEY_TYPE_ID */
 enum KEY_TYPE_ID {
 	KEY_TYPE_ID_WEP = 0,
@@ -374,12 +365,6 @@
 	KEY_INFO_WPA_ENABLED = 0x04
 };
 
-/** SNMP_MIB_VALUE_e */
-enum SNMP_MIB_VALUE_e {
-	SNMP_MIB_VALUE_INFRA = 1,
-	SNMP_MIB_VALUE_ADHOC
-};
-
 /* Default values for fwt commands. */
 #define FWT_DEFAULT_METRIC 0
 #define FWT_DEFAULT_DIR 1
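
The POW_ADAPT_DEFAULT_* and TPC_DEFAULT_* values added above pair with the new lbs_set_power_adapt_cfg()/lbs_set_tpc_cfg() helpers in cmd.c. A sketch of one way a caller might apply them; example_enable_auto_tx_power() is hypothetical, and enabling both features at once is an assumption of the sketch, not something the patch requires.

/* Sketch: enable power adaptation and TPC with the default levels
 * defined above, using only the cmd.h prototypes from this patch.
 */
static int example_enable_auto_tx_power(struct lbs_private *priv)
{
	int ret;

	ret = lbs_set_power_adapt_cfg(priv, 1, POW_ADAPT_DEFAULT_P0,
				      POW_ADAPT_DEFAULT_P1,
				      POW_ADAPT_DEFAULT_P2);
	if (ret)
		return ret;

	/* usesnr = 1: let TPC factor in the signal-to-noise ratio */
	return lbs_set_tpc_cfg(priv, 1, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
			       TPC_DEFAULT_P2, 1);
}
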
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f5bb40c..f6f3753 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -58,6 +58,7 @@
 	u8 WPA2enabled;
 	u8 wep_enabled;
 	u8 auth_mode;
+	u32 key_mgmt;
 };
 
 /** Current Basic Service Set State Structure */
@@ -240,9 +241,6 @@
 	uint16_t enablehwauto;
 	uint16_t ratebitmap;
 
-	u32 fragthsd;
-	u32 rtsthsd;
-
 	u8 txretrycount;
 
 	/** Tx-related variables (for single packet tx) */
@@ -253,7 +251,9 @@
 	u32 connect_status;
 	u32 mesh_connect_status;
 	u16 regioncode;
-	u16 txpowerlevel;
+	s16 txpower_cur;
+	s16 txpower_min;
+	s16 txpower_max;
 
 	/** POWER MANAGEMENT AND PnP SUPPORT */
 	u8 surpriseremoved;
@@ -291,8 +291,7 @@
 	u16 nextSNRNF;
 	u16 numSNRNF;
 
-	u8 radioon;
-	u32 preamble;
+	u8 radio_on;
 
 	/** data rate stuff */
 	u8 cur_rate;
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index c92e41b..5004d76 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -9,17 +9,6 @@
 #define DEFAULT_AD_HOC_CHANNEL			6
 #define	DEFAULT_AD_HOC_CHANNEL_A		36
 
-/** IEEE 802.11 oids */
-#define OID_802_11_SSID				0x00008002
-#define OID_802_11_INFRASTRUCTURE_MODE		0x00008008
-#define OID_802_11_FRAGMENTATION_THRESHOLD	0x00008009
-#define OID_802_11_RTS_THRESHOLD		0x0000800A
-#define OID_802_11_TX_ANTENNA_SELECTED		0x0000800D
-#define OID_802_11_SUPPORTED_RATES		0x0000800E
-#define OID_802_11_STATISTICS			0x00008012
-#define OID_802_11_TX_RETRYCOUNT		0x0000801D
-#define OID_802_11D_ENABLE			0x00008020
-
 #define CMD_OPTION_WAITFORRSP			0x0002
 
 /** Host command IDs */
@@ -61,7 +50,6 @@
 #define CMD_RF_REG_MAP				0x0023
 #define CMD_802_11_DEAUTHENTICATE		0x0024
 #define CMD_802_11_REASSOCIATE			0x0025
-#define CMD_802_11_DISASSOCIATE			0x0026
 #define CMD_MAC_CONTROL				0x0028
 #define CMD_802_11_AD_HOC_START			0x002b
 #define CMD_802_11_AD_HOC_JOIN			0x002c
@@ -84,6 +72,7 @@
 #define CMD_802_11_INACTIVITY_TIMEOUT		0x0067
 #define CMD_802_11_SLEEP_PERIOD			0x0068
 #define CMD_802_11_TPC_CFG			0x0072
+#define CMD_802_11_PA_CFG			0x0073
 #define CMD_802_11_FW_WAKE_METHOD		0x0074
 #define CMD_802_11_SUBSCRIBE_EVENT		0x0075
 #define CMD_802_11_RATE_ADAPT_RATESET		0x0076
@@ -153,11 +142,6 @@
 #define CMD_ACT_MAC_ALL_MULTICAST_ENABLE	0x0100
 #define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE	0x0400
 
-/* Define action or option for CMD_802_11_RADIO_CONTROL */
-#define CMD_TYPE_AUTO_PREAMBLE		0x0001
-#define CMD_TYPE_SHORT_PREAMBLE		0x0002
-#define CMD_TYPE_LONG_PREAMBLE		0x0003
-
 /* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
 #define CMD_SUBSCRIBE_RSSI_LOW		0x0001
 #define CMD_SUBSCRIBE_SNR_LOW		0x0002
@@ -166,28 +150,14 @@
 #define CMD_SUBSCRIBE_RSSI_HIGH		0x0010
 #define CMD_SUBSCRIBE_SNR_HIGH		0x0020
 
-#define TURN_ON_RF			0x01
-#define RADIO_ON			0x01
-#define RADIO_OFF			0x00
-
-#define SET_AUTO_PREAMBLE		0x05
-#define SET_SHORT_PREAMBLE		0x03
-#define SET_LONG_PREAMBLE		0x01
+#define RADIO_PREAMBLE_LONG	0x00
+#define RADIO_PREAMBLE_SHORT	0x02
+#define RADIO_PREAMBLE_AUTO	0x04
 
 /* Define action or option for CMD_802_11_RF_CHANNEL */
 #define CMD_OPT_802_11_RF_CHANNEL_GET	0x00
 #define CMD_OPT_802_11_RF_CHANNEL_SET	0x01
 
-/* Define action or option for CMD_802_11_RF_TX_POWER */
-#define CMD_ACT_TX_POWER_OPT_GET	0x0000
-#define CMD_ACT_TX_POWER_OPT_SET_HIGH	0x8007
-#define CMD_ACT_TX_POWER_OPT_SET_MID	0x8004
-#define CMD_ACT_TX_POWER_OPT_SET_LOW	0x8000
-
-#define CMD_ACT_TX_POWER_INDEX_HIGH	0x0007
-#define CMD_ACT_TX_POWER_INDEX_MID	0x0004
-#define CMD_ACT_TX_POWER_INDEX_LOW	0x0000
-
 /* Define action or option for CMD_802_11_DATA_RATE */
 #define CMD_ACT_SET_TX_AUTO		0x0000
 #define CMD_ACT_SET_TX_FIX_RATE		0x0001
@@ -210,6 +180,19 @@
 #define CMD_WAKE_METHOD_COMMAND_INT	0x0001
 #define CMD_WAKE_METHOD_GPIO		0x0002
 
+/* Object IDs for CMD_802_11_SNMP_MIB */
+#define SNMP_MIB_OID_BSS_TYPE		0x0000
+#define SNMP_MIB_OID_OP_RATE_SET	0x0001
+#define SNMP_MIB_OID_BEACON_PERIOD	0x0002  /* Reserved on v9+ */
+#define SNMP_MIB_OID_DTIM_PERIOD	0x0003  /* Reserved on v9+ */
+#define SNMP_MIB_OID_ASSOC_TIMEOUT	0x0004  /* Reserved on v9+ */
+#define SNMP_MIB_OID_RTS_THRESHOLD	0x0005
+#define SNMP_MIB_OID_SHORT_RETRY_LIMIT	0x0006
+#define SNMP_MIB_OID_LONG_RETRY_LIMIT	0x0007
+#define SNMP_MIB_OID_FRAG_THRESHOLD	0x0008
+#define SNMP_MIB_OID_11D_ENABLE		0x0009
+#define SNMP_MIB_OID_11H_ENABLE		0x000A
+
 /* Define action or option for CMD_BT_ACCESS */
 enum cmd_bt_access_opts {
 	/* The bt commands start at 5 instead of 1 because the old dft commands
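
The RADIO_PREAMBLE_* values above feed lbs_set_radio() from cmd.c. A minimal sketch, assuming automatic preamble selection is an acceptable default; example_radio_on() is hypothetical.

/* Sketch: bring the radio up with automatic preamble selection. */
static int example_radio_on(struct lbs_private *priv)
{
	/* Per the cmd.c hunk, firmware v9+ ignores the preamble setting,
	 * so RADIO_PREAMBLE_AUTO is a safe value either way. */
	return lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
}
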
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 913b480..d9f9a12 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -151,10 +151,6 @@
 	__le32 fwcapinfo;
 } __attribute__ ((packed));
 
-struct cmd_ds_802_11_reset {
-	__le16 action;
-};
-
 struct cmd_ds_802_11_subscribe_event {
 	struct cmd_header hdr;
 
@@ -232,7 +228,9 @@
 };
 
 struct cmd_ds_802_11_deauthenticate {
-	u8 macaddr[6];
+	struct cmd_header hdr;
+
+	u8 macaddr[ETH_ALEN];
 	__le16 reasoncode;
 };
 
@@ -251,20 +249,10 @@
 #endif
 } __attribute__ ((packed));
 
-struct cmd_ds_802_11_disassociate {
-	u8 destmacaddr[6];
-	__le16 reasoncode;
-};
-
 struct cmd_ds_802_11_associate_rsp {
 	struct ieeetypes_assocrsp assocRsp;
 };
 
-struct cmd_ds_802_11_ad_hoc_result {
-	u8 pad[3];
-	u8 bssid[ETH_ALEN];
-};
-
 struct cmd_ds_802_11_set_wep {
 	struct cmd_header hdr;
 
@@ -309,7 +297,9 @@
 };
 
 struct cmd_ds_802_11_snmp_mib {
-	__le16 querytype;
+	struct cmd_header hdr;
+
+	__le16 action;
 	__le16 oid;
 	__le16 bufsize;
 	u8 value[128];
@@ -435,8 +425,12 @@
 };
 
 struct cmd_ds_802_11_rf_tx_power {
+	struct cmd_header hdr;
+
 	__le16 action;
-	__le16 currentlevel;
+	__le16 curlevel;
+	s8 maxlevel;
+	s8 minlevel;
 };
 
 struct cmd_ds_802_11_rf_antenna {
@@ -507,10 +501,12 @@
 };
 
 struct cmd_ds_802_11_ad_hoc_start {
+	struct cmd_header hdr;
+
 	u8 ssid[IW_ESSID_MAX_SIZE];
 	u8 bsstype;
 	__le16 beaconperiod;
-	u8 dtimperiod;
+	u8 dtimperiod;   /* Reserved on v9 and later */
 	union IEEEtypes_ssparamset ssparamset;
 	union ieeetypes_phyparamset phyparamset;
 	__le16 probedelay;
@@ -519,9 +515,16 @@
 	u8 tlv_memory_size_pad[100];
 } __attribute__ ((packed));
 
+struct cmd_ds_802_11_ad_hoc_result {
+	struct cmd_header hdr;
+
+	u8 pad[3];
+	u8 bssid[ETH_ALEN];
+};
+
 struct adhoc_bssdesc {
-	u8 bssid[6];
-	u8 ssid[32];
+	u8 bssid[ETH_ALEN];
+	u8 ssid[IW_ESSID_MAX_SIZE];
 	u8 type;
 	__le16 beaconperiod;
 	u8 dtimperiod;
@@ -539,10 +542,15 @@
 } __attribute__ ((packed));
 
 struct cmd_ds_802_11_ad_hoc_join {
-	struct adhoc_bssdesc bss;
-	__le16 failtimeout;
-	__le16 probedelay;
+	struct cmd_header hdr;
 
+	struct adhoc_bssdesc bss;
+	__le16 failtimeout;   /* Reserved on v9 and later */
+	__le16 probedelay;    /* Reserved on v9 and later */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_stop {
+	struct cmd_header hdr;
 } __attribute__ ((packed));
 
 struct cmd_ds_802_11_enable_rsn {
@@ -597,14 +605,28 @@
 } __attribute__ ((packed));
 
 struct cmd_ds_802_11_tpc_cfg {
+	struct cmd_header hdr;
+
 	__le16 action;
-	u8 enable;
-	s8 P0;
-	s8 P1;
-	s8 P2;
-	u8 usesnr;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+	uint8_t usesnr;
 } __attribute__ ((packed));
 
+
+struct cmd_ds_802_11_pa_cfg {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+} __attribute__ ((packed));
+
+
 struct cmd_ds_802_11_led_ctrl {
 	__le16 action;
 	__le16 numled;
@@ -693,21 +715,13 @@
 	union {
 		struct cmd_ds_802_11_ps_mode psmode;
 		struct cmd_ds_802_11_associate associate;
-		struct cmd_ds_802_11_deauthenticate deauth;
-		struct cmd_ds_802_11_ad_hoc_start ads;
-		struct cmd_ds_802_11_reset reset;
-		struct cmd_ds_802_11_ad_hoc_result result;
 		struct cmd_ds_802_11_authenticate auth;
 		struct cmd_ds_802_11_get_stat gstat;
 		struct cmd_ds_802_3_get_stat gstat_8023;
-		struct cmd_ds_802_11_snmp_mib smib;
-		struct cmd_ds_802_11_rf_tx_power txp;
 		struct cmd_ds_802_11_rf_antenna rant;
 		struct cmd_ds_802_11_monitor_mode monitor;
-		struct cmd_ds_802_11_ad_hoc_join adj;
 		struct cmd_ds_802_11_rssi rssi;
 		struct cmd_ds_802_11_rssi_rsp rssirsp;
-		struct cmd_ds_802_11_disassociate dassociate;
 		struct cmd_ds_mac_reg_access macreg;
 		struct cmd_ds_bbp_reg_access bbpreg;
 		struct cmd_ds_rf_reg_access rfreg;
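
Each structure converted above now begins with struct cmd_header, which is what lets cmd.c fill a command on the stack and hand it to lbs_cmd_with_response(). A sketch of the same pattern for the header-only CMD_802_11_AD_HOC_STOP; example_adhoc_stop() is hypothetical (the driver's own entry point is the lbs_adhoc_stop() declared in assoc.h above).

/* Sketch: a header-only direct command, following the pattern of the
 * cmd.c helpers in this patch.  Hypothetical wrapper, not driver code.
 */
static int example_adhoc_stop(struct lbs_private *priv)
{
	struct cmd_ds_802_11_ad_hoc_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));

	return lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
}
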
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 8941919..e3505c1 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -713,7 +713,7 @@
 		ret = if_cs_send_cmd(priv, buf, nb);
 		break;
 	default:
-		lbs_pr_err("%s: unsupported type %d\n", __FUNCTION__, type);
+		lbs_pr_err("%s: unsupported type %d\n", __func__, type);
 	}
 
 	lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 632c291..cafbccb 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -39,7 +39,10 @@
 
 static void if_usb_receive(struct urb *urb);
 static void if_usb_receive_fwload(struct urb *urb);
-static int if_usb_prog_firmware(struct if_usb_card *cardp);
+static int __if_usb_prog_firmware(struct if_usb_card *cardp,
+					const char *fwname, int cmd);
+static int if_usb_prog_firmware(struct if_usb_card *cardp,
+					const char *fwname, int cmd);
 static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
 			       uint8_t *payload, uint16_t nb);
 static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -48,6 +51,62 @@
 static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
 static int if_usb_reset_device(struct if_usb_card *cardp);
 
+/* sysfs hooks */
+
+/**
+ *  Set function to write firmware to device's persistent memory
+ */
+static ssize_t if_usb_firmware_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->priv;
+	struct if_usb_card *cardp = priv->card;
+	char fwname[FIRMWARE_NAME_MAX];
+	int ret;
+
+	sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
+	ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
+	if (ret == 0)
+		return count;
+
+	return ret;
+}
+
+/**
+ * lbs_flash_fw attribute to be exported per ethX interface through sysfs
+ * (/sys/class/net/ethX/lbs_flash_fw).  Use this like so to write firmware to
+ * the device's persistent memory:
+ * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
+ */
+static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
+
+/**
+ *  Set function to write a boot2 image to the device's persistent memory
+ */
+static ssize_t if_usb_boot2_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct lbs_private *priv = to_net_dev(dev)->priv;
+	struct if_usb_card *cardp = priv->card;
+	char fwname[FIRMWARE_NAME_MAX];
+	int ret;
+
+	sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
+	ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
+	if (ret == 0)
+		return count;
+
+	return ret;
+}
+
+/**
+ * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
+ * (/sys/class/net/ethX/lbs_flash_boot2).  Use this like so to write a boot2
+ * image to the device's persistent memory:
+ * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
+ */
+static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
+
 /**
  *  @brief  call back function to handle the status of the URB
  *  @param urb 		pointer to urb structure
@@ -66,10 +125,10 @@
 		lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
 			     urb->actual_length);
 
-		/* Used for both firmware TX and regular TX.  priv isn't
-		 * valid at firmware load time.
+		/* Boot commands such as UPDATE_FW and UPDATE_BOOT2 are not
+		 * passed up to the lbs level.
 		 */
-		if (priv)
+		if (priv && priv->dnld_sent != DNLD_BOOTCMD_SENT)
 			lbs_host_to_card_done(priv);
 	} else {
 		/* print the failure status number for debug */
@@ -231,7 +290,7 @@
 	}
 
 	/* Upload firmware */
-	if (if_usb_prog_firmware(cardp)) {
+	if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
 		lbs_deb_usbd(&udev->dev, "FW upload failed\n");
 		goto err_prog_firmware;
 	}
@@ -260,6 +319,12 @@
 	usb_get_dev(udev);
 	usb_set_intfdata(intf, cardp);
 
+	if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw))
+		lbs_pr_err("cannot register lbs_flash_fw attribute\n");
+
+	if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
+		lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
+
 	return 0;
 
 err_start_card:
@@ -285,6 +350,9 @@
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
+	device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
+	device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
+
 	cardp->surprise_removed = 1;
 
 	if (priv) {
@@ -371,11 +439,10 @@
 	*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
 
 	cmd->command = cpu_to_le16(CMD_802_11_RESET);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_header));
 	cmd->result = cpu_to_le16(0);
 	cmd->seqnum = cpu_to_le16(0x5a5a);
-	cmd->params.reset.action = cpu_to_le16(CMD_ACT_HALT);
-	usb_tx_block(cardp, cardp->ep_out_buf, 4 + S_DS_GEN + sizeof(struct cmd_ds_802_11_reset));
+	usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header));
 
 	msleep(100);
 	ret = usb_reset_device(cardp->udev);
@@ -510,7 +577,7 @@
 		if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
 			kfree_skb(skb);
 			if_usb_submit_rx_urb_fwload(cardp);
-			cardp->bootcmdresp = 1;
+			cardp->bootcmdresp = BOOT_CMD_RESP_OK;
 			lbs_deb_usbd(&cardp->udev->dev,
 				     "Received valid boot command response\n");
 			return;
@@ -526,7 +593,9 @@
 				lbs_pr_info("boot cmd response wrong magic number (0x%x)\n",
 					    le32_to_cpu(bootcmdresp.magic));
 			}
-		} else if (bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
+		} else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) &&
+			   (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) &&
+			   (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) {
 			lbs_pr_info("boot cmd response cmd_tag error (%d)\n",
 				    bootcmdresp.cmd);
 		} else if (bootcmdresp.result != BOOT_CMD_RESP_OK) {
@@ -564,8 +633,8 @@
 
 	kfree_skb(skb);
 
-	/* reschedule timer for 200ms hence */
-	mod_timer(&cardp->fw_timeout, jiffies + (HZ/5));
+	/* Give device 5s to either write firmware to its RAM or eeprom */
+	mod_timer(&cardp->fw_timeout, jiffies + (HZ*5));
 
 	if (cardp->fwfinalblk) {
 		cardp->fwdnldover = 1;
@@ -809,7 +878,54 @@
 }
 
 
-static int if_usb_prog_firmware(struct if_usb_card *cardp)
+/**
+*  @brief This function sends the given firmware or boot2 image to the card,
+*         as directed by cmd
+*
+*  @param cardp             the if_usb_card descriptor
+*  @param fwname            firmware or boot2 image file name
+*  @param cmd               either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
+*                           or BOOT_CMD_UPDATE_BOOT2
+*  @return     0 or error code
+*/
+static int if_usb_prog_firmware(struct if_usb_card *cardp,
+				const char *fwname, int cmd)
+{
+	struct lbs_private *priv = cardp->priv;
+	unsigned long flags, caps;
+	int ret;
+
+	caps = priv->fwcapinfo;
+	if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
+	    ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
+		return -EOPNOTSUPP;
+
+	/* Ensure main thread is idle. */
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		if (wait_event_interruptible(priv->waitq,
+				(priv->cur_cmd == NULL &&
+				priv->dnld_sent == DNLD_RES_RECEIVED))) {
+			return -ERESTARTSYS;
+		}
+		spin_lock_irqsave(&priv->driver_lock, flags);
+	}
+	priv->dnld_sent = DNLD_BOOTCMD_SENT;
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	ret = __if_usb_prog_firmware(cardp, fwname, cmd);
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	priv->dnld_sent = DNLD_RES_RECEIVED;
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	wake_up_interruptible(&priv->waitq);
+
+	return ret;
+}
+
+static int __if_usb_prog_firmware(struct if_usb_card *cardp,
+					const char *fwname, int cmd)
 {
 	int i = 0;
 	static int reset_count = 10;
@@ -817,20 +933,32 @@
 
 	lbs_deb_enter(LBS_DEB_USB);
 
-	if ((ret = request_firmware(&cardp->fw, lbs_fw_name,
-				    &cardp->udev->dev)) < 0) {
+	ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
+	if (ret < 0) {
 		lbs_pr_err("request_firmware() failed with %#x\n", ret);
-		lbs_pr_err("firmware %s not found\n", lbs_fw_name);
+		lbs_pr_err("firmware %s not found\n", fwname);
 		goto done;
 	}
 
-	if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
+	if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
+		ret = -EINVAL;
 		goto release_fw;
+	}
+
+	/* Cancel any pending usb business */
+	usb_kill_urb(cardp->rx_urb);
+	usb_kill_urb(cardp->tx_urb);
+
+	cardp->fwlastblksent = 0;
+	cardp->fwdnldover = 0;
+	cardp->totalbytes = 0;
+	cardp->fwfinalblk = 0;
+	cardp->bootcmdresp = 0;
 
 restart:
 	if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
 		lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
-		ret = -1;
+		ret = -EIO;
 		goto release_fw;
 	}
 
@@ -838,8 +966,7 @@
 	do {
 		int j = 0;
 		i++;
-		/* Issue Boot command = 1, Boot from Download-FW */
-		if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
+		if_usb_issue_boot_command(cardp, cmd);
 		/* wait for command response */
 		do {
 			j++;
@@ -847,12 +974,21 @@
 		} while (cardp->bootcmdresp == 0 && j < 10);
 	} while (cardp->bootcmdresp == 0 && i < 5);
 
-	if (cardp->bootcmdresp <= 0) {
+	if (cardp->bootcmdresp == BOOT_CMD_RESP_NOT_SUPPORTED) {
+		/* Return to normal operation */
+		ret = -EOPNOTSUPP;
+		usb_kill_urb(cardp->rx_urb);
+		usb_kill_urb(cardp->tx_urb);
+		if (if_usb_submit_rx_urb(cardp) < 0)
+			ret = -EIO;
+		goto release_fw;
+	} else if (cardp->bootcmdresp <= 0) {
 		if (--reset_count >= 0) {
 			if_usb_reset_device(cardp);
 			goto restart;
 		}
-		return -1;
+		ret = -EIO;
+		goto release_fw;
 	}
 
 	i = 0;
@@ -882,7 +1018,7 @@
 		}
 
 		lbs_pr_info("FW download failure, time = %d ms\n", i * 100);
-		ret = -1;
+		ret = -EIO;
 		goto release_fw;
 	}
 
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index 5771a83..5ba0aee 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -30,6 +30,7 @@
 
 #define BOOT_CMD_RESP_OK		0x0001
 #define BOOT_CMD_RESP_FAIL		0x0000
+#define BOOT_CMD_RESP_NOT_SUPPORTED	0x0002
 
 struct bootcmdresp
 {
@@ -50,6 +51,10 @@
 	uint8_t ep_in;
 	uint8_t ep_out;
 
+	/* bootcmdresp == 0 means command is pending
+	 * bootcmdresp < 0 means error
+	 * bootcmdresp > 0 is a BOOT_CMD_RESP_* from firmware
+	 */
 	int8_t bootcmdresp;
 
 	int ep_in_size;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index bd32ac0..73dc8c7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -291,9 +291,11 @@
 			if (priv->infra_open || priv->mesh_open)
 				return -EBUSY;
 			if (priv->mode == IW_MODE_INFRA)
-				lbs_send_deauthentication(priv);
+				lbs_cmd_80211_deauthenticate(priv,
+							     priv->curbssparams.bssid,
+							     WLAN_REASON_DEAUTH_LEAVING);
 			else if (priv->mode == IW_MODE_ADHOC)
-				lbs_stop_adhoc_network(priv);
+				lbs_adhoc_stop(priv);
 			lbs_add_rtap(priv);
 		}
 		priv->monitormode = monitor_mode;
@@ -956,17 +958,24 @@
 static int lbs_setup_firmware(struct lbs_private *priv)
 {
 	int ret = -1;
+	s16 curlevel = 0, minlevel = 0, maxlevel = 0;
 
 	lbs_deb_enter(LBS_DEB_FW);
 
-	/*
-	 * Read MAC address from HW
-	 */
+	/* Read MAC address from firmware */
 	memset(priv->current_addr, 0xff, ETH_ALEN);
 	ret = lbs_update_hw_spec(priv);
 	if (ret)
 		goto done;
 
+	/* Read power levels if available */
+	ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+	if (ret == 0) {
+		priv->txpower_cur = curlevel;
+		priv->txpower_min = minlevel;
+		priv->txpower_max = maxlevel;
+	}
+
 	lbs_set_mac_control(priv);
 done:
 	lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -1042,7 +1051,7 @@
 	priv->mode = IW_MODE_INFRA;
 	priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
 	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
-	priv->radioon = RADIO_ON;
+	priv->radio_on = 1;
 	priv->enablehwauto = 1;
 	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
 	priv->psmode = LBS802_11POWERMODECAM;
@@ -1196,7 +1205,13 @@
 	cancel_delayed_work_sync(&priv->scan_work);
 	cancel_delayed_work_sync(&priv->assoc_work);
 	cancel_work_sync(&priv->mcast_work);
+
+	/* worker thread destruction blocks on the in-flight command which
+	 * should have been cleared already in lbs_stop_card().
+	 */
+	lbs_deb_main("destroying worker thread\n");
 	destroy_workqueue(priv->work_thread);
+	lbs_deb_main("done destroying worker thread\n");
 
 	if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
 		priv->psmode = LBS802_11POWERMODECAM;
@@ -1314,14 +1329,26 @@
 		device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
 	}
 
-	/* Flush pending command nodes */
+	/* Delete the timeout of the currently processing command */
 	del_timer_sync(&priv->command_timer);
+
+	/* Flush pending command nodes */
 	spin_lock_irqsave(&priv->driver_lock, flags);
+	lbs_deb_main("clearing pending commands\n");
 	list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
 		cmdnode->result = -ENOENT;
 		cmdnode->cmdwaitqwoken = 1;
 		wake_up_interruptible(&cmdnode->cmdwait_q);
 	}
+
+	/* Flush the command the card is currently processing */
+	if (priv->cur_cmd) {
+		lbs_deb_main("clearing current command\n");
+		priv->cur_cmd->result = -ENOENT;
+		priv->cur_cmd->cmdwaitqwoken = 1;
+		wake_up_interruptible(&priv->cur_cmd->cmdwait_q);
+	}
+	lbs_deb_main("done clearing commands\n");
 	spin_unlock_irqrestore(&priv->driver_lock, flags);
 
 	unregister_netdev(dev);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 4b27456..8f66903 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -944,6 +944,11 @@
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
+	if (!priv->radio_on) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (!netif_running(dev)) {
 		ret = -ENETDOWN;
 		goto out;
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 8b3ed77..82c3e5a5 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -30,6 +30,14 @@
 	queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
 }
 
+static inline void lbs_do_association_work(struct lbs_private *priv)
+{
+	if (priv->surpriseremoved)
+		return;
+	cancel_delayed_work(&priv->assoc_work);
+	queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
+}
+
 static inline void lbs_cancel_association_work(struct lbs_private *priv)
 {
 	cancel_delayed_work(&priv->assoc_work);
@@ -120,34 +128,6 @@
 	return cfp;
 }
 
-
-/**
- *  @brief Set Radio On/OFF
- *
- *  @param priv                 A pointer to struct lbs_private structure
- *  @option 			Radio Option
- *  @return 	   		0 --success, otherwise fail
- */
-static int lbs_radio_ioctl(struct lbs_private *priv, u8 option)
-{
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (priv->radioon != option) {
-		lbs_deb_wext("switching radio %s\n", option ? "on" : "off");
-		priv->radioon = option;
-
-		ret = lbs_prepare_and_send_command(priv,
-					    CMD_802_11_RADIO_CONTROL,
-					    CMD_ACT_SET,
-					    CMD_OPTION_WAITFORRSP, 0, NULL);
-	}
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
 /**
  *  @brief Copy active data rates based on adapter mode and status
  *
@@ -294,21 +274,17 @@
 {
 	int ret = 0;
 	struct lbs_private *priv = dev->priv;
-	u32 rthr = vwrq->value;
+	u32 val = vwrq->value;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	if (vwrq->disabled) {
-		priv->rtsthsd = rthr = MRVDRV_RTS_MAX_VALUE;
-	} else {
-		if (rthr < MRVDRV_RTS_MIN_VALUE || rthr > MRVDRV_RTS_MAX_VALUE)
-			return -EINVAL;
-		priv->rtsthsd = rthr;
-	}
+	if (vwrq->disabled)
+		val = MRVDRV_RTS_MAX_VALUE;
 
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
-				    CMD_ACT_SET, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_RTS_THRESHOLD, &rthr);
+	if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
+		return -EINVAL;
+
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
 
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
 	return ret;
@@ -317,21 +293,18 @@
 static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
 			struct iw_param *vwrq, char *extra)
 {
-	int ret = 0;
 	struct lbs_private *priv = dev->priv;
+	int ret = 0;
+	u16 val = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	priv->rtsthsd = 0;
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
-				    CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_RTS_THRESHOLD, NULL);
+	ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
 	if (ret)
 		goto out;
 
-	vwrq->value = priv->rtsthsd;
-	vwrq->disabled = ((vwrq->value < MRVDRV_RTS_MIN_VALUE)
-			  || (vwrq->value > MRVDRV_RTS_MAX_VALUE));
+	vwrq->value = val;
+	vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
 	vwrq->fixed = 1;
 
 out:
@@ -342,24 +315,19 @@
 static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
 			 struct iw_param *vwrq, char *extra)
 {
-	int ret = 0;
-	u32 fthr = vwrq->value;
 	struct lbs_private *priv = dev->priv;
+	int ret = 0;
+	u32 val = vwrq->value;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	if (vwrq->disabled) {
-		priv->fragthsd = fthr = MRVDRV_FRAG_MAX_VALUE;
-	} else {
-		if (fthr < MRVDRV_FRAG_MIN_VALUE
-		    || fthr > MRVDRV_FRAG_MAX_VALUE)
-			return -EINVAL;
-		priv->fragthsd = fthr;
-	}
+	if (vwrq->disabled)
+		val = MRVDRV_FRAG_MAX_VALUE;
 
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
-				    CMD_ACT_SET, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_FRAGMENTATION_THRESHOLD, &fthr);
+	if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
+		return -EINVAL;
+
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
 
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
 	return ret;
@@ -368,22 +336,19 @@
 static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
 			 struct iw_param *vwrq, char *extra)
 {
-	int ret = 0;
 	struct lbs_private *priv = dev->priv;
+	int ret = 0;
+	u16 val = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	priv->fragthsd = 0;
-	ret = lbs_prepare_and_send_command(priv,
-				    CMD_802_11_SNMP_MIB,
-				    CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_FRAGMENTATION_THRESHOLD, NULL);
+	ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
 	if (ret)
 		goto out;
 
-	vwrq->value = priv->fragthsd;
-	vwrq->disabled = ((vwrq->value < MRVDRV_FRAG_MIN_VALUE)
-			  || (vwrq->value > MRVDRV_FRAG_MAX_VALUE));
+	vwrq->value = val;
+	vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
+			  || (val > MRVDRV_FRAG_MAX_VALUE));
 	vwrq->fixed = 1;
 
 out:
@@ -410,7 +375,7 @@
 {
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	*uwrq = IW_MODE_REPEAT ;
+	*uwrq = IW_MODE_REPEAT;
 
 	lbs_deb_leave(LBS_DEB_WEXT);
 	return 0;
@@ -420,28 +385,30 @@
 			  struct iw_request_info *info,
 			  struct iw_param *vwrq, char *extra)
 {
-	int ret = 0;
 	struct lbs_private *priv = dev->priv;
+	s16 curlevel = 0;
+	int ret = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	ret = lbs_prepare_and_send_command(priv,
-				    CMD_802_11_RF_TX_POWER,
-				    CMD_ACT_TX_POWER_OPT_GET,
-				    CMD_OPTION_WAITFORRSP, 0, NULL);
+	if (!priv->radio_on) {
+		lbs_deb_wext("tx power off\n");
+		vwrq->value = 0;
+		vwrq->disabled = 1;
+		goto out;
+	}
 
+	ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
 	if (ret)
 		goto out;
 
-	lbs_deb_wext("tx power level %d dbm\n", priv->txpowerlevel);
-	vwrq->value = priv->txpowerlevel;
+	lbs_deb_wext("tx power level %d dbm\n", curlevel);
+	priv->txpower_cur = curlevel;
+
+	vwrq->value = curlevel;
 	vwrq->fixed = 1;
-	if (priv->radioon) {
-		vwrq->disabled = 0;
-		vwrq->flags = IW_TXPOW_DBM;
-	} else {
-		vwrq->disabled = 1;
-	}
+	vwrq->disabled = 0;
+	vwrq->flags = IW_TXPOW_DBM;
 
 out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
@@ -451,31 +418,44 @@
 static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
 			  struct iw_param *vwrq, char *extra)
 {
-	int ret = 0;
 	struct lbs_private *priv = dev->priv;
+	int ret = 0;
+	u16 slimit = 0, llimit = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	if (vwrq->flags == IW_RETRY_LIMIT) {
-		/* The MAC has a 4-bit Total_Tx_Count register
-		   Total_Tx_Count = 1 + Tx_Retry_Count */
+	if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
+		return -EOPNOTSUPP;
+
+	/* The MAC has a 4-bit Total_Tx_Count register
+	   Total_Tx_Count = 1 + Tx_Retry_Count */
 #define TX_RETRY_MIN 0
 #define TX_RETRY_MAX 14
-		if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
-			return -EINVAL;
+	if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
+		return -EINVAL;
 
-		/* Adding 1 to convert retry count to try count */
-		priv->txretrycount = vwrq->value + 1;
+	/* Add 1 to convert retry count to try count */
+	if (vwrq->flags & IW_RETRY_SHORT)
+		slimit = (u16) (vwrq->value + 1);
+	else if (vwrq->flags & IW_RETRY_LONG)
+		llimit = (u16) (vwrq->value + 1);
+	else
+		slimit = llimit = (u16) (vwrq->value + 1); /* set both */
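+	/* e.g. a userspace request of "retry 7" becomes a firmware try count
+	 * of 8; the matching GET path converts it back by subtracting 1. */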
 
-		ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
-					    CMD_ACT_SET,
-					    CMD_OPTION_WAITFORRSP,
-					    OID_802_11_TX_RETRYCOUNT, NULL);
-
+	if (llimit) {
+		ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
+				       llimit);
 		if (ret)
 			goto out;
-	} else {
-		return -EOPNOTSUPP;
+	}
+
+	if (slimit) {
+		/* txretrycount follows the short retry limit */
+		priv->txretrycount = slimit;
+		ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
+				       slimit);
+		if (ret)
+			goto out;
 	}
 
 out:
@@ -488,22 +468,30 @@
 {
 	struct lbs_private *priv = dev->priv;
 	int ret = 0;
+	u16 val = 0;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
-	priv->txretrycount = 0;
-	ret = lbs_prepare_and_send_command(priv,
-				    CMD_802_11_SNMP_MIB,
-				    CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
-				    OID_802_11_TX_RETRYCOUNT, NULL);
-	if (ret)
-		goto out;
-
 	vwrq->disabled = 0;
-	if (!vwrq->flags) {
-		vwrq->flags = IW_RETRY_LIMIT;
+
+	if (vwrq->flags & IW_RETRY_LONG) {
+		ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
+		if (ret)
+			goto out;
+
 		/* Subtract 1 to convert try count to retry count */
-		vwrq->value = priv->txretrycount - 1;
+		vwrq->value = val - 1;
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
+	} else {
+		ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
+		if (ret)
+			goto out;
+
+		/* txretrycount follows the short retry limit */
+		priv->txretrycount = val;
+		/* Subtract 1 to convert try count to retry count */
+		vwrq->value = val - 1;
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
 	}
 
 out:
@@ -693,22 +681,12 @@
 
 	range->sensitivity = 0;
 
-	/*
-	 * Setup the supported power level ranges
-	 */
+	/* Setup the supported power level ranges */
 	memset(range->txpower, 0, sizeof(range->txpower));
-	range->txpower[0] = 5;
-	range->txpower[1] = 7;
-	range->txpower[2] = 9;
-	range->txpower[3] = 11;
-	range->txpower[4] = 13;
-	range->txpower[5] = 15;
-	range->txpower[6] = 17;
-	range->txpower[7] = 19;
-
-	range->num_txpower = 8;
-	range->txpower_capa = IW_TXPOW_DBM;
-	range->txpower_capa |= IW_TXPOW_RANGE;
+	range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
+	range->txpower[0] = priv->txpower_min;
+	range->txpower[1] = priv->txpower_max;
+	range->num_txpower = 2;
 
 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
@@ -998,9 +976,11 @@
 	if (fwrq->m != priv->curbssparams.channel) {
 		lbs_deb_wext("mesh channel change forces eth disconnect\n");
 		if (priv->mode == IW_MODE_INFRA)
-			lbs_send_deauthentication(priv);
+			lbs_cmd_80211_deauthenticate(priv,
+						     priv->curbssparams.bssid,
+						     WLAN_REASON_DEAUTH_LEAVING);
 		else if (priv->mode == IW_MODE_ADHOC)
-			lbs_stop_adhoc_network(priv);
+			lbs_adhoc_stop(priv);
 	}
 	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
 	lbs_update_channel(priv);
@@ -1045,6 +1025,18 @@
 				new_rate);
 			goto out;
 		}
+		if (priv->fwrelease < 0x09000000) {
+			ret = lbs_set_power_adapt_cfg(priv, 0,
+					POW_ADAPT_DEFAULT_P0,
+					POW_ADAPT_DEFAULT_P1,
+					POW_ADAPT_DEFAULT_P2);
+			if (ret)
+				goto out;
+		}
+		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
+				TPC_DEFAULT_P2, 1);
+		if (ret)
+			goto out;
 	}
 
 	/* Try the newer command first (Firmware Spec 5.1 and above) */
@@ -1612,12 +1604,26 @@
 			set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
 		}
 
-		disable_wep (assoc_req);
+		/* Only disable wep if necessary: can't waste time here. */
+		if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
+			disable_wep(assoc_req);
 	}
 
 out:
 	if (ret == 0) {
-		lbs_postpone_association_work(priv);
+		/* 802.1x and WPA rekeying must happen as quickly as possible,
+		 * especially during the 4-way handshake; thus if in
+		 * infrastructure mode, and either (a) 802.1x is enabled or
+		 * (b) WPA is being used, set the key right away.
+		 */
+		if (assoc_req->mode == IW_MODE_INFRA &&
+		    ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) ||
+		     (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) ||
+		      assoc_req->secinfo.WPAenabled ||
+		      assoc_req->secinfo.WPA2enabled)) {
+			lbs_do_association_work(priv);
+		} else
+			lbs_postpone_association_work(priv);
 	} else {
 		lbs_cancel_association_work(priv);
 	}
@@ -1725,13 +1731,17 @@
 	case IW_AUTH_TKIP_COUNTERMEASURES:
 	case IW_AUTH_CIPHER_PAIRWISE:
 	case IW_AUTH_CIPHER_GROUP:
-	case IW_AUTH_KEY_MGMT:
 	case IW_AUTH_DROP_UNENCRYPTED:
 		/*
 		 * libertas does not use these parameters
 		 */
 		break;
 
+	case IW_AUTH_KEY_MGMT:
+		assoc_req->secinfo.key_mgmt = dwrq->value;
+		updated = 1;
+		break;
+
 	case IW_AUTH_WPA_VERSION:
 		if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) {
 			assoc_req->secinfo.WPAenabled = 0;
@@ -1811,6 +1821,10 @@
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	switch (dwrq->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_KEY_MGMT:
+		dwrq->value = priv->secinfo.key_mgmt;
+		break;
+
 	case IW_AUTH_WPA_VERSION:
 		dwrq->value = 0;
 		if (priv->secinfo.WPAenabled)
@@ -1844,39 +1858,77 @@
 {
 	int ret = 0;
 	struct lbs_private *priv = dev->priv;
-
-	u16 dbm;
+	s16 dbm = (s16) vwrq->value;
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	if (vwrq->disabled) {
-		lbs_radio_ioctl(priv, RADIO_OFF);
-		return 0;
+		lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
+		goto out;
 	}
 
-	priv->preamble = CMD_TYPE_AUTO_PREAMBLE;
+	if (vwrq->fixed == 0) {
+		/* User requests automatic tx power control, however there are
+		 * many auto tx settings.  For now use firmware defaults until
+		 * we come up with a good way to expose these to the user. */
+		if (priv->fwrelease < 0x09000000) {
+			ret = lbs_set_power_adapt_cfg(priv, 1,
+					POW_ADAPT_DEFAULT_P0,
+					POW_ADAPT_DEFAULT_P1,
+					POW_ADAPT_DEFAULT_P2);
+			if (ret)
+				goto out;
+		}
+		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
+				TPC_DEFAULT_P2, 1);
+		if (ret)
+			goto out;
+		dbm = priv->txpower_max;
+	} else {
+		/* Userspace checks iwrange to see whether it should use dBm
+		 * or mW, therefore this should never happen... Jean II */
+		if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
 
-	lbs_radio_ioctl(priv, RADIO_ON);
+		/* Validate requested power level against firmware allowed
+		 * levels */
+		if (priv->txpower_min && (dbm < priv->txpower_min)) {
+			ret = -EINVAL;
+			goto out;
+		}
 
-	/* Userspace check in iwrange if it should use dBm or mW,
-	 * therefore this should never happen... Jean II */
-	if ((vwrq->flags & IW_TXPOW_TYPE) == IW_TXPOW_MWATT) {
-		return -EOPNOTSUPP;
-	} else
-		dbm = (u16) vwrq->value;
+		if (priv->txpower_max && (dbm > priv->txpower_max)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (priv->fwrelease < 0x09000000) {
+			ret = lbs_set_power_adapt_cfg(priv, 0,
+					POW_ADAPT_DEFAULT_P0,
+					POW_ADAPT_DEFAULT_P1,
+					POW_ADAPT_DEFAULT_P2);
+			if (ret)
+				goto out;
+		}
+		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
+				TPC_DEFAULT_P2, 1);
+		if (ret)
+			goto out;
+	}
 
-	/* auto tx power control */
+	/* If the radio was off, turn it on */
+	if (!priv->radio_on) {
+		ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
+		if (ret)
+			goto out;
+	}
 
-	if (vwrq->fixed == 0)
-		dbm = 0xffff;
+	lbs_deb_wext("txpower set %d dBm\n", dbm);
 
-	lbs_deb_wext("txpower set %d dbm\n", dbm);
+	ret = lbs_set_tx_power(priv, dbm);
 
-	ret = lbs_prepare_and_send_command(priv,
-				    CMD_802_11_RF_TX_POWER,
-				    CMD_ACT_TX_POWER_OPT_SET_LOW,
-				    CMD_OPTION_WAITFORRSP, 0, (void *)&dbm);
-
+out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
 	return ret;
 }
@@ -1928,6 +1980,11 @@
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
+	if (!priv->radio_on) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* Check the size of the string */
 	if (in_ssid_len > IW_ESSID_MAX_SIZE) {
 		ret = -E2BIG;
@@ -2005,6 +2062,11 @@
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
+	if (!priv->radio_on) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* Check the size of the string */
 	if (dwrq->length > IW_ESSID_MAX_SIZE) {
 		ret = -E2BIG;
@@ -2046,6 +2108,9 @@
 
 	lbs_deb_enter(LBS_DEB_WEXT);
 
+	if (!priv->radio_on)
+		return -EINVAL;
+
 	if (awrq->sa_family != ARPHRD_ETHER)
 		return -EINVAL;
 
diff --git a/drivers/net/wireless/libertas_tf/Makefile b/drivers/net/wireless/libertas_tf/Makefile
new file mode 100644
index 0000000..ff5544d
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/Makefile
@@ -0,0 +1,6 @@
+libertas_tf-objs := main.o cmd.o
+
+libertas_tf_usb-objs += if_usb.o
+
+obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf.o
+obj-$(CONFIG_LIBERTAS_THINFIRM_USB) += libertas_tf_usb.o
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
new file mode 100644
index 0000000..fdbcf8b
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -0,0 +1,669 @@
+/*
+ *  Copyright (C) 2008, cozybit Inc.
+ *  Copyright (C) 2003-2006, Marvell International Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ */
+#include "libertas_tf.h"
+
+static const struct channel_range channel_ranges[] = {
+	{ LBTF_REGDOMAIN_US,		1, 12 },
+	{ LBTF_REGDOMAIN_CA,		1, 12 },
+	{ LBTF_REGDOMAIN_EU,		1, 14 },
+	{ LBTF_REGDOMAIN_JP,		1, 14 },
+	{ LBTF_REGDOMAIN_SP,		1, 14 },
+	{ LBTF_REGDOMAIN_FR,		1, 14 },
+};
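+/* Note: struct channel_range treats 'end' as exclusive, so e.g.
+ * LBTF_REGDOMAIN_US above covers channels 1-11. */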
+
+static u16 lbtf_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
+{
+	LBTF_REGDOMAIN_US, LBTF_REGDOMAIN_CA, LBTF_REGDOMAIN_EU,
+	LBTF_REGDOMAIN_SP, LBTF_REGDOMAIN_FR, LBTF_REGDOMAIN_JP,
+};
+
+static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv);
+
+
+/**
+ *  lbtf_cmd_copyback - Simple callback that copies response back into command
+ *
+ *  @priv	A pointer to struct lbtf_private structure
+ *  @extra	A pointer to the original command structure for which
+ *		'resp' is a response
+ *  @resp	A pointer to the command response
+ *
+ *  Returns: 0 on success, error on failure
+ */
+int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
+		     struct cmd_header *resp)
+{
+	struct cmd_header *buf = (void *)extra;
+	uint16_t copy_len;
+
+	copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size));
+	memcpy(buf, resp, copy_len);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lbtf_cmd_copyback);
+
+#define CHAN_TO_IDX(chan) ((chan) - 1)
+
+static void lbtf_geo_init(struct lbtf_private *priv)
+{
+	const struct channel_range *range = channel_ranges;
+	u8 ch;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(channel_ranges); i++)
+		if (channel_ranges[i].regdomain == priv->regioncode) {
+			range = &channel_ranges[i];
+			break;
+		}
+
+	for (ch = range->start; ch < range->end; ch++)
+		priv->channels[CHAN_TO_IDX(ch)].flags = 0;
+}
+
+/**
+ *  lbtf_update_hw_spec: Updates the hardware details.
+ *
+ *  @priv    	A pointer to struct lbtf_private structure
+ *
+ *  Returns: 0 on success, error on failure
+ */
+int lbtf_update_hw_spec(struct lbtf_private *priv)
+{
+	struct cmd_ds_get_hw_spec cmd;
+	int ret = -1;
+	u32 i;
+	DECLARE_MAC_BUF(mac);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
+	ret = lbtf_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd);
+	if (ret)
+		goto out;
+
+	priv->fwcapinfo = le32_to_cpu(cmd.fwcapinfo);
+
+	/* The firmware release is in an interesting format: the patch
+	 * level is in the most significant byte ... so fix that: */
+	priv->fwrelease = le32_to_cpu(cmd.fwrelease);
+	priv->fwrelease = (priv->fwrelease << 8) |
+		(priv->fwrelease >> 24 & 0xff);
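+	/* Worked example: a raw value of 0x01050a0b (patch 1, fw 5.10.11)
+	 * becomes 0x050a0b01 here, so the printk below reports "5.10.11p1". */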
+
+	printk(KERN_INFO "libertastf: %s, fw %u.%u.%up%u, cap 0x%08x\n",
+		print_mac(mac, cmd.permanentaddr),
+		priv->fwrelease >> 24 & 0xff,
+		priv->fwrelease >> 16 & 0xff,
+		priv->fwrelease >>  8 & 0xff,
+		priv->fwrelease       & 0xff,
+		priv->fwcapinfo);
+
+	/* Clamp region code to 8-bit since FW spec indicates that it should
+	 * only ever be 8-bit, even though the field size is 16-bit.  Some
+	 * firmware returns non-zero high 8 bits here.
+	 */
+	priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF;
+
+	for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) {
+		/* use the region code to search for the index */
+		if (priv->regioncode == lbtf_region_code_to_index[i])
+			break;
+	}
+
+	/* if it's unidentified region code, use the default (USA) */
+	if (i >= MRVDRV_MAX_REGION_CODE)
+		priv->regioncode = 0x10;
+
+	if (priv->current_addr[0] == 0xff)
+		memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
+
+	SET_IEEE80211_PERM_ADDR(priv->hw, priv->current_addr);
+
+	lbtf_geo_init(priv);
+out:
+	return ret;
+}
+
+/**
+ *  lbtf_set_channel: Set the radio channel
+ *
+ *  @priv	A pointer to struct lbtf_private structure
+ *  @channel	The desired channel, or 0 to clear a locked channel
+ *
+ *  Returns: 0 on success, error on failure
+ */
+int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
+{
+	struct cmd_ds_802_11_rf_channel cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
+	cmd.channel = cpu_to_le16(channel);
+
+	return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+}
+
+int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
+{
+	struct cmd_ds_802_11_beacon_set cmd;
+	int size;
+
+	if (beacon->len > MRVL_MAX_BCN_SIZE)
+		return -1;
+	size =  sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
+	cmd.hdr.size = cpu_to_le16(size);
+	cmd.len = cpu_to_le16(beacon->len);
+	memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
+
+	lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
+	return 0;
+}
+
+int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
+		     int beacon_int) {
+	struct cmd_ds_802_11_beacon_control cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.beacon_enable = cpu_to_le16(beacon_enable);
+	cmd.beacon_period = cpu_to_le16(beacon_int);
+
+	lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
+	return 0;
+}
+
+static void lbtf_queue_cmd(struct lbtf_private *priv,
+			  struct cmd_ctrl_node *cmdnode)
+{
+	unsigned long flags;
+
+	if (!cmdnode)
+		return;
+
+	if (!cmdnode->cmdbuf->size)
+		return;
+
+	cmdnode->result = 0;
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	list_add_tail(&cmdnode->list, &priv->cmdpendingq);
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+}
+
+static void lbtf_submit_command(struct lbtf_private *priv,
+			       struct cmd_ctrl_node *cmdnode)
+{
+	unsigned long flags;
+	struct cmd_header *cmd;
+	uint16_t cmdsize;
+	uint16_t command;
+	int timeo = 5 * HZ;
+	int ret;
+
+	cmd = cmdnode->cmdbuf;
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	priv->cur_cmd = cmdnode;
+	cmdsize = le16_to_cpu(cmd->size);
+	command = le16_to_cpu(cmd->command);
+	ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	if (ret)
+		/* Let the timer kick in and retry, and potentially reset
+		   the whole thing if the condition persists */
+		timeo = HZ;
+
+	/* Setup the timer after transmit command */
+	mod_timer(&priv->command_timer, jiffies + timeo);
+}
+
+/**
+ *  This function cleans a command node and inserts it into cmdfreeq.
+ *  Requires priv->driver_lock to be held.
+ */
+static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
+					 struct cmd_ctrl_node *cmdnode)
+{
+	if (!cmdnode)
+		return;
+
+	cmdnode->callback = NULL;
+	cmdnode->callback_arg = 0;
+
+	memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
+
+	list_add_tail(&cmdnode->list, &priv->cmdfreeq);
+}
+
+static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
+	struct cmd_ctrl_node *ptempcmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	__lbtf_cleanup_and_insert_cmd(priv, ptempcmd);
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+}
+
+void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
+			  int result)
+{
+	cmd->result = result;
+	cmd->cmdwaitqwoken = 1;
+	wake_up_interruptible(&cmd->cmdwait_q);
+
+	if (!cmd->callback)
+		__lbtf_cleanup_and_insert_cmd(priv, cmd);
+	priv->cur_cmd = NULL;
+}
+
+int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
+{
+	struct cmd_ds_mac_multicast_addr cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+
+	cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
+	memcpy(cmd.maclist, priv->multicastlist,
+	       priv->nr_of_multicastmacaddr * ETH_ALEN);
+
+	lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
+	return 0;
+}
+
+void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
+{
+	struct cmd_ds_set_mode cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.mode = cpu_to_le16(mode);
+	lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
+}
+
+void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid)
+{
+	struct cmd_ds_set_bssid cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.activate = activate ? 1 : 0;
+	if (activate)
+		memcpy(cmd.bssid, bssid, ETH_ALEN);
+
+	lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
+}
+
+int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
+{
+	struct cmd_ds_802_11_mac_address cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+
+	memcpy(cmd.macadd, mac_addr, ETH_ALEN);
+
+	lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
+	return 0;
+}
+
+int lbtf_set_radio_control(struct lbtf_private *priv)
+{
+	int ret = 0;
+	struct cmd_ds_802_11_radio_control cmd;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+
+	switch (priv->preamble) {
+	case CMD_TYPE_SHORT_PREAMBLE:
+		cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE);
+		break;
+
+	case CMD_TYPE_LONG_PREAMBLE:
+		cmd.control = cpu_to_le16(SET_LONG_PREAMBLE);
+		break;
+
+	case CMD_TYPE_AUTO_PREAMBLE:
+	default:
+		cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE);
+		break;
+	}
+
+	if (priv->radioon)
+		cmd.control |= cpu_to_le16(TURN_ON_RF);
+	else
+		cmd.control &= cpu_to_le16(~TURN_ON_RF);
+
+	ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
+	return ret;
+}
+
+void lbtf_set_mac_control(struct lbtf_private *priv)
+{
+	struct cmd_ds_mac_control cmd;
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(priv->mac_control);
+	cmd.reserved = 0;
+
+	lbtf_cmd_async(priv, CMD_MAC_CONTROL,
+		&cmd.hdr, sizeof(cmd));
+}
+
+/**
+ *  lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue
+ *
+ *  @priv	A pointer to struct lbtf_private structure
+ *
+ *  Returns: 0 on success.
+ */
+int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
+{
+	u32 bufsize;
+	u32 i;
+	struct cmd_ctrl_node *cmdarray;
+
+	/* Allocate and initialize the command array */
+	bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
+	cmdarray = kzalloc(bufsize, GFP_KERNEL);
+	if (!cmdarray)
+		return -1;
+	priv->cmd_array = cmdarray;
+
+	/* Allocate and initialize each command buffer in the command array */
+	for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+		cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
+		if (!cmdarray[i].cmdbuf)
+			return -1;
+	}
+
+	for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+		init_waitqueue_head(&cmdarray[i].cmdwait_q);
+		lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
+	}
+	return 0;
+}
+
+/**
+ *  lbtf_free_cmd_buffer - Frees the cmd buffer.
+ *
+ *  @priv	A pointer to struct lbtf_private structure
+ *
+ *  Returns: 0
+ */
+int lbtf_free_cmd_buffer(struct lbtf_private *priv)
+{
+	struct cmd_ctrl_node *cmdarray;
+	unsigned int i;
+
+	/* need to check if cmd array is allocated or not */
+	if (priv->cmd_array == NULL)
+		return 0;
+
+	cmdarray = priv->cmd_array;
+
+	/* Release shared memory buffers */
+	for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+		kfree(cmdarray[i].cmdbuf);
+		cmdarray[i].cmdbuf = NULL;
+	}
+
+	/* Release cmd_ctrl_node */
+	kfree(priv->cmd_array);
+	priv->cmd_array = NULL;
+
+	return 0;
+}
+
+/**
+ *  lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue.
+ *
+ *  @priv		A pointer to struct lbtf_private structure
+ *
+ *  Returns: pointer to a struct cmd_ctrl_node or NULL if none available.
+ */
+static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
+{
+	struct cmd_ctrl_node *tempnode;
+	unsigned long flags;
+
+	if (!priv)
+		return NULL;
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (!list_empty(&priv->cmdfreeq)) {
+		tempnode = list_first_entry(&priv->cmdfreeq,
+					    struct cmd_ctrl_node, list);
+		list_del(&tempnode->list);
+	} else
+		tempnode = NULL;
+
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	return tempnode;
+}
+
+/**
+ *  lbtf_execute_next_command: execute next command in cmd pending queue.
+ *
+ *  @priv     A pointer to struct lbtf_private structure
+ *
+ *  Returns: 0 on success.
+ */
+int lbtf_execute_next_command(struct lbtf_private *priv)
+{
+	struct cmd_ctrl_node *cmdnode = NULL;
+	struct cmd_header *cmd;
+	unsigned long flags;
+
+	/* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
+	 * only caller to us is lbtf_thread() and we get called even when a
+	 * data packet is received */
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (priv->cur_cmd) {
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		return -1;
+	}
+
+	if (!list_empty(&priv->cmdpendingq)) {
+		cmdnode = list_first_entry(&priv->cmdpendingq,
+					   struct cmd_ctrl_node, list);
+	}
+
+	if (cmdnode) {
+		cmd = cmdnode->cmdbuf;
+
+		list_del(&cmdnode->list);
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		lbtf_submit_command(priv, cmdnode);
+	} else
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+	return 0;
+}
+
+static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
+	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
+	int (*callback)(struct lbtf_private *, unsigned long,
+			struct cmd_header *),
+	unsigned long callback_arg)
+{
+	struct cmd_ctrl_node *cmdnode;
+
+	if (priv->surpriseremoved)
+		return ERR_PTR(-ENOENT);
+
+	cmdnode = lbtf_get_cmd_ctrl_node(priv);
+	if (cmdnode == NULL) {
+		/* Wake up main thread to execute next command */
+		queue_work(lbtf_wq, &priv->cmd_work);
+		return ERR_PTR(-ENOBUFS);
+	}
+
+	cmdnode->callback = callback;
+	cmdnode->callback_arg = callback_arg;
+
+	/* Copy the incoming command to the buffer */
+	memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
+
+	/* Set sequence number, clean result, move to buffer */
+	priv->seqnum++;
+	cmdnode->cmdbuf->command = cpu_to_le16(command);
+	cmdnode->cmdbuf->size    = cpu_to_le16(in_cmd_size);
+	cmdnode->cmdbuf->seqnum  = cpu_to_le16(priv->seqnum);
+	cmdnode->cmdbuf->result  = 0;
+	cmdnode->cmdwaitqwoken = 0;
+	lbtf_queue_cmd(priv, cmdnode);
+	queue_work(lbtf_wq, &priv->cmd_work);
+
+	return cmdnode;
+}
+
+void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
+	struct cmd_header *in_cmd, int in_cmd_size)
+{
+	__lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
+}
+
+int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
+	      struct cmd_header *in_cmd, int in_cmd_size,
+	      int (*callback)(struct lbtf_private *,
+			      unsigned long, struct cmd_header *),
+	      unsigned long callback_arg)
+{
+	struct cmd_ctrl_node *cmdnode;
+	unsigned long flags;
+	int ret = 0;
+
+	cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
+				  callback, callback_arg);
+	if (IS_ERR(cmdnode))
+		return PTR_ERR(cmdnode);
+
+	might_sleep();
+	ret = wait_event_interruptible(cmdnode->cmdwait_q,
+				       cmdnode->cmdwaitqwoken);
+	if (ret) {
+		printk(KERN_DEBUG
+		       "libertastf: command 0x%04x interrupted by signal\n",
+		       command);
+		return ret;
+	}
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	ret = cmdnode->result;
+	if (ret)
+		printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
+			    command, ret);
+
+	__lbtf_cleanup_and_insert_cmd(priv, cmdnode);
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__lbtf_cmd);
+
+/* Call holding driver_lock */
+void lbtf_cmd_response_rx(struct lbtf_private *priv)
+{
+	priv->cmd_response_rxed = 1;
+	queue_work(lbtf_wq, &priv->cmd_work);
+}
+EXPORT_SYMBOL_GPL(lbtf_cmd_response_rx);
+
+int lbtf_process_rx_command(struct lbtf_private *priv)
+{
+	uint16_t respcmd, curcmd;
+	struct cmd_header *resp;
+	int ret = 0;
+	unsigned long flags;
+	uint16_t result;
+
+	mutex_lock(&priv->lock);
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (!priv->cur_cmd) {
+		ret = -1;
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		goto done;
+	}
+
+	resp = (void *)priv->cmd_resp_buff;
+	curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
+	respcmd = le16_to_cpu(resp->command);
+	result = le16_to_cpu(resp->result);
+
+	if (net_ratelimit())
+		printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
+			respcmd, le16_to_cpu(resp->seqnum),
+			le16_to_cpu(resp->size));
+
+	if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		ret = -1;
+		goto done;
+	}
+	if (respcmd != CMD_RET(curcmd)) {
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		ret = -1;
+		goto done;
+	}
+
+	if (resp->result == cpu_to_le16(0x0004)) {
+		/* 0x0004 means -EAGAIN. Drop the response, let it time out
+		   and be resubmitted */
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		ret = -1;
+		goto done;
+	}
+
+	/* Now we got response from FW, cancel the command timer */
+	del_timer(&priv->command_timer);
+	priv->cmd_timed_out = 0;
+	if (priv->nr_retries)
+		priv->nr_retries = 0;
+
+	/* If the command is not successful, cleanup and return failure */
+	if ((result != 0 || !(respcmd & 0x8000))) {
+		/*
+		 * Handling errors here
+		 */
+		switch (respcmd) {
+		case CMD_RET(CMD_GET_HW_SPEC):
+		case CMD_RET(CMD_802_11_RESET):
+			printk(KERN_DEBUG "libertastf: reset failed\n");
+			break;
+
+		}
+		lbtf_complete_command(priv, priv->cur_cmd, result);
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+		ret = -1;
+		goto done;
+	}
+
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	if (priv->cur_cmd && priv->cur_cmd->callback) {
+		ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg,
+				resp);
+	}
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (priv->cur_cmd) {
+		/* Clean up and Put current command back to cmdfreeq */
+		lbtf_complete_command(priv, priv->cur_cmd, result);
+	}
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+done:
+	mutex_unlock(&priv->lock);
+	return ret;
+}
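As a usage note for the command machinery above, the sketch below shows how a synchronous command would typically be issued. It assumes the lbtf_cmd_with_response() convenience wrapper used elsewhere in this patch (e.g. in lbtf_set_channel()) expands to __lbtf_cmd() with lbtf_cmd_copyback() as the callback, so the firmware response is copied back over the command buffer; the helper name below is illustrative only.

/* Sketch only: read the current radio channel back from the firmware. */
static int example_get_channel(struct lbtf_private *priv, u8 *channel)
{
	struct cmd_ds_802_11_rf_channel cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_GET);

	ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
	if (!ret)
		*channel = (u8) le16_to_cpu(cmd.channel);
	return ret;
}

The call sleeps in wait_event_interruptible() until lbtf_complete_command() marks the node done, which is why __lbtf_cmd() contains the might_sleep() annotation.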
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
new file mode 100644
index 0000000..1cc03a8
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -0,0 +1,766 @@
+/*
+ *  Copyright (C) 2008, cozybit Inc.
+ *  Copyright (C) 2003-2006, Marvell International Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/firmware.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#define DRV_NAME "lbtf_usb"
+
+#include "libertas_tf.h"
+#include "if_usb.h"
+
+#define MESSAGE_HEADER_LEN	4
+
+static char *lbtf_fw_name = "lbtf_usb.bin";
+module_param_named(fw_name, lbtf_fw_name, charp, 0644);
+
+static struct usb_device_id if_usb_table[] = {
+	/* Enter the device signature inside */
+	{ USB_DEVICE(0x1286, 0x2001) },
+	{ USB_DEVICE(0x05a3, 0x8388) },
+	{}	/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, if_usb_table);
+
+static void if_usb_receive(struct urb *urb);
+static void if_usb_receive_fwload(struct urb *urb);
+static int if_usb_prog_firmware(struct if_usb_card *cardp);
+static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
+			       uint8_t *payload, uint16_t nb);
+static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
+			uint16_t nb, u8 data);
+static void if_usb_free(struct if_usb_card *cardp);
+static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
+static int if_usb_reset_device(struct if_usb_card *cardp);
+
+/**
+ *  if_usb_write_bulk_callback - callback to handle URB status
+ *
+ *  @param urb 		pointer to urb structure
+ */
+static void if_usb_write_bulk_callback(struct urb *urb)
+{
+	if (urb->status != 0)
+		printk(KERN_INFO "libertastf: URB in failure status: %d\n",
+		       urb->status);
+}
+
+/**
+ *  if_usb_free - free tx/rx urb, skb and rx buffer
+ *
+ *  @param cardp	pointer if_usb_card
+ */
+static void if_usb_free(struct if_usb_card *cardp)
+{
+	/* Unlink tx & rx urb */
+	usb_kill_urb(cardp->tx_urb);
+	usb_kill_urb(cardp->rx_urb);
+	usb_kill_urb(cardp->cmd_urb);
+
+	usb_free_urb(cardp->tx_urb);
+	cardp->tx_urb = NULL;
+
+	usb_free_urb(cardp->rx_urb);
+	cardp->rx_urb = NULL;
+
+	usb_free_urb(cardp->cmd_urb);
+	cardp->cmd_urb = NULL;
+
+	kfree(cardp->ep_out_buf);
+	cardp->ep_out_buf = NULL;
+}
+
+static void if_usb_setup_firmware(struct lbtf_private *priv)
+{
+	struct if_usb_card *cardp = priv->card;
+	struct cmd_ds_set_boot2_ver b2_cmd;
+
+	if_usb_submit_rx_urb(cardp);
+	b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
+	b2_cmd.action = 0;
+	b2_cmd.version = cardp->boot2_version;
+
+	if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
+		printk(KERN_INFO "libertastf: setting boot2 version failed\n");
+}
+
+static void if_usb_fw_timeo(unsigned long priv)
+{
+	struct if_usb_card *cardp = (void *)priv;
+
+	if (!cardp->fwdnldover)
+		/* Download timed out */
+		cardp->priv->surpriseremoved = 1;
+	wake_up(&cardp->fw_wq);
+}
+
+/**
+ *  if_usb_probe - sets the configuration values
+ *
+ *  @intf	pointer to usb_interface
+ *  @id		pointer to usb_device_id
+ *
+ *  Returns: 0 on success, error code on failure
+ */
+static int if_usb_probe(struct usb_interface *intf,
+			const struct usb_device_id *id)
+{
+	struct usb_device *udev;
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	struct lbtf_private *priv;
+	struct if_usb_card *cardp;
+	int i;
+
+	udev = interface_to_usbdev(intf);
+
+	cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
+	if (!cardp)
+		goto error;
+
+	setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
+	init_waitqueue_head(&cardp->fw_wq);
+
+	cardp->udev = udev;
+	iface_desc = intf->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			cardp->ep_in_size =
+				le16_to_cpu(endpoint->wMaxPacketSize);
+			cardp->ep_in = usb_endpoint_num(endpoint);
+		} else if (usb_endpoint_is_bulk_out(endpoint)) {
+			cardp->ep_out_size =
+				le16_to_cpu(endpoint->wMaxPacketSize);
+			cardp->ep_out = usb_endpoint_num(endpoint);
+		}
+	}
+	if (!cardp->ep_out_size || !cardp->ep_in_size)
+		/* Endpoints not found */
+		goto dealloc;
+
+	cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!cardp->rx_urb)
+		goto dealloc;
+
+	cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!cardp->tx_urb)
+		goto dealloc;
+
+	cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!cardp->cmd_urb)
+		goto dealloc;
+
+	cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
+				    GFP_KERNEL);
+	if (!cardp->ep_out_buf)
+		goto dealloc;
+
+	priv = lbtf_add_card(cardp, &udev->dev);
+	if (!priv)
+		goto dealloc;
+
+	cardp->priv = priv;
+
+	priv->hw_host_to_card = if_usb_host_to_card;
+	priv->hw_prog_firmware = if_usb_prog_firmware;
+	priv->hw_reset_device = if_usb_reset_device;
+	cardp->boot2_version = udev->descriptor.bcdDevice;
+
+	usb_get_dev(udev);
+	usb_set_intfdata(intf, cardp);
+
+	return 0;
+
+dealloc:
+	if_usb_free(cardp);
+error:
+	return -ENOMEM;
+}
+
+/**
+ *  if_usb_disconnect -  free resource and cleanup
+ *
+ *  @intf	USB interface structure
+ */
+static void if_usb_disconnect(struct usb_interface *intf)
+{
+	struct if_usb_card *cardp = usb_get_intfdata(intf);
+	struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+
+	if_usb_reset_device(cardp);
+
+	if (priv)
+		lbtf_remove_card(priv);
+
+	/* Unlink and free urb */
+	if_usb_free(cardp);
+
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(interface_to_usbdev(intf));
+}
+
+/**
+ *  if_usb_send_fw_pkt - download the next firmware block to the device
+ *
+ *  @cardp	pointer to struct if_usb_card
+ *
+ *  Returns: 0
+ */
+static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
+{
+	struct fwdata *fwdata = cardp->ep_out_buf;
+	u8 *firmware = (u8 *) cardp->fw->data;
+
+	/* If we got a CRC failure on the last block, back
+	   up and retry it */
+	if (!cardp->CRC_OK) {
+		cardp->totalbytes = cardp->fwlastblksent;
+		cardp->fwseqnum--;
+	}
+
+	/* struct fwdata (which we sent to the card) has an
+	   extra __le32 field in between the header and the data,
+	   which is not in the struct fwheader in the actual
+	   firmware binary. Insert the seqnum in the middle... */
+	memcpy(&fwdata->hdr, &firmware[cardp->totalbytes],
+	       sizeof(struct fwheader));
+
+	cardp->fwlastblksent = cardp->totalbytes;
+	cardp->totalbytes += sizeof(struct fwheader);
+
+	memcpy(fwdata->data, &firmware[cardp->totalbytes],
+	       le32_to_cpu(fwdata->hdr.datalength));
+
+	fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
+	cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
+
+	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
+		     le32_to_cpu(fwdata->hdr.datalength), 0);
+
+	if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
+		/* Host has finished FW downloading
+		 * Downloading FW JUMP BLOCK
+		 */
+		cardp->fwfinalblk = 1;
+
+	return 0;
+}
+
+static int if_usb_reset_device(struct if_usb_card *cardp)
+{
+	struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
+	int ret;
+
+	*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
+
+	cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
+	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset));
+	cmd->hdr.result = cpu_to_le16(0);
+	cmd->hdr.seqnum = cpu_to_le16(0x5a5a);
+	cmd->action = cpu_to_le16(CMD_ACT_HALT);
+	usb_tx_block(cardp, cardp->ep_out_buf,
+		     4 + sizeof(struct cmd_ds_802_11_reset), 0);
+
+	msleep(100);
+	ret = usb_reset_device(cardp->udev);
+	msleep(100);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(if_usb_reset_device);
+
+/**
+ *  usb_tx_block - transfer data to the device
+ *
+ *  @cardp	pointer to struct if_usb_card
+ *  @payload	pointer to payload data
+ *  @nb		data length
+ *  @data	non-zero for data, zero for commands
+ *
+ *  Returns: 0 on success, nonzero otherwise.
+ */
+static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
+			uint16_t nb, u8 data)
+{
+	struct urb *urb;
+
+	/* check if device is removed */
+	if (cardp->priv->surpriseremoved)
+		return -1;
+
+	if (data)
+		urb = cardp->tx_urb;
+	else
+		urb = cardp->cmd_urb;
+
+	usb_fill_bulk_urb(urb, cardp->udev,
+			  usb_sndbulkpipe(cardp->udev,
+					  cardp->ep_out),
+			  payload, nb, if_usb_write_bulk_callback, cardp);
+
+	urb->transfer_flags |= URB_ZERO_PACKET;
+
+	if (usb_submit_urb(urb, GFP_ATOMIC))
+		return -1;
+	return 0;
+}
+
+static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
+				  void (*callbackfn)(struct urb *urb))
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
+	if (!skb)
+		return -1;
+
+	cardp->rx_skb = skb;
+
+	/* Fill the receive configuration URB and initialise the Rx callback */
+	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
+			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
+			  (void *) (skb->tail),
+			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
+
+	cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
+
+	if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
+		kfree_skb(skb);
+		cardp->rx_skb = NULL;
+		return -1;
+	} else
+		return 0;
+}
+
+static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
+{
+	return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload);
+}
+
+static int if_usb_submit_rx_urb(struct if_usb_card *cardp)
+{
+	return __if_usb_submit_rx_urb(cardp, &if_usb_receive);
+}
+
+static void if_usb_receive_fwload(struct urb *urb)
+{
+	struct if_usb_card *cardp = urb->context;
+	struct sk_buff *skb = cardp->rx_skb;
+	struct fwsyncheader *syncfwheader;
+	struct bootcmdresp bcmdresp;
+
+	if (urb->status) {
+		kfree_skb(skb);
+		return;
+	}
+
+	if (cardp->fwdnldover) {
+		__le32 *tmp = (__le32 *)(skb->data);
+
+		if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
+		    tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
+			/* Firmware ready event received */
+			wake_up(&cardp->fw_wq);
+		else
+			if_usb_submit_rx_urb_fwload(cardp);
+		kfree_skb(skb);
+		return;
+	}
+	if (cardp->bootcmdresp <= 0) {
+		memcpy(&bcmdresp, skb->data, sizeof(bcmdresp));
+
+		if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
+			kfree_skb(skb);
+			if_usb_submit_rx_urb_fwload(cardp);
+			cardp->bootcmdresp = 1;
+			/* Received valid boot command response */
+			return;
+		}
+		if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
+			if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
+			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
+			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
+				cardp->bootcmdresp = -1;
+		} else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
+			   bcmdresp.result == BOOT_CMD_RESP_OK)
+			cardp->bootcmdresp = 1;
+
+		kfree_skb(skb);
+		if_usb_submit_rx_urb_fwload(cardp);
+		return;
+	}
+
+	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+	if (!syncfwheader) {
+		kfree_skb(skb);
+		return;
+	}
+
+	memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
+
+	if (!syncfwheader->cmd)
+		cardp->CRC_OK = 1;
+	else
+		cardp->CRC_OK = 0;
+	kfree_skb(skb);
+
+	/* reschedule timer for 200ms hence */
+	mod_timer(&cardp->fw_timeout, jiffies + (HZ/5));
+
+	if (cardp->fwfinalblk) {
+		cardp->fwdnldover = 1;
+		goto exit;
+	}
+
+	if_usb_send_fw_pkt(cardp);
+
+ exit:
+	if_usb_submit_rx_urb_fwload(cardp);
+
+	kfree(syncfwheader);
+
+	return;
+}
+
+#define MRVDRV_MIN_PKT_LEN	30
+
+static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
+				       struct if_usb_card *cardp,
+				       struct lbtf_private *priv)
+{
+	if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
+	    || recvlength < MRVDRV_MIN_PKT_LEN) {
+		kfree_skb(skb);
+		return;
+	}
+
+	skb_put(skb, recvlength);
+	skb_pull(skb, MESSAGE_HEADER_LEN);
+	lbtf_rx(priv, skb);
+}
+
+static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
+				      struct sk_buff *skb,
+				      struct if_usb_card *cardp,
+				      struct lbtf_private *priv)
+{
+	if (recvlength > LBS_CMD_BUFFER_SIZE) {
+		kfree_skb(skb);
+		return;
+	}
+
+	if (!in_interrupt())
+		BUG();
+
+	spin_lock(&priv->driver_lock);
+	memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
+	       recvlength - MESSAGE_HEADER_LEN);
+	kfree_skb(skb);
+	lbtf_cmd_response_rx(priv);
+	spin_unlock(&priv->driver_lock);
+}
+
+/**
+ *  if_usb_receive - read data received from the device.
+ *
+ *  @urb		pointer to struct urb
+ */
+static void if_usb_receive(struct urb *urb)
+{
+	struct if_usb_card *cardp = urb->context;
+	struct sk_buff *skb = cardp->rx_skb;
+	struct lbtf_private *priv = cardp->priv;
+	int recvlength = urb->actual_length;
+	uint8_t *recvbuff = NULL;
+	uint32_t recvtype = 0;
+	__le32 *pkt = (__le32 *) skb->data;
+
+	if (recvlength) {
+		if (urb->status) {
+			kfree_skb(skb);
+			goto setup_for_next;
+		}
+
+		recvbuff = skb->data;
+		recvtype = le32_to_cpu(pkt[0]);
+	} else if (urb->status) {
+		kfree_skb(skb);
+		return;
+	}
+
+	switch (recvtype) {
+	case CMD_TYPE_DATA:
+		process_cmdtypedata(recvlength, skb, cardp, priv);
+		break;
+
+	case CMD_TYPE_REQUEST:
+		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
+		break;
+
+	case CMD_TYPE_INDICATION:
+	{
+		/* Event cause handling */
+		u32 event_cause = le32_to_cpu(pkt[1]);
+
+		/* Icky undocumented magic special case */
+		if (event_cause & 0xffff0000) {
+			u16 tmp;
+			u8 retrycnt;
+			u8 failure;
+
+			tmp = event_cause >> 16;
+			retrycnt = tmp & 0x00ff;
+			failure = (tmp & 0xff00) >> 8;
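+			/* e.g. event_cause 0x01020000 decodes to retrycnt 2,
+			 * failure 1 */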
+			lbtf_send_tx_feedback(priv, retrycnt, failure);
+		} else if (event_cause == LBTF_EVENT_BCN_SENT)
+			lbtf_bcn_sent(priv);
+		else
+			printk(KERN_DEBUG
+			       "Unsupported notification %d received\n",
+			       event_cause);
+		kfree_skb(skb);
+		break;
+	}
+	default:
+		printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
+			     recvtype);
+		kfree_skb(skb);
+		break;
+	}
+
+setup_for_next:
+	if_usb_submit_rx_urb(cardp);
+}
+
+/**
+ *  if_usb_host_to_card -  Download data to the device
+ *
+ *  @priv		pointer to struct lbtf_private structure
+ *  @type		type of data
+ *  @payload	pointer to data buffer
+ *  @nb		number of bytes
+ *
+ *  Returns: 0 on success, nonzero otherwise
+ */
+static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
+			       uint8_t *payload, uint16_t nb)
+{
+	struct if_usb_card *cardp = priv->card;
+	u8 data = 0;
+
+	if (type == MVMS_CMD) {
+		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
+	} else {
+		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA);
+		data = 1;
+	}
+
+	memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb);
+
+	return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN,
+			    data);
+}
+
+/**
+ *  if_usb_issue_boot_command - Issue boot command to Boot2.
+ *
+ *  @cardp	pointer to struct if_usb_card
+ *  @ivalue	1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
+ *
+ *  Returns: 0
+ */
+static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
+{
+	struct bootcmd *bootcmd = cardp->ep_out_buf;
+
+	/* Prepare command */
+	bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER);
+	bootcmd->cmd = ivalue;
+	memset(bootcmd->pad, 0, sizeof(bootcmd->pad));
+
+	/* Issue command */
+	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd), 0);
+
+	return 0;
+}
+
+
+/**
+ *  check_fwfile_format - Check the validity of Boot2/FW image.
+ *
+ *  @data	pointer to image
+ *  @totlen	image length
+ *
+ *  Returns: 0 if the image is valid, nonzero otherwise.
+ */
+static int check_fwfile_format(const u8 *data, u32 totlen)
+{
+	u32 bincmd, exit;
+	u32 blksize, offset, len;
+	int ret;
+
+	ret = 1;
+	exit = len = 0;
+
+	do {
+		struct fwheader *fwh = (void *) data;
+
+		bincmd = le32_to_cpu(fwh->dnldcmd);
+		blksize = le32_to_cpu(fwh->datalength);
+		switch (bincmd) {
+		case FW_HAS_DATA_TO_RECV:
+			offset = sizeof(struct fwheader) + blksize;
+			data += offset;
+			len += offset;
+			if (len >= totlen)
+				exit = 1;
+			break;
+		case FW_HAS_LAST_BLOCK:
+			exit = 1;
+			ret = 0;
+			break;
+		default:
+			exit = 1;
+			break;
+		}
+	} while (!exit);
+
+	if (ret)
+		printk(KERN_INFO
+		       "libertastf: firmware file format check failed\n");
+	return ret;
+}
+
+
+static int if_usb_prog_firmware(struct if_usb_card *cardp)
+{
+	int i = 0;
+	static int reset_count = 10;
+	int ret = 0;
+
+	ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
+	if (ret < 0) {
+		printk(KERN_INFO "libertastf: firmware %s not found\n",
+		       lbtf_fw_name);
+		goto done;
+	}
+
+	if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
+		goto release_fw;
+
+restart:
+	if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
+		ret = -1;
+		goto release_fw;
+	}
+
+	cardp->bootcmdresp = 0;
+	do {
+		int j = 0;
+		i++;
+		/* Issue Boot command = 1, Boot from Download-FW */
+		if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
+		/* wait for command response */
+		do {
+			j++;
+			msleep_interruptible(100);
+		} while (cardp->bootcmdresp == 0 && j < 10);
+	} while (cardp->bootcmdresp == 0 && i < 5);
+
+	if (cardp->bootcmdresp <= 0) {
+		if (--reset_count >= 0) {
+			if_usb_reset_device(cardp);
+			goto restart;
+		}
+		return -1;
+	}
+
+	i = 0;
+
+	cardp->totalbytes = 0;
+	cardp->fwlastblksent = 0;
+	cardp->CRC_OK = 1;
+	cardp->fwdnldover = 0;
+	cardp->fwseqnum = -1;
+	cardp->totalbytes = 0;
+	cardp->fwfinalblk = 0;
+
+	/* Send the first firmware packet... */
+	if_usb_send_fw_pkt(cardp);
+
+	/* ... and wait for the process to complete */
+	wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved ||
+					       cardp->fwdnldover);
+
+	del_timer_sync(&cardp->fw_timeout);
+	usb_kill_urb(cardp->rx_urb);
+
+	if (!cardp->fwdnldover) {
+		printk(KERN_INFO "libertastf: failed to load fw,"
+				 " resetting device!\n");
+		if (--reset_count >= 0) {
+			if_usb_reset_device(cardp);
+			goto restart;
+		}
+
+		printk(KERN_INFO "libertastf: fw download failure\n");
+		ret = -1;
+		goto release_fw;
+	}
+
+	cardp->priv->fw_ready = 1;
+
+ release_fw:
+	release_firmware(cardp->fw);
+	cardp->fw = NULL;
+
+	if_usb_setup_firmware(cardp->priv);
+
+ done:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
+
+
+#define if_usb_suspend NULL
+#define if_usb_resume NULL
+
+static struct usb_driver if_usb_driver = {
+	.name = DRV_NAME,
+	.probe = if_usb_probe,
+	.disconnect = if_usb_disconnect,
+	.id_table = if_usb_table,
+	.suspend = if_usb_suspend,
+	.resume = if_usb_resume,
+};
+
+static int __init if_usb_init_module(void)
+{
+	int ret = 0;
+
+	ret = usb_register(&if_usb_driver);
+	return ret;
+}
+
+static void __exit if_usb_exit_module(void)
+{
+	usb_deregister(&if_usb_driver);
+}
+
+module_init(if_usb_init_module);
+module_exit(if_usb_exit_module);
+
+MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
+MODULE_AUTHOR("Cozybit Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/libertas_tf/if_usb.h b/drivers/net/wireless/libertas_tf/if_usb.h
new file mode 100644
index 0000000..6fa5b3f5
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (C) 2008, cozybit Inc.
+ *  Copyright (C) 2003-2006, Marvell International Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ */
+#include <linux/wait.h>
+#include <linux/timer.h>
+
+struct lbtf_private;
+
+/**
+ * This file contains definitions for the USB interface.
+ */
+#define CMD_TYPE_REQUEST		0xF00DFACE
+#define CMD_TYPE_DATA			0xBEADC0DE
+#define CMD_TYPE_INDICATION		0xBEEFFACE
+
+#define BOOT_CMD_FW_BY_USB		0x01
+#define BOOT_CMD_FW_IN_EEPROM		0x02
+#define BOOT_CMD_UPDATE_BOOT2		0x03
+#define BOOT_CMD_UPDATE_FW		0x04
+#define BOOT_CMD_MAGIC_NUMBER		0x4C56524D   /* LVRM */
+
+struct bootcmd {
+	__le32	magic;
+	uint8_t	cmd;
+	uint8_t	pad[11];
+};
+
+#define BOOT_CMD_RESP_OK		0x0001
+#define BOOT_CMD_RESP_FAIL		0x0000
+
+struct bootcmdresp {
+	__le32	magic;
+	uint8_t	cmd;
+	uint8_t	result;
+	uint8_t	pad[2];
+};
+
+/** USB card description structure */
+struct if_usb_card {
+	struct usb_device *udev;
+	struct urb *rx_urb, *tx_urb, *cmd_urb;
+	struct lbtf_private *priv;
+
+	struct sk_buff *rx_skb;
+
+	uint8_t ep_in;
+	uint8_t ep_out;
+
+	int8_t bootcmdresp;
+
+	int ep_in_size;
+
+	void *ep_out_buf;
+	int ep_out_size;
+
+	const struct firmware *fw;
+	struct timer_list fw_timeout;
+	wait_queue_head_t fw_wq;
+	uint32_t fwseqnum;
+	uint32_t totalbytes;
+	uint32_t fwlastblksent;
+	uint8_t CRC_OK;
+	uint8_t fwdnldover;
+	uint8_t fwfinalblk;
+
+	__le16 boot2_version;
+};
+
+/** fwheader */
+struct fwheader {
+	__le32 dnldcmd;
+	__le32 baseaddr;
+	__le32 datalength;
+	__le32 CRC;
+};
+
+#define FW_MAX_DATA_BLK_SIZE	600
+/** FWData */
+struct fwdata {
+	struct fwheader hdr;
+	__le32 seqnum;
+	uint8_t data[0];
+};
+
+/** fwsyncheader */
+struct fwsyncheader {
+	__le32 cmd;
+	__le32 seqnum;
+};
+
+#define FW_HAS_DATA_TO_RECV		0x00000001
+#define FW_HAS_LAST_BLOCK		0x00000004
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
new file mode 100644
index 0000000..8995cd7
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -0,0 +1,514 @@
+/*
+ *  Copyright (C) 2008, cozybit Inc.
+ *  Copyright (C) 2007, Red Hat, Inc.
+ *  Copyright (C) 2003-2006, Marvell International Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ */
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <net/mac80211.h>
+
+#ifndef DRV_NAME
+#define DRV_NAME "libertas_tf"
+#endif
+
+#define	MRVL_DEFAULT_RETRIES			9
+#define MRVL_PER_PACKET_RATE			0x10
+#define MRVL_MAX_BCN_SIZE			440
+#define CMD_OPTION_WAITFORRSP			0x0002
+
+/* Return command are almost always the same as the host command, but with
+ * bit 15 set high.  There are a few exceptions, though...
+ */
+#define CMD_RET(cmd)			(0x8000 | cmd)
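+/* For example, CMD_RET(CMD_GET_HW_SPEC) is 0x8003; lbtf_process_rx_command()
+ * uses this to match a response against the outstanding command. */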
+
+/* Command codes */
+#define CMD_GET_HW_SPEC				0x0003
+#define CMD_802_11_RESET			0x0005
+#define CMD_MAC_MULTICAST_ADR			0x0010
+#define CMD_802_11_RADIO_CONTROL		0x001c
+#define CMD_802_11_RF_CHANNEL			0x001d
+#define CMD_802_11_RF_TX_POWER			0x001e
+#define CMD_MAC_CONTROL				0x0028
+#define CMD_802_11_MAC_ADDRESS			0x004d
+#define	CMD_SET_BOOT2_VER			0x00a5
+#define CMD_802_11_BEACON_CTRL			0x00b0
+#define CMD_802_11_BEACON_SET			0x00cb
+#define CMD_802_11_SET_MODE			0x00cc
+#define CMD_802_11_SET_BSSID			0x00cd
+
+#define CMD_ACT_GET			0x0000
+#define CMD_ACT_SET			0x0001
+
+/* Define action or option for CMD_802_11_RESET */
+#define CMD_ACT_HALT			0x0003
+
+/* Define action or option for CMD_MAC_CONTROL */
+#define CMD_ACT_MAC_RX_ON			0x0001
+#define CMD_ACT_MAC_TX_ON			0x0002
+#define CMD_ACT_MAC_MULTICAST_ENABLE		0x0020
+#define CMD_ACT_MAC_BROADCAST_ENABLE		0x0040
+#define CMD_ACT_MAC_PROMISCUOUS_ENABLE		0x0080
+#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE	0x0100
+
+/* Define action or option for CMD_802_11_RADIO_CONTROL */
+#define CMD_TYPE_AUTO_PREAMBLE		0x0001
+#define CMD_TYPE_SHORT_PREAMBLE		0x0002
+#define CMD_TYPE_LONG_PREAMBLE		0x0003
+
+#define TURN_ON_RF			0x01
+#define RADIO_ON			0x01
+#define RADIO_OFF			0x00
+
+#define SET_AUTO_PREAMBLE		0x05
+#define SET_SHORT_PREAMBLE		0x03
+#define SET_LONG_PREAMBLE		0x01
+
+/* Define action or option for CMD_802_11_RF_CHANNEL */
+#define CMD_OPT_802_11_RF_CHANNEL_GET	0x00
+#define CMD_OPT_802_11_RF_CHANNEL_SET	0x01
+
+/* Codes for CMD_802_11_SET_MODE */
+enum lbtf_mode {
+	LBTF_PASSIVE_MODE,
+	LBTF_STA_MODE,
+	LBTF_AP_MODE,
+};
+
+/** Card Event definition */
+#define MACREG_INT_CODE_FIRMWARE_READY		48
+/** Buffer Constants */
+
+/*	The SQ memory PPA and DPA are each 8 DWORDs that hold the physical
+*	addresses of TxPD buffers. The station has only 8 TxPDs available,
+*	whereas the driver has more local TxPDs. Each TxPD in host memory is
+*	associated with a Tx control node. The driver maintains 8 RxPD
+*	descriptors for the station firmware to store Rx packet information.
+*
+*	The current version of the MAC has a 32x6 multicast address buffer.
+*
+*	802.11b can have up to 14 channels; the driver keeps the
+*	BSSID (MAC address) of each AP or ad hoc station it has sensed.
+*/
+
+#define MRVDRV_MAX_MULTICAST_LIST_SIZE	32
+#define LBS_NUM_CMD_BUFFERS             10
+#define LBS_CMD_BUFFER_SIZE             (2 * 1024)
+#define MRVDRV_MAX_CHANNEL_SIZE		14
+#define MRVDRV_SNAP_HEADER_LEN          8
+
+#define	LBS_UPLD_SIZE			2312
+#define DEV_NAME_LEN			32
+
+/** Misc constants */
+/* This section defines 802.11-specific constants */
+
+#define MRVDRV_MAX_REGION_CODE			6
+/**
+ * Region codes
+ */
+#define LBTF_REGDOMAIN_US	0x10
+#define LBTF_REGDOMAIN_CA	0x20
+#define LBTF_REGDOMAIN_EU	0x30
+#define LBTF_REGDOMAIN_SP	0x31
+#define LBTF_REGDOMAIN_FR	0x32
+#define LBTF_REGDOMAIN_JP	0x40
+
+#define SBI_EVENT_CAUSE_SHIFT		3
+
+/** RxPD status */
+
+#define MRVDRV_RXPD_STATUS_OK                0x0001
+
+
+/* This is for firmware specific length */
+#define EXTRA_LEN	36
+
+#define MRVDRV_ETH_TX_PACKET_BUFFER_SIZE \
+	(ETH_FRAME_LEN + sizeof(struct txpd) + EXTRA_LEN)
+
+#define MRVDRV_ETH_RX_PACKET_BUFFER_SIZE \
+	(ETH_FRAME_LEN + sizeof(struct rxpd) \
+	 + MRVDRV_SNAP_HEADER_LEN + EXTRA_LEN)
+
+#define	CMD_F_HOSTCMD		(1 << 0)
+#define FW_CAPINFO_WPA  	(1 << 0)
+
+#define RF_ANTENNA_1		0x1
+#define RF_ANTENNA_2		0x2
+#define RF_ANTENNA_AUTO		0xFFFF
+
+#define LBTF_EVENT_BCN_SENT	55
+
+/** Global Variable Declaration */
+/** mv_ms_type */
+enum mv_ms_type {
+	MVMS_DAT = 0,
+	MVMS_CMD = 1,
+	MVMS_TXDONE = 2,
+	MVMS_EVENT
+};
+
+extern struct workqueue_struct *lbtf_wq;
+
+struct lbtf_private;
+
+struct lbtf_offset_value {
+	u32 offset;
+	u32 value;
+};
+
+struct channel_range {
+	u8 regdomain;
+	u8 start;
+	u8 end; /* exclusive (channel must be less than end) */
+};
+
+struct if_usb_card;
+
+/** Private structure for the MV device */
+struct lbtf_private {
+	void *card;
+	struct ieee80211_hw *hw;
+
+	/* Command response buffer */
+	u8 cmd_resp_buff[LBS_UPLD_SIZE];
+	/* Download sent:
+	   bit0 1/0=data_sent/data_tx_done,
+	   bit1 1/0=cmd_sent/cmd_tx_done,
+	   all other bits reserved 0 */
+	struct ieee80211_vif *vif;
+
+	struct work_struct cmd_work;
+	struct work_struct tx_work;
+	/** Hardware access */
+	int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb);
+	int (*hw_prog_firmware) (struct if_usb_card *cardp);
+	int (*hw_reset_device) (struct if_usb_card *cardp);
+
+
+	/** Wlan adapter data structure*/
+	/** STATUS variables */
+	u32 fwrelease;
+	u32 fwcapinfo;
+	/* protected with big lock */
+
+	struct mutex lock;
+
+	/** command-related variables */
+	u16 seqnum;
+	/* protected by big lock */
+
+	struct cmd_ctrl_node *cmd_array;
+	/** Current command */
+	struct cmd_ctrl_node *cur_cmd;
+	/** command Queues */
+	/** Free command buffers */
+	struct list_head cmdfreeq;
+	/** Pending command buffers */
+	struct list_head cmdpendingq;
+
+	/** spin locks */
+	spinlock_t driver_lock;
+
+	/** Timers */
+	struct timer_list command_timer;
+	int nr_retries;
+	int cmd_timed_out;
+
+	u8 cmd_response_rxed;
+
+	/** capability Info used in Association, start, join */
+	u16 capability;
+
+	/** MAC address information */
+	u8 current_addr[ETH_ALEN];
+	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
+	u32 nr_of_multicastmacaddr;
+	int cur_freq;
+
+	struct sk_buff *skb_to_tx;
+	struct sk_buff *tx_skb;
+
+	/** NIC Operation characteristics */
+	u16 mac_control;
+	u16 regioncode;
+	struct channel_range range;
+
+	u8 radioon;
+	u32 preamble;
+
+	struct ieee80211_channel channels[14];
+	struct ieee80211_rate rates[12];
+	struct ieee80211_supported_band band;
+	struct lbtf_offset_value offsetvalue;
+
+	u8 fw_ready;
+	u8 surpriseremoved;
+	struct sk_buff_head bc_ps_buf;
+};
+
+/* 802.11-related definitions */
+
+/* TxPD descriptor */
+struct txpd {
+	/* Current Tx packet status */
+	__le32 tx_status;
+	/* Tx control */
+	__le32 tx_control;
+	__le32 tx_packet_location;
+	/* Tx packet length */
+	__le16 tx_packet_length;
+	/* First 2 byte of destination MAC address */
+	u8 tx_dest_addr_high[2];
+	/* Last 4 byte of destination MAC address */
+	u8 tx_dest_addr_low[4];
+	/* Pkt Priority */
+	u8 priority;
+	/* Pkt Transmit Power control */
+	u8 powermgmt;
+	/* Time the packet has been queued in the driver (units = 2ms) */
+	u8 pktdelay_2ms;
+	/* reserved */
+	u8 reserved1;
+};
+
+/* RxPD Descriptor */
+struct rxpd {
+	/* Current Rx packet status */
+	__le16 status;
+
+	/* SNR */
+	u8 snr;
+
+	/* Tx control */
+	u8 rx_control;
+
+	/* Pkt length */
+	__le16 pkt_len;
+
+	/* Noise Floor */
+	u8 nf;
+
+	/* Rx Packet Rate */
+	u8 rx_rate;
+
+	/* Pkt addr */
+	__le32 pkt_ptr;
+
+	/* Next Rx RxPD addr */
+	__le32 next_rxpd_ptr;
+
+	/* Pkt Priority */
+	u8 priority;
+	u8 reserved[3];
+};
+
+struct cmd_header {
+	__le16 command;
+	__le16 size;
+	__le16 seqnum;
+	__le16 result;
+} __attribute__ ((packed));
+
+struct cmd_ctrl_node {
+	struct list_head list;
+	int result;
+	/* command response */
+	int (*callback)(struct lbtf_private *,
+			unsigned long, struct cmd_header *);
+	unsigned long callback_arg;
+	/* command data */
+	struct cmd_header *cmdbuf;
+	/* wait queue */
+	u16 cmdwaitqwoken;
+	wait_queue_head_t cmdwait_q;
+};
+
+/*
+ * Define data structure for CMD_GET_HW_SPEC
+ * This structure defines the response for the GET_HW_SPEC command
+ */
+struct cmd_ds_get_hw_spec {
+	struct cmd_header hdr;
+
+	/* HW Interface version number */
+	__le16 hwifversion;
+	/* HW version number */
+	__le16 version;
+	/* Max number of TxPD FW can handle */
+	__le16 nr_txpd;
+	/* Max no of Multicast address */
+	__le16 nr_mcast_adr;
+	/* MAC address */
+	u8 permanentaddr[6];
+
+	/* region Code */
+	__le16 regioncode;
+
+	/* Number of antenna used */
+	__le16 nr_antenna;
+
+	/* FW release number, example 0x01030304 = 2.3.4p1 */
+	__le32 fwrelease;
+
+	/* Base Address of TxPD queue */
+	__le32 wcb_base;
+	/* Read Pointer of RxPd queue */
+	__le32 rxpd_rdptr;
+
+	/* Write Pointer of RxPd queue */
+	__le32 rxpd_wrptr;
+
+	/*FW/HW capability */
+	__le32 fwcapinfo;
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_control {
+	struct cmd_header hdr;
+	__le16 action;
+	u16 reserved;
+};
+
+struct cmd_ds_802_11_mac_address {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t macadd[ETH_ALEN];
+};
+
+struct cmd_ds_mac_multicast_addr {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 nr_of_adrs;
+	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
+};
+
+struct cmd_ds_set_mode {
+	struct cmd_header hdr;
+
+	__le16 mode;
+};
+
+struct cmd_ds_set_bssid {
+	struct cmd_header hdr;
+
+	u8 bssid[6];
+	u8 activate;
+};
+
+struct cmd_ds_802_11_radio_control {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 control;
+};
+
+
+struct cmd_ds_802_11_rf_channel {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 channel;
+	__le16 rftype;      /* unused */
+	__le16 reserved;    /* unused */
+	u8 channellist[32]; /* unused */
+};
+
+struct cmd_ds_set_boot2_ver {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 version;
+};
+
+struct cmd_ds_802_11_reset {
+	struct cmd_header hdr;
+
+	__le16 action;
+};
+
+struct cmd_ds_802_11_beacon_control {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 beacon_enable;
+	__le16 beacon_period;
+};
+
+struct cmd_ds_802_11_beacon_set {
+	struct cmd_header hdr;
+
+	__le16 len;
+	u8 beacon[MRVL_MAX_BCN_SIZE];
+};
+
+struct lbtf_private;
+struct cmd_ctrl_node;
+
+/** Function Prototype Declaration */
+void lbtf_set_mac_control(struct lbtf_private *priv);
+
+int lbtf_free_cmd_buffer(struct lbtf_private *priv);
+
+int lbtf_allocate_cmd_buffer(struct lbtf_private *priv);
+int lbtf_execute_next_command(struct lbtf_private *priv);
+int lbtf_set_radio_control(struct lbtf_private *priv);
+int lbtf_update_hw_spec(struct lbtf_private *priv);
+int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv);
+void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode);
+void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid);
+int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr);
+
+int lbtf_set_channel(struct lbtf_private *priv, u8 channel);
+
+int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon);
+int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
+		     int beacon_int);
+
+
+int lbtf_process_rx_command(struct lbtf_private *priv);
+void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
+			  int result);
+void lbtf_cmd_response_rx(struct lbtf_private *priv);
+
+/* main.c */
+struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
+	int *cfp_no);
+struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev);
+int lbtf_remove_card(struct lbtf_private *priv);
+int lbtf_start_card(struct lbtf_private *priv);
+int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
+void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
+void lbtf_bcn_sent(struct lbtf_private *priv);
+
+/* support functions for cmd.c */
+/* lbtf_cmd() infers the size of the buffer to copy data back into, from
+   the size of the target of the pointer. Since the command to be sent
+   may often be smaller, that size is set in cmd->size by the caller.*/
+#define lbtf_cmd(priv, cmdnr, cmd, cb, cb_arg)	({		\
+	uint16_t __sz = le16_to_cpu((cmd)->hdr.size);		\
+	(cmd)->hdr.size = cpu_to_le16(sizeof(*(cmd)));		\
+	__lbtf_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg);	\
+})
+
+#define lbtf_cmd_with_response(priv, cmdnr, cmd)	\
+	lbtf_cmd(priv, cmdnr, cmd, lbtf_cmd_copyback, (unsigned long) (cmd))
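
A hedged usage sketch of the command path, mirroring how the driver's cmd code is expected to call the macro: the caller zeroes the command, sets hdr.size to the number of bytes to transmit, and lbtf_cmd_copyback() copies the firmware response back into the same structure. CMD_GET_HW_SPEC is assumed to be defined elsewhere in this header; the function name below is illustrative only.

/* Illustrative sketch, not part of this patch. */
static int example_query_hw_spec(struct lbtf_private *priv)
{
	struct cmd_ds_get_hw_spec cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* Tell the macro how much of 'cmd' to actually send down. */
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);

	/* The call is expected to wait for the firmware response, which
	 * lbtf_cmd_copyback() copies back into 'cmd'. */
	return lbtf_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd);
}
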
+
+void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
+	struct cmd_header *in_cmd, int in_cmd_size);
+
+int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
+	      struct cmd_header *in_cmd, int in_cmd_size,
+	      int (*callback)(struct lbtf_private *, unsigned long,
+			      struct cmd_header *),
+	      unsigned long callback_arg);
+
+int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
+		     struct cmd_header *resp);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
new file mode 100644
index 0000000..feff945
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -0,0 +1,662 @@
+/*
+ *  Copyright (C) 2008, cozybit Inc.
+ *  Copyright (C) 2003-2006, Marvell International Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ */
+#include "libertas_tf.h"
+#include "linux/etherdevice.h"
+
+#define DRIVER_RELEASE_VERSION "004.p0"
+/* thinfirm version: 5.132.X.pX */
+#define LBTF_FW_VER_MIN		0x05840300
+#define LBTF_FW_VER_MAX		0x0584ffff
+#define QOS_CONTROL_LEN		2
+
+static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
+struct workqueue_struct *lbtf_wq;
+
+static const struct ieee80211_channel lbtf_channels[] = {
+	{ .center_freq = 2412, .hw_value = 1 },
+	{ .center_freq = 2417, .hw_value = 2 },
+	{ .center_freq = 2422, .hw_value = 3 },
+	{ .center_freq = 2427, .hw_value = 4 },
+	{ .center_freq = 2432, .hw_value = 5 },
+	{ .center_freq = 2437, .hw_value = 6 },
+	{ .center_freq = 2442, .hw_value = 7 },
+	{ .center_freq = 2447, .hw_value = 8 },
+	{ .center_freq = 2452, .hw_value = 9 },
+	{ .center_freq = 2457, .hw_value = 10 },
+	{ .center_freq = 2462, .hw_value = 11 },
+	{ .center_freq = 2467, .hw_value = 12 },
+	{ .center_freq = 2472, .hw_value = 13 },
+	{ .center_freq = 2484, .hw_value = 14 },
+};
+
+/* This table contains the hardware specific values for the modulation rates. */
+static const struct ieee80211_rate lbtf_rates[] = {
+	{ .bitrate = 10,
+	  .hw_value = 0, },
+	{ .bitrate = 20,
+	  .hw_value = 1,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55,
+	  .hw_value = 2,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110,
+	  .hw_value = 3,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 60,
+	  .hw_value = 5,
+	  .flags = 0 },
+	{ .bitrate = 90,
+	  .hw_value = 6,
+	  .flags = 0 },
+	{ .bitrate = 120,
+	  .hw_value = 7,
+	  .flags = 0 },
+	{ .bitrate = 180,
+	  .hw_value = 8,
+	  .flags = 0 },
+	{ .bitrate = 240,
+	  .hw_value = 9,
+	  .flags = 0 },
+	{ .bitrate = 360,
+	  .hw_value = 10,
+	  .flags = 0 },
+	{ .bitrate = 480,
+	  .hw_value = 11,
+	  .flags = 0 },
+	{ .bitrate = 540,
+	  .hw_value = 12,
+	  .flags = 0 },
+};
+
+static void lbtf_cmd_work(struct work_struct *work)
+{
+	struct lbtf_private *priv = container_of(work, struct lbtf_private,
+					 cmd_work);
+	spin_lock_irq(&priv->driver_lock);
+	/* command response? */
+	if (priv->cmd_response_rxed) {
+		priv->cmd_response_rxed = 0;
+		spin_unlock_irq(&priv->driver_lock);
+		lbtf_process_rx_command(priv);
+		spin_lock_irq(&priv->driver_lock);
+	}
+
+	if (priv->cmd_timed_out && priv->cur_cmd) {
+		struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
+
+		if (++priv->nr_retries > 10) {
+			lbtf_complete_command(priv, cmdnode,
+					      -ETIMEDOUT);
+			priv->nr_retries = 0;
+		} else {
+			priv->cur_cmd = NULL;
+
+			/* Stick it back at the _top_ of the pending
+			 * queue for immediate resubmission */
+			list_add(&cmdnode->list, &priv->cmdpendingq);
+		}
+	}
+	priv->cmd_timed_out = 0;
+	spin_unlock_irq(&priv->driver_lock);
+
+	if (!priv->fw_ready)
+		return;
+	/* Execute the next command */
+	if (!priv->cur_cmd)
+		lbtf_execute_next_command(priv);
+}
+
+/**
+ *  lbtf_setup_firmware: initialize firmware.
+ *
+ *  @priv    A pointer to struct lbtf_private structure
+ *
+ *  Returns: 0 on success.
+ */
+static int lbtf_setup_firmware(struct lbtf_private *priv)
+{
+	int ret = -1;
+
+	/*
+	 * Read MAC address from HW
+	 */
+	memset(priv->current_addr, 0xff, ETH_ALEN);
+	ret = lbtf_update_hw_spec(priv);
+	if (ret) {
+		ret = -1;
+		goto done;
+	}
+
+	lbtf_set_mac_control(priv);
+	lbtf_set_radio_control(priv);
+
+	ret = 0;
+done:
+	return ret;
+}
+
+/**
+ *  This function handles command timeouts.  It marks the command as timed
+ *  out and schedules the command worker, which will resubmit it.
+ */
+static void command_timer_fn(unsigned long data)
+{
+	struct lbtf_private *priv = (struct lbtf_private *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (!priv->cur_cmd) {
+		printk(KERN_DEBUG "libertastf: command timer expired; "
+				  "no pending command\n");
+		goto out;
+	}
+
+	printk(KERN_DEBUG "libertas: command %x timed out\n",
+		le16_to_cpu(priv->cur_cmd->cmdbuf->command));
+
+	priv->cmd_timed_out = 1;
+	queue_work(lbtf_wq, &priv->cmd_work);
+out:
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+}
+
+static int lbtf_init_adapter(struct lbtf_private *priv)
+{
+	memset(priv->current_addr, 0xff, ETH_ALEN);
+	mutex_init(&priv->lock);
+
+	priv->vif = NULL;
+	setup_timer(&priv->command_timer, command_timer_fn,
+		(unsigned long)priv);
+
+	INIT_LIST_HEAD(&priv->cmdfreeq);
+	INIT_LIST_HEAD(&priv->cmdpendingq);
+
+	spin_lock_init(&priv->driver_lock);
+
+	/* Allocate the command buffers */
+	if (lbtf_allocate_cmd_buffer(priv))
+		return -1;
+
+	return 0;
+}
+
+static void lbtf_free_adapter(struct lbtf_private *priv)
+{
+	lbtf_free_cmd_buffer(priv);
+	del_timer(&priv->command_timer);
+}
+
+static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct lbtf_private *priv = hw->priv;
+
+	priv->skb_to_tx = skb;
+	queue_work(lbtf_wq, &priv->tx_work);
+	/*
+	 * queue will be restarted when we receive transmission feedback if
+	 * there are no buffered multicast frames to send
+	 */
+	ieee80211_stop_queues(priv->hw);
+	return 0;
+}
+
+static void lbtf_tx_work(struct work_struct *work)
+{
+	struct lbtf_private *priv = container_of(work, struct lbtf_private,
+					 tx_work);
+	unsigned int len;
+	struct ieee80211_tx_info *info;
+	struct txpd *txpd;
+	struct sk_buff *skb = NULL;
+	int err;
+
+	if ((priv->vif->type == NL80211_IFTYPE_AP) &&
+	    (!skb_queue_empty(&priv->bc_ps_buf)))
+		skb = skb_dequeue(&priv->bc_ps_buf);
+	else if (priv->skb_to_tx) {
+		skb = priv->skb_to_tx;
+		priv->skb_to_tx = NULL;
+	} else
+		return;
+
+	len = skb->len;
+	info  = IEEE80211_SKB_CB(skb);
+	txpd = (struct txpd *)  skb_push(skb, sizeof(struct txpd));
+
+	if (priv->surpriseremoved) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	memset(txpd, 0, sizeof(struct txpd));
+	/* Activate per-packet rate selection */
+	txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE |
+			     ieee80211_get_tx_rate(priv->hw, info)->hw_value);
+
+	/* copy destination address from 802.11 header */
+	memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
+		ETH_ALEN);
+	txpd->tx_packet_length = cpu_to_le16(len);
+	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
+	BUG_ON(priv->tx_skb);
+	spin_lock_irq(&priv->driver_lock);
+	priv->tx_skb = skb;
+	err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
+	spin_unlock_irq(&priv->driver_lock);
+	if (err) {
+		dev_kfree_skb_any(skb);
+		priv->tx_skb = NULL;
+	}
+}
+
+static int lbtf_op_start(struct ieee80211_hw *hw)
+{
+	struct lbtf_private *priv = hw->priv;
+	void *card = priv->card;
+	int ret = -1;
+
+	if (!priv->fw_ready)
+		/* Upload firmware */
+		if (priv->hw_prog_firmware(card))
+			goto err_prog_firmware;
+
+	/* poke the firmware */
+	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
+	priv->radioon = RADIO_ON;
+	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
+	ret = lbtf_setup_firmware(priv);
+	if (ret)
+		goto err_prog_firmware;
+
+	if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
+	    (priv->fwrelease > LBTF_FW_VER_MAX)) {
+		ret = -1;
+		goto err_prog_firmware;
+	}
+
+	printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
+	return 0;
+
+err_prog_firmware:
+	priv->hw_reset_device(card);
+	return ret;
+}
+
+static void lbtf_op_stop(struct ieee80211_hw *hw)
+{
+	struct lbtf_private *priv = hw->priv;
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	struct cmd_ctrl_node *cmdnode;
+	/* Flush pending command nodes */
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
+		cmdnode->result = -ENOENT;
+		cmdnode->cmdwaitqwoken = 1;
+		wake_up_interruptible(&cmdnode->cmdwait_q);
+	}
+
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+	cancel_work_sync(&priv->cmd_work);
+	cancel_work_sync(&priv->tx_work);
+	while ((skb = skb_dequeue(&priv->bc_ps_buf)))
+		dev_kfree_skb_any(skb);
+	priv->radioon = RADIO_OFF;
+	lbtf_set_radio_control(priv);
+
+	return;
+}
+
+static int lbtf_op_add_interface(struct ieee80211_hw *hw,
+			struct ieee80211_if_init_conf *conf)
+{
+	struct lbtf_private *priv = hw->priv;
+	if (priv->vif != NULL)
+		return -EOPNOTSUPP;
+
+	priv->vif = conf->vif;
+	switch (conf->type) {
+	case NL80211_IFTYPE_MESH_POINT:
+	case NL80211_IFTYPE_AP:
+		lbtf_set_mode(priv, LBTF_AP_MODE);
+		break;
+	case NL80211_IFTYPE_STATION:
+		lbtf_set_mode(priv, LBTF_STA_MODE);
+		break;
+	default:
+		priv->vif = NULL;
+		return -EOPNOTSUPP;
+	}
+	lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
+	return 0;
+}
+
+static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
+			struct ieee80211_if_init_conf *conf)
+{
+	struct lbtf_private *priv = hw->priv;
+
+	if (priv->vif->type == NL80211_IFTYPE_AP ||
+	    priv->vif->type == NL80211_IFTYPE_MESH_POINT)
+		lbtf_beacon_ctrl(priv, 0, 0);
+	lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
+	lbtf_set_bssid(priv, 0, NULL);
+	priv->vif = NULL;
+}
+
+static int lbtf_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
+{
+	struct lbtf_private *priv = hw->priv;
+	if (conf->channel->center_freq != priv->cur_freq) {
+		priv->cur_freq = conf->channel->center_freq;
+		lbtf_set_channel(priv, conf->channel->hw_value);
+	}
+	return 0;
+}
+
+static int lbtf_op_config_interface(struct ieee80211_hw *hw,
+			struct ieee80211_vif *vif,
+			struct ieee80211_if_conf *conf)
+{
+	struct lbtf_private *priv = hw->priv;
+	struct sk_buff *beacon;
+
+	switch (priv->vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
+		beacon = ieee80211_beacon_get(hw, vif);
+		if (beacon) {
+			lbtf_beacon_set(priv, beacon);
+			kfree_skb(beacon);
+			lbtf_beacon_ctrl(priv, 1, hw->conf.beacon_int);
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (conf->bssid) {
+		u8 null_bssid[ETH_ALEN] = {0};
+		bool activate = compare_ether_addr(conf->bssid, null_bssid);
+		lbtf_set_bssid(priv, activate, conf->bssid);
+	}
+
+	return 0;
+}
+
+#define SUPPORTED_FIF_FLAGS  (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
+static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
+			unsigned int changed_flags,
+			unsigned int *new_flags,
+			int mc_count, struct dev_mc_list *mclist)
+{
+	struct lbtf_private *priv = hw->priv;
+	int old_mac_control = priv->mac_control;
+	int i;
+	changed_flags &= SUPPORTED_FIF_FLAGS;
+	*new_flags &= SUPPORTED_FIF_FLAGS;
+
+	if (!changed_flags)
+		return;
+
+	if (*new_flags & (FIF_PROMISC_IN_BSS))
+		priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+	else
+		priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+	if (*new_flags & (FIF_ALLMULTI) ||
+	    mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
+		priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
+		priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
+	} else if (mc_count) {
+		priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
+		priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
+		priv->nr_of_multicastmacaddr = mc_count;
+		for (i = 0; i < mc_count; i++) {
+			if (!mclist)
+				break;
+			memcpy(&priv->multicastlist[i], mclist->da_addr,
+					ETH_ALEN);
+			mclist = mclist->next;
+		}
+		lbtf_cmd_set_mac_multicast_addr(priv);
+	} else {
+		priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE |
+				       CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
+		if (priv->nr_of_multicastmacaddr) {
+			priv->nr_of_multicastmacaddr = 0;
+			lbtf_cmd_set_mac_multicast_addr(priv);
+		}
+	}
+
+
+	if (priv->mac_control != old_mac_control)
+		lbtf_set_mac_control(priv);
+}
+
+static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
+			struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *bss_conf,
+			u32 changes)
+{
+	struct lbtf_private *priv = hw->priv;
+
+	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+		if (bss_conf->use_short_preamble)
+			priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
+		else
+			priv->preamble = CMD_TYPE_LONG_PREAMBLE;
+		lbtf_set_radio_control(priv);
+	}
+
+	return;
+}
+
+static const struct ieee80211_ops lbtf_ops = {
+	.tx			= lbtf_op_tx,
+	.start			= lbtf_op_start,
+	.stop			= lbtf_op_stop,
+	.add_interface		= lbtf_op_add_interface,
+	.remove_interface	= lbtf_op_remove_interface,
+	.config			= lbtf_op_config,
+	.config_interface	= lbtf_op_config_interface,
+	.configure_filter	= lbtf_op_configure_filter,
+	.bss_info_changed	= lbtf_op_bss_info_changed,
+};
+
+int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
+{
+	struct ieee80211_rx_status stats;
+	struct rxpd *prxpd;
+	int need_padding;
+	unsigned int flags;
+	struct ieee80211_hdr *hdr;
+
+	prxpd = (struct rxpd *) skb->data;
+
+	memset(&stats, 0, sizeof(stats));
+	if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
+		stats.flag |= RX_FLAG_FAILED_FCS_CRC;
+	stats.freq = priv->cur_freq;
+	stats.band = IEEE80211_BAND_2GHZ;
+	stats.signal = prxpd->snr;
+	stats.noise = prxpd->nf;
+	stats.qual = prxpd->snr - prxpd->nf;
+	/* Marvell rate index has a hole at value 4 */
+	if (prxpd->rx_rate > 4)
+		--prxpd->rx_rate;
+	stats.rate_idx = prxpd->rx_rate;
+	skb_pull(skb, sizeof(struct rxpd));
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	flags = le32_to_cpu(*(__le32 *)(skb->data + 4));
+
+	need_padding = ieee80211_is_data_qos(hdr->frame_control);
+	need_padding ^= ieee80211_has_a4(hdr->frame_control);
+	need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
+			(*ieee80211_get_qos_ctl(hdr) &
+			 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);
+
+	if (need_padding) {
+		memmove(skb->data + 2, skb->data, skb->len);
+		skb_reserve(skb, 2);
+	}
+
+	ieee80211_rx_irqsafe(priv->hw, skb, &stats);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lbtf_rx);
+
+/**
+ * lbtf_add_card: Add and initialize the card, no fw upload yet.
+ *
+ *  @card    A pointer to the card
+ *  @dmdev   A pointer to the parent struct device
+ *
+ *  Returns: pointer to struct lbtf_private, or NULL on failure.
+ */
+struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
+{
+	struct ieee80211_hw *hw;
+	struct lbtf_private *priv = NULL;
+
+	hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
+	if (!hw)
+		goto done;
+
+	priv = hw->priv;
+	if (lbtf_init_adapter(priv))
+		goto err_init_adapter;
+
+	priv->hw = hw;
+	priv->card = card;
+	priv->tx_skb = NULL;
+
+	hw->queues = 1;
+	hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+	hw->extra_tx_headroom = sizeof(struct txpd);
+	memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
+	memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
+	priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates);
+	priv->band.bitrates = priv->rates;
+	priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
+	priv->band.channels = priv->channels;
+	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	skb_queue_head_init(&priv->bc_ps_buf);
+
+	SET_IEEE80211_DEV(hw, dmdev);
+
+	INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
+	INIT_WORK(&priv->tx_work, lbtf_tx_work);
+	if (ieee80211_register_hw(hw))
+		goto err_init_adapter;
+
+	goto done;
+
+err_init_adapter:
+	lbtf_free_adapter(priv);
+	ieee80211_free_hw(hw);
+	priv = NULL;
+
+done:
+	return priv;
+}
+EXPORT_SYMBOL_GPL(lbtf_add_card);
+
+
+int lbtf_remove_card(struct lbtf_private *priv)
+{
+	struct ieee80211_hw *hw = priv->hw;
+
+	priv->surpriseremoved = 1;
+	del_timer(&priv->command_timer);
+	lbtf_free_adapter(priv);
+	priv->hw = NULL;
+	ieee80211_unregister_hw(hw);
+	ieee80211_free_hw(hw);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lbtf_remove_card);
+
+void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
+	memset(&info->status, 0, sizeof(info->status));
+	/*
+	 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211
+	 * default pid rc algorithm.
+	 *
+	 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
+	 */
+	info->status.excessive_retries = fail ? 1 : 0;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	skb_pull(priv->tx_skb, sizeof(struct txpd));
+	ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
+	priv->tx_skb = NULL;
+	if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf))
+		ieee80211_wake_queues(priv->hw);
+	else
+		queue_work(lbtf_wq, &priv->tx_work);
+}
+EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback);
+
+void lbtf_bcn_sent(struct lbtf_private *priv)
+{
+	struct sk_buff *skb = NULL;
+
+	if (priv->vif->type != NL80211_IFTYPE_AP)
+		return;
+
+	if (skb_queue_empty(&priv->bc_ps_buf)) {
+		bool tx_buff_bc = 0;
+
+		while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
+			skb_queue_tail(&priv->bc_ps_buf, skb);
+			tx_buff_bc = 1;
+		}
+		if (tx_buff_bc) {
+			ieee80211_stop_queues(priv->hw);
+			queue_work(lbtf_wq, &priv->tx_work);
+		}
+	}
+
+	skb = ieee80211_beacon_get(priv->hw, priv->vif);
+
+	if (skb) {
+		lbtf_beacon_set(priv, skb);
+		kfree_skb(skb);
+	}
+}
+EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
+
+static int __init lbtf_init_module(void)
+{
+	lbtf_wq = create_workqueue("libertastf");
+	if (lbtf_wq == NULL) {
+		printk(KERN_ERR "libertastf: couldn't create workqueue\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void __exit lbtf_exit_module(void)
+{
+	destroy_workqueue(lbtf_wq);
+}
+
+module_init(lbtf_init_module);
+module_exit(lbtf_exit_module);
+
+MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library");
+MODULE_AUTHOR("Cozybit Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 248d31a..c9e4a43 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -14,6 +14,8 @@
  * - RX filtering based on filter configuration (data->rx_filter)
  */
 
+#include <linux/list.h>
+#include <linux/spinlock.h>
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <linux/if_arp.h>
@@ -28,11 +30,56 @@
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
 
+struct hwsim_vif_priv {
+	u32 magic;
+};
+
+#define HWSIM_VIF_MAGIC	0x69537748
+
+static inline void hwsim_check_magic(struct ieee80211_vif *vif)
+{
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+	WARN_ON(vp->magic != HWSIM_VIF_MAGIC);
+}
+
+static inline void hwsim_set_magic(struct ieee80211_vif *vif)
+{
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+	vp->magic = HWSIM_VIF_MAGIC;
+}
+
+static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
+{
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+	vp->magic = 0;
+}
+
+struct hwsim_sta_priv {
+	u32 magic;
+};
+
+#define HWSIM_STA_MAGIC	0x6d537748
+
+static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
+{
+	struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+	WARN_ON(sp->magic != HWSIM_STA_MAGIC);
+}
+
+static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta)
+{
+	struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+	sp->magic = HWSIM_STA_MAGIC;
+}
+
+static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
+{
+	struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+	sp->magic = 0;
+}
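
These helpers rely on mac80211 handing the driver a private scratch area inside each vif/sta (vif->drv_priv, sta->drv_priv); the sizes are advertised later in this patch via hw->vif_data_size and hw->sta_data_size. A minimal sketch of the same pattern for a hypothetical driver, using the interface API of this kernel version:

/* Illustrative only: per-interface driver state kept in vif->drv_priv. */
struct example_vif_priv {
	u32 tx_count;
};

static int example_add_interface(struct ieee80211_hw *hw,
				 struct ieee80211_if_init_conf *conf)
{
	struct example_vif_priv *vp = (void *)conf->vif->drv_priv;

	/* Valid only if hw->vif_data_size was set to sizeof(*vp) before
	 * ieee80211_register_hw(). */
	vp->tx_count = 0;
	return 0;
}
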
 
 static struct class *hwsim_class;
 
-static struct ieee80211_hw **hwsim_radios;
-static int hwsim_radio_count;
 static struct net_device *hwsim_mon; /* global monitor netdev */
 
 
@@ -68,7 +115,12 @@
 	{ .bitrate = 540 }
 };
 
+static spinlock_t hwsim_radio_lock;
+static struct list_head hwsim_radios;
+
 struct mac80211_hwsim_data {
+	struct list_head list;
+	struct ieee80211_hw *hw;
 	struct device *dev;
 	struct ieee80211_supported_band band;
 	struct ieee80211_channel channels[ARRAY_SIZE(hwsim_channels)];
@@ -144,11 +196,11 @@
 }
 
 
-static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
-				   struct sk_buff *skb)
+static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+				    struct sk_buff *skb)
 {
-	struct mac80211_hwsim_data *data = hw->priv;
-	int i, ack = 0;
+	struct mac80211_hwsim_data *data = hw->priv, *data2;
+	bool ack = false;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_rx_status rx_status;
@@ -161,13 +213,13 @@
 	/* TODO: simulate signal strength (and optional packet drop) */
 
 	/* Copy skb to all enabled radios that are on the current frequency */
-	for (i = 0; i < hwsim_radio_count; i++) {
-		struct mac80211_hwsim_data *data2;
+	spin_lock(&hwsim_radio_lock);
+	list_for_each_entry(data2, &hwsim_radios, list) {
 		struct sk_buff *nskb;
 
-		if (hwsim_radios[i] == NULL || hwsim_radios[i] == hw)
+		if (data == data2)
 			continue;
-		data2 = hwsim_radios[i]->priv;
+
 		if (!data2->started || !data2->radio_enabled ||
 		    data->channel->center_freq != data2->channel->center_freq)
 			continue;
@@ -176,11 +228,12 @@
 		if (nskb == NULL)
 			continue;
 
-		if (memcmp(hdr->addr1, hwsim_radios[i]->wiphy->perm_addr,
+		if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
 			   ETH_ALEN) == 0)
-			ack = 1;
-		ieee80211_rx_irqsafe(hwsim_radios[i], nskb, &rx_status);
+			ack = true;
+		ieee80211_rx_irqsafe(data2->hw, nskb, &rx_status);
 	}
+	spin_unlock(&hwsim_radio_lock);
 
 	return ack;
 }
@@ -189,7 +242,7 @@
 static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct mac80211_hwsim_data *data = hw->priv;
-	int ack;
+	bool ack;
 	struct ieee80211_tx_info *txi;
 
 	mac80211_hwsim_monitor_rx(hw, skb);
@@ -210,6 +263,12 @@
 	ack = mac80211_hwsim_tx_frame(hw, skb);
 
 	txi = IEEE80211_SKB_CB(skb);
+
+	if (txi->control.vif)
+		hwsim_check_magic(txi->control.vif);
+	if (txi->control.sta)
+		hwsim_check_sta_magic(txi->control.sta);
+
 	memset(&txi->status, 0, sizeof(txi->status));
 	if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		if (ack)
@@ -246,6 +305,7 @@
 	printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
 	       wiphy_name(hw->wiphy), __func__, conf->type,
 	       print_mac(mac, conf->mac_addr));
+	hwsim_set_magic(conf->vif);
 	return 0;
 }
 
@@ -257,6 +317,8 @@
 	printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
 	       wiphy_name(hw->wiphy), __func__, conf->type,
 	       print_mac(mac, conf->mac_addr));
+	hwsim_check_magic(conf->vif);
+	hwsim_clear_magic(conf->vif);
 }
 
 
@@ -267,7 +329,9 @@
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 
-	if (vif->type != IEEE80211_IF_TYPE_AP)
+	hwsim_check_magic(vif);
+
+	if (vif->type != NL80211_IFTYPE_AP)
 		return;
 
 	skb = ieee80211_beacon_get(hw, vif);
@@ -341,7 +405,45 @@
 	*total_flags = data->rx_filter;
 }
 
+static int mac80211_hwsim_config_interface(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif,
+					   struct ieee80211_if_conf *conf)
+{
+	hwsim_check_magic(vif);
+	return 0;
+}
 
+static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
+					    struct ieee80211_vif *vif,
+					    struct ieee80211_bss_conf *info,
+					    u32 changed)
+{
+	hwsim_check_magic(vif);
+}
+
+static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      enum sta_notify_cmd cmd,
+				      struct ieee80211_sta *sta)
+{
+	hwsim_check_magic(vif);
+	switch (cmd) {
+	case STA_NOTIFY_ADD:
+		hwsim_set_sta_magic(sta);
+		break;
+	case STA_NOTIFY_REMOVE:
+		hwsim_clear_sta_magic(sta);
+		break;
+	}
+}
+
+static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta,
+				  bool set)
+{
+	hwsim_check_sta_magic(sta);
+	return 0;
+}
 
 static const struct ieee80211_ops mac80211_hwsim_ops =
 {
@@ -352,23 +454,30 @@
 	.remove_interface = mac80211_hwsim_remove_interface,
 	.config = mac80211_hwsim_config,
 	.configure_filter = mac80211_hwsim_configure_filter,
+	.config_interface = mac80211_hwsim_config_interface,
+	.bss_info_changed = mac80211_hwsim_bss_info_changed,
+	.sta_notify = mac80211_hwsim_sta_notify,
+	.set_tim = mac80211_hwsim_set_tim,
 };
 
 
 static void mac80211_hwsim_free(void)
 {
-	int i;
+	struct list_head tmplist, *i, *tmp;
+	struct mac80211_hwsim_data *data;
 
-	for (i = 0; i < hwsim_radio_count; i++) {
-		if (hwsim_radios[i]) {
-			struct mac80211_hwsim_data *data;
-			data = hwsim_radios[i]->priv;
-			ieee80211_unregister_hw(hwsim_radios[i]);
-			device_unregister(data->dev);
-			ieee80211_free_hw(hwsim_radios[i]);
-		}
+	INIT_LIST_HEAD(&tmplist);
+
+	spin_lock_bh(&hwsim_radio_lock);
+	list_for_each_safe(i, tmp, &hwsim_radios)
+		list_move(i, &tmplist);
+	spin_unlock_bh(&hwsim_radio_lock);
+
+	list_for_each_entry(data, &tmplist, list) {
+		ieee80211_unregister_hw(data->hw);
+		device_unregister(data->dev);
+		ieee80211_free_hw(data->hw);
 	}
-	kfree(hwsim_radios);
 	class_destroy(hwsim_class);
 }
 
@@ -398,37 +507,32 @@
 	struct ieee80211_hw *hw;
 	DECLARE_MAC_BUF(mac);
 
-	if (radios < 1 || radios > 65535)
+	if (radios < 1 || radios > 100)
 		return -EINVAL;
 
-	hwsim_radio_count = radios;
-	hwsim_radios = kcalloc(hwsim_radio_count,
-			       sizeof(struct ieee80211_hw *), GFP_KERNEL);
-	if (hwsim_radios == NULL)
-		return -ENOMEM;
+	spin_lock_init(&hwsim_radio_lock);
+	INIT_LIST_HEAD(&hwsim_radios);
 
 	hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
-	if (IS_ERR(hwsim_class)) {
-		kfree(hwsim_radios);
+	if (IS_ERR(hwsim_class))
 		return PTR_ERR(hwsim_class);
-	}
 
 	memset(addr, 0, ETH_ALEN);
 	addr[0] = 0x02;
 
-	for (i = 0; i < hwsim_radio_count; i++) {
+	for (i = 0; i < radios; i++) {
 		printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
 		       i);
 		hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
-		if (hw == NULL) {
+		if (!hw) {
 			printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
 			       "failed\n");
 			err = -ENOMEM;
 			goto failed;
 		}
-		hwsim_radios[i] = hw;
-
 		data = hw->priv;
+		data->hw = hw;
+
 		data->dev = device_create_drvdata(hwsim_class, NULL, 0, hw,
 						"hwsim%d", i);
 		if (IS_ERR(data->dev)) {
@@ -446,7 +550,15 @@
 		SET_IEEE80211_PERM_ADDR(hw, addr);
 
 		hw->channel_change_time = 1;
-		hw->queues = 1;
+		hw->queues = 4;
+		hw->wiphy->interface_modes =
+			BIT(NL80211_IFTYPE_STATION) |
+			BIT(NL80211_IFTYPE_AP);
+		hw->ampdu_queues = 1;
+
+		/* ask mac80211 to reserve space for magic */
+		hw->vif_data_size = sizeof(struct hwsim_vif_priv);
+		hw->sta_data_size = sizeof(struct hwsim_sta_priv);
 
 		memcpy(data->channels, hwsim_channels, sizeof(hwsim_channels));
 		memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
@@ -454,6 +566,19 @@
 		data->band.n_channels = ARRAY_SIZE(hwsim_channels);
 		data->band.bitrates = data->rates;
 		data->band.n_bitrates = ARRAY_SIZE(hwsim_rates);
+		data->band.ht_info.ht_supported = 1;
+		data->band.ht_info.cap = IEEE80211_HT_CAP_SUP_WIDTH |
+			IEEE80211_HT_CAP_GRN_FLD |
+			IEEE80211_HT_CAP_SGI_40 |
+			IEEE80211_HT_CAP_DSSSCCK40;
+		data->band.ht_info.ampdu_factor = 0x3;
+		data->band.ht_info.ampdu_density = 0x6;
+		memset(data->band.ht_info.supp_mcs_set, 0,
+		       sizeof(data->band.ht_info.supp_mcs_set));
+		data->band.ht_info.supp_mcs_set[0] = 0xff;
+		data->band.ht_info.supp_mcs_set[1] = 0xff;
+		data->band.ht_info.supp_mcs_set[12] =
+			IEEE80211_HT_CAP_MCS_TX_DEFINED;
 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band;
 
 		err = ieee80211_register_hw(hw);
@@ -469,6 +594,8 @@
 
 		setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
 			    (unsigned long) hw);
+
+		list_add_tail(&data->list, &hwsim_radios);
 	}
 
 	hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
@@ -500,7 +627,6 @@
 	device_unregister(data->dev);
 failed_drvdata:
 	ieee80211_free_hw(hw);
-	hwsim_radios[i] = NULL;
 failed:
 	mac80211_hwsim_free();
 	return err;
@@ -509,8 +635,7 @@
 
 static void __exit exit_mac80211_hwsim(void)
 {
-	printk(KERN_DEBUG "mac80211_hwsim: unregister %d radios\n",
-	       hwsim_radio_count);
+	printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
 
 	unregister_netdev(hwsim_mon);
 	mac80211_hwsim_free();
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index f479c1a..25bae79 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -398,7 +398,7 @@
     link->io.IOAddrLines = 5;
     
     /* Interrupt setup */
-    link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
     link->irq.IRQInfo1 = IRQ_LEVEL_ID;
     link->irq.Handler = &netwave_interrupt;
     
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 36c004e..5090477 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -79,15 +79,21 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/firmware.h>
 #include <linux/if_arp.h>
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+
 #include "hermes_rid.h"
+#include "hermes_dld.h"
 #include "orinoco.h"
 
 /********************************************************************/
@@ -241,6 +247,74 @@
 static void __orinoco_set_multicast_list(struct net_device *dev);
 
 /********************************************************************/
+/* Michael MIC crypto setup                                         */
+/********************************************************************/
+#define MICHAEL_MIC_LEN 8
+static int orinoco_mic_init(struct orinoco_private *priv)
+{
+	priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+	if (IS_ERR(priv->tx_tfm_mic)) {
+		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
+		       "crypto API michael_mic\n");
+		priv->tx_tfm_mic = NULL;
+		return -ENOMEM;
+	}
+
+	priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+	if (IS_ERR(priv->rx_tfm_mic)) {
+		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
+		       "crypto API michael_mic\n");
+		priv->rx_tfm_mic = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void orinoco_mic_free(struct orinoco_private *priv)
+{
+	if (priv->tx_tfm_mic)
+		crypto_free_hash(priv->tx_tfm_mic);
+	if (priv->rx_tfm_mic)
+		crypto_free_hash(priv->rx_tfm_mic);
+}
+
+static int michael_mic(struct crypto_hash *tfm_michael, u8 *key,
+		       u8 *da, u8 *sa, u8 priority,
+		       u8 *data, size_t data_len, u8 *mic)
+{
+	struct hash_desc desc;
+	struct scatterlist sg[2];
+	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
+
+	if (tfm_michael == NULL) {
+		printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+		return -1;
+	}
+
+	/* Copy header into buffer. We need the padding on the end zeroed */
+	memcpy(&hdr[0], da, ETH_ALEN);
+	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
+	hdr[ETH_ALEN*2] = priority;
+	hdr[ETH_ALEN*2+1] = 0;
+	hdr[ETH_ALEN*2+2] = 0;
+	hdr[ETH_ALEN*2+3] = 0;
+
+	/* Use scatter gather to MIC header and data in one go */
+	sg_init_table(sg, 2);
+	sg_set_buf(&sg[0], hdr, sizeof(hdr));
+	sg_set_buf(&sg[1], data, data_len);
+
+	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
+		return -1;
+
+	desc.tfm = tfm_michael;
+	desc.flags = 0;
+	return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
+				  mic);
+}
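
michael_mic() is used symmetrically: the transmit path (later in this patch) appends a MIC computed over the DA/SA/priority pseudo-header plus the payload, and the receive path recomputes it for comparison. A hedged transmit-side sketch with the same arguments as the later TX hunk; the wrapper name is illustrative only:

/* Illustrative wrapper: compute the TX Michael MIC for an 802.3 frame.
 * 'mic' must have room for MICHAEL_MIC_LEN bytes. */
static int example_tx_mic(struct orinoco_private *priv, struct sk_buff *skb,
			  u8 *mic)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	return michael_mic(priv->tx_tfm_mic,
			   priv->tkip_key[priv->tx_key].tx_mic,
			   eh->h_dest, eh->h_source, 0 /* priority */,
			   skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
}
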
+
+/********************************************************************/
 /* Internal helper functions                                        */
 /********************************************************************/
 
@@ -273,12 +347,19 @@
 #define ORINOCO_MAX_BSS_COUNT	64
 static int orinoco_bss_data_allocate(struct orinoco_private *priv)
 {
-	if (priv->bss_data)
+	if (priv->bss_xbss_data)
 		return 0;
 
-	priv->bss_data =
-	    kzalloc(ORINOCO_MAX_BSS_COUNT * sizeof(bss_element), GFP_KERNEL);
-	if (!priv->bss_data) {
+	if (priv->has_ext_scan)
+		priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
+					      sizeof(struct xbss_element),
+					      GFP_KERNEL);
+	else
+		priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
+					      sizeof(struct bss_element),
+					      GFP_KERNEL);
+
+	if (!priv->bss_xbss_data) {
 		printk(KERN_WARNING "Out of memory allocating beacons");
 		return -ENOMEM;
 	}
@@ -287,18 +368,319 @@
 
 static void orinoco_bss_data_free(struct orinoco_private *priv)
 {
-	kfree(priv->bss_data);
-	priv->bss_data = NULL;
+	kfree(priv->bss_xbss_data);
+	priv->bss_xbss_data = NULL;
 }
 
+#define PRIV_BSS	((struct bss_element *)priv->bss_xbss_data)
+#define PRIV_XBSS	((struct xbss_element *)priv->bss_xbss_data)
 static void orinoco_bss_data_init(struct orinoco_private *priv)
 {
 	int i;
 
 	INIT_LIST_HEAD(&priv->bss_free_list);
 	INIT_LIST_HEAD(&priv->bss_list);
-	for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
-		list_add_tail(&priv->bss_data[i].list, &priv->bss_free_list);
+	if (priv->has_ext_scan)
+		for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
+			list_add_tail(&(PRIV_XBSS[i].list),
+				      &priv->bss_free_list);
+	else
+		for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
+			list_add_tail(&(PRIV_BSS[i].list),
+				      &priv->bss_free_list);
+
+}
+
+static inline u8 *orinoco_get_ie(u8 *data, size_t len,
+				 enum ieee80211_mfie eid)
+{
+	u8 *p = data;
+	while ((p + 2) < (data + len)) {
+		if (p[0] == eid)
+			return p;
+		p += p[1] + 2;
+	}
+	return NULL;
+}
+
+#define WPA_OUI_TYPE	"\x00\x50\xF2\x01"
+#define WPA_SELECTOR_LEN 4
+static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
+{
+	u8 *p = data;
+	while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
+		if ((p[0] == MFIE_TYPE_GENERIC) &&
+		    (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
+			return p;
+		p += p[1] + 2;
+	}
+	return NULL;
+}
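
Both helpers walk a raw information-element blob (id, length, payload triples) linearly. A hedged sketch of how a caller might use them on scan data; the buffer, its length, and the function name are placeholders, and MFIE_TYPE_SSID is assumed to come from net/ieee80211.h like the MFIE_TYPE_GENERIC value used above:

/* Illustrative only: locate the SSID and WPA IEs in a raw IE buffer. */
static void example_parse_ies(u8 *ies, size_t ies_len)
{
	u8 *ssid_ie = orinoco_get_ie(ies, ies_len, MFIE_TYPE_SSID);
	u8 *wpa_ie = orinoco_get_wpa_ie(ies, ies_len);

	if (ssid_ie)
		printk(KERN_DEBUG "SSID IE present, length %d\n", ssid_ie[1]);
	if (wpa_ie)
		printk(KERN_DEBUG "WPA IE present, length %d\n", wpa_ie[1]);
}
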
+
+
+/********************************************************************/
+/* Download functionality                                           */
+/********************************************************************/
+
+struct fw_info {
+	char *pri_fw;
+	char *sta_fw;
+	char *ap_fw;
+	u32 pda_addr;
+	u16 pda_size;
+};
+
+static const struct fw_info orinoco_fw[] = {
+	{ "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
+	{ "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
+	{ "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 }
+};
+
+/* Structure used to access fields in the firmware image.
+ * All multi-byte fields are little-endian, so use the LE decoding macros.
+ */
+struct orinoco_fw_header {
+	char hdr_vers[6];       /* ASCII string for header version */
+	__le16 headersize;      /* Total length of header */
+	__le32 entry_point;     /* NIC entry point */
+	__le32 blocks;          /* Number of blocks to program */
+	__le32 block_offset;    /* Offset of block data from eof header */
+	__le32 pdr_offset;      /* Offset to PDR data from eof header */
+	__le32 pri_offset;      /* Offset to primary plug data */
+	__le32 compat_offset;   /* Offset to compatibility data*/
+	char signature[0];      /* FW signature length headersize-20 */
+} __attribute__ ((packed));
+
+/* Download either STA or AP firmware into the card. */
+static int
+orinoco_dl_firmware(struct orinoco_private *priv,
+		    const struct fw_info *fw,
+		    int ap)
+{
+	/* Plug Data Area (PDA) */
+	__le16 pda[512] = { 0 };
+
+	hermes_t *hw = &priv->hw;
+	const struct firmware *fw_entry;
+	const struct orinoco_fw_header *hdr;
+	const unsigned char *first_block;
+	const unsigned char *end;
+	const char *firmware;
+	struct net_device *dev = priv->ndev;
+	int err;
+
+	if (ap)
+		firmware = fw->ap_fw;
+	else
+		firmware = fw->sta_fw;
+
+	printk(KERN_DEBUG "%s: Attempting to download firmware %s\n",
+	       dev->name, firmware);
+
+	/* Read current plug data */
+	err = hermes_read_pda(hw, pda, fw->pda_addr,
+			      min_t(u16, fw->pda_size, sizeof(pda)), 0);
+	printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
+	if (err)
+		return err;
+
+	err = request_firmware(&fw_entry, firmware, priv->dev);
+	if (err) {
+		printk(KERN_ERR "%s: Cannot find firmware %s\n",
+		       dev->name, firmware);
+		return -ENOENT;
+	}
+
+	hdr = (const struct orinoco_fw_header *) fw_entry->data;
+
+	/* Enable aux port to allow programming */
+	err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
+	printk(KERN_DEBUG "%s: Program init returned %d\n", dev->name, err);
+	if (err != 0)
+		goto abort;
+
+	/* Program data */
+	first_block = (fw_entry->data +
+		       le16_to_cpu(hdr->headersize) +
+		       le32_to_cpu(hdr->block_offset));
+	end = fw_entry->data + fw_entry->size;
+
+	err = hermes_program(hw, first_block, end);
+	printk(KERN_DEBUG "%s: Program returned %d\n", dev->name, err);
+	if (err != 0)
+		goto abort;
+
+	/* Update production data */
+	first_block = (fw_entry->data +
+		       le16_to_cpu(hdr->headersize) +
+		       le32_to_cpu(hdr->pdr_offset));
+
+	err = hermes_apply_pda_with_defaults(hw, first_block, pda);
+	printk(KERN_DEBUG "%s: Apply PDA returned %d\n", dev->name, err);
+	if (err)
+		goto abort;
+
+	/* Tell card we've finished */
+	err = hermesi_program_end(hw);
+	printk(KERN_DEBUG "%s: Program end returned %d\n", dev->name, err);
+	if (err != 0)
+		goto abort;
+
+	/* Check if we're running */
+	printk(KERN_DEBUG "%s: hermes_present returned %d\n",
+	       dev->name, hermes_present(hw));
+
+abort:
+	release_firmware(fw_entry);
+	return err;
+}
+
+/* End markers */
+#define TEXT_END	0x1A		/* End of text header */
+
+/*
+ * Process a firmware image - stop the card, load the firmware, reset
+ * the card and make sure it responds.  For the secondary firmware take
+ * care of the PDA - read it and then write it on top of the firmware.
+ */
+static int
+symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
+		const unsigned char *image, const unsigned char *end,
+		int secondary)
+{
+	hermes_t *hw = &priv->hw;
+	int ret;
+	const unsigned char *ptr;
+	const unsigned char *first_block;
+
+	/* Plug Data Area (PDA) */
+	__le16 pda[256];
+
+	/* Binary block begins after the 0x1A marker */
+	ptr = image;
+	while (*ptr++ != TEXT_END);
+	first_block = ptr;
+
+	/* Read the PDA from EEPROM */
+	if (secondary) {
+		ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1);
+		if (ret)
+			return ret;
+	}
+
+	/* Stop the firmware, so that it can be safely rewritten */
+	if (priv->stop_fw) {
+		ret = priv->stop_fw(priv, 1);
+		if (ret)
+			return ret;
+	}
+
+	/* Program the adapter with new firmware */
+	ret = hermes_program(hw, first_block, end);
+	if (ret)
+		return ret;
+
+	/* Write the PDA to the adapter */
+	if (secondary) {
+		size_t len = hermes_blocks_length(first_block);
+		ptr = first_block + len;
+		ret = hermes_apply_pda(hw, ptr, pda);
+		if (ret)
+			return ret;
+	}
+
+	/* Run the firmware */
+	if (priv->stop_fw) {
+		ret = priv->stop_fw(priv, 0);
+		if (ret)
+			return ret;
+	}
+
+	/* Reset hermes chip and make sure it responds */
+	ret = hermes_init(hw);
+
+	/* hermes_reset() should return 0 with the secondary firmware */
+	if (secondary && ret != 0)
+		return -ENODEV;
+
+	/* And this should work with any firmware */
+	if (!hermes_present(hw))
+		return -ENODEV;
+
+	return 0;
+}
+
+
+/*
+ * Download the firmware into the card, this also does a PCMCIA soft
+ * reset on the card, to make sure it's in a sane state.
+ */
+static int
+symbol_dl_firmware(struct orinoco_private *priv,
+		   const struct fw_info *fw)
+{
+	struct net_device *dev = priv->ndev;
+	int ret;
+	const struct firmware *fw_entry;
+
+	if (request_firmware(&fw_entry, fw->pri_fw,
+			     priv->dev) != 0) {
+		printk(KERN_ERR "%s: Cannot find firmware: %s\n",
+		       dev->name, fw->pri_fw);
+		return -ENOENT;
+	}
+
+	/* Load primary firmware */
+	ret = symbol_dl_image(priv, fw, fw_entry->data,
+			      fw_entry->data + fw_entry->size, 0);
+	release_firmware(fw_entry);
+	if (ret) {
+		printk(KERN_ERR "%s: Primary firmware download failed\n",
+		       dev->name);
+		return ret;
+	}
+
+	if (request_firmware(&fw_entry, fw->sta_fw,
+			     priv->dev) != 0) {
+		printk(KERN_ERR "%s: Cannot find firmware: %s\n",
+		       dev->name, fw->sta_fw);
+		return -ENOENT;
+	}
+
+	/* Load secondary firmware */
+	ret = symbol_dl_image(priv, fw, fw_entry->data,
+			      fw_entry->data + fw_entry->size, 1);
+	release_firmware(fw_entry);
+	if (ret) {
+		printk(KERN_ERR "%s: Secondary firmware download failed\n",
+		       dev->name);
+	}
+
+	return ret;
+}
+
+static int orinoco_download(struct orinoco_private *priv)
+{
+	int err = 0;
+	/* Reload firmware */
+	switch (priv->firmware_type) {
+	case FIRMWARE_TYPE_AGERE:
+		/* case FIRMWARE_TYPE_INTERSIL: */
+		err = orinoco_dl_firmware(priv,
+					  &orinoco_fw[priv->firmware_type], 0);
+		break;
+
+	case FIRMWARE_TYPE_SYMBOL:
+		err = symbol_dl_firmware(priv,
+					 &orinoco_fw[priv->firmware_type]);
+		break;
+	case FIRMWARE_TYPE_INTERSIL:
+		break;
+	}
+	/* TODO: if we fail we probably need to reinitialise
+	 * the driver */
+
+	return err;
 }
 
 /********************************************************************/
@@ -453,8 +835,7 @@
 	int err = 0;
 	u16 txfid = priv->txfid;
 	struct ethhdr *eh;
-	int data_off;
-	struct hermes_tx_descriptor desc;
+	int tx_control;
 	unsigned long flags;
 
 	if (! netif_running(dev)) {
@@ -486,23 +867,54 @@
 	if (skb->len < ETH_HLEN)
 		goto drop;
 
-	eh = (struct ethhdr *)skb->data;
+	tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
 
-	memset(&desc, 0, sizeof(desc));
- 	desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
-	err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
-	if (err) {
-		if (net_ratelimit())
-			printk(KERN_ERR "%s: Error %d writing Tx descriptor "
-			       "to BAP\n", dev->name, err);
-		goto busy;
+	if (priv->encode_alg == IW_ENCODE_ALG_TKIP)
+		tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
+			HERMES_TXCTRL_MIC;
+
+	if (priv->has_alt_txcntl) {
+		/* WPA enabled firmwares have tx_cntl at the end of
+		 * the 802.11 header.  So write zeroed descriptor and
+		 * 802.11 header at the same time
+		 */
+		char desc[HERMES_802_3_OFFSET];
+		__le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
+
+		memset(&desc, 0, sizeof(desc));
+
+		*txcntl = cpu_to_le16(tx_control);
+		err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+					txfid, 0);
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_ERR "%s: Error %d writing Tx "
+				       "descriptor to BAP\n", dev->name, err);
+			goto busy;
+		}
+	} else {
+		struct hermes_tx_descriptor desc;
+
+		memset(&desc, 0, sizeof(desc));
+
+		desc.tx_control = cpu_to_le16(tx_control);
+		err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+					txfid, 0);
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_ERR "%s: Error %d writing Tx "
+				       "descriptor to BAP\n", dev->name, err);
+			goto busy;
+		}
+
+		/* Clear the 802.11 header and data length fields - some
+		 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
+		 * if this isn't done. */
+		hermes_clear_words(hw, HERMES_DATA0,
+				   HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
 	}
 
-	/* Clear the 802.11 header and data length fields - some
-	 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
-	 * if this isn't done. */
-	hermes_clear_words(hw, HERMES_DATA0,
-			   HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
+	eh = (struct ethhdr *)skb->data;
 
 	/* Encapsulate Ethernet-II frames */
 	if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
@@ -513,33 +925,65 @@
 
 		/* Strip destination and source from the data */
 		skb_pull(skb, 2 * ETH_ALEN);
-		data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
 
 		/* And move them to a separate header */
 		memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
 		hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
 		memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
 
-		err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
-					txfid, HERMES_802_3_OFFSET);
-		if (err) {
-			if (net_ratelimit())
-				printk(KERN_ERR "%s: Error %d writing packet "
-				       "header to BAP\n", dev->name, err);
-			goto busy;
+		/* Insert the SNAP header */
+		if (skb_headroom(skb) < sizeof(hdr)) {
+			printk(KERN_ERR
+			       "%s: Not enough headroom for 802.2 headers %d\n",
+			       dev->name, skb_headroom(skb));
+			goto drop;
 		}
-	} else { /* IEEE 802.3 frame */
-		data_off = HERMES_802_3_OFFSET;
+		eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
+		memcpy(eh, &hdr, sizeof(hdr));
 	}
 
 	err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
-				txfid, data_off);
+				txfid, HERMES_802_3_OFFSET);
 	if (err) {
 		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
 		       dev->name, err);
 		goto busy;
 	}
 
+	/* Calculate Michael MIC */
+	if (priv->encode_alg == IW_ENCODE_ALG_TKIP) {
+		u8 mic_buf[MICHAEL_MIC_LEN + 1];
+		u8 *mic;
+		size_t offset;
+		size_t len;
+
+		if (skb->len % 2) {
+			/* MIC start is on an odd boundary */
+			mic_buf[0] = skb->data[skb->len - 1];
+			mic = &mic_buf[1];
+			offset = skb->len - 1;
+			len = MICHAEL_MIC_LEN + 1;
+		} else {
+			mic = &mic_buf[0];
+			offset = skb->len;
+			len = MICHAEL_MIC_LEN;
+		}
+
+		michael_mic(priv->tx_tfm_mic,
+			    priv->tkip_key[priv->tx_key].tx_mic,
+			    eh->h_dest, eh->h_source, 0 /* priority */,
+			    skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
+
+		/* Write the MIC */
+		err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
+					txfid, HERMES_802_3_OFFSET + offset);
+		if (err) {
+			printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
+			       dev->name, err);
+			goto busy;
+		}
+	}
+
 	/* Finally, we actually initiate the send */
 	netif_stop_queue(dev);
 
@@ -554,7 +998,7 @@
 	}
 
 	dev->trans_start = jiffies;
-	stats->tx_bytes += data_off + skb->len;
+	stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
 	goto ok;
 
  drop:
@@ -834,21 +1278,48 @@
 	stats->rx_dropped++;
 }
 
+/* Get tsc from the firmware */
+static int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key,
+				  u8 *tsc)
+{
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+	u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
+
+	if ((key < 0) || (key > 4))
+		return -EINVAL;
+
+	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
+			      sizeof(tsc_arr), NULL, &tsc_arr);
+	if (!err)
+		memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
+
+	return err;
+}
+
 static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &priv->stats;
 	struct iw_statistics *wstats = &priv->wstats;
 	struct sk_buff *skb = NULL;
-	u16 rxfid, status, fc;
+	u16 rxfid, status;
 	int length;
-	struct hermes_rx_descriptor desc;
-	struct ethhdr *hdr;
+	struct hermes_rx_descriptor *desc;
+	struct orinoco_rx_data *rx_data;
 	int err;
 
+	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
+	if (!desc) {
+		printk(KERN_WARNING
+		       "%s: Can't allocate space for RX descriptor\n",
+		       dev->name);
+		goto update_stats;
+	}
+
 	rxfid = hermes_read_regn(hw, RXFID);
 
-	err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
+	err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
 			       rxfid, 0);
 	if (err) {
 		printk(KERN_ERR "%s: error %d reading Rx descriptor. "
@@ -856,7 +1327,7 @@
 		goto update_stats;
 	}
 
-	status = le16_to_cpu(desc.status);
+	status = le16_to_cpu(desc->status);
 
 	if (status & HERMES_RXSTAT_BADCRC) {
 		DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
@@ -867,8 +1338,8 @@
 
 	/* Handle frames in monitor mode */
 	if (priv->iw_mode == IW_MODE_MONITOR) {
-		orinoco_rx_monitor(dev, rxfid, &desc);
-		return;
+		orinoco_rx_monitor(dev, rxfid, desc);
+		goto out;
 	}
 
 	if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
@@ -878,15 +1349,14 @@
 		goto update_stats;
 	}
 
-	length = le16_to_cpu(desc.data_len);
-	fc = le16_to_cpu(desc.frame_ctl);
+	length = le16_to_cpu(desc->data_len);
 
 	/* Sanity checks */
 	if (length < 3) { /* No for even an 802.2 LLC header */
 		/* At least on Symbol firmware with PCF we get quite a
                    lot of these legitimately - Poll frames with no
                    data. */
-		return;
+		goto out;
 	}
 	if (length > IEEE80211_DATA_LEN) {
 		printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
@@ -895,6 +1365,11 @@
 		goto update_stats;
 	}
 
+	/* Payload size does not include Michael MIC. Increase payload
+	 * size to read it together with the data. */
+	if (status & HERMES_RXSTAT_MIC)
+		length += MICHAEL_MIC_LEN;
+
 	/* We need space for the packet data itself, plus an ethernet
 	   header, plus 2 bytes so we can align the IP header on a
 	   32bit boundary, plus 1 byte so we can read in odd length
@@ -921,6 +1396,100 @@
 		goto drop;
 	}
 
+	/* Add desc and skb to rx queue */
+	rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
+	if (!rx_data) {
+		printk(KERN_WARNING "%s: Can't allocate RX packet\n",
+			dev->name);
+		goto drop;
+	}
+	rx_data->desc = desc;
+	rx_data->skb = skb;
+	list_add_tail(&rx_data->list, &priv->rx_list);
+	tasklet_schedule(&priv->rx_tasklet);
+
+	return;
+
+drop:
+	dev_kfree_skb_irq(skb);
+update_stats:
+	stats->rx_errors++;
+	stats->rx_dropped++;
+out:
+	kfree(desc);
+}
+
+static void orinoco_rx(struct net_device *dev,
+		       struct hermes_rx_descriptor *desc,
+		       struct sk_buff *skb)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &priv->stats;
+	u16 status, fc;
+	int length;
+	struct ethhdr *hdr;
+
+	status = le16_to_cpu(desc->status);
+	length = le16_to_cpu(desc->data_len);
+	fc = le16_to_cpu(desc->frame_ctl);
+
+	/* Calculate and check MIC */
+	if (status & HERMES_RXSTAT_MIC) {
+		int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
+			      HERMES_MIC_KEY_ID_SHIFT);
+		u8 mic[MICHAEL_MIC_LEN];
+		u8 *rxmic;
+		u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
+			desc->addr3 : desc->addr2;
+
+		/* Extract Michael MIC from payload */
+		rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
+
+		skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
+		length -= MICHAEL_MIC_LEN;
+
+		michael_mic(priv->rx_tfm_mic,
+			    priv->tkip_key[key_id].rx_mic,
+			    desc->addr1,
+			    src,
+			    0, /* priority or QoS? */
+			    skb->data,
+			    skb->len,
+			    &mic[0]);
+
+		if (memcmp(mic, rxmic,
+			   MICHAEL_MIC_LEN)) {
+			union iwreq_data wrqu;
+			struct iw_michaelmicfailure wxmic;
+			DECLARE_MAC_BUF(mac);
+
+			printk(KERN_WARNING "%s: "
+			       "Invalid Michael MIC in data frame from %s, "
+			       "using key %i\n",
+			       dev->name, print_mac(mac, src), key_id);
+
+			/* TODO: update stats */
+
+			/* Notify userspace */
+			memset(&wxmic, 0, sizeof(wxmic));
+			wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
+			wxmic.flags |= (desc->addr1[0] & 1) ?
+				IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
+			wxmic.src_addr.sa_family = ARPHRD_ETHER;
+			memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
+
+			(void) orinoco_hw_get_tkip_iv(priv, key_id,
+						      &wxmic.tsc[0]);
+
+			memset(&wrqu, 0, sizeof(wrqu));
+			wrqu.data.length = sizeof(wxmic);
+			wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
+					    (char *) &wxmic);
+
+			goto drop;
+		}
+	}
+
 	/* Handle decapsulation
 	 * In most cases, the firmware tells us about SNAP frames.
 	 * For some reason, the SNAP frames sent by LinkSys APs
@@ -939,11 +1508,11 @@
 		hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 		hdr->h_proto = htons(length);
 	}
-	memcpy(hdr->h_dest, desc.addr1, ETH_ALEN);
+	memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
 	if (fc & IEEE80211_FCTL_FROMDS)
-		memcpy(hdr->h_source, desc.addr3, ETH_ALEN);
+		memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
 	else
-		memcpy(hdr->h_source, desc.addr2, ETH_ALEN);
+		memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
 
 	dev->last_rx = jiffies;
 	skb->protocol = eth_type_trans(skb, dev);
@@ -952,7 +1521,7 @@
 		skb->pkt_type = PACKET_OTHERHOST;
 	
 	/* Process the wireless stats if needed */
-	orinoco_stat_gather(dev, skb, &desc);
+	orinoco_stat_gather(dev, skb, desc);
 
 	/* Pass the packet to the networking stack */
 	netif_rx(skb);
@@ -961,13 +1530,33 @@
 
 	return;
 
- drop:	
-	dev_kfree_skb_irq(skb);
- update_stats:
+ drop:
+	dev_kfree_skb(skb);
 	stats->rx_errors++;
 	stats->rx_dropped++;
 }
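
The MIC handling above follows the usual TKIP layout: the 8-byte Michael MIC rides as the last MICHAEL_MIC_LEN bytes of the payload (which is why the ISR path grows the read length before fetching the frame), and it is computed over the destination address, the source address, a priority byte and the payload body. A minimal sketch of that check, with michael() as a hypothetical stand-in for the driver's crypto_hash-based michael_mic() helper:

/* Sketch only: michael() is a hypothetical stand-in for a Michael MIC
 * routine, not a real kernel API. */
static int tkip_mic_ok(const u8 *mic_key, const u8 *da, const u8 *sa,
		       const u8 *payload, size_t len)
{
	u8 calc[8];			/* MICHAEL_MIC_LEN */

	if (len < 8)
		return 0;
	/* The MIC covers DA, SA, a priority byte (0 here) and the body */
	michael(mic_key, da, sa, 0, payload, len - 8, calc);
	/* The transmitted MIC is the trailing 8 bytes of the payload */
	return memcmp(calc, payload + len - 8, 8) == 0;
}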
 
+static void orinoco_rx_isr_tasklet(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_rx_data *rx_data, *temp;
+	struct hermes_rx_descriptor *desc;
+	struct sk_buff *skb;
+
+	/* extract desc and skb from queue */
+	list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
+		desc = rx_data->desc;
+		skb = rx_data->skb;
+		list_del(&rx_data->list);
+		kfree(rx_data);
+
+		orinoco_rx(dev, desc, skb);
+
+		kfree(desc);
+	}
+}
+
 /********************************************************************/
 /* Rx path (info frames)                                            */
 /********************************************************************/
@@ -1087,49 +1676,169 @@
 }
 
 /* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct work_struct *work)
+static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
 {
-	struct orinoco_private *priv =
-		container_of(work, struct orinoco_private, wevent_work);
 	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	union iwreq_data wrqu;
 	int err;
-	unsigned long flags;
-
-	if (orinoco_lock(priv, &flags) != 0)
-		return;
 
 	err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
 			      ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
 	if (err != 0)
-		goto out;
+		return;
 
 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 
 	/* Send event to user space */
 	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-
- out:
-	orinoco_unlock(priv, &flags);
 }
 
+static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
+{
+	struct net_device *dev = priv->ndev;
+	struct hermes *hw = &priv->hw;
+	union iwreq_data wrqu;
+	int err;
+	u8 buf[88];
+	u8 *ie;
+
+	if (!priv->has_wpa)
+		return;
+
+	err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
+			      sizeof(buf), NULL, &buf);
+	if (err != 0)
+		return;
+
+	ie = orinoco_get_wpa_ie(buf, sizeof(buf));
+	if (ie) {
+		int rem = sizeof(buf) - (ie - &buf[0]);
+		wrqu.data.length = ie[1] + 2;
+		if (wrqu.data.length > rem)
+			wrqu.data.length = rem;
+
+		if (wrqu.data.length)
+			/* Send event to user space */
+			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
+	}
+}
+
+static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
+{
+	struct net_device *dev = priv->ndev;
+	struct hermes *hw = &priv->hw;
+	union iwreq_data wrqu;
+	int err;
+	u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
+	u8 *ie;
+
+	if (!priv->has_wpa)
+		return;
+
+	err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
+			      sizeof(buf), NULL, &buf);
+	if (err != 0)
+		return;
+
+	ie = orinoco_get_wpa_ie(buf, sizeof(buf));
+	if (ie) {
+		int rem = sizeof(buf) - (ie - &buf[0]);
+		wrqu.data.length = ie[1] + 2;
+		if (wrqu.data.length > rem)
+			wrqu.data.length = rem;
+
+		if (wrqu.data.length)
+			/* Send event to user space */
+			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
+	}
+}
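
Both wevent helpers above rely on the 802.11 information-element encoding: every IE is a TLV with a one-byte element ID, a one-byte length and up to 255 bytes of data, so a whole element occupies ie[1] + 2 bytes, which is what gets clamped against the remaining buffer. A minimal sketch of the kind of walk a helper such as orinoco_get_wpa_ie() has to do (the real helper lives elsewhere in the patch), assuming the WPA IE is vendor-specific element 0xdd carrying the 00:50:f2 OUI with type 1:

/* Sketch only: return the first WPA IE in an IE blob, or NULL. */
static u8 *find_wpa_ie(u8 *data, size_t len)
{
	static const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
	size_t off = 0;

	while ((off + 2 <= len) && (off + 2 + data[off + 1] <= len)) {
		u8 *ie = &data[off];

		if ((ie[0] == 0xdd) && (ie[1] >= sizeof(wpa_oui)) &&
		    (memcmp(&ie[2], wpa_oui, sizeof(wpa_oui)) == 0))
			return ie;	/* element is ie[1] + 2 bytes long */

		off += ie[1] + 2;	/* skip ID, length and data */
	}
	return NULL;
}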
+
+static void orinoco_send_wevents(struct work_struct *work)
+{
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, wevent_work);
+	unsigned long flags;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return;
+
+	orinoco_send_assocreqie_wevent(priv);
+	orinoco_send_assocrespie_wevent(priv);
+	orinoco_send_bssid_wevent(priv);
+
+	orinoco_unlock(priv, &flags);
+}
 
 static inline void orinoco_clear_scan_results(struct orinoco_private *priv,
 					      unsigned long scan_age)
 {
-	bss_element *bss;
-	bss_element *tmp_bss;
+	if (priv->has_ext_scan) {
+		struct xbss_element *bss;
+		struct xbss_element *tmp_bss;
 
-	/* Blow away current list of scan results */
-	list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
-		if (!scan_age ||
-		    time_after(jiffies, bss->last_scanned + scan_age)) {
-			list_move_tail(&bss->list, &priv->bss_free_list);
-			/* Don't blow away ->list, just BSS data */
-			memset(bss, 0, sizeof(bss->bss));
-			bss->last_scanned = 0;
+		/* Blow away current list of scan results */
+		list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
+			if (!scan_age ||
+			    time_after(jiffies, bss->last_scanned + scan_age)) {
+				list_move_tail(&bss->list,
+					       &priv->bss_free_list);
+				/* Don't blow away ->list, just BSS data */
+				memset(&bss->bss, 0, sizeof(bss->bss));
+				bss->last_scanned = 0;
+			}
 		}
+	} else {
+		struct bss_element *bss;
+		struct bss_element *tmp_bss;
+
+		/* Blow away current list of scan results */
+		list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
+			if (!scan_age ||
+			    time_after(jiffies, bss->last_scanned + scan_age)) {
+				list_move_tail(&bss->list,
+					       &priv->bss_free_list);
+				/* Don't blow away ->list, just BSS data */
+				memset(&bss->bss, 0, sizeof(bss->bss));
+				bss->last_scanned = 0;
+			}
+		}
+	}
+}
+
+static void orinoco_add_ext_scan_result(struct orinoco_private *priv,
+					struct agere_ext_scan_info *atom)
+{
+	struct xbss_element *bss = NULL;
+	int found = 0;
+
+	/* Try to update an existing bss first */
+	list_for_each_entry(bss, &priv->bss_list, list) {
+		if (compare_ether_addr(bss->bss.bssid, atom->bssid))
+			continue;
+		/* ESSID lengths */
+		if (bss->bss.data[1] != atom->data[1])
+			continue;
+		if (memcmp(&bss->bss.data[2], &atom->data[2],
+			   atom->data[1]))
+			continue;
+		found = 1;
+		break;
+	}
+
+	/* Grab a bss off the free list */
+	if (!found && !list_empty(&priv->bss_free_list)) {
+		bss = list_entry(priv->bss_free_list.next,
+				 struct xbss_element, list);
+		list_del(priv->bss_free_list.next);
+
+		list_add_tail(&bss->list, &priv->bss_list);
+	}
+
+	if (bss) {
+		/* Always update the BSS to get latest beacon info */
+		memcpy(&bss->bss, atom, sizeof(bss->bss));
+		bss->last_scanned = jiffies;
 	}
 }
 
@@ -1194,7 +1903,7 @@
 	/* Read the entries one by one */
 	for (; offset + atom_len <= len; offset += atom_len) {
 		int found = 0;
-		bss_element *bss = NULL;
+		struct bss_element *bss = NULL;
 
 		/* Get next atom */
 		atom = (union hermes_scan_info *) (buf + offset);
@@ -1216,7 +1925,7 @@
 		/* Grab a bss off the free list */
 		if (!found && !list_empty(&priv->bss_free_list)) {
 			bss = list_entry(priv->bss_free_list.next,
-					 bss_element, list);
+					 struct bss_element, list);
 			list_del(priv->bss_free_list.next);
 
 			list_add_tail(&bss->list, &priv->bss_list);
@@ -1404,6 +2113,63 @@
 		kfree(buf);
 	}
 	break;
+	case HERMES_INQ_CHANNELINFO:
+	{
+		struct agere_ext_scan_info *bss;
+
+		if (!priv->scan_inprogress) {
+			printk(KERN_DEBUG "%s: Got chaninfo without scan, "
+			       "len=%d\n", dev->name, len);
+			break;
+		}
+
+		/* An empty result indicates that the scan is complete */
+		if (len == 0) {
+			union iwreq_data	wrqu;
+
+			/* Scan is no longer in progress */
+			priv->scan_inprogress = 0;
+
+			wrqu.data.length = 0;
+			wrqu.data.flags = 0;
+			wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+			break;
+		}
+
+		/* Sanity check */
+		else if (len > sizeof(*bss)) {
+			printk(KERN_WARNING
+			       "%s: Ext scan results too large (%d bytes). "
+			       "Truncating results to %zd bytes.\n",
+			       dev->name, len, sizeof(*bss));
+			len = sizeof(*bss);
+		} else if (len < (offsetof(struct agere_ext_scan_info,
+					   data) + 2)) {
+			/* Drop this result now so we don't have to
+			 * keep checking later */
+			printk(KERN_WARNING
+			       "%s: Ext scan results too short (%d bytes)\n",
+			       dev->name, len);
+			break;
+		}
+
+		bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
+		if (bss == NULL)
+			break;
+
+		/* Read scan data */
+		err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
+				       infofid, sizeof(info));
+		if (err) {
+			kfree(bss);
+			break;
+		}
+
+		orinoco_add_ext_scan_result(priv, bss);
+
+		kfree(bss);
+		break;
+	}
 	case HERMES_INQ_SEC_STAT_AGERE:
 		/* Security status (Agere specific) */
 		/* Ignore this frame for now */
@@ -1586,7 +2352,7 @@
 }
 
 /* Change the WEP keys and/or the current keys.  Can be called
- * either from __orinoco_hw_setup_wep() or directly from
+ * either from __orinoco_hw_setup_enc() or directly from
  * orinoco_ioctl_setiwencode().  In the latter case the association
  * with the AP is not broken (if the firmware can handle it),
  * which is needed for 802.1x implementations. */
@@ -1646,14 +2412,16 @@
 	return 0;
 }
 
-static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
+static int __orinoco_hw_setup_enc(struct orinoco_private *priv)
 {
 	hermes_t *hw = &priv->hw;
 	int err = 0;
 	int master_wep_flag;
 	int auth_flag;
+	int enc_flag;
 
-	if (priv->wep_on)
+	/* Setup WEP keys for WEP and WPA */
+	if (priv->encode_alg)
 		__orinoco_hw_setup_wepkeys(priv);
 
 	if (priv->wep_restrict)
@@ -1661,9 +2429,16 @@
 	else
 		auth_flag = HERMES_AUTH_OPEN;
 
+	if (priv->wpa_enabled)
+		enc_flag = 2;
+	else if (priv->encode_alg == IW_ENCODE_ALG_WEP)
+		enc_flag = 1;
+	else
+		enc_flag = 0;
+
 	switch (priv->firmware_type) {
 	case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
-		if (priv->wep_on) {
+		if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
 			/* Enable the shared-key authentication. */
 			err = hermes_write_wordrec(hw, USER_BAP,
 						   HERMES_RID_CNFAUTHENTICATION_AGERE,
@@ -1671,14 +2446,24 @@
 		}
 		err = hermes_write_wordrec(hw, USER_BAP,
 					   HERMES_RID_CNFWEPENABLED_AGERE,
-					   priv->wep_on);
+					   enc_flag);
 		if (err)
 			return err;
+
+		if (priv->has_wpa) {
+			/* Set WPA key management */
+			err = hermes_write_wordrec(hw, USER_BAP,
+				  HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
+				  priv->key_mgmt);
+			if (err)
+				return err;
+		}
+
 		break;
 
 	case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
 	case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
-		if (priv->wep_on) {
+		if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
 			if (priv->wep_restrict ||
 			    (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
 				master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
@@ -1710,6 +2495,84 @@
 	return 0;
 }
 
+/* key must be 32 bytes, including the tx and rx MIC keys.
+ * rsc must be 8 bytes
+ * tsc must be 8 bytes or NULL
+ */
+static int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
+				     u8 *key, u8 *rsc, u8 *tsc)
+{
+	struct {
+		__le16 idx;
+		u8 rsc[IW_ENCODE_SEQ_MAX_SIZE];
+		u8 key[TKIP_KEYLEN];
+		u8 tx_mic[MIC_KEYLEN];
+		u8 rx_mic[MIC_KEYLEN];
+		u8 tsc[IW_ENCODE_SEQ_MAX_SIZE];
+	} __attribute__ ((packed)) buf;
+	int ret;
+	int err;
+	int k;
+	u16 xmitting;
+
+	key_idx &= 0x3;
+
+	if (set_tx)
+		key_idx |= 0x8000;
+
+	buf.idx = cpu_to_le16(key_idx);
+	memcpy(buf.key, key,
+	       sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
+
+	if (rsc == NULL)
+		memset(buf.rsc, 0, sizeof(buf.rsc));
+	else
+		memcpy(buf.rsc, rsc, sizeof(buf.rsc));
+
+	if (tsc == NULL) {
+		memset(buf.tsc, 0, sizeof(buf.tsc));
+		buf.tsc[4] = 0x10;
+	} else {
+		memcpy(buf.tsc, tsc, sizeof(buf.tsc));
+	}
+
+	/* Wait up to 100ms for the tx queue to empty */
+	k = 100;
+	do {
+		k--;
+		udelay(1000);
+		ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
+					  &xmitting);
+		if (ret)
+			break;
+	} while ((k > 0) && xmitting);
+
+	if (k == 0)
+		ret = -ETIMEDOUT;
+
+	err = HERMES_WRITE_RECORD(hw, USER_BAP,
+				  HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
+				  &buf);
+
+	return ret ? ret : err;
+}
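
The 32-byte key this function expects is laid out exactly like struct orinoco_tkip_key added in orinoco.h (16-byte temporal key, then the 8-byte TX and 8-byte RX Michael MIC keys), which is why the single memcpy above fills buf.key, buf.tx_mic and buf.rx_mic in one go. A minimal usage sketch, assuming a hypothetical caller that already has hw, idx and 32 bytes of key material in the order the driver expects:

/* Sketch only: illustrative caller, not part of the patch. */
static int install_tkip_tx_key(hermes_t *hw, int idx, const u8 *key_material)
{
	struct orinoco_tkip_key k;

	memcpy(k.tkip,   key_material,      TKIP_KEYLEN);	/* temporal key       */
	memcpy(k.tx_mic, key_material + 16, MIC_KEYLEN);	/* TX Michael MIC key */
	memcpy(k.rx_mic, key_material + 24, MIC_KEYLEN);	/* RX Michael MIC key */

	/* Install as the transmit key; no RSC/TSC known yet, so pass NULL */
	return __orinoco_hw_set_tkip_key(hw, idx, 1, (u8 *) &k, NULL, NULL);
}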
+
+static int orinoco_clear_tkip_key(struct orinoco_private *priv,
+				  int key_idx)
+{
+	hermes_t *hw = &priv->hw;
+	int err;
+
+	memset(&priv->tkip_key[key_idx], 0, sizeof(priv->tkip_key[key_idx]));
+	err = hermes_write_wordrec(hw, USER_BAP,
+				   HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
+				   key_idx);
+	if (err)
+		printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
+		       priv->ndev->name, err, key_idx);
+	return err;
+}
+
 static int __orinoco_program_rids(struct net_device *dev)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
@@ -1906,10 +2769,10 @@
 	}
 
 	/* Set up encryption */
-	if (priv->has_wep) {
-		err = __orinoco_hw_setup_wep(priv);
+	if (priv->has_wep || priv->has_wpa) {
+		err = __orinoco_hw_setup_enc(priv);
 		if (err) {
-			printk(KERN_ERR "%s: Error %d activating WEP\n",
+			printk(KERN_ERR "%s: Error %d activating encryption\n",
 			       dev->name, err);
 			return err;
 		}
@@ -2047,6 +2910,12 @@
 		}
 	}
 
+	if (priv->do_fw_download) {
+		err = orinoco_download(priv);
+		if (err)
+			priv->do_fw_download = 0;
+	}
+
 	err = orinoco_reinit_firmware(dev);
 	if (err) {
 		printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
@@ -2258,6 +3127,10 @@
 	priv->has_ibss = 1;
 	priv->has_wep = 0;
 	priv->has_big_wep = 0;
+	priv->has_alt_txcntl = 0;
+	priv->has_ext_scan = 0;
+	priv->has_wpa = 0;
+	priv->do_fw_download = 0;
 
 	/* Determine capabilities from the firmware version */
 	switch (priv->firmware_type) {
@@ -2277,8 +3150,11 @@
 		priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
 		priv->ibss_port = 1;
 		priv->has_hostscan = (firmver >= 0x8000a);
+		priv->do_fw_download = 1;
 		priv->broken_monitor = (firmver >= 0x80000);
-
+		priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
+		priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
+		priv->has_wpa = (firmver >= 0x9002a);
 		/* Tested with Agere firmware :
 		 *	1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
 		 * Tested CableTron firmware : 4.32 => Anton */
@@ -2321,6 +3197,21 @@
 			       firmver >= 0x31000;
 		priv->has_preamble = (firmver >= 0x20000);
 		priv->ibss_port = 4;
+
+		/* Symbol firmware is found on various cards, but
+		 * there has been no attempt to check firmware
+		 * download on non-spectrum_cs based cards.
+		 *
+		 * Given that the Agere firmware download works
+		 * differently, we should avoid doing a firmware
+		 * download with the Symbol algorithm on non-spectrum
+		 * cards.
+		 *
+		 * For now we can identify a spectrum_cs based card
+		 * because it has a firmware reset function.
+		 */
+		priv->do_fw_download = (priv->stop_fw != NULL);
+
  		priv->broken_disableport = (firmver == 0x25013) ||
  					   (firmver >= 0x30000 && firmver <= 0x31000);
 		priv->has_hostscan = (firmver >= 0x31001) ||
@@ -2391,6 +3282,20 @@
 		goto out;
 	}
 
+	if (priv->do_fw_download) {
+		err = orinoco_download(priv);
+		if (err)
+			priv->do_fw_download = 0;
+
+		/* Check firmware version again */
+		err = determine_firmware(dev);
+		if (err != 0) {
+			printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
+			       dev->name);
+			goto out;
+		}
+	}
+
 	if (priv->has_port3)
 		printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
 	if (priv->has_ibss)
@@ -2403,6 +3308,20 @@
 		else
 			printk("40-bit key\n");
 	}
+	if (priv->has_wpa) {
+		printk(KERN_DEBUG "%s: WPA-PSK supported\n", dev->name);
+		if (orinoco_mic_init(priv)) {
+			printk(KERN_ERR "%s: Failed to setup MIC crypto "
+			       "algorithm. Disabling WPA support\n", dev->name);
+			priv->has_wpa = 0;
+		}
+	}
+
+	/* Now we have the firmware capabilities, allocate appropriately
+	 * sized scan buffers */
+	if (orinoco_bss_data_allocate(priv))
+		goto out;
+	orinoco_bss_data_init(priv);
 
 	/* Get the MAC address */
 	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
@@ -2518,8 +3437,13 @@
 	priv->channel = 0; /* use firmware default */
 
 	priv->promiscuous = 0;
-	priv->wep_on = 0;
+	priv->encode_alg = IW_ENCODE_ALG_NONE;
 	priv->tx_key = 0;
+	priv->wpa_enabled = 0;
+	priv->tkip_cm_active = 0;
+	priv->key_mgmt = 0;
+	priv->wpa_ie_len = 0;
+	priv->wpa_ie = NULL;
 
 	/* Make the hardware available, as long as it hasn't been
 	 * removed elsewhere (e.g. by PCMCIA hot unplug) */
@@ -2533,8 +3457,11 @@
 	return err;
 }
 
-struct net_device *alloc_orinocodev(int sizeof_card,
-				    int (*hard_reset)(struct orinoco_private *))
+struct net_device
+*alloc_orinocodev(int sizeof_card,
+		  struct device *device,
+		  int (*hard_reset)(struct orinoco_private *),
+		  int (*stop_fw)(struct orinoco_private *, int))
 {
 	struct net_device *dev;
 	struct orinoco_private *priv;
@@ -2549,10 +3476,7 @@
 				      + sizeof(struct orinoco_private));
 	else
 		priv->card = NULL;
-
-	if (orinoco_bss_data_allocate(priv))
-		goto err_out_free;
-	orinoco_bss_data_init(priv);
+	priv->dev = device;
 
 	/* Setup / override net_device fields */
 	dev->init = orinoco_init;
@@ -2570,10 +3494,14 @@
 	dev->set_multicast_list = orinoco_set_multicast_list;
 	/* we use the default eth_mac_addr for setting the MAC addr */
 
+	/* Reserve space in skb for the SNAP header */
+	dev->hard_header_len += ENCAPS_OVERHEAD;
+
 	/* Set up default callbacks */
 	dev->open = orinoco_open;
 	dev->stop = orinoco_stop;
 	priv->hard_reset = hard_reset;
+	priv->stop_fw = stop_fw;
 
 	spin_lock_init(&priv->lock);
 	priv->open = 0;
@@ -2584,20 +3512,27 @@
 	INIT_WORK(&priv->join_work, orinoco_join_ap);
 	INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
 
+	INIT_LIST_HEAD(&priv->rx_list);
+	tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
+		     (unsigned long) dev);
+
 	netif_carrier_off(dev);
 	priv->last_linkstatus = 0xffff;
 
 	return dev;
-
-err_out_free:
-	free_netdev(dev);
-	return NULL;
 }
 
 void free_orinocodev(struct net_device *dev)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
 
+	/* No need to empty priv->rx_list: if the tasklet is scheduled
+	 * when we call tasklet_kill it will run one final time,
+	 * emptying the list */
+	tasklet_kill(&priv->rx_tasklet);
+	priv->wpa_ie_len = 0;
+	kfree(priv->wpa_ie);
+	orinoco_mic_free(priv);
 	orinoco_bss_data_free(priv);
 	free_netdev(dev);
 }
@@ -2909,7 +3844,7 @@
 	memset(range, 0, sizeof(struct iw_range));
 
 	range->we_version_compiled = WIRELESS_EXT;
-	range->we_version_source = 14;
+	range->we_version_source = 22;
 
 	/* Set available channels/frequencies */
 	range->num_channels = NUM_CHANNELS;
@@ -2939,6 +3874,9 @@
 		}
 	}
 
+	if (priv->has_wpa)
+		range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
+
 	if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
 		/* Quality stats meaningless in ad-hoc mode */
 	} else {
@@ -2986,6 +3924,11 @@
 	range->min_r_time = 0;
 	range->max_r_time = 65535 * 1000;	/* ??? */
 
+	if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
+		range->scan_capa = IW_SCAN_CAPA_ESSID;
+	else
+		range->scan_capa = IW_SCAN_CAPA_NONE;
+
 	/* Event capability (kernel) */
 	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
 	/* Event capability (driver) */
@@ -3005,7 +3948,7 @@
 	struct orinoco_private *priv = netdev_priv(dev);
 	int index = (erq->flags & IW_ENCODE_INDEX) - 1;
 	int setindex = priv->tx_key;
-	int enable = priv->wep_on;
+	int encode_alg = priv->encode_alg;
 	int restricted = priv->wep_restrict;
 	u16 xlen = 0;
 	int err = -EINPROGRESS;		/* Call commit handler */
@@ -3026,6 +3969,10 @@
 	if (orinoco_lock(priv, &flags) != 0)
 		return -EBUSY;
 
+	/* Clear any TKIP key we have */
+	if ((priv->has_wpa) && (priv->encode_alg == IW_ENCODE_ALG_TKIP))
+		(void) orinoco_clear_tkip_key(priv, setindex);
+
 	if (erq->length > 0) {
 		if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
 			index = priv->tx_key;
@@ -3039,9 +3986,9 @@
 			xlen = 0;
 
 		/* Switch on WEP if off */
-		if ((!enable) && (xlen > 0)) {
+		if ((encode_alg != IW_ENCODE_ALG_WEP) && (xlen > 0)) {
 			setindex = index;
-			enable = 1;
+			encode_alg = IW_ENCODE_ALG_WEP;
 		}
 	} else {
 		/* Important note : if the user does "iwconfig eth0 enc off",
@@ -3063,7 +4010,7 @@
 	}
 
 	if (erq->flags & IW_ENCODE_DISABLED)
-		enable = 0;
+		encode_alg = IW_ENCODE_ALG_NONE;
 	if (erq->flags & IW_ENCODE_OPEN)
 		restricted = 0;
 	if (erq->flags & IW_ENCODE_RESTRICTED)
@@ -3078,14 +4025,15 @@
 	priv->tx_key = setindex;
 
 	/* Try fast key change if connected and only keys are changed */
-	if (priv->wep_on && enable && (priv->wep_restrict == restricted) &&
+	if ((priv->encode_alg == encode_alg) &&
+	    (priv->wep_restrict == restricted) &&
 	    netif_carrier_ok(dev)) {
 		err = __orinoco_hw_setup_wepkeys(priv);
 		/* No need to commit if successful */
 		goto out;
 	}
 
-	priv->wep_on = enable;
+	priv->encode_alg = encode_alg;
 	priv->wep_restrict = restricted;
 
  out:
@@ -3114,7 +4062,7 @@
 		index = priv->tx_key;
 
 	erq->flags = 0;
-	if (! priv->wep_on)
+	if (!priv->encode_alg)
 		erq->flags |= IW_ENCODE_DISABLED;
 	erq->flags |= index + 1;
 
@@ -3689,6 +4637,399 @@
 	return err;
 }
 
+static int orinoco_ioctl_set_encodeext(struct net_device *dev,
+				       struct iw_request_info *info,
+				       union iwreq_data *wrqu,
+				       char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	struct iw_point *encoding = &wrqu->encoding;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	int idx, alg = ext->alg, set_key = 1;
+	unsigned long flags;
+	int err = -EINVAL;
+	u16 key_len;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	/* Determine and validate the key index */
+	idx = encoding->flags & IW_ENCODE_INDEX;
+	if (idx) {
+		if ((idx < 1) || (idx > WEP_KEYS))
+			goto out;
+		idx--;
+	} else
+		idx = priv->tx_key;
+
+	if (encoding->flags & IW_ENCODE_DISABLED)
+		alg = IW_ENCODE_ALG_NONE;
+
+	if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
+		/* Clear any TKIP TX key we had */
+		(void) orinoco_clear_tkip_key(priv, priv->tx_key);
+	}
+
+	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+		priv->tx_key = idx;
+		set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
+			   (ext->key_len > 0)) ? 1 : 0;
+	}
+
+	if (set_key) {
+		/* Set the requested key first */
+		switch (alg) {
+		case IW_ENCODE_ALG_NONE:
+			priv->encode_alg = alg;
+			priv->keys[idx].len = 0;
+			break;
+
+		case IW_ENCODE_ALG_WEP:
+			if (ext->key_len > SMALL_KEY_SIZE)
+				key_len = LARGE_KEY_SIZE;
+			else if (ext->key_len > 0)
+				key_len = SMALL_KEY_SIZE;
+			else
+				goto out;
+
+			priv->encode_alg = alg;
+			priv->keys[idx].len = cpu_to_le16(key_len);
+
+			key_len = min(ext->key_len, key_len);
+
+			memset(priv->keys[idx].data, 0, ORINOCO_MAX_KEY_SIZE);
+			memcpy(priv->keys[idx].data, ext->key, key_len);
+			break;
+
+		case IW_ENCODE_ALG_TKIP:
+		{
+			hermes_t *hw = &priv->hw;
+			u8 *tkip_iv = NULL;
+
+			if (!priv->has_wpa ||
+			    (ext->key_len > sizeof(priv->tkip_key[0])))
+				goto out;
+
+			priv->encode_alg = alg;
+			memset(&priv->tkip_key[idx], 0,
+			       sizeof(priv->tkip_key[idx]));
+			memcpy(&priv->tkip_key[idx], ext->key, ext->key_len);
+
+			if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+				tkip_iv = &ext->rx_seq[0];
+
+			err = __orinoco_hw_set_tkip_key(hw, idx,
+				 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
+				 (u8 *) &priv->tkip_key[idx],
+				 tkip_iv, NULL);
+			if (err)
+				printk(KERN_ERR "%s: Error %d setting TKIP key"
+				       "\n", dev->name, err);
+
+			goto out;
+		}
+		default:
+			goto out;
+		}
+	}
+	err = -EINPROGRESS;
+ out:
+	orinoco_unlock(priv, &flags);
+
+	return err;
+}
+
+static int orinoco_ioctl_get_encodeext(struct net_device *dev,
+				       struct iw_request_info *info,
+				       union iwreq_data *wrqu,
+				       char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	struct iw_point *encoding = &wrqu->encoding;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	int idx, max_key_len;
+	unsigned long flags;
+	int err;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	err = -EINVAL;
+	max_key_len = encoding->length - sizeof(*ext);
+	if (max_key_len < 0)
+		goto out;
+
+	idx = encoding->flags & IW_ENCODE_INDEX;
+	if (idx) {
+		if ((idx < 1) || (idx > WEP_KEYS))
+			goto out;
+		idx--;
+	} else
+		idx = priv->tx_key;
+
+	encoding->flags = idx + 1;
+	memset(ext, 0, sizeof(*ext));
+
+	ext->alg = priv->encode_alg;
+	switch (priv->encode_alg) {
+	case IW_ENCODE_ALG_NONE:
+		ext->key_len = 0;
+		encoding->flags |= IW_ENCODE_DISABLED;
+		break;
+	case IW_ENCODE_ALG_WEP:
+		ext->key_len = min_t(u16, le16_to_cpu(priv->keys[idx].len),
+				     max_key_len);
+		memcpy(ext->key, priv->keys[idx].data, ext->key_len);
+		encoding->flags |= IW_ENCODE_ENABLED;
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		ext->key_len = min_t(u16, sizeof(struct orinoco_tkip_key),
+				     max_key_len);
+		memcpy(ext->key, &priv->tkip_key[idx], ext->key_len);
+		encoding->flags |= IW_ENCODE_ENABLED;
+		break;
+	}
+
+	err = 0;
+ out:
+	orinoco_unlock(priv, &flags);
+
+	return err;
+}
+
+static int orinoco_ioctl_set_auth(struct net_device *dev,
+				  struct iw_request_info *info,
+				  union iwreq_data *wrqu, char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	hermes_t *hw = &priv->hw;
+	struct iw_param *param = &wrqu->param;
+	unsigned long flags;
+	int ret = -EINPROGRESS;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+	case IW_AUTH_PRIVACY_INVOKED:
+	case IW_AUTH_DROP_UNENCRYPTED:
+		/*
+		 * orinoco does not use these parameters
+		 */
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		/* wl_lkm implies value 2 == PSK for Hermes I,
+		 * which ties in with WEXT.
+		 * No other hints, though.
+		 */
+		priv->key_mgmt = param->value;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		/* When countermeasures are enabled, shut down the
+		 * card; when disabled, re-enable the card. This must
+		 * take effect immediately.
+		 *
+		 * TODO: Make sure that the EAPOL message is getting
+		 *       out before card disabled
+		 */
+		if (param->value) {
+			priv->tkip_cm_active = 1;
+			ret = hermes_enable_port(hw, 0);
+		} else {
+			priv->tkip_cm_active = 0;
+			ret = hermes_disable_port(hw, 0);
+		}
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		if (param->value & IW_AUTH_ALG_SHARED_KEY)
+			priv->wep_restrict = 1;
+		else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
+			priv->wep_restrict = 0;
+		else
+			ret = -EINVAL;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (priv->has_wpa) {
+			priv->wpa_enabled = param->value ? 1 : 0;
+		} else {
+			if (param->value)
+				ret = -EOPNOTSUPP;
+			/* else silently accept disable of WPA */
+			priv->wpa_enabled = 0;
+		}
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	orinoco_unlock(priv, &flags);
+	return ret;
+}
+
+static int orinoco_ioctl_get_auth(struct net_device *dev,
+				  struct iw_request_info *info,
+				  union iwreq_data *wrqu, char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	struct iw_param *param = &wrqu->param;
+	unsigned long flags;
+	int ret = 0;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_KEY_MGMT:
+		param->value = priv->key_mgmt;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		param->value = priv->tkip_cm_active;
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		if (priv->wep_restrict)
+			param->value = IW_AUTH_ALG_SHARED_KEY;
+		else
+			param->value = IW_AUTH_ALG_OPEN_SYSTEM;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		param->value = priv->wpa_enabled;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	orinoco_unlock(priv, &flags);
+	return ret;
+}
+
+static int orinoco_ioctl_set_genie(struct net_device *dev,
+				   struct iw_request_info *info,
+				   union iwreq_data *wrqu, char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	u8 *buf;
+	unsigned long flags;
+	int err = 0;
+
+	if ((wrqu->data.length > MAX_WPA_IE_LEN) ||
+	    (wrqu->data.length && (extra == NULL)))
+		return -EINVAL;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	if (wrqu->data.length) {
+		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+		if (buf == NULL) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(buf, extra, wrqu->data.length);
+		kfree(priv->wpa_ie);
+		priv->wpa_ie = buf;
+		priv->wpa_ie_len = wrqu->data.length;
+	} else {
+		kfree(priv->wpa_ie);
+		priv->wpa_ie = NULL;
+		priv->wpa_ie_len = 0;
+	}
+
+	if (priv->wpa_ie) {
+		/* Looks like wl_lkm wants to check the auth alg, and
+		 * somehow pass it to the firmware.
+		 * Instead it just calls the key mgmt rid
+		 *   - we do this in set auth.
+		 */
+	}
+
+out:
+	orinoco_unlock(priv, &flags);
+	return err;
+}
+
+static int orinoco_ioctl_get_genie(struct net_device *dev,
+				   struct iw_request_info *info,
+				   union iwreq_data *wrqu, char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	unsigned long flags;
+	int err = 0;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
+		wrqu->data.length = 0;
+		goto out;
+	}
+
+	if (wrqu->data.length < priv->wpa_ie_len) {
+		err = -E2BIG;
+		goto out;
+	}
+
+	wrqu->data.length = priv->wpa_ie_len;
+	memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
+
+out:
+	orinoco_unlock(priv, &flags);
+	return err;
+}
+
+static int orinoco_ioctl_set_mlme(struct net_device *dev,
+				  struct iw_request_info *info,
+				  union iwreq_data *wrqu, char *extra)
+{
+	struct orinoco_private *priv = netdev_priv(dev);
+	hermes_t *hw = &priv->hw;
+	struct iw_mlme *mlme = (struct iw_mlme *)extra;
+	unsigned long flags;
+	int ret = 0;
+
+	if (orinoco_lock(priv, &flags) != 0)
+		return -EBUSY;
+
+	switch (mlme->cmd) {
+	case IW_MLME_DEAUTH:
+		/* silently ignore */
+		break;
+
+	case IW_MLME_DISASSOC:
+	{
+		struct {
+			u8 addr[ETH_ALEN];
+			__le16 reason_code;
+		} __attribute__ ((packed)) buf;
+
+		memcpy(buf.addr, mlme->addr.sa_data, ETH_ALEN);
+		buf.reason_code = cpu_to_le16(mlme->reason_code);
+		ret = HERMES_WRITE_RECORD(hw, USER_BAP,
+					  HERMES_RID_CNFDISASSOCIATE,
+					  &buf);
+		break;
+	}
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	orinoco_unlock(priv, &flags);
+	return ret;
+}
+
 static int orinoco_ioctl_getretry(struct net_device *dev,
 				  struct iw_request_info *info,
 				  struct iw_param *rrq,
@@ -3947,14 +5288,15 @@
 	return err;
 }
 
-/* Trigger a scan (look for other cells in the vicinity */
+/* Trigger a scan (look for other cells in the vicinity) */
 static int orinoco_ioctl_setscan(struct net_device *dev,
 				 struct iw_request_info *info,
-				 struct iw_param *srq,
+				 struct iw_point *srq,
 				 char *extra)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
 	hermes_t *hw = &priv->hw;
+	struct iw_scan_req *si = (struct iw_scan_req *) extra;
 	int err = 0;
 	unsigned long flags;
 
@@ -3986,7 +5328,6 @@
 	 * we access scan variables in priv is critical.
 	 *	o scan_inprogress : not touched by irq handler
 	 *	o scan_mode : not touched by irq handler
-	 *	o scan_len : synchronised with scan_result
 	 * Before modifying anything on those variables, please think hard !
 	 * Jean II */
 
@@ -4016,13 +5357,43 @@
 		}
 		break;
 		case FIRMWARE_TYPE_AGERE:
-			err = hermes_write_wordrec(hw, USER_BAP,
+			if (priv->scan_mode & IW_SCAN_THIS_ESSID) {
+				struct hermes_idstring idbuf;
+				size_t len = min(sizeof(idbuf.val),
+						 (size_t) si->essid_len);
+				idbuf.len = cpu_to_le16(len);
+				memcpy(idbuf.val, si->essid, len);
+
+				err = hermes_write_ltv(hw, USER_BAP,
+					       HERMES_RID_CNFSCANSSID_AGERE,
+					       HERMES_BYTES_TO_RECLEN(len + 2),
+					       &idbuf);
+			} else
+				err = hermes_write_wordrec(hw, USER_BAP,
 						   HERMES_RID_CNFSCANSSID_AGERE,
 						   0);	/* Any ESSID */
 			if (err)
 				break;
 
-			err = hermes_inquire(hw, HERMES_INQ_SCAN);
+			if (priv->has_ext_scan) {
+				/* Clear scan results at the start of
+				 * an extended scan */
+				orinoco_clear_scan_results(priv,
+						msecs_to_jiffies(15000));
+
+				/* TODO: Is this available on older firmware?
+				 *   Can we use it to scan specific channels
+				 *   for IW_SCAN_THIS_FREQ? */
+				err = hermes_write_wordrec(hw, USER_BAP,
+						HERMES_RID_CNFSCANCHANNELS2GHZ,
+						0x7FFF);
+				if (err)
+					goto out;
+
+				err = hermes_inquire(hw,
+						     HERMES_INQ_CHANNELINFO);
+			} else
+				err = hermes_inquire(hw, HERMES_INQ_SCAN);
 			break;
 		}
 	} else
@@ -4040,8 +5411,7 @@
 #define MAX_CUSTOM_LEN 64
 
 /* Translate scan data returned from the card to a card-independent
- * format that the Wireless Tools will understand - Jean II
- * Return message length or -errno for fatal errors */
+ * format that the Wireless Tools will understand - Jean II */
 static inline char *orinoco_translate_scan(struct net_device *dev,
 					   struct iw_request_info *info,
 					   char *current_ev,
@@ -4053,9 +5423,10 @@
 	u16			capabilities;
 	u16			channel;
 	struct iw_event		iwe;		/* Temporary buffer */
-	char                   *p;
 	char custom[MAX_CUSTOM_LEN];
 
+	memset(&iwe, 0, sizeof(iwe));
+
 	/* First entry *MUST* be the AP MAC address */
 	iwe.cmd = SIOCGIWAP;
 	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
@@ -4077,8 +5448,8 @@
 	/* Add mode */
 	iwe.cmd = SIOCGIWMODE;
 	capabilities = le16_to_cpu(bss->a.capabilities);
-	if (capabilities & 0x3) {
-		if (capabilities & 0x1)
+	if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+		if (capabilities & WLAN_CAPABILITY_ESS)
 			iwe.u.mode = IW_MODE_MASTER;
 		else
 			iwe.u.mode = IW_MODE_ADHOC;
@@ -4088,17 +5459,22 @@
 
 	channel = bss->s.channel;
 	if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
-		/* Add frequency */
+		/* Add channel and frequency */
 		iwe.cmd = SIOCGIWFREQ;
+		iwe.u.freq.m = channel;
+		iwe.u.freq.e = 0;
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_FREQ_LEN);
+
 		iwe.u.freq.m = channel_frequency[channel-1] * 100000;
 		iwe.u.freq.e = 1;
 		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
 						  &iwe, IW_EV_FREQ_LEN);
 	}
 
-	/* Add quality statistics */
+	/* Add quality statistics. level and noise in dB. No link quality */
 	iwe.cmd = IWEVQUAL;
-	iwe.u.qual.updated = 0x10;	/* no link quality */
+	iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
 	iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95;
 	iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95;
 	/* Wireless tools prior to 27.pre22 will show link quality
@@ -4112,25 +5488,13 @@
 
 	/* Add encryption capability */
 	iwe.cmd = SIOCGIWENCODE;
-	if (capabilities & 0x10)
+	if (capabilities & WLAN_CAPABILITY_PRIVACY)
 		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
 	else
 		iwe.u.data.flags = IW_ENCODE_DISABLED;
 	iwe.u.data.length = 0;
 	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-					  &iwe, bss->a.essid);
-
-	/* Add EXTRA: Age to display seconds since last beacon/probe response
-	 * for given network. */
-	iwe.cmd = IWEVCUSTOM;
-	p = custom;
-	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
-		      " Last beacon: %dms ago",
-		      jiffies_to_msecs(jiffies - last_scanned));
-	iwe.u.data.length = p - custom;
-	if (iwe.u.data.length)
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, custom);
+					  &iwe, NULL);
 
 	/* Bit rate is not available in Lucent/Agere firmwares */
 	if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
@@ -4152,7 +5516,8 @@
 			if (bss->p.rates[i] == 0x0)
 				break;
 			/* Bit rate given in 500 kb/s units (+ 0x80) */
-			iwe.u.bitrate.value = ((bss->p.rates[i] & 0x7f) * 500000);
+			iwe.u.bitrate.value =
+				((bss->p.rates[i] & 0x7f) * 500000);
 			current_val = iwe_stream_add_value(info, current_ev,
 							   current_val,
 							   end_buf, &iwe,
@@ -4163,6 +5528,199 @@
 			current_ev = current_val;
 	}
 
+	/* Beacon interval */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     "bcn_int=%d",
+				     le16_to_cpu(bss->a.beacon_interv));
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	/* Capabilities */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     "capab=0x%04x",
+				     capabilities);
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	/* Add EXTRA: Age to display seconds since last beacon/probe response
+	 * for given network. */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     " Last beacon: %dms ago",
+				     jiffies_to_msecs(jiffies - last_scanned));
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	return current_ev;
+}
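
The scan translation above now reports each BSS channel twice through SIOCGIWFREQ: once as a bare channel number (m = channel, e = 0) and once as a frequency in Hz (an iw_freq value decodes as m * 10^e). Wireless tools conventionally treat small values as channel numbers, so both forms can share the one event type. A small sketch of how user space decodes the event (struct iw_freq comes from linux/wireless.h):

/* Sketch only: decode an iw_freq value reported via SIOCGIWSCAN. */
static double iw_freq_to_hz(const struct iw_freq *f)
{
	double v = f->m;
	int i;

	for (i = 0; i < f->e; i++)
		v *= 10.0;
	return v;	/* small values (e.g. 1..14 with e == 0) are channel numbers */
}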
+
+static inline char *orinoco_translate_ext_scan(struct net_device *dev,
+					       struct iw_request_info *info,
+					       char *current_ev,
+					       char *end_buf,
+					       struct agere_ext_scan_info *bss,
+					       unsigned int last_scanned)
+{
+	u16			capabilities;
+	u16			channel;
+	struct iw_event		iwe;		/* Temporary buffer */
+	char custom[MAX_CUSTOM_LEN];
+	u8 *ie;
+
+	memset(&iwe, 0, sizeof(iwe));
+
+	/* First entry *MUST* be the AP MAC address */
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+					  &iwe, IW_EV_ADDR_LEN);
+
+	/* Other entries will be displayed in the order we give them */
+
+	/* Add the ESSID */
+	ie = bss->data;
+	iwe.u.data.length = ie[1];
+	if (iwe.u.data.length) {
+		if (iwe.u.data.length > 32)
+			iwe.u.data.length = 32;
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, &ie[2]);
+	}
+
+	/* Add mode */
+	capabilities = le16_to_cpu(bss->capabilities);
+	if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+		iwe.cmd = SIOCGIWMODE;
+		if (capabilities & WLAN_CAPABILITY_ESS)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_UINT_LEN);
+	}
+
+	ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_DS_SET);
+	channel = ie ? ie[2] : 0;
+	if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
+		/* Add channel and frequency */
+		iwe.cmd = SIOCGIWFREQ;
+		iwe.u.freq.m = channel;
+		iwe.u.freq.e = 0;
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_FREQ_LEN);
+
+		iwe.u.freq.m = channel_frequency[channel-1] * 100000;
+		iwe.u.freq.e = 1;
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_FREQ_LEN);
+	}
+
+	/* Add quality statistics. level and noise in dB. No link quality */
+	iwe.cmd = IWEVQUAL;
+	iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
+	iwe.u.qual.level = bss->level - 0x95;
+	iwe.u.qual.noise = bss->noise - 0x95;
+	/* Wireless tools prior to 27.pre22 will show link quality
+	 * anyway, so we provide a reasonable value. */
+	if (iwe.u.qual.level > iwe.u.qual.noise)
+		iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
+	else
+		iwe.u.qual.qual = 0;
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+					  &iwe, IW_EV_QUAL_LEN);
+
+	/* Add encryption capability */
+	iwe.cmd = SIOCGIWENCODE;
+	if (capabilities & WLAN_CAPABILITY_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+					  &iwe, NULL);
+
+	/* WPA IE */
+	ie = orinoco_get_wpa_ie(bss->data, sizeof(bss->data));
+	if (ie) {
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = ie[1] + 2;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, ie);
+	}
+
+	/* RSN IE */
+	ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RSN);
+	if (ie) {
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = ie[1] + 2;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, ie);
+	}
+
+	ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RATES);
+	if (ie) {
+		char *p = current_ev + iwe_stream_lcp_len(info);
+		int i;
+
+		iwe.cmd = SIOCGIWRATE;
+		/* Those two flags are ignored... */
+		iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+		for (i = 2; i < (ie[1] + 2); i++) {
+			iwe.u.bitrate.value = ((ie[i] & 0x7F) * 500000);
+			p = iwe_stream_add_value(info, current_ev, p, end_buf,
+						 &iwe, IW_EV_PARAM_LEN);
+		}
+		/* Check if we added any event */
+		if (p > (current_ev + iwe_stream_lcp_len(info)))
+			current_ev = p;
+	}
+
+	/* Timestamp */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length =
+		snprintf(custom, MAX_CUSTOM_LEN, "tsf=%016llx",
+			 (unsigned long long) le64_to_cpu(bss->timestamp));
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	/* Beacon interval */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     "bcn_int=%d",
+				     le16_to_cpu(bss->beacon_interval));
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	/* Capabilities */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     "capab=0x%04x",
+				     capabilities);
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
+	/* Add EXTRA: Age to display seconds since last beacon/probe response
+	 * for given network. */
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
+				     " Last beacon: %dms ago",
+				     jiffies_to_msecs(jiffies - last_scanned));
+	if (iwe.u.data.length)
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, custom);
+
 	return current_ev;
 }
 
@@ -4173,7 +5731,6 @@
 				 char *extra)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
-	bss_element *bss;
 	int err = 0;
 	unsigned long flags;
 	char *current_ev = extra;
@@ -4193,18 +5750,47 @@
 		goto out;
 	}
 
-	list_for_each_entry(bss, &priv->bss_list, list) {
-		/* Translate to WE format this entry */
-		current_ev = orinoco_translate_scan(dev, info, current_ev,
-						    extra + srq->length,
-						    &bss->bss,
-						    bss->last_scanned);
+	if (priv->has_ext_scan) {
+		struct xbss_element *bss;
 
-		/* Check if there is space for one more entry */
-		if ((extra + srq->length - current_ev) <= IW_EV_ADDR_LEN) {
-			/* Ask user space to try again with a bigger buffer */
-			err = -E2BIG;
-			goto out;
+		list_for_each_entry(bss, &priv->bss_list, list) {
+			/* Translate this entry to WE format */
+			current_ev =
+				orinoco_translate_ext_scan(dev, info,
+							   current_ev,
+							   extra + srq->length,
+							   &bss->bss,
+							   bss->last_scanned);
+
+			/* Check if there is space for one more entry */
+			if ((extra + srq->length - current_ev)
+			    <= IW_EV_ADDR_LEN) {
+				/* Ask user space to try again with a
+				 * bigger buffer */
+				err = -E2BIG;
+				goto out;
+			}
+		}
+
+	} else {
+		struct bss_element *bss;
+
+		list_for_each_entry(bss, &priv->bss_list, list) {
+			/* Translate this entry to WE format */
+			current_ev = orinoco_translate_scan(dev, info,
+							    current_ev,
+							    extra + srq->length,
+							    &bss->bss,
+							    bss->last_scanned);
+
+			/* Check if there is space for one more entry */
+			if ((extra + srq->length - current_ev)
+			    <= IW_EV_ADDR_LEN) {
+				/* Ask user space to try again with a
+				 * bigger buffer */
+				err = -E2BIG;
+				goto out;
+			}
 		}
 	}
 
@@ -4295,39 +5881,48 @@
  * Structures to export the Wireless Handlers
  */
 
+#define STD_IW_HANDLER(id, func) \
+	[IW_IOCTL_IDX(id)] = (iw_handler) func
 static const iw_handler	orinoco_handler[] = {
-	[SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
-	[SIOCGIWNAME  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
-	[SIOCSIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
-	[SIOCGIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
-	[SIOCSIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
-	[SIOCGIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
-	[SIOCSIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
-	[SIOCGIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
-	[SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
-	[SIOCSIWSPY   -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
-	[SIOCGIWSPY   -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
-	[SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
-	[SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
-	[SIOCSIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
-	[SIOCGIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
-	[SIOCSIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
-	[SIOCGIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
-	[SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
-	[SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
-	[SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
-	[SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
-	[SIOCSIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
-	[SIOCGIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
-	[SIOCSIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
-	[SIOCGIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
-	[SIOCSIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
-	[SIOCGIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
-	[SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
-	[SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
-	[SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
-	[SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
-	[SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
+	STD_IW_HANDLER(SIOCSIWCOMMIT,	orinoco_ioctl_commit),
+	STD_IW_HANDLER(SIOCGIWNAME,	orinoco_ioctl_getname),
+	STD_IW_HANDLER(SIOCSIWFREQ,	orinoco_ioctl_setfreq),
+	STD_IW_HANDLER(SIOCGIWFREQ,	orinoco_ioctl_getfreq),
+	STD_IW_HANDLER(SIOCSIWMODE,	orinoco_ioctl_setmode),
+	STD_IW_HANDLER(SIOCGIWMODE,	orinoco_ioctl_getmode),
+	STD_IW_HANDLER(SIOCSIWSENS,	orinoco_ioctl_setsens),
+	STD_IW_HANDLER(SIOCGIWSENS,	orinoco_ioctl_getsens),
+	STD_IW_HANDLER(SIOCGIWRANGE,	orinoco_ioctl_getiwrange),
+	STD_IW_HANDLER(SIOCSIWSPY,	iw_handler_set_spy),
+	STD_IW_HANDLER(SIOCGIWSPY,	iw_handler_get_spy),
+	STD_IW_HANDLER(SIOCSIWTHRSPY,	iw_handler_set_thrspy),
+	STD_IW_HANDLER(SIOCGIWTHRSPY,	iw_handler_get_thrspy),
+	STD_IW_HANDLER(SIOCSIWAP,	orinoco_ioctl_setwap),
+	STD_IW_HANDLER(SIOCGIWAP,	orinoco_ioctl_getwap),
+	STD_IW_HANDLER(SIOCSIWSCAN,	orinoco_ioctl_setscan),
+	STD_IW_HANDLER(SIOCGIWSCAN,	orinoco_ioctl_getscan),
+	STD_IW_HANDLER(SIOCSIWESSID,	orinoco_ioctl_setessid),
+	STD_IW_HANDLER(SIOCGIWESSID,	orinoco_ioctl_getessid),
+	STD_IW_HANDLER(SIOCSIWNICKN,	orinoco_ioctl_setnick),
+	STD_IW_HANDLER(SIOCGIWNICKN,	orinoco_ioctl_getnick),
+	STD_IW_HANDLER(SIOCSIWRATE,	orinoco_ioctl_setrate),
+	STD_IW_HANDLER(SIOCGIWRATE,	orinoco_ioctl_getrate),
+	STD_IW_HANDLER(SIOCSIWRTS,	orinoco_ioctl_setrts),
+	STD_IW_HANDLER(SIOCGIWRTS,	orinoco_ioctl_getrts),
+	STD_IW_HANDLER(SIOCSIWFRAG,	orinoco_ioctl_setfrag),
+	STD_IW_HANDLER(SIOCGIWFRAG,	orinoco_ioctl_getfrag),
+	STD_IW_HANDLER(SIOCGIWRETRY,	orinoco_ioctl_getretry),
+	STD_IW_HANDLER(SIOCSIWENCODE,	orinoco_ioctl_setiwencode),
+	STD_IW_HANDLER(SIOCGIWENCODE,	orinoco_ioctl_getiwencode),
+	STD_IW_HANDLER(SIOCSIWPOWER,	orinoco_ioctl_setpower),
+	STD_IW_HANDLER(SIOCGIWPOWER,	orinoco_ioctl_getpower),
+	STD_IW_HANDLER(SIOCSIWGENIE,	orinoco_ioctl_set_genie),
+	STD_IW_HANDLER(SIOCGIWGENIE,	orinoco_ioctl_get_genie),
+	STD_IW_HANDLER(SIOCSIWMLME,	orinoco_ioctl_set_mlme),
+	STD_IW_HANDLER(SIOCSIWAUTH,	orinoco_ioctl_set_auth),
+	STD_IW_HANDLER(SIOCGIWAUTH,	orinoco_ioctl_get_auth),
+	STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
+	STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
 };
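
The STD_IW_HANDLER macro only hides the old index arithmetic: IW_IOCTL_IDX() in linux/wireless.h subtracts SIOCIWFIRST from the ioctl number, so each entry still lands in the same array slot as the open-coded version it replaces. Roughly, for one entry (expansion shown for illustration only):

STD_IW_HANDLER(SIOCGIWNAME, orinoco_ioctl_getname)
/* expands to approximately: */
[SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,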
 
 
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index c6b1858..981570b 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -9,6 +9,7 @@
 
 #define DRIVER_VERSION "0.15"
 
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
@@ -30,27 +31,57 @@
 	char data[ORINOCO_MAX_KEY_SIZE];
 } __attribute__ ((packed));
 
+#define TKIP_KEYLEN	16
+#define MIC_KEYLEN	8
+
+struct orinoco_tkip_key {
+	u8 tkip[TKIP_KEYLEN];
+	u8 tx_mic[MIC_KEYLEN];
+	u8 rx_mic[MIC_KEYLEN];
+};
+
 typedef enum {
 	FIRMWARE_TYPE_AGERE,
 	FIRMWARE_TYPE_INTERSIL,
 	FIRMWARE_TYPE_SYMBOL
 } fwtype_t;
 
-typedef struct {
+struct bss_element {
 	union hermes_scan_info bss;
 	unsigned long last_scanned;
 	struct list_head list;
-} bss_element;
+};
+
+struct xbss_element {
+	struct agere_ext_scan_info bss;
+	unsigned long last_scanned;
+	struct list_head list;
+};
+
+struct hermes_rx_descriptor;
+
+struct orinoco_rx_data {
+	struct hermes_rx_descriptor *desc;
+	struct sk_buff *skb;
+	struct list_head list;
+};
 
 struct orinoco_private {
 	void *card;	/* Pointer to card dependent structure */
+	struct device *dev;
 	int (*hard_reset)(struct orinoco_private *);
+	int (*stop_fw)(struct orinoco_private *, int);
 
 	/* Synchronisation stuff */
 	spinlock_t lock;
 	int hw_unavailable;
 	struct work_struct reset_work;
 
+	/* Interrupt tasklets */
+	struct tasklet_struct rx_tasklet;
+	struct list_head rx_list;
+	struct orinoco_rx_data *rx_data;
+
 	/* driver state */
 	int open;
 	u16 last_linkstatus;
@@ -83,13 +114,17 @@
 	unsigned int has_preamble:1;
 	unsigned int has_sensitivity:1;
 	unsigned int has_hostscan:1;
+	unsigned int has_alt_txcntl:1;
+	unsigned int has_ext_scan:1;
+	unsigned int has_wpa:1;
+	unsigned int do_fw_download:1;
 	unsigned int broken_disableport:1;
 	unsigned int broken_monitor:1;
 
 	/* Configuration parameters */
 	u32 iw_mode;
 	int prefer_port3;
-	u16 wep_on, wep_restrict, tx_key;
+	u16 encode_alg, wep_restrict, tx_key;
 	struct orinoco_key keys[ORINOCO_MAX_KEYS];
 	int bitratemode;
  	char nick[IW_ESSID_MAX_SIZE+1];
@@ -113,10 +148,22 @@
 	/* Scanning support */
 	struct list_head bss_list;
 	struct list_head bss_free_list;
-	bss_element *bss_data;
+	void *bss_xbss_data;
 
 	int	scan_inprogress;	/* Scan pending... */
 	u32	scan_mode;		/* Type of scan done */
+
+	/* WPA support */
+	u8 *wpa_ie;
+	int wpa_ie_len;
+
+	struct orinoco_tkip_key tkip_key[ORINOCO_MAX_KEYS];
+	struct crypto_hash *rx_tfm_mic;
+	struct crypto_hash *tx_tfm_mic;
+
+	unsigned int wpa_enabled:1;
+	unsigned int tkip_cm_active:1;
+	unsigned int key_mgmt:3;
 };
 
 #ifdef ORINOCO_DEBUG
@@ -130,8 +177,10 @@
 /* Exported prototypes                                              */
 /********************************************************************/
 
-extern struct net_device *alloc_orinocodev(int sizeof_card,
-					   int (*hard_reset)(struct orinoco_private *));
+extern struct net_device *alloc_orinocodev(
+	int sizeof_card, struct device *device,
+	int (*hard_reset)(struct orinoco_private *),
+	int (*stop_fw)(struct orinoco_private *, int));
 extern void free_orinocodev(struct net_device *dev);
 extern int __orinoco_up(struct net_device *dev);
 extern int __orinoco_down(struct net_device *dev);
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 1c216e0..9eaa252 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -109,7 +109,8 @@
 	struct orinoco_private *priv;
 	struct orinoco_pccard *card;
 
-	dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
+	dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
+			       orinoco_cs_hard_reset, NULL);
 	if (! dev)
 		return -ENOMEM;
 	priv = netdev_priv(dev);
@@ -120,7 +121,7 @@
 	link->priv = dev;
 
 	/* Interrupt setup */
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
 	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	link->irq.Handler = orinoco_interrupt;
 	link->irq.Instance = dev; 
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index 35ec5fc..2fc8659 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -182,7 +182,8 @@
 	}
 
 	/* Allocate network device */
-	dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset);
+	dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
+			       orinoco_nortel_cor_reset, NULL);
 	if (!dev) {
 		printk(KERN_ERR PFX "Cannot allocate network device\n");
 		err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 2547d5d..4ebd638 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -139,7 +139,8 @@
 	}
 
 	/* Allocate network device */
-	dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset);
+	dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
+			       orinoco_pci_cor_reset, NULL);
 	if (!dev) {
 		printk(KERN_ERR PFX "Cannot allocate network device\n");
 		err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 98fe165..ef76185 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -221,7 +221,8 @@
 	}
 
 	/* Allocate network device */
-	dev = alloc_orinocodev(sizeof(*card), orinoco_plx_cor_reset);
+	dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
+			       orinoco_plx_cor_reset, NULL);
 	if (!dev) {
 		printk(KERN_ERR PFX "Cannot allocate network device\n");
 		err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index df49318..ede24ec 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -124,7 +124,8 @@
 	}
 
 	/* Allocate network device */
-	dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset);
+	dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
+			       orinoco_tmd_cor_reset, NULL);
 	if (!dev) {
 		printk(KERN_ERR PFX "Cannot allocate network device\n");
 		err = -ENOMEM;
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 4801a36..1d0704f 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -1,5 +1,5 @@
-#ifndef PRISM54_H
-#define PRISM54_H
+#ifndef P54_H
+#define P54_H
 
 /*
  * Shared defines for all mac80211 Prism54 code
@@ -19,13 +19,24 @@
 	P54_CONTROL_TYPE_CHANNEL_CHANGE,
 	P54_CONTROL_TYPE_FREQDONE,
 	P54_CONTROL_TYPE_DCFINIT,
-	P54_CONTROL_TYPE_FREEQUEUE = 7,
+	P54_CONTROL_TYPE_ENCRYPTION,
+	P54_CONTROL_TYPE_TIM,
+	P54_CONTROL_TYPE_POWERMGT,
+	P54_CONTROL_TYPE_FREEQUEUE,
 	P54_CONTROL_TYPE_TXDONE,
 	P54_CONTROL_TYPE_PING,
 	P54_CONTROL_TYPE_STAT_READBACK,
 	P54_CONTROL_TYPE_BBP,
 	P54_CONTROL_TYPE_EEPROM_READBACK,
-	P54_CONTROL_TYPE_LED
+	P54_CONTROL_TYPE_LED,
+	P54_CONTROL_TYPE_GPIO,
+	P54_CONTROL_TYPE_TIMER,
+	P54_CONTROL_TYPE_MODULATION,
+	P54_CONTROL_TYPE_SYNTH_CONFIG,
+	P54_CONTROL_TYPE_DETECTOR_VALUE,
+	P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
+	P54_CONTROL_TYPE_CCE_QUIET,
+	P54_CONTROL_TYPE_PSM_STA_UNLOCK,
 };
 
 struct p54_control_hdr {
@@ -38,11 +49,15 @@
 	u8 data[0];
 } __attribute__ ((packed));
 
-#define EEPROM_READBACK_LEN (sizeof(struct p54_control_hdr) + 4 /* p54_eeprom_lm86 */)
-#define MAX_RX_SIZE (IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct p54_control_hdr) + 20 /* length of struct p54_rx_hdr */ + 16 )
+#define EEPROM_READBACK_LEN 0x3fc
 
 #define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
 
+#define FW_FMAC 0x464d4143
+#define FW_LM86 0x4c4d3836
+#define FW_LM87 0x4c4d3837
+#define FW_LM20 0x4c4d3230
+
 struct p54_common {
 	u32 rx_start;
 	u32 rx_end;
@@ -53,27 +68,43 @@
 	void (*stop)(struct ieee80211_hw *dev);
 	int mode;
 	u16 seqno;
+	u16 rx_mtu;
+	u8 headroom;
+	u8 tailroom;
 	struct mutex conf_mutex;
 	u8 mac_addr[ETH_ALEN];
 	u8 bssid[ETH_ALEN];
+	__le16 filter_type;
 	struct pda_iq_autocal_entry *iq_autocal;
 	unsigned int iq_autocal_len;
 	struct pda_channel_output_limit *output_limit;
 	unsigned int output_limit_len;
 	struct pda_pa_curve_data *curve_data;
-	__le16 rxhw;
+	unsigned int filter_flags;
+	u16 rxhw;
 	u8 version;
+	u8 rx_antenna;
 	unsigned int tx_hdr_len;
 	void *cached_vdcf;
 	unsigned int fw_var;
-	struct ieee80211_tx_queue_stats tx_stats[4];
+	unsigned int fw_interface;
+	unsigned int output_power;
+	u32 tsf_low32;
+	u32 tsf_high32;
+	struct ieee80211_tx_queue_stats tx_stats[8];
+	struct ieee80211_low_level_stats stats;
+	struct timer_list stats_timer;
+	struct completion stats_comp;
+	void *cached_stats;
+	int noise;
+	void *eeprom;
+	struct completion eeprom_comp;
 };
 
 int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
-void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
-int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len);
-void p54_fill_eeprom_readback(struct p54_control_hdr *hdr);
+int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
+int p54_read_eeprom(struct ieee80211_hw *dev);
 struct ieee80211_hw *p54_init_common(size_t priv_data_len);
 void p54_free_common(struct ieee80211_hw *dev);
 
-#endif /* PRISM54_H */
+#endif /* P54_H */
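
The FW_* interface IDs above are simply the ASCII names of the firmware families packed into a big-endian word: 0x464d4143 is "FMAC", 0x4c4d3836 is "LM86", 0x4c4d3837 is "LM87" and 0x4c4d3230 is "LM20". p54_parse_firmware() stores the raw value in priv->fw_interface so the back ends (for example the LM87 handling in p54usb later in this patch) can key off it. A standalone sketch that decodes such a tag for logging; fw_tag_to_str() is an invented helper, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Decode a big-endian firmware component tag into printable ASCII. */
static const char *fw_tag_to_str(uint32_t tag, char buf[5])
{
	buf[0] = (tag >> 24) & 0xff;
	buf[1] = (tag >> 16) & 0xff;
	buf[2] = (tag >> 8) & 0xff;
	buf[3] = tag & 0xff;
	buf[4] = '\0';
	return buf;
}

int main(void)
{
	char buf[5];

	printf("%s\n", fw_tag_to_str(0x464d4143, buf));	/* FMAC */
	printf("%s\n", fw_tag_to_str(0x4c4d3836, buf));	/* LM86 */
	return 0;
}
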
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 29be3dc..de5e8f4 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -27,7 +27,7 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("prism54common");
 
-static struct ieee80211_rate p54_rates[] = {
+static struct ieee80211_rate p54_bgrates[] = {
 	{ .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -42,7 +42,7 @@
 	{ .bitrate = 540, .hw_value = 11, },
 };
 
-static struct ieee80211_channel p54_channels[] = {
+static struct ieee80211_channel p54_bgchannels[] = {
 	{ .center_freq = 2412, .hw_value = 1, },
 	{ .center_freq = 2417, .hw_value = 2, },
 	{ .center_freq = 2422, .hw_value = 3, },
@@ -60,14 +60,69 @@
 };
 
 static struct ieee80211_supported_band band_2GHz = {
-	.channels = p54_channels,
-	.n_channels = ARRAY_SIZE(p54_channels),
-	.bitrates = p54_rates,
-	.n_bitrates = ARRAY_SIZE(p54_rates),
+	.channels = p54_bgchannels,
+	.n_channels = ARRAY_SIZE(p54_bgchannels),
+	.bitrates = p54_bgrates,
+	.n_bitrates = ARRAY_SIZE(p54_bgrates),
 };
 
+static struct ieee80211_rate p54_arates[] = {
+	{ .bitrate = 60, .hw_value = 4, },
+	{ .bitrate = 90, .hw_value = 5, },
+	{ .bitrate = 120, .hw_value = 6, },
+	{ .bitrate = 180, .hw_value = 7, },
+	{ .bitrate = 240, .hw_value = 8, },
+	{ .bitrate = 360, .hw_value = 9, },
+	{ .bitrate = 480, .hw_value = 10, },
+	{ .bitrate = 540, .hw_value = 11, },
+};
 
-void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
+static struct ieee80211_channel p54_achannels[] = {
+	{ .center_freq = 4920 },
+	{ .center_freq = 4940 },
+	{ .center_freq = 4960 },
+	{ .center_freq = 4980 },
+	{ .center_freq = 5040 },
+	{ .center_freq = 5060 },
+	{ .center_freq = 5080 },
+	{ .center_freq = 5170 },
+	{ .center_freq = 5180 },
+	{ .center_freq = 5190 },
+	{ .center_freq = 5200 },
+	{ .center_freq = 5210 },
+	{ .center_freq = 5220 },
+	{ .center_freq = 5230 },
+	{ .center_freq = 5240 },
+	{ .center_freq = 5260 },
+	{ .center_freq = 5280 },
+	{ .center_freq = 5300 },
+	{ .center_freq = 5320 },
+	{ .center_freq = 5500 },
+	{ .center_freq = 5520 },
+	{ .center_freq = 5540 },
+	{ .center_freq = 5560 },
+	{ .center_freq = 5580 },
+	{ .center_freq = 5600 },
+	{ .center_freq = 5620 },
+	{ .center_freq = 5640 },
+	{ .center_freq = 5660 },
+	{ .center_freq = 5680 },
+	{ .center_freq = 5700 },
+	{ .center_freq = 5745 },
+	{ .center_freq = 5765 },
+	{ .center_freq = 5785 },
+	{ .center_freq = 5805 },
+	{ .center_freq = 5825 },
+};
+
+static struct ieee80211_supported_band band_5GHz = {
+	.channels = p54_achannels,
+	.n_channels = ARRAY_SIZE(p54_achannels),
+	.bitrates = p54_arates,
+	.n_bitrates = ARRAY_SIZE(p54_arates),
+};
+
+int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
 {
 	struct p54_common *priv = dev->priv;
 	struct bootrec_exp_if *exp_if;
@@ -79,7 +134,7 @@
 	int i;
 
 	if (priv->rx_start)
-		return;
+		return 0;
 
 	while (data < end_data && *data)
 		data++;
@@ -94,7 +149,9 @@
 		u32 code = le32_to_cpu(bootrec->code);
 		switch (code) {
 		case BR_CODE_COMPONENT_ID:
-			switch (be32_to_cpu(*(__be32 *)bootrec->data)) {
+			priv->fw_interface = be32_to_cpup((__be32 *)
+					     bootrec->data);
+			switch (priv->fw_interface) {
 			case FW_FMAC:
 				printk(KERN_INFO "p54: FreeMAC firmware\n");
 				break;
@@ -105,7 +162,7 @@
 				printk(KERN_INFO "p54: LM86 firmware\n");
 				break;
 			case FW_LM87:
-				printk(KERN_INFO "p54: LM87 firmware - not supported yet!\n");
+				printk(KERN_INFO "p54: LM87 firmware\n");
 				break;
 			default:
 				printk(KERN_INFO "p54: unknown firmware\n");
@@ -117,11 +174,21 @@
 			if (strnlen((unsigned char*)bootrec->data, 24) < 24)
 				fw_version = (unsigned char*)bootrec->data;
 			break;
-		case BR_CODE_DESCR:
-			priv->rx_start = le32_to_cpu(((__le32 *)bootrec->data)[1]);
+		case BR_CODE_DESCR: {
+			struct bootrec_desc *desc =
+				(struct bootrec_desc *)bootrec->data;
+			priv->rx_start = le32_to_cpu(desc->rx_start);
 			/* FIXME add sanity checking */
-			priv->rx_end = le32_to_cpu(((__le32 *)bootrec->data)[2]) - 0x3500;
+			priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500;
+			priv->headroom = desc->headroom;
+			priv->tailroom = desc->tailroom;
+			if (le32_to_cpu(bootrec->len) == 11)
+				priv->rx_mtu = le16_to_cpu(bootrec->rx_mtu);
+			else
+				priv->rx_mtu = (size_t)0x620 - priv->tx_hdr_len;
 			break;
+			}
 		case BR_CODE_EXPOSED_IF:
 			exp_if = (struct bootrec_exp_if *) bootrec->data;
 			for (i = 0; i < (len * sizeof(*exp_if) / 4); i++)
@@ -146,23 +213,25 @@
 
 	if (priv->fw_var >= 0x300) {
 		/* Firmware supports QoS, use it! */
-		priv->tx_stats[0].limit = 3;
-		priv->tx_stats[1].limit = 4;
-		priv->tx_stats[2].limit = 3;
-		priv->tx_stats[3].limit = 1;
+		priv->tx_stats[4].limit = 3;
+		priv->tx_stats[5].limit = 4;
+		priv->tx_stats[6].limit = 3;
+		priv->tx_stats[7].limit = 1;
 		dev->queues = 4;
 	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(p54_parse_firmware);
 
-static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev,
-				    struct pda_pa_curve_data *curve_data)
+static int p54_convert_rev0(struct ieee80211_hw *dev,
+			    struct pda_pa_curve_data *curve_data)
 {
 	struct p54_common *priv = dev->priv;
-	struct pda_pa_curve_data_sample_rev1 *rev1;
-	struct pda_pa_curve_data_sample_rev0 *rev0;
+	struct p54_pa_curve_data_sample *dst;
+	struct pda_pa_curve_data_sample_rev0 *src;
 	size_t cd_len = sizeof(*curve_data) +
-		(curve_data->points_per_channel*sizeof(*rev1) + 2) *
+		(curve_data->points_per_channel*sizeof(*dst) + 2) *
 		 curve_data->channels;
 	unsigned int i, j;
 	void *source, *target;
@@ -180,28 +249,68 @@
 		*((__le16 *)target) = *freq;
 		target += sizeof(__le16);
 		for (j = 0; j < curve_data->points_per_channel; j++) {
-			rev1 = target;
-			rev0 = source;
+			dst = target;
+			src = source;
 
-			rev1->rf_power = rev0->rf_power;
-			rev1->pa_detector = rev0->pa_detector;
-			rev1->data_64qam = rev0->pcv;
+			dst->rf_power = src->rf_power;
+			dst->pa_detector = src->pa_detector;
+			dst->data_64qam = src->pcv;
 			/* "invent" the points for the other modulations */
 #define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y)
-			rev1->data_16qam = SUB(rev0->pcv, 12);
-			rev1->data_qpsk  = SUB(rev1->data_16qam, 12);
-			rev1->data_bpsk  = SUB(rev1->data_qpsk, 12);
-			rev1->data_barker= SUB(rev1->data_bpsk, 14);
+			dst->data_16qam = SUB(src->pcv, 12);
+			dst->data_qpsk = SUB(dst->data_16qam, 12);
+			dst->data_bpsk = SUB(dst->data_qpsk, 12);
+			dst->data_barker = SUB(dst->data_bpsk, 14);
 #undef SUB
-			target += sizeof(*rev1);
-			source += sizeof(*rev0);
+			target += sizeof(*dst);
+			source += sizeof(*src);
 		}
 	}
 
 	return 0;
 }
 
-int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
+static int p54_convert_rev1(struct ieee80211_hw *dev,
+			    struct pda_pa_curve_data *curve_data)
+{
+	struct p54_common *priv = dev->priv;
+	struct p54_pa_curve_data_sample *dst;
+	struct pda_pa_curve_data_sample_rev1 *src;
+	size_t cd_len = sizeof(*curve_data) +
+		(curve_data->points_per_channel*sizeof(*dst) + 2) *
+		 curve_data->channels;
+	unsigned int i, j;
+	void *source, *target;
+
+	priv->curve_data = kmalloc(cd_len, GFP_KERNEL);
+	if (!priv->curve_data)
+		return -ENOMEM;
+
+	memcpy(priv->curve_data, curve_data, sizeof(*curve_data));
+	source = curve_data->data;
+	target = priv->curve_data->data;
+	for (i = 0; i < curve_data->channels; i++) {
+		__le16 *freq = source;
+		source += sizeof(__le16);
+		*((__le16 *)target) = *freq;
+		target += sizeof(__le16);
+		for (j = 0; j < curve_data->points_per_channel; j++) {
+			memcpy(target, source, sizeof(*src));
+
+			target += sizeof(*dst);
+			source += sizeof(*src);
+		}
+		source++;
+	}
+
+	return 0;
+}
+
+static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette",
+				       "Frisbee", "Xbow", "Longbow" };
+static int p54_init_xbow_synth(struct ieee80211_hw *dev);
+
+static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
 {
 	struct p54_common *priv = dev->priv;
 	struct eeprom_pda_wrap *wrap = NULL;
@@ -210,6 +319,7 @@
 	void *tmp;
 	int err;
 	u8 *end = (u8 *)eeprom + len;
+	DECLARE_MAC_BUF(mac);
 
 	wrap = (struct eeprom_pda_wrap *) eeprom;
 	entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -250,27 +360,32 @@
 			       entry->data[1]*sizeof(*priv->output_limit));
 			priv->output_limit_len = entry->data[1];
 			break;
-		case PDR_PRISM_PA_CAL_CURVE_DATA:
-			if (data_len < sizeof(struct pda_pa_curve_data)) {
+		case PDR_PRISM_PA_CAL_CURVE_DATA: {
+			struct pda_pa_curve_data *curve_data =
+				(struct pda_pa_curve_data *)entry->data;
+			if (data_len < sizeof(*curve_data)) {
 				err = -EINVAL;
 				goto err;
 			}
 
-			if (((struct pda_pa_curve_data *)entry->data)->cal_method_rev) {
-				priv->curve_data = kmalloc(data_len, GFP_KERNEL);
-				if (!priv->curve_data) {
-					err = -ENOMEM;
-					goto err;
-				}
-
-				memcpy(priv->curve_data, entry->data, data_len);
-			} else {
-				err = p54_convert_rev0_to_rev1(dev, (struct pda_pa_curve_data *)entry->data);
-				if (err)
-					goto err;
+			switch (curve_data->cal_method_rev) {
+			case 0:
+				err = p54_convert_rev0(dev, curve_data);
+				break;
+			case 1:
+				err = p54_convert_rev1(dev, curve_data);
+				break;
+			default:
+				printk(KERN_ERR "p54: unknown curve data "
+						"revision %d\n",
+						curve_data->cal_method_rev);
+				err = -ENODEV;
+				break;
 			}
+			if (err)
+				goto err;
 
-			break;
+			break;
+		}
 		case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
 			priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
 			if (!priv->iq_autocal) {
@@ -286,7 +401,7 @@
 			while ((u8 *)tmp < entry->data + data_len) {
 				struct bootrec_exp_if *exp_if = tmp;
 				if (le16_to_cpu(exp_if->if_id) == 0xF)
-					priv->rxhw = exp_if->variant & cpu_to_le16(0x07);
+					priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07;
 				tmp += sizeof(struct bootrec_exp_if);
 			}
 			break;
@@ -312,6 +427,37 @@
 		goto err;
 	}
 
+	switch (priv->rxhw) {
+	case 4: /* XBow */
+		p54_init_xbow_synth(dev);
+		/* fall through */
+	case 1: /* Indigo? */
+	case 2: /* Duette */
+		dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
+		/* fall through */
+	case 3: /* Frisbee */
+	case 5: /* Longbow */
+		dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
+		break;
+	default:
+		printk(KERN_ERR "%s: unsupported RF-Chip\n",
+			wiphy_name(dev->wiphy));
+		err = -EINVAL;
+		goto err;
+	}
+
+	if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+		u8 perm_addr[ETH_ALEN];
+
+		printk(KERN_WARNING "%s: Invalid hwaddr! Using randomly generated MAC addr\n",
+			wiphy_name(dev->wiphy));
+		random_ether_addr(perm_addr);
+		SET_IEEE80211_PERM_ADDR(dev, perm_addr);
+	}
+
+	printk(KERN_INFO "%s: hwaddr %s, MAC:isl38%02x RF:%s\n",
+		wiphy_name(dev->wiphy),
+		print_mac(mac, dev->wiphy->perm_addr),
+		priv->version, p54_rf_chips[priv->rxhw]);
+
 	return 0;
 
   err:
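
The rxhw switch above deliberately cascades (hence the fall-through comments): an Xbow additionally gets its synthesizer programmed, Xbow/Indigo/Duette register the new 5 GHz band, and every recognised chip ends up with the 2.4 GHz band, while an unknown value aborts the probe. The same selection, spelled out without fall-through (a sketch only, not the code this patch adds):

/* Equivalent, fall-through-free version of the band setup above. */
static int p54_setup_bands(struct ieee80211_hw *dev, u16 rxhw)
{
	if (rxhw == 4)					/* Xbow */
		p54_init_xbow_synth(dev);

	if (rxhw == 1 || rxhw == 2 || rxhw == 4)	/* Indigo?, Duette, Xbow */
		dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;

	if (rxhw < 1 || rxhw > 5)			/* unsupported RF chip */
		return -EINVAL;

	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
	return 0;
}
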
@@ -335,40 +481,54 @@
 }
 EXPORT_SYMBOL_GPL(p54_parse_eeprom);
 
-void p54_fill_eeprom_readback(struct p54_control_hdr *hdr)
+static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi)
 {
-	struct p54_eeprom_lm86 *eeprom_hdr;
-
-	hdr->magic1 = cpu_to_le16(0x8000);
-	hdr->len = cpu_to_le16(sizeof(*eeprom_hdr) + 0x2000);
-	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
-	hdr->retry1 = hdr->retry2 = 0;
-	eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
-	eeprom_hdr->offset = 0x0;
-	eeprom_hdr->len = cpu_to_le16(0x2000);
+	/* TODO: get the rssi_add & rssi_mul data from the eeprom */
+	return ((rssi * 0x83) / 64 - 400) / 4;
 }
-EXPORT_SYMBOL_GPL(p54_fill_eeprom_readback);
 
-static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
+static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
+	struct p54_common *priv = dev->priv;
 	struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data;
 	struct ieee80211_rx_status rx_status = {0};
 	u16 freq = le16_to_cpu(hdr->freq);
+	size_t header_len = sizeof(*hdr);
+	u32 tsf32;
 
-	rx_status.signal = hdr->rssi;
+	if (!(hdr->magic & cpu_to_le16(0x0001))) {
+		if (priv->filter_flags & FIF_FCSFAIL)
+			rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+		else
+			return 0;
+	}
+
+	rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi);
+	rx_status.noise = priv->noise;
 	/* XX correct? */
 	rx_status.qual = (100 * hdr->rssi) / 127;
 	rx_status.rate_idx = hdr->rate & 0xf;
 	rx_status.freq = freq;
 	rx_status.band = IEEE80211_BAND_2GHZ;
 	rx_status.antenna = hdr->antenna;
-	rx_status.mactime = le64_to_cpu(hdr->timestamp);
+
+	tsf32 = le32_to_cpu(hdr->tsf32);
+	if (tsf32 < priv->tsf_low32)
+		priv->tsf_high32++;
+	rx_status.mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
+	priv->tsf_low32 = tsf32;
+
 	rx_status.flag |= RX_FLAG_TSFT;
 
-	skb_pull(skb, sizeof(*hdr));
+	if (hdr->magic & cpu_to_le16(0x4000))
+		header_len += hdr->align[0];
+
+	skb_pull(skb, header_len);
 	skb_trim(skb, le16_to_cpu(hdr->len));
 
 	ieee80211_rx_irqsafe(dev, skb, &rx_status);
+
+	return -1;
 }
 
 static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
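
p54_rssi_to_dbm() above replaces the old raw-RSSI reporting (hence IEEE80211_HW_SIGNAL_DBM later in this patch). With the fixed constants — the TODO notes that the per-device rssi_mul/rssi_add calibration from the EEPROM is not used yet — each raw step is worth roughly 0.51 dB and a reading of 0 maps to -100 dBm. A quick standalone check of the arithmetic:

#include <stdio.h>

/* Same fixed-point conversion as p54_rssi_to_dbm() in the patch. */
static int rssi_to_dbm(int rssi)
{
	return ((rssi * 0x83) / 64 - 400) / 4;
}

int main(void)
{
	int rssi;

	/* prints: 0 -> -100, 32 -> -83, 64 -> -67, 96 -> -51 dBm */
	for (rssi = 0; rssi <= 127; rssi += 32)
		printf("raw %3d -> %d dBm\n", rssi, rssi_to_dbm(rssi));
	return 0;
}
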
@@ -377,7 +537,7 @@
 	int i;
 
 	for (i = 0; i < dev->queues; i++)
-		if (priv->tx_stats[i].len < priv->tx_stats[i].limit)
+		if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit)
 			ieee80211_wake_queue(dev, i);
 }
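
priv->tx_stats[] is now indexed by the device's own queue numbers: mac80211 queue i corresponds to hardware queue i + 4, which is why p54_wake_free_queues() above checks tx_stats[i + 4], p54_tx() below uses tx_stats[skb_get_queue_mapping(skb) + 4] and sets txhdr->hw_queue to the same value, and p54_parse_firmware() fills in limits for slots 4-7. Entries 0-3 are initialised with a limit of 1 in p54_init_common(). A pair of hypothetical helpers (not in the patch) that make the offset explicit:

/* Hypothetical helpers spelling out the queue mapping used inline above. */
#define P54_FIRST_DATA_QUEUE	4	/* mac80211 queues start at device queue 4 */

static inline unsigned int p54_ac_to_hw_queue(unsigned int ac)
{
	return ac + P54_FIRST_DATA_QUEUE;
}

static inline unsigned int p54_hw_queue_to_ac(unsigned int hw_queue)
{
	return hw_queue - P54_FIRST_DATA_QUEUE;
}
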
 
@@ -387,11 +547,13 @@
 	struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
 	struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data;
 	struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
-	u32 addr = le32_to_cpu(hdr->req_id) - 0x70;
+	u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
 	struct memrecord *range = NULL;
 	u32 freed = 0;
 	u32 last_addr = priv->rx_start;
+	unsigned long flags;
 
+	spin_lock_irqsave(&priv->tx_queue.lock, flags);
 	while (entry != (struct sk_buff *)&priv->tx_queue) {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
 		range = (void *)info->driver_data;
@@ -412,13 +574,15 @@
 
 			last_addr = range->end_addr;
 			__skb_unlink(entry, &priv->tx_queue);
+			spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+
 			memset(&info->status, 0, sizeof(info->status));
 			entry_hdr = (struct p54_control_hdr *) entry->data;
 			entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
 			if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
 				pad = entry_data->align[0];
 
-			priv->tx_stats[entry_data->hw_queue - 4].len--;
+			priv->tx_stats[entry_data->hw_queue].len--;
 			if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 				if (!(payload->status & 0x01))
 					info->flags |= IEEE80211_TX_STAT_ACK;
@@ -426,21 +590,60 @@
 					info->status.excessive_retries = 1;
 			}
 			info->status.retry_count = payload->retries - 1;
-			info->status.ack_signal = le16_to_cpu(payload->ack_rssi);
+			info->status.ack_signal = p54_rssi_to_dbm(dev,
+					le16_to_cpu(payload->ack_rssi));
 			skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
 			ieee80211_tx_status_irqsafe(dev, entry);
-			break;
+			goto out;
 		} else
 			last_addr = range->end_addr;
 		entry = entry->next;
 	}
+	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
 
+out:
 	if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
 	    sizeof(struct p54_control_hdr))
 		p54_wake_free_queues(dev);
 }
 
-static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
+				   struct sk_buff *skb)
+{
+	struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
+	struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
+	struct p54_common *priv = dev->priv;
+
+	if (!priv->eeprom)
+		return;
+
+	memcpy(priv->eeprom, eeprom->data, le16_to_cpu(eeprom->len));
+
+	complete(&priv->eeprom_comp);
+}
+
+static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
+{
+	struct p54_common *priv = dev->priv;
+	struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
+	struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
+	u32 tsf32 = le32_to_cpu(stats->tsf32);
+
+	if (tsf32 < priv->tsf_low32)
+		priv->tsf_high32++;
+	priv->tsf_low32 = tsf32;
+
+	priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
+	priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
+	priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
+
+	priv->noise = p54_rssi_to_dbm(dev, le32_to_cpu(stats->noise));
+	complete(&priv->stats_comp);
+
+	mod_timer(&priv->stats_timer, jiffies + 5 * HZ);
+}
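
Both p54_rx_data() above and p54_rx_stats() here extend the device's 32-bit TSF into the 64-bit mactime mac80211 expects: whenever the new low word is smaller than the previous one the driver assumes a wrap and bumps tsf_high32. This stays correct only while the counter is sampled at least once per wrap period, which the periodic statistics readback should take care of while the device is running. The pattern in isolation:

#include <stdint.h>

/* Software extension of a 32-bit rolling timestamp, as done with
 * priv->tsf_low32/tsf_high32 in the patch above. */
struct tsf_state {
	uint32_t high32;
	uint32_t low32;
};

static uint64_t tsf_extend(struct tsf_state *s, uint32_t tsf32)
{
	if (tsf32 < s->low32)		/* low word wrapped since last sample */
		s->high32++;
	s->low32 = tsf32;
	return ((uint64_t)s->high32 << 32) | tsf32;
}
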
+
+static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
 
@@ -450,36 +653,30 @@
 		break;
 	case P54_CONTROL_TYPE_BBP:
 		break;
+	case P54_CONTROL_TYPE_STAT_READBACK:
+		p54_rx_stats(dev, skb);
+		break;
+	case P54_CONTROL_TYPE_EEPROM_READBACK:
+		p54_rx_eeprom_readback(dev, skb);
+		break;
 	default:
 		printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n",
 		       wiphy_name(dev->wiphy), le16_to_cpu(hdr->type));
 		break;
 	}
+
+	return 0;
 }
 
 /* returns zero if skb can be reused */
 int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8;
-	switch (type) {
-	case 0x00:
-	case 0x01:
-		p54_rx_data(dev, skb);
-		return -1;
-	case 0x4d:
-		/* TODO: do something better... but then again, I've never seen this happen */
-		printk(KERN_ERR "%s: Received fault. Probably need to restart hardware now..\n",
-		       wiphy_name(dev->wiphy));
-		break;
-	case 0x80:
-		p54_rx_control(dev, skb);
-		break;
-	default:
-		printk(KERN_ERR "%s: unknown frame RXed (0x%02x)\n",
-		       wiphy_name(dev->wiphy), type);
-		break;
-	}
-	return 0;
+
+	if (type == 0x80)
+		return p54_rx_control(dev, skb);
+	else
+		return p54_rx_data(dev, skb);
 }
 EXPORT_SYMBOL_GPL(p54_rx);
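
The contract spelled out in the comment above p54_rx() matters to the bus glue: a non-zero return means the skb has been handed to mac80211, so the caller must forget about it and allocate a replacement, whereas zero means the buffer may be rewound and reused in place (this is exactly what the reworked PCI RX ring code later in this patch does). A sketch of a consumer under those assumptions; my_backend_rx() and my_alloc_rx_skb() are invented names:

static struct sk_buff *my_alloc_rx_skb(struct ieee80211_hw *dev);

/* Minimal consumer of p54_rx()'s return value (compare p54p_check_rx_ring). */
static void my_backend_rx(struct ieee80211_hw *dev, struct sk_buff **slot,
			  unsigned int len)
{
	struct sk_buff *skb = *slot;

	skb_put(skb, len);
	if (p54_rx(dev, skb))
		*slot = my_alloc_rx_skb(dev);	/* skb now owned by the stack */
	else
		skb_trim(skb, 0);		/* rewind and recycle the buffer */
}
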
 
@@ -503,7 +700,7 @@
 	u32 target_addr = priv->rx_start;
 	unsigned long flags;
 	unsigned int left;
-	len = (len + 0x170 + 3) & ~0x3; /* 0x70 headroom, 0x100 tailroom */
+	len = (len + priv->headroom + priv->tailroom + 3) & ~0x3;
 
 	spin_lock_irqsave(&priv->tx_queue.lock, flags);
 	left = skb_queue_len(&priv->tx_queue);
@@ -538,15 +735,75 @@
 		range->start_addr = target_addr;
 		range->end_addr = target_addr + len;
 		__skb_queue_after(&priv->tx_queue, target_skb, skb);
-		if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
+		if (largest_hole < priv->rx_mtu + priv->headroom +
+				   priv->tailroom +
 				   sizeof(struct p54_control_hdr))
 			ieee80211_stop_queues(dev);
 	}
 	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
 
-	data->req_id = cpu_to_le32(target_addr + 0x70);
+	data->req_id = cpu_to_le32(target_addr + priv->headroom);
 }
 
+int p54_read_eeprom(struct ieee80211_hw *dev)
+{
+	struct p54_common *priv = dev->priv;
+	struct p54_control_hdr *hdr = NULL;
+	struct p54_eeprom_lm86 *eeprom_hdr;
+	size_t eeprom_size = 0x2020, offset = 0, blocksize;
+	int ret = -ENOMEM;
+	void *eeprom = NULL;
+
+	hdr = (struct p54_control_hdr *)kzalloc(sizeof(*hdr) +
+		sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN, GFP_KERNEL);
+	if (!hdr)
+		goto free;
+
+	priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
+	if (!priv->eeprom)
+		goto free;
+
+	eeprom = kzalloc(eeprom_size, GFP_KERNEL);
+	if (!eeprom)
+		goto free;
+
+	hdr->magic1 = cpu_to_le16(0x8000);
+	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
+	hdr->retry1 = hdr->retry2 = 0;
+	eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
+
+	while (eeprom_size) {
+		blocksize = min(eeprom_size, (size_t)EEPROM_READBACK_LEN);
+		hdr->len = cpu_to_le16(blocksize + sizeof(*eeprom_hdr));
+		eeprom_hdr->offset = cpu_to_le16(offset);
+		eeprom_hdr->len = cpu_to_le16(blocksize);
+		p54_assign_address(dev, NULL, hdr, le16_to_cpu(hdr->len) +
+				   sizeof(*hdr));
+		priv->tx(dev, hdr, le16_to_cpu(hdr->len) + sizeof(*hdr), 0);
+
+		if (!wait_for_completion_interruptible_timeout(&priv->eeprom_comp, HZ)) {
+			printk(KERN_ERR "%s: device does not respond!\n",
+				wiphy_name(dev->wiphy));
+			ret = -EBUSY;
+			goto free;
+		}
+
+		memcpy(eeprom + offset, priv->eeprom, blocksize);
+		offset += blocksize;
+		eeprom_size -= blocksize;
+	}
+
+	ret = p54_parse_eeprom(dev, eeprom, offset);
+free:
+	kfree(priv->eeprom);
+	priv->eeprom = NULL;
+	kfree(hdr);
+	kfree(eeprom);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(p54_read_eeprom);
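
p54_read_eeprom() replaces the old single-shot readback: the 0x2020-byte image is now fetched in blocks of at most EEPROM_READBACK_LEN (0x3fc), one control frame per block, with priv->eeprom_comp signalled by p54_rx_eeprom_readback() once the answer has landed in the bounce buffer. Stripped of the hardware plumbing it is the usual chunked-transfer loop; read_block() below stands in for 'send request and wait for completion' and is not a real driver function:

#include <stddef.h>

/* Generic shape of the readback loop in p54_read_eeprom(). */
static int read_all(void *dst, size_t total, size_t max_block,
		    int (*read_block)(void *dst, size_t offset, size_t len))
{
	size_t offset = 0;

	while (total) {
		size_t block = total < max_block ? total : max_block;
		int err = read_block((char *)dst + offset, offset, block);

		if (err)
			return err;
		offset += block;
		total -= block;
	}
	return 0;
}
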
+
 static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -559,7 +816,7 @@
 	u8 rate;
 	u8 cts_rate = 0x20;
 
-	current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
+	current_queue = &priv->tx_stats[skb_get_queue_mapping(skb) + 4];
 	if (unlikely(current_queue->len > current_queue->limit))
 		return NETDEV_TX_BUSY;
 	current_queue->len++;
@@ -601,7 +858,7 @@
 	txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
 	txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
 		2 : info->antenna_sel_tx - 1;
-	txhdr->output_power = 0x7f; // HW Maximum
+	txhdr->output_power = priv->output_power;
 	txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
 			  0 : cts_rate;
 	if (padding)
@@ -628,12 +885,12 @@
 }
 
 static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
-			  const u8 *dst, const u8 *src, u8 antenna,
-			  u32 magic3, u32 magic8, u32 magic9)
+			  const u8 *bssid)
 {
 	struct p54_common *priv = dev->priv;
 	struct p54_control_hdr *hdr;
 	struct p54_tx_control_filter *filter;
+	size_t data_len;
 
 	hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) +
 		      priv->tx_hdr_len, GFP_ATOMIC);
@@ -644,25 +901,35 @@
 
 	filter = (struct p54_tx_control_filter *) hdr->data;
 	hdr->magic1 = cpu_to_le16(0x8001);
-	hdr->len = cpu_to_le16(sizeof(*filter));
-	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter));
 	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET);
 
-	filter->filter_type = cpu_to_le16(filter_type);
-	memcpy(filter->dst, dst, ETH_ALEN);
-	if (!src)
-		memset(filter->src, ~0, ETH_ALEN);
+	priv->filter_type = filter->filter_type = cpu_to_le16(filter_type);
+	memcpy(filter->mac_addr, priv->mac_addr, ETH_ALEN);
+	if (!bssid)
+		memset(filter->bssid, ~0, ETH_ALEN);
 	else
-		memcpy(filter->src, src, ETH_ALEN);
-	filter->antenna = antenna;
-	filter->magic3 = cpu_to_le32(magic3);
-	filter->rx_addr = cpu_to_le32(priv->rx_end);
-	filter->max_rx = cpu_to_le16(0x0620);	/* FIXME: for usb ver 1.. maybe */
-	filter->rxhw = priv->rxhw;
-	filter->magic8 = cpu_to_le16(magic8);
-	filter->magic9 = cpu_to_le16(magic9);
+		memcpy(filter->bssid, bssid, ETH_ALEN);
 
-	priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*filter), 1);
+	filter->rx_antenna = priv->rx_antenna;
+
+	if (priv->fw_var < 0x500) {
+		data_len = P54_TX_CONTROL_FILTER_V1_LEN;
+		filter->v1.basic_rate_mask = cpu_to_le32(0x15F);
+		filter->v1.rx_addr = cpu_to_le32(priv->rx_end);
+		filter->v1.max_rx = cpu_to_le16(priv->rx_mtu);
+		filter->v1.rxhw = cpu_to_le16(priv->rxhw);
+		filter->v1.wakeup_timer = cpu_to_le16(500);
+	} else {
+		data_len = P54_TX_CONTROL_FILTER_V2_LEN;
+		filter->v2.rx_addr = cpu_to_le32(priv->rx_end);
+		filter->v2.max_rx = cpu_to_le16(priv->rx_mtu);
+		filter->v2.rxhw = cpu_to_le16(priv->rxhw);
+		filter->v2.timer = cpu_to_le16(1000);
+	}
+
+	hdr->len = cpu_to_le16(data_len);
+	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
+	priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
 	return 0;
 }
 
@@ -672,12 +939,10 @@
 	struct p54_control_hdr *hdr;
 	struct p54_tx_control_channel *chan;
 	unsigned int i;
-	size_t payload_len = sizeof(*chan) + sizeof(u32)*2 +
-			     sizeof(*chan->curve_data) *
-			     priv->curve_data->points_per_channel;
+	size_t data_len;
 	void *entry;
 
-	hdr = kzalloc(sizeof(*hdr) + payload_len +
+	hdr = kzalloc(sizeof(*hdr) + sizeof(*chan) +
 		      priv->tx_hdr_len, GFP_KERNEL);
 	if (!hdr)
 		return -ENOMEM;
@@ -687,12 +952,11 @@
 	chan = (struct p54_tx_control_channel *) hdr->data;
 
 	hdr->magic1 = cpu_to_le16(0x8001);
-	hdr->len = cpu_to_le16(sizeof(*chan));
-	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
-	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len);
 
-	chan->magic1 = cpu_to_le16(0x1);
-	chan->magic2 = cpu_to_le16(0x0);
+	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
+
+	chan->flags = cpu_to_le16(0x1);
+	chan->dwell = cpu_to_le16(0x0);
 
 	for (i = 0; i < priv->iq_autocal_len; i++) {
 		if (priv->iq_autocal[i].freq != freq)
@@ -710,35 +974,51 @@
 			continue;
 
 		chan->val_barker = 0x38;
-		chan->val_bpsk = priv->output_limit[i].val_bpsk;
-		chan->val_qpsk = priv->output_limit[i].val_qpsk;
-		chan->val_16qam = priv->output_limit[i].val_16qam;
-		chan->val_64qam = priv->output_limit[i].val_64qam;
+		chan->val_bpsk = chan->dup_bpsk =
+			priv->output_limit[i].val_bpsk;
+		chan->val_qpsk = chan->dup_qpsk =
+			priv->output_limit[i].val_qpsk;
+		chan->val_16qam = chan->dup_16qam =
+			priv->output_limit[i].val_16qam;
+		chan->val_64qam = chan->dup_64qam =
+			priv->output_limit[i].val_64qam;
 		break;
 	}
 	if (i == priv->output_limit_len)
 		goto err;
 
-	chan->pa_points_per_curve = priv->curve_data->points_per_channel;
-
 	entry = priv->curve_data->data;
 	for (i = 0; i < priv->curve_data->channels; i++) {
 		if (*((__le16 *)entry) != freq) {
 			entry += sizeof(__le16);
-			entry += sizeof(struct pda_pa_curve_data_sample_rev1) *
-				 chan->pa_points_per_curve;
+			entry += sizeof(struct p54_pa_curve_data_sample) *
+				 priv->curve_data->points_per_channel;
 			continue;
 		}
 
 		entry += sizeof(__le16);
+		chan->pa_points_per_curve =
+			min(priv->curve_data->points_per_channel, (u8) 8);
+
 		memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) *
 		       chan->pa_points_per_curve);
 		break;
 	}
 
-	memcpy(hdr->data + payload_len - 4, &chan->val_bpsk, 4);
+	if (priv->fw_var < 0x500) {
+		data_len = P54_TX_CONTROL_CHANNEL_V1_LEN;
+		chan->v1.rssical_mul = cpu_to_le16(130);
+		chan->v1.rssical_add = cpu_to_le16(0xfe70);
+	} else {
+		data_len = P54_TX_CONTROL_CHANNEL_V2_LEN;
+		chan->v2.rssical_mul = cpu_to_le16(130);
+		chan->v2.rssical_add = cpu_to_le16(0xfe70);
+		chan->v2.basic_rate_mask = cpu_to_le32(0x15f);
+	}
 
-	priv->tx(dev, hdr, sizeof(*hdr) + payload_len, 1);
+	hdr->len = cpu_to_le16(data_len);
+	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
+	priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
 	return 0;
 
  err:
@@ -846,12 +1126,25 @@
 			return -ENOMEM;
 	}
 
+	if (!priv->cached_stats) {
+		priv->cached_stats = kzalloc(sizeof(struct p54_statistics) +
+			priv->tx_hdr_len + sizeof(struct p54_control_hdr),
+			GFP_KERNEL);
+
+		if (!priv->cached_stats) {
+			kfree(priv->cached_vdcf);
+			priv->cached_vdcf = NULL;
+			return -ENOMEM;
+		}
+	}
+
 	err = priv->open(dev);
 	if (!err)
-		priv->mode = IEEE80211_IF_TYPE_MNTR;
+		priv->mode = NL80211_IFTYPE_MONITOR;
 
 	p54_init_vdcf(dev);
 
+	mod_timer(&priv->stats_timer, jiffies + HZ);
 	return err;
 }
 
@@ -859,10 +1152,13 @@
 {
 	struct p54_common *priv = dev->priv;
 	struct sk_buff *skb;
+
+	del_timer(&priv->stats_timer);
 	while ((skb = skb_dequeue(&priv->tx_queue)))
 		kfree_skb(skb);
 	priv->stop(dev);
-	priv->mode = IEEE80211_IF_TYPE_INVALID;
+	priv->tsf_high32 = priv->tsf_low32 = 0;
+	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
 }
 
 static int p54_add_interface(struct ieee80211_hw *dev,
@@ -870,11 +1166,11 @@
 {
 	struct p54_common *priv = dev->priv;
 
-	if (priv->mode != IEEE80211_IF_TYPE_MNTR)
+	if (priv->mode != NL80211_IFTYPE_MONITOR)
 		return -EOPNOTSUPP;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->mode = conf->type;
 		break;
 	default:
@@ -883,12 +1179,11 @@
 
 	memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
 
-	p54_set_filter(dev, 0, priv->mac_addr, NULL, 0, 1, 0, 0xF642);
-	p54_set_filter(dev, 0, priv->mac_addr, NULL, 1, 0, 0, 0xF642);
+	p54_set_filter(dev, 0, NULL);
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
-		p54_set_filter(dev, 1, priv->mac_addr, NULL, 0, 0x15F, 0x1F4, 0);
+	case NL80211_IFTYPE_STATION:
+		p54_set_filter(dev, 1, NULL);
 		break;
 	default:
 		BUG();	/* impossible */
@@ -904,9 +1199,9 @@
 				 struct ieee80211_if_init_conf *conf)
 {
 	struct p54_common *priv = dev->priv;
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	memset(priv->mac_addr, 0, ETH_ALEN);
-	p54_set_filter(dev, 0, priv->mac_addr, NULL, 2, 0, 0, 0);
+	p54_set_filter(dev, 0, NULL);
 }
 
 static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -915,6 +1210,9 @@
 	struct p54_common *priv = dev->priv;
 
 	mutex_lock(&priv->conf_mutex);
+	priv->rx_antenna = (conf->antenna_sel_rx == 0) ?
+		2 : conf->antenna_sel_rx - 1;
+	priv->output_power = conf->power_level << 2;
 	ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
 	p54_set_vdcf(dev);
 	mutex_unlock(&priv->conf_mutex);
@@ -928,8 +1226,7 @@
 	struct p54_common *priv = dev->priv;
 
 	mutex_lock(&priv->conf_mutex);
-	p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642);
-	p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
+	p54_set_filter(dev, 0, conf->bssid);
 	p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
 	memcpy(priv->bssid, conf->bssid, ETH_ALEN);
 	mutex_unlock(&priv->conf_mutex);
@@ -943,15 +1240,28 @@
 {
 	struct p54_common *priv = dev->priv;
 
-	*total_flags &= FIF_BCN_PRBRESP_PROMISC;
+	*total_flags &= FIF_BCN_PRBRESP_PROMISC |
+			FIF_PROMISC_IN_BSS |
+			FIF_FCSFAIL;
+
+	priv->filter_flags = *total_flags;
 
 	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
 		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
-			p54_set_filter(dev, 0, priv->mac_addr,
-				       NULL, 2, 0, 0, 0);
+			p54_set_filter(dev, le16_to_cpu(priv->filter_type),
+				 NULL);
 		else
-			p54_set_filter(dev, 0, priv->mac_addr,
-				       priv->bssid, 2, 0, 0, 0);
+			p54_set_filter(dev, le16_to_cpu(priv->filter_type),
+				 priv->bssid);
+	}
+
+	if (changed_flags & FIF_PROMISC_IN_BSS) {
+		if (*total_flags & FIF_PROMISC_IN_BSS)
+			p54_set_filter(dev, le16_to_cpu(priv->filter_type) |
+				0x8, NULL);
+		else
+			p54_set_filter(dev, le16_to_cpu(priv->filter_type) &
+				~0x8, priv->bssid);
 	}
 }
 
@@ -975,10 +1285,67 @@
 	return 0;
 }
 
+static int p54_init_xbow_synth(struct ieee80211_hw *dev)
+{
+	struct p54_common *priv = dev->priv;
+	struct p54_control_hdr *hdr;
+	struct p54_tx_control_xbow_synth *xbow;
+
+	hdr = kzalloc(sizeof(*hdr) + sizeof(*xbow) +
+		      priv->tx_hdr_len, GFP_KERNEL);
+	if (!hdr)
+		return -ENOMEM;
+
+	hdr = (void *)hdr + priv->tx_hdr_len;
+	hdr->magic1 = cpu_to_le16(0x8001);
+	hdr->len = cpu_to_le16(sizeof(*xbow));
+	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_XBOW_SYNTH_CFG);
+	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*xbow));
+
+	xbow = (struct p54_tx_control_xbow_synth *) hdr->data;
+	xbow->magic1 = cpu_to_le16(0x1);
+	xbow->magic2 = cpu_to_le16(0x2);
+	xbow->freq = cpu_to_le16(5390);
+
+	priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*xbow), 1);
+
+	return 0;
+}
+
+static void p54_statistics_timer(unsigned long data)
+{
+	struct ieee80211_hw *dev = (struct ieee80211_hw *) data;
+	struct p54_common *priv = dev->priv;
+	struct p54_control_hdr *hdr;
+	struct p54_statistics *stats;
+
+	BUG_ON(!priv->cached_stats);
+
+	hdr = (void *)priv->cached_stats + priv->tx_hdr_len;
+	hdr->magic1 = cpu_to_le16(0x8000);
+	hdr->len = cpu_to_le16(sizeof(*stats));
+	hdr->type = cpu_to_le16(P54_CONTROL_TYPE_STAT_READBACK);
+	p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*stats));
+
+	priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*stats), 0);
+}
+
 static int p54_get_stats(struct ieee80211_hw *dev,
 			 struct ieee80211_low_level_stats *stats)
 {
-	/* TODO */
+	struct p54_common *priv = dev->priv;
+
+	del_timer(&priv->stats_timer);
+	p54_statistics_timer((unsigned long)dev);
+
+	if (!wait_for_completion_interruptible_timeout(&priv->stats_comp, HZ)) {
+		printk(KERN_ERR "%s: device does not respond!\n",
+			wiphy_name(dev->wiphy));
+		return -EBUSY;
+	}
+
+	memcpy(stats, &priv->stats, sizeof(*stats));
+
 	return 0;
 }
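
Statistics are no longer a stub: p54_statistics_timer() sends a STAT_READBACK control frame, p54_rx_stats() copies the answer into priv->stats, completes priv->stats_comp and re-arms the timer for five seconds later, and p54_get_stats() above forces an immediate round trip by cancelling the timer, calling the timer function directly and blocking on the completion for up to a second. A compressed sketch of that request/complete pattern; struct my_priv and send_stats_request() are stand-ins for the corresponding p54_common members and the control-frame transmit:

/* Sketch only: the shape of the synchronous poll in p54_get_stats(). */
static int example_get_stats(struct my_priv *priv,
			     struct ieee80211_low_level_stats *stats)
{
	del_timer(&priv->stats_timer);	/* avoid a concurrent periodic request */
	send_stats_request(priv);	/* normally issued by the timer callback */

	if (!wait_for_completion_interruptible_timeout(&priv->stats_comp, HZ))
		return -EBUSY;		/* firmware did not answer in time */

	memcpy(stats, &priv->stats, sizeof(*stats));
	return 0;
}
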
 
@@ -987,7 +1354,7 @@
 {
 	struct p54_common *priv = dev->priv;
 
-	memcpy(stats, &priv->tx_stats, sizeof(stats[0]) * dev->queues);
+	memcpy(stats, &priv->tx_stats[4], sizeof(stats[0]) * dev->queues);
 
 	return 0;
 }
@@ -1016,22 +1383,32 @@
 		return NULL;
 
 	priv = dev->priv;
-	priv->mode = IEEE80211_IF_TYPE_INVALID;
+	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
 	skb_queue_head_init(&priv->tx_queue);
-	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
 	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
 		     IEEE80211_HW_RX_INCLUDES_FCS |
-		     IEEE80211_HW_SIGNAL_UNSPEC;
+		     IEEE80211_HW_SIGNAL_DBM |
+		     IEEE80211_HW_NOISE_DBM;
+
+	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
 	dev->channel_change_time = 1000;	/* TODO: find actual value */
-	dev->max_signal = 127;
 
-	priv->tx_stats[0].limit = 5;
+	priv->tx_stats[0].limit = 1;
+	priv->tx_stats[1].limit = 1;
+	priv->tx_stats[2].limit = 1;
+	priv->tx_stats[3].limit = 1;
+	priv->tx_stats[4].limit = 5;
 	dev->queues = 1;
-
+	priv->noise = -94;
 	dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
 				 sizeof(struct p54_tx_control_allocdata);
 
 	mutex_init(&priv->conf_mutex);
+	init_completion(&priv->eeprom_comp);
+	init_completion(&priv->stats_comp);
+	setup_timer(&priv->stats_timer, p54_statistics_timer,
+		(unsigned long)dev);
 
 	return dev;
 }
@@ -1040,6 +1417,7 @@
 void p54_free_common(struct ieee80211_hw *dev)
 {
 	struct p54_common *priv = dev->priv;
+	kfree(priv->cached_stats);
 	kfree(priv->iq_autocal);
 	kfree(priv->output_limit);
 	kfree(priv->curve_data);
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 8db6c0e..2fa994c 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -1,5 +1,5 @@
-#ifndef PRISM54COMMON_H
-#define PRISM54COMMON_H
+#ifndef P54COMMON_H
+#define P54COMMON_H
 
 /*
  * Common code specific definitions for mac80211 Prism54 drivers
@@ -18,7 +18,8 @@
 struct bootrec {
 	__le32 code;
 	__le32 len;
-	u32 data[0];
+	u32 data[10];
+	__le16 rx_mtu;
 } __attribute__((packed));
 
 struct bootrec_exp_if {
@@ -29,6 +30,17 @@
 	__le16 top_compat;
 } __attribute__((packed));
 
+struct bootrec_desc {
+	__le16 modes;
+	__le16 flags;
+	__le32 rx_start;
+	__le32 rx_end;
+	u8 headroom;
+	u8 tailroom;
+	u8 unimportant[6];
+	u8 rates[16];
+} __attribute__((packed));
+
 #define BR_CODE_MIN			0x80000000
 #define BR_CODE_COMPONENT_ID		0x80000001
 #define BR_CODE_COMPONENT_VERSION	0x80000002
@@ -39,11 +51,6 @@
 #define BR_CODE_END_OF_BRA		0xFF0000FF
 #define LEGACY_BR_CODE_END_OF_BRA	0xFFFFFFFF
 
-#define FW_FMAC 0x464d4143
-#define FW_LM86 0x4c4d3836
-#define FW_LM87 0x4c4d3837
-#define FW_LM20 0x4c4d3230
-
 /* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */
 
 struct pda_entry {
@@ -89,6 +96,16 @@
 	u8 data_qpsk;
 	u8 data_16qam;
 	u8 data_64qam;
+} __attribute__ ((packed));
+
+struct p54_pa_curve_data_sample {
+	u8 rf_power;
+	u8 pa_detector;
+	u8 data_barker;
+	u8 data_bpsk;
+	u8 data_qpsk;
+	u8 data_16qam;
+	u8 data_64qam;
 	u8 padding;
 } __attribute__ ((packed));
 
@@ -169,8 +186,9 @@
 	u8 rssi;
 	u8 quality;
 	u16 unknown2;
-	__le64 timestamp;
-	u8 data[0];
+	__le32 tsf32;
+	__le32 unalloc0;
+	u8 align[0];
 } __attribute__ ((packed));
 
 struct p54_frame_sent_hdr {
@@ -198,22 +216,37 @@
 
 struct p54_tx_control_filter {
 	__le16 filter_type;
-	u8 dst[ETH_ALEN];
-	u8 src[ETH_ALEN];
-	u8 antenna;
-	u8 debug;
-	__le32 magic3;
-	u8 rates[8];	// FIXME: what's this for?
-	__le32 rx_addr;
-	__le16 max_rx;
-	__le16 rxhw;
-	__le16 magic8;
-	__le16 magic9;
+	u8 mac_addr[ETH_ALEN];
+	u8 bssid[ETH_ALEN];
+	u8 rx_antenna;
+	u8 rx_align;
+	union {
+		struct {
+			__le32 basic_rate_mask;
+			u8 rts_rates[8];
+			__le32 rx_addr;
+			__le16 max_rx;
+			__le16 rxhw;
+			__le16 wakeup_timer;
+			__le16 unalloc0;
+		} v1 __attribute__ ((packed));
+		struct {
+			__le32 rx_addr;
+			__le16 max_rx;
+			__le16 rxhw;
+			__le16 timer;
+			__le16 unalloc0;
+			__le32 unalloc1;
+		} v2 __attribute__ ((packed));
+	} __attribute__ ((packed));
 } __attribute__ ((packed));
 
+#define P54_TX_CONTROL_FILTER_V1_LEN (sizeof(struct p54_tx_control_filter))
+#define P54_TX_CONTROL_FILTER_V2_LEN (sizeof(struct p54_tx_control_filter)-8)
+
 struct p54_tx_control_channel {
-	__le16 magic1;
-	__le16 magic2;
+	__le16 flags;
+	__le16 dwell;
 	u8 padding1[20];
 	struct pda_iq_autocal_entry iq_autocal;
 	u8 pa_points_per_curve;
@@ -222,10 +255,29 @@
 	u8 val_qpsk;
 	u8 val_16qam;
 	u8 val_64qam;
-	struct pda_pa_curve_data_sample_rev1 curve_data[0];
-	/* additional padding/data after curve_data */
+	struct p54_pa_curve_data_sample curve_data[8];
+	u8 dup_bpsk;
+	u8 dup_qpsk;
+	u8 dup_16qam;
+	u8 dup_64qam;
+	union {
+		struct {
+			__le16 rssical_mul;
+			__le16 rssical_add;
+		} v1 __attribute__ ((packed));
+
+		struct {
+			__le32 basic_rate_mask;
+			u8 rts_rates[8];
+			__le16 rssical_mul;
+			__le16 rssical_add;
+		} v2 __attribute__ ((packed));
+	} __attribute__ ((packed));
 } __attribute__ ((packed));
 
+#define P54_TX_CONTROL_CHANNEL_V1_LEN (sizeof(struct p54_tx_control_channel)-12)
+#define P54_TX_CONTROL_CHANNEL_V2_LEN (sizeof(struct p54_tx_control_channel))
+
 struct p54_tx_control_led {
 	__le16 mode;
 	__le16 led_temporary;
@@ -250,4 +302,24 @@
 	__le16 frameburst;
 } __attribute__ ((packed));
 
-#endif /* PRISM54COMMON_H */
+struct p54_statistics {
+	__le32 rx_success;
+	__le32 rx_bad_fcs;
+	__le32 rx_abort;
+	__le32 rx_abort_phy;
+	__le32 rts_success;
+	__le32 rts_fail;
+	__le32 tsf32;
+	__le32 airtime;
+	__le32 noise;
+	__le32 unkn[10]; /* CCE / CCA / RADAR */
+} __attribute__ ((packed));
+
+struct p54_tx_control_xbow_synth {
+	__le16 magic1;
+	__le16 magic2;
+	__le16 freq;
+	u32 padding[5];
+} __attribute__ ((packed));
+
+#endif /* P54COMMON_H */
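
The new *_V1_LEN/*_V2_LEN macros describe how much of each packed structure is actually transmitted for a given firmware generation: by these declarations the filter's v1 union member is 24 bytes and v2 is 16, so P54_TX_CONTROL_FILTER_V2_LEN (sizeof - 8) ends exactly after v2, and P54_TX_CONTROL_CHANNEL_V1_LEN (sizeof - 12) likewise ends after the 4-byte v1 tail of the channel structure. If one wanted to pin that down at compile time, checks along these lines could be dropped into an init path such as p54_init_common() (illustrative only, not part of the patch):

/* The anonymous unions make their members addressable directly. */
BUILD_BUG_ON(P54_TX_CONTROL_FILTER_V1_LEN !=
	     offsetof(struct p54_tx_control_filter, v1) +
	     sizeof(((struct p54_tx_control_filter *)0)->v1));
BUILD_BUG_ON(P54_TX_CONTROL_FILTER_V2_LEN !=
	     offsetof(struct p54_tx_control_filter, v2) +
	     sizeof(((struct p54_tx_control_filter *)0)->v2));
BUILD_BUG_ON(P54_TX_CONTROL_CHANNEL_V1_LEN !=
	     offsetof(struct p54_tx_control_channel, v1) +
	     sizeof(((struct p54_tx_control_channel *)0)->v1));
BUILD_BUG_ON(P54_TX_CONTROL_CHANNEL_V2_LEN !=
	     offsetof(struct p54_tx_control_channel, v2) +
	     sizeof(((struct p54_tx_control_channel *)0)->v2));
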
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 7dd4add..1c2a02a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -3,6 +3,7 @@
  * Linux device driver for PCI based Prism54
  *
  * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
  *
  * Based on the islsm (softmac prism54) driver, which is:
  * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
@@ -71,16 +72,18 @@
 	P54P_WRITE(ctrl_stat, reg);
 	wmb();
 
-	mdelay(50);
-
 	err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev);
 	if (err) {
-		printk(KERN_ERR "%s (prism54pci): cannot find firmware "
+		printk(KERN_ERR "%s (p54pci): cannot find firmware "
 		       "(isl3886)\n", pci_name(priv->pdev));
 		return err;
 	}
 
-	p54_parse_firmware(dev, fw_entry);
+	err = p54_parse_firmware(dev, fw_entry);
+	if (err) {
+		release_firmware(fw_entry);
+		return err;
+	}
 
 	data = (__le32 *) fw_entry->data;
 	remains = fw_entry->size;
@@ -121,162 +124,147 @@
 	wmb();
 	udelay(10);
 
+	/* wait for the firmware to boot properly */
+	mdelay(100);
+
 	return 0;
 }
 
-static irqreturn_t p54p_simple_interrupt(int irq, void *dev_id)
-{
-	struct p54p_priv *priv = (struct p54p_priv *) dev_id;
-	__le32 reg;
-
-	reg = P54P_READ(int_ident);
-	P54P_WRITE(int_ack, reg);
-
-	if (reg & P54P_READ(int_enable))
-		complete(&priv->boot_comp);
-
-	return IRQ_HANDLED;
-}
-
-static int p54p_read_eeprom(struct ieee80211_hw *dev)
+static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
+	int ring_index, struct p54p_desc *ring, u32 ring_limit,
+	struct sk_buff **rx_buf)
 {
 	struct p54p_priv *priv = dev->priv;
 	struct p54p_ring_control *ring_control = priv->ring_control;
-	int err;
-	struct p54_control_hdr *hdr;
-	void *eeprom;
-	dma_addr_t rx_mapping, tx_mapping;
-	u16 alen;
+	u32 limit, idx, i;
 
-	init_completion(&priv->boot_comp);
-	err = request_irq(priv->pdev->irq, &p54p_simple_interrupt,
-			  IRQF_SHARED, "prism54pci", priv);
-	if (err) {
-		printk(KERN_ERR "%s (prism54pci): failed to register IRQ handler\n",
-		       pci_name(priv->pdev));
-		return err;
-	}
+	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
+	limit = idx;
+	limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
+	limit = ring_limit - limit;
 
-	eeprom = kmalloc(0x2010 + EEPROM_READBACK_LEN, GFP_KERNEL);
-	if (!eeprom) {
-		printk(KERN_ERR "%s (prism54pci): no memory for eeprom!\n",
-		       pci_name(priv->pdev));
-		err = -ENOMEM;
-		goto out;
-	}
-
-	memset(ring_control, 0, sizeof(*ring_control));
-	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
-	P54P_READ(ring_control_base);
-	udelay(10);
-
-	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
-	P54P_READ(int_enable);
-	udelay(10);
-
-	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
-
-	if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
-		printk(KERN_ERR "%s (prism54pci): Cannot boot firmware!\n",
-		       pci_name(priv->pdev));
-		err = -EINVAL;
-		goto out;
-	}
-
-	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
-	P54P_READ(int_enable);
-
-	hdr = eeprom + 0x2010;
-	p54_fill_eeprom_readback(hdr);
-	hdr->req_id = cpu_to_le32(priv->common.rx_start);
-
-	rx_mapping = pci_map_single(priv->pdev, eeprom,
-				    0x2010, PCI_DMA_FROMDEVICE);
-	tx_mapping = pci_map_single(priv->pdev, (void *)hdr,
-				    EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
-
-	ring_control->rx_mgmt[0].host_addr = cpu_to_le32(rx_mapping);
-	ring_control->rx_mgmt[0].len = cpu_to_le16(0x2010);
-	ring_control->tx_data[0].host_addr = cpu_to_le32(tx_mapping);
-	ring_control->tx_data[0].device_addr = hdr->req_id;
-	ring_control->tx_data[0].len = cpu_to_le16(EEPROM_READBACK_LEN);
-
-	ring_control->host_idx[2] = cpu_to_le32(1);
-	ring_control->host_idx[1] = cpu_to_le32(1);
-
-	wmb();
-	mdelay(100);
-	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
-
-	wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ);
-	wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ);
-
-	pci_unmap_single(priv->pdev, tx_mapping,
-			 EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
-	pci_unmap_single(priv->pdev, rx_mapping,
-			 0x2010, PCI_DMA_FROMDEVICE);
-
-	alen = le16_to_cpu(ring_control->rx_mgmt[0].len);
-	if (le32_to_cpu(ring_control->device_idx[2]) != 1 ||
-	    alen < 0x10) {
-		printk(KERN_ERR "%s (prism54pci): Cannot read eeprom!\n",
-		       pci_name(priv->pdev));
-		err = -EINVAL;
-		goto out;
-	}
-
-	p54_parse_eeprom(dev, (u8 *)eeprom + 0x10, alen - 0x10);
-
- out:
-	kfree(eeprom);
-	P54P_WRITE(int_enable, cpu_to_le32(0));
-	P54P_READ(int_enable);
-	udelay(10);
-	free_irq(priv->pdev->irq, priv);
-	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
-	return err;
-}
-
-static void p54p_refill_rx_ring(struct ieee80211_hw *dev)
-{
-	struct p54p_priv *priv = dev->priv;
-	struct p54p_ring_control *ring_control = priv->ring_control;
-	u32 limit, host_idx, idx;
-
-	host_idx = le32_to_cpu(ring_control->host_idx[0]);
-	limit = host_idx;
-	limit -= le32_to_cpu(ring_control->device_idx[0]);
-	limit = ARRAY_SIZE(ring_control->rx_data) - limit;
-
-	idx = host_idx % ARRAY_SIZE(ring_control->rx_data);
+	i = idx % ring_limit;
 	while (limit-- > 1) {
-		struct p54p_desc *desc = &ring_control->rx_data[idx];
+		struct p54p_desc *desc = &ring[i];
 
 		if (!desc->host_addr) {
 			struct sk_buff *skb;
 			dma_addr_t mapping;
-			skb = dev_alloc_skb(MAX_RX_SIZE);
+			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
 			if (!skb)
 				break;
 
 			mapping = pci_map_single(priv->pdev,
 						 skb_tail_pointer(skb),
-						 MAX_RX_SIZE,
+						 priv->common.rx_mtu + 32,
 						 PCI_DMA_FROMDEVICE);
 			desc->host_addr = cpu_to_le32(mapping);
 			desc->device_addr = 0;	// FIXME: necessary?
-			desc->len = cpu_to_le16(MAX_RX_SIZE);
+			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
 			desc->flags = 0;
-			priv->rx_buf[idx] = skb;
+			rx_buf[i] = skb;
 		}
 
+		i++;
 		idx++;
-		host_idx++;
-		idx %= ARRAY_SIZE(ring_control->rx_data);
+		i %= ring_limit;
 	}
 
 	wmb();
-	ring_control->host_idx[0] = cpu_to_le32(host_idx);
+	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
+}
+
+static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
+	int ring_index, struct p54p_desc *ring, u32 ring_limit,
+	struct sk_buff **rx_buf)
+{
+	struct p54p_priv *priv = dev->priv;
+	struct p54p_ring_control *ring_control = priv->ring_control;
+	struct p54p_desc *desc;
+	u32 idx, i;
+
+	i = (*index) % ring_limit;
+	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
+	idx %= ring_limit;
+	while (i != idx) {
+		u16 len;
+		struct sk_buff *skb;
+		desc = &ring[i];
+		len = le16_to_cpu(desc->len);
+		skb = rx_buf[i];
+
+		if (!skb) {
+			i++;
+			i %= ring_limit;
+			continue;
+		}
+		skb_put(skb, len);
+
+		if (p54_rx(dev, skb)) {
+			pci_unmap_single(priv->pdev,
+					 le32_to_cpu(desc->host_addr),
+					 priv->common.rx_mtu + 32,
+					 PCI_DMA_FROMDEVICE);
+			rx_buf[i] = NULL;
+			desc->host_addr = 0;
+		} else {
+			skb_trim(skb, 0);
+			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
+		}
+
+		i++;
+		i %= ring_limit;
+	}
+
+	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
+}
+
+/* caller must hold priv->lock */
+static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
+	int ring_index, struct p54p_desc *ring, u32 ring_limit,
+	void **tx_buf)
+{
+	struct p54p_priv *priv = dev->priv;
+	struct p54p_ring_control *ring_control = priv->ring_control;
+	struct p54p_desc *desc;
+	u32 idx, i;
+
+	i = (*index) % ring_limit;
+	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
+	idx %= ring_limit;
+
+	while (i != idx) {
+		desc = &ring[i];
+		kfree(tx_buf[i]);
+		tx_buf[i] = NULL;
+
+		pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
+				 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
+
+		desc->host_addr = 0;
+		desc->device_addr = 0;
+		desc->len = 0;
+		desc->flags = 0;
+
+		i++;
+		i %= ring_limit;
+	}
+}
+
+static void p54p_rx_tasklet(unsigned long dev_id)
+{
+	struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
+	struct p54p_priv *priv = dev->priv;
+	struct p54p_ring_control *ring_control = priv->ring_control;
+
+	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
+		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
+
+	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
+		ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);
+
+	wmb();
+	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
 }
 
 static irqreturn_t p54p_interrupt(int irq, void *dev_id)
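
The refill/check helpers above work on free-running 32-bit indices that are reduced modulo the ring size only when a slot is touched: in p54p_refill_rx_ring(), host_idx - device_idx is the number of descriptors currently owned by the device, ring_limit minus that is what the host may refill, and the while (limit-- > 1) loop always leaves one descriptor unused (presumably so a completely full ring cannot be mistaken for an empty one). The arithmetic in isolation, relying on unsigned wraparound:

#include <stdint.h>

/* Free slots the host may refill, as computed in p54p_refill_rx_ring(). */
static uint32_t ring_free_slots(uint32_t host_idx, uint32_t device_idx,
				uint32_t ring_limit)
{
	uint32_t device_owned = host_idx - device_idx;	/* wraps correctly */

	return ring_limit - device_owned;
}
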
@@ -298,65 +286,18 @@
 	reg &= P54P_READ(int_enable);
 
 	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
-		struct p54p_desc *desc;
-		u32 idx, i;
-		i = priv->tx_idx;
-		i %= ARRAY_SIZE(ring_control->tx_data);
-		priv->tx_idx = idx = le32_to_cpu(ring_control->device_idx[1]);
-		idx %= ARRAY_SIZE(ring_control->tx_data);
+		p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
+				   3, ring_control->tx_mgmt,
+				   ARRAY_SIZE(ring_control->tx_mgmt),
+				   priv->tx_buf_mgmt);
 
-		while (i != idx) {
-			desc = &ring_control->tx_data[i];
-			if (priv->tx_buf[i]) {
-				kfree(priv->tx_buf[i]);
-				priv->tx_buf[i] = NULL;
-			}
+		p54p_check_tx_ring(dev, &priv->tx_idx_data,
+				   1, ring_control->tx_data,
+				   ARRAY_SIZE(ring_control->tx_data),
+				   priv->tx_buf_data);
 
-			pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
-					 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
+		tasklet_schedule(&priv->rx_tasklet);
 
-			desc->host_addr = 0;
-			desc->device_addr = 0;
-			desc->len = 0;
-			desc->flags = 0;
-
-			i++;
-			i %= ARRAY_SIZE(ring_control->tx_data);
-		}
-
-		i = priv->rx_idx;
-		i %= ARRAY_SIZE(ring_control->rx_data);
-		priv->rx_idx = idx = le32_to_cpu(ring_control->device_idx[0]);
-		idx %= ARRAY_SIZE(ring_control->rx_data);
-		while (i != idx) {
-			u16 len;
-			struct sk_buff *skb;
-			desc = &ring_control->rx_data[i];
-			len = le16_to_cpu(desc->len);
-			skb = priv->rx_buf[i];
-
-			skb_put(skb, len);
-
-			if (p54_rx(dev, skb)) {
-				pci_unmap_single(priv->pdev,
-						 le32_to_cpu(desc->host_addr),
-						 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
-
-				priv->rx_buf[i] = NULL;
-				desc->host_addr = 0;
-			} else {
-				skb_trim(skb, 0);
-				desc->len = cpu_to_le16(MAX_RX_SIZE);
-			}
-
-			i++;
-			i %= ARRAY_SIZE(ring_control->rx_data);
-		}
-
-		p54p_refill_rx_ring(dev);
-
-		wmb();
-		P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
 	} else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
 		complete(&priv->boot_comp);
 
@@ -392,7 +333,7 @@
 	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
 
 	if (free_on_tx)
-		priv->tx_buf[i] = data;
+		priv->tx_buf_data[i] = data;
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -412,7 +353,7 @@
 
 	init_completion(&priv->boot_comp);
 	err = request_irq(priv->pdev->irq, &p54p_interrupt,
-			  IRQF_SHARED, "prism54pci", dev);
+			  IRQF_SHARED, "p54pci", dev);
 	if (err) {
 		printk(KERN_ERR "%s: failed to register IRQ handler\n",
 		       wiphy_name(dev->wiphy));
@@ -420,10 +361,19 @@
 	}
 
 	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
-	priv->rx_idx = priv->tx_idx = 0;
-	p54p_refill_rx_ring(dev);
+	err = p54p_upload_firmware(dev);
+	if (err) {
+		free_irq(priv->pdev->irq, dev);
+		return err;
+	}
+	priv->rx_idx_data = priv->tx_idx_data = 0;
+	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
 
-	p54p_upload_firmware(dev);
+	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
+		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+
+	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
+		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
 
 	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
 	P54P_READ(ring_control_base);
@@ -465,6 +415,8 @@
 	unsigned int i;
 	struct p54p_desc *desc;
 
+	tasklet_kill(&priv->rx_tasklet);
+
 	P54P_WRITE(int_enable, cpu_to_le32(0));
 	P54P_READ(int_enable);
 	udelay(10);
@@ -473,26 +425,53 @@
 
 	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
 
-	for (i = 0; i < ARRAY_SIZE(priv->rx_buf); i++) {
+	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
 		desc = &ring_control->rx_data[i];
 		if (desc->host_addr)
-			pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
-					 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
-		kfree_skb(priv->rx_buf[i]);
-		priv->rx_buf[i] = NULL;
+			pci_unmap_single(priv->pdev,
+					 le32_to_cpu(desc->host_addr),
+					 priv->common.rx_mtu + 32,
+					 PCI_DMA_FROMDEVICE);
+		kfree_skb(priv->rx_buf_data[i]);
+		priv->rx_buf_data[i] = NULL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(priv->tx_buf); i++) {
+	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
+		desc = &ring_control->rx_mgmt[i];
+		if (desc->host_addr)
+			pci_unmap_single(priv->pdev,
+					 le32_to_cpu(desc->host_addr),
+					 priv->common.rx_mtu + 32,
+					 PCI_DMA_FROMDEVICE);
+		kfree_skb(priv->rx_buf_mgmt[i]);
+		priv->rx_buf_mgmt[i] = NULL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
 		desc = &ring_control->tx_data[i];
 		if (desc->host_addr)
-			pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
-					 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
+			pci_unmap_single(priv->pdev,
+					 le32_to_cpu(desc->host_addr),
+					 le16_to_cpu(desc->len),
+					 PCI_DMA_TODEVICE);
 
-		kfree(priv->tx_buf[i]);
-		priv->tx_buf[i] = NULL;
+		kfree(priv->tx_buf_data[i]);
+		priv->tx_buf_data[i] = NULL;
 	}
 
-	memset(ring_control, 0, sizeof(ring_control));
+	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
+		desc = &ring_control->tx_mgmt[i];
+		if (desc->host_addr)
+			pci_unmap_single(priv->pdev,
+					 le32_to_cpu(desc->host_addr),
+					 le16_to_cpu(desc->len),
+					 PCI_DMA_TODEVICE);
+
+		kfree(priv->tx_buf_mgmt[i]);
+		priv->tx_buf_mgmt[i] = NULL;
+	}
+
+	memset(ring_control, 0, sizeof(*ring_control));
 }
 
 static int __devinit p54p_probe(struct pci_dev *pdev,
@@ -506,7 +485,7 @@
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR "%s (prism54pci): Cannot enable new PCI device\n",
+		printk(KERN_ERR "%s (p54pci): Cannot enable new PCI device\n",
 		       pci_name(pdev));
 		return err;
 	}
@@ -514,22 +493,22 @@
 	mem_addr = pci_resource_start(pdev, 0);
 	mem_len = pci_resource_len(pdev, 0);
 	if (mem_len < sizeof(struct p54p_csr)) {
-		printk(KERN_ERR "%s (prism54pci): Too short PCI resources\n",
+		printk(KERN_ERR "%s (p54pci): Too short PCI resources\n",
 		       pci_name(pdev));
 		pci_disable_device(pdev);
 		return err;
 	}
 
-	err = pci_request_regions(pdev, "prism54pci");
+	err = pci_request_regions(pdev, "p54pci");
 	if (err) {
-		printk(KERN_ERR "%s (prism54pci): Cannot obtain PCI resources\n",
+		printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n",
 		       pci_name(pdev));
 		return err;
 	}
 
 	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
 	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
-		printk(KERN_ERR "%s (prism54pci): No suitable DMA available\n",
+		printk(KERN_ERR "%s (p54pci): No suitable DMA available\n",
 		       pci_name(pdev));
 		goto err_free_reg;
 	}
@@ -542,7 +521,7 @@
 
 	dev = p54_init_common(sizeof(*priv));
 	if (!dev) {
-		printk(KERN_ERR "%s (prism54pci): ieee80211 alloc failed\n",
+		printk(KERN_ERR "%s (p54pci): ieee80211 alloc failed\n",
 		       pci_name(pdev));
 		err = -ENOMEM;
 		goto err_free_reg;
@@ -556,7 +535,7 @@
 
 	priv->map = ioremap(mem_addr, mem_len);
 	if (!priv->map) {
-		printk(KERN_ERR "%s (prism54pci): Cannot map device memory\n",
+		printk(KERN_ERR "%s (p54pci): Cannot map device memory\n",
 		       pci_name(pdev));
 		err = -EINVAL;	// TODO: use a better error code?
 		goto err_free_dev;
@@ -565,39 +544,31 @@
 	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
 						  &priv->ring_control_dma);
 	if (!priv->ring_control) {
-		printk(KERN_ERR "%s (prism54pci): Cannot allocate rings\n",
+		printk(KERN_ERR "%s (p54pci): Cannot allocate rings\n",
 		       pci_name(pdev));
 		err = -ENOMEM;
 		goto err_iounmap;
 	}
-	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
-
-	err = p54p_upload_firmware(dev);
-	if (err)
-		goto err_free_desc;
-
-	err = p54p_read_eeprom(dev);
-	if (err)
-		goto err_free_desc;
-
 	priv->common.open = p54p_open;
 	priv->common.stop = p54p_stop;
 	priv->common.tx = p54p_tx;
 
 	spin_lock_init(&priv->lock);
+	tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
+
+	p54p_open(dev);
+	err = p54_read_eeprom(dev);
+	p54p_stop(dev);
+	if (err)
+		goto err_free_desc;
 
 	err = ieee80211_register_hw(dev);
 	if (err) {
-		printk(KERN_ERR "%s (prism54pci): Cannot register netdevice\n",
+		printk(KERN_ERR "%s (p54pci): Cannot register netdevice\n",
 		       pci_name(pdev));
 		goto err_free_common;
 	}
 
-	printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
-	       wiphy_name(dev->wiphy),
-	       print_mac(mac, dev->wiphy->perm_addr),
-	       priv->common.version);
-
 	return 0;
 
  err_free_common:
@@ -645,7 +616,7 @@
 	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
 	struct p54p_priv *priv = dev->priv;
 
-	if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) {
+	if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
 		ieee80211_stop_queues(dev);
 		p54p_stop(dev);
 	}
@@ -663,7 +634,7 @@
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) {
+	if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
 		p54p_open(dev);
 		ieee80211_wake_queues(dev);
 	}
@@ -673,7 +644,7 @@
 #endif /* CONFIG_PM */
 
 static struct pci_driver p54p_driver = {
-	.name		= "prism54pci",
+	.name		= "p54pci",
 	.id_table	= p54p_table,
 	.probe		= p54p_probe,
 	.remove		= __devexit_p(p54p_remove),
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 5bedd7a..4a67780 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -1,5 +1,5 @@
-#ifndef PRISM54PCI_H
-#define PRISM54PCI_H
+#ifndef P54PCI_H
+#define P54PCI_H
 
 /*
  * Defines for PCI based mac80211 Prism54 driver
@@ -68,7 +68,7 @@
 } __attribute__ ((packed));
 
 /* usb backend only needs the register defines above */
-#ifndef PRISM54USB_H
+#ifndef P54USB_H
 struct p54p_desc {
 	__le32 host_addr;
 	__le32 device_addr;
@@ -92,15 +92,19 @@
 	struct p54_common common;
 	struct pci_dev *pdev;
 	struct p54p_csr __iomem *map;
+	struct tasklet_struct rx_tasklet;
 
 	spinlock_t lock;
 	struct p54p_ring_control *ring_control;
 	dma_addr_t ring_control_dma;
-	u32 rx_idx, tx_idx;
-	struct sk_buff *rx_buf[8];
-	void *tx_buf[32];
+	u32 rx_idx_data, tx_idx_data;
+	u32 rx_idx_mgmt, tx_idx_mgmt;
+	struct sk_buff *rx_buf_data[8];
+	struct sk_buff *rx_buf_mgmt[4];
+	void *tx_buf_data[32];
+	void *tx_buf_mgmt[4];
 	struct completion boot_comp;
 };
 
-#endif /* PRISM54USB_H */
-#endif /* PRISM54PCI_H */
+#endif /* P54USB_H */
+#endif /* P54PCI_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index cbaca23..1912f5e 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -91,11 +91,16 @@
 
 	skb_unlink(skb, &priv->rx_queue);
 	skb_put(skb, urb->actual_length);
-	if (!priv->hw_type)
-		skb_pull(skb, sizeof(struct net2280_tx_hdr));
+
+	if (priv->hw_type == P54U_NET2280)
+		skb_pull(skb, priv->common.tx_hdr_len);
+	if (priv->common.fw_interface == FW_LM87) {
+		skb_pull(skb, 4);
+		skb_put(skb, 4);
+	}
 
 	if (p54_rx(dev, skb)) {
-		skb = dev_alloc_skb(MAX_RX_SIZE);
+		skb = dev_alloc_skb(priv->common.rx_mtu + 32);
 		if (unlikely(!skb)) {
 			usb_free_urb(urb);
 			/* TODO check rx queue length and refill *somewhere* */
@@ -109,9 +114,12 @@
 		urb->context = skb;
 		skb_queue_tail(&priv->rx_queue, skb);
 	} else {
-		if (!priv->hw_type)
-			skb_push(skb, sizeof(struct net2280_tx_hdr));
-
+		if (priv->hw_type == P54U_NET2280)
+			skb_push(skb, priv->common.tx_hdr_len);
+		if (priv->common.fw_interface == FW_LM87) {
+			skb_push(skb, 4);
+			skb_put(skb, 4);
+		}
 		skb_reset_tail_pointer(skb);
 		skb_trim(skb, 0);
 		if (urb->transfer_buffer != skb_tail_pointer(skb)) {
@@ -145,7 +153,7 @@
 	struct p54u_rx_info *info;
 
 	while (skb_queue_len(&priv->rx_queue) < 32) {
-		skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL);
+		skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
 		if (!skb)
 			break;
 		entry = usb_alloc_urb(0, GFP_KERNEL);
@@ -153,7 +161,10 @@
 			kfree_skb(skb);
 			break;
 		}
-		usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb);
+		usb_fill_bulk_urb(entry, priv->udev,
+				  usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
+				  skb_tail_pointer(skb),
+				  priv->common.rx_mtu + 32, p54u_rx_cb, skb);
 		info = (struct p54u_rx_info *) skb->cb;
 		info->urb = entry;
 		info->dev = dev;
@@ -207,6 +218,42 @@
 	usb_submit_urb(data_urb, GFP_ATOMIC);
 }
 
+static __le32 p54u_lm87_chksum(const u32 *data, size_t length)
+{
+	u32 chk = 0;
+
+	length >>= 2;
+	while (length--) {
+		chk ^= *data++;
+		chk = (chk >> 5) ^ (chk << 3);
+	}
+
+	return cpu_to_le32(chk);
+}
+
+static void p54u_tx_lm87(struct ieee80211_hw *dev,
+			 struct p54_control_hdr *data,
+			 size_t len, int free_on_tx)
+{
+	struct p54u_priv *priv = dev->priv;
+	struct urb *data_urb;
+	struct lm87_tx_hdr *hdr = (void *)data - sizeof(*hdr);
+
+	data_urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!data_urb)
+		return;
+
+	hdr->chksum = p54u_lm87_chksum((u32 *)data, len);
+	hdr->device_addr = data->req_id;
+
+	usb_fill_bulk_urb(data_urb, priv->udev,
+		usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
+		len + sizeof(*hdr), free_on_tx ? p54u_tx_free_cb : p54u_tx_cb,
+		dev);
+
+	usb_submit_urb(data_urb, GFP_ATOMIC);
+}
+
 static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data,
 			    size_t len, int free_on_tx)
 {
@@ -312,73 +359,6 @@
 			    data, len, &alen, 2000);
 }
 
-static int p54u_read_eeprom(struct ieee80211_hw *dev)
-{
-	struct p54u_priv *priv = dev->priv;
-	void *buf;
-	struct p54_control_hdr *hdr;
-	int err, alen;
-	size_t offset = priv->hw_type ? 0x10 : 0x20;
-
-	buf = kmalloc(0x2020, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "prism54usb: cannot allocate memory for "
-		       "eeprom readback!\n");
-		return -ENOMEM;
-	}
-
-	if (priv->hw_type) {
-		*((u32 *) buf) = priv->common.rx_start;
-		err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
-		if (err) {
-			printk(KERN_ERR "prism54usb: addr send failed\n");
-			goto fail;
-		}
-	} else {
-		struct net2280_reg_write *reg = buf;
-		reg->port = cpu_to_le16(NET2280_DEV_U32);
-		reg->addr = cpu_to_le32(P54U_DEV_BASE);
-		reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
-		err = p54u_bulk_msg(priv, P54U_PIPE_DEV, buf, sizeof(*reg));
-		if (err) {
-			printk(KERN_ERR "prism54usb: dev_int send failed\n");
-			goto fail;
-		}
-	}
-
-	hdr = buf + priv->common.tx_hdr_len;
-	p54_fill_eeprom_readback(hdr);
-	hdr->req_id = cpu_to_le32(priv->common.rx_start);
-	if (priv->common.tx_hdr_len) {
-		struct net2280_tx_hdr *tx_hdr = buf;
-		tx_hdr->device_addr = hdr->req_id;
-		tx_hdr->len = cpu_to_le16(EEPROM_READBACK_LEN);
-	}
-
-	/* we can just pretend to send 0x2000 bytes of nothing in the headers */
-	err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf,
-			    EEPROM_READBACK_LEN + priv->common.tx_hdr_len);
-	if (err) {
-		printk(KERN_ERR "prism54usb: eeprom req send failed\n");
-		goto fail;
-	}
-
-	err = usb_bulk_msg(priv->udev,
-			   usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
-			   buf, 0x2020, &alen, 1000);
-	if (!err && alen > offset) {
-		p54_parse_eeprom(dev, (u8 *)buf + offset, alen - offset);
-	} else {
-		printk(KERN_ERR "prism54usb: eeprom read failed!\n");
-		err = -EINVAL;
-		goto fail;
-	}
-
- fail:
-	kfree(buf);
-	return err;
-}
-
 static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
 {
 	static char start_string[] = "~~~~<\r";
@@ -412,7 +392,9 @@
 		goto err_req_fw_failed;
 	}
 
-	p54_parse_firmware(dev, fw_entry);
+	err = p54_parse_firmware(dev, fw_entry);
+	if (err)
+		goto err_upload_failed;
 
 	left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size);
 	strcpy(buf, start_string);
@@ -458,7 +440,7 @@
 
 		err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size);
 		if (err) {
-			printk(KERN_ERR "prism54usb: firmware upload failed!\n");
+			printk(KERN_ERR "p54usb: firmware upload failed!\n");
 			goto err_upload_failed;
 		}
 
@@ -469,7 +451,7 @@
 	*((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size));
 	err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
 	if (err) {
-		printk(KERN_ERR "prism54usb: firmware upload failed!\n");
+		printk(KERN_ERR "p54usb: firmware upload failed!\n");
 		goto err_upload_failed;
 	}
 
@@ -480,13 +462,13 @@
 			break;
 
 		if (alen > 5 && !memcmp(buf, "ERROR", 5)) {
-			printk(KERN_INFO "prism54usb: firmware upload failed!\n");
+			printk(KERN_INFO "p54usb: firmware upload failed!\n");
 			err = -EINVAL;
 			break;
 		}
 
 		if (time_after(jiffies, timeout)) {
-			printk(KERN_ERR "prism54usb: firmware boot timed out!\n");
+			printk(KERN_ERR "p54usb: firmware boot timed out!\n");
 			err = -ETIMEDOUT;
 			break;
 		}
@@ -498,7 +480,7 @@
 	buf[1] = '\r';
 	err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2);
 	if (err) {
-		printk(KERN_ERR "prism54usb: firmware boot failed!\n");
+		printk(KERN_ERR "p54usb: firmware boot failed!\n");
 		goto err_upload_failed;
 	}
 
@@ -549,7 +531,12 @@
 		return err;
 	}
 
-	p54_parse_firmware(dev, fw_entry);
+	err = p54_parse_firmware(dev, fw_entry);
+	if (err) {
+		kfree(buf);
+		release_firmware(fw_entry);
+		return err;
+	}
 
 #define P54U_WRITE(type, addr, data) \
 	do {\
@@ -660,7 +647,7 @@
 
 		err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len);
 		if (err) {
-			printk(KERN_ERR "prism54usb: firmware block upload "
+			printk(KERN_ERR "p54usb: firmware block upload "
 			       "failed\n");
 			goto fail;
 		}
@@ -694,7 +681,7 @@
 			  0x002C | (unsigned long)&devreg->direct_mem_win);
 		if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) ||
 		    !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) {
-			printk(KERN_ERR "prism54usb: firmware DMA transfer "
+			printk(KERN_ERR "p54usb: firmware DMA transfer "
 			       "failed\n");
 			goto fail;
 		}
@@ -802,7 +789,7 @@
 
 	dev = p54_init_common(sizeof(*priv));
 	if (!dev) {
-		printk(KERN_ERR "prism54usb: ieee80211 alloc failed\n");
+		printk(KERN_ERR "p54usb: ieee80211 alloc failed\n");
 		return -ENOMEM;
 	}
 
@@ -833,49 +820,40 @@
 		}
 	}
 	priv->common.open = p54u_open;
-
+	priv->common.stop = p54u_stop;
 	if (recognized_pipes < P54U_PIPE_NUMBER) {
 		priv->hw_type = P54U_3887;
-		priv->common.tx = p54u_tx_3887;
+		err = p54u_upload_firmware_3887(dev);
+		if (priv->common.fw_interface == FW_LM87) {
+			dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
+			priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr);
+			priv->common.tx = p54u_tx_lm87;
+		} else
+			priv->common.tx = p54u_tx_3887;
 	} else {
+		priv->hw_type = P54U_NET2280;
 		dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr);
 		priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr);
 		priv->common.tx = p54u_tx_net2280;
-	}
-	priv->common.stop = p54u_stop;
-
-	if (priv->hw_type)
-		err = p54u_upload_firmware_3887(dev);
-	else
 		err = p54u_upload_firmware_net2280(dev);
-	if (err)
-		goto err_free_dev;
-
-	err = p54u_read_eeprom(dev);
-	if (err)
-		goto err_free_dev;
-
-	if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
-		u8 perm_addr[ETH_ALEN];
-
-		printk(KERN_WARNING "prism54usb: Invalid hwaddr! Using randomly generated MAC addr\n");
-		random_ether_addr(perm_addr);
-		SET_IEEE80211_PERM_ADDR(dev, perm_addr);
 	}
+	if (err)
+		goto err_free_dev;
 
 	skb_queue_head_init(&priv->rx_queue);
 
+	p54u_open(dev);
+	err = p54_read_eeprom(dev);
+	p54u_stop(dev);
+	if (err)
+		goto err_free_dev;
+
 	err = ieee80211_register_hw(dev);
 	if (err) {
-		printk(KERN_ERR "prism54usb: Cannot register netdevice\n");
+		printk(KERN_ERR "p54usb: Cannot register netdevice\n");
 		goto err_free_dev;
 	}
 
-	printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
-	       wiphy_name(dev->wiphy),
-	       print_mac(mac, dev->wiphy->perm_addr),
-	       priv->common.version);
-
 	return 0;
 
  err_free_dev:
@@ -902,7 +880,7 @@
 }
 
 static struct usb_driver p54u_driver = {
-	.name	= "prism54usb",
+	.name	= "p54usb",
 	.id_table = p54u_table,
 	.probe = p54u_probe,
 	.disconnect = p54u_disconnect,
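
A note on the LM87 TX path added above: p54u_lm87_chksum() is a plain XOR-and-shift accumulator over 32-bit words, and p54u_tx_lm87() stores its result in the prepended struct lm87_tx_hdr. The user-space sketch below reproduces only that accumulator for illustration; it omits the driver's cpu_to_le32() conversion, and the frame words are made-up example data, not real firmware traffic.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same XOR/shift accumulator as p54u_lm87_chksum(), minus the
 * cpu_to_le32() conversion the driver applies to the result. */
static uint32_t lm87_chksum(const uint32_t *data, size_t length)
{
	uint32_t chk = 0;

	length >>= 2;			/* bytes -> 32-bit words */
	while (length--) {
		chk ^= *data++;
		chk = (chk >> 5) ^ (chk << 3);
	}
	return chk;
}

int main(void)
{
	/* invented example payload, word-aligned as the driver assumes */
	uint32_t frame[4] = { 0xdeadbeef, 0x00000001, 0x12345678, 0 };

	printf("chksum = 0x%08x\n", lm87_chksum(frame, sizeof(frame)));
	return 0;
}
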
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d1896b3..5b8fe91 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -1,5 +1,5 @@
-#ifndef PRISM54USB_H
-#define PRISM54USB_H
+#ifndef P54USB_H
+#define P54USB_H
 
 /*
  * Defines for USB based mac80211 Prism54 driver
@@ -72,6 +72,11 @@
 	u8 padding[8];
 } __attribute__((packed));
 
+struct lm87_tx_hdr {
+	__le32 device_addr;
+	__le32 chksum;
+} __attribute__((packed));
+
 /* Some flags for the isl hardware registers controlling DMA inside the
  * chip */
 #define ISL38XX_DMA_STATUS_DONE			0x00000001
@@ -130,4 +135,4 @@
 	struct sk_buff_head rx_queue;
 };
 
-#endif /* PRISM54USB_H */
+#endif /* P54USB_H */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 3d75a71..16e68f4 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -71,7 +71,7 @@
 	if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) {
 		printk(KERN_DEBUG
 		       "%s(): Sorry, Repeater mode and Secondary mode "
-		       "are not yet supported by this driver.\n", __FUNCTION__);
+		       "are not yet supported by this driver.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -333,7 +333,7 @@
 	if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) {
 		printk(KERN_DEBUG
 		       "%s: %s() You passed a non-valid init_mode.\n",
-		       priv->ndev->name, __FUNCTION__);
+		       priv->ndev->name, __func__);
 		return -EINVAL;
 	}
 
@@ -1234,7 +1234,7 @@
 		/* don't know how to disable radio */
 		printk(KERN_DEBUG
 		       "%s: %s() disabling radio is not yet supported.\n",
-		       priv->ndev->name, __FUNCTION__);
+		       priv->ndev->name, __func__);
 		return -ENOTSUPP;
 	} else if (vwrq->fixed)
 		/* currently only fixed value is supported */
@@ -1242,7 +1242,7 @@
 	else {
 		printk(KERN_DEBUG
 		       "%s: %s() auto power will be implemented later.\n",
-		       priv->ndev->name, __FUNCTION__);
+		       priv->ndev->name, __func__);
 		return -ENOTSUPP;
 	}
 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 963960d..44da0d1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -325,7 +325,7 @@
     p_dev->io.IOAddrLines = 5;
 
     /* Interrupt setup. For PCMCIA, driver takes what's given */
-    p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+    p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
     p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
     p_dev->irq.Handler = &ray_interrupt;
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 00e965b..2b41489 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1627,7 +1627,6 @@
 static int rndis_iw_set_scan(struct net_device *dev,
     struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
 {
-	struct iw_param *param = &wrqu->param;
 	struct usbnet *usbdev = dev->priv;
 	union iwreq_data evt;
 	int ret = -EINVAL;
@@ -1635,7 +1634,7 @@
 
 	devdbg(usbdev, "SIOCSIWSCAN");
 
-	if (param->flags == 0) {
+	if (wrqu->data.flags == 0) {
 		tmp = ccpu2(1);
 		ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
 								sizeof(tmp));
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index d485a86..f839ce0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,5 +1,5 @@
-config RT2X00
-	tristate "Ralink driver support"
+menuconfig RT2X00
+	bool "Ralink driver support"
 	depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
 	---help---
 	  This will enable the experimental support for the Ralink drivers,
@@ -17,31 +17,6 @@
 
 if RT2X00
 
-config RT2X00_LIB
-	tristate
-
-config RT2X00_LIB_PCI
-	tristate
-	select RT2X00_LIB
-
-config RT2X00_LIB_USB
-	tristate
-	select RT2X00_LIB
-
-config RT2X00_LIB_FIRMWARE
-	boolean
-	depends on RT2X00_LIB
-	select FW_LOADER
-
-config RT2X00_LIB_RFKILL
-	boolean
-	depends on RT2X00_LIB
-	select RFKILL
-
-config RT2X00_LIB_LEDS
-	boolean
-	depends on RT2X00_LIB && NEW_LEDS
-
 config RT2400PCI
 	tristate "Ralink rt2400 (PCI/PCMCIA) support"
 	depends on PCI
@@ -53,23 +28,6 @@
 
 	  When compiled as a module, this driver will be called "rt2400pci.ko".
 
-config RT2400PCI_RFKILL
-	bool "Ralink rt2400 rfkill support"
-	depends on RT2400PCI
-	select RT2X00_LIB_RFKILL
-	---help---
-	  This adds support for integrated rt2400 hardware that features a
-	  hardware button to control the radio state.
-	  This feature depends on the RF switch subsystem rfkill.
-
-config RT2400PCI_LEDS
-	bool "Ralink rt2400 leds support"
-	depends on RT2400PCI && NEW_LEDS
-	select LEDS_CLASS
-	select RT2X00_LIB_LEDS
-	---help---
-	  This adds support for led triggers provided my mac80211.
-
 config RT2500PCI
 	tristate "Ralink rt2500 (PCI/PCMCIA) support"
 	depends on PCI
@@ -81,28 +39,12 @@
 
 	  When compiled as a module, this driver will be called "rt2500pci.ko".
 
-config RT2500PCI_RFKILL
-	bool "Ralink rt2500 rfkill support"
-	depends on RT2500PCI
-	select RT2X00_LIB_RFKILL
-	---help---
-	  This adds support for integrated rt2500 hardware that features a
-	  hardware button to control the radio state.
-	  This feature depends on the RF switch subsystem rfkill.
-
-config RT2500PCI_LEDS
-	bool "Ralink rt2500 leds support"
-	depends on RT2500PCI && NEW_LEDS
-	select LEDS_CLASS
-	select RT2X00_LIB_LEDS
-	---help---
-	  This adds support for led triggers provided my mac80211.
-
 config RT61PCI
 	tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
 	depends on PCI
 	select RT2X00_LIB_PCI
 	select RT2X00_LIB_FIRMWARE
+	select RT2X00_LIB_CRYPTO
 	select CRC_ITU_T
 	select EEPROM_93CX6
 	---help---
@@ -111,23 +53,6 @@
 
 	  When compiled as a module, this driver will be called "rt61pci.ko".
 
-config RT61PCI_RFKILL
-	bool "Ralink rt2501/rt61 rfkill support"
-	depends on RT61PCI
-	select RT2X00_LIB_RFKILL
-	---help---
-	  This adds support for integrated rt61 hardware that features a
-	  hardware button to control the radio state.
-	  This feature depends on the RF switch subsystem rfkill.
-
-config RT61PCI_LEDS
-	bool "Ralink rt2501/rt61 leds support"
-	depends on RT61PCI && NEW_LEDS
-	select LEDS_CLASS
-	select RT2X00_LIB_LEDS
-	---help---
-	  This adds support for led triggers provided my mac80211.
-
 config RT2500USB
 	tristate "Ralink rt2500 (USB) support"
 	depends on USB
@@ -138,19 +63,12 @@
 
 	  When compiled as a module, this driver will be called "rt2500usb.ko".
 
-config RT2500USB_LEDS
-	bool "Ralink rt2500 leds support"
-	depends on RT2500USB && NEW_LEDS
-	select LEDS_CLASS
-	select RT2X00_LIB_LEDS
-	---help---
-	  This adds support for led triggers provided my mac80211.
-
 config RT73USB
 	tristate "Ralink rt2501/rt73 (USB) support"
 	depends on USB
 	select RT2X00_LIB_USB
 	select RT2X00_LIB_FIRMWARE
+	select RT2X00_LIB_CRYPTO
 	select CRC_ITU_T
 	---help---
 	  This adds support for rt2501 wireless chipset family.
@@ -158,13 +76,37 @@
 
 	  When compiled as a module, this driver will be called "rt73usb.ko".
 
-config RT73USB_LEDS
-	bool "Ralink rt2501/rt73 leds support"
-	depends on RT73USB && NEW_LEDS
-	select LEDS_CLASS
-	select RT2X00_LIB_LEDS
-	---help---
-	  This adds support for led triggers provided my mac80211.
+config RT2X00_LIB_PCI
+	tristate
+	select RT2X00_LIB
+
+config RT2X00_LIB_USB
+	tristate
+	select RT2X00_LIB
+
+config RT2X00_LIB
+	tristate
+
+config RT2X00_LIB_FIRMWARE
+	boolean
+	select FW_LOADER
+
+config RT2X00_LIB_CRYPTO
+	boolean
+
+config RT2X00_LIB_RFKILL
+	boolean
+	default y if (RT2X00_LIB=y && RFKILL=y) || (RT2X00_LIB=m && RFKILL!=n)
+
+comment "rt2x00 rfkill support disabled due to modularized RFKILL and built-in rt2x00"
+	depends on RT2X00_LIB=y && RFKILL=m
+
+config RT2X00_LIB_LEDS
+	boolean
+	default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
+
+comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
+	depends on RT2X00_LIB=y && LEDS_CLASS=m
 
 config RT2X00_LIB_DEBUGFS
 	bool "Ralink debugfs support"
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 1087dbc..917cb4f 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -3,6 +3,7 @@
 rt2x00lib-y				+= rt2x00config.o
 rt2x00lib-y				+= rt2x00queue.o
 rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS)	+= rt2x00debug.o
+rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO)	+= rt2x00crypto.o
 rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL)	+= rt2x00rfkill.o
 rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE)	+= rt2x00firmware.o
 rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS)	+= rt2x00leds.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4c0538d..08cb9ee 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -231,7 +231,7 @@
 };
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
-#ifdef CONFIG_RT2400PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
@@ -241,9 +241,9 @@
 }
 #else
 #define rt2400pci_rfkill_poll	NULL
-#endif /* CONFIG_RT2400PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
-#ifdef CONFIG_RT2400PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 static void rt2400pci_brightness_set(struct led_classdev *led_cdev,
 				     enum led_brightness brightness)
 {
@@ -288,7 +288,7 @@
 	led->led_dev.blink_set = rt2400pci_blink_set;
 	led->flags = LED_INITIALIZED;
 }
-#endif /* CONFIG_RT2400PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 /*
  * Configuration handlers.
@@ -1241,7 +1241,7 @@
 	if (!reg)
 		return IRQ_NONE;
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return IRQ_HANDLED;
 
 	/*
@@ -1374,22 +1374,22 @@
 	/*
 	 * Store led mode, for correct led behaviour.
 	 */
-#ifdef CONFIG_RT2400PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
 
 	rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
 	if (value == LED_MODE_TXRX_ACTIVITY)
 		rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
 				   LED_TYPE_ACTIVITY);
-#endif /* CONFIG_RT2400PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 	/*
 	 * Detect if this device has an hardware controlled radio.
 	 */
-#ifdef CONFIG_RT2400PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
 		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
-#endif /* CONFIG_RT2400PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
 	/*
 	 * Check if the BBP tuning should be enabled.
@@ -1404,7 +1404,7 @@
  * RF value list for RF2420 & RF2421
  * Supports: 2.4 GHz
  */
-static const struct rf_channel rf_vals_bg[] = {
+static const struct rf_channel rf_vals_b[] = {
 	{ 1,  0x00022058, 0x000c1fda, 0x00000101, 0 },
 	{ 2,  0x00022058, 0x000c1fee, 0x00000101, 0 },
 	{ 3,  0x00022058, 0x000c2002, 0x00000101, 0 },
@@ -1421,10 +1421,11 @@
 	{ 14, 0x00022058, 0x000c20fa, 0x00000101, 0 },
 };
 
-static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	u8 *txpower;
+	struct channel_info *info;
+	char *tx_power;
 	unsigned int i;
 
 	/*
@@ -1440,23 +1441,28 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
-	 * Convert tx_power array in eeprom.
-	 */
-	txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
-	for (i = 0; i < 14; i++)
-		txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK;
-	spec->tx_power_a = NULL;
-	spec->tx_power_bg = txpower;
-	spec->tx_power_default = DEFAULT_TXPOWER;
 
-	spec->num_channels = ARRAY_SIZE(rf_vals_bg);
-	spec->channels = rf_vals_bg;
+	spec->num_channels = ARRAY_SIZE(rf_vals_b);
+	spec->channels = rf_vals_b;
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
+	for (i = 0; i < 14; i++)
+		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+
+	return 0;
 }
 
 static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1477,7 +1483,9 @@
 	/*
 	 * Initialize hw specifications.
 	 */
-	rt2400pci_probe_hw_mode(rt2x00dev);
+	retval = rt2400pci_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
 
 	/*
 	 * This device requires the atim queue and DMA-mapped skbs.
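
The reworked rt2400pci_probe_hw_mode() above replaces the static per-band tx_power arrays with a dynamically allocated channels_info array filled from the EEPROM TX-power block. A hedged user-space model of that allocate-and-fill pattern is sketched below; calloc() stands in for kzalloc(..., GFP_KERNEL), the 14 EEPROM bytes are invented example values, and the real driver additionally runs each byte through TXPOWER_FROM_DEV() before storing it.

#include <stdio.h>
#include <stdlib.h>

/* user-space stand-in for the driver's struct channel_info */
struct channel_info {
	unsigned int flags;
	short tx_power1;
	short tx_power2;
};

int main(void)
{
	/* pretend these bytes came from the EEPROM_TXPOWER_START block */
	unsigned char eeprom_tx_power[14] = {
		30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36
	};
	unsigned int num_channels = 14;	/* 2.4 GHz channels 1-14 */
	struct channel_info *info;
	unsigned int i;

	/* calloc() plays the role of kzalloc(..., GFP_KERNEL) here */
	info = calloc(num_channels, sizeof(*info));
	if (!info)
		return 1;

	for (i = 0; i < num_channels; i++)
		info[i].tx_power1 = eeprom_tx_power[i];

	printf("channel 1: tx_power1 = %d\n", info[0].tx_power1);
	free(info);
	return 0;
}
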
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index bc55642..bbff381 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -938,19 +938,13 @@
 #define MAX_TXPOWER	62
 #define DEFAULT_TXPOWER	39
 
-#define TXPOWER_FROM_DEV(__txpower)					\
-({									\
-	((__txpower) > MAX_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER :	\
-	((__txpower) < MIN_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER :	\
-	(((__txpower) - MAX_TXPOWER) + MIN_TXPOWER);			\
-})
+#define __CLAMP_TX(__txpower) \
+	clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
 
-#define TXPOWER_TO_DEV(__txpower)			\
-({							\
-	(__txpower) += MIN_TXPOWER;			\
-	((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER :	\
-	(((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER :	\
-	(MAX_TXPOWER - ((__txpower) - MIN_TXPOWER)));	\
-})
+#define TXPOWER_FROM_DEV(__txpower) \
+	((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
+
+#define TXPOWER_TO_DEV(__txpower) \
+	MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER)
 
 #endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 181a146..ef42cc0 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -231,7 +231,7 @@
 };
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
-#ifdef CONFIG_RT2500PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
@@ -241,9 +241,9 @@
 }
 #else
 #define rt2500pci_rfkill_poll	NULL
-#endif /* CONFIG_RT2500PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
-#ifdef CONFIG_RT2500PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 static void rt2500pci_brightness_set(struct led_classdev *led_cdev,
 				     enum led_brightness brightness)
 {
@@ -288,7 +288,7 @@
 	led->led_dev.blink_set = rt2500pci_blink_set;
 	led->flags = LED_INITIALIZED;
 }
-#endif /* CONFIG_RT2500PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 /*
  * Configuration handlers.
@@ -1316,6 +1316,8 @@
 
 	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
 		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
+	else
+		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
 	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
 		rxdesc->dev_flags |= RXDONE_MY_BSS;
 }
@@ -1377,7 +1379,7 @@
 	if (!reg)
 		return IRQ_NONE;
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return IRQ_HANDLED;
 
 	/*
@@ -1531,22 +1533,22 @@
 	/*
 	 * Store led mode, for correct led behaviour.
 	 */
-#ifdef CONFIG_RT2500PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
 
 	rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
 	if (value == LED_MODE_TXRX_ACTIVITY)
 		rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
 				   LED_TYPE_ACTIVITY);
-#endif /* CONFIG_RT2500PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 	/*
 	 * Detect if this device has an hardware controlled radio.
 	 */
-#ifdef CONFIG_RT2500PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
 		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
-#endif /* CONFIG_RT2500PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
 	/*
 	 * Check if the BBP tuning should be enabled.
@@ -1721,10 +1723,11 @@
 	{ 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
 };
 
-static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	u8 *txpower;
+	struct channel_info *info;
+	char *tx_power;
 	unsigned int i;
 
 	/*
@@ -1741,20 +1744,10 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
-	 * Convert tx_power array in eeprom.
-	 */
-	txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
-	for (i = 0; i < 14; i++)
-		txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
-	spec->tx_power_a = NULL;
-	spec->tx_power_bg = txpower;
-	spec->tx_power_default = DEFAULT_TXPOWER;
 
 	if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
 		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1776,6 +1769,26 @@
 		spec->num_channels = ARRAY_SIZE(rf_vals_5222);
 		spec->channels = rf_vals_5222;
 	}
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
+	for (i = 0; i < 14; i++)
+		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+
+	if (spec->num_channels > 14) {
+		for (i = 14; i < spec->num_channels; i++)
+			info[i].tx_power1 = DEFAULT_TXPOWER;
+	}
+
+	return 0;
 }
 
 static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1796,7 +1809,9 @@
 	/*
 	 * Initialize hw specifications.
 	 */
-	rt2500pci_probe_hw_mode(rt2x00dev);
+	retval = rt2500pci_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
 
 	/*
 	 * This device requires the atim queue and DMA-mapped skbs.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 42f3769..8c26bef 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1223,17 +1223,10 @@
 #define MAX_TXPOWER	31
 #define DEFAULT_TXPOWER	24
 
-#define TXPOWER_FROM_DEV(__txpower)		\
-({						\
-	((__txpower) > MAX_TXPOWER) ?		\
-		DEFAULT_TXPOWER : (__txpower);	\
-})
+#define TXPOWER_FROM_DEV(__txpower) \
+	(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_TO_DEV(__txpower)			\
-({							\
-	((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER :	\
-	(((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER :	\
-	(__txpower));					\
-})
+#define TXPOWER_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
 
 #endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index cd5af65..d3bf7bb 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -288,7 +288,7 @@
 };
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
-#ifdef CONFIG_RT2500USB_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
 				     enum led_brightness brightness)
 {
@@ -333,7 +333,7 @@
 	led->led_dev.blink_set = rt2500usb_blink_set;
 	led->flags = LED_INITIALIZED;
 }
-#endif /* CONFIG_RT2500USB_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 /*
  * Configuration handlers.
@@ -384,7 +384,7 @@
 		rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
 		rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
 		rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
-				   2 * (conf->type != IEEE80211_IF_TYPE_STA));
+				   2 * (conf->type != NL80211_IFTYPE_STATION));
 		rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
 
 		/*
@@ -1114,8 +1114,7 @@
 	rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
 			   test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
 	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
-	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT,
-			   skb->len - skbdesc->desc_len);
+	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
 	rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
 	rt2x00_desc_write(txd, 0, word);
 }
@@ -1134,7 +1133,6 @@
 	int pipe = usb_sndbulkpipe(usb_dev, 1);
 	int length;
 	u16 reg;
-	u32 word, len;
 
 	/*
 	 * Add the descriptor in front of the skb.
@@ -1144,17 +1142,6 @@
 	skbdesc->desc = entry->skb->data;
 
 	/*
-	 * Adjust the beacon databyte count. The current number is
-	 * calculated before this function gets called, but falsely
-	 * assumes that the descriptor was already present in the SKB.
-	 */
-	rt2x00_desc_read(skbdesc->desc, 0, &word);
-	len  = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
-	len += skbdesc->desc_len;
-	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
-	rt2x00_desc_write(skbdesc->desc, 0, word);
-
-	/*
 	 * Disable beaconing while we are reloading the beacon data,
 	 * otherwise we might be sending out invalid data.
 	 */
@@ -1280,6 +1267,8 @@
 
 	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
 		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
+	else
+		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
 	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
 		rxdesc->dev_flags |= RXDONE_MY_BSS;
 
@@ -1297,7 +1286,7 @@
 	struct queue_entry *entry = (struct queue_entry *)urb->context;
 	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
 		return;
 
 	/*
@@ -1484,14 +1473,14 @@
 	/*
 	 * Store led mode, for correct led behaviour.
 	 */
-#ifdef CONFIG_RT2500USB_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
 
 	rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
 	if (value == LED_MODE_TXRX_ACTIVITY)
 		rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
 				   LED_TYPE_ACTIVITY);
-#endif /* CONFIG_RT2500USB_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 	/*
 	 * Check if the BBP tuning should be disabled.
@@ -1665,10 +1654,11 @@
 	{ 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
 };
 
-static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	u8 *txpower;
+	struct channel_info *info;
+	char *tx_power;
 	unsigned int i;
 
 	/*
@@ -1687,20 +1677,10 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
-	 * Convert tx_power array in eeprom.
-	 */
-	txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
-	for (i = 0; i < 14; i++)
-		txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
-	spec->tx_power_a = NULL;
-	spec->tx_power_bg = txpower;
-	spec->tx_power_default = DEFAULT_TXPOWER;
 
 	if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
 		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1722,6 +1702,26 @@
 		spec->num_channels = ARRAY_SIZE(rf_vals_5222);
 		spec->channels = rf_vals_5222;
 	}
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
+	for (i = 0; i < 14; i++)
+		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+
+	if (spec->num_channels > 14) {
+		for (i = 14; i < spec->num_channels; i++)
+			info[i].tx_power1 = DEFAULT_TXPOWER;
+	}
+
+	return 0;
 }
 
 static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1742,7 +1742,9 @@
 	/*
 	 * Initialize hw specifications.
 	 */
-	rt2500usb_probe_hw_mode(rt2x00dev);
+	retval = rt2500usb_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
 
 	/*
 	 * This device requires the atim queue
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 4769ffe..89e5ed2 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -825,17 +825,10 @@
 #define MAX_TXPOWER	31
 #define DEFAULT_TXPOWER	24
 
-#define TXPOWER_FROM_DEV(__txpower)		\
-({						\
-	((__txpower) > MAX_TXPOWER) ?		\
-		DEFAULT_TXPOWER : (__txpower);	\
-})
+#define TXPOWER_FROM_DEV(__txpower) \
+	(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_TO_DEV(__txpower)			\
-({							\
-	((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER :	\
-	(((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER :	\
-	(__txpower));					\
-})
+#define TXPOWER_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
 
 #endif /* RT2500USB_H */
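
The simplified TXPOWER_FROM_DEV()/TXPOWER_TO_DEV() macros above sanitize the EEPROM byte on the way in and clamp the requested power on the way out. Below is a small user-space sketch of the same two conversions; MIN_TXPOWER is assumed to be 0 purely because its definition sits outside this hunk, and clamp_char() is a stand-in for the kernel's clamp_t().

#include <stdio.h>

#define MIN_TXPOWER	0	/* assumed; not visible in this hunk */
#define MAX_TXPOWER	31
#define DEFAULT_TXPOWER	24

/* user-space stand-in for the kernel's clamp_t(char, ...) */
static char clamp_char(char val, char lo, char hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

/* out-of-range EEPROM bytes fall back to the default power */
#define TXPOWER_FROM_DEV(p) \
	(((unsigned char)(p)) > MAX_TXPOWER ? DEFAULT_TXPOWER : (p))

/* requested power is clamped into the device's valid range */
#define TXPOWER_TO_DEV(p) \
	clamp_char((p), MIN_TXPOWER, MAX_TXPOWER)

int main(void)
{
	printf("FROM_DEV(17)  = %d\n", TXPOWER_FROM_DEV(17));	/* 17 */
	printf("FROM_DEV(200) = %d\n", TXPOWER_FROM_DEV(200));	/* 24 */
	printf("TO_DEV(40)    = %d\n", TXPOWER_TO_DEV(40));	/* 31 */
	printf("TO_DEV(-3)    = %d\n", TXPOWER_TO_DEV(-3));	/* 0  */
	return 0;
}
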
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8b10ea4..1359a37 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
 /*
  * Module information.
  */
-#define DRV_VERSION	"2.1.8"
+#define DRV_VERSION	"2.2.1"
 #define DRV_PROJECT	"http://rt2x00.serialmonkey.com"
 
 /*
@@ -53,11 +53,11 @@
  */
 #define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...)	\
 	printk(__kernlvl "%s -> %s: %s - " __msg,			\
-	       wiphy_name((__dev)->hw->wiphy), __FUNCTION__, __lvl, ##__args)
+	       wiphy_name((__dev)->hw->wiphy), __func__, __lvl, ##__args)
 
 #define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...)	\
 	printk(__kernlvl "%s -> %s: %s - " __msg,		\
-	       KBUILD_MODNAME, __FUNCTION__, __lvl, ##__args)
+	       KBUILD_MODNAME, __func__, __lvl, ##__args)
 
 #ifdef CONFIG_RT2X00_DEBUG
 #define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...)	\
@@ -144,6 +144,17 @@
 };
 
 /*
+ * Channel information structure
+ */
+struct channel_info {
+	unsigned int flags;
+#define GEOGRAPHY_ALLOWED	0x00000001
+
+	short tx_power1;
+	short tx_power2;
+};
+
+/*
  * Antenna setup values.
  */
 struct antenna_setup {
@@ -394,10 +405,7 @@
  * @num_channels: Number of supported channels. This is used as array size
  *	for @tx_power_a, @tx_power_bg and @channels.
  * @channels: Device/chipset specific channel values (See &struct rf_channel).
- * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL).
- * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
- * @tx_power_default: Default TX power value to use when either
- *	@tx_power_a or @tx_power_bg is missing.
+ * @channels_info: Additional information for channels (See &struct channel_info).
  */
 struct hw_mode_spec {
 	unsigned int supported_bands;
@@ -410,10 +418,7 @@
 
 	unsigned int num_channels;
 	const struct rf_channel *channels;
-
-	const u8 *tx_power_a;
-	const u8 *tx_power_bg;
-	u8 tx_power_default;
+	const struct channel_info *channels_info;
 };
 
 /*
@@ -425,7 +430,9 @@
  */
 struct rt2x00lib_conf {
 	struct ieee80211_conf *conf;
+
 	struct rf_channel rf;
+	struct channel_info channel;
 
 	struct antenna_setup ant;
 
@@ -452,6 +459,23 @@
 };
 
 /*
+ * Configuration structure for hardware encryption.
+ */
+struct rt2x00lib_crypto {
+	enum cipher cipher;
+
+	enum set_key_cmd cmd;
+	const u8 *address;
+
+	u32 bssidx;
+	u32 aid;
+
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+};
+
+/*
  * Configuration structure wrapper around the
  * rt2x00 interface configuration handler.
  */
@@ -459,7 +483,7 @@
 	/*
 	 * Interface type
 	 */
-	enum ieee80211_if_types type;
+	enum nl80211_iftype type;
 
 	/*
 	 * TSF sync value, this is dependant on the operation type.
@@ -547,6 +571,12 @@
 	/*
 	 * Configuration handlers.
 	 */
+	int (*config_shared_key) (struct rt2x00_dev *rt2x00dev,
+				  struct rt2x00lib_crypto *crypto,
+				  struct ieee80211_key_conf *key);
+	int (*config_pairwise_key) (struct rt2x00_dev *rt2x00dev,
+				    struct rt2x00lib_crypto *crypto,
+				    struct ieee80211_key_conf *key);
 	void (*config_filter) (struct rt2x00_dev *rt2x00dev,
 			       const unsigned int filter_flags);
 	void (*config_intf) (struct rt2x00_dev *rt2x00dev,
@@ -599,17 +629,16 @@
 	/*
 	 * Device state flags
 	 */
-	DEVICE_PRESENT,
-	DEVICE_REGISTERED_HW,
-	DEVICE_INITIALIZED,
-	DEVICE_STARTED,
-	DEVICE_STARTED_SUSPEND,
-	DEVICE_ENABLED_RADIO,
-	DEVICE_DISABLED_RADIO_HW,
-	DEVICE_DIRTY_CONFIG,
+	DEVICE_STATE_PRESENT,
+	DEVICE_STATE_REGISTERED_HW,
+	DEVICE_STATE_INITIALIZED,
+	DEVICE_STATE_STARTED,
+	DEVICE_STATE_STARTED_SUSPEND,
+	DEVICE_STATE_ENABLED_RADIO,
+	DEVICE_STATE_DISABLED_RADIO_HW,
 
 	/*
-	 * Driver features
+	 * Driver requirements
 	 */
 	DRIVER_REQUIRE_FIRMWARE,
 	DRIVER_REQUIRE_BEACON_GUARD,
@@ -618,9 +647,14 @@
 	DRIVER_REQUIRE_DMA,
 
 	/*
-	 * Driver configuration
+	 * Driver features
 	 */
 	CONFIG_SUPPORT_HW_BUTTON,
+	CONFIG_SUPPORT_HW_CRYPTO,
+
+	/*
+	 * Driver configuration
+	 */
 	CONFIG_FRAME_TYPE,
 	CONFIG_RF_SEQUENCE,
 	CONFIG_EXTERNAL_LNA_A,
@@ -769,6 +803,11 @@
 	u32 *rf;
 
 	/*
+	 * LNA gain
+	 */
+	short lna_gain;
+
+	/*
 	 * USB Max frame size (for rt2500usb & rt73usb).
 	 */
 	u16 usb_maxpacket;
@@ -966,6 +1005,13 @@
 				unsigned int changed_flags,
 				unsigned int *total_flags,
 				int mc_count, struct dev_addr_list *mc_list);
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		      const u8 *local_address, const u8 *address,
+		      struct ieee80211_key_conf *key);
+#else
+#define rt2x00mac_set_key	NULL
+#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 int rt2x00mac_get_stats(struct ieee80211_hw *hw,
 			struct ieee80211_low_level_stats *stats);
 int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index d134c3b..4d5e87b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -31,7 +31,7 @@
 
 void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
 			   struct rt2x00_intf *intf,
-			   enum ieee80211_if_types type,
+			   enum nl80211_iftype type,
 			   u8 *mac, u8 *bssid)
 {
 	struct rt2x00intf_conf conf;
@@ -40,11 +40,11 @@
 	conf.type = type;
 
 	switch (type) {
-	case IEEE80211_IF_TYPE_IBSS:
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_AP:
 		conf.sync = TSF_SYNC_BEACON;
 		break;
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		conf.sync = TSF_SYNC_INFRA;
 		break;
 	default:
@@ -121,7 +121,7 @@
 	 * Antenna setup changes require the RX to be disabled,
 	 * else the changes will be ignored by the device.
 	 */
-	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
 
 	/*
@@ -136,7 +136,7 @@
 	rt2x00dev->link.ant.active.rx = libconf.ant.rx;
 	rt2x00dev->link.ant.active.tx = libconf.ant.tx;
 
-	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
 }
 
@@ -245,6 +245,10 @@
 		memcpy(&libconf.rf,
 		       &rt2x00dev->spec.channels[conf->channel->hw_value],
 		       sizeof(libconf.rf));
+
+		memcpy(&libconf.channel,
+		       &rt2x00dev->spec.channels_info[conf->channel->hw_value],
+		       sizeof(libconf.channel));
 	}
 
 	if (flags & CONFIG_UPDATE_ANTENNA) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
new file mode 100644
index 0000000..e1448cf
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -0,0 +1,215 @@
+/*
+	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2x00lib
+	Abstract: rt2x00 crypto specific routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "rt2x00.h"
+#include "rt2x00lib.h"
+
+enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
+{
+	switch (key->alg) {
+	case ALG_WEP:
+		if (key->keylen == LEN_WEP40)
+			return CIPHER_WEP64;
+		else
+			return CIPHER_WEP128;
+	case ALG_TKIP:
+		return CIPHER_TKIP;
+	case ALG_CCMP:
+		return CIPHER_AES;
+	default:
+		return CIPHER_NONE;
+	}
+}
+
+unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
+{
+	struct ieee80211_key_conf *key = tx_info->control.hw_key;
+	unsigned int overhead = 0;
+
+	/*
+	 * Extend frame length to include IV/EIV/ICV/MMIC,
+	 * note that these lengths should only be added when
+	 * mac80211 does not generate them.
+	 */
+	overhead += tx_info->control.icv_len;
+
+	if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+		overhead += tx_info->control.iv_len;
+
+	if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
+		if (key->alg == ALG_TKIP)
+			overhead += 8;
+	}
+
+	return overhead;
+}
+
+void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
+{
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+	unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
+
+	if (unlikely(!iv_len))
+		return;
+
+	/* Copy IV/EIV data */
+	if (iv_len >= 4)
+		memcpy(&skbdesc->iv, skb->data + header_length, 4);
+	if (iv_len >= 8)
+		memcpy(&skbdesc->eiv, skb->data + header_length + 4, 4);
+
+	/* Move ieee80211 header */
+	memmove(skb->data + iv_len, skb->data, header_length);
+
+	/* Pull buffer to correct size */
+	skb_pull(skb, iv_len);
+
+	/* IV/EIV data has officially been stripped */
+	skbdesc->flags |= FRAME_DESC_IV_STRIPPED;
+}
+
+void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
+{
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+	unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
+	const unsigned int iv_len =
+	    ((!!(skbdesc->iv)) * 4) + ((!!(skbdesc->eiv)) * 4);
+
+	if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
+		return;
+
+	skb_push(skb, iv_len);
+
+	/* Move ieee80211 header */
+	memmove(skb->data, skb->data + iv_len, header_length);
+
+	/* Copy IV/EIV data */
+	if (iv_len >= 4)
+		memcpy(skb->data + header_length, &skbdesc->iv, 4);
+	if (iv_len >= 8)
+		memcpy(skb->data + header_length + 4, &skbdesc->eiv, 4);
+
+	/* IV/EIV data has been reinserted into the frame */
+	skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
+}
+
+void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
+			       unsigned int header_length,
+			       struct rxdone_entry_desc *rxdesc)
+{
+	unsigned int payload_len = rxdesc->size - header_length;
+	unsigned int iv_len;
+	unsigned int icv_len;
+	unsigned int transfer = 0;
+
+	/*
+	 * WEP64/WEP128: Provides IV & ICV
+	 * TKIP: Provides IV/EIV & ICV
+	 * AES: Provides IV/EIV & ICV
+	 */
+	switch (rxdesc->cipher) {
+	case CIPHER_WEP64:
+	case CIPHER_WEP128:
+		iv_len = 4;
+		icv_len = 4;
+		break;
+	case CIPHER_TKIP:
+		iv_len = 8;
+		icv_len = 4;
+		break;
+	case CIPHER_AES:
+		iv_len = 8;
+		icv_len = 8;
+		break;
+	default:
+		/* Unsupported cipher type */
+		return;
+	}
+
+	/*
+	 * Make room for new data, note that we increase both
+	 * headsize and tailsize when required. The tailsize is
+	 * only needed when ICV data needs to be inserted and
+	 * the padding is smaller than the ICV data.
+	 * When the alignment requirement is greater than the
+	 * ICV data we must trim the skb to the correct size
+	 * because we need to remove the extra bytes.
+	 */
+	skb_push(skb, iv_len + align);
+	if (align < icv_len)
+		skb_put(skb, icv_len - align);
+	else if (align > icv_len)
+		skb_trim(skb, rxdesc->size + iv_len + icv_len);
+
+	/* Move ieee80211 header */
+	memmove(skb->data + transfer,
+		skb->data + transfer + iv_len + align,
+		header_length);
+	transfer += header_length;
+
+	/* Copy IV data */
+	if (iv_len >= 4) {
+		memcpy(skb->data + transfer, &rxdesc->iv, 4);
+		transfer += 4;
+	}
+
+	/* Copy EIV data */
+	if (iv_len >= 8) {
+		memcpy(skb->data + transfer, &rxdesc->eiv, 4);
+		transfer += 4;
+	}
+
+	/* Move payload */
+	if (align) {
+		memmove(skb->data + transfer,
+			skb->data + transfer + align,
+			payload_len);
+	}
+
+	/*
+	 * NOTE: Always count the payload as transferred,
+	 * even when alignment was set to zero. This is required
+	 * for determining the correct offset for the ICV data.
+	 */
+	transfer += payload_len;
+
+	/* Copy ICV data */
+	if (icv_len >= 4) {
+		memcpy(skb->data + transfer, &rxdesc->icv, 4);
+		/*
+		 * AES appends 8 bytes, we can't fill the upper
+		 * 4 bytes, but mac80211 doesn't care about what
+		 * we provide here anyway and strips it immediately.
+		 */
+		transfer += icv_len;
+	}
+
+	/* IV/EIV/ICV has been inserted into frame */
+	rxdesc->size = transfer;
+	rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
+}
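
The TX-side helpers in the new file above never copy the payload when hiding or restoring the IV; they slide only the 802.11 header across the IV bytes and then adjust the start of the buffer. The user-space sketch below models that header shift on a flat byte array. The 24-byte header and 4-byte IV lengths are example values (in the driver the header length comes from ieee80211_get_hdrlen_from_skb() and the IV length from the caller), and plain pointer arithmetic stands in for skb_pull()/skb_push().

#include <stdio.h>
#include <string.h>

#define HDR_LEN 24	/* example 802.11 header length */
#define IV_LEN   4	/* example WEP IV length */

/* Strip the IV: save it, slide the header forward over the IV bytes
 * and advance the frame start, mirroring rt2x00crypto_tx_remove_iv(). */
static char *strip_iv(char *frame, char *iv)
{
	memcpy(iv, frame + HDR_LEN, IV_LEN);
	memmove(frame + IV_LEN, frame, HDR_LEN);
	return frame + IV_LEN;			/* models skb_pull() */
}

/* Reinsert the IV: move the frame start back, slide the header down
 * again and restore the saved IV, like rt2x00crypto_tx_insert_iv(). */
static char *insert_iv(char *frame, const char *iv)
{
	frame -= IV_LEN;			/* models skb_push() */
	memmove(frame, frame + IV_LEN, HDR_LEN);
	memcpy(frame + HDR_LEN, iv, IV_LEN);
	return frame;
}

int main(void)
{
	/* 24 header bytes, 4 IV bytes, then a short payload */
	char buf[64] = "HHHHHHHHHHHHHHHHHHHHHHHHIIIIpayload";
	char iv[IV_LEN];
	char *frame = buf;

	frame = strip_iv(frame, iv);
	printf("stripped:   %s\n", frame);
	frame = insert_iv(frame, iv);
	printf("reinserted: %s\n", frame);
	return 0;
}
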
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 6bee1d6..5cf4c85 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -35,6 +35,13 @@
 
 #define MAX_LINE_LENGTH 64
 
+struct rt2x00debug_crypto {
+	unsigned long success;
+	unsigned long icv_error;
+	unsigned long mic_error;
+	unsigned long key_error;
+};
+
 struct rt2x00debug_intf {
 	/*
 	 * Pointer to driver structure where
@@ -63,6 +70,7 @@
 	 *   - queue folder
 	 *     - frame dump file
 	 *     - queue stats file
+	 *     - crypto stats file
 	 */
 	struct dentry *driver_folder;
 	struct dentry *driver_entry;
@@ -80,6 +88,7 @@
 	struct dentry *queue_folder;
 	struct dentry *queue_frame_dump_entry;
 	struct dentry *queue_stats_entry;
+	struct dentry *crypto_stats_entry;
 
 	/*
 	 * The frame dump file only allows a single reader,
@@ -98,6 +107,12 @@
 	wait_queue_head_t frame_dump_waitqueue;
 
 	/*
+	 * HW crypto statistics.
+	 * All statistics are stored separately per cipher type.
+	 */
+	struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
+
+	/*
 	 * Driver and chipset files will use a data buffer
 	 * that has been created in advance. This will simplify
 	 * the code since we can use the debugfs functions.
@@ -114,6 +129,25 @@
 	unsigned int offset_rf;
 };
 
+void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
+			       enum cipher cipher, enum rx_crypto status)
+{
+	struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
+
+	if (cipher == CIPHER_TKIP_NO_MIC)
+		cipher = CIPHER_TKIP;
+	if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
+		return;
+
+	/* Remove CIPHER_NONE index */
+	cipher--;
+
+	intf->crypto_stats[cipher].success += (status == RX_CRYPTO_SUCCESS);
+	intf->crypto_stats[cipher].icv_error += (status == RX_CRYPTO_FAIL_ICV);
+	intf->crypto_stats[cipher].mic_error += (status == RX_CRYPTO_FAIL_MIC);
+	intf->crypto_stats[cipher].key_error += (status == RX_CRYPTO_FAIL_KEY);
+}
+
 void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
 			    enum rt2x00_dump_type type, struct sk_buff *skb)
 {
@@ -327,6 +361,59 @@
 	.release	= rt2x00debug_file_release,
 };
 
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
+					     char __user *buf,
+					     size_t length,
+					     loff_t *offset)
+{
+	struct rt2x00debug_intf *intf = file->private_data;
+	char *name[] = { "WEP64", "WEP128", "TKIP", "AES" };
+	char *data;
+	char *temp;
+	size_t size;
+	unsigned int i;
+
+	if (*offset)
+		return 0;
+
+	data = kzalloc((1 + CIPHER_MAX) * MAX_LINE_LENGTH, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	temp = data;
+	temp += sprintf(data, "cipher\tsuccess\ticv err\tmic err\tkey err\n");
+
+	for (i = 0; i < CIPHER_MAX; i++) {
+		temp += sprintf(temp, "%s\t%lu\t%lu\t%lu\t%lu\n", name[i],
+				intf->crypto_stats[i].success,
+				intf->crypto_stats[i].icv_error,
+				intf->crypto_stats[i].mic_error,
+				intf->crypto_stats[i].key_error);
+	}
+
+	size = strlen(data);
+	size = min(size, length);
+
+	if (copy_to_user(buf, data, size)) {
+		kfree(data);
+		return -EFAULT;
+	}
+
+	kfree(data);
+
+	*offset += size;
+	return size;
+}
+
+static const struct file_operations rt2x00debug_fop_crypto_stats = {
+	.owner		= THIS_MODULE,
+	.read		= rt2x00debug_read_crypto_stats,
+	.open		= rt2x00debug_file_open,
+	.release	= rt2x00debug_file_release,
+};
+#endif
+
 #define RT2X00DEBUGFS_OPS_READ(__name, __format, __type)	\
 static ssize_t rt2x00debug_read_##__name(struct file *file,	\
 					 char __user *buf,	\
@@ -569,6 +656,13 @@
 	    debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
 				intf, &rt2x00debug_fop_queue_stats);
 
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
+		intf->crypto_stats_entry =
+		    debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
+					intf, &rt2x00debug_fop_crypto_stats);
+#endif
+
 	return;
 
 exit:
@@ -587,6 +681,9 @@
 
 	skb_queue_purge(&intf->frame_dump_skbqueue);
 
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+	debugfs_remove(intf->crypto_stats_entry);
+#endif
 	debugfs_remove(intf->queue_stats_entry);
 	debugfs_remove(intf->queue_frame_dump_entry);
 	debugfs_remove(intf->queue_folder);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index f42283a..86840e3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -34,7 +34,7 @@
  */
 void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return;
 
 	/*
@@ -94,8 +94,8 @@
 	 * Don't enable the radio twice.
 	 * And check if the hardware button has been disabled.
 	 */
-	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
-	    test_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
+	    test_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags))
 		return 0;
 
 	/*
@@ -117,7 +117,7 @@
 	rt2x00leds_led_radio(rt2x00dev, true);
 	rt2x00led_led_activity(rt2x00dev, true);
 
-	__set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
 
 	/*
 	 * Enable RX.
@@ -134,7 +134,7 @@
 
 void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
-	if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return;
 
 	/*
@@ -354,7 +354,7 @@
 	 * When the radio is shutting down we should
 	 * immediately cease all link tuning.
 	 */
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return;
 
 	/*
@@ -431,7 +431,7 @@
 	 * note that in the spinlock protected area above the delayed_flags
 	 * have been cleared correctly.
 	 */
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return;
 
 	if (delayed_flags & DELAYED_UPDATE_BEACON)
@@ -467,8 +467,8 @@
 	struct rt2x00_dev *rt2x00dev = data;
 	struct rt2x00_intf *intf = vif_to_intf(vif);
 
-	if (vif->type != IEEE80211_IF_TYPE_AP &&
-	    vif->type != IEEE80211_IF_TYPE_IBSS)
+	if (vif->type != NL80211_IFTYPE_AP &&
+	    vif->type != NL80211_IFTYPE_ADHOC)
 		return;
 
 	/*
@@ -484,7 +484,7 @@
 
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return;
 
 	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
@@ -508,6 +508,15 @@
 	rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
 
 	/*
+	 * If the IV/EIV data was stripped from the frame before it was
+	 * passed to the hardware, we should now reinsert it because
+	 * mac80211 will expect the same data to be present in the
+	 * frame as it was passed to us.
+	 */
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
+		rt2x00crypto_tx_insert_iv(entry->skb);
+
+	/*
 	 * Send frame to debugfs immediately, after this call is completed
 	 * we are going to overwrite the skb->cb array.
 	 */
@@ -563,7 +572,7 @@
 
 	rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry);
 
-	__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+	clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
 
 	/*
@@ -585,7 +594,7 @@
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_hdr *hdr;
 	const struct rt2x00_rate *rate;
-	unsigned int header_size;
+	unsigned int header_length;
 	unsigned int align;
 	unsigned int i;
 	int idx = -1;
@@ -613,10 +622,19 @@
 	 * The data behind the ieee80211 header must be
 	 * aligned on a 4 byte boundary.
 	 */
-	header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
-	align = ((unsigned long)(entry->skb->data + header_size)) & 3;
+	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+	align = ((unsigned long)(entry->skb->data + header_length)) & 3;
 
-	if (align) {
+	/*
+	 * Hardware might have stripped the IV/EIV/ICV data,
+	 * in that case it is possible that the data was
+	 * provided separately (through the hardware descriptor),
+	 * in which case we should reinsert the data into the frame.
+	 */
+	if ((rxdesc.flags & RX_FLAG_IV_STRIPPED)) {
+		rt2x00crypto_rx_insert_iv(entry->skb, align,
+					  header_length, &rxdesc);
+	} else if (align) {
 		skb_push(entry->skb, align);
 		/* Move entire frame in 1 command */
 		memmove(entry->skb->data, entry->skb->data + align,
@@ -635,7 +653,7 @@
 
 		if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
 		     (rate->plcp == rxdesc.signal)) ||
-		    (!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
+		    ((rxdesc.dev_flags & RXDONE_SIGNAL_BITRATE) &&
 		      (rate->bitrate == rxdesc.signal))) {
 			idx = i;
 			break;
@@ -657,6 +675,10 @@
 	    (rxdesc.dev_flags & RXDONE_MY_BSS))
 		rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi);
 
+	rt2x00debug_update_crypto(rt2x00dev,
+				  rxdesc.cipher,
+				  rxdesc.cipher_status);
+
 	rt2x00dev->link.qual.rx_success++;
 
 	rx_status->mactime = rxdesc.timestamp;
@@ -796,7 +818,6 @@
 	struct ieee80211_rate *rates;
 	unsigned int num_rates;
 	unsigned int i;
-	unsigned char tx_power;
 
 	num_rates = 0;
 	if (spec->supported_rates & SUPPORT_RATE_CCK)
@@ -822,20 +843,9 @@
 	 * Initialize Channel list.
 	 */
 	for (i = 0; i < spec->num_channels; i++) {
-		if (spec->channels[i].channel <= 14) {
-			if (spec->tx_power_bg)
-				tx_power = spec->tx_power_bg[i];
-			else
-				tx_power = spec->tx_power_default;
-		} else {
-			if (spec->tx_power_a)
-				tx_power = spec->tx_power_a[i];
-			else
-				tx_power = spec->tx_power_default;
-		}
-
 		rt2x00lib_channel(&channels[i],
-				  spec->channels[i].channel, tx_power, i);
+				  spec->channels[i].channel,
+				  spec->channels_info[i].tx_power1, i);
 	}
 
 	/*
@@ -878,7 +888,7 @@
 
 static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
 {
-	if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
 		ieee80211_unregister_hw(rt2x00dev->hw);
 
 	if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
@@ -887,6 +897,8 @@
 		rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
 		rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 	}
+
+	kfree(rt2x00dev->spec.channels_info);
 }
 
 static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -894,6 +906,9 @@
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
 	int status;
 
+	if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
+		return 0;
+
 	/*
 	 * Initialize HW modes.
 	 */
@@ -915,7 +930,7 @@
 		return status;
 	}
 
-	__set_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);
 
 	return 0;
 }
@@ -925,7 +940,7 @@
  */
 static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
 {
-	if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags))
+	if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
 		return;
 
 	/*
@@ -948,7 +963,7 @@
 {
 	int status;
 
-	if (test_bit(DEVICE_INITIALIZED, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
 		return 0;
 
 	/*
@@ -967,7 +982,7 @@
 		return status;
 	}
 
-	__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
 
 	/*
 	 * Register the extra components.
@@ -981,7 +996,7 @@
 {
 	int retval;
 
-	if (test_bit(DEVICE_STARTED, &rt2x00dev->flags))
+	if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
 		return 0;
 
 	/*
@@ -999,28 +1014,18 @@
 	if (retval)
 		return retval;
 
-	/*
-	 * Enable radio.
-	 */
-	retval = rt2x00lib_enable_radio(rt2x00dev);
-	if (retval) {
-		rt2x00lib_uninitialize(rt2x00dev);
-		return retval;
-	}
-
 	rt2x00dev->intf_ap_count = 0;
 	rt2x00dev->intf_sta_count = 0;
 	rt2x00dev->intf_associated = 0;
 
-	__set_bit(DEVICE_STARTED, &rt2x00dev->flags);
-	__set_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
 
 	return 0;
 }
 
 void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags))
+	if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
 		return;
 
 	/*
@@ -1032,8 +1037,6 @@
 	rt2x00dev->intf_ap_count = 0;
 	rt2x00dev->intf_sta_count = 0;
 	rt2x00dev->intf_associated = 0;
-
-	__clear_bit(DEVICE_STARTED, &rt2x00dev->flags);
 }
 
 /*
@@ -1049,6 +1052,11 @@
 	 */
 	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
 
+	rt2x00dev->hw->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_AP) |
+	    BIT(NL80211_IFTYPE_STATION) |
+	    BIT(NL80211_IFTYPE_ADHOC);
+
 	/*
 	 * Let the driver probe the device to detect the capabilities.
 	 */
@@ -1088,7 +1096,7 @@
 	rt2x00rfkill_allocate(rt2x00dev);
 	rt2x00debug_register(rt2x00dev);
 
-	__set_bit(DEVICE_PRESENT, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 
 	return 0;
 
@@ -1101,7 +1109,7 @@
 
 void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 {
-	__clear_bit(DEVICE_PRESENT, &rt2x00dev->flags);
+	clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 
 	/*
 	 * Disable radio.
@@ -1146,14 +1154,15 @@
 	int retval;
 
 	NOTICE(rt2x00dev, "Going to sleep.\n");
-	__clear_bit(DEVICE_PRESENT, &rt2x00dev->flags);
 
 	/*
 	 * Only continue if mac80211 has open interfaces.
 	 */
-	if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags))
+	if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
+	    !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
 		goto exit;
-	__set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags);
+
+	set_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags);
 
 	/*
 	 * Disable radio.
@@ -1203,8 +1212,8 @@
 	/*
 	 * Master or Ad-hoc mode require a new beacon update.
 	 */
-	if (vif->type == IEEE80211_IF_TYPE_AP ||
-	    vif->type == IEEE80211_IF_TYPE_IBSS)
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
 		intf->delayed_flags |= DELAYED_UPDATE_BEACON;
 
 	spin_unlock(&intf->lock);
@@ -1225,7 +1234,7 @@
 	/*
 	 * Only continue if mac80211 had open interfaces.
 	 */
-	if (!__test_and_clear_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags))
+	if (!test_and_clear_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags))
 		return 0;
 
 	/*
@@ -1252,7 +1261,7 @@
 	/*
 	 * We are ready again to receive requests from mac80211.
 	 */
-	__set_bit(DEVICE_PRESENT, &rt2x00dev->flags);
+	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 
 	/*
 	 * It is possible that during that mac80211 has attempted
@@ -1272,7 +1281,7 @@
 	return 0;
 
 exit:
-	rt2x00lib_disable_radio(rt2x00dev);
+	rt2x00lib_stop(rt2x00dev);
 	rt2x00lib_uninitialize(rt2x00dev);
 	rt2x00debug_deregister(rt2x00dev);
 
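The hunks above switch rt2x00dev->flags handling from the non-atomic __set_bit()/__test_and_clear_bit() helpers to their atomic counterparts (together with the DEVICE_STATE_* rename), so paths such as rt2x00lib_disable_radio() and rt2x00lib_stop() run their teardown at most once even with concurrent callers. A minimal user-space sketch of that test-and-clear guard, using GCC atomic builtins instead of the kernel bitops and purely illustrative demo_* names:

/*
 * Sketch only: the first caller clears the bit and proceeds, any
 * repeated or concurrent caller sees 0 and bails out early.
 */
#include <stdio.h>
#include <stdbool.h>

#define DEMO_ENABLED_RADIO 0

static unsigned long demo_flags;

/* Atomically test bit 'nr' and clear it, returning the old value. */
static bool demo_test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	return __atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask;
}

static void demo_disable_radio(void)
{
	if (!demo_test_and_clear_bit(DEMO_ENABLED_RADIO, &demo_flags)) {
		printf("radio already disabled, nothing to do\n");
		return;
	}
	printf("disabling radio\n");
}

int main(void)
{
	__atomic_fetch_or(&demo_flags, 1UL << DEMO_ENABLED_RADIO, __ATOMIC_SEQ_CST);
	demo_disable_radio();	/* disables the radio */
	demo_disable_radio();	/* second call is a no-op */
	return 0;
}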
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index c5fb3a7..797eb61 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -88,7 +88,7 @@
  */
 void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
 			   struct rt2x00_intf *intf,
-			   enum ieee80211_if_types type,
+			   enum nl80211_iftype type,
 			   u8 *mac, u8 *bssid);
 void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
 			  struct rt2x00_intf *intf,
@@ -181,6 +181,8 @@
 void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
 void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
 			    enum rt2x00_dump_type type, struct sk_buff *skb);
+void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
+			       enum cipher cipher, enum rx_crypto status);
 #else
 static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
 {
@@ -195,9 +197,54 @@
 					  struct sk_buff *skb)
 {
 }
+
+static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
+					     enum cipher cipher,
+					     enum rx_crypto status)
+{
+}
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
 /*
+ * Crypto handlers.
+ */
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
+unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info);
+void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
+void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
+void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
+			       unsigned int header_length,
+			       struct rxdone_entry_desc *rxdesc);
+#else
+static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
+{
+	return CIPHER_NONE;
+}
+
+static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
+{
+	return 0;
+}
+
+static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
+					     unsigned int iv_len)
+{
+}
+
+static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
+{
+}
+
+static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
+					     unsigned int align,
+					     unsigned int header_length,
+					     struct rxdone_entry_desc *rxdesc)
+{
+}
+#endif
+
+/*
  * RFkill handlers.
  */
 #ifdef CONFIG_RT2X00_LIB_RFKILL
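The new crypto handler declarations above follow the usual rt2x00 pattern of pairing each CONFIG_RT2X00_LIB_CRYPTO function with an empty static inline stub, so callers compile unchanged whether or not the feature is built in. A small stand-alone sketch of that pattern, with a hypothetical DEMO_FEATURE_CRYPTO switch and made-up return values:

#include <stdio.h>

#ifdef DEMO_FEATURE_CRYPTO
unsigned int demo_crypto_tx_overhead(void)
{
	return 4 + 4 + 4;	/* e.g. IV + EIV + ICV */
}
#else
static inline unsigned int demo_crypto_tx_overhead(void)
{
	return 0;		/* feature compiled out: no overhead */
}
#endif

int main(void)
{
	/* The caller is identical in both configurations and needs no #ifdef. */
	printf("tx overhead: %u bytes\n", demo_crypto_tx_overhead());
	return 0;
}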
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index d065073..2c6cc5c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -36,21 +36,22 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
 	struct ieee80211_tx_info *rts_info;
 	struct sk_buff *skb;
-	int size;
+	unsigned int data_length;
+	int retval = 0;
 
 	if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
-		size = sizeof(struct ieee80211_cts);
+		data_length = sizeof(struct ieee80211_cts);
 	else
-		size = sizeof(struct ieee80211_rts);
+		data_length = sizeof(struct ieee80211_rts);
 
-	skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom);
-	if (!skb) {
+	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
+	if (unlikely(!skb)) {
 		WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n");
-		return NETDEV_TX_BUSY;
+		return -ENOMEM;
 	}
 
 	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
-	skb_put(skb, size);
+	skb_put(skb, data_length);
 
 	/*
 	 * Copy TX information over from original frame to
@@ -63,7 +64,6 @@
 	 */
 	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
 	rts_info = IEEE80211_SKB_CB(skb);
-	rts_info->control.hw_key = NULL;
 	rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
 	rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
 	rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -73,22 +73,33 @@
 	else
 		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
 
+	skb->do_not_encrypt = 1;
+
+	/*
+	 * RTS/CTS frame should use the length of the frame plus any
+	 * encryption overhead that will be added by the hardware.
+	 */
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+	if (!frag_skb->do_not_encrypt)
+		data_length += rt2x00crypto_tx_overhead(tx_info);
+#endif /* CONFIG_RT2X00_LIB_CRYPTO */
+
 	if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
 		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
-					frag_skb->data, size, tx_info,
+					frag_skb->data, data_length, tx_info,
 					(struct ieee80211_cts *)(skb->data));
 	else
 		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
-				  frag_skb->data, size, tx_info,
+				  frag_skb->data, data_length, tx_info,
 				  (struct ieee80211_rts *)(skb->data));
 
-	if (rt2x00queue_write_tx_frame(queue, skb)) {
+	retval = rt2x00queue_write_tx_frame(queue, skb);
+	if (retval) {
 		dev_kfree_skb_any(skb);
 		WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
-		return NETDEV_TX_BUSY;
 	}
 
-	return NETDEV_TX_OK;
+	return retval;
 }
 
 int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -106,11 +117,8 @@
 	 * Note that we can only stop the TX queues inside the TX path
 	 * due to possible race conditions in mac80211.
 	 */
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) {
-		ieee80211_stop_queues(hw);
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		goto exit_fail;
 
 	/*
 	 * Determine which queue to put packet on.
@@ -141,26 +149,25 @@
 	if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS |
 			       IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
 	    !rt2x00dev->ops->hw->set_rts_threshold) {
-		if (rt2x00queue_available(queue) <= 1) {
-			ieee80211_stop_queue(rt2x00dev->hw, qid);
-			return NETDEV_TX_BUSY;
-		}
+		if (rt2x00queue_available(queue) <= 1)
+			goto exit_fail;
 
-		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
-			ieee80211_stop_queue(rt2x00dev->hw, qid);
-			return NETDEV_TX_BUSY;
-		}
+		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
+			goto exit_fail;
 	}
 
-	if (rt2x00queue_write_tx_frame(queue, skb)) {
-		ieee80211_stop_queue(rt2x00dev->hw, qid);
-		return NETDEV_TX_BUSY;
-	}
+	if (rt2x00queue_write_tx_frame(queue, skb))
+		goto exit_fail;
 
 	if (rt2x00queue_threshold(queue))
 		ieee80211_stop_queue(rt2x00dev->hw, qid);
 
 	return NETDEV_TX_OK;
+
+ exit_fail:
+	ieee80211_stop_queue(rt2x00dev->hw, qid);
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_tx);
 
@@ -168,7 +175,7 @@
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
 		return 0;
 
 	return rt2x00lib_start(rt2x00dev);
@@ -179,7 +186,7 @@
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
 		return;
 
 	rt2x00lib_stop(rt2x00dev);
@@ -199,12 +206,12 @@
 	 * Don't allow interfaces to be added when
 	 * the device has disappeared.
 	 */
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) ||
-	    !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
+	    !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
 		return -ENODEV;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		/*
 		 * We don't support mixed combinations of
 		 * sta and ap interfaces.
@@ -220,8 +227,8 @@
 			return -ENOBUFS;
 
 		break;
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
 		/*
 		 * We don't support mixed combinations of
 		 * sta and ap interfaces.
@@ -249,7 +256,7 @@
 	 */
 	for (i = 0; i < queue->limit; i++) {
 		entry = &queue->entries[i];
-		if (!__test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
+		if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
 			break;
 	}
 
@@ -261,7 +268,7 @@
 	 * increase interface count and start initialization.
 	 */
 
-	if (conf->type == IEEE80211_IF_TYPE_AP)
+	if (conf->type == NL80211_IFTYPE_AP)
 		rt2x00dev->intf_ap_count++;
 	else
 		rt2x00dev->intf_sta_count++;
@@ -270,7 +277,7 @@
 	spin_lock_init(&intf->seqlock);
 	intf->beacon = entry;
 
-	if (conf->type == IEEE80211_IF_TYPE_AP)
+	if (conf->type == NL80211_IFTYPE_AP)
 		memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
 	memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
 
@@ -303,12 +310,12 @@
 	 * either the device has disappeared or when
 	 * no interface is present.
 	 */
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) ||
-	    (conf->type == IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_ap_count) ||
-	    (conf->type != IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_sta_count))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
+	    (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
+	    (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
 		return;
 
-	if (conf->type == IEEE80211_IF_TYPE_AP)
+	if (conf->type == NL80211_IFTYPE_AP)
 		rt2x00dev->intf_ap_count--;
 	else
 		rt2x00dev->intf_sta_count--;
@@ -317,59 +324,59 @@
 	 * Release beacon entry so it is available for
 	 * new interfaces again.
 	 */
-	__clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);
+	clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);
 
 	/*
 	 * Make sure the bssid and mac address registers
 	 * are cleared to prevent false ACKing of frames.
 	 */
 	rt2x00lib_config_intf(rt2x00dev, intf,
-			      IEEE80211_IF_TYPE_INVALID, NULL, NULL);
+			      NL80211_IFTYPE_UNSPECIFIED, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
 
 int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
-	int force_reconfig;
+	int radio_on;
+	int status;
 
 	/*
 	 * Mac80211 might be calling this function while we are trying
 	 * to remove the device or perhaps suspending it.
 	 */
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
 		return 0;
 
 	/*
-	 * Check if we need to disable the radio,
-	 * if this is not the case, at least the RX must be disabled.
+	 * Only change device state when the radio is enabled. It does not
+	 * matter what parameters we have configured when the radio is disabled
+	 * because we won't be able to send or receive anyway. Also note that
+	 * some configuration parameters (e.g. channel and antenna values) can
+	 * only be set when the radio is enabled.
 	 */
-	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) {
-		if (!conf->radio_enabled)
-			rt2x00lib_disable_radio(rt2x00dev);
-		else
-			rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
-	}
+	radio_on = test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
+	if (conf->radio_enabled) {
+		/* For programming the values, we have to turn RX off */
+		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
 
-	/*
-	 * When the DEVICE_DIRTY_CONFIG flag is set, the device has recently
-	 * been started and the configuration must be forced upon the hardware.
-	 * Otherwise registers will not be intialized correctly and could
-	 * result in non-working hardware because essential registers aren't
-	 * initialized.
-	 */
-	force_reconfig =
-	    __test_and_clear_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
+		/* Enable the radio */
+		status = rt2x00lib_enable_radio(rt2x00dev);
+		if (unlikely(status))
+			return status;
 
-	rt2x00lib_config(rt2x00dev, conf, force_reconfig);
+		/*
+		 * When we've just turned on the radio, we want to reprogram
+		 * everything to ensure a consistent state
+		 */
+		rt2x00lib_config(rt2x00dev, conf, !radio_on);
 
-	/*
-	 * Reenable RX only if the radio should be on.
-	 */
-	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+		/* Turn RX back on */
 		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
-	else if (conf->radio_enabled)
-		return rt2x00lib_enable_radio(rt2x00dev);
+	} else {
+		/* Disable the radio */
+		rt2x00lib_disable_radio(rt2x00dev);
+	}
 
 	return 0;
 }
@@ -388,7 +395,7 @@
 	 * Mac80211 might be calling this function while we are trying
 	 * to remove the device or perhaps suspending it.
 	 */
-	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
 		return 0;
 
 	spin_lock(&intf->lock);
@@ -467,6 +474,91 @@
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
 
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		      const u8 *local_address, const u8 *address,
+		      struct ieee80211_key_conf *key)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	int (*set_key) (struct rt2x00_dev *rt2x00dev,
+			struct rt2x00lib_crypto *crypto,
+			struct ieee80211_key_conf *key);
+	struct rt2x00lib_crypto crypto;
+
+	if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
+		return -EOPNOTSUPP;
+	else if (key->keylen > 32)
+		return -ENOSPC;
+
+	memset(&crypto, 0, sizeof(crypto));
+
+	/*
+	 * When in STA mode, bssidx is always 0; otherwise local_address[5]
+	 * contains the bss number, see BSS_ID_MASK comments for details.
+	 */
+	if (rt2x00dev->intf_sta_count)
+		crypto.bssidx = 0;
+	else
+		crypto.bssidx =
+		    local_address[5] & (rt2x00dev->ops->max_ap_intf - 1);
+
+	crypto.cipher = rt2x00crypto_key_to_cipher(key);
+	if (crypto.cipher == CIPHER_NONE)
+		return -EOPNOTSUPP;
+
+	crypto.cmd = cmd;
+	crypto.address = address;
+
+	if (crypto.cipher == CIPHER_TKIP) {
+		if (key->keylen > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
+			memcpy(&crypto.key,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
+			       sizeof(crypto.key));
+
+		if (key->keylen > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+			memcpy(&crypto.tx_mic,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+			       sizeof(crypto.tx_mic));
+
+		if (key->keylen > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
+			memcpy(&crypto.rx_mic,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+			       sizeof(crypto.rx_mic));
+	} else
+		memcpy(&crypto.key, &key->key[0], key->keylen);
+
+	/*
+	 * Each BSS has a maximum of 4 shared keys.
+	 * Shared key index values:
+	 *	0) BSS0 key0
+	 *	1) BSS0 key1
+	 *	...
+	 *	4) BSS1 key0
+	 *	...
+	 *	8) BSS2 key0
+	 *	...
+	 * Both pairwise and shared key indices are determined by the
+	 * driver. This is required because the hardware requires
+	 * keys to be assigned in the correct order (when key 1 is
+	 * provided but key 0 is not, the key is not found
+	 * by the hardware during RX).
+	 */
+	if (cmd == SET_KEY)
+		key->hw_key_idx = 0;
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+		set_key = rt2x00dev->ops->lib->config_pairwise_key;
+	else
+		set_key = rt2x00dev->ops->lib->config_shared_key;
+
+	if (!set_key)
+		return -EOPNOTSUPP;
+
+	return set_key(rt2x00dev, &crypto, key);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
+#endif /* CONFIG_RT2X00_LIB_CRYPTO */
+
 int rt2x00mac_get_stats(struct ieee80211_hw *hw,
 			struct ieee80211_low_level_stats *stats)
 {
@@ -575,10 +667,11 @@
 		queue->cw_max = 10; /* cw_min: 2^10 = 1024. */
 
 	queue->aifs = params->aifs;
+	queue->txop = params->txop;
 
 	INFO(rt2x00dev,
-	     "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d.\n",
-	     queue_idx, queue->cw_min, queue->cw_max, queue->aifs);
+	     "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
+	     queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
 
 	return 0;
 }
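rt2x00mac_set_key() above splits the 32 bytes of TKIP key material handed over by mac80211 into the temporal key and the two Michael MIC keys using the NL80211_TKIP_DATA_OFFSET_* constants. A stand-alone sketch of that split; the demo_* names and the dummy key material are illustrative only:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEMO_TKIP_OFFSET_ENCR_KEY	0
#define DEMO_TKIP_OFFSET_TX_MIC_KEY	16
#define DEMO_TKIP_OFFSET_RX_MIC_KEY	24

struct demo_crypto {
	uint8_t key[16];	/* temporal encryption key */
	uint8_t tx_mic[8];	/* Michael MIC key, TX direction */
	uint8_t rx_mic[8];	/* Michael MIC key, RX direction */
};

int main(void)
{
	uint8_t key_material[32];
	struct demo_crypto crypto;
	size_t i;

	for (i = 0; i < sizeof(key_material); i++)
		key_material[i] = (uint8_t)i;	/* dummy key material */

	memcpy(crypto.key, &key_material[DEMO_TKIP_OFFSET_ENCR_KEY],
	       sizeof(crypto.key));
	memcpy(crypto.tx_mic, &key_material[DEMO_TKIP_OFFSET_TX_MIC_KEY],
	       sizeof(crypto.tx_mic));
	memcpy(crypto.rx_mic, &key_material[DEMO_TKIP_OFFSET_RX_MIC_KEY],
	       sizeof(crypto.rx_mic));

	printf("encr key at offset %d, tx_mic at %d, rx_mic at %d\n",
	       DEMO_TKIP_OFFSET_ENCR_KEY, DEMO_TKIP_OFFSET_TX_MIC_KEY,
	       DEMO_TKIP_OFFSET_RX_MIC_KEY);
	return 0;
}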
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 898cdd7..b7f4fe8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,10 +33,11 @@
 struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
 					struct queue_entry *entry)
 {
-	unsigned int frame_size;
-	unsigned int reserved_size;
 	struct sk_buff *skb;
 	struct skb_frame_desc *skbdesc;
+	unsigned int frame_size;
+	unsigned int head_size = 0;
+	unsigned int tail_size = 0;
 
 	/*
 	 * The frame size includes descriptor size, because the
@@ -49,16 +50,32 @@
 	 * this means we need at least 3 bytes for moving the frame
 	 * into the correct offset.
 	 */
-	reserved_size = 4;
+	head_size = 4;
+
+	/*
+	 * For IV/EIV/ICV assembly we must make sure there is
+	 * at least 8 bytes available in headroom for IV/EIV
+	 * and 4 bytes for ICV data as tailroom.
+	 */
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+		head_size += 8;
+		tail_size += 4;
+	}
+#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
 	/*
 	 * Allocate skbuffer.
 	 */
-	skb = dev_alloc_skb(frame_size + reserved_size);
+	skb = dev_alloc_skb(frame_size + head_size + tail_size);
 	if (!skb)
 		return NULL;
 
-	skb_reserve(skb, reserved_size);
+	/*
+	 * Make sure we now have a frame with the requested bytes
+	 * available in the head and tail.
+	 */
+	skb_reserve(skb, head_size);
 	skb_put(skb, frame_size);
 
 	/*
@@ -83,8 +100,21 @@
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 
-	skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
-					  DMA_TO_DEVICE);
+	/*
+	 * If the device has requested headroom, we should make sure it
+	 * is also mapped for DMA so it can be used for transferring
+	 * additional descriptor information to the hardware.
+	 */
+	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+
+	skbdesc->skb_dma =
+	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+	/*
+	 * Restore data pointer to original location again.
+	 */
+	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+
 	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -100,7 +130,12 @@
 	}
 
 	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
-		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+		/*
+		 * Add headroom to the skb length, it has been removed
+		 * by the driver, but it was actually mapped to DMA.
+		 */
+		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
+				 skb->len + rt2x00dev->hw->extra_tx_headroom,
 				 DMA_TO_DEVICE);
 		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 	}
@@ -120,7 +155,6 @@
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
-	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 	struct ieee80211_rate *rate =
 	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
@@ -140,7 +174,7 @@
 	txdesc->cw_max = entry->queue->cw_max;
 	txdesc->aifs = entry->queue->aifs;
 
-	/* Data length should be extended with 4 bytes for CRC */
+	/* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */
 	data_length = entry->skb->len + 4;
 
 	/*
@@ -149,6 +183,35 @@
 	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 
+#ifdef CONFIG_RT2X00_LIB_CRYPTO
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
+	    !entry->skb->do_not_encrypt) {
+		struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+
+		__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
+
+		txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
+
+		if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+			__set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
+
+		txdesc->key_idx = hw_key->hw_key_idx;
+		txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
+
+		/*
+		 * Extend frame length to include all encryption overhead
+		 * that will be added by the hardware.
+		 */
+		data_length += rt2x00crypto_tx_overhead(tx_info);
+
+		if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+			__set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
+
+		if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+			__set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
+	}
+#endif /* CONFIG_RT2X00_LIB_CRYPTO */
+
 	/*
 	 * Check if this is a RTS/CTS frame
 	 */
@@ -214,16 +277,22 @@
 	 * sequence counter given by mac80211.
 	 */
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-		spin_lock_irqsave(&intf->seqlock, irqflags);
+		if (likely(tx_info->control.vif)) {
+			struct rt2x00_intf *intf;
 
-		if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-			intf->seqno += 0x10;
-		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
+			intf = vif_to_intf(tx_info->control.vif);
 
-		spin_unlock_irqrestore(&intf->seqlock, irqflags);
+			spin_lock_irqsave(&intf->seqlock, irqflags);
 
-		__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+			if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+				intf->seqno += 0x10;
+			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+			hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
+
+			spin_unlock_irqrestore(&intf->seqlock, irqflags);
+
+			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+		}
 	}
 
 	/*
@@ -305,11 +374,12 @@
 	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
 	struct txentry_desc txdesc;
 	struct skb_frame_desc *skbdesc;
+	unsigned int iv_len = IEEE80211_SKB_CB(skb)->control.iv_len;
 
 	if (unlikely(rt2x00queue_full(queue)))
 		return -EINVAL;
 
-	if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
+	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
 		ERROR(queue->rt2x00dev,
 		      "Arrived at non-free entry in the non-full queue %d.\n"
 		      "Please file bug report to %s.\n",
@@ -326,21 +396,39 @@
 	rt2x00queue_create_tx_descriptor(entry, &txdesc);
 
 	/*
-	 * skb->cb array is now ours and we are free to use it.
+	 * All information is retrieved from the skb->cb array;
+	 * now we should claim ownership of the driver part of that
+	 * array.
 	 */
 	skbdesc = get_skb_frame_desc(entry->skb);
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
+	/*
+	 * When hardware encryption is supported, and this frame
+	 * is to be encrypted, we should strip the IV/EIV data from
+	 * the frame so we can provide it to the driver separately.
+	 */
+	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
+	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags))
+		rt2x00crypto_tx_remove_iv(skb, iv_len);
+
+	/*
+	 * It is possible that the queue was corrupted and this
+	 * call failed. Just drop the frame; we cannot roll back and pass
+	 * the frame to mac80211 because the skb->cb has now been tainted.
+	 */
 	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
-		__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-		return -EIO;
+		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+		dev_kfree_skb_any(entry->skb);
+		entry->skb = NULL;
+		return 0;
 	}
 
 	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
 		rt2x00queue_map_txskb(queue->rt2x00dev, skb);
 
-	__set_bit(ENTRY_DATA_PENDING, &entry->flags);
+	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 
 	rt2x00queue_index_inc(queue, Q_INDEX);
 	rt2x00queue_write_tx_descriptor(entry, &txdesc);
@@ -653,6 +741,7 @@
 
 	queue->rt2x00dev = rt2x00dev;
 	queue->qid = qid;
+	queue->txop = 0;
 	queue->aifs = 2;
 	queue->cw_min = 5;
 	queue->cw_max = 10;
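The rt2x00queue_map_txskb()/rt2x00queue_unmap_skb() changes above include the device's extra_tx_headroom in the DMA mapping: the data pointer is pushed before mapping, restored afterwards, and the same extra length is added back when unmapping. A rough user-space sketch of that bookkeeping with plain pointers instead of an skb (sizes and names are illustrative):

#include <stdio.h>
#include <string.h>

#define DEMO_EXTRA_TX_HEADROOM	32	/* descriptor space requested by the device */
#define DEMO_FRAME_LEN		64

int main(void)
{
	unsigned char buffer[DEMO_EXTRA_TX_HEADROOM + DEMO_FRAME_LEN];
	unsigned char *data = buffer + DEMO_EXTRA_TX_HEADROOM;	/* skb->data */
	size_t len = DEMO_FRAME_LEN;				/* skb->len  */

	memset(buffer, 0, sizeof(buffer));

	/* "skb_push": expose the headroom so it becomes part of the mapping. */
	unsigned char *map_start = data - DEMO_EXTRA_TX_HEADROOM;
	size_t map_len = len + DEMO_EXTRA_TX_HEADROOM;

	/* A real driver would call dma_map_single(dev, map_start, map_len, ...)
	 * here; afterwards "skb_pull" restores data to point at the frame. */
	printf("mapped %zu bytes starting %td bytes before the frame\n",
	       map_len, data - map_start);

	/* On unmap, skb->len alone would be too short by the headroom: */
	printf("unmap length = skb->len (%zu) + headroom (%d) = %zu\n",
	       len, DEMO_EXTRA_TX_HEADROOM, len + DEMO_EXTRA_TX_HEADROOM);
	return 0;
}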
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ff78e52..9dbf04f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -87,10 +87,13 @@
  *
  * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
  * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
+ * @FRAME_DESC_IV_STRIPPED: Frame contained an IV/EIV provided by
+ *	mac80211 but was stripped for processing by the driver.
  */
 enum skb_frame_desc_flags {
-	SKBDESC_DMA_MAPPED_RX = (1 << 0),
-	SKBDESC_DMA_MAPPED_TX = (1 << 1),
+	SKBDESC_DMA_MAPPED_RX = 1 << 0,
+	SKBDESC_DMA_MAPPED_TX = 1 << 1,
+	FRAME_DESC_IV_STRIPPED = 1 << 2,
 };
 
 /**
@@ -104,6 +107,8 @@
  * @desc: Pointer to descriptor part of the frame.
  *	Note that this pointer could point to something outside
  *	of the scope of the skb->data pointer.
+ * @iv: IV data used during encryption/decryption.
+ * @eiv: EIV data used during encryption/decryption.
  * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
  * @entry: The entry to which this sk buffer belongs.
  */
@@ -113,6 +118,9 @@
 	unsigned int desc_len;
 	void *desc;
 
+	__le32 iv;
+	__le32 eiv;
+
 	dma_addr_t skb_dma;
 
 	struct queue_entry *entry;
@@ -132,13 +140,14 @@
 /**
  * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
  *
- * @RXDONE_SIGNAL_PLCP: Does the signal field contain the plcp value,
- *	or does it contain the bitrate itself.
+ * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
+ * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
  * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
  */
 enum rxdone_entry_desc_flags {
 	RXDONE_SIGNAL_PLCP = 1 << 0,
-	RXDONE_MY_BSS = 1 << 1,
+	RXDONE_SIGNAL_BITRATE = 1 << 1,
+	RXDONE_MY_BSS = 1 << 2,
 };
 
 /**
@@ -152,7 +161,11 @@
  * @size: Data size of the received frame.
  * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
  * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
-
+ * @cipher: Cipher type used during decryption.
+ * @cipher_status: Decryption status.
+ * @iv: IV data used during decryption.
+ * @eiv: EIV data used during decryption.
+ * @icv: ICV data used during decryption.
  */
 struct rxdone_entry_desc {
 	u64 timestamp;
@@ -161,6 +174,12 @@
 	int size;
 	int flags;
 	int dev_flags;
+	u8 cipher;
+	u8 cipher_status;
+
+	__le32 iv;
+	__le32 eiv;
+	__le32 icv;
 };
 
 /**
@@ -206,6 +225,10 @@
  * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
  * @ENTRY_TXD_ACK: An ACK is required for this frame.
  * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
+ * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
+ * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
+ * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
+ * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
  */
 enum txentry_desc_flags {
 	ENTRY_TXD_RTS_FRAME,
@@ -218,6 +241,10 @@
 	ENTRY_TXD_BURST,
 	ENTRY_TXD_ACK,
 	ENTRY_TXD_RETRY_MODE,
+	ENTRY_TXD_ENCRYPT,
+	ENTRY_TXD_ENCRYPT_PAIRWISE,
+	ENTRY_TXD_ENCRYPT_IV,
+	ENTRY_TXD_ENCRYPT_MMIC,
 };
 
 /**
@@ -236,6 +263,9 @@
  * @ifs: IFS value.
  * @cw_min: cwmin value.
  * @cw_max: cwmax value.
+ * @cipher: Cipher type used for encryption.
+ * @key_idx: Key index used for encryption.
+ * @iv_offset: Position where IV should be inserted by hardware.
  */
 struct txentry_desc {
 	unsigned long flags;
@@ -252,6 +282,10 @@
 	short ifs;
 	short cw_min;
 	short cw_max;
+
+	enum cipher cipher;
+	u16 key_idx;
+	u16 iv_offset;
 };
 
 /**
@@ -335,6 +369,7 @@
  * @length: Number of frames in queue.
  * @index: Index pointers to entry positions in the queue,
  *	use &enum queue_index to get a specific index field.
+ * @txop: maximum burst time.
  * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
  * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
  * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
@@ -354,6 +389,7 @@
 	unsigned short length;
 	unsigned short index[Q_INDEX_MAX];
 
+	unsigned short txop;
 	unsigned short aifs;
 	unsigned short cw_min;
 	unsigned short cw_max;
@@ -484,25 +520,51 @@
 }
 
 /**
- * rt2x00_desc_read - Read a word from the hardware descriptor.
+ * _rt2x00_desc_read - Read a word from the hardware descriptor.
+ * @desc: Base descriptor address
+ * @word: Word index from where the descriptor should be read.
+ * @value: Address where the descriptor value should be written into.
+ */
+static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
+{
+	*value = desc[word];
+}
+
+/**
+ * rt2x00_desc_read - Read a word from the hardware descriptor, this
+ * function will take care of the byte ordering.
  * @desc: Base descriptor address
  * @word: Word index from where the descriptor should be read.
  * @value: Address where the descriptor value should be written into.
  */
 static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
 {
-	*value = le32_to_cpu(desc[word]);
+	__le32 tmp;
+	_rt2x00_desc_read(desc, word, &tmp);
+	*value = le32_to_cpu(tmp);
 }
 
 /**
- * rt2x00_desc_write - wrote a word to the hardware descriptor.
+ * _rt2x00_desc_write - write a raw word to the hardware descriptor
+ * without any byte ordering conversion.
+ * @desc: Base descriptor address
+ * @word: Word index from where the descriptor should be written.
+ * @value: Value that should be written into the descriptor.
+ */
+static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
+{
+	desc[word] = value;
+}
+
+/**
+ * rt2x00_desc_write - write a word to the hardware descriptor, handling byte ordering.
  * @desc: Base descriptor address
  * @word: Word index from where the descriptor should be written.
  * @value: Value that should be written into the descriptor.
  */
 static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
 {
-	desc[word] = cpu_to_le32(value);
+	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
 }
 
 #endif /* RT2X00QUEUE_H */
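The descriptor accessors above are split into raw variants (_rt2x00_desc_read()/_rt2x00_desc_write(), no byte swapping) and the wrappers that convert to and from CPU order, so values that are already little-endian, such as the IV/EIV words, can be copied into the descriptor untouched. A small stand-alone sketch of that split, using the glibc <endian.h> helpers in place of the kernel's cpu_to_le32()/le32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <endian.h>

/* Raw accessor: no byte ordering conversion. */
static inline void demo_desc_write_raw(uint32_t *desc, unsigned word, uint32_t le_value)
{
	desc[word] = le_value;
}

/* Wrappers: convert between CPU order and the little-endian descriptor. */
static inline void demo_desc_write(uint32_t *desc, unsigned word, uint32_t value)
{
	demo_desc_write_raw(desc, word, htole32(value));
}

static inline uint32_t demo_desc_read(const uint32_t *desc, unsigned word)
{
	return le32toh(desc[word]);
}

int main(void)
{
	uint32_t desc[6] = { 0 };
	uint32_t iv_le = htole32(0x01020304);	/* pretend this came off the wire */

	demo_desc_write(desc, 0, 0xdeadbeef);	/* CPU-order value, gets swapped */
	demo_desc_write_raw(desc, 3, iv_le);	/* already little-endian, copied as-is */

	printf("word 0 reads back as 0x%08" PRIx32 "\n", demo_desc_read(desc, 0));
	return 0;
}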
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 2ea7866..c2fba7c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -27,6 +27,16 @@
 #define RT2X00REG_H
 
 /*
+ * RX crypto status
+ */
+enum rx_crypto {
+	RX_CRYPTO_SUCCESS = 0,
+	RX_CRYPTO_FAIL_ICV = 1,
+	RX_CRYPTO_FAIL_MIC = 2,
+	RX_CRYPTO_FAIL_KEY = 3,
+};
+
+/*
  * Antenna values
  */
 enum antenna {
@@ -104,7 +114,14 @@
  */
 	CIPHER_CKIP64 = 5,
 	CIPHER_CKIP128 = 6,
-	CIPHER_TKIP_NO_MIC = 7,
+	CIPHER_TKIP_NO_MIC = 7, /* Don't send to device */
+
+/*
+ * Max cipher type.
+ * Note that CIPHER_NONE isn't counted, and CKIP64 and CKIP128
+ * are excluded due to limitations in mac80211.
+ */
+	CIPHER_MAX = 4,
 };
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index 04b2971..55eff58 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -41,20 +41,19 @@
 	/*
 	 * Only continue if there are enabled interfaces.
 	 */
-	if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
 		return 0;
 
 	if (state == RFKILL_STATE_UNBLOCKED) {
-		INFO(rt2x00dev, "Hardware button pressed, enabling radio.\n");
-		__clear_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags);
+		INFO(rt2x00dev, "RFKILL event: enabling radio.\n");
+		clear_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
 		retval = rt2x00lib_enable_radio(rt2x00dev);
 	} else if (state == RFKILL_STATE_SOFT_BLOCKED) {
-		INFO(rt2x00dev, "Hardware button pressed, disabling radio.\n");
-		__set_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags);
+		INFO(rt2x00dev, "RFKILL event: disabling radio.\n");
+		set_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
 		rt2x00lib_disable_radio(rt2x00dev);
 	} else {
-		WARNING(rt2x00dev, "Received unexpected rfkill state %d.\n",
-			state);
+		WARNING(rt2x00dev, "RFKILL event: unknown state %d.\n", state);
 	}
 
 	return retval;
@@ -64,7 +63,12 @@
 {
 	struct rt2x00_dev *rt2x00dev = data;
 
-	*state = rt2x00dev->rfkill->state;
+	/*
+	 * rfkill_poll reports 1 when the key has been pressed and the
+	 * radio should be blocked.
+	 */
+	*state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
+	    RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
 
 	return 0;
 }
@@ -73,19 +77,18 @@
 {
 	struct rt2x00_dev *rt2x00dev =
 	    container_of(work, struct rt2x00_dev, rfkill_work.work);
-	int state;
+	enum rfkill_state state;
 
-	if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
+	if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state) ||
+	    !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
 		return;
 
 	/*
-	 * rfkill_poll reports 1 when the key has been pressed and the
-	 * radio should be blocked.
+	 * Poll the latest state and report it to rfkill, which will
+	 * decide whether the state should be toggled or not.
 	 */
-	state = !rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
-	    RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
-
-	rfkill_force_state(rt2x00dev->rfkill, state);
+	if (!rt2x00rfkill_get_state(rt2x00dev, &state))
+		rfkill_force_state(rt2x00dev->rfkill, state);
 
 	queue_delayed_work(rt2x00dev->hw->workqueue,
 			   &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
@@ -93,8 +96,8 @@
 
 void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
-	    !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+	if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
+	    test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
 		return;
 
 	if (rfkill_register(rt2x00dev->rfkill)) {
@@ -114,7 +117,7 @@
 
 void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
+	if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
 	    !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
 		return;
 
@@ -127,21 +130,25 @@
 
 void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
+	struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
+
+	if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
 		return;
 
-	rt2x00dev->rfkill =
-	    rfkill_allocate(wiphy_dev(rt2x00dev->hw->wiphy), RFKILL_TYPE_WLAN);
+	rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
 	if (!rt2x00dev->rfkill) {
 		ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
 		return;
 	}
 
+	__set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
+
 	rt2x00dev->rfkill->name = rt2x00dev->ops->name;
 	rt2x00dev->rfkill->data = rt2x00dev;
 	rt2x00dev->rfkill->state = -1;
 	rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
-	rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
+	if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
+		rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
 
 	INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
 
@@ -150,8 +157,7 @@
 
 void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
 {
-	if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
-	    !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+	if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
 		return;
 
 	cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 2050227..b73a7e0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -163,16 +163,11 @@
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct txdone_entry_desc txdesc;
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
 	    !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
 	/*
-	 * Remove the descriptor data from the buffer.
-	 */
-	skb_pull(entry->skb, entry->queue->desc_size);
-
-	/*
 	 * Obtain the status about this packet.
 	 * Note that when the status is 0 it does not mean the
 	 * frame was send out correctly. It only means the frame
@@ -224,6 +219,12 @@
 			  entry->skb->data, length,
 			  rt2x00usb_interrupt_txdone, entry);
 
+	/*
+	 * Make sure the skb->data pointer points to the frame, not the
+	 * descriptor.
+	 */
+	skb_pull(entry->skb, entry->queue->desc_size);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
@@ -232,7 +233,7 @@
 {
 	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
 
-	if (__test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
+	if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
 		usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 }
 
@@ -283,7 +284,7 @@
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 	u8 rxd[32];
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
 	    !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
@@ -293,7 +294,7 @@
 	 * a problem.
 	 */
 	if (urb->actual_length < entry->queue->desc_size || urb->status) {
-		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+		set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 		usb_submit_urb(urb, GFP_ATOMIC);
 		return;
 	}
@@ -361,7 +362,7 @@
 			  entry->skb->data, entry->skb->len,
 			  rt2x00usb_interrupt_rxdone, entry);
 
-	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+	set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 	usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
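rt2x00usb_write_tx_data() above now pulls skb->data past the TX descriptor after filling the URB, instead of doing so in the txdone path: the URB covers descriptor plus frame, while the rest of the stack only ever sees the frame. A pointer-only sketch of that buffer layout (sizes and names are illustrative):

#include <stdio.h>
#include <string.h>

#define DEMO_DESC_SIZE	24	/* TX descriptor size, illustrative */
#define DEMO_FRAME_LEN	60

int main(void)
{
	unsigned char buffer[DEMO_DESC_SIZE + DEMO_FRAME_LEN];
	unsigned char *data = buffer;		/* skb->data, descriptor first */
	size_t len = sizeof(buffer);		/* skb->len, descriptor + frame */

	memset(buffer, 0, sizeof(buffer));

	/* The URB is filled over the whole buffer (descriptor + frame). */
	printf("urb transfer buffer: %zu bytes\n", len);

	/* "skb_pull(skb, desc_size)": from now on only the frame is visible. */
	data += DEMO_DESC_SIZE;
	len -= DEMO_DESC_SIZE;
	printf("frame seen by the rest of the stack: %zu bytes at offset %d\n",
	       len, DEMO_DESC_SIZE);
	return 0;
}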
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 087e90b..a461620 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -38,6 +38,13 @@
 #include "rt61pci.h"
 
 /*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 0;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+/*
  * Register access.
  * BBP and RF register require indirect register access,
  * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
@@ -156,7 +163,7 @@
 	rt2x00_rf_write(rt2x00dev, word, value);
 }
 
-#ifdef CONFIG_RT61PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 /*
  * This function is only called from rt61pci_led_brightness()
  * make gcc happy by placing this function inside the
@@ -188,7 +195,7 @@
 	rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
 	rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
 }
-#endif /* CONFIG_RT61PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
 {
@@ -264,7 +271,7 @@
 };
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
-#ifdef CONFIG_RT61PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
@@ -274,9 +281,9 @@
 }
 #else
 #define rt61pci_rfkill_poll	NULL
-#endif /* CONFIG_RT61PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
-#ifdef CONFIG_RT61PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 static void rt61pci_brightness_set(struct led_classdev *led_cdev,
 				   enum led_brightness brightness)
 {
@@ -341,11 +348,209 @@
 	led->led_dev.blink_set = rt61pci_blink_set;
 	led->flags = LED_INITIALIZED;
 }
-#endif /* CONFIG_RT61PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 /*
  * Configuration handlers.
  */
+static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
+				     struct rt2x00lib_crypto *crypto,
+				     struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	struct rt2x00_field32 field;
+	u32 mask;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * rt2x00lib can't determine the correct free
+		 * key_idx for shared keys. We have 1 register
+		 * with key valid bits. The goal is simple, read
+		 * with key valid bits. The goal is simple: read
+		 * the register; if it is full, we have no slots
+		 * left.
+		 * shared keys, so put a mask over the allowed
+		 * entries.
+		 */
+		mask = (0xf << crypto->bssidx);
+
+		rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
+		reg &= mask;
+
+		if (reg && reg == mask)
+			return -ENOSPC;
+
+		key->hw_key_idx += reg ? ffz(reg) : 0;
+
+		/*
+		 * Upload key to hardware
+		 */
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		reg = SHARED_KEY_ENTRY(key->hw_key_idx);
+		rt2x00pci_register_multiwrite(rt2x00dev, reg,
+					      &key_entry, sizeof(key_entry));
+
+		/*
+		 * The cipher types are stored over 2 registers.
+		 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
+		 * bssidx 1 and 2 keys are stored in SEC_CSR5.
+		 * bssidx 2 and 3 keys are stored in SEC_CSR5.
+		 * Using the individual field defines would cause overhead,
+		 */
+		if (key->hw_key_idx < 8) {
+			field.bit_offset = (3 * key->hw_key_idx);
+			field.bit_mask = 0x7 << field.bit_offset;
+
+			rt2x00pci_register_read(rt2x00dev, SEC_CSR1, &reg);
+			rt2x00_set_field32(&reg, field, crypto->cipher);
+			rt2x00pci_register_write(rt2x00dev, SEC_CSR1, reg);
+		} else {
+			field.bit_offset = (3 * (key->hw_key_idx - 8));
+			field.bit_mask = 0x7 << field.bit_offset;
+
+			rt2x00pci_register_read(rt2x00dev, SEC_CSR5, &reg);
+			rt2x00_set_field32(&reg, field, crypto->cipher);
+			rt2x00pci_register_write(rt2x00dev, SEC_CSR5, reg);
+		}
+
+		/*
+		 * The driver does not support the IV/EIV generation
+		 * in hardware. However it doesn't support the IV/EIV
+		 * inside the ieee80211 frame either, but requires it
+		 * to be provided separately for the descriptor.
+		 * rt2x00lib will cut the IV/EIV data out of all frames
+		 * given to us by mac80211, but we must tell mac80211
+		 * to generate the IV/EIV data.
+		 */
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
+	/*
+	 * SEC_CSR0 contains only single-bit fields to indicate
+	 * a particular key is valid. Because using the FIELD32()
+	 * defines directly will cause a lot of overhead we use
+	 * a calculation to determine the correct bit directly.
+	 */
+	mask = 1 << key->hw_key_idx;
+
+	rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
+	if (crypto->cmd == SET_KEY)
+		reg |= mask;
+	else if (crypto->cmd == DISABLE_KEY)
+		reg &= ~mask;
+	rt2x00pci_register_write(rt2x00dev, SEC_CSR0, reg);
+
+	return 0;
+}
+
+static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+				       struct rt2x00lib_crypto *crypto,
+				       struct ieee80211_key_conf *key)
+{
+	struct hw_pairwise_ta_entry addr_entry;
+	struct hw_key_entry key_entry;
+	u32 mask;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * rt2x00lib can't determine the correct free
+		 * key_idx for pairwise keys. We have 2 registers
+		 * with key valid bits. The goal is simple, read
+		 * with key valid bits. The goal is simple: read
+		 * the first register; if that is full, move to
+		 * When both registers are full, we drop the key,
+		 * otherwise we use the first invalid entry.
+		 */
+		rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
+		if (reg && reg == ~0) {
+			key->hw_key_idx = 32;
+			rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
+			if (reg && reg == ~0)
+				return -ENOSPC;
+		}
+
+		key->hw_key_idx += reg ? ffz(reg) : 0;
+
+		/*
+		 * Upload key to hardware
+		 */
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		memset(&addr_entry, 0, sizeof(addr_entry));
+		memcpy(&addr_entry, crypto->address, ETH_ALEN);
+		addr_entry.cipher = crypto->cipher;
+
+		reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
+		rt2x00pci_register_multiwrite(rt2x00dev, reg,
+					      &key_entry, sizeof(key_entry));
+
+		reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
+		rt2x00pci_register_multiwrite(rt2x00dev, reg,
+					      &addr_entry, sizeof(addr_entry));
+
+		/*
+		 * Enable pairwise lookup table for given BSS idx,
+		 * without this received frames will not be decrypted
+		 * by the hardware.
+		 */
+		rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg);
+		reg |= (1 << crypto->bssidx);
+		rt2x00pci_register_write(rt2x00dev, SEC_CSR4, reg);
+
+		/*
+		 * The driver does not support the IV/EIV generation
+		 * in hardware. However it doesn't support the IV/EIV
+		 * inside the ieee80211 frame either, but requires it
+		 * to be provided separately for the descriptor.
+		 * rt2x00lib will cut the IV/EIV data out of all frames
+		 * given to us by mac80211, but we must tell mac80211
+		 * to generate the IV/EIV data.
+		 */
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
+	/*
+	 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
+	 * a particular key is valid. Because using the FIELD32()
+	 * defines directly will cause a lot of overhead we use
+	 * a calculation to determine the correct bit directly.
+	 */
+	if (key->hw_key_idx < 32) {
+		mask = 1 << key->hw_key_idx;
+
+		rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
+		if (crypto->cmd == SET_KEY)
+			reg |= mask;
+		else if (crypto->cmd == DISABLE_KEY)
+			reg &= ~mask;
+		rt2x00pci_register_write(rt2x00dev, SEC_CSR2, reg);
+	} else {
+		mask = 1 << (key->hw_key_idx - 32);
+
+		rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
+		if (crypto->cmd == SET_KEY)
+			reg |= mask;
+		else if (crypto->cmd == DISABLE_KEY)
+			reg &= ~mask;
+		rt2x00pci_register_write(rt2x00dev, SEC_CSR3, reg);
+	}
+
+	return 0;
+}
+
 static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
 				  const unsigned int filter_flags)
 {
@@ -440,6 +645,30 @@
 	rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
 }
 
+
+static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
+				    struct rt2x00lib_conf *libconf)
+{
+	u16 eeprom;
+	short lna_gain = 0;
+
+	if (libconf->band == IEEE80211_BAND_2GHZ) {
+		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
+			lna_gain += 14;
+
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
+		lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
+	} else {
+		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
+			lna_gain += 14;
+
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
+		lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
+	}
+
+	rt2x00dev->lna_gain = lna_gain;
+}
+
 static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev,
 				   const int basic_rate_mask)
 {
@@ -758,6 +987,9 @@
 			   struct rt2x00lib_conf *libconf,
 			   const unsigned int flags)
 {
+	/* Always recalculate LNA gain before changing configuration */
+	rt61pci_config_lna_gain(rt2x00dev, libconf);
+
 	if (flags & CONFIG_UPDATE_PHYMODE)
 		rt61pci_config_phymode(rt2x00dev, libconf->basic_rates);
 	if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1246,16 +1478,6 @@
 
 	rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff);
 
-	rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
-	rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
-
-	rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
-	rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
-
 	/*
 	 * Clear all beacons
 	 * For the Beacon base registers we only need to clear
@@ -1533,8 +1755,8 @@
  * TX descriptor initialization
  */
 static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
-				    struct sk_buff *skb,
-				    struct txentry_desc *txdesc)
+				  struct sk_buff *skb,
+				  struct txentry_desc *txdesc)
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 	__le32 *txd = skbdesc->desc;
@@ -1548,7 +1770,7 @@
 	rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
 	rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
 	rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
-	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
+	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
 	rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
 			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
 	rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
@@ -1561,6 +1783,11 @@
 	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
 	rt2x00_desc_write(txd, 2, word);
 
+	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
+		_rt2x00_desc_write(txd, 3, skbdesc->iv);
+		_rt2x00_desc_write(txd, 4, skbdesc->eiv);
+	}
+
 	rt2x00_desc_read(txd, 5, &word);
 	rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
 	rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
@@ -1595,11 +1822,15 @@
 	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
 	rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
 			   test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
+	rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
+			   test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
+			   test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
 	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
 	rt2x00_set_field32(&word, TXD_W0_BURST,
 			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
+	rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
 	rt2x00_desc_write(txd, 0, word);
 }
 
@@ -1676,40 +1907,27 @@
  */
 static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
 {
-	u16 eeprom;
-	u8 offset;
+	u8 offset = rt2x00dev->lna_gain;
 	u8 lna;
 
 	lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
 	switch (lna) {
 	case 3:
-		offset = 90;
+		offset += 90;
 		break;
 	case 2:
-		offset = 74;
+		offset += 74;
 		break;
 	case 1:
-		offset = 64;
+		offset += 64;
 		break;
 	default:
 		return 0;
 	}
 
 	if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
-		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
-			offset += 14;
-
 		if (lna == 3 || lna == 2)
 			offset += 10;
-
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
-		offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
-	} else {
-		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
-			offset += 14;
-
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
-		offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
 	}
 
 	return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1718,6 +1936,7 @@
 static void rt61pci_fill_rxdone(struct queue_entry *entry,
 			        struct rxdone_entry_desc *rxdesc)
 {
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
 	u32 word0;
 	u32 word1;
@@ -1728,6 +1947,38 @@
 	if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
 		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
 
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+		rxdesc->cipher =
+		    rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+		rxdesc->cipher_status =
+		    rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
+	}
+
+	if (rxdesc->cipher != CIPHER_NONE) {
+		_rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv);
+		_rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->eiv);
+		_rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv);
+
+		/*
+		 * Hardware has stripped the IV/EIV data from the 802.11 frame
+		 * during decryption. It has provided the data separately, but
+		 * rt2x00lib should decide whether it should be reinserted.
+		 */
+		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+		/*
+		 * FIXME: Legacy driver indicates that the frame does
+		 * contain the Michael MIC. Unfortunately, in rt2x00
+		 * the MIC seems to be missing completely...
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+			rxdesc->flags |= RX_FLAG_DECRYPTED;
+		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+	}
+
 	/*
 	 * Obtain the status about this packet.
 	 * When frame was received with an OFDM bitrate,
@@ -1735,11 +1986,13 @@
 	 * a CCK bitrate the signal is the rate in 100kbit/s.
 	 */
 	rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
-	rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1);
+	rxdesc->rssi = rt61pci_agc_to_rssi(rt2x00dev, word1);
 	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
 
 	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
 		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
+	else
+		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
 	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
 		rxdesc->dev_flags |= RXDONE_MY_BSS;
 }
@@ -1860,7 +2113,7 @@
 	if (!reg && !reg_mcu)
 		return IRQ_NONE;
 
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
 		return IRQ_HANDLED;
 
 	/*
@@ -2060,10 +2313,10 @@
 	/*
 	 * Detect if this device has an hardware controlled radio.
 	 */
-#ifdef CONFIG_RT61PCI_RFKILL
+#ifdef CONFIG_RT2X00_LIB_RFKILL
 	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
 		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
-#endif /* CONFIG_RT61PCI_RFKILL */
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
 
 	/*
 	 * Read frequency offset and RF programming sequence.
@@ -2121,7 +2374,7 @@
 	 * If the eeprom value is invalid,
 	 * switch to default led mode.
 	 */
-#ifdef CONFIG_RT61PCI_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
 	value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE);
 
@@ -2155,7 +2408,7 @@
 	rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
 			   rt2x00_get_field16(eeprom,
 					      EEPROM_LED_POLARITY_RDY_A));
-#endif /* CONFIG_RT61PCI_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 	return 0;
 }
@@ -2274,10 +2527,11 @@
 	{ 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 },
 };
 
-static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	u8 *txpower;
+	struct channel_info *info;
+	char *tx_power;
 	unsigned int i;
 
 	/*
@@ -2294,20 +2548,10 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
-	 * Convert tx_power array in eeprom.
-	 */
-	txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
-	for (i = 0; i < 14; i++)
-		txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
-	spec->tx_power_a = NULL;
-	spec->tx_power_bg = txpower;
-	spec->tx_power_default = DEFAULT_TXPOWER;
 
 	if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) {
 		spec->num_channels = 14;
@@ -2321,13 +2565,28 @@
 	    rt2x00_rf(&rt2x00dev->chip, RF5325)) {
 		spec->supported_bands |= SUPPORT_BAND_5GHZ;
 		spec->num_channels = ARRAY_SIZE(rf_vals_seq);
-
-		txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
-		for (i = 0; i < 14; i++)
-			txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-		spec->tx_power_a = txpower;
 	}
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
+	for (i = 0; i < 14; i++)
+		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+
+	if (spec->num_channels > 14) {
+		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
+		for (i = 14; i < spec->num_channels; i++)
+			info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+	}
+
+	return 0;
 }
 
 static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -2348,13 +2607,17 @@
 	/*
 	 * Initialize hw specifications.
 	 */
-	rt61pci_probe_hw_mode(rt2x00dev);
+	retval = rt61pci_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
 
 	/*
 	 * This device requires firmware and DMA mapped skbs.
 	 */
 	__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
 	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+	if (!modparam_nohwcrypt)
+		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
 
 	/*
 	 * Set the rssi offset.
@@ -2381,6 +2644,63 @@
 	return 0;
 }
 
+static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+			   const struct ieee80211_tx_queue_params *params)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct data_queue *queue;
+	struct rt2x00_field32 field;
+	int retval;
+	u32 reg;
+
+	/*
+	 * First pass the configuration through rt2x00lib, which will
+	 * update the queue settings and validate the input. After that,
+	 * we are free to update the registers based on the value
+	 * in the queue parameter.
+	 */
+	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+	if (retval)
+		return retval;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+
+	/* Update WMM TXOP register */
+	if (queue_idx < 2) {
+		field.bit_offset = queue_idx * 16;
+		field.bit_mask = 0xffff << field.bit_offset;
+
+		rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
+		rt2x00_set_field32(&reg, field, queue->txop);
+		rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
+	} else if (queue_idx < 4) {
+		field.bit_offset = (queue_idx - 2) * 16;
+		field.bit_mask = 0xffff << field.bit_offset;
+
+		rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
+		rt2x00_set_field32(&reg, field, queue->txop);
+		rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
+	}
+
+	/* Update WMM registers */
+	field.bit_offset = queue_idx * 4;
+	field.bit_mask = 0xf << field.bit_offset;
+
+	rt2x00pci_register_read(rt2x00dev, AIFSN_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->aifs);
+	rt2x00pci_register_write(rt2x00dev, AIFSN_CSR, reg);
+
+	rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_min);
+	rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg);
+
+	rt2x00pci_register_read(rt2x00dev, CWMAX_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_max);
+	rt2x00pci_register_write(rt2x00dev, CWMAX_CSR, reg);
+
+	return 0;
+}
+
 static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2404,10 +2724,11 @@
 	.config			= rt2x00mac_config,
 	.config_interface	= rt2x00mac_config_interface,
 	.configure_filter	= rt2x00mac_configure_filter,
+	.set_key		= rt2x00mac_set_key,
 	.get_stats		= rt2x00mac_get_stats,
 	.set_retry_limit	= rt61pci_set_retry_limit,
 	.bss_info_changed	= rt2x00mac_bss_info_changed,
-	.conf_tx		= rt2x00mac_conf_tx,
+	.conf_tx		= rt61pci_conf_tx,
 	.get_tx_stats		= rt2x00mac_get_tx_stats,
 	.get_tsf		= rt61pci_get_tsf,
 };
@@ -2432,6 +2753,8 @@
 	.write_beacon		= rt61pci_write_beacon,
 	.kick_tx_queue		= rt61pci_kick_tx_queue,
 	.fill_rxdone		= rt61pci_fill_rxdone,
+	.config_shared_key	= rt61pci_config_shared_key,
+	.config_pairwise_key	= rt61pci_config_pairwise_key,
 	.config_filter		= rt61pci_config_filter,
 	.config_intf		= rt61pci_config_intf,
 	.config_erp		= rt61pci_config_erp,
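
The new rt61pci_conf_tx() above derives a rt2x00_field32 descriptor at runtime
(bit_offset and bit_mask computed from the queue index) rather than using one
fixed FIELD32() define per queue. A minimal standalone C sketch of that
arithmetic, with illustrative names and register values that are not part of
the driver:

#include <stdint.h>
#include <stdio.h>

struct field32 {
	unsigned int bit_offset;
	uint32_t bit_mask;
};

/* Insert 'value' into 'reg' at the position described by 'field'. */
static void set_field32(uint32_t *reg, struct field32 field, uint32_t value)
{
	*reg &= ~field.bit_mask;
	*reg |= (value << field.bit_offset) & field.bit_mask;
}

int main(void)
{
	uint32_t aifsn_csr = 0;
	unsigned int queue_idx;

	/* The AIFSN register packs one 4-bit AIFS value per queue. */
	for (queue_idx = 0; queue_idx < 4; queue_idx++) {
		struct field32 field = {
			.bit_offset = queue_idx * 4,
			.bit_mask = 0xfu << (queue_idx * 4),
		};

		set_field32(&aifsn_csr, field, 2 + queue_idx);
	}

	printf("packed AIFS values: 0x%08x\n", aifsn_csr);
	return 0;
}

The 16-bit TXOP fields in AC_TXOP_CSR0/1 follow the same pattern with
bit_offset = queue_idx * 16 and a 0xffff mask.
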
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 1004d5b..8ec1451 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -134,6 +134,16 @@
 #define PAIRWISE_KEY_TABLE_BASE		0x1200
 #define PAIRWISE_TA_TABLE_BASE		0x1a00
 
+#define SHARED_KEY_ENTRY(__idx) \
+	( SHARED_KEY_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_key_entry)) )
+#define PAIRWISE_KEY_ENTRY(__idx) \
+	( PAIRWISE_KEY_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_key_entry)) )
+#define PAIRWISE_TA_ENTRY(__idx) \
+	( PAIRWISE_TA_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
+
 struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
@@ -142,7 +152,8 @@
 
 struct hw_pairwise_ta_entry {
 	u8 address[6];
-	u8 reserved[2];
+	u8 cipher;
+	u8 reserved;
 } __attribute__ ((packed));
 
 /*
@@ -662,6 +673,10 @@
  * SEC_CSR4: Pairwise key table lookup control.
  */
 #define SEC_CSR4			0x30b0
+#define SEC_CSR4_ENABLE_BSS0		FIELD32(0x00000001)
+#define SEC_CSR4_ENABLE_BSS1		FIELD32(0x00000002)
+#define SEC_CSR4_ENABLE_BSS2		FIELD32(0x00000004)
+#define SEC_CSR4_ENABLE_BSS3		FIELD32(0x00000008)
 
 /*
  * SEC_CSR5: shared key table security mode register.
@@ -1428,8 +1443,10 @@
 
 /*
  * Word4
+ * ICV: Received ICV of the originally encrypted frame.
+ * NOTE: This is a guess; the official definition is "reserved".
  */
-#define RXD_W4_RESERVED			FIELD32(0xffffffff)
+#define RXD_W4_ICV			FIELD32(0xffffffff)
 
 /*
  * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1465,17 +1482,10 @@
 #define MAX_TXPOWER	31
 #define DEFAULT_TXPOWER	24
 
-#define TXPOWER_FROM_DEV(__txpower)		\
-({						\
-	((__txpower) > MAX_TXPOWER) ?		\
-		DEFAULT_TXPOWER : (__txpower);	\
-})
+#define TXPOWER_FROM_DEV(__txpower) \
+	(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_TO_DEV(__txpower)			\
-({							\
-	((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER :	\
-	(((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER :	\
-	(__txpower));					\
-})
+#define TXPOWER_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
 
 #endif /* RT61PCI_H */
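
The SHARED_KEY_ENTRY()/PAIRWISE_KEY_ENTRY() macros added above compute a
hardware offset as base + index * sizeof(entry). A standalone sketch of that
addressing, assuming an illustrative base address (the real bases are the
*_TABLE_BASE defines in this header):

#include <stdint.h>
#include <stdio.h>

struct hw_key_entry {
	uint8_t key[16];
	uint8_t tx_mic[8];
	uint8_t rx_mic[8];
};				/* 32 bytes per hardware key slot */

#define KEY_TABLE_BASE	0x1000	/* illustrative base, not the real CSR address */
#define KEY_ENTRY(__idx) \
	(KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 4; idx++)
		printf("key slot %u -> offset 0x%04zx\n", idx, KEY_ENTRY(idx));
	return 0;
}
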
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9761eaa..934f8e03 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -37,6 +37,13 @@
 #include "rt73usb.h"
 
 /*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 0;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+/*
  * Register access.
  * All access to the CSR registers will go through the methods
  * rt73usb_register_read and rt73usb_register_write.
@@ -285,7 +292,7 @@
 };
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 
-#ifdef CONFIG_RT73USB_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 static void rt73usb_brightness_set(struct led_classdev *led_cdev,
 				   enum led_brightness brightness)
 {
@@ -352,11 +359,224 @@
 	led->led_dev.blink_set = rt73usb_blink_set;
 	led->flags = LED_INITIALIZED;
 }
-#endif /* CONFIG_RT73USB_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 /*
  * Configuration handlers.
  */
+static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
+				     struct rt2x00lib_crypto *crypto,
+				     struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	struct rt2x00_field32 field;
+	int timeout;
+	u32 mask;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * rt2x00lib can't determine the correct free
+		 * key_idx for shared keys. We have 1 register
+		 * with key valid bits. The goal is simple: read
+		 * the register; if it is full, we have no slots
+		 * left.
+		 * Note that each BSS is allowed to have up to 4
+		 * shared keys, so put a mask over the allowed
+		 * entries.
+		 */
+		mask = (0xf << crypto->bssidx);
+
+		rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
+		reg &= mask;
+
+		if (reg && reg == mask)
+			return -ENOSPC;
+
+		key->hw_key_idx += reg ? ffz(reg) : 0;
+
+		/*
+		 * Upload key to hardware
+		 */
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		reg = SHARED_KEY_ENTRY(key->hw_key_idx);
+		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
+		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+						    USB_VENDOR_REQUEST_OUT, reg,
+						    &key_entry,
+						    sizeof(key_entry),
+						    timeout);
+
+		/*
+		 * The cipher types are stored over 2 registers:
+		 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
+		 * bssidx 2 and 3 keys are stored in SEC_CSR5.
+		 * Using the FIELD32() defines directly would cause overhead,
+		 * so just calculate the correct offset.
+		 */
+		if (key->hw_key_idx < 8) {
+			field.bit_offset = (3 * key->hw_key_idx);
+			field.bit_mask = 0x7 << field.bit_offset;
+
+			rt73usb_register_read(rt2x00dev, SEC_CSR1, &reg);
+			rt2x00_set_field32(&reg, field, crypto->cipher);
+			rt73usb_register_write(rt2x00dev, SEC_CSR1, reg);
+		} else {
+			field.bit_offset = (3 * (key->hw_key_idx - 8));
+			field.bit_mask = 0x7 << field.bit_offset;
+
+			rt73usb_register_read(rt2x00dev, SEC_CSR5, &reg);
+			rt2x00_set_field32(&reg, field, crypto->cipher);
+			rt73usb_register_write(rt2x00dev, SEC_CSR5, reg);
+		}
+
+		/*
+		 * The driver does not support IV/EIV generation in
+		 * hardware. It does not support the IV/EIV inside the
+		 * ieee80211 frame either, but requires it to be
+		 * provided separately for the descriptor.
+		 * rt2x00lib will cut the IV/EIV data out of all frames
+		 * given to us by mac80211, but we must tell mac80211
+		 * to generate the IV/EIV data.
+		 */
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
+	/*
+	 * SEC_CSR0 contains only single-bit fields to indicate
+	 * whether a particular key is valid. Because using the FIELD32()
+	 * defines directly would cause a lot of overhead, we use
+	 * a calculation to determine the correct bit directly.
+	 */
+	mask = 1 << key->hw_key_idx;
+
+	rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
+	if (crypto->cmd == SET_KEY)
+		reg |= mask;
+	else if (crypto->cmd == DISABLE_KEY)
+		reg &= ~mask;
+	rt73usb_register_write(rt2x00dev, SEC_CSR0, reg);
+
+	return 0;
+}
+
+static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+				       struct rt2x00lib_crypto *crypto,
+				       struct ieee80211_key_conf *key)
+{
+	struct hw_pairwise_ta_entry addr_entry;
+	struct hw_key_entry key_entry;
+	int timeout;
+	u32 mask;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * rt2x00lib can't determine the correct free
+		 * key_idx for pairwise keys. We have 2 registers
+		 * with key valid bits. The goal is simple: read
+		 * the first register; if it is full, move to
+		 * the next register.
+		 * When both registers are full, we drop the key;
+		 * otherwise we use the first invalid entry.
+		 */
+		rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
+		if (reg && reg == ~0) {
+			key->hw_key_idx = 32;
+			rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
+			if (reg && reg == ~0)
+				return -ENOSPC;
+		}
+
+		key->hw_key_idx += reg ? ffz(reg) : 0;
+
+		/*
+		 * Upload key to hardware
+		 */
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
+		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
+		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+						    USB_VENDOR_REQUEST_OUT, reg,
+						    &key_entry,
+						    sizeof(key_entry),
+						    timeout);
+
+		/*
+		 * Send the address and cipher type to the hardware register.
+		 * This data fits within the CSR cache size, so we can use
+		 * rt73usb_register_multiwrite() directly.
+		 */
+		memset(&addr_entry, 0, sizeof(addr_entry));
+		memcpy(&addr_entry, crypto->address, ETH_ALEN);
+		addr_entry.cipher = crypto->cipher;
+
+		reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
+		rt73usb_register_multiwrite(rt2x00dev, reg,
+					    &addr_entry, sizeof(addr_entry));
+
+		/*
+		 * Enable the pairwise lookup table for the given BSS idx;
+		 * without this, received frames will not be decrypted
+		 * by the hardware.
+		 */
+		rt73usb_register_read(rt2x00dev, SEC_CSR4, &reg);
+		reg |= (1 << crypto->bssidx);
+		rt73usb_register_write(rt2x00dev, SEC_CSR4, reg);
+
+		/*
+		 * The driver does not support IV/EIV generation in
+		 * hardware. It does not support the IV/EIV inside the
+		 * ieee80211 frame either, but requires it to be
+		 * provided separately for the descriptor.
+		 * rt2x00lib will cut the IV/EIV data out of all frames
+		 * given to us by mac80211, but we must tell mac80211
+		 * to generate the IV/EIV data.
+		 */
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
+	/*
+	 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
+	 * whether a particular key is valid. Because using the FIELD32()
+	 * defines directly would cause a lot of overhead, we use
+	 * a calculation to determine the correct bit directly.
+	 */
+	if (key->hw_key_idx < 32) {
+		mask = 1 << key->hw_key_idx;
+
+		rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
+		if (crypto->cmd == SET_KEY)
+			reg |= mask;
+		else if (crypto->cmd == DISABLE_KEY)
+			reg &= ~mask;
+		rt73usb_register_write(rt2x00dev, SEC_CSR2, reg);
+	} else {
+		mask = 1 << (key->hw_key_idx - 32);
+
+		rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
+		if (crypto->cmd == SET_KEY)
+			reg |= mask;
+		else if (crypto->cmd == DISABLE_KEY)
+			reg &= ~mask;
+		rt73usb_register_write(rt2x00dev, SEC_CSR3, reg);
+	}
+
+	return 0;
+}
+
 static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
 				  const unsigned int filter_flags)
 {
@@ -451,6 +671,26 @@
 	rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg);
 }
 
+static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
+				    struct rt2x00lib_conf *libconf)
+{
+	u16 eeprom;
+	short lna_gain = 0;
+
+	if (libconf->band == IEEE80211_BAND_2GHZ) {
+		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
+			lna_gain += 14;
+
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
+		lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
+	} else {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
+		lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
+	}
+
+	rt2x00dev->lna_gain = lna_gain;
+}
+
 static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev,
 				   const int basic_rate_mask)
 {
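
The new rt73usb_config_lna_gain() above caches the per-band LNA gain once per
configuration change, so the RX path only adds the per-frame LNA state
constant before converting the AGC reading (see rt73usb_agc_to_rssi() later
in this diff). A standalone sketch of that conversion, with an assumed cached
gain value; the extra 5 GHz adjustments are omitted:

#include <stdio.h>

/*
 * cached_lna_gain stands for what rt73usb_config_lna_gain() computes per
 * band (external LNA bonus minus the EEPROM RSSI offset); lna_state is the
 * per-frame RXD_W1_RSSI_LNA value.
 */
static int agc_to_rssi(int agc, int lna_state, int cached_lna_gain)
{
	int offset = cached_lna_gain;

	switch (lna_state) {
	case 3:
		offset += 90;
		break;
	case 2:
		offset += 74;
		break;
	case 1:
		offset += 64;
		break;
	default:
		return 0;
	}

	return agc * 2 - offset;
}

int main(void)
{
	/* Assume an external LNA (+14) and an EEPROM offset of 3 -> gain 11. */
	printf("rssi = %d\n", agc_to_rssi(30, 3, 11));
	return 0;
}
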
@@ -705,6 +945,9 @@
 			   struct rt2x00lib_conf *libconf,
 			   const unsigned int flags)
 {
+	/* Always recalculate LNA gain before changing configuration */
+	rt73usb_config_lna_gain(rt2x00dev, libconf);
+
 	if (flags & CONFIG_UPDATE_PHYMODE)
 		rt73usb_config_phymode(rt2x00dev, libconf->basic_rates);
 	if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1034,16 +1277,6 @@
 	rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
 	rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
 
-	rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
-	rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
-
-	rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
-	rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
-	rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
-
 	rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg);
 	rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
 	rt73usb_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -1265,8 +1498,8 @@
  * TX descriptor initialization
  */
 static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
-				    struct sk_buff *skb,
-				    struct txentry_desc *txdesc)
+				  struct sk_buff *skb,
+				  struct txentry_desc *txdesc)
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 	__le32 *txd = skbdesc->desc;
@@ -1280,7 +1513,7 @@
 	rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
 	rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
 	rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
-	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
+	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
 	rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
 			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
 	rt2x00_desc_write(txd, 1, word);
@@ -1292,6 +1525,11 @@
 	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
 	rt2x00_desc_write(txd, 2, word);
 
+	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
+		_rt2x00_desc_write(txd, 3, skbdesc->iv);
+		_rt2x00_desc_write(txd, 4, skbdesc->eiv);
+	}
+
 	rt2x00_desc_read(txd, 5, &word);
 	rt2x00_set_field32(&word, TXD_W5_TX_POWER,
 			   TXPOWER_TO_DEV(rt2x00dev->tx_power));
@@ -1313,12 +1551,15 @@
 	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
 	rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
 			   test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
-	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT,
-			   skb->len - skbdesc->desc_len);
+	rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
+			   test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
+			   test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
+	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
 	rt2x00_set_field32(&word, TXD_W0_BURST2,
 			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
+	rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
 	rt2x00_desc_write(txd, 0, word);
 }
 
@@ -1331,7 +1572,6 @@
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 	unsigned int beacon_base;
 	u32 reg;
-	u32 word, len;
 
 	/*
 	 * Add the descriptor in front of the skb.
@@ -1341,17 +1581,6 @@
 	skbdesc->desc = entry->skb->data;
 
 	/*
-	 * Adjust the beacon databyte count. The current number is
-	 * calculated before this function gets called, but falsely
-	 * assumes that the descriptor was already present in the SKB.
-	 */
-	rt2x00_desc_read(skbdesc->desc, 0, &word);
-	len  = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
-	len += skbdesc->desc_len;
-	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
-	rt2x00_desc_write(skbdesc->desc, 0, word);
-
-	/*
 	 * Disable beaconing while we are reloading the beacon data,
 	 * otherwise we might be sending out invalid data.
 	 */
@@ -1422,20 +1651,19 @@
  */
 static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
 {
-	u16 eeprom;
-	u8 offset;
+	u8 offset = rt2x00dev->lna_gain;
 	u8 lna;
 
 	lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
 	switch (lna) {
 	case 3:
-		offset = 90;
+		offset += 90;
 		break;
 	case 2:
-		offset = 74;
+		offset += 74;
 		break;
 	case 1:
-		offset = 64;
+		offset += 64;
 		break;
 	default:
 		return 0;
@@ -1451,15 +1679,6 @@
 			else if (lna == 2)
 				offset += 8;
 		}
-
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
-		offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
-	} else {
-		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
-			offset += 14;
-
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
-		offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
 	}
 
 	return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1468,6 +1687,7 @@
 static void rt73usb_fill_rxdone(struct queue_entry *entry,
 			        struct rxdone_entry_desc *rxdesc)
 {
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 	__le32 *rxd = (__le32 *)entry->skb->data;
 	u32 word0;
@@ -1489,6 +1709,38 @@
 	if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
 		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
 
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+		rxdesc->cipher =
+		    rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+		rxdesc->cipher_status =
+		    rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
+	}
+
+	if (rxdesc->cipher != CIPHER_NONE) {
+		_rt2x00_desc_read(rxd, 2, &rxdesc->iv);
+		_rt2x00_desc_read(rxd, 3, &rxdesc->eiv);
+		_rt2x00_desc_read(rxd, 4, &rxdesc->icv);
+
+		/*
+		 * Hardware has stripped the IV/EIV data from the 802.11 frame
+		 * during decryption. It has provided the data separately, but
+		 * rt2x00lib should decide whether it should be reinserted.
+		 */
+		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+		/*
+		 * FIXME: Legacy driver indicates that the frame does
+		 * contain the Michael MIC. Unfortunately, in rt2x00
+		 * the MIC seems to be missing completely...
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+			rxdesc->flags |= RX_FLAG_DECRYPTED;
+		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+	}
+
 	/*
 	 * Obtain the status about this packet.
 	 * When frame was received with an OFDM bitrate,
@@ -1496,11 +1748,13 @@
 	 * a CCK bitrate the signal is the rate in 100kbit/s.
 	 */
 	rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
-	rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1);
+	rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1);
 	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
 
 	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
 		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
+	else
+		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
 	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
 		rxdesc->dev_flags |= RXDONE_MY_BSS;
 
@@ -1678,7 +1932,7 @@
 	/*
 	 * Store led settings, for correct led behaviour.
 	 */
-#ifdef CONFIG_RT73USB_LEDS
+#ifdef CONFIG_RT2X00_LIB_LEDS
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
 
 	rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
@@ -1711,7 +1965,7 @@
 	rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
 			   rt2x00_get_field16(eeprom,
 					      EEPROM_LED_POLARITY_RDY_A));
-#endif /* CONFIG_RT73USB_LEDS */
+#endif /* CONFIG_RT2X00_LIB_LEDS */
 
 	return 0;
 }
@@ -1852,10 +2106,11 @@
 };
 
 
-static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
-	u8 *txpower;
+	struct channel_info *info;
+	char *tx_power;
 	unsigned int i;
 
 	/*
@@ -1872,20 +2127,10 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
-	 * Convert tx_power array in eeprom.
-	 */
-	txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
-	for (i = 0; i < 14; i++)
-		txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
-
-	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
-	spec->tx_power_a = NULL;
-	spec->tx_power_bg = txpower;
-	spec->tx_power_default = DEFAULT_TXPOWER;
 
 	if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
 		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
@@ -1903,14 +2148,26 @@
 		spec->channels = rf_vals_5225_2527;
 	}
 
-	if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
-	    rt2x00_rf(&rt2x00dev->chip, RF5226)) {
-		txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
-		for (i = 0; i < 14; i++)
-			txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
 
-		spec->tx_power_a = txpower;
+	spec->channels_info = info;
+
+	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
+	for (i = 0; i < 14; i++)
+		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+
+	if (spec->num_channels > 14) {
+		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
+		for (i = 14; i < spec->num_channels; i++)
+			info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
 	}
+
+	return 0;
 }
 
 static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1931,13 +2188,17 @@
 	/*
 	 * Initialize hw specifications.
 	 */
-	rt73usb_probe_hw_mode(rt2x00dev);
+	retval = rt73usb_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
 
 	/*
 	 * This device requires firmware.
 	 */
 	__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
 	__set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
+	if (!modparam_nohwcrypt)
+		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
 
 	/*
 	 * Set the rssi offset.
@@ -1964,6 +2225,63 @@
 	return 0;
 }
 
+static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+			   const struct ieee80211_tx_queue_params *params)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct data_queue *queue;
+	struct rt2x00_field32 field;
+	int retval;
+	u32 reg;
+
+	/*
+	 * First pass the configuration through rt2x00lib, which will
+	 * update the queue settings and validate the input. After that,
+	 * we are free to update the registers based on the value
+	 * in the queue parameter.
+	 */
+	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+	if (retval)
+		return retval;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+
+	/* Update WMM TXOP register */
+	if (queue_idx < 2) {
+		field.bit_offset = queue_idx * 16;
+		field.bit_mask = 0xffff << field.bit_offset;
+
+		rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
+		rt2x00_set_field32(&reg, field, queue->txop);
+		rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
+	} else if (queue_idx < 4) {
+		field.bit_offset = (queue_idx - 2) * 16;
+		field.bit_mask = 0xffff << field.bit_offset;
+
+		rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
+		rt2x00_set_field32(&reg, field, queue->txop);
+		rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
+	}
+
+	/* Update WMM registers */
+	field.bit_offset = queue_idx * 4;
+	field.bit_mask = 0xf << field.bit_offset;
+
+	rt73usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->aifs);
+	rt73usb_register_write(rt2x00dev, AIFSN_CSR, reg);
+
+	rt73usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_min);
+	rt73usb_register_write(rt2x00dev, CWMIN_CSR, reg);
+
+	rt73usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_max);
+	rt73usb_register_write(rt2x00dev, CWMAX_CSR, reg);
+
+	return 0;
+}
+
 #if 0
 /*
  * Mac80211 demands get_tsf must be atomic.
@@ -1997,10 +2315,11 @@
 	.config			= rt2x00mac_config,
 	.config_interface	= rt2x00mac_config_interface,
 	.configure_filter	= rt2x00mac_configure_filter,
+	.set_key		= rt2x00mac_set_key,
 	.get_stats		= rt2x00mac_get_stats,
 	.set_retry_limit	= rt73usb_set_retry_limit,
 	.bss_info_changed	= rt2x00mac_bss_info_changed,
-	.conf_tx		= rt2x00mac_conf_tx,
+	.conf_tx		= rt73usb_conf_tx,
 	.get_tx_stats		= rt2x00mac_get_tx_stats,
 	.get_tsf		= rt73usb_get_tsf,
 };
@@ -2024,6 +2343,8 @@
 	.get_tx_data_len	= rt73usb_get_tx_data_len,
 	.kick_tx_queue		= rt73usb_kick_tx_queue,
 	.fill_rxdone		= rt73usb_fill_rxdone,
+	.config_shared_key	= rt73usb_config_shared_key,
+	.config_pairwise_key	= rt73usb_config_pairwise_key,
 	.config_filter		= rt73usb_config_filter,
 	.config_intf		= rt73usb_config_intf,
 	.config_erp		= rt73usb_config_erp,
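
The rt73usb_config_shared_key() handler above treats SEC_CSR0 as a bitmap of
valid key slots, with each BSS owning four consecutive slots. A simplified
standalone model of that slot selection; the driver itself masks the register
and uses ffz() instead of an explicit loop:

#include <stdint.h>
#include <stdio.h>

/*
 * Return the absolute shared-key slot to use for a BSS, or -1 when all
 * four slots owned by that BSS are already valid (-ENOSPC in the driver).
 */
static int pick_shared_key_slot(uint32_t valid_bitmap, unsigned int bssidx)
{
	unsigned int base = 4 * bssidx;
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (!(valid_bitmap & (1u << (base + i))))
			return base + i;

	return -1;
}

int main(void)
{
	/* BSS 0 already uses slots 0 and 1; BSS 1 is completely full. */
	printf("BSS 0 next slot: %d\n", pick_shared_key_slot(0x000000f3, 0));
	printf("BSS 1 next slot: %d\n", pick_shared_key_slot(0x000000f3, 1));
	return 0;
}
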
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 1484935..868386c 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -92,6 +92,16 @@
 #define PAIRWISE_KEY_TABLE_BASE		0x1200
 #define PAIRWISE_TA_TABLE_BASE		0x1a00
 
+#define SHARED_KEY_ENTRY(__idx) \
+	( SHARED_KEY_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_key_entry)) )
+#define PAIRWISE_KEY_ENTRY(__idx) \
+	( PAIRWISE_KEY_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_key_entry)) )
+#define PAIRWISE_TA_ENTRY(__idx) \
+	( PAIRWISE_TA_TABLE_BASE + \
+		((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
+
 struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
@@ -100,7 +110,8 @@
 
 struct hw_pairwise_ta_entry {
 	u8 address[6];
-	u8 reserved[2];
+	u8 cipher;
+	u8 reserved;
 } __attribute__ ((packed));
 
 /*
@@ -563,6 +574,10 @@
  * SEC_CSR4: Pairwise key table lookup control.
  */
 #define SEC_CSR4			0x30b0
+#define SEC_CSR4_ENABLE_BSS0		FIELD32(0x00000001)
+#define SEC_CSR4_ENABLE_BSS1		FIELD32(0x00000002)
+#define SEC_CSR4_ENABLE_BSS2		FIELD32(0x00000004)
+#define SEC_CSR4_ENABLE_BSS3		FIELD32(0x00000008)
 
 /*
  * SEC_CSR5: shared key table security mode register.
@@ -1010,8 +1025,10 @@
 
 /*
  * Word4
+ * ICV: Received ICV of the originally encrypted frame.
+ * NOTE: This is a guess; the official definition is "reserved".
  */
-#define RXD_W4_RESERVED			FIELD32(0xffffffff)
+#define RXD_W4_ICV			FIELD32(0xffffffff)
 
 /*
  * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1033,17 +1050,10 @@
 #define MAX_TXPOWER	31
 #define DEFAULT_TXPOWER	24
 
-#define TXPOWER_FROM_DEV(__txpower)		\
-({						\
-	((__txpower) > MAX_TXPOWER) ?		\
-		DEFAULT_TXPOWER : (__txpower);	\
-})
+#define TXPOWER_FROM_DEV(__txpower) \
+	(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_TO_DEV(__txpower)			\
-({							\
-	((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER :	\
-	(((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER :	\
-	(__txpower));					\
-})
+#define TXPOWER_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
 
 #endif /* RT73USB_H */
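
The reworked TXPOWER macros above validate values read from the EEPROM
against MAX_TXPOWER and clamp values written to the device into the legal
range (clamp_t() in the kernel). A plain C sketch of both conversions,
assuming MIN_TXPOWER is 0 for illustration:

#include <stdio.h>

#define MIN_TXPOWER	0	/* assumed value for this sketch */
#define MAX_TXPOWER	31
#define DEFAULT_TXPOWER	24

/* EEPROM -> driver: out-of-range values fall back to the default. */
static int txpower_from_dev(unsigned char txpower)
{
	return (txpower > MAX_TXPOWER) ? DEFAULT_TXPOWER : txpower;
}

/* driver -> device: clamp into the valid range, like clamp_t(char, ...). */
static int txpower_to_dev(int txpower)
{
	if (txpower < MIN_TXPOWER)
		return MIN_TXPOWER;
	if (txpower > MAX_TXPOWER)
		return MAX_TXPOWER;
	return txpower;
}

int main(void)
{
	printf("from_dev(0x3f) = %d\n", txpower_from_dev(0x3f));
	printf("to_dev(-5) = %d, to_dev(40) = %d\n",
	       txpower_to_dev(-5), txpower_to_dev(40));
	return 0;
}
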
diff --git a/drivers/net/wireless/rtl8180.h b/drivers/net/wireless/rtl8180.h
index 082a11f..8721282 100644
--- a/drivers/net/wireless/rtl8180.h
+++ b/drivers/net/wireless/rtl8180.h
@@ -24,20 +24,6 @@
 #define ANAPARAM_PWR1_SHIFT	20
 #define ANAPARAM_PWR1_MASK	(0x7F << ANAPARAM_PWR1_SHIFT)
 
-enum rtl8180_tx_desc_flags {
-	RTL8180_TX_DESC_FLAG_NO_ENC	= (1 << 15),
-	RTL8180_TX_DESC_FLAG_TX_OK	= (1 << 15),
-	RTL8180_TX_DESC_FLAG_SPLCP	= (1 << 16),
-	RTL8180_TX_DESC_FLAG_RX_UNDER	= (1 << 16),
-	RTL8180_TX_DESC_FLAG_MOREFRAG	= (1 << 17),
-	RTL8180_TX_DESC_FLAG_CTS	= (1 << 18),
-	RTL8180_TX_DESC_FLAG_RTS	= (1 << 23),
-	RTL8180_TX_DESC_FLAG_LS		= (1 << 28),
-	RTL8180_TX_DESC_FLAG_FS		= (1 << 29),
-	RTL8180_TX_DESC_FLAG_DMA	= (1 << 30),
-	RTL8180_TX_DESC_FLAG_OWN	= (1 << 31)
-};
-
 struct rtl8180_tx_desc {
 	__le32 flags;
 	__le16 rts_duration;
@@ -52,23 +38,6 @@
 	u32 reserved[2];
 } __attribute__ ((packed));
 
-enum rtl8180_rx_desc_flags {
-	RTL8180_RX_DESC_FLAG_ICV_ERR	= (1 << 12),
-	RTL8180_RX_DESC_FLAG_CRC32_ERR	= (1 << 13),
-	RTL8180_RX_DESC_FLAG_PM		= (1 << 14),
-	RTL8180_RX_DESC_FLAG_RX_ERR	= (1 << 15),
-	RTL8180_RX_DESC_FLAG_BCAST	= (1 << 16),
-	RTL8180_RX_DESC_FLAG_PAM	= (1 << 17),
-	RTL8180_RX_DESC_FLAG_MCAST	= (1 << 18),
-	RTL8180_RX_DESC_FLAG_SPLCP	= (1 << 25),
-	RTL8180_RX_DESC_FLAG_FOF	= (1 << 26),
-	RTL8180_RX_DESC_FLAG_DMA_FAIL	= (1 << 27),
-	RTL8180_RX_DESC_FLAG_LS		= (1 << 28),
-	RTL8180_RX_DESC_FLAG_FS		= (1 << 29),
-	RTL8180_RX_DESC_FLAG_EOR	= (1 << 30),
-	RTL8180_RX_DESC_FLAG_OWN	= (1 << 31)
-};
-
 struct rtl8180_rx_desc {
 	__le32 flags;
 	__le32 flags2;
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index b7172a1..abcd641 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -110,12 +110,12 @@
 		struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
 		u32 flags = le32_to_cpu(entry->flags);
 
-		if (flags & RTL8180_RX_DESC_FLAG_OWN)
+		if (flags & RTL818X_RX_DESC_FLAG_OWN)
 			return;
 
-		if (unlikely(flags & (RTL8180_RX_DESC_FLAG_DMA_FAIL |
-				      RTL8180_RX_DESC_FLAG_FOF |
-				      RTL8180_RX_DESC_FLAG_RX_ERR)))
+		if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
+				      RTL818X_RX_DESC_FLAG_FOF |
+				      RTL818X_RX_DESC_FLAG_RX_ERR)))
 			goto done;
 		else {
 			u32 flags2 = le32_to_cpu(entry->flags2);
@@ -140,7 +140,7 @@
 			rx_status.band = dev->conf.channel->band;
 			rx_status.mactime = le64_to_cpu(entry->tsft);
 			rx_status.flag |= RX_FLAG_TSFT;
-			if (flags & RTL8180_RX_DESC_FLAG_CRC32_ERR)
+			if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
 				rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 
 			ieee80211_rx_irqsafe(dev, skb, &rx_status);
@@ -154,10 +154,10 @@
 
 	done:
 		entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
-		entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN |
+		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
 					   MAX_RX_SIZE);
 		if (priv->rx_idx == 31)
-			entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR);
+			entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
 		priv->rx_idx = (priv->rx_idx + 1) % 32;
 	}
 }
@@ -173,7 +173,7 @@
 		struct ieee80211_tx_info *info;
 		u32 flags = le32_to_cpu(entry->flags);
 
-		if (flags & RTL8180_TX_DESC_FLAG_OWN)
+		if (flags & RTL818X_TX_DESC_FLAG_OWN)
 			return;
 
 		ring->idx = (ring->idx + 1) % ring->entries;
@@ -185,7 +185,7 @@
 		memset(&info->status, 0, sizeof(info->status));
 
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-			if (flags & RTL8180_TX_DESC_FLAG_TX_OK)
+			if (flags & RTL818X_TX_DESC_FLAG_TX_OK)
 				info->flags |= IEEE80211_TX_STAT_ACK;
 			else
 				info->status.excessive_retries = 1;
@@ -252,20 +252,20 @@
 	mapping = pci_map_single(priv->pdev, skb->data,
 				 skb->len, PCI_DMA_TODEVICE);
 
-	tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS |
-		   RTL8180_TX_DESC_FLAG_LS |
+	tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
+		   RTL818X_TX_DESC_FLAG_LS |
 		   (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
 		   skb->len;
 
 	if (priv->r8185)
-		tx_flags |= RTL8180_TX_DESC_FLAG_DMA |
-			    RTL8180_TX_DESC_FLAG_NO_ENC;
+		tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
+			    RTL818X_TX_DESC_FLAG_NO_ENC;
 
 	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
-		tx_flags |= RTL8180_TX_DESC_FLAG_RTS;
+		tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
 		tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
-		tx_flags |= RTL8180_TX_DESC_FLAG_CTS;
+		tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
 		tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 	}
 
@@ -446,10 +446,10 @@
 		*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
 					  MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
 		entry->rx_buf = cpu_to_le32(*mapping);
-		entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN |
+		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
 					   MAX_RX_SIZE);
 	}
-	entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR);
+	entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
 	return 0;
 }
 
@@ -615,7 +615,7 @@
 	reg |= RTL818X_CMD_TX_ENABLE;
 	rtl818x_iowrite8(priv, &priv->map->CMD, reg);
 
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	return 0;
 
  err_free_rings:
@@ -633,7 +633,7 @@
 	u8 reg;
 	int i;
 
-	priv->mode = IEEE80211_IF_TYPE_INVALID;
+	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
 
 	rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
 
@@ -661,11 +661,11 @@
 {
 	struct rtl8180_priv *priv = dev->priv;
 
-	if (priv->mode != IEEE80211_IF_TYPE_MNTR)
+	if (priv->mode != NL80211_IFTYPE_MONITOR)
 		return -EOPNOTSUPP;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->mode = conf->type;
 		break;
 	default:
@@ -688,7 +688,7 @@
 				     struct ieee80211_if_init_conf *conf)
 {
 	struct rtl8180_priv *priv = dev->priv;
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	priv->vif = NULL;
 }
 
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 5a9515c..e82bb4d 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -58,12 +58,6 @@
 
 /* {rtl8187,rtl8187b}_tx_info is in skb */
 
-/* Tx flags are common between rtl8187 and rtl8187b */
-#define RTL8187_TX_FLAG_NO_ENCRYPT	(1 << 15)
-#define RTL8187_TX_FLAG_MORE_FRAG	(1 << 17)
-#define RTL8187_TX_FLAG_CTS		(1 << 18)
-#define RTL8187_TX_FLAG_RTS		(1 << 23)
-
 struct rtl8187_tx_hdr {
 	__le32 flags;
 	__le16 rts_duration;
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index ca5deb6..e990261 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -187,18 +187,18 @@
 	}
 
 	flags = skb->len;
-	flags |= RTL8187_TX_FLAG_NO_ENCRYPT;
+	flags |= RTL818X_TX_DESC_FLAG_NO_ENC;
 
 	flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
 	if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
-		flags |= RTL8187_TX_FLAG_MORE_FRAG;
+		flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
 	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
-		flags |= RTL8187_TX_FLAG_RTS;
+		flags |= RTL818X_TX_DESC_FLAG_RTS;
 		flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 		rts_dur = ieee80211_rts_duration(dev, priv->vif,
 						 skb->len, info);
 	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
-		flags |= RTL8187_TX_FLAG_CTS;
+		flags |= RTL818X_TX_DESC_FLAG_CTS;
 		flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 	}
 
@@ -354,7 +354,7 @@
 	rx_status.freq = dev->conf.channel->center_freq;
 	rx_status.band = dev->conf.channel->band;
 	rx_status.flag |= RX_FLAG_TSFT;
-	if (flags & (1 << 13))
+	if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
 		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 	ieee80211_rx_irqsafe(dev, skb, &rx_status);
 
@@ -836,11 +836,11 @@
 	struct rtl8187_priv *priv = dev->priv;
 	int i;
 
-	if (priv->mode != IEEE80211_IF_TYPE_MNTR)
+	if (priv->mode != NL80211_IFTYPE_MONITOR)
 		return -EOPNOTSUPP;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		priv->mode = conf->type;
 		break;
 	default:
@@ -865,7 +865,7 @@
 {
 	struct rtl8187_priv *priv = dev->priv;
 	mutex_lock(&priv->conf_mutex);
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	priv->vif = NULL;
 	mutex_unlock(&priv->conf_mutex);
 }
@@ -1057,7 +1057,7 @@
 	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
 
-	priv->mode = IEEE80211_IF_TYPE_MNTR;
+	priv->mode = NL80211_IFTYPE_MONITOR;
 	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 		     IEEE80211_HW_RX_INCLUDES_FCS;
 
@@ -1184,6 +1184,8 @@
 		dev->max_signal = 65;
 	}
 
+	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
 	if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
 		printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
 		       " info!\n");
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
index 00900fe..3538b15 100644
--- a/drivers/net/wireless/rtl818x.h
+++ b/drivers/net/wireless/rtl818x.h
@@ -193,4 +193,39 @@
 	void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
 };
 
+/* Tx/Rx flags are common between RTL818X chips */
+
+enum rtl818x_tx_desc_flags {
+	RTL818X_TX_DESC_FLAG_NO_ENC	= (1 << 15),
+	RTL818X_TX_DESC_FLAG_TX_OK	= (1 << 15),
+	RTL818X_TX_DESC_FLAG_SPLCP	= (1 << 16),
+	RTL818X_TX_DESC_FLAG_RX_UNDER	= (1 << 16),
+	RTL818X_TX_DESC_FLAG_MOREFRAG	= (1 << 17),
+	RTL818X_TX_DESC_FLAG_CTS	= (1 << 18),
+	RTL818X_TX_DESC_FLAG_RTS	= (1 << 23),
+	RTL818X_TX_DESC_FLAG_LS		= (1 << 28),
+	RTL818X_TX_DESC_FLAG_FS		= (1 << 29),
+	RTL818X_TX_DESC_FLAG_DMA	= (1 << 30),
+	RTL818X_TX_DESC_FLAG_OWN	= (1 << 31)
+};
+
+enum rtl818x_rx_desc_flags {
+	RTL818X_RX_DESC_FLAG_ICV_ERR	= (1 << 12),
+	RTL818X_RX_DESC_FLAG_CRC32_ERR	= (1 << 13),
+	RTL818X_RX_DESC_FLAG_PM		= (1 << 14),
+	RTL818X_RX_DESC_FLAG_RX_ERR	= (1 << 15),
+	RTL818X_RX_DESC_FLAG_BCAST	= (1 << 16),
+	RTL818X_RX_DESC_FLAG_PAM	= (1 << 17),
+	RTL818X_RX_DESC_FLAG_MCAST	= (1 << 18),
+	RTL818X_RX_DESC_FLAG_QOS	= (1 << 19), /* RTL8187(B) only */
+	RTL818X_RX_DESC_FLAG_TRSW	= (1 << 24), /* RTL8187(B) only */
+	RTL818X_RX_DESC_FLAG_SPLCP	= (1 << 25),
+	RTL818X_RX_DESC_FLAG_FOF	= (1 << 26),
+	RTL818X_RX_DESC_FLAG_DMA_FAIL	= (1 << 27),
+	RTL818X_RX_DESC_FLAG_LS		= (1 << 28),
+	RTL818X_RX_DESC_FLAG_FS		= (1 << 29),
+	RTL818X_RX_DESC_FLAG_EOR	= (1 << 30),
+	RTL818X_RX_DESC_FLAG_OWN	= (1 << 31)
+};
+
 #endif /* RTL818X_H */
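
The rtl8180/rtl8187 hunks earlier in this diff now build their TX descriptor
words from this shared flag set. A standalone sketch of how the flags are
OR'd together with the rate and length fields; the flag bit positions are
copied from the enum above, while the rate and length values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TX_FLAG_NO_ENC	(1u << 15)
#define TX_FLAG_RTS	(1u << 23)
#define TX_FLAG_LS	(1u << 28)
#define TX_FLAG_FS	(1u << 29)
#define TX_FLAG_OWN	(1u << 31)

static uint32_t build_tx_flags(unsigned int len, unsigned int rate_hw_value,
			       int use_rts, unsigned int rts_rate_hw_value)
{
	/* OWN/FS/LS, the rate shifted to bit 24, and the length in the low bits. */
	uint32_t flags = TX_FLAG_OWN | TX_FLAG_FS | TX_FLAG_LS |
			 ((uint32_t)rate_hw_value << 24) | len;

	flags |= TX_FLAG_NO_ENC;		/* no hardware encryption */

	if (use_rts) {
		flags |= TX_FLAG_RTS;
		flags |= (uint32_t)rts_rate_hw_value << 19;
	}
	return flags;
}

int main(void)
{
	printf("tx flags = 0x%08x\n", build_tx_flags(1500, 0xb, 1, 0x2));
	return 0;
}
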
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 98df9bc..67b26d3 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/firmware.h>
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
 #include <pcmcia/cistpl.h>
@@ -34,9 +33,6 @@
 
 #include "orinoco.h"
 
-static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
-static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
-
 /********************************************************************/
 /* Module stuff							    */
 /********************************************************************/
@@ -71,161 +67,11 @@
 static int spectrum_cs_config(struct pcmcia_device *link);
 static void spectrum_cs_release(struct pcmcia_device *link);
 
-/********************************************************************/
-/* Firmware downloader						    */
-/********************************************************************/
-
-/* Position of PDA in the adapter memory */
-#define EEPROM_ADDR	0x3000
-#define EEPROM_LEN	0x200
-#define PDA_OFFSET	0x100
-
-#define PDA_ADDR	(EEPROM_ADDR + PDA_OFFSET)
-#define PDA_WORDS	((EEPROM_LEN - PDA_OFFSET) / 2)
-
 /* Constants for the CISREG_CCSR register */
 #define HCR_RUN		0x07	/* run firmware after reset */
 #define HCR_IDLE	0x0E	/* don't run firmware after reset */
 #define HCR_MEM16	0x10	/* memory width bit, should be preserved */
 
-/*
- * AUX port access.  To unlock the AUX port write the access keys to the
- * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
- * register.  Then read it and make sure it's HERMES_AUX_ENABLED.
- */
-#define HERMES_AUX_ENABLE	0x8000	/* Enable auxiliary port access */
-#define HERMES_AUX_DISABLE	0x4000	/* Disable to auxiliary port access */
-#define HERMES_AUX_ENABLED	0xC000	/* Auxiliary port is open */
-
-#define HERMES_AUX_PW0	0xFE01
-#define HERMES_AUX_PW1	0xDC23
-#define HERMES_AUX_PW2	0xBA45
-
-/* End markers */
-#define PDI_END		0x00000000	/* End of PDA */
-#define BLOCK_END	0xFFFFFFFF	/* Last image block */
-#define TEXT_END	0x1A		/* End of text header */
-
-/*
- * The following structures have little-endian fields denoted by
- * the leading underscore.  Don't access them directly - use inline
- * functions defined below.
- */
-
-/*
- * The binary image to be downloaded consists of series of data blocks.
- * Each block has the following structure.
- */
-struct dblock {
-	__le32 addr;		/* adapter address where to write the block */
-	__le16 len;		/* length of the data only, in bytes */
-	char data[0];		/* data to be written */
-} __attribute__ ((packed));
-
-/*
- * Plug Data References are located in in the image after the last data
- * block.  They refer to areas in the adapter memory where the plug data
- * items with matching ID should be written.
- */
-struct pdr {
-	__le32 id;		/* record ID */
-	__le32 addr;		/* adapter address where to write the data */
-	__le32 len;		/* expected length of the data, in bytes */
-	char next[0];		/* next PDR starts here */
-} __attribute__ ((packed));
-
-
-/*
- * Plug Data Items are located in the EEPROM read from the adapter by
- * primary firmware.  They refer to the device-specific data that should
- * be plugged into the secondary firmware.
- */
-struct pdi {
-	__le16 len;		/* length of ID and data, in words */
-	__le16 id;		/* record ID */
-	char data[0];		/* plug data */
-} __attribute__ ((packed));
-
-
-/* Functions for access to little-endian data */
-static inline u32
-dblock_addr(const struct dblock *blk)
-{
-	return le32_to_cpu(blk->addr);
-}
-
-static inline u32
-dblock_len(const struct dblock *blk)
-{
-	return le16_to_cpu(blk->len);
-}
-
-static inline u32
-pdr_id(const struct pdr *pdr)
-{
-	return le32_to_cpu(pdr->id);
-}
-
-static inline u32
-pdr_addr(const struct pdr *pdr)
-{
-	return le32_to_cpu(pdr->addr);
-}
-
-static inline u32
-pdr_len(const struct pdr *pdr)
-{
-	return le32_to_cpu(pdr->len);
-}
-
-static inline u32
-pdi_id(const struct pdi *pdi)
-{
-	return le16_to_cpu(pdi->id);
-}
-
-/* Return length of the data only, in bytes */
-static inline u32
-pdi_len(const struct pdi *pdi)
-{
-	return 2 * (le16_to_cpu(pdi->len) - 1);
-}
-
-
-/* Set address of the auxiliary port */
-static inline void
-spectrum_aux_setaddr(hermes_t *hw, u32 addr)
-{
-	hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
-	hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
-}
-
-
-/* Open access to the auxiliary port */
-static int
-spectrum_aux_open(hermes_t *hw)
-{
-	int i;
-
-	/* Already open? */
-	if (hermes_read_reg(hw, HERMES_CONTROL) == HERMES_AUX_ENABLED)
-		return 0;
-
-	hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
-	hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
-	hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
-	hermes_write_reg(hw, HERMES_CONTROL, HERMES_AUX_ENABLE);
-
-	for (i = 0; i < 20; i++) {
-		udelay(10);
-		if (hermes_read_reg(hw, HERMES_CONTROL) ==
-		    HERMES_AUX_ENABLED)
-			return 0;
-	}
-
-	return -EBUSY;
-}
-
 
 #define CS_CHECK(fn, ret) \
   do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
@@ -292,252 +138,6 @@
 	return -ENODEV;
 }
 
-
-/*
- * Scan PDR for the record with the specified RECORD_ID.
- * If it's not found, return NULL.
- */
-static struct pdr *
-spectrum_find_pdr(struct pdr *first_pdr, u32 record_id)
-{
-	struct pdr *pdr = first_pdr;
-
-	while (pdr_id(pdr) != PDI_END) {
-		/*
-		 * PDR area is currently not terminated by PDI_END.
-		 * It's followed by CRC records, which have the type
-		 * field where PDR has length.  The type can be 0 or 1.
-		 */
-		if (pdr_len(pdr) < 2)
-			return NULL;
-
-		/* If the record ID matches, we are done */
-		if (pdr_id(pdr) == record_id)
-			return pdr;
-
-		pdr = (struct pdr *) pdr->next;
-	}
-	return NULL;
-}
-
-
-/* Process one Plug Data Item - find corresponding PDR and plug it */
-static int
-spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
-{
-	struct pdr *pdr;
-
-	/* Find the PDI corresponding to this PDR */
-	pdr = spectrum_find_pdr(first_pdr, pdi_id(pdi));
-
-	/* No match is found, safe to ignore */
-	if (!pdr)
-		return 0;
-
-	/* Lengths of the data in PDI and PDR must match */
-	if (pdi_len(pdi) != pdr_len(pdr))
-		return -EINVAL;
-
-	/* do the actual plugging */
-	spectrum_aux_setaddr(hw, pdr_addr(pdr));
-	hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
-
-	return 0;
-}
-
-
-/* Read PDA from the adapter */
-static int
-spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
-{
-	int ret;
-	int pda_size;
-
-	/* Issue command to read EEPROM */
-	ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
-	if (ret)
-		return ret;
-
-	/* Open auxiliary port */
-	ret = spectrum_aux_open(hw);
-	if (ret)
-		return ret;
-
-	/* read PDA from EEPROM */
-	spectrum_aux_setaddr(hw, PDA_ADDR);
-	hermes_read_words(hw, HERMES_AUXDATA, pda, pda_len / 2);
-
-	/* Check PDA length */
-	pda_size = le16_to_cpu(pda[0]);
-	if (pda_size > pda_len)
-		return -EINVAL;
-
-	return 0;
-}
-
-
-/* Parse PDA and write the records into the adapter */
-static int
-spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
-		   __le16 *pda)
-{
-	int ret;
-	struct pdi *pdi;
-	struct pdr *first_pdr;
-	const struct dblock *blk = first_block;
-
-	/* Skip all blocks to locate Plug Data References */
-	while (dblock_addr(blk) != BLOCK_END)
-		blk = (struct dblock *) &blk->data[dblock_len(blk)];
-
-	first_pdr = (struct pdr *) blk;
-
-	/* Go through every PDI and plug them into the adapter */
-	pdi = (struct pdi *) (pda + 2);
-	while (pdi_id(pdi) != PDI_END) {
-		ret = spectrum_plug_pdi(hw, first_pdr, pdi);
-		if (ret)
-			return ret;
-
-		/* Increment to the next PDI */
-		pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
-	}
-	return 0;
-}
-
-
-/* Load firmware blocks into the adapter */
-static int
-spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
-{
-	const struct dblock *blk;
-	u32 blkaddr;
-	u32 blklen;
-
-	blk = first_block;
-	blkaddr = dblock_addr(blk);
-	blklen = dblock_len(blk);
-
-	while (dblock_addr(blk) != BLOCK_END) {
-		spectrum_aux_setaddr(hw, blkaddr);
-		hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
-				   blklen);
-
-		blk = (struct dblock *) &blk->data[blklen];
-		blkaddr = dblock_addr(blk);
-		blklen = dblock_len(blk);
-	}
-	return 0;
-}
-
-
-/*
- * Process a firmware image - stop the card, load the firmware, reset
- * the card and make sure it responds.  For the secondary firmware take
- * care of the PDA - read it and then write it on top of the firmware.
- */
-static int
-spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
-		  const unsigned char *image, int secondary)
-{
-	int ret;
-	const unsigned char *ptr;
-	const struct dblock *first_block;
-
-	/* Plug Data Area (PDA) */
-	__le16 pda[PDA_WORDS];
-
-	/* Binary block begins after the 0x1A marker */
-	ptr = image;
-	while (*ptr++ != TEXT_END);
-	first_block = (const struct dblock *) ptr;
-
-	/* Read the PDA */
-	if (secondary) {
-		ret = spectrum_read_pda(hw, pda, sizeof(pda));
-		if (ret)
-			return ret;
-	}
-
-	/* Stop the firmware, so that it can be safely rewritten */
-	ret = spectrum_reset(link, 1);
-	if (ret)
-		return ret;
-
-	/* Program the adapter with new firmware */
-	ret = spectrum_load_blocks(hw, first_block);
-	if (ret)
-		return ret;
-
-	/* Write the PDA to the adapter */
-	if (secondary) {
-		ret = spectrum_apply_pda(hw, first_block, pda);
-		if (ret)
-			return ret;
-	}
-
-	/* Run the firmware */
-	ret = spectrum_reset(link, 0);
-	if (ret)
-		return ret;
-
-	/* Reset hermes chip and make sure it responds */
-	ret = hermes_init(hw);
-
-	/* hermes_reset() should return 0 with the secondary firmware */
-	if (secondary && ret != 0)
-		return -ENODEV;
-
-	/* And this should work with any firmware */
-	if (!hermes_present(hw))
-		return -ENODEV;
-
-	return 0;
-}
-
-
-/*
- * Download the firmware into the card, this also does a PCMCIA soft
- * reset on the card, to make sure it's in a sane state.
- */
-static int
-spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link)
-{
-	int ret;
-	const struct firmware *fw_entry;
-
-	if (request_firmware(&fw_entry, primary_fw_name,
-			     &handle_to_dev(link)) != 0) {
-		printk(KERN_ERR PFX "Cannot find firmware: %s\n",
-		       primary_fw_name);
-		return -ENOENT;
-	}
-
-	/* Load primary firmware */
-	ret = spectrum_dl_image(hw, link, fw_entry->data, 0);
-	release_firmware(fw_entry);
-	if (ret) {
-		printk(KERN_ERR PFX "Primary firmware download failed\n");
-		return ret;
-	}
-
-	if (request_firmware(&fw_entry, secondary_fw_name,
-			     &handle_to_dev(link)) != 0) {
-		printk(KERN_ERR PFX "Cannot find firmware: %s\n",
-		       secondary_fw_name);
-		return -ENOENT;
-	}
-
-	/* Load secondary firmware */
-	ret = spectrum_dl_image(hw, link, fw_entry->data, 1);
-	release_firmware(fw_entry);
-	if (ret) {
-		printk(KERN_ERR PFX "Secondary firmware download failed\n");
-	}
-
-	return ret;
-}
-
 /********************************************************************/
 /* Device methods     						    */
 /********************************************************************/
@@ -547,22 +147,22 @@
 {
 	struct orinoco_pccard *card = priv->card;
 	struct pcmcia_device *link = card->p_dev;
-	int err;
 
-	if (!hermes_present(&priv->hw)) {
-		/* The firmware needs to be reloaded */
-		if (spectrum_dl_firmware(&priv->hw, link) != 0) {
-			printk(KERN_ERR PFX "Firmware download failed\n");
-			err = -ENODEV;
-		}
-	} else {
-		/* Soft reset using COR and HCR */
-		spectrum_reset(link, 0);
-	}
+	/* Soft reset using COR and HCR */
+	spectrum_reset(link, 0);
 
 	return 0;
 }
 
+static int
+spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
+{
+	struct orinoco_pccard *card = priv->card;
+	struct pcmcia_device *link = card->p_dev;
+
+	return spectrum_reset(link, idle);
+}
+
 /********************************************************************/
 /* PCMCIA stuff     						    */
 /********************************************************************/
@@ -582,7 +182,9 @@
 	struct orinoco_private *priv;
 	struct orinoco_pccard *card;
 
-	dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset);
+	dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
+			       spectrum_cs_hard_reset,
+			       spectrum_cs_stop_firmware);
 	if (! dev)
 		return -ENOMEM;
 	priv = netdev_priv(dev);
@@ -593,7 +195,7 @@
 	link->priv = dev;
 
 	/* Interrupt setup */
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
 	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	link->irq.Handler = orinoco_interrupt;
 	link->irq.Instance = dev; 
@@ -784,7 +386,7 @@
 	dev->irq = link->irq.AssignedIRQ;
 	card->node.major = card->node.minor = 0;
 
-	/* Reset card and download firmware */
+	/* Reset card */
 	if (spectrum_cs_hard_reset(priv) != 0) {
 		goto failed;
 	}
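
The spectrum_cs changes above move firmware loading out of the driver: alloc_orinocodev() now also receives the struct device plus two callbacks, one for a plain hard reset and one for stopping the firmware, so the orinoco core can drive the download itself. A minimal stand-alone sketch of that callback split (the struct and function names below are illustrative, not the orinoco API):

#include <stdio.h>

struct reset_ops {
	int (*hard_reset)(void *priv);               /* soft reset via COR/HCR      */
	int (*stop_firmware)(void *priv, int idle);  /* hold the card idle for load */
};

static int demo_hard_reset(void *priv)              { (void)priv; return 0; }
static int demo_stop_firmware(void *priv, int idle) { (void)priv; (void)idle; return 0; }

int main(void)
{
	struct reset_ops ops = {
		.hard_reset    = demo_hard_reset,
		.stop_firmware = demo_stop_firmware,
	};
	/* core code can now invoke either operation independently */
	printf("%d %d\n", ops.hard_reset(NULL), ops.stop_firmware(NULL, 1));
	return 0;
}

Keeping the two operations separate lets the core halt the card before programming it, while everywhere else a lighter COR/HCR reset is enough.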
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 00a3559..b5de38a 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -4496,7 +4496,7 @@
   p_dev->io.IOAddrLines = 3;
 
   /* Interrupt setup */
-  p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+  p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
   p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
   p_dev->irq.Handler = wavelan_interrupt;
 
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 3771419..74a5ad2 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -79,7 +79,7 @@
 module_param(pc_debug, int, 0);
 #define dprintk(n, format, args...) \
 	{ if (pc_debug > (n)) \
-		printk(KERN_INFO "%s: " format "\n", __FUNCTION__ , ##args); }
+		printk(KERN_INFO "%s: " format "\n", __func__ , ##args); }
 #else
 #define dprintk(n, format, args...)
 #endif
@@ -470,7 +470,7 @@
 			spin_unlock_irqrestore(&this->lock, flags);
 			rc = wait_event_interruptible(this->wait,
 				this->sig_pwr_mgmt_confirm.status != 255);
-			printk(KERN_INFO "%s: %s status=%d\n", __FUNCTION__,
+			printk(KERN_INFO "%s: %s status=%d\n", __func__,
 			       suspend ? "suspend" : "resume",
 			       this->sig_pwr_mgmt_confirm.status);
 			goto out;
@@ -1199,7 +1199,7 @@
 		}
 		WL3501_NOPLOOP(10);
 	}
-	printk(KERN_WARNING "%s: failed to reset the board!\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: failed to reset the board!\n", __func__);
 	rc = -ENODEV;
 out:
 	return rc;
@@ -1250,7 +1250,7 @@
 out:
 	return rc;
 fail:
-	printk(KERN_WARNING "%s: failed!\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: failed!\n", __func__);
 	goto out;
 }
 
@@ -1917,7 +1917,7 @@
 	p_dev->io.IOAddrLines	= 5;
 
 	/* Interrupt setup */
-	p_dev->irq.Attributes	= IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+	p_dev->irq.Attributes	= IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
 	p_dev->irq.IRQInfo1	= IRQ_LEVEL_ID;
 	p_dev->irq.Handler = wl3501_interrupt;
 
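
The wl3501 hunks above replace the GCC-specific __FUNCTION__ with the standard C99 __func__ identifier. A stand-alone sketch of the same dprintk idea (pc_debug and the format are stand-ins; the kernel macro uses printk and GNU "args..." varargs, here GNU ##__VA_ARGS__ is used instead):

#include <stdio.h>

static int pc_debug = 2;

#define dprintk(n, fmt, ...) \
	do { \
		if (pc_debug > (n)) \
			printf("%s: " fmt "\n", __func__, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	dprintk(1, "status=%d", 42);	/* prints "main: status=42" */
	return 0;
}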
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index cc36126..1907eaf 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_ZD1211RW) += zd1211rw.o
 
-zd1211rw-objs := zd_chip.o zd_ieee80211.o zd_mac.o \
+zd1211rw-objs := zd_chip.o zd_mac.o \
 		zd_rf_al2230.o zd_rf_rf2959.o \
 		zd_rf_al7230b.o zd_rf_uw2453.o \
 		zd_rf.o zd_usb.o
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 0acb5c3..e0ac58b 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -28,7 +28,6 @@
 
 #include "zd_def.h"
 #include "zd_chip.h"
-#include "zd_ieee80211.h"
 #include "zd_mac.h"
 #include "zd_rf.h"
 
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
deleted file mode 100644
index d8dc41e..0000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/* ZD1211 USB-WLAN driver for Linux
- *
- * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
- * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/*
- * In the long term, we'll probably find a better way of handling regulatory
- * requirements outside of the driver.
- */
-
-#include <linux/kernel.h>
-#include <net/mac80211.h>
-
-#include "zd_ieee80211.h"
-#include "zd_mac.h"
-
-struct channel_range {
-	u8 regdomain;
-	u8 start;
-	u8 end; /* exclusive (channel must be less than end) */
-};
-
-static const struct channel_range channel_ranges[] = {
-	{ ZD_REGDOMAIN_FCC,		1, 12 },
-	{ ZD_REGDOMAIN_IC,		1, 12 },
-	{ ZD_REGDOMAIN_ETSI,		1, 14 },
-	{ ZD_REGDOMAIN_JAPAN,		1, 14 },
-	{ ZD_REGDOMAIN_SPAIN,		1, 14 },
-	{ ZD_REGDOMAIN_FRANCE,		1, 14 },
-
-	/* Japan originally only had channel 14 available (see CHNL_ID 0x40 in
-	 * 802.11). However, in 2001 the range was extended to include channels
-	 * 1-13. The ZyDAS devices still use the old region code but are
-	 * designed to allow the extra channel access in Japan. */
-	{ ZD_REGDOMAIN_JAPAN_ADD,	1, 15 },
-};
-
-static const struct channel_range *zd_channel_range(u8 regdomain)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(channel_ranges); i++) {
-		const struct channel_range *range = &channel_ranges[i];
-		if (range->regdomain == regdomain)
-			return range;
-	}
-	return NULL;
-}
-
-#define CHAN_TO_IDX(chan) ((chan) - 1)
-
-static void unmask_bg_channels(struct ieee80211_hw *hw,
-	const struct channel_range *range,
-	struct ieee80211_supported_band *sband)
-{
-	u8 channel;
-
-	for (channel = range->start; channel < range->end; channel++) {
-		struct ieee80211_channel *chan =
-			&sband->channels[CHAN_TO_IDX(channel)];
-		chan->flags = 0;
-	}
-}
-
-void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain)
-{
-	struct zd_mac *mac = zd_hw_mac(hw);
-	const struct channel_range *range;
-
-	dev_dbg(zd_mac_dev(mac), "regdomain %#02x\n", regdomain);
-
-	range = zd_channel_range(regdomain);
-	if (!range) {
-		/* The vendor driver overrides the regulatory domain and
-		 * allowed channel registers and unconditionally restricts
-		 * available channels to 1-11 everywhere. Match their
-		 * questionable behaviour only for regdomains which we don't
-		 * recognise. */
-		dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: "
-			"%#02x. Defaulting to FCC.\n", regdomain);
-		range = zd_channel_range(ZD_REGDOMAIN_FCC);
-	}
-
-	unmask_bg_channels(hw, range, &mac->band);
-}
-
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
deleted file mode 100644
index 26b79f1..0000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* ZD1211 USB-WLAN driver for Linux
- *
- * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
- * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ZD_IEEE80211_H
-#define _ZD_IEEE80211_H
-
-#include <net/mac80211.h>
-
-/* Additional definitions from the standards.
- */
-
-#define ZD_REGDOMAIN_FCC	0x10
-#define ZD_REGDOMAIN_IC		0x20
-#define ZD_REGDOMAIN_ETSI	0x30
-#define ZD_REGDOMAIN_SPAIN	0x31
-#define ZD_REGDOMAIN_FRANCE	0x32
-#define ZD_REGDOMAIN_JAPAN_ADD	0x40
-#define ZD_REGDOMAIN_JAPAN	0x41
-
-enum {
-	MIN_CHANNEL24 = 1,
-	MAX_CHANNEL24 = 14,
-};
-
-void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain);
-
-#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
-
-struct ofdm_plcp_header {
-	u8 prefix[3];
-	__le16 service;
-} __attribute__((packed));
-
-static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
-{
-	return header->prefix[0] & 0xf;
-}
-
-/* The following defines give the encoding of the 4-bit rate field in the
- * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to
- * define the zd-rate values for OFDM.
- *
- * See the struct zd_ctrlset definition in zd_mac.h.
- */
-#define ZD_OFDM_PLCP_RATE_6M	0xb
-#define ZD_OFDM_PLCP_RATE_9M	0xf
-#define ZD_OFDM_PLCP_RATE_12M	0xa
-#define ZD_OFDM_PLCP_RATE_18M	0xe
-#define ZD_OFDM_PLCP_RATE_24M	0x9
-#define ZD_OFDM_PLCP_RATE_36M	0xd
-#define ZD_OFDM_PLCP_RATE_48M	0x8
-#define ZD_OFDM_PLCP_RATE_54M	0xc
-
-struct cck_plcp_header {
-	u8 signal;
-	u8 service;
-	__le16 length;
-	__le16 crc16;
-} __attribute__((packed));
-
-static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
-{
-	return header->signal;
-}
-
-/* These defines give the encodings of the signal field in the 802.11b PLCP
- * header. The signal field gives the bit rate of the following packet. Even
- * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
- * rate to stay consistent with Zydas and our use of the term.
- *
- * Notify that these values are *not* used in the zd-rates.
- */
-#define ZD_CCK_PLCP_SIGNAL_1M	0x0a
-#define ZD_CCK_PLCP_SIGNAL_2M	0x14
-#define ZD_CCK_PLCP_SIGNAL_5M5	0x37
-#define ZD_CCK_PLCP_SIGNAL_11M	0x6e
-
-#endif /* _ZD_IEEE80211_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 4d7b98b..fe1867b 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -3,7 +3,7 @@
  * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
  * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
  * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (C) 2007-2008 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,9 +29,23 @@
 #include "zd_def.h"
 #include "zd_chip.h"
 #include "zd_mac.h"
-#include "zd_ieee80211.h"
 #include "zd_rf.h"
 
+struct zd_reg_alpha2_map {
+	u32 reg;
+	char alpha2[2];
+};
+
+static struct zd_reg_alpha2_map reg_alpha2_map[] = {
+	{ ZD_REGDOMAIN_FCC, "US" },
+	{ ZD_REGDOMAIN_IC, "CA" },
+	{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
+	{ ZD_REGDOMAIN_JAPAN, "JP" },
+	{ ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+	{ ZD_REGDOMAIN_SPAIN, "ES" },
+	{ ZD_REGDOMAIN_FRANCE, "FR" },
+};
+
 /* This table contains the hardware specific values for the modulation rates. */
 static const struct ieee80211_rate zd_rates[] = {
 	{ .bitrate = 10,
@@ -95,6 +109,21 @@
 static void housekeeping_enable(struct zd_mac *mac);
 static void housekeeping_disable(struct zd_mac *mac);
 
+static int zd_reg2alpha2(u8 regdomain, char *alpha2)
+{
+	unsigned int i;
+	struct zd_reg_alpha2_map *reg_map;
+	for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
+		reg_map = &reg_alpha2_map[i];
+		if (regdomain == reg_map->reg) {
+			alpha2[0] = reg_map->alpha2[0];
+			alpha2[1] = reg_map->alpha2[1];
+			return 0;
+		}
+	}
+	return 1;
+}
+
 int zd_mac_preinit_hw(struct ieee80211_hw *hw)
 {
 	int r;
@@ -115,6 +144,7 @@
 	int r;
 	struct zd_mac *mac = zd_hw_mac(hw);
 	struct zd_chip *chip = &mac->chip;
+	char alpha2[2];
 	u8 default_regdomain;
 
 	r = zd_chip_enable_int(chip);
@@ -139,7 +169,9 @@
 	if (r)
 		goto disable_int;
 
-	zd_geo_init(hw, mac->regdomain);
+	r = zd_reg2alpha2(mac->regdomain, alpha2);
+	if (!r)
+		regulatory_hint(hw->wiphy, alpha2, NULL);
 
 	r = 0;
 disable_int:
@@ -579,7 +611,7 @@
 
 	q = &zd_hw_mac(hw)->ack_wait_queue;
 	spin_lock_irqsave(&q->lock, flags);
-	for (skb = q->next; skb != (struct sk_buff *)q; skb = skb->next) {
+	skb_queue_walk(q, skb) {
 		struct ieee80211_hdr *tx_hdr;
 
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
@@ -684,15 +716,15 @@
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
 
-	/* using IEEE80211_IF_TYPE_INVALID to indicate no mode selected */
-	if (mac->type != IEEE80211_IF_TYPE_INVALID)
+	/* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */
+	if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
 		return -EOPNOTSUPP;
 
 	switch (conf->type) {
-	case IEEE80211_IF_TYPE_MNTR:
-	case IEEE80211_IF_TYPE_MESH_POINT:
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_MESH_POINT:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
 		mac->type = conf->type;
 		break;
 	default:
@@ -706,7 +738,7 @@
 				    struct ieee80211_if_init_conf *conf)
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
-	mac->type = IEEE80211_IF_TYPE_INVALID;
+	mac->type = NL80211_IFTYPE_UNSPECIFIED;
 	zd_set_beacon_interval(&mac->chip, 0);
 	zd_write_mac_addr(&mac->chip, NULL);
 }
@@ -725,8 +757,8 @@
 	int associated;
 	int r;
 
-	if (mac->type == IEEE80211_IF_TYPE_MESH_POINT ||
-	    mac->type == IEEE80211_IF_TYPE_IBSS) {
+	if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+	    mac->type == NL80211_IFTYPE_ADHOC) {
 		associated = true;
 		if (conf->changed & IEEE80211_IFCC_BEACON) {
 			struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -753,7 +785,7 @@
 	return 0;
 }
 
-void zd_process_intr(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
 {
 	u16 int_status;
 	struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
@@ -923,7 +955,7 @@
 	spin_lock_init(&mac->lock);
 	mac->hw = hw;
 
-	mac->type = IEEE80211_IF_TYPE_INVALID;
+	mac->type = NL80211_IFTYPE_UNSPECIFIED;
 
 	memcpy(mac->channels, zd_channels, sizeof(zd_channels));
 	memcpy(mac->rates, zd_rates, sizeof(zd_rates));
@@ -937,6 +969,11 @@
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
 		    IEEE80211_HW_SIGNAL_DB;
 
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
 	hw->max_signal = 100;
 	hw->queues = 1;
 	hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
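
With the old zd_geo_init() channel masking gone, the driver now maps the EEPROM regulatory-domain byte to an ISO alpha2 code and hands it to regulatory_hint(), letting the new regulatory core decide which channels are usable. A user-space sketch of that lookup, with the same table values as the hunk above (the 3-byte buffer and printing are additions for illustration; the kernel helper copies just the two characters):

#include <stdio.h>
#include <string.h>

#define ZD_REGDOMAIN_FCC	0x10
#define ZD_REGDOMAIN_IC		0x20
#define ZD_REGDOMAIN_ETSI	0x30
#define ZD_REGDOMAIN_SPAIN	0x31
#define ZD_REGDOMAIN_FRANCE	0x32
#define ZD_REGDOMAIN_JAPAN_ADD	0x40
#define ZD_REGDOMAIN_JAPAN	0x41

static const struct { unsigned reg; const char *alpha2; } map[] = {
	{ ZD_REGDOMAIN_FCC, "US" },       { ZD_REGDOMAIN_IC, "CA" },
	{ ZD_REGDOMAIN_ETSI, "DE" },      { ZD_REGDOMAIN_JAPAN, "JP" },
	{ ZD_REGDOMAIN_JAPAN_ADD, "JP" }, { ZD_REGDOMAIN_SPAIN, "ES" },
	{ ZD_REGDOMAIN_FRANCE, "FR" },
};

static int reg2alpha2(unsigned regdomain, char alpha2[3])
{
	size_t i;
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].reg == regdomain) {
			strcpy(alpha2, map[i].alpha2);
			return 0;
		}
	return 1;	/* unknown regdomain: no hint is sent */
}

int main(void)
{
	char alpha2[3];
	if (!reg2alpha2(ZD_REGDOMAIN_ETSI, alpha2))
		printf("hint: %s\n", alpha2);	/* "DE" */
	return 0;
}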
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 18c1d56..4c05d3e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -25,7 +25,6 @@
 #include <net/mac80211.h>
 
 #include "zd_chip.h"
-#include "zd_ieee80211.h"
 
 struct zd_ctrlset {
 	u8     modulation;
@@ -187,6 +186,70 @@
 	unsigned int pass_ctrl:1;
 };
 
+#define ZD_REGDOMAIN_FCC	0x10
+#define ZD_REGDOMAIN_IC		0x20
+#define ZD_REGDOMAIN_ETSI	0x30
+#define ZD_REGDOMAIN_SPAIN	0x31
+#define ZD_REGDOMAIN_FRANCE	0x32
+#define ZD_REGDOMAIN_JAPAN_ADD	0x40
+#define ZD_REGDOMAIN_JAPAN	0x41
+
+enum {
+	MIN_CHANNEL24 = 1,
+	MAX_CHANNEL24 = 14,
+};
+
+#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
+
+struct ofdm_plcp_header {
+	u8 prefix[3];
+	__le16 service;
+} __attribute__((packed));
+
+static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
+{
+	return header->prefix[0] & 0xf;
+}
+
+/* The following defines give the encoding of the 4-bit rate field in the
+ * OFDM (802.11a/802.11g) PLCP header. Note that these values are used to
+ * define the zd-rate values for OFDM.
+ *
+ * See the struct zd_ctrlset definition in zd_mac.h.
+ */
+#define ZD_OFDM_PLCP_RATE_6M	0xb
+#define ZD_OFDM_PLCP_RATE_9M	0xf
+#define ZD_OFDM_PLCP_RATE_12M	0xa
+#define ZD_OFDM_PLCP_RATE_18M	0xe
+#define ZD_OFDM_PLCP_RATE_24M	0x9
+#define ZD_OFDM_PLCP_RATE_36M	0xd
+#define ZD_OFDM_PLCP_RATE_48M	0x8
+#define ZD_OFDM_PLCP_RATE_54M	0xc
+
+struct cck_plcp_header {
+	u8 signal;
+	u8 service;
+	__le16 length;
+	__le16 crc16;
+} __attribute__((packed));
+
+static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
+{
+	return header->signal;
+}
+
+/* These defines give the encodings of the signal field in the 802.11b PLCP
+ * header. The signal field gives the bit rate of the following packet. Even
+ * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
+ * rate to stay consistent with Zydas and our use of the term.
+ *
+ * Note that these values are *not* used in the zd-rates.
+ */
+#define ZD_CCK_PLCP_SIGNAL_1M	0x0a
+#define ZD_CCK_PLCP_SIGNAL_2M	0x14
+#define ZD_CCK_PLCP_SIGNAL_5M5	0x37
+#define ZD_CCK_PLCP_SIGNAL_11M	0x6e
+
 static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw)
 {
 	return hw->priv;
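
The PLCP definitions moved into zd_mac.h include the 4-bit OFDM rate encodings. A stand-alone sketch of decoding that nibble (as extracted by zd_ofdm_plcp_header_rate()) into a bitrate in units of 100 kbit/s; the helper name and the unit choice are illustrative:

#include <stdio.h>

static int ofdm_plcp_rate_to_100kbps(unsigned char rate)
{
	switch (rate & 0xf) {
	case 0xb: return 60;	/* ZD_OFDM_PLCP_RATE_6M  */
	case 0xf: return 90;
	case 0xa: return 120;
	case 0xe: return 180;
	case 0x9: return 240;
	case 0xd: return 360;
	case 0x8: return 480;
	case 0xc: return 540;	/* ZD_OFDM_PLCP_RATE_54M */
	default:  return -1;	/* not an OFDM rate code */
	}
}

int main(void)
{
	printf("%d\n", ofdm_plcp_rate_to_100kbps(0xc));	/* 540 -> 54 Mbit/s */
	return 0;
}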
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index ec41293..7207bfd 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -23,7 +23,7 @@
 
 #include "zd_def.h"
 #include "zd_rf.h"
-#include "zd_ieee80211.h"
+#include "zd_mac.h"
 #include "zd_chip.h"
 
 static const char * const rfs[] = {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c7ab1b8..908f50b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -760,7 +760,7 @@
 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
 		    SENSE_COMMAND_REJECT_FLAG) {
 			QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
-			return 0;
+			return 1;
 		}
 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
 			QETH_DBF_TEXT(TRACE, 2, "AFFE");
@@ -884,6 +884,7 @@
 		}
 		rc = qeth_get_problem(cdev, irb);
 		if (rc) {
+			qeth_clear_ipacmd_list(card);
 			qeth_schedule_recovery(card);
 			goto out;
 		}
@@ -4147,6 +4148,7 @@
 	unsigned long flags;
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 
+	QETH_DBF_TEXT(SETUP, 2, "removedv");
 	if (card->discipline.ccwgdriver) {
 		card->discipline.ccwgdriver->remove(gdev);
 		qeth_core_free_discipline(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3ac3cc1..955ba7a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -395,7 +395,8 @@
 	}
 	if (card->state == CARD_STATE_SOFTSETUP) {
 		qeth_l2_process_vlans(card, 1);
-		qeth_l2_del_all_mc(card);
+		if (!card->use_hard_stop)
+			qeth_l2_del_all_mc(card);
 		qeth_clear_ipacmd_list(card);
 		card->state = CARD_STATE_HARDSETUP;
 	}
@@ -559,7 +560,8 @@
 			"device %s: x%x\n", CARD_BUS_ID(card), rc);
 	}
 
-	if (card->info.guestlan) {
+	if ((card->info.type == QETH_CARD_TYPE_IQD) || 
+	    (card->info.guestlan)) {
 		rc = qeth_setadpparms_change_macaddr(card);
 		if (rc) {
 			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -825,7 +827,6 @@
 	}
 	card->data.state = CH_STATE_UP;
 	card->state = CARD_STATE_UP;
-	card->dev->flags |= IFF_UP;
 	netif_start_queue(dev);
 
 	if (!card->lan_online && netif_carrier_ok(dev))
@@ -840,7 +841,6 @@
 
 	QETH_DBF_TEXT(TRACE, 4, "qethstop");
 	netif_tx_disable(dev);
-	card->dev->flags &= ~IFF_UP;
 	if (card->state == CARD_STATE_UP)
 		card->state = CARD_STATE_SOFTSETUP;
 	return 0;
@@ -1137,9 +1137,13 @@
 	if (!rc)
 		PRINT_INFO("Device %s successfully recovered!\n",
 			   CARD_BUS_ID(card));
-	else
+	else {
+		rtnl_lock();
+		dev_close(card->dev);
+		rtnl_unlock();
 		PRINT_INFO("Device %s could not be recovered!\n",
 			   CARD_BUS_ID(card));
+	}
 	return 0;
 }
 
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index dd72c3c..99547de 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2795,7 +2795,6 @@
 		return -ENODEV;
 	card->data.state = CH_STATE_UP;
 	card->state = CARD_STATE_UP;
-	card->dev->flags |= IFF_UP;
 	netif_start_queue(dev);
 
 	if (!card->lan_online && netif_carrier_ok(dev))
@@ -2809,7 +2808,6 @@
 
 	QETH_DBF_TEXT(TRACE, 4, "qethstop");
 	netif_tx_disable(dev);
-	card->dev->flags &= ~IFF_UP;
 	if (card->state == CARD_STATE_UP)
 		card->state = CARD_STATE_SOFTSETUP;
 	return 0;
@@ -3218,9 +3216,13 @@
 	if (!rc)
 		PRINT_INFO("Device %s successfully recovered!\n",
 			   CARD_BUS_ID(card));
-	else
+	else {
+		rtnl_lock();
+		dev_close(card->dev);
+		rtnl_unlock();
 		PRINT_INFO("Device %s could not be recovered!\n",
 			   CARD_BUS_ID(card));
+	}
 	return 0;
 }
 
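
Both qeth recovery paths above now close the interface when recovery fails. dev_close() must run with the RTNL lock held, which is why the error branch wraps it in rtnl_lock()/rtnl_unlock(). A kernel-style sketch of that pattern (the function name is illustrative and the snippet is not meant to compile outside the kernel):

static void take_interface_down(struct net_device *dev)
{
	rtnl_lock();
	dev_close(dev);		/* brings the device administratively down */
	rtnl_unlock();
}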
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index f883dcf..d5cde05 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -327,11 +327,9 @@
 	s8 gain;
 	u16 loc[3];
 
-	if (out->revision == 3) {			/* rev 3 moved MAC */
+	if (out->revision == 3)			/* rev 3 moved MAC */
 		loc[0] = SSB_SPROM3_IL0MAC;
-		loc[1] = SSB_SPROM3_ET0MAC;
-		loc[2] = SSB_SPROM3_ET1MAC;
-	} else {
+	else {
 		loc[0] = SSB_SPROM1_IL0MAC;
 		loc[1] = SSB_SPROM1_ET0MAC;
 		loc[2] = SSB_SPROM1_ET1MAC;
@@ -340,13 +338,15 @@
 		v = in[SPOFF(loc[0]) + i];
 		*(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
 	}
-	for (i = 0; i < 3; i++) {
-		v = in[SPOFF(loc[1]) + i];
-		*(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
-	}
-	for (i = 0; i < 3; i++) {
-		v = in[SPOFF(loc[2]) + i];
-		*(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
+	if (out->revision < 3) { 	/* only rev 1-2 have et0, et1 */
+		for (i = 0; i < 3; i++) {
+			v = in[SPOFF(loc[1]) + i];
+			*(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
+		}
+		for (i = 0; i < 3; i++) {
+			v = in[SPOFF(loc[2]) + i];
+			*(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
+		}
 	}
 	SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
 	SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -399,30 +399,33 @@
 	out->antenna_gain.ghz5.a3 = gain;
 }
 
-static void sprom_extract_r4(struct ssb_sprom *out, const u16 *in)
+static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
 {
 	int i;
 	u16 v;
+	u16 il0mac_offset;
 
-	/* extract the equivalent of the r1 variables */
+	if (out->revision == 4)
+		il0mac_offset = SSB_SPROM4_IL0MAC;
+	else
+		il0mac_offset = SSB_SPROM5_IL0MAC;
+	/* extract the MAC address */
 	for (i = 0; i < 3; i++) {
-		v = in[SPOFF(SSB_SPROM4_IL0MAC) + i];
+		v = in[SPOFF(il0mac_offset) + i];
 		*(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
 	}
-	for (i = 0; i < 3; i++) {
-		v = in[SPOFF(SSB_SPROM4_ET0MAC) + i];
-		*(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
-	}
-	for (i = 0; i < 3; i++) {
-		v = in[SPOFF(SSB_SPROM4_ET1MAC) + i];
-		*(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
-	}
 	SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
 	SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
 	     SSB_SPROM4_ETHPHY_ET1A_SHIFT);
-	SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
-	SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
-	SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+	if (out->revision == 4) {
+		SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
+		SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
+		SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+	} else {
+		SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
+		SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
+		SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
+	}
 	SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
 	     SSB_SPROM4_ANTAVAIL_A_SHIFT);
 	SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG,
@@ -433,12 +436,21 @@
 	SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0);
 	SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A,
 	     SSB_SPROM4_ITSSI_A_SHIFT);
-	SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0);
-	SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1,
-	     SSB_SPROM4_GPIOA_P1_SHIFT);
-	SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0);
-	SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3,
-	     SSB_SPROM4_GPIOB_P3_SHIFT);
+	if (out->revision == 4) {
+		SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0);
+		SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1,
+		     SSB_SPROM4_GPIOA_P1_SHIFT);
+		SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0);
+		SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3,
+		     SSB_SPROM4_GPIOB_P3_SHIFT);
+	} else {
+		SPEX(gpio0, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P0, 0);
+		SPEX(gpio1, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P1,
+		     SSB_SPROM5_GPIOA_P1_SHIFT);
+		SPEX(gpio2, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P2, 0);
+		SPEX(gpio3, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P3,
+		     SSB_SPROM5_GPIOB_P3_SHIFT);
+	}
 
 	/* Extract the antenna gain values. */
 	SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01,
@@ -462,6 +474,8 @@
 
 	out->revision = in[size - 1] & 0x00FF;
 	ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision);
+	memset(out->et0mac, 0xFF, 6);		/* preset et0 and et1 mac */
+	memset(out->et1mac, 0xFF, 6);
 	if ((bus->chip_id & 0xFF00) == 0x4400) {
 		/* Workaround: The BCM44XX chip has a stupid revision
 		 * number stored in the SPROM.
@@ -471,16 +485,16 @@
 	} else if (bus->chip_id == 0x4321) {
 		/* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */
 		out->revision = 4;
-		sprom_extract_r4(out, in);
+		sprom_extract_r45(out, in);
 	} else {
 		if (out->revision == 0)
 			goto unsupported;
 		if (out->revision >= 1 && out->revision <= 3) {
 			sprom_extract_r123(out, in);
 		}
-		if (out->revision == 4)
-			sprom_extract_r4(out, in);
-		if (out->revision >= 5)
+		if (out->revision == 4 || out->revision == 5)
+			sprom_extract_r45(out, in);
+		if (out->revision > 5)
 			goto unsupported;
 	}
 
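
sprom_extract_r45() above reads the MAC address as three 16-bit SPROM words and stores them with cpu_to_be16(), i.e. high byte first, while the et0/et1 addresses are preset to FF and only filled in for revision 1-2 SPROMs. A stand-alone sketch of the word-to-byte conversion with the swap written out by hand (the sample words are made up):

#include <stdio.h>
#include <stdint.h>

static void sprom_words_to_mac(const uint16_t words[3], uint8_t mac[6])
{
	int i;
	for (i = 0; i < 3; i++) {
		mac[2 * i]     = words[i] >> 8;		/* high byte first */
		mac[2 * i + 1] = words[i] & 0xff;
	}
}

int main(void)
{
	const uint16_t words[3] = { 0x0013, 0x10af, 0x0102 };
	uint8_t mac[6];
	int i;

	sprom_words_to_mac(words, mac);
	for (i = 0; i < 6; i++)
		printf("%02x%s", mac[i], i < 5 ? ":" : "\n");	/* 00:13:10:af:01:02 */
	return 0;
}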
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 0722872..0da2c25 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -640,14 +640,13 @@
 
 	atm_dbg(instance, "%s entered\n", __func__);
 	spin_lock_irq(&instance->sndqueue.lock);
-	for (skb = instance->sndqueue.next, n = skb->next;
-	     skb != (struct sk_buff *)&instance->sndqueue;
-	     skb = n, n = skb->next)
+	skb_queue_walk_safe(&instance->sndqueue, skb, n) {
 		if (UDSL_SKB(skb)->atm.vcc == vcc) {
 			atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb);
 			__skb_unlink(skb, &instance->sndqueue);
 			usbatm_pop(vcc, skb);
 		}
+	}
 	spin_unlock_irq(&instance->sndqueue.lock);
 
 	tasklet_disable(&instance->tx_channel.tasklet);
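
The usbatm hunk replaces an open-coded next-pointer walk with skb_queue_walk_safe(), which keeps a lookahead pointer so the current skb can be unlinked inside the loop. A kernel-style sketch of the same idiom (struct some_ctx and skb_matches() are hypothetical placeholders; the caller is assumed to hold the queue lock, as in the hunk above):

static void purge_matching(struct sk_buff_head *q, struct some_ctx *ctx)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(q, skb, tmp) {
		if (skb_matches(skb, ctx)) {	/* hypothetical predicate */
			__skb_unlink(skb, q);	/* safe: tmp already points past skb */
			kfree_skb(skb);
		}
	}
}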
diff --git a/firmware/Makefile b/firmware/Makefile
index da75a6f..ca8cd30 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -22,6 +22,7 @@
 
 fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
 fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
+fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
 fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
 fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
 fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 66c51b2..57002cd 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -339,3 +339,13 @@
 Found in hex form in kernel source.
 
 --------------------------------------------------------------------------
+
+Driver: CASSINI - Sun Cassini
+
+File: sun/cassini.bin
+
+Licence: Unknown
+
+Found in hex form in kernel source.
+
+--------------------------------------------------------------------------
diff --git a/firmware/sun/cassini.bin.ihex b/firmware/sun/cassini.bin.ihex
new file mode 100644
index 0000000..5cd7ae7
--- /dev/null
+++ b/firmware/sun/cassini.bin.ihex
@@ -0,0 +1,143 @@
+:1000000000827E82090000000000008E8EFFCE82FA
+:1000100025FF010FCE8426FF0111CE853DDFE58649
+:1000200039B78FF87EC3C2964784F38A009747CECC
+:100030008233FF010F9646840C8104270B96468479
+:100040000C810827577E8425964784F38A049747B6
+:10005000CE8254FF010F9646840C81042638B612D6
+:1000600020842026037E8425967BD67CFE8F56BD79
+:10007000F7B6FE8F4EBDEC8EBDFAF7BDF728CE82E7
+:1000800082FF010F9646840C8104260AB612208452
+:100090002027B57E8425BDF71F7E841F964784F3F5
+:1000A0008A089747DEE1AD00CE82AFFF010F7E8464
+:1000B00025964185102606962385402706BDED002E
+:1000C0007E83A2DE42BDEB8E9624840827037E83C6
+:1000D000DF967BD67CFE8F56BDF7B6FE8F50BDEC0B
+:1000E0008EBDFAF78611C649BDE412CE82EFFF013C
+:1000F0000F9646840C81002717C649BDE491240D54
+:10010000B612208520260CCE82C1FF010F7E8425E9
+:100110007E8416FE8F52BDEC8EBDFAF7866AC64904
+:10012000BDE412CE8327FF010F9646840C81002781
+:100130000AC649BDE49125067E84257E8416B6183C
+:1001400070BB19702A0481AF2E19967BF62007FA2E
+:100150002027C4388138270BF62007FA2027CB0840
+:100160007E82D3BDF7668674C649BDE412CE837124
+:10017000FF010F9646840C8108260AC649BDE4910A
+:1001800025067E84257E8416BDF73E260EBDE50934
+:100190002606CE82C1FF010F7E8425FE8F54BDEC62
+:1001A0008EBDFAF7BDF733860FC651BDE412CE837C
+:1001B000B2FF010F9646840C8108265CB61220849B
+:1001C0003F813A271C9623854027037E8425C6510C
+:1001D000BDE49125037E8425CE82C1FF010F7E847C
+:1001E00025BDF8377C007ACE83EEFF010F7E842593
+:1001F0009646840C81082620962484082629B61861
+:1002000082BB1982B1013B2209B6122084378132A8
+:100210002715BDF8447E82C1BDF71FBDF844BDFC63
+:1002200029CE8225FF010F39964784FC8A00974723
+:10023000CE8434FF011196468403810227037E8514
+:100240001E964784FC8A029747DEE1AD008601B71F
+:100250001251BDF714B6103184FDB71031BDF81E30
+:100260009681D682FE8F5ABDF7B6FE8F5CBDEC8EAE
+:10027000BDFAF78608D600C51026028B20C651BDF0
+:10028000E412CE8486FF011196468403810227037F
+:100290007E850FC651BDE49125037E851E9644855B
+:1002A00010260AB61250BA013C851027A8BDF76681
+:1002B000CE84B7FF01117E851E96468403810226F7
+:1002C00050B612308403810127037E851E96448533
+:1002D000102613B61250BA013C85102609CE84535D
+:1002E000FF01117E851EB610318A02B71031BD851F
+:1002F0001FBDF8377C0080CE84FEFF01117E851E75
+:100300009646840381022609B612308403810127B0
+:100310000FBDF844BDF70BBDFC29CE8426FF0111AB
+:1003200039D622C40FB61230BA12328404270D9681
+:100330002285042705CA107E853ACA20D72239862D
+:1003400000978318CE1C00BDEB4696578501270207
+:100350004F3985022701397F8F7D8604B7120486C5
+:1003600008B712078610B7120C8607B71206B68FA9
+:100370007DB712708601BA1204B71204010101019F
+:100380000101B6120484FE8A02B7120401010101C0
+:10039000010186FDB41204B71204B612008408816C
+:1003A000082716B68F7D810C27088B04B78F7D7EBA
+:1003B000856C860397407E896E8607B712065FF7C5
+:1003C0008F825FF78F7FF78F70F78F71F78F72F7DC
+:1003D0008F73F78F74F78F75F78F76F78F77F78FA7
+:1003E00078F78F79F78F7AF78F7BB612048A10B778
+:1003F000120486E4B71270B71207F71205F7120954
+:100400008608BA1204B7120486F7B41204B71204AD
+:10041000010101010101B61208277F8180260B86A8
+:1004200008CE8F79BD897B7E868E8140260B86041F
+:10043000CE8F76BD897B7E868E8120260B8602CE6E
+:100440008F73BD897B7E868E8110260B8601CE8FB1
+:1004500070BD897B7E868E8108260B8608CE8F79BB
+:10046000BD897F7E868E8104260B8604CE8F76BD65
+:10047000897F7E868E8102260B8A02CE8F73BD898C
+:100480007F7E868E810126088601CE8F70BD897F92
+:10049000B68F7F810F26037E8747B61209840381BA
+:1004A0000327067C12097E85FEB6120684078107A3
+:1004B00027088B01B712067E86D5B68F82260A7C66
+:1004C0008F824FB712067E85C0B61206843F813FE9
+:1004D00027108B08B71206B6120984FCB712097EE2
+:1004E00085FECE8F7018CE8F84C60CBD896FCE8FDF
+:1004F0008418CE8F70C60CBD896FD683C14F2D0373
+:100500007E8740B68F7F8107270F810B2715810DCE
+:10051000271B810E27217E8740F78F7B8602B78FAE
+:100520007A201CF78F788602B78F772012F78F75A5
+:100530008602B78F742008F78F728602B78F717E9C
+:100540008747860497407E896ECE8F72BD89F7CE2D
+:100550008F75BD89F7CE8F78BD89F7CE8F7BBD892A
+:10056000F74FB78F7DB78F81B68F7227477C8F7D0E
+:10057000B68F75273F7C8F7DB68F7827377C8F7D30
+:10058000B68F7B272F7F8F7D7C8F817A8F72271B81
+:100590007C8F7D7A8F7527167C8F7D7A8F782711D7
+:1005A0007C8F7D7A8F7B270C7E87837A8F757A8FFD
+:1005B000787A8F7BCEC1FCF68F7D3AA600B7127099
+:1005C000B68F7226037E87FAB68F75260A18CE8FED
+:1005D00073BD89D57E87FAB68F78260A18CE8F76B6
+:1005E000BD89D57E87FAB68F7B260A18CE8F79BD56
+:1005F00089D57E87FA860597407E8900B68F7581FA
+:10060000072EF2F61206C4F81BB71206B68F7881D1
+:10061000072EE2484848F61206C4C71BB71206B6B2
+:100620008F7B81072ECFF61205C4F81BB712058603
+:1006300000F68F71BD89948601F68F74BD8994860A
+:1006400002F68F77BD89948603F68F7ABD8994CEA2
+:100650008F70A60181012707810327037E8866A684
+:1006600000B88F818401260B8C8F792C0E08080826
+:100670007E8850B612048A40B71204B6120484FB76
+:1006800084EFB71204B6120736B68F7C4848B7120B
+:10069000078601BA1204B7120401010101010186A3
+:1006A000FEB41204B712048602BA1204B71204860A
+:1006B000FDB41204B7120432B71207B61200840850
+:1006C0008108270F7C82082607867697407E896EF0
+:1006D0007E86ECB68F7F810F273CBDE6C7B7120D33
+:1006E000BDE6CBB612048A20B71204CEFFFFB612C5
+:1006F00000810C26050926F6271CB6120484DFB7F4
+:100700001204968381072C057C0083200696838B38
+:100710000897837E85417F8F7E8680B7120C860185
+:10072000B78F7DB6120C847FB7120C8A80B7120C7B
+:10073000860ABD8A06B6120A2A09B6120CBA8F7D3D
+:10074000B7120CB68F7E8160271A8B20B78F7EB6CA
+:10075000120C849FBA8F7EB7120CB68F7D48B78F6C
+:100760007D7E8921B612048A20B71204BD8A0A4F01
+:1007700039A60018A7000818085A26F539366C0063
+:1007800032BA8F7FB78F7FB612098403A701B612E2
+:1007900006843FA70239368603B78F8032C1002610
+:1007A00006B78F7C7E89C9C1012718C102270CC1F9
+:1007B000032700F68F800505F78F80F68F800505EB
+:1007C000F78F80F68F800505F78F80F68F8053F4C2
+:1007D00012071BB7120739CE8F70A60018E6001853
+:1007E000A700E700A60118E60118A701E701A60285
+:1007F00018E60218A702E70239A6008407E600C43B
+:10080000385454541BA700394A26FD399622840FC8
+:1008100097228601B78F70B61207B78F71F6120C48
+:10082000C40FC80FF78F72F68F72B68F71840327CB
+:10083000148101271C81022724F48F70272A962215
+:100840008A807E8A64F48F70271E96228A107E8AA0
+:1008500064F48F70271296228A207E8A64F48F7047
+:10086000270696228A409722748F71748F71788F31
+:1008700070B68F70851027AFD622C41058B612708C
+:1008800081E4273681E1260C96228420441BD6225F
+:10089000C4CF20235881C6260D9622844044441B91
+:1008A000D622C4AF2011588127260F962284804477
+:1008B00044441BD622C46F1B972239270C7C820626
+:0D08C000BDD9EDB682077E8AB97F82063968
+:00000001FF
+/* firmware patch for NS_DP83065 */
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index f0ee4fb..90fc708 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -118,4 +118,11 @@
 	BUG();
 }
 
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
+{
+	BUG();
+	return 0;
+}
+
 #endif
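
The new asm-um stub matches the updated dma_mapping_error() convention that takes the struct device as its first argument. A kernel-style sketch of how a driver is expected to use it after dma_map_single() (illustrative only; error handling beyond returning -ENOMEM is omitted):

static int map_buffer(struct device *dev, void *buf, size_t len,
		      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;		/* never hand a failed mapping to hardware */
	return 0;
}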
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b68ec09..f431e40 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -126,6 +126,7 @@
 header-y += pfkeyv2.h
 header-y += pg.h
 header-y += phantom.h
+header-y += phonet.h
 header-y += pkt_cls.h
 header-y += pkt_sched.h
 header-y += posix_types.h
@@ -232,6 +233,7 @@
 unifdef-y += if_frad.h
 unifdef-y += if_ltalk.h
 unifdef-y += if_link.h
+unifdef-y += if_phonet.h
 unifdef-y += if_pppol2tp.h
 unifdef-y += if_pppox.h
 unifdef-y += if_tr.h
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7f4df7c..14126bc 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -471,6 +471,11 @@
 	u8 eaddr3[6];
 } __attribute__ ((packed));
 
+/* Mesh flags */
+#define MESH_FLAGS_AE_A4 	0x1
+#define MESH_FLAGS_AE_A5_A6	0x2
+#define MESH_FLAGS_PS_DEEP	0x4
+
 /**
  * struct ieee80211_quiet_ie
  *
@@ -643,6 +648,9 @@
 	} u;
 } __attribute__ ((packed));
 
+/* mgmt header + 1 byte category code */
+#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
+
 
 /* Control frames */
 struct ieee80211_rts {
@@ -708,12 +716,13 @@
 
 /* 802.11n HT capabilities masks */
 #define IEEE80211_HT_CAP_SUP_WIDTH		0x0002
-#define IEEE80211_HT_CAP_MIMO_PS		0x000C
+#define IEEE80211_HT_CAP_SM_PS			0x000C
 #define IEEE80211_HT_CAP_GRN_FLD		0x0010
 #define IEEE80211_HT_CAP_SGI_20			0x0020
 #define IEEE80211_HT_CAP_SGI_40			0x0040
 #define IEEE80211_HT_CAP_DELAY_BA		0x0400
 #define IEEE80211_HT_CAP_MAX_AMSDU		0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40		0x1000
 /* 802.11n HT capability AMPDU settings */
 #define IEEE80211_HT_CAP_AMPDU_FACTOR		0x03
 #define IEEE80211_HT_CAP_AMPDU_DENSITY		0x1C
@@ -736,11 +745,26 @@
 #define IEEE80211_HT_IE_NON_GF_STA_PRSNT	0x0004
 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT	0x0010
 
-/* MIMO Power Save Modes */
-#define WLAN_HT_CAP_MIMO_PS_STATIC	0
-#define WLAN_HT_CAP_MIMO_PS_DYNAMIC	1
-#define WLAN_HT_CAP_MIMO_PS_INVALID	2
-#define WLAN_HT_CAP_MIMO_PS_DISABLED	3
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF 0x40
+
+
+/* Spatial Multiplexing Power Save Modes */
+#define WLAN_HT_CAP_SM_PS_STATIC	0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC	1
+#define WLAN_HT_CAP_SM_PS_INVALID	2
+#define WLAN_HT_CAP_SM_PS_DISABLED	3
 
 /* Authentication algorithms */
 #define WLAN_AUTH_OPEN 0
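
The new block-ack parameter masks encode field positions inside the 16-bit ADDBA parameter set: 0x0002 is the BA policy bit and 0x003C covers the TID, so the TID is recovered with a mask and a shift of 2 (the shift follows directly from the mask). A stand-alone sketch with a made-up parameter value:

#include <stdio.h>
#include <stdint.h>

#define ADDBA_PARAM_POLICY_MASK	0x0002	/* mirrors IEEE80211_ADDBA_PARAM_POLICY_MASK */
#define ADDBA_PARAM_TID_MASK	0x003C	/* mirrors IEEE80211_ADDBA_PARAM_TID_MASK    */

int main(void)
{
	uint16_t params = 0x101E;	/* example ADDBA parameter set */
	unsigned tid = (params & ADDBA_PARAM_TID_MASK) >> 2;
	unsigned immediate_ba = !!(params & ADDBA_PARAM_POLICY_MASK);

	printf("tid=%u immediate=%u\n", tid, immediate_ba);	/* tid=7 immediate=1 */
	return 0;
}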
diff --git a/include/linux/if.h b/include/linux/if.h
index 5c9d1fa..6524684 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>		/* for "__user" et al           */
 
 #define	IFNAMSIZ	16
+#define	IFALIASZ	256
 #include <linux/hdlc/ioctl.h>
 
 /* Standard interface flags (netdevice->flags). */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e157c13..723a1c5f 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -74,6 +74,7 @@
 #define ETH_P_ATMFATE	0x8884		/* Frame-based ATM Transport
 					 * over Ethernet
 					 */
+#define ETH_P_PAE	0x888E		/* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE	0x88A2		/* ATA over Ethernet		*/
 #define ETH_P_TIPC	0x88CA		/* TIPC 			*/
 
@@ -99,6 +100,7 @@
 #define ETH_P_ECONET	0x0018		/* Acorn Econet			*/
 #define ETH_P_HDLC	0x0019		/* HDLC frames			*/
 #define ETH_P_ARCNET	0x001A		/* 1A for ArcNet :-)            */
+#define ETH_P_PHONET	0x00F5		/* Nokia Phonet frames          */
 
 /*
  *	This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 84c3492..f9032c8 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -79,6 +79,7 @@
 	IFLA_LINKINFO,
 #define IFLA_LINKINFO IFLA_LINKINFO
 	IFLA_NET_NS_PID,
+	IFLA_IFALIAS,
 	__IFLA_MAX
 };
 
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
new file mode 100644
index 0000000..d70034b
--- /dev/null
+++ b/include/linux/if_phonet.h
@@ -0,0 +1,19 @@
+/*
+ * File: if_phonet.h
+ *
+ * Phonet interface kernel definitions
+ *
+ * Copyright (C) 2008 Nokia Corporation. All rights reserved.
+ */
+#ifndef LINUX_IF_PHONET_H
+#define LINUX_IF_PHONET_H
+
+#define PHONET_MIN_MTU		6	/* pn_length = 0 */
+#define PHONET_MAX_MTU		65541	/* pn_length = 0xffff */
+#define PHONET_DEV_MTU		PHONET_MAX_MTU
+
+#ifdef __KERNEL__
+extern struct header_ops phonet_header_ops;
+#endif
+
+#endif
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index ec6eb49..0f434a2 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -242,4 +242,164 @@
 	int			syncid;
 };
 
+/*
+ *
+ * IPVS Generic Netlink interface definitions
+ *
+ */
+
+/* Generic Netlink family info */
+
+#define IPVS_GENL_NAME		"IPVS"
+#define IPVS_GENL_VERSION	0x1
+
+struct ip_vs_flags {
+	__be32 flags;
+	__be32 mask;
+};
+
+/* Generic Netlink command attributes */
+enum {
+	IPVS_CMD_UNSPEC = 0,
+
+	IPVS_CMD_NEW_SERVICE,		/* add service */
+	IPVS_CMD_SET_SERVICE,		/* modify service */
+	IPVS_CMD_DEL_SERVICE,		/* delete service */
+	IPVS_CMD_GET_SERVICE,		/* get service info */
+
+	IPVS_CMD_NEW_DEST,		/* add destination */
+	IPVS_CMD_SET_DEST,		/* modify destination */
+	IPVS_CMD_DEL_DEST,		/* delete destination */
+	IPVS_CMD_GET_DEST,		/* get destination info */
+
+	IPVS_CMD_NEW_DAEMON,		/* start sync daemon */
+	IPVS_CMD_DEL_DAEMON,		/* stop sync daemon */
+	IPVS_CMD_GET_DAEMON,		/* get sync daemon status */
+
+	IPVS_CMD_SET_CONFIG,		/* set config settings */
+	IPVS_CMD_GET_CONFIG,		/* get config settings */
+
+	IPVS_CMD_SET_INFO,		/* only used in GET_INFO reply */
+	IPVS_CMD_GET_INFO,		/* get general IPVS info */
+
+	IPVS_CMD_ZERO,			/* zero all counters and stats */
+	IPVS_CMD_FLUSH,			/* flush services and dests */
+
+	__IPVS_CMD_MAX,
+};
+
+#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1)
+
+/* Attributes used in the first level of commands */
+enum {
+	IPVS_CMD_ATTR_UNSPEC = 0,
+	IPVS_CMD_ATTR_SERVICE,		/* nested service attribute */
+	IPVS_CMD_ATTR_DEST,		/* nested destination attribute */
+	IPVS_CMD_ATTR_DAEMON,		/* nested sync daemon attribute */
+	IPVS_CMD_ATTR_TIMEOUT_TCP,	/* TCP connection timeout */
+	IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,	/* TCP FIN wait timeout */
+	IPVS_CMD_ATTR_TIMEOUT_UDP,	/* UDP timeout */
+	__IPVS_CMD_ATTR_MAX,
+};
+
+#define IPVS_CMD_ATTR_MAX (__IPVS_CMD_ATTR_MAX - 1)
+
+/*
+ * Attributes used to describe a service
+ *
+ * Used inside nested attribute IPVS_CMD_ATTR_SERVICE
+ */
+enum {
+	IPVS_SVC_ATTR_UNSPEC = 0,
+	IPVS_SVC_ATTR_AF,		/* address family */
+	IPVS_SVC_ATTR_PROTOCOL,		/* virtual service protocol */
+	IPVS_SVC_ATTR_ADDR,		/* virtual service address */
+	IPVS_SVC_ATTR_PORT,		/* virtual service port */
+	IPVS_SVC_ATTR_FWMARK,		/* firewall mark of service */
+
+	IPVS_SVC_ATTR_SCHED_NAME,	/* name of scheduler */
+	IPVS_SVC_ATTR_FLAGS,		/* virtual service flags */
+	IPVS_SVC_ATTR_TIMEOUT,		/* persistent timeout */
+	IPVS_SVC_ATTR_NETMASK,		/* persistent netmask */
+
+	IPVS_SVC_ATTR_STATS,		/* nested attribute for service stats */
+	__IPVS_SVC_ATTR_MAX,
+};
+
+#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
+
+/*
+ * Attributes used to describe a destination (real server)
+ *
+ * Used inside nested attribute IPVS_CMD_ATTR_DEST
+ */
+enum {
+	IPVS_DEST_ATTR_UNSPEC = 0,
+	IPVS_DEST_ATTR_ADDR,		/* real server address */
+	IPVS_DEST_ATTR_PORT,		/* real server port */
+
+	IPVS_DEST_ATTR_FWD_METHOD,	/* forwarding method */
+	IPVS_DEST_ATTR_WEIGHT,		/* destination weight */
+
+	IPVS_DEST_ATTR_U_THRESH,	/* upper threshold */
+	IPVS_DEST_ATTR_L_THRESH,	/* lower threshold */
+
+	IPVS_DEST_ATTR_ACTIVE_CONNS,	/* active connections */
+	IPVS_DEST_ATTR_INACT_CONNS,	/* inactive connections */
+	IPVS_DEST_ATTR_PERSIST_CONNS,	/* persistent connections */
+
+	IPVS_DEST_ATTR_STATS,		/* nested attribute for dest stats */
+	__IPVS_DEST_ATTR_MAX,
+};
+
+#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1)
+
+/*
+ * Attributes describing a sync daemon
+ *
+ * Used inside nested attribute IPVS_CMD_ATTR_DAEMON
+ */
+enum {
+	IPVS_DAEMON_ATTR_UNSPEC = 0,
+	IPVS_DAEMON_ATTR_STATE,		/* sync daemon state (master/backup) */
+	IPVS_DAEMON_ATTR_MCAST_IFN,	/* multicast interface name */
+	IPVS_DAEMON_ATTR_SYNC_ID,	/* SyncID we belong to */
+	__IPVS_DAEMON_ATTR_MAX,
+};
+
+#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1)
+
+/*
+ * Attributes used to describe service or destination entry statistics
+ *
+ * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
+ */
+enum {
+	IPVS_STATS_ATTR_UNSPEC = 0,
+	IPVS_STATS_ATTR_CONNS,		/* connections scheduled */
+	IPVS_STATS_ATTR_INPKTS,		/* incoming packets */
+	IPVS_STATS_ATTR_OUTPKTS,	/* outgoing packets */
+	IPVS_STATS_ATTR_INBYTES,	/* incoming bytes */
+	IPVS_STATS_ATTR_OUTBYTES,	/* outgoing bytes */
+
+	IPVS_STATS_ATTR_CPS,		/* current connection rate */
+	IPVS_STATS_ATTR_INPPS,		/* current in packet rate */
+	IPVS_STATS_ATTR_OUTPPS,		/* current out packet rate */
+	IPVS_STATS_ATTR_INBPS,		/* current in byte rate */
+	IPVS_STATS_ATTR_OUTBPS,		/* current out byte rate */
+	__IPVS_STATS_ATTR_MAX,
+};
+
+#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1)
+
+/* Attributes used in response to IPVS_CMD_GET_INFO command */
+enum {
+	IPVS_INFO_ATTR_UNSPEC = 0,
+	IPVS_INFO_ATTR_VERSION,		/* IPVS version number */
+	IPVS_INFO_ATTR_CONN_TAB_SIZE,	/* size of connection hash table */
+	__IPVS_INFO_ATTR_MAX,
+};
+
+#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1)
+
 #endif	/* _IP_VS_H */
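
All of the IPVS netlink enums above end with a __..._MAX sentinel and define the public MAX value as sentinel minus one; that is also why IPVS_CMD_ATTR_MAX must be derived from __IPVS_CMD_ATTR_MAX rather than a neighbouring enum. A stand-alone illustration of the pattern (the DEMO_* names are invented):

#include <stdio.h>

enum demo_attr {
	DEMO_ATTR_UNSPEC = 0,
	DEMO_ATTR_ADDR,
	DEMO_ATTR_PORT,
	__DEMO_ATTR_MAX,		/* keep last */
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static int policy_len[DEMO_ATTR_MAX + 1];	/* one slot per valid attribute */

int main(void)
{
	printf("max attr id = %d, table size = %zu\n",
	       DEMO_ATTR_MAX, sizeof(policy_len) / sizeof(policy_len[0]));
	return 0;
}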
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 8687a7d..4c218ee 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -157,7 +157,7 @@
 
 typedef struct {
   int mp_mrru;                        /* unused                             */
-  struct sk_buff * frags;	/* fragments sl list -- use skb->next */
+  struct sk_buff_head frags;	/* fragments sl list */
   long frames;			/* number of frames in the frame list */
   unsigned int seq;		/* last processed packet seq #: any packets
   				 * with smaller seq # will be dropped
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 1207857..cbbbe9b 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,9 +17,14 @@
 
 struct mv643xx_eth_shared_platform_data {
 	struct mbus_dram_target_info	*dram;
+	struct platform_device	*shared_smi;
 	unsigned int		t_clk;
 };
 
+#define MV643XX_ETH_PHY_ADDR_DEFAULT	0
+#define MV643XX_ETH_PHY_ADDR(x)		(0x80 | (x))
+#define MV643XX_ETH_PHY_NONE		0xff
+
 struct mv643xx_eth_platform_data {
 	/*
 	 * Pointer back to our parent instance, and our port number.
@@ -30,8 +35,6 @@
 	/*
 	 * Whether a PHY is present, and if yes, at which address.
 	 */
-	struct platform_device	*shared_smi;
-	int			force_phy_addr;
 	int			phy_addr;
 
 	/*
@@ -49,10 +52,10 @@
 	int			duplex;
 
 	/*
-	 * Which RX/TX queues to use.
+	 * How many RX/TX queues to use.
 	 */
-	int			rx_queue_mask;
-	int			tx_queue_mask;
+	int			rx_queue_count;
+	int			tx_queue_count;
 
 	/*
 	 * Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 488c56e..9cfd20b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -471,6 +471,8 @@
 	char			name[IFNAMSIZ];
 	/* device name hash chain */
 	struct hlist_node	name_hlist;
+	/* snmp alias */
+	char 			*ifalias;
 
 	/*
 	 *	I/O specific fields
@@ -1223,7 +1225,8 @@
 extern int		dev_ethtool(struct net *net, struct ifreq *);
 extern unsigned		dev_get_flags(const struct net_device *);
 extern int		dev_change_flags(struct net_device *, unsigned);
-extern int		dev_change_name(struct net_device *, char *);
+extern int		dev_change_name(struct net_device *, const char *);
+extern int		dev_set_alias(struct net_device *, const char *, size_t);
 extern int		dev_change_net_namespace(struct net_device *,
 						 struct net *, const char *);
 extern int		dev_set_mtu(struct net_device *, int);
@@ -1667,7 +1670,7 @@
 extern int netdev_class_create_file(struct class_attribute *class_attr);
 extern void netdev_class_remove_file(struct class_attribute *class_attr);
 
-extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);
+extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
 
 extern void linkwatch_run_queue(void);
 
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9ff1b54..cbba776 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -220,7 +220,7 @@
 	int		(*dump)(struct sk_buff * skb, struct netlink_callback *cb);
 	int		(*done)(struct netlink_callback *cb);
 	int		family;
-	long		args[6];
+	long		args[7];
 };
 
 struct netlink_notify
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2be7c63..9bad6540 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -89,6 +89,22 @@
  * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
  *	or, if no MAC address given, all mesh paths, on the interface identified
  *	by %NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
+ *	%NL80211_ATTR_IFINDEX.
+ *
+ * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
+ *	after being queried by the kernel. CRDA replies by sending a regulatory
+ *	domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
+ *	current alpha2 if it found a match. It also provides
+ * 	%NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each
+ * 	regulatory rule is a nested set of attributes given by
+ * 	%NL80211_ATTR_REG_RULE_FREQ_[START|END] and
+ * 	%NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by
+ * 	%NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and
+ * 	%NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
+ * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
+ * 	to the specified ISO/IEC 3166-1 alpha2 country code. The core will
+ * 	store this as a valid request and then query userspace for it.
  *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
@@ -127,13 +143,23 @@
 	NL80211_CMD_NEW_MPATH,
 	NL80211_CMD_DEL_MPATH,
 
-	/* add commands here */
+	NL80211_CMD_SET_BSS,
+
+	NL80211_CMD_SET_REG,
+	NL80211_CMD_REQ_SET_REG,
+
+	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
 	__NL80211_CMD_AFTER_LAST,
 	NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
 };
 
+/*
+ * Allow user space programs to use #ifdef on new commands by defining them
+ * here
+ */
+#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
 
 /**
  * enum nl80211_attrs - nl80211 netlink attributes
@@ -188,10 +214,34 @@
  * 	info given for %NL80211_CMD_GET_MPATH, nested attribute described at
  *	&enum nl80211_mpath_info.
  *
- *
  * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of
  *      &enum nl80211_mntr_flags.
  *
+ * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the
+ * 	current regulatory domain should be set to or is already set to.
+ * 	For example, 'CR', for Costa Rica. This attribute is used by the kernel
+ * 	to query the CRDA to retrieve one regulatory domain. This attribute can
+ * 	also be used by userspace to query the kernel for the currently set
+ * 	regulatory domain. We chose an alpha2 as that is also used by the
+ * 	IEEE-802.11d country information element to identify a country.
+ * 	Users can also simply ask the wireless core to set regulatory domain
+ * 	to a specific alpha2.
+ * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory
+ *	rules.
+ *
+ * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1)
+ * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled
+ *	(u8, 0 or 1)
+ * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
+ *	(u8, 0 or 1)
+ *
+ * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
+ *	association request when used with NL80211_CMD_NEW_STATION)
+ *
+ * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all
+ *	supported interface types, each a flag attribute with the number
+ *	of the interface mode.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -235,16 +285,35 @@
 	NL80211_ATTR_MPATH_NEXT_HOP,
 	NL80211_ATTR_MPATH_INFO,
 
+	NL80211_ATTR_BSS_CTS_PROT,
+	NL80211_ATTR_BSS_SHORT_PREAMBLE,
+	NL80211_ATTR_BSS_SHORT_SLOT_TIME,
+
+	NL80211_ATTR_HT_CAPABILITY,
+
+	NL80211_ATTR_SUPPORTED_IFTYPES,
+
+	NL80211_ATTR_REG_ALPHA2,
+	NL80211_ATTR_REG_RULES,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
 	NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
 };
 
+/*
+ * Allow user space programs to use #ifdef on new attributes by defining them
+ * here
+ */
+#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
+
 #define NL80211_MAX_SUPP_RATES			32
+#define NL80211_MAX_SUPP_REG_RULES		32
 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY	0
 #define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY	16
 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY	24
+#define NL80211_HT_CAPABILITY_LEN		26
 
 /**
  * enum nl80211_iftype - (virtual) interface types
@@ -436,6 +505,66 @@
 };
 
 /**
+ * enum nl80211_reg_rule_attr - regulatory rule attributes
+ * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
+ * 	considerations for a given frequency range. These are the
+ * 	&enum nl80211_reg_rule_flags.
+ * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory
+ * 	rule in KHz. This is not a center frequency but an actual regulatory
+ * 	band edge.
+ * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule
+ * 	in KHz. This is not a center frequency but an actual regulatory
+ * 	band edge.
+ * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
+ * 	frequency range, in KHz.
+ * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
+ * 	for a given frequency range. The value is in mBi (100 * dBi).
+ * 	If you don't have one then don't send this.
+ * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
+ * 	a given frequency range. The value is in mBm (100 * dBm).
+ */
+enum nl80211_reg_rule_attr {
+	__NL80211_REG_RULE_ATTR_INVALID,
+	NL80211_ATTR_REG_RULE_FLAGS,
+
+	NL80211_ATTR_FREQ_RANGE_START,
+	NL80211_ATTR_FREQ_RANGE_END,
+	NL80211_ATTR_FREQ_RANGE_MAX_BW,
+
+	NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+	NL80211_ATTR_POWER_RULE_MAX_EIRP,
+
+	/* keep last */
+	__NL80211_REG_RULE_ATTR_AFTER_LAST,
+	NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_reg_rule_flags - regulatory rule flags
+ *
+ * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
+ * @NL80211_RRF_NO_CCK: CCK modulation not allowed
+ * @NL80211_RRF_NO_INDOOR: indoor operation not allowed
+ * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed
+ * @NL80211_RRF_DFS: DFS support is required to be used
+ * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links
+ * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links
+ * @NL80211_RRF_PASSIVE_SCAN: passive scan is required
+ * @NL80211_RRF_NO_IBSS: no IBSS is allowed
+ */
+enum nl80211_reg_rule_flags {
+	NL80211_RRF_NO_OFDM		= 1<<0,
+	NL80211_RRF_NO_CCK		= 1<<1,
+	NL80211_RRF_NO_INDOOR		= 1<<2,
+	NL80211_RRF_NO_OUTDOOR		= 1<<3,
+	NL80211_RRF_DFS			= 1<<4,
+	NL80211_RRF_PTP_ONLY		= 1<<5,
+	NL80211_RRF_PTMP_ONLY		= 1<<6,
+	NL80211_RRF_PASSIVE_SCAN	= 1<<7,
+	NL80211_RRF_NO_IBSS		= 1<<8,
+};
+
+/**
  * enum nl80211_mntr_flags - monitor configuration flags
  *
  * Monitor configuration flags.
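
The nl80211_reg_rule_flags values are single-bit flags, so one regulatory rule can carry several restrictions and each is tested with a bitwise AND. A stand-alone sketch using a couple of the bit values defined above (the short RRF_* macro names here are local stand-ins for the NL80211_RRF_* ones):

#include <stdio.h>
#include <stdint.h>

#define RRF_NO_OFDM		(1 << 0)
#define RRF_PASSIVE_SCAN	(1 << 7)
#define RRF_NO_IBSS		(1 << 8)

int main(void)
{
	uint32_t rule_flags = RRF_PASSIVE_SCAN | RRF_NO_IBSS;

	if (rule_flags & RRF_PASSIVE_SCAN)
		printf("passive scan required\n");
	if (!(rule_flags & RRF_NO_OFDM))
		printf("OFDM allowed\n");
	return 0;
}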
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b3..a65b082 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1411,6 +1411,8 @@
 #define PCI_DEVICE_ID_EICON_MAESTRAQ_U	0xe013
 #define PCI_DEVICE_ID_EICON_MAESTRAP	0xe014
 
+#define PCI_VENDOR_ID_CISCO		0x1137
+
 #define PCI_VENDOR_ID_ZIATECH		0x1138
 #define PCI_DEVICE_ID_ZIATECH_5550_HC	0x5550
  
@@ -2213,6 +2215,7 @@
 
 #define PCI_VENDOR_ID_ATTANSIC		0x1969
 #define PCI_DEVICE_ID_ATTANSIC_L1	0x1048
+#define PCI_DEVICE_ID_ATTANSIC_L2	0x2048
 
 #define PCI_VENDOR_ID_JMICRON		0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360	0x2360
@@ -2244,6 +2247,16 @@
 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2	0x0007
 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V	0x0009
 
+#define PCI_VENDOR_ID_NETXEN		0x4040
+#define PCI_DEVICE_ID_NX2031_10GXSR	0x0001
+#define PCI_DEVICE_ID_NX2031_10GCX4	0x0002
+#define PCI_DEVICE_ID_NX2031_4GCU	0x0003
+#define PCI_DEVICE_ID_NX2031_IMEZ	0x0004
+#define PCI_DEVICE_ID_NX2031_HMEZ	0x0005
+#define PCI_DEVICE_ID_NX2031_XG_MGMT	0x0024
+#define PCI_DEVICE_ID_NX2031_XG_MGMT2	0x0025
+#define PCI_DEVICE_ID_NX3031		0x0100
+
 #define PCI_VENDOR_ID_AKS		0x416c
 #define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
 
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
new file mode 100644
index 0000000..3a027f5
--- /dev/null
+++ b/include/linux/phonet.h
@@ -0,0 +1,160 @@
+/**
+ * file phonet.h
+ *
+ * Phonet sockets kernel interface
+ *
+ * Copyright (C) 2008 Nokia Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef LINUX_PHONET_H
+#define LINUX_PHONET_H
+
+/* Automatic protocol selection */
+#define PN_PROTO_TRANSPORT	0
+/* Phonet datagram socket */
+#define PN_PROTO_PHONET		1
+#define PHONET_NPROTO		2
+
+#define PNADDR_ANY		0
+#define PNPORT_RESOURCE_ROUTING	0
+
+/* ioctls */
+#define SIOCPNGETOBJECT		(SIOCPROTOPRIVATE + 0)
+
+/* Phonet protocol header */
+struct phonethdr {
+	__u8	pn_rdev;
+	__u8	pn_sdev;
+	__u8	pn_res;
+	__be16	pn_length;
+	__u8	pn_robj;
+	__u8	pn_sobj;
+} __attribute__((packed));
+
+/* Common Phonet payload header */
+struct phonetmsg {
+	__u8	pn_trans_id;	/* transaction ID */
+	__u8	pn_msg_id;	/* message type */
+	union {
+		struct {
+			__u8	pn_submsg_id;	/* message subtype */
+			__u8	pn_data[5];
+		} base;
+		struct {
+			__u16	pn_e_res_id;	/* extended resource ID */
+			__u8	pn_e_submsg_id;	/* message subtype */
+			__u8	pn_e_data[3];
+		} ext;
+	} pn_msg_u;
+};
+#define PN_COMMON_MESSAGE	0xF0
+#define PN_PREFIX		0xE0 /* resource for extended messages */
+#define pn_submsg_id		pn_msg_u.base.pn_submsg_id
+#define pn_e_submsg_id		pn_msg_u.ext.pn_e_submsg_id
+#define pn_e_res_id		pn_msg_u.ext.pn_e_res_id
+#define pn_data			pn_msg_u.base.pn_data
+#define pn_e_data		pn_msg_u.ext.pn_e_data
+
+/* data for unreachable errors */
+#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP	0x01
+#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP	0x14
+#define pn_orig_msg_id		pn_data[0]
+#define pn_status		pn_data[1]
+#define pn_e_orig_msg_id	pn_e_data[0]
+#define pn_e_status		pn_e_data[1]
+
+/* Phonet socket address structure */
+struct sockaddr_pn {
+	sa_family_t spn_family;
+	__u8 spn_obj;
+	__u8 spn_dev;
+	__u8 spn_resource;
+	__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
+} __attribute__ ((packed));
+
+static inline __u16 pn_object(__u8 addr, __u16 port)
+{
+	return (addr << 8) | (port & 0x3ff);
+}
+
+static inline __u8 pn_obj(__u16 handle)
+{
+	return handle & 0xff;
+}
+
+static inline __u8 pn_dev(__u16 handle)
+{
+	return handle >> 8;
+}
+
+static inline __u16 pn_port(__u16 handle)
+{
+	return handle & 0x3ff;
+}
+
+static inline __u8 pn_addr(__u16 handle)
+{
+	return (handle >> 8) & 0xfc;
+}
+
+static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr)
+{
+	spn->spn_dev &= 0x03;
+	spn->spn_dev |= addr & 0xfc;
+}
+
+static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port)
+{
+	spn->spn_dev &= 0xfc;
+	spn->spn_dev |= (port >> 8) & 0x03;
+	spn->spn_obj = port & 0xff;
+}
+
+static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn,
+						__u16 handle)
+{
+	spn->spn_dev = pn_dev(handle);
+	spn->spn_obj = pn_obj(handle);
+}
+
+static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn,
+						__u8 resource)
+{
+	spn->spn_resource = resource;
+}
+
+static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn)
+{
+	return spn->spn_dev & 0xfc;
+}
+
+static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn)
+{
+	return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj;
+}
+
+static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn)
+{
+	return pn_object(spn->spn_dev, spn->spn_obj);
+}
+
+static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn)
+{
+	return spn->spn_resource;
+}
+
+#endif
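A small usage sketch of the helpers above (illustration only; the function name is hypothetical and a kernel context including <linux/phonet.h> is assumed). The 16-bit object handle packs the device address into its upper bits and the 10-bit port into its lower bits, the top two port bits overlapping the low bits of spn_dev:

	static void phonet_addr_example(struct sockaddr_pn *spn)
	{
		__u16 handle = pn_object(0x10, 0x3ff);	/* device 0x10, port 0x3ff */

		pn_sockaddr_set_object(spn, handle);
		/* now pn_sockaddr_get_addr(spn) == 0x10 and
		 * pn_sockaddr_get_port(spn) == 0x3ff */
	}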
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7224c40..5f170f5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -410,6 +410,8 @@
 
 int mdiobus_register(struct mii_bus *bus);
 void mdiobus_unregister(struct mii_bus *bus);
+struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+
 void phy_sanitize_settings(struct phy_device *phydev);
 int phy_stop_interrupts(struct phy_device *phydev);
 int phy_enable_interrupts(struct phy_device *phydev);
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index e5de421..5d921fa 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -123,6 +123,13 @@
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
 };
 
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+	__u16	bands;			/* Number of bands */
+	__u16	max_bands;		/* Maximum number of queues */
+};
+
 /* TBF section */
 
 struct tc_tbf_qopt
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 741d1a6..4cd64b0 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -49,6 +49,7 @@
 	RFKILL_STATE_SOFT_BLOCKED = 0,	/* Radio output blocked */
 	RFKILL_STATE_UNBLOCKED    = 1,	/* Radio output allowed */
 	RFKILL_STATE_HARD_BLOCKED = 2,	/* Output blocked, non-overrideable */
+	RFKILL_STATE_MAX,		/* marker for last valid state */
 };
 
 /*
@@ -110,12 +111,14 @@
 };
 #define to_rfkill(d)	container_of(d, struct rfkill, dev)
 
-struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type);
+struct rfkill * __must_check rfkill_allocate(struct device *parent,
+					     enum rfkill_type type);
 void rfkill_free(struct rfkill *rfkill);
-int rfkill_register(struct rfkill *rfkill);
+int __must_check rfkill_register(struct rfkill *rfkill);
 void rfkill_unregister(struct rfkill *rfkill);
 
 int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
+int rfkill_set_default(enum rfkill_type type, enum rfkill_state state);
 
 /**
  * rfkill_state_complement - return complementar state
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index ca643b1..2b3d51c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -582,6 +582,10 @@
 #define RTNLGRP_IPV6_RULE	RTNLGRP_IPV6_RULE
 	RTNLGRP_ND_USEROPT,
 #define RTNLGRP_ND_USEROPT	RTNLGRP_ND_USEROPT
+	RTNLGRP_PHONET_IFADDR,
+#define RTNLGRP_PHONET_IFADDR	RTNLGRP_PHONET_IFADDR
+	RTNLGRP_PHONET_ROUTE,
+#define RTNLGRP_PHONET_ROUTE	RTNLGRP_PHONET_ROUTE
 	__RTNLGRP_MAX
 };
 #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9099237..720b688 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -146,8 +146,14 @@
 	unsigned short	gso_segs;
 	unsigned short  gso_type;
 	__be32          ip6_frag_id;
+#ifdef CONFIG_HAS_DMA
+	unsigned int	num_dma_maps;
+#endif
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_HAS_DMA
+	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
+#endif
 };
 
 /* We divide dataref into two halves.  The higher 16 bits hold references
@@ -353,6 +359,14 @@
 
 #include <asm/system.h>
 
+#ifdef CONFIG_HAS_DMA
+#include <linux/dma-mapping.h>
+extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
+		       enum dma_data_direction dir);
+extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+			  enum dma_data_direction dir);
+#endif
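A sketch of how a driver transmit path might use these helpers (illustration only; the function is hypothetical, dev is assumed to be the driver's struct device, and the error check assumes a non-zero return on mapping failure):

	static int example_xmit(struct device *dev, struct sk_buff *skb)
	{
		if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
			return -ENOMEM;		/* could not map skb->data/frags */

		/* descriptor addresses would come from skb_shinfo(skb)->dma_maps[];
		 * once the hardware has finished with the buffer: */
		skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
		return 0;
	}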
+
 extern void kfree_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
@@ -369,6 +383,8 @@
 	return __alloc_skb(size, priority, 1, -1);
 }
 
+extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
@@ -459,6 +475,37 @@
 }
 
 /**
+ *	skb_queue_is_last - check if skb is the last entry in the queue
+ *	@list: queue head
+ *	@skb: buffer
+ *
+ *	Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+				     const struct sk_buff *skb)
+{
+	return (skb->next == (struct sk_buff *) list);
+}
+
+/**
+ *	skb_queue_next - return the next packet in the queue
+ *	@list: queue head
+ *	@skb: current buffer
+ *
+ *	Return the next packet in @list after @skb.  It is only valid to
+ *	call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+					     const struct sk_buff *skb)
+{
+	/* This BUG_ON may seem severe, but if we just return then we
+	 * are going to dereference garbage.
+	 */
+	BUG_ON(skb_queue_is_last(list, skb));
+	return skb->next;
+}
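For illustration (hypothetical helper; the caller is assumed to hold whatever lock protects the queue), the two accessors above support a forward walk that terminates cleanly at the queue head:

	static void process_queue(struct sk_buff_head *q)
	{
		struct sk_buff *skb = skb_peek(q);

		if (!skb)
			return;
		for (;;) {
			/* process skb here */
			if (skb_queue_is_last(q, skb))
				break;
			skb = skb_queue_next(q, skb);
		}
	}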
+
+/**
  *	skb_get - reference buffer
  *	@skb: buffer to reference
  *
@@ -646,6 +693,22 @@
 	return list_->qlen;
 }
 
+/**
+ *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ *	@list: queue to initialize
+ *
+ *	This initializes only the list and queue length aspects of
+ *	an sk_buff_head object.  This allows the list aspects of an
+ *	sk_buff_head to be initialized without reinitializing things
+ *	like the spinlock.  It can also be used for on-stack sk_buff_head
+ *	objects where the spinlock is known to not be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+
 /*
  * This function creates a split out lock class for each invocation;
  * this is needed for now since a whole lot of users of the skb-queue
@@ -657,8 +720,7 @@
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	list->prev = list->next = (struct sk_buff *)list;
-	list->qlen = 0;
+	__skb_queue_head_init(list);
 }
 
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
@@ -685,6 +747,83 @@
 	list->qlen++;
 }
 
+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+				      struct sk_buff *prev,
+				      struct sk_buff *next)
+{
+	struct sk_buff *first = list->next;
+	struct sk_buff *last = list->prev;
+
+	first->prev = prev;
+	prev->next = first;
+
+	last->next = next;
+	next->prev = last;
+}
+
+/**
+ *	skb_queue_splice - join two skb lists; this is designed for stacks
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+				    struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
+
+/**
+ *	skb_queue_splice_tail - join two skb lists, each list being a queue
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	Each of the lists is a queue.
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+					      struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
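A usage sketch for the splice helpers (illustration only; names are hypothetical): batch is an on-stack list set up with __skb_queue_head_init(), so its unused spinlock is never touched, and the whole batch is appended to a locked queue in one operation:

	static void flush_batch(struct sk_buff_head *rxq, struct sk_buff_head *batch)
	{
		unsigned long flags;

		spin_lock_irqsave(&rxq->lock, flags);
		/* append everything on *batch to *rxq and leave *batch empty */
		skb_queue_splice_tail_init(batch, rxq);
		spin_unlock_irqrestore(&rxq->lock, flags);
	}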
+
 /**
  *	__skb_queue_after - queue a buffer at the list head
  *	@list: list to use
@@ -1434,6 +1573,15 @@
 		     skb != (struct sk_buff *)(queue);				\
 		     skb = tmp, tmp = skb->next)
 
+#define skb_queue_walk_from(queue, skb)						\
+		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
+		     skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp)				\
+		for (tmp = skb->next;						\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->next)
+
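The *_from variants resume iteration at an arbitrary element; a sketch (hypothetical helper), counting the buffers from @skb to the end of the queue:

	static unsigned int count_from(struct sk_buff_head *q, struct sk_buff *skb)
	{
		unsigned int n = 0;

		skb_queue_walk_from(q, skb)
			n++;
		return n;
	}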
 #define skb_queue_reverse_walk(queue, skb) \
 		for (skb = (queue)->prev;					\
 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
diff --git a/include/linux/socket.h b/include/linux/socket.h
index dc5086f..818ca33 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -190,7 +190,8 @@
 #define AF_IUCV		32	/* IUCV sockets			*/
 #define AF_RXRPC	33	/* RxRPC sockets 		*/
 #define AF_ISDN		34	/* mISDN sockets 		*/
-#define AF_MAX		35	/* For now.. */
+#define AF_PHONET	35	/* Phonet sockets		*/
+#define AF_MAX		36	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -227,6 +228,7 @@
 #define PF_IUCV		AF_IUCV
 #define PF_RXRPC	AF_RXRPC
 #define PF_ISDN		AF_ISDN
+#define PF_PHONET	AF_PHONET
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index ebad0ba..99a0f99 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -245,8 +245,6 @@
 
 /* SPROM Revision 3 (inherits most data from rev 2) */
 #define SSB_SPROM3_IL0MAC		0x104A	/* 6 bytes MAC address for 802.11b/g */
-#define SSB_SPROM3_ET0MAC		0x1050	/* 6 bytes MAC address for Ethernet ?? */
-#define SSB_SPROM3_ET1MAC		0x1050	/* 6 bytes MAC address for 802.11a ?? */
 #define SSB_SPROM3_OFDMAPO		0x102C	/* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
 #define SSB_SPROM3_OFDMALPO		0x1030	/* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
 #define SSB_SPROM3_OFDMAHPO		0x1034	/* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
@@ -267,8 +265,6 @@
 
 /* SPROM Revision 4 */
 #define SSB_SPROM4_IL0MAC		0x104C	/* 6 byte MAC address for a/b/g/n */
-#define SSB_SPROM4_ET0MAC		0x1018	/* 6 bytes MAC address for Ethernet ?? */
-#define SSB_SPROM4_ET1MAC		0x1018	/* 6 bytes MAC address for 802.11a ?? */
 #define SSB_SPROM4_ETHPHY		0x105A	/* Ethernet PHY settings ?? */
 #define  SSB_SPROM4_ETHPHY_ET0A		0x001F	/* MII Address for enet0 */
 #define  SSB_SPROM4_ETHPHY_ET1A		0x03E0	/* MII Address for enet1 */
@@ -316,6 +312,21 @@
 #define SSB_SPROM4_PA1B1		0x1090
 #define SSB_SPROM4_PA1B2		0x1092
 
+/* SPROM Revision 5 (inherits most data from rev 4) */
+#define SSB_SPROM5_BFLLO		0x104A	/* Boardflags (low 16 bits) */
+#define SSB_SPROM5_BFLHI		0x104C	/* Boardflags (high 16 bits) */
+#define SSB_SPROM5_IL0MAC		0x1052	/* 6 byte MAC address for a/b/g/n */
+#define SSB_SPROM5_CCODE		0x1044	/* Country Code (2 bytes) */
+#define SSB_SPROM5_GPIOA		0x1076	/* Gen. Purpose IO # 0 and 1 */
+#define  SSB_SPROM5_GPIOA_P0		0x00FF	/* Pin 0 */
+#define  SSB_SPROM5_GPIOA_P1		0xFF00	/* Pin 1 */
+#define  SSB_SPROM5_GPIOA_P1_SHIFT	8
+#define SSB_SPROM5_GPIOB		0x1078	/* Gen. Purpose IO # 2 and 3 */
+#define  SSB_SPROM5_GPIOB_P2		0x00FF	/* Pin 2 */
+#define  SSB_SPROM5_GPIOB_P3		0xFF00	/* Pin 3 */
+#define  SSB_SPROM5_GPIOB_P3_SHIFT	8
+
 /* Values for SSB_SPROM1_BINF_CCODE */
 enum {
 	SSB_SPROM1CCODE_WORLD = 0,
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
index 6dac0d7..7699093 100644
--- a/include/linux/tc_act/Kbuild
+++ b/include/linux/tc_act/Kbuild
@@ -3,3 +3,4 @@
 header-y += tc_mirred.h
 header-y += tc_pedit.h
 header-y += tc_nat.h
+header-y += tc_skbedit.h
diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h
new file mode 100644
index 0000000..a14e461
--- /dev/null
+++ b/include/linux/tc_act/tc_skbedit.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#ifndef __LINUX_TC_SKBEDIT_H
+#define __LINUX_TC_SKBEDIT_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBEDIT 11
+
+#define SKBEDIT_F_PRIORITY		0x1
+#define SKBEDIT_F_QUEUE_MAPPING		0x2
+
+struct tc_skbedit {
+	tc_gen;
+};
+
+enum {
+	TCA_SKBEDIT_UNSPEC,
+	TCA_SKBEDIT_TM,
+	TCA_SKBEDIT_PARMS,
+	TCA_SKBEDIT_PRIORITY,
+	TCA_SKBEDIT_QUEUE_MAPPING,
+	__TCA_SKBEDIT_MAX
+};
+#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
+
+#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2e25573..7672906 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -342,7 +342,6 @@
 	struct sk_buff* lost_skb_hint;
 	struct sk_buff *scoreboard_skb_hint;
 	struct sk_buff *retransmit_skb_hint;
-	struct sk_buff *forward_skb_hint;
 
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 
@@ -358,7 +357,7 @@
 					 */
 
 	int     lost_cnt_hint;
-	int     retransmit_cnt_hint;
+	u32     retransmit_high;	/* L-bits may be on up to this seqno */
 
 	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e007508..0e85ec3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -152,6 +152,7 @@
 	u16 aid;
 	u8 supported_rates_len;
 	u8 plink_action;
+	struct ieee80211_ht_cap *ht_capa;
 };
 
 /**
@@ -268,6 +269,83 @@
 	u8 flags;
 };
 
+/**
+ * struct bss_parameters - BSS parameters
+ *
+ * Used to change BSS parameters (mainly for AP mode).
+ *
+ * @use_cts_prot: Whether to use CTS protection
+ *	(0 = no, 1 = yes, -1 = do not change)
+ * @use_short_preamble: Whether the use of short preambles is allowed
+ *	(0 = no, 1 = yes, -1 = do not change)
+ * @use_short_slot_time: Whether the use of short slot time is allowed
+ *	(0 = no, 1 = yes, -1 = do not change)
+ */
+struct bss_parameters {
+	int use_cts_prot;
+	int use_short_preamble;
+	int use_short_slot_time;
+};
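The -1 convention lets a caller change one parameter at a time; for example (illustration only), enabling CTS protection while leaving preamble and slot time untouched:

	struct bss_parameters params = {
		.use_cts_prot		= 1,
		.use_short_preamble	= -1,	/* do not change */
		.use_short_slot_time	= -1,	/* do not change */
	};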
+
+/**
+ * enum reg_set_by - Indicates who is trying to set the regulatory domain
+ * @REGDOM_SET_BY_INIT: regulatory domain was set by initialization. We will be
+ * 	using a static world regulatory domain by default.
+ * @REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world regulatory domain.
+ * @REGDOM_SET_BY_USER: User asked the wireless core to set the
+ * 	regulatory domain.
+ * @REGDOM_SET_BY_DRIVER: a wireless driver has hinted to the wireless core
+ *	that it thinks it knows the regulatory domain we should be in.
+ * @REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an 802.11 country
+ *	information element with regulatory information it thinks we
+ *	should consider.
+ */
+enum reg_set_by {
+	REGDOM_SET_BY_INIT,
+	REGDOM_SET_BY_CORE,
+	REGDOM_SET_BY_USER,
+	REGDOM_SET_BY_DRIVER,
+	REGDOM_SET_BY_COUNTRY_IE,
+};
+
+struct ieee80211_freq_range {
+	u32 start_freq_khz;
+	u32 end_freq_khz;
+	u32 max_bandwidth_khz;
+};
+
+struct ieee80211_power_rule {
+	u32 max_antenna_gain;
+	u32 max_eirp;
+};
+
+struct ieee80211_reg_rule {
+	struct ieee80211_freq_range freq_range;
+	struct ieee80211_power_rule power_rule;
+	u32 flags;
+};
+
+struct ieee80211_regdomain {
+	u32 n_reg_rules;
+	char alpha2[2];
+	struct ieee80211_reg_rule reg_rules[];
+};
+
+#define MHZ_TO_KHZ(freq) (freq * 1000)
+#define KHZ_TO_MHZ(freq) (freq / 1000)
+#define DBI_TO_MBI(gain) (gain * 100)
+#define MBI_TO_DBI(gain) (gain / 100)
+#define DBM_TO_MBM(gain) (gain * 100)
+#define MBM_TO_DBM(gain) (gain / 100)
+
+#define REG_RULE(start, end, bw, gain, eirp, reg_flags) { \
+	.freq_range.start_freq_khz = (start) * 1000, \
+	.freq_range.end_freq_khz = (end) * 1000, \
+	.freq_range.max_bandwidth_khz = (bw) * 1000, \
+	.power_rule.max_antenna_gain = (gain) * 100, \
+	.power_rule.max_eirp = (eirp) * 100, \
+	.flags = reg_flags, \
+	}
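REG_RULE() lets a domain be written in MHz/dBi/dBm while the structure stores kHz/mBi/mBm. A hypothetical two-rule domain (illustration only, assuming nl80211.h is also included for the NL80211_RRF_* flags) could look like:

	static const struct ieee80211_regdomain example_regdom = {
		.n_reg_rules = 2,
		.alpha2 = "00",
		.reg_rules = {
			/* 2.4 GHz, channels 1..11, 40 MHz, 6 dBi, 27 dBm */
			REG_RULE(2412 - 10, 2462 + 10, 40, 6, 27, 0),
			/* 5 GHz radar band: DFS, passive scan, no IBSS */
			REG_RULE(5260 - 10, 5700 + 10, 40, 6, 20,
				 NL80211_RRF_DFS | NL80211_RRF_PASSIVE_SCAN |
				 NL80211_RRF_NO_IBSS),
		},
	};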
 
 /* from net/wireless.h */
 struct wiphy;
@@ -285,11 +363,13 @@
  * wireless extensions but this is subject to reevaluation as soon as this
  * code is used more widely and we have a first user without wext.
  *
- * @add_virtual_intf: create a new virtual interface with the given name
+ * @add_virtual_intf: create a new virtual interface with the given name,
+ *	must set the struct wireless_dev's iftype.
  *
  * @del_virtual_intf: remove the virtual interface determined by ifindex.
  *
- * @change_virtual_intf: change type of virtual interface
+ * @change_virtual_intf: change type/configuration of virtual interface,
+ *	keep the struct wireless_dev's iftype updated.
  *
  * @add_key: add a key with the given parameters. @mac_addr will be %NULL
  *	when adding a group key.
@@ -318,6 +398,8 @@
  * @change_station: Modify a given station.
  *
  * @set_mesh_cfg: set mesh parameters (by now, just mesh id)
+ *
+ * @change_bss: Modify parameters for a given BSS.
  */
 struct cfg80211_ops {
 	int	(*add_virtual_intf)(struct wiphy *wiphy, char *name,
@@ -370,6 +452,9 @@
 	int	(*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
 			       int idx, u8 *dst, u8 *next_hop,
 			       struct mpath_info *pinfo);
+
+	int	(*change_bss)(struct wiphy *wiphy, struct net_device *dev,
+			      struct bss_parameters *params);
 };
 
 #endif /* __NET_CFG80211_H */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index b31399e..6048579 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -190,10 +190,6 @@
 #endif
 #include <net/iw_handler.h>	/* new driver API */
 
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E	/* Port Access Entity (IEEE 802.1X) */
-#endif				/* ETH_P_PAE */
-
 #define ETH_P_PREAUTH 0x88C7	/* IEEE 802.11i pre-authentication */
 
 #ifndef ETH_P_80211_RAW
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 2ff545a..03cffd9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -51,12 +51,14 @@
 				  char __user *optval, int optlen);
 	int	    (*getsockopt)(struct sock *sk, int level, int optname, 
 				  char __user *optval, int __user *optlen);
+#ifdef CONFIG_COMPAT
 	int	    (*compat_setsockopt)(struct sock *sk,
 				int level, int optname,
 				char __user *optval, int optlen);
 	int	    (*compat_getsockopt)(struct sock *sk,
 				int level, int optname,
 				char __user *optval, int __user *optlen);
+#endif
 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
 	int	    (*bind_conflict)(const struct sock *sk,
 				     const struct inet_bind_bucket *tb);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7312c3d..33e2ac6 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,11 +21,103 @@
 #include <linux/timer.h>
 
 #include <net/checksum.h>
+#include <linux/netfilter.h>		/* for union nf_inet_addr */
+#include <linux/ipv6.h>			/* for struct ipv6hdr */
+#include <net/ipv6.h>			/* for ipv6_addr_copy */
+
+struct ip_vs_iphdr {
+	int len;
+	__u8 protocol;
+	union nf_inet_addr saddr;
+	union nf_inet_addr daddr;
+};
+
+static inline void
+ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6) {
+		const struct ipv6hdr *iph = nh;
+		iphdr->len = sizeof(struct ipv6hdr);
+		iphdr->protocol = iph->nexthdr;
+		ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
+		ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+	} else
+#endif
+	{
+		const struct iphdr *iph = nh;
+		iphdr->len = iph->ihl * 4;
+		iphdr->protocol = iph->protocol;
+		iphdr->saddr.ip = iph->saddr;
+		iphdr->daddr.ip = iph->daddr;
+	}
+}
+
+static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
+				   const union nf_inet_addr *src)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		ipv6_addr_copy(&dst->in6, &src->in6);
+	else
+#endif
+	dst->ip = src->ip;
+}
+
+static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
+				   const union nf_inet_addr *b)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		return ipv6_addr_equal(&a->in6, &b->in6);
+#endif
+	return a->ip == b->ip;
+}
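A sketch of how the address-family-agnostic helpers above combine (illustration only; the function is hypothetical):

	static int same_source(int af, const struct sk_buff *skb,
			       const union nf_inet_addr *who)
	{
		struct ip_vs_iphdr iph;

		ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
		return ip_vs_addr_equal(af, &iph.saddr, who);
	}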
 
 #ifdef CONFIG_IP_VS_DEBUG
 #include <linux/net.h>
 
 extern int ip_vs_get_debug_level(void);
+
+static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
+					 const union nf_inet_addr *addr,
+					 int *idx)
+{
+	int len;
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		len = snprintf(&buf[*idx], buf_len - *idx, "[" NIP6_FMT "]",
+			       NIP6(addr->in6)) + 1;
+	else
+#endif
+		len = snprintf(&buf[*idx], buf_len - *idx, NIPQUAD_FMT,
+			       NIPQUAD(addr->ip)) + 1;
+
+	*idx += len;
+	BUG_ON(*idx > buf_len + 1);
+	return &buf[*idx - len];
+}
+
+#define IP_VS_DBG_BUF(level, msg...)			\
+    do {						\
+	    char ip_vs_dbg_buf[160];			\
+	    int ip_vs_dbg_idx = 0;			\
+	    if (level <= ip_vs_get_debug_level())	\
+		    printk(KERN_DEBUG "IPVS: " msg);	\
+    } while (0)
+#define IP_VS_ERR_BUF(msg...)				\
+    do {						\
+	    char ip_vs_dbg_buf[160];			\
+	    int ip_vs_dbg_idx = 0;			\
+	    printk(KERN_ERR "IPVS: " msg);		\
+    } while (0)
+
+/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
+#define IP_VS_DBG_ADDR(af, addr)			\
+    ip_vs_dbg_addr(af, ip_vs_dbg_buf,			\
+		   sizeof(ip_vs_dbg_buf), addr,		\
+		   &ip_vs_dbg_idx)
+
 #define IP_VS_DBG(level, msg...)			\
     do {						\
 	    if (level <= ip_vs_get_debug_level())	\
@@ -48,6 +140,8 @@
 		pp->debug_packet(pp, skb, ofs, msg);	\
     } while (0)
 #else	/* NO DEBUGGING at ALL */
+#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
+#define IP_VS_ERR_BUF(msg...)  do {} while (0)
 #define IP_VS_DBG(level, msg...)  do {} while (0)
 #define IP_VS_DBG_RL(msg...)  do {} while (0)
 #define IP_VS_DBG_PKT(level, pp, skb, ofs, msg)		do {} while (0)
@@ -160,27 +254,10 @@
 
 struct ip_vs_stats
 {
-	__u32                   conns;          /* connections scheduled */
-	__u32                   inpkts;         /* incoming packets */
-	__u32                   outpkts;        /* outgoing packets */
-	__u64                   inbytes;        /* incoming bytes */
-	__u64                   outbytes;       /* outgoing bytes */
-
-	__u32			cps;		/* current connection rate */
-	__u32			inpps;		/* current in packet rate */
-	__u32			outpps;		/* current out packet rate */
-	__u32			inbps;		/* current in byte rate */
-	__u32			outbps;		/* current out byte rate */
-
-	/*
-	 * Don't add anything before the lock, because we use memcpy() to copy
-	 * the members before the lock to struct ip_vs_stats_user in
-	 * ip_vs_ctl.c.
-	 */
+	struct ip_vs_stats_user	ustats;         /* statistics */
+	struct ip_vs_estimator	est;		/* estimator */
 
 	spinlock_t              lock;           /* spin lock */
-
-	struct ip_vs_estimator	est;		/* estimator */
 };
 
 struct dst_entry;
@@ -202,21 +279,23 @@
 
 	void (*exit)(struct ip_vs_protocol *pp);
 
-	int (*conn_schedule)(struct sk_buff *skb,
+	int (*conn_schedule)(int af, struct sk_buff *skb,
 			     struct ip_vs_protocol *pp,
 			     int *verdict, struct ip_vs_conn **cpp);
 
 	struct ip_vs_conn *
-	(*conn_in_get)(const struct sk_buff *skb,
+	(*conn_in_get)(int af,
+		       const struct sk_buff *skb,
 		       struct ip_vs_protocol *pp,
-		       const struct iphdr *iph,
+		       const struct ip_vs_iphdr *iph,
 		       unsigned int proto_off,
 		       int inverse);
 
 	struct ip_vs_conn *
-	(*conn_out_get)(const struct sk_buff *skb,
+	(*conn_out_get)(int af,
+			const struct sk_buff *skb,
 			struct ip_vs_protocol *pp,
-			const struct iphdr *iph,
+			const struct ip_vs_iphdr *iph,
 			unsigned int proto_off,
 			int inverse);
 
@@ -226,7 +305,8 @@
 	int (*dnat_handler)(struct sk_buff *skb,
 			    struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
 
-	int (*csum_check)(struct sk_buff *skb, struct ip_vs_protocol *pp);
+	int (*csum_check)(int af, struct sk_buff *skb,
+			  struct ip_vs_protocol *pp);
 
 	const char *(*state_name)(int state);
 
@@ -259,9 +339,10 @@
 	struct list_head        c_list;         /* hashed list heads */
 
 	/* Protocol, addresses and port numbers */
-	__be32                   caddr;          /* client address */
-	__be32                   vaddr;          /* virtual address */
-	__be32                   daddr;          /* destination address */
+	u16                      af;		/* address family */
+	union nf_inet_addr       caddr;          /* client address */
+	union nf_inet_addr       vaddr;          /* virtual address */
+	union nf_inet_addr       daddr;          /* destination address */
 	__be16                   cport;
 	__be16                   vport;
 	__be16                   dport;
@@ -305,6 +386,45 @@
 
 
 /*
+ *	Extended internal versions of struct ip_vs_service_user and
+ *	ip_vs_dest_user for IPv6 support.
+ *
+ *	We need these to conveniently pass around service and destination
+ *	options, but unfortunately, we also need to keep the old definitions to
+ *	maintain userspace backwards compatibility for the setsockopt interface.
+ */
+struct ip_vs_service_user_kern {
+	/* virtual service addresses */
+	u16			af;
+	u16			protocol;
+	union nf_inet_addr	addr;		/* virtual ip address */
+	u16			port;
+	u32			fwmark;		/* firewall mark of service */
+
+	/* virtual service options */
+	char			*sched_name;
+	unsigned		flags;		/* virtual service flags */
+	unsigned		timeout;	/* persistent timeout in sec */
+	u32			netmask;	/* persistent netmask */
+};
+
+
+struct ip_vs_dest_user_kern {
+	/* destination server address */
+	union nf_inet_addr	addr;
+	u16			port;
+
+	/* real server options */
+	unsigned		conn_flags;	/* connection flags */
+	int			weight;		/* destination weight */
+
+	/* thresholds for active connections */
+	u32			u_threshold;	/* upper threshold */
+	u32			l_threshold;	/* lower threshold */
+};
+
+
+/*
  *	The information about the virtual service offered to the net
  *	and the forwarding entries
  */
@@ -314,8 +434,9 @@
 	atomic_t		refcnt;   /* reference counter */
 	atomic_t		usecnt;   /* use counter */
 
+	u16			af;       /* address family */
 	__u16			protocol; /* which protocol (TCP/UDP) */
-	__be32			addr;	  /* IP address for virtual service */
+	union nf_inet_addr	addr;	  /* IP address for virtual service */
 	__be16			port;	  /* port number for the service */
 	__u32                   fwmark;   /* firewall mark of the service */
 	unsigned		flags;	  /* service status flags */
@@ -342,7 +463,8 @@
 	struct list_head	n_list;   /* for the dests in the service */
 	struct list_head	d_list;   /* for table with all the dests */
 
-	__be32			addr;		/* IP address of the server */
+	u16			af;		/* address family */
+	union nf_inet_addr	addr;		/* IP address of the server */
 	__be16			port;		/* port number of the server */
 	volatile unsigned	flags;		/* dest status flags */
 	atomic_t		conn_flags;	/* flags to copy to conn */
@@ -366,7 +488,7 @@
 	/* for virtual service */
 	struct ip_vs_service	*svc;		/* service it belongs to */
 	__u16			protocol;	/* which protocol (TCP/UDP) */
-	__be32			vaddr;		/* virtual IP address */
+	union nf_inet_addr	vaddr;		/* virtual IP address */
 	__be16			vport;		/* virtual port number */
 	__u32			vfwmark;	/* firewall mark of service */
 };
@@ -380,6 +502,9 @@
 	char			*name;		/* scheduler name */
 	atomic_t		refcnt;		/* reference counter */
 	struct module		*module;	/* THIS_MODULE/NULL */
+#ifdef CONFIG_IP_VS_IPV6
+	int			supports_ipv6;	/* scheduler has IPv6 support */
+#endif
 
 	/* scheduler initializing service */
 	int (*init_service)(struct ip_vs_service *svc);
@@ -479,16 +604,8 @@
 #ifndef CONFIG_IP_VS_TAB_BITS
 #define CONFIG_IP_VS_TAB_BITS   12
 #endif
-/* make sure that IP_VS_CONN_TAB_BITS is located in [8, 20] */
-#if CONFIG_IP_VS_TAB_BITS < 8
-#define IP_VS_CONN_TAB_BITS	8
-#endif
-#if CONFIG_IP_VS_TAB_BITS > 20
-#define IP_VS_CONN_TAB_BITS	20
-#endif
-#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20
+
 #define IP_VS_CONN_TAB_BITS	CONFIG_IP_VS_TAB_BITS
-#endif
 #define IP_VS_CONN_TAB_SIZE     (1 << IP_VS_CONN_TAB_BITS)
 #define IP_VS_CONN_TAB_MASK     (IP_VS_CONN_TAB_SIZE - 1)
 
@@ -500,11 +617,16 @@
 };
 
 extern struct ip_vs_conn *ip_vs_conn_in_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port);
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port);
+
 extern struct ip_vs_conn *ip_vs_ct_in_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port);
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port);
+
 extern struct ip_vs_conn *ip_vs_conn_out_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port);
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port);
 
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
@@ -515,8 +637,9 @@
 extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 
 extern struct ip_vs_conn *
-ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport,
-	       __be32 daddr, __be16 dport, unsigned flags,
+ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
+	       const union nf_inet_addr *vaddr, __be16 vport,
+	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
 	       struct ip_vs_dest *dest);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
@@ -532,24 +655,32 @@
 {
 	struct ip_vs_conn *ctl_cp = cp->control;
 	if (!ctl_cp) {
-		IP_VS_ERR("request control DEL for uncontrolled: "
-			  "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
-			  NIPQUAD(cp->caddr),ntohs(cp->cport),
-			  NIPQUAD(cp->vaddr),ntohs(cp->vport));
+		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
+			      "%s:%d to %s:%d\n",
+			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+			      ntohs(cp->cport),
+			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+			      ntohs(cp->vport));
+
 		return;
 	}
 
-	IP_VS_DBG(7, "DELeting control for: "
-		  "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n",
-		  NIPQUAD(cp->caddr),ntohs(cp->cport),
-		  NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport));
+	IP_VS_DBG_BUF(7, "DELeting control for: "
+		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
+		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+		      ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
+		      ntohs(ctl_cp->cport));
 
 	cp->control = NULL;
 	if (atomic_read(&ctl_cp->n_control) == 0) {
-		IP_VS_ERR("BUG control DEL with n=0 : "
-			  "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
-			  NIPQUAD(cp->caddr),ntohs(cp->cport),
-			  NIPQUAD(cp->vaddr),ntohs(cp->vport));
+		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
+			      "%s:%d to %s:%d\n",
+			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+			      ntohs(cp->cport),
+			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+			      ntohs(cp->vport));
+
 		return;
 	}
 	atomic_dec(&ctl_cp->n_control);
@@ -559,17 +690,22 @@
 ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
 {
 	if (cp->control) {
-		IP_VS_ERR("request control ADD for already controlled: "
-			  "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
-			  NIPQUAD(cp->caddr),ntohs(cp->cport),
-			  NIPQUAD(cp->vaddr),ntohs(cp->vport));
+		IP_VS_ERR_BUF("request control ADD for already controlled: "
+			      "%s:%d to %s:%d\n",
+			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+			      ntohs(cp->cport),
+			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+			      ntohs(cp->vport));
+
 		ip_vs_control_del(cp);
 	}
 
-	IP_VS_DBG(7, "ADDing control for: "
-		  "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n",
-		  NIPQUAD(cp->caddr),ntohs(cp->cport),
-		  NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport));
+	IP_VS_DBG_BUF(7, "ADDing control for: "
+		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
+		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+		      ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
+		      ntohs(ctl_cp->cport));
 
 	cp->control = ctl_cp;
 	atomic_inc(&ctl_cp->n_control);
@@ -647,7 +783,8 @@
 extern const struct ctl_path net_vs_ctl_path[];
 
 extern struct ip_vs_service *
-ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
+ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+		  const union nf_inet_addr *vaddr, __be16 vport);
 
 static inline void ip_vs_service_put(struct ip_vs_service *svc)
 {
@@ -655,14 +792,16 @@
 }
 
 extern struct ip_vs_dest *
-ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport);
+ip_vs_lookup_real_service(int af, __u16 protocol,
+			  const union nf_inet_addr *daddr, __be16 dport);
+
 extern int ip_vs_use_count_inc(void);
 extern void ip_vs_use_count_dec(void);
 extern int ip_vs_control_init(void);
 extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
-ip_vs_find_dest(__be32 daddr, __be16 dport,
-		 __be32 vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
+		const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
@@ -683,6 +822,8 @@
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
+extern int ip_vs_estimator_init(void);
+extern void ip_vs_estimator_cleanup(void);
 extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
@@ -704,6 +845,19 @@
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
 extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
 
+#ifdef CONFIG_IP_VS_IPV6
+extern int ip_vs_bypass_xmit_v6
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_nat_xmit_v6
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_tunnel_xmit_v6
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_dr_xmit_v6
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+extern int ip_vs_icmp_xmit_v6
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
+ int offset);
+#endif
 
 /*
  *	This is a simple mechanism to ignore packets when
@@ -748,7 +902,12 @@
 }
 
 extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
-		struct ip_vs_conn *cp, int dir);
+			   struct ip_vs_conn *cp, int dir);
+
+#ifdef CONFIG_IP_VS_IPV6
+extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+			      struct ip_vs_conn *cp, int dir);
+#endif
 
 extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
 
@@ -759,6 +918,17 @@
 	return csum_partial((char *) diff, sizeof(diff), oldsum);
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
+					__wsum oldsum)
+{
+	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
+			    new[3],  new[2],  new[1],  new[0] };
+
+	return csum_partial((char *) diff, sizeof(diff), oldsum);
+}
+#endif
+
 static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
 {
 	__be16 diff[2] = { ~old, new };
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 113028f..dfa7ae3 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -576,6 +576,8 @@
 extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 			 struct group_filter __user *optval,
 			 int __user *optlen);
+extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+				    const struct in6_addr *daddr, u32 rnd);
 
 #ifdef CONFIG_PROC_FS
 extern int  ac6_proc_init(struct net *net);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ff137fd..f5f5b1f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -158,13 +158,17 @@
  *	also implies a change in the AID.
  * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed
  * @BSS_CHANGED_ERP_PREAMBLE: preamble changed
+ * @BSS_CHANGED_ERP_SLOT: slot timing changed
  * @BSS_CHANGED_HT: 802.11n parameters changed
+ * @BSS_CHANGED_BASIC_RATES: Basic rateset changed
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
 	BSS_CHANGED_ERP_CTS_PROT	= 1<<1,
 	BSS_CHANGED_ERP_PREAMBLE	= 1<<2,
+	BSS_CHANGED_ERP_SLOT		= 1<<3,
 	BSS_CHANGED_HT                  = 1<<4,
+	BSS_CHANGED_BASIC_RATES		= 1<<5,
 };
 
 /**
@@ -177,6 +181,7 @@
  * @aid: association ID number, valid only when @assoc is true
  * @use_cts_prot: use CTS protection
  * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
  * @dtim_period: num of beacons before the next DTIM, for PSM
  * @timestamp: beacon timestamp
  * @beacon_int: beacon interval
@@ -184,6 +189,9 @@
  * @assoc_ht: association in HT mode
  * @ht_conf: ht capabilities
  * @ht_bss_conf: ht extended capabilities
+ * @basic_rates: bitmap of basic rates; each bit stands for an
+ *	index into the rate table configured by the driver in
+ *	the current band.
  */
 struct ieee80211_bss_conf {
 	/* association related data */
@@ -192,10 +200,12 @@
 	/* erp related data */
 	bool use_cts_prot;
 	bool use_short_preamble;
+	bool use_short_slot;
 	u8 dtim_period;
 	u16 beacon_int;
 	u16 assoc_capability;
 	u64 timestamp;
+	u64 basic_rates;
 	/* ht related data */
 	bool assoc_ht;
 	struct ieee80211_ht_info *ht_conf;
@@ -290,6 +300,9 @@
  *  (2) driver internal use (if applicable)
  *  (3) TX status information - driver tells mac80211 what happened
  *
+ * The TX control's sta pointer is only valid during the ->tx call
+ * and may be NULL.
+ *
  * @flags: transmit info flags, defined above
  * @band: TBD
  * @tx_rate_idx: TBD
@@ -317,10 +330,11 @@
 
 	union {
 		struct {
+			/* NB: vif can be NULL for injected frames */
 			struct ieee80211_vif *vif;
 			struct ieee80211_key_conf *hw_key;
+			struct ieee80211_sta *sta;
 			unsigned long jiffies;
-			u16 aid;
 			s8 rts_cts_rate_idx, alt_retry_rate_idx;
 			u8 retry_limit;
 			u8 icv_len;
@@ -363,6 +377,7 @@
  * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
  *	is valid. This is useful in monitor mode and necessary for beacon frames
  *	to enable IBSS merging.
+ * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  */
 enum mac80211_rx_flags {
 	RX_FLAG_MMIC_ERROR	= 1<<0,
@@ -373,6 +388,7 @@
 	RX_FLAG_FAILED_FCS_CRC	= 1<<5,
 	RX_FLAG_FAILED_PLCP_CRC = 1<<6,
 	RX_FLAG_TSFT		= 1<<7,
+	RX_FLAG_SHORTPRE	= 1<<8
 };
 
 /**
@@ -418,6 +434,11 @@
  * @IEEE80211_CONF_PS: Enable 802.11 power save mode
  */
 enum ieee80211_conf_flags {
+	/*
+	 * TODO: IEEE80211_CONF_SHORT_SLOT_TIME will be removed once drivers
+	 * have been converted to use bss_info_changed() for slot time
+	 * configuration
+	 */
 	IEEE80211_CONF_SHORT_SLOT_TIME	= (1<<0),
 	IEEE80211_CONF_RADIOTAP		= (1<<1),
 	IEEE80211_CONF_SUPPORT_HT_MODE	= (1<<2),
@@ -461,33 +482,6 @@
 };
 
 /**
- * enum ieee80211_if_types - types of 802.11 network interfaces
- *
- * @IEEE80211_IF_TYPE_INVALID: invalid interface type, not used
- *	by mac80211 itself
- * @IEEE80211_IF_TYPE_AP: interface in AP mode.
- * @IEEE80211_IF_TYPE_MGMT: special interface for communication with hostap
- *	daemon. Drivers should never see this type.
- * @IEEE80211_IF_TYPE_STA: interface in STA (client) mode.
- * @IEEE80211_IF_TYPE_IBSS: interface in IBSS (ad-hoc) mode.
- * @IEEE80211_IF_TYPE_MNTR: interface in monitor (rfmon) mode.
- * @IEEE80211_IF_TYPE_WDS: interface in WDS mode.
- * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers
- *	will never see this type.
- * @IEEE80211_IF_TYPE_MESH_POINT: 802.11s mesh point
- */
-enum ieee80211_if_types {
-	IEEE80211_IF_TYPE_INVALID,
-	IEEE80211_IF_TYPE_AP,
-	IEEE80211_IF_TYPE_STA,
-	IEEE80211_IF_TYPE_IBSS,
-	IEEE80211_IF_TYPE_MESH_POINT,
-	IEEE80211_IF_TYPE_MNTR,
-	IEEE80211_IF_TYPE_WDS,
-	IEEE80211_IF_TYPE_VLAN,
-};
-
-/**
  * struct ieee80211_vif - per-interface data
  *
  * Data in this structure is continually present for driver
@@ -498,7 +492,7 @@
  *	sizeof(void *).
  */
 struct ieee80211_vif {
-	enum ieee80211_if_types type;
+	enum nl80211_iftype type;
 	/* must be last */
 	u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
 };
@@ -506,7 +500,7 @@
 static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
 {
 #ifdef CONFIG_MAC80211_MESH
-	return vif->type == IEEE80211_IF_TYPE_MESH_POINT;
+	return vif->type == NL80211_IFTYPE_MESH_POINT;
 #endif
 	return false;
 }
@@ -517,7 +511,7 @@
  * @vif: pointer to a driver-use per-interface structure. The pointer
  *	itself is also used for various functions including
  *	ieee80211_beacon_get() and ieee80211_get_buffered_bc().
- * @type: one of &enum ieee80211_if_types constants. Determines the type of
+ * @type: one of &enum nl80211_iftype constants. Determines the type of
  *	added/removed interface.
  * @mac_addr: pointer to MAC address of the interface. This pointer is valid
  *	until the interface is removed (i.e. it cannot be used after
@@ -533,7 +527,7 @@
  * in pure monitor mode.
  */
 struct ieee80211_if_init_conf {
-	enum ieee80211_if_types type;
+	enum nl80211_iftype type;
 	struct ieee80211_vif *vif;
 	void *mac_addr;
 };
@@ -662,6 +656,33 @@
 };
 
 /**
+ * struct ieee80211_sta - station table entry
+ *
+ * A station table entry represents a station we are possibly
+ * communicating with. Since stations are RCU-managed in
+ * mac80211, any ieee80211_sta pointer you get access to must
+ * either be protected by rcu_read_lock() explicitly or implicitly,
+ * or you must take good care to not use such a pointer after a
+ * call to your sta_notify callback that removed it.
+ *
+ * @addr: MAC address
+ * @aid: AID we assigned to the station if we're an AP
+ * @supp_rates: Bitmap of supported rates (per band)
+ * @ht_info: HT capabilities of this STA
+ * @drv_priv: data area for driver use, will always be aligned to
+ *	sizeof(void *), size is determined in hw information.
+ */
+struct ieee80211_sta {
+	u64 supp_rates[IEEE80211_NUM_BANDS];
+	u8 addr[ETH_ALEN];
+	u16 aid;
+	struct ieee80211_ht_info ht_info;
+
+	/* must be last */
+	u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
+};
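For illustration (driver names are hypothetical): a driver sizes its per-station area through hw->sta_data_size and then overlays its own structure on drv_priv, here from the sta_notify() callback whose new signature appears later in this patch:

	struct mydrv_sta {
		u16 last_tx_rate_idx;
	};

	/* at registration: hw->sta_data_size = sizeof(struct mydrv_sta); */

	static void mydrv_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
				     enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
	{
		struct mydrv_sta *msta = (struct mydrv_sta *)sta->drv_priv;

		msta->last_tx_rate_idx = 0;	/* reset driver state for this STA */
	}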
+
+/**
  * enum sta_notify_cmd - sta notify command
  *
  * Used with the sta_notify() callback in &struct ieee80211_ops, this
@@ -805,6 +826,8 @@
  *
  * @vif_data_size: size (in bytes) of the drv_priv data area
  *	within &struct ieee80211_vif.
+ * @sta_data_size: size (in bytes) of the drv_priv data area
+ *	within &struct ieee80211_sta.
  */
 struct ieee80211_hw {
 	struct ieee80211_conf conf;
@@ -816,12 +839,15 @@
 	unsigned int extra_tx_headroom;
 	int channel_change_time;
 	int vif_data_size;
+	int sta_data_size;
 	u16 queues;
 	u16 ampdu_queues;
 	u16 max_listen_interval;
 	s8 max_signal;
 };
 
+struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy);
+
 /**
  * SET_IEEE80211_DEV - set device for 802.11 hardware
  *
@@ -1097,7 +1123,7 @@
  *	This callback must be implemented and atomic.
  *
  * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
- * 	must be set or cleared for a given AID. Must be atomic.
+ * 	must be set or cleared for a given STA. Must be atomic.
  *
  * @set_key: See the section "Hardware crypto acceleration"
  *	This callback can sleep, and is only called between add_interface
@@ -1111,7 +1137,9 @@
  * @hw_scan: Ask the hardware to service the scan request, no need to start
  *	the scan state machine in stack. The scan must honour the channel
  *	configuration done by the regulatory agent in the wiphy's registered
- *	bands.
+ *	bands. When the scan finishes, ieee80211_scan_completed() must be
+ *	called; note that it also must be called when the scan cannot finish
+ *	because the hardware is turned off! Anything else is a bug!
  *
  * @get_stats: return low-level statistics
  *
@@ -1131,7 +1159,7 @@
 *	of associated station or AP.
  *
  * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
- *	bursting) for a hardware TX queue. Must be atomic.
+ *	bursting) for a hardware TX queue.
  *
  * @get_tx_stats: Get statistics of the current TX queue status. This is used
  *	to get number of currently queued packets (queue length), maximum queue
@@ -1181,7 +1209,8 @@
 				 unsigned int changed_flags,
 				 unsigned int *total_flags,
 				 int mc_count, struct dev_addr_list *mc_list);
-	int (*set_tim)(struct ieee80211_hw *hw, int aid, int set);
+	int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+		       bool set);
 	int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		       const u8 *local_address, const u8 *address,
 		       struct ieee80211_key_conf *key);
@@ -1198,7 +1227,7 @@
 	int (*set_retry_limit)(struct ieee80211_hw *hw,
 			       u32 short_retry, u32 long_retr);
 	void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			enum sta_notify_cmd, const u8 *addr);
+			enum sta_notify_cmd, struct ieee80211_sta *sta);
 	int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
 		       const struct ieee80211_tx_queue_params *params);
 	int (*get_tx_stats)(struct ieee80211_hw *hw,
@@ -1208,7 +1237,7 @@
 	int (*tx_last_beacon)(struct ieee80211_hw *hw);
 	int (*ampdu_action)(struct ieee80211_hw *hw,
 			    enum ieee80211_ampdu_mlme_action action,
-			    const u8 *addr, u16 tid, u16 *ssn);
+			    struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 };
 
 /**
@@ -1557,16 +1586,6 @@
 unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
 
 /**
- * ieee80211_get_hdrlen - get header length from frame control
- *
- * This function returns the 802.11 header length in bytes (not including
- * encryption headers.)
- *
- * @fc: the frame control field (in CPU endianness)
- */
-int ieee80211_get_hdrlen(u16 fc);
-
-/**
  * ieee80211_hdrlen - get header length in bytes from frame control
  * @fc: frame control field in little-endian format
  */
@@ -1608,6 +1627,16 @@
 void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
 
 /**
+ * ieee80211_queue_stopped - test status of the queue
+ * @hw: pointer as obtained from ieee80211_alloc_hw().
+ * @queue: queue number (counted from zero).
+ *
+ * Drivers should use this function instead of netif_queue_stopped().
+ */
+int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
+
+/**
  * ieee80211_stop_queues - stop all queues
  * @hw: pointer as obtained from ieee80211_alloc_hw().
  *
@@ -1758,4 +1787,85 @@
  */
 void ieee80211_notify_mac(struct ieee80211_hw *hw,
 			  enum ieee80211_notification_types  notif_type);
+
+/**
+ * ieee80211_find_sta - find a station
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: station's address
+ *
+ * This function must be called under RCU lock and the
+ * resulting pointer is only valid under RCU lock as well.
+ */
+struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
+					 const u8 *addr);
+
+
+/* Rate control API */
+/**
+ * struct rate_selection - rate information for/from rate control algorithms
+ *
+ * @rate_idx: selected transmission rate index
+ * @nonerp_idx: Non-ERP rate to use instead if ERP cannot be used
+ * @probe_idx: rate for probing (or -1)
+ * @max_rate_idx: maximum rate index that can be used; this is
+ *	input to the algorithm and will be enforced.
+ */
+struct rate_selection {
+	s8 rate_idx, nonerp_idx, probe_idx, max_rate_idx;
+};
+
+struct rate_control_ops {
+	struct module *module;
+	const char *name;
+	void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
+	void (*clear)(void *priv);
+	void (*free)(void *priv);
+
+	void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
+	void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+			  struct ieee80211_sta *sta, void *priv_sta);
+	void (*free_sta)(void *priv, struct ieee80211_sta *sta,
+			 void *priv_sta);
+
+	void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
+			  struct ieee80211_sta *sta, void *priv_sta,
+			  struct sk_buff *skb);
+	void (*get_rate)(void *priv, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta,
+			 struct sk_buff *skb,
+			 struct rate_selection *sel);
+
+	void (*add_sta_debugfs)(void *priv, void *priv_sta,
+				struct dentry *dir);
+	void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+};
+
+static inline int rate_supported(struct ieee80211_sta *sta,
+				 enum ieee80211_band band,
+				 int index)
+{
+	return (sta == NULL || sta->supp_rates[band] & BIT(index));
+}
+
+static inline s8
+rate_lowest_index(struct ieee80211_supported_band *sband,
+		  struct ieee80211_sta *sta)
+{
+	int i;
+
+	for (i = 0; i < sband->n_bitrates; i++)
+		if (rate_supported(sta, sband->band, i))
+			return i;
+
+	/* warn when we cannot find a rate. */
+	WARN_ON(1);
+
+	return 0;
+}
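A sketch of a rate-control get_rate() hook falling back to the lowest rate the peer supports (illustration only; a real algorithm would track per-station history in priv_sta):

	static void example_get_rate(void *priv, struct ieee80211_supported_band *sband,
				     struct ieee80211_sta *sta, void *priv_sta,
				     struct sk_buff *skb, struct rate_selection *sel)
	{
		sel->rate_idx = rate_lowest_index(sband, sta);
		sel->nonerp_idx = rate_lowest_index(sband, sta);
		sel->probe_idx = -1;	/* no probing in this trivial example */
	}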
+
+
+int ieee80211_rate_control_register(struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
+
 #endif /* MAC80211_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 208fe5a..3643bbb 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -119,9 +119,6 @@
  * Nested Attributes Construction:
  *   nla_nest_start(skb, type)		start a nested attribute
  *   nla_nest_end(skb, nla)		finalize a nested attribute
- *   nla_nest_compat_start(skb, type,	start a nested compat attribute
- *			   len, data)
- *   nla_nest_compat_end(skb, type)	finalize a nested compat attribute
  *   nla_nest_cancel(skb, nla)		cancel nested attribute construction
  *
  * Attribute Length Calculations:
@@ -156,7 +153,6 @@
  *   nla_find_nested()			find attribute in nested attributes
  *   nla_parse()			parse and validate stream of attrs
 *   nla_parse_nested()			parse nested attributes
- *   nla_parse_nested_compat()		parse nested compat attributes
  *   nla_for_each_attr()		loop over all attributes
  *   nla_for_each_nested()		loop over the nested attributes
  *=========================================================================
@@ -752,39 +748,6 @@
 }
 
 /**
- * nla_parse_nested_compat - parse nested compat attributes
- * @tb: destination array with maxtype+1 elements
- * @maxtype: maximum attribute type to be expected
- * @nla: attribute containing the nested attributes
- * @data: pointer to point to contained structure
- * @len: length of contained structure
- * @policy: validation policy
- *
- * Parse a nested compat attribute. The compat attribute contains a structure
- * and optionally a set of nested attributes. On success the data pointer
- * points to the nested data and tb contains the parsed attributes
- * (see nla_parse).
- */
-static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
-					    struct nlattr *nla,
-					    const struct nla_policy *policy,
-					    int len)
-{
-	int nested_len = nla_len(nla) - NLA_ALIGN(len);
-
-	if (nested_len < 0)
-		return -EINVAL;
-	if (nested_len >= nla_attr_size(0))
-		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
-				 nested_len, policy);
-	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
-	return 0;
-}
-
-#define nla_parse_nested_compat(tb, maxtype, nla, policy, data, len) \
-({	data = nla_len(nla) >= len ? nla_data(nla) : NULL; \
-	__nla_parse_nested_compat(tb, maxtype, nla, policy, len); })
-/**
  * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -1031,51 +994,6 @@
 }
 
 /**
- * nla_nest_compat_start - Start a new level of nested compat attributes
- * @skb: socket buffer to add attributes to
- * @attrtype: attribute type of container
- * @attrlen: length of structure
- * @data: pointer to structure
- *
- * Start a nested compat attribute that contains both a structure and
- * a set of nested attributes.
- *
- * Returns the container attribute
- */
-static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb,
-						   int attrtype, int attrlen,
-						   const void *data)
-{
-	struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
-
-	if (nla_put(skb, attrtype, attrlen, data) < 0)
-		return NULL;
-	if (nla_nest_start(skb, attrtype) == NULL) {
-		nlmsg_trim(skb, start);
-		return NULL;
-	}
-	return start;
-}
-
-/**
- * nla_nest_compat_end - Finalize nesting of compat attributes
- * @skb: socket buffer the attributes are stored in
- * @start: container attribute
- *
- * Corrects the container attribute header to include the all
- * appeneded attributes.
- *
- * Returns the total data length of the skb.
- */
-static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
-{
-	struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len);
-
-	start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
-	return nla_nest_end(skb, nest);
-}
-
-/**
  * nla_nest_cancel - Cancel nesting of attributes
  * @skb: socket buffer the message is stored in
  * @start: container attribute
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
new file mode 100644
index 0000000..d4e7250
--- /dev/null
+++ b/include/net/phonet/phonet.h
@@ -0,0 +1,112 @@
+/*
+ * File: af_phonet.h
+ *
+ * Phonet sockets kernel definitions
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef AF_PHONET_H
+#define AF_PHONET_H
+
+/*
+ * The lower layers may not require more space, ever. Make sure it's
+ * enough.
+ */
+#define MAX_PHONET_HEADER	8
+
+/*
+ * Every Phonet* socket has this structure first in its
+ * protocol-specific structure under name c.
+ */
+struct pn_sock {
+	struct sock	sk;
+	u16		sobject;
+	u8		resource;
+};
+
+static inline struct pn_sock *pn_sk(struct sock *sk)
+{
+	return (struct pn_sock *)sk;
+}
+
+extern const struct proto_ops phonet_dgram_ops;
+
+struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *sa);
+void phonet_get_local_port_range(int *min, int *max);
+void pn_sock_hash(struct sock *sk);
+void pn_sock_unhash(struct sock *sk);
+int pn_sock_get_port(struct sock *sk, unsigned short sport);
+
+int pn_skb_send(struct sock *sk, struct sk_buff *skb,
+		const struct sockaddr_pn *target);
+
+static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
+{
+	return (struct phonethdr *)skb_network_header(skb);
+}
+
+static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
+{
+	return (struct phonetmsg *)skb_transport_header(skb);
+}
+
+/*
+ * Get the other party's sockaddr from a received skb. The skb begins
+ * with a Phonet header.
+ */
+static inline
+void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
+{
+	struct phonethdr *ph = pn_hdr(skb);
+	u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj);
+
+	sa->spn_family = AF_PHONET;
+	pn_sockaddr_set_object(sa, obj);
+	pn_sockaddr_set_resource(sa, ph->pn_res);
+	memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
+}
+
+static inline
+void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
+{
+	struct phonethdr *ph = pn_hdr(skb);
+	u16 obj = pn_object(ph->pn_rdev, ph->pn_robj);
+
+	sa->spn_family = AF_PHONET;
+	pn_sockaddr_set_object(sa, obj);
+	pn_sockaddr_set_resource(sa, ph->pn_res);
+	memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
+}
+
+/* Protocols in Phonet protocol family. */
+struct phonet_protocol {
+	const struct proto_ops	*ops;
+	struct proto		*prot;
+	int			sock_type;
+};
+
+int phonet_proto_register(int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+
+int phonet_sysctl_init(void);
+void phonet_sysctl_exit(void);
+void phonet_netlink_register(void);
+int isi_register(void);
+void isi_unregister(void);
+
+#endif
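struct phonet_protocol ties one Phonet transport's proto_ops, proto and socket type together, and phonet_proto_register() above is how a transport makes itself reachable through the address family. A rough sketch under invented names (pn_foo_ops, pn_foo_proto and PN_PROTO_FOO are placeholders, not part of this header):

/* Illustration: every pn_foo_* name and PN_PROTO_FOO is a placeholder. */
static struct phonet_protocol pn_foo_proto_desc = {
	.ops		= &pn_foo_ops,		/* placeholder proto_ops */
	.prot		= &pn_foo_proto,	/* placeholder struct proto */
	.sock_type	= SOCK_DGRAM,
};

static int __init pn_foo_init(void)
{
	return phonet_proto_register(PN_PROTO_FOO, &pn_foo_proto_desc);
}

static void __exit pn_foo_exit(void)
{
	phonet_proto_unregister(PN_PROTO_FOO, &pn_foo_proto_desc);
}

module_init(pn_foo_init);
module_exit(pn_foo_exit);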
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
new file mode 100644
index 0000000..bbd2a83
--- /dev/null
+++ b/include/net/phonet/pn_dev.h
@@ -0,0 +1,50 @@
+/*
+ * File: pn_dev.h
+ *
+ * Phonet network device
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef PN_DEV_H
+#define PN_DEV_H
+
+struct phonet_device_list {
+	struct list_head list;
+	spinlock_t lock;
+};
+
+extern struct phonet_device_list pndevs;
+
+struct phonet_device {
+	struct list_head list;
+	struct net_device *netdev;
+	DECLARE_BITMAP(addrs, 64);
+};
+
+void phonet_device_init(void);
+void phonet_device_exit(void);
+struct net_device *phonet_device_get(struct net *net);
+
+int phonet_address_add(struct net_device *dev, u8 addr);
+int phonet_address_del(struct net_device *dev, u8 addr);
+u8 phonet_address_get(struct net_device *dev, u8 addr);
+int phonet_address_lookup(u8 addr);
+
+#define PN_NO_ADDR	0xff
+
+#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b786a5b..4082f39 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,10 +90,7 @@
 
 static inline void qdisc_run(struct Qdisc *q)
 {
-	struct netdev_queue *txq = q->dev_queue;
-
-	if (!netif_tx_queue_stopped(txq) &&
-	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+	if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
 		__qdisc_run(q);
 }
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e556962..3b983e8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,7 +52,7 @@
 	u32			parent;
 	atomic_t		refcnt;
 	unsigned long		state;
-	struct sk_buff		*gso_skb;
+	struct sk_buff_head	requeue;
 	struct sk_buff_head	q;
 	struct netdev_queue	*dev_queue;
 	struct Qdisc		*next_sched;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 17b932b..703305d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -406,10 +406,7 @@
 
 /* A macro to walk a list of skbs.  */
 #define sctp_skb_for_each(pos, head, tmp) \
-for (pos = (head)->next;\
-     tmp = (pos)->next, pos != ((struct sk_buff *)(head));\
-     pos = tmp)
-
+	skb_queue_walk_safe(head, pos, tmp)
 
 /* A helper to append an entire skb list (list) to another (head). */
 static inline void sctp_skb_list_tail(struct sk_buff_head *list,
@@ -420,10 +417,7 @@
 	sctp_spin_lock_irqsave(&head->lock, flags);
 	sctp_spin_lock(&list->lock);
 
-	list_splice((struct list_head *)list, (struct list_head *)head->prev);
-
-	head->qlen += list->qlen;
-	list->qlen = 0;
+	skb_queue_splice_tail_init(list, head);
 
 	sctp_spin_unlock(&list->lock);
 	sctp_spin_unlock_irqrestore(&head->lock, flags);
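sctp_skb_for_each() is now just skb_queue_walk_safe(), which caches the next pointer up front so the current skb may be unlinked while iterating. A small sketch of that pattern over an arbitrary sk_buff_head (the helper is illustrative and the caller is assumed to hold the queue lock):

/* Illustrative helper, not part of this patch. */
static void purge_queue(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		/* safe: tmp already points at the next element */
		__skb_unlink(skb, queue);
		kfree_skb(skb);
	}
}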
diff --git a/include/net/sock.h b/include/net/sock.h
index 06c5259..75a312d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -532,6 +532,7 @@
 	int			(*getsockopt)(struct sock *sk, int level, 
 					int optname, char __user *optval, 
 					int __user *option);  	 
+#ifdef CONFIG_COMPAT
 	int			(*compat_setsockopt)(struct sock *sk,
 					int level,
 					int optname, char __user *optval,
@@ -540,6 +541,7 @@
 					int level,
 					int optname, char __user *optval,
 					int __user *option);
+#endif
 	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
 					   struct msghdr *msg, size_t len);
 	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
new file mode 100644
index 0000000..6abb3ed
--- /dev/null
+++ b/include/net/tc_act/tc_skbedit.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#ifndef __NET_TC_SKBEDIT_H
+#define __NET_TC_SKBEDIT_H
+
+#include <net/act_api.h>
+
+struct tcf_skbedit {
+	struct tcf_common	common;
+	u32			flags;
+	u32     		priority;
+	u16			queue_mapping;
+};
+#define to_skbedit(pc) \
+	container_of(pc, struct tcf_skbedit, common)
+
+#endif /* __NET_TC_SKBEDIT_H */
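to_skbedit() is the usual container_of() accessor: the generic tcf_common sits first in tcf_skbedit, so a pointer to it can be converted back into the full action. A trivial sketch (the helper name is invented):

/* Illustrative helper, not part of this header. */
static u32 skbedit_priority_of(struct tcf_common *pc)
{
	struct tcf_skbedit *d = to_skbedit(pc);

	return d->priority;
}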
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8983386..12c9b4f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -472,6 +472,8 @@
 
 /* tcp_input.c */
 extern void tcp_cwnd_application_limited(struct sock *sk);
+extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
+					    struct sk_buff *skb);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
@@ -1039,13 +1041,12 @@
 {
 	tp->lost_skb_hint = NULL;
 	tp->scoreboard_skb_hint = NULL;
-	tp->retransmit_skb_hint = NULL;
-	tp->forward_skb_hint = NULL;
 }
 
 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
 {
 	tcp_clear_retrans_hints_partial(tp);
+	tp->retransmit_skb_hint = NULL;
 }
 
 /* MD5 Signature */
@@ -1180,49 +1181,45 @@
 
 static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
 {
-	struct sk_buff *skb = sk->sk_write_queue.next;
-	if (skb == (struct sk_buff *) &sk->sk_write_queue)
-		return NULL;
-	return skb;
+	return skb_peek(&sk->sk_write_queue);
 }
 
 static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
 {
-	struct sk_buff *skb = sk->sk_write_queue.prev;
-	if (skb == (struct sk_buff *) &sk->sk_write_queue)
-		return NULL;
-	return skb;
+	return skb_peek_tail(&sk->sk_write_queue);
 }
 
 static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
 {
-	return skb->next;
+	return skb_queue_next(&sk->sk_write_queue, skb);
 }
 
 #define tcp_for_write_queue(skb, sk)					\
-		for (skb = (sk)->sk_write_queue.next;			\
-		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
-		     skb = skb->next)
+	skb_queue_walk(&(sk)->sk_write_queue, skb)
 
 #define tcp_for_write_queue_from(skb, sk)				\
-		for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
-		     skb = skb->next)
+	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
 
 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
-		for (tmp = skb->next;					\
-		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
-		     skb = tmp, tmp = skb->next)
+	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
 
 static inline struct sk_buff *tcp_send_head(struct sock *sk)
 {
 	return sk->sk_send_head;
 }
 
+static inline bool tcp_skb_is_last(const struct sock *sk,
+				   const struct sk_buff *skb)
+{
+	return skb_queue_is_last(&sk->sk_write_queue, skb);
+}
+
 static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
 {
-	sk->sk_send_head = skb->next;
-	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+	if (tcp_skb_is_last(sk, skb))
 		sk->sk_send_head = NULL;
+	else
+		sk->sk_send_head = tcp_write_queue_next(sk, skb);
 }
 
 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
@@ -1267,12 +1264,12 @@
 	__skb_queue_after(&sk->sk_write_queue, skb, buff);
 }
 
-/* Insert skb between prev and next on the write queue of sk.  */
+/* Insert new before skb on the write queue of sk.  */
 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
 						  struct sk_buff *skb,
 						  struct sock *sk)
 {
-	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+	__skb_queue_before(&sk->sk_write_queue, skb, new);
 
 	if (sk->sk_send_head == skb)
 		sk->sk_send_head = new;
@@ -1283,12 +1280,6 @@
 	__skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_skb_is_last(const struct sock *sk,
-				  const struct sk_buff *skb)
-{
-	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
 static inline int tcp_write_queue_empty(struct sock *sk)
 {
 	return skb_queue_empty(&sk->sk_write_queue);
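With the write queue expressed through the generic sk_buff_head helpers, iteration follows the standard skb_queue pattern; skbs in front of tcp_send_head() have already been transmitted. A minimal, purely illustrative walk using the macros above:

/* Illustrative helper, not part of this patch. */
static unsigned int count_sent_skbs(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int n = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;		/* rest of the queue is unsent */
		n++;
	}
	return n;
}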
diff --git a/include/net/wireless.h b/include/net/wireless.h
index 9324f8d..721efb3 100644
--- a/include/net/wireless.h
+++ b/include/net/wireless.h
@@ -60,6 +60,7 @@
  * with cfg80211.
  *
  * @center_freq: center frequency in MHz
+ * @max_bandwidth: maximum allowed bandwidth for this channel, in MHz
  * @hw_value: hardware-specific value for the channel
  * @flags: channel flags from &enum ieee80211_channel_flags.
  * @orig_flags: channel flags at registration time, used by regulatory
@@ -73,6 +74,7 @@
 struct ieee80211_channel {
 	enum ieee80211_band band;
 	u16 center_freq;
+	u8 max_bandwidth;
 	u16 hw_value;
 	u32 flags;
 	int max_antenna_gain;
@@ -178,6 +180,7 @@
  * struct wiphy - wireless hardware description
  * @idx: the wiphy index assigned to this item
  * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
+ * @reg_notifier: the driver's regulatory notification callback
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -185,6 +188,9 @@
 	/* permanent MAC address */
 	u8 perm_addr[ETH_ALEN];
 
+	/* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
+	u16 interface_modes;
+
 	/* If multiple wiphys are registered and you're handed e.g.
 	 * a regular netdev with assigned ieee80211_ptr, you won't
 	 * know whether it points to a wiphy your driver has registered
@@ -194,6 +200,9 @@
 
 	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
 
+	/* Lets us get back the wiphy on the callback */
+	int (*reg_notifier)(struct wiphy *wiphy, enum reg_set_by setby);
+
 	/* fields below are read-only, assigned by cfg80211 */
 
 	/* the item in /sys/class/ieee80211/ points to this,
@@ -214,9 +223,11 @@
  * the netdev.)
  *
  * @wiphy: pointer to hardware description
+ * @iftype: interface type
  */
 struct wireless_dev {
 	struct wiphy *wiphy;
+	enum nl80211_iftype iftype;
 
 	/* private to the generic wireless code */
 	struct list_head list;
@@ -319,7 +330,6 @@
  */
 extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
 							 int freq);
-
 /**
  * ieee80211_get_channel - get channel struct from wiphy for specified frequency
  */
@@ -328,4 +338,57 @@
 {
 	return __ieee80211_get_channel(wiphy, freq);
 }
+
+/**
+ * __regulatory_hint - hint to the wireless core a regulatory domain
+ * @wiphy: if a driver is providing the hint this is the driver's very
+ * 	own &struct wiphy
+ * @alpha2: the ISO/IEC 3166 alpha2 country code the regulatory domain is
+ * 	claimed to be in. If @rd is set this should be NULL
+ * @rd: a complete regulatory domain, if passed the caller need not worry
+ * 	about freeing it
+ *
+ * The Wireless subsystem can use this function to hint to the wireless core
+ * what it believes should be the current regulatory domain by
+ * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
+ * domain should be in or by providing a completely built regulatory domain.
+ *
+ * Returns -EALREADY if *a regulatory domain* has already been set. Note that
+ * this could be by another driver. It is safe for drivers to continue if
+ * -EALREADY is returned; if drivers are not capable of world roaming they
+ * should not register more channels than they support. Right now we only
+ * support listening to the first driver hint. If the driver is capable
+ * of world roaming but wants to respect its own EEPROM mappings for
+ * specific regulatory domains it should register the @reg_notifier callback
+ * on the &struct wiphy. Returns 0 if the hint went through fine or through an
+ * intersection operation. Otherwise a standard error code is returned.
+ *
+ */
+extern int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
+		const char *alpha2, struct ieee80211_regdomain *rd);
+/**
+ * regulatory_hint - driver hint to the wireless core a regulatory domain
+ * @wiphy: the driver's very own &struct wiphy
+ * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
+ * 	should be in. If @rd is set this should be NULL. Note that if you
+ * 	set this to NULL you should still set rd->alpha2 to some accepted
+ * 	alpha2.
+ * @rd: a complete regulatory domain provided by the driver. If passed
+ * 	the driver does not need to worry about freeing it.
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * what they believe should be the current regulatory domain by
+ * giving it an ISO/IEC 3166 alpha2 country code they know their regulatory
+ * domain should be in or by providing a completely built regulatory domain.
+ * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried
+ * for a regulatory domain structure for the respective country. If
+ * a regulatory domain is built and passed you should set the alpha2
+ * if possible, otherwise set it to the special value of "99" which tells
+ * the wireless core it is unknown. If you pass a built regulatory domain
+ * and we return non-zero you are in charge of kfree()'ing the structure.
+ *
+ * See __regulatory_hint() documentation for possible return values.
+ */
+extern int regulatory_hint(struct wiphy *wiphy,
+		const char *alpha2, struct ieee80211_regdomain *rd);
 #endif /* __NET_WIRELESS_H */
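The kernel-doc above describes the driver side of the new regulatory infrastructure: set @reg_notifier if the device wants to react to later changes, then hint the alpha2 its EEPROM claims. A hedged sketch, with the foo names invented and only the wiphy field and the calls above taken from this patch:

/* Sketch only: the foo_* names are placeholders. */
static int foo_reg_notifier(struct wiphy *wiphy, enum reg_set_by setby)
{
	/* reprogram per-channel limits for the new regulatory domain */
	return 0;
}

static int foo_setup_regulatory(struct wiphy *wiphy)
{
	int r;

	wiphy->reg_notifier = foo_reg_notifier;

	/* alpha2 hint only, so rd stays NULL */
	r = regulatory_hint(wiphy, "US", NULL);
	if (r == -EALREADY)
		return 0;	/* another driver hinted first; that is fine */
	return r;
}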
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2933d747..48630b2 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -120,9 +120,11 @@
 /* Full description of state of transformer. */
 struct xfrm_state
 {
-	/* Note: bydst is re-used during gc */
 	struct list_head	all;
-	struct hlist_node	bydst;
+	union {
+		struct list_head	gclist;
+		struct hlist_node	bydst;
+	};
 	struct hlist_node	bysrc;
 	struct hlist_node	byspi;
 
@@ -1244,6 +1246,8 @@
 };
 
 struct xfrm_state_walk {
+	struct list_head list;
+	unsigned long genid;
 	struct xfrm_state *state;
 	int count;
 	u8 proto;
@@ -1279,23 +1283,10 @@
 extern int xfrm_proc_init(void);
 #endif
 
-static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
-{
-	walk->proto = proto;
-	walk->state = NULL;
-	walk->count = 0;
-}
-
-static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
-{
-	if (walk->state != NULL) {
-		xfrm_state_put(walk->state);
-		walk->state = NULL;
-	}
-}
-
+extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
 extern int xfrm_state_walk(struct xfrm_state_walk *walk,
 			   int (*func)(struct xfrm_state *, int, void*), void *);
+extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
 extern struct xfrm_state *xfrm_state_alloc(void);
 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 
 					  struct flowi *fl, struct xfrm_tmpl *tmpl,
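xfrm_state_walk_init()/xfrm_state_walk_done() move out of line because the walker is now linked into a list and stamped with a genid, but the calling sequence stays the same. A sketch of a caller, matching the declarations above (dump_one() and dump_states() are invented):

/* Illustrative helpers, not part of this patch. */
static int dump_one(struct xfrm_state *x, int count, void *ptr)
{
	/* returning non-zero aborts the walk */
	return 0;
}

static int dump_states(u8 proto, void *ptr)
{
	struct xfrm_state_walk walk;
	int err;

	xfrm_state_walk_init(&walk, proto);
	err = xfrm_state_walk(&walk, dump_one, ptr);
	xfrm_state_walk_done(&walk);
	return err;
}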
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b661f47..f0e335a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -394,6 +394,7 @@
 
 	vlandev->features &= ~dev->vlan_features;
 	vlandev->features |= dev->features & dev->vlan_features;
+	vlandev->gso_max_size = dev->gso_max_size;
 
 	if (old_features != vlandev->features)
 		netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bf014e..8883e9c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -48,7 +48,7 @@
 
 	switch (veth->h_vlan_encapsulated_proto) {
 #ifdef CONFIG_INET
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 
 		/* TODO:  Confirm this will work with VLAN headers... */
 		return arp_find(veth->h_dest, skb);
@@ -607,6 +607,7 @@
 		      (1<<__LINK_STATE_PRESENT);
 
 	dev->features |= real_dev->features & real_dev->vlan_features;
+	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
diff --git a/net/Kconfig b/net/Kconfig
index 7612cc8..9103a16 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,18 +232,23 @@
 source "net/irda/Kconfig"
 source "net/bluetooth/Kconfig"
 source "net/rxrpc/Kconfig"
+source "net/phonet/Kconfig"
 
 config FIB_RULES
 	bool
 
-menu "Wireless"
+menuconfig WIRELESS
+	bool "Wireless"
 	depends on !S390
+	default y
+
+if WIRELESS
 
 source "net/wireless/Kconfig"
 source "net/mac80211/Kconfig"
 source "net/ieee80211/Kconfig"
 
-endmenu
+endif # WIRELESS
 
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 4f43e7f..acaf819 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,7 @@
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_DECNET)		+= decnet/
 obj-$(CONFIG_ECONET)		+= econet/
+obj-$(CONFIG_PHONET)		+= phonet/
 ifneq ($(CONFIG_VLAN_8021Q),)
 obj-y				+= 8021q/
 endif
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 8d9a6f1..280de48 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -375,11 +375,11 @@
 			if (memcmp
 			    (skb->data + 6, ethertype_ipv6,
 			     sizeof(ethertype_ipv6)) == 0)
-				skb->protocol = __constant_htons(ETH_P_IPV6);
+				skb->protocol = htons(ETH_P_IPV6);
 			else if (memcmp
 				 (skb->data + 6, ethertype_ipv4,
 				  sizeof(ethertype_ipv4)) == 0)
-				skb->protocol = __constant_htons(ETH_P_IP);
+				skb->protocol = htons(ETH_P_IP);
 			else
 				goto error;
 			skb_pull(skb, sizeof(llc_oui_ipv4));
@@ -404,9 +404,9 @@
 			skb_reset_network_header(skb);
 			iph = ip_hdr(skb);
 			if (iph->version == 4)
-				skb->protocol = __constant_htons(ETH_P_IP);
+				skb->protocol = htons(ETH_P_IP);
 			else if (iph->version == 6)
-				skb->protocol = __constant_htons(ETH_P_IPV6);
+				skb->protocol = htons(ETH_P_IPV6);
 			else
 				goto error;
 			skb->pkt_type = PACKET_HOST;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5799fb5..8f701cd 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1931,7 +1931,6 @@
 		switch (priv->lane_version) {
 		case 1:
 			return priv->mcast_vcc;
-			break;
 		case 2:	/* LANE2 wants arp for multicast addresses */
 			if (!compare_ether_addr(mac_to_find, bus_mac))
 				return priv->mcast_vcc;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 573acdf..4d2c1f1 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -28,6 +28,10 @@
 	.rcv	= br_stp_rcv,
 };
 
+static struct pernet_operations br_net_ops = {
+	.exit	= br_net_exit,
+};
+
 static int __init br_init(void)
 {
 	int err;
@@ -42,18 +46,22 @@
 	if (err)
 		goto err_out;
 
-	err = br_netfilter_init();
+	err = register_pernet_subsys(&br_net_ops);
 	if (err)
 		goto err_out1;
 
-	err = register_netdevice_notifier(&br_device_notifier);
+	err = br_netfilter_init();
 	if (err)
 		goto err_out2;
 
-	err = br_netlink_init();
+	err = register_netdevice_notifier(&br_device_notifier);
 	if (err)
 		goto err_out3;
 
+	err = br_netlink_init();
+	if (err)
+		goto err_out4;
+
 	brioctl_set(br_ioctl_deviceless_stub);
 	br_handle_frame_hook = br_handle_frame;
 
@@ -61,10 +69,12 @@
 	br_fdb_put_hook = br_fdb_put;
 
 	return 0;
-err_out3:
+err_out4:
 	unregister_netdevice_notifier(&br_device_notifier);
-err_out2:
+err_out3:
 	br_netfilter_fini();
+err_out2:
+	unregister_pernet_subsys(&br_net_ops);
 err_out1:
 	br_fdb_fini();
 err_out:
@@ -80,7 +90,7 @@
 	unregister_netdevice_notifier(&br_device_notifier);
 	brioctl_set(NULL);
 
-	br_cleanup_bridges();
+	unregister_pernet_subsys(&br_net_ops);
 
 	synchronize_net();
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4f52c3d..22ba863 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -178,5 +178,6 @@
 	dev->priv_flags = IFF_EBRIDGE;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX;
+			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+			NETIF_F_NETNS_LOCAL;
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 63c18aa..573e20f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -168,7 +168,7 @@
 	unregister_netdevice(br->dev);
 }
 
-static struct net_device *new_bridge_dev(const char *name)
+static struct net_device *new_bridge_dev(struct net *net, const char *name)
 {
 	struct net_bridge *br;
 	struct net_device *dev;
@@ -178,6 +178,7 @@
 
 	if (!dev)
 		return NULL;
+	dev_net_set(dev, net);
 
 	br = netdev_priv(dev);
 	br->dev = dev;
@@ -262,12 +263,12 @@
 	return p;
 }
 
-int br_add_bridge(const char *name)
+int br_add_bridge(struct net *net, const char *name)
 {
 	struct net_device *dev;
 	int ret;
 
-	dev = new_bridge_dev(name);
+	dev = new_bridge_dev(net, name);
 	if (!dev)
 		return -ENOMEM;
 
@@ -294,13 +295,13 @@
 	goto out;
 }
 
-int br_del_bridge(const char *name)
+int br_del_bridge(struct net *net, const char *name)
 {
 	struct net_device *dev;
 	int ret = 0;
 
 	rtnl_lock();
-	dev = __dev_get_by_name(&init_net, name);
+	dev = __dev_get_by_name(net, name);
 	if (dev == NULL)
 		ret =  -ENXIO; 	/* Could not find device */
 
@@ -445,13 +446,13 @@
 	return 0;
 }
 
-void __exit br_cleanup_bridges(void)
+void br_net_exit(struct net *net)
 {
 	struct net_device *dev;
 
 	rtnl_lock();
 restart:
-	for_each_netdev(&init_net, dev) {
+	for_each_netdev(net, dev) {
 		if (dev->priv_flags & IFF_EBRIDGE) {
 			del_br(dev->priv);
 			goto restart;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5bbf073..6a6433d 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,12 +21,12 @@
 #include "br_private.h"
 
 /* called with RTNL */
-static int get_bridge_ifindices(int *indices, int num)
+static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
 	struct net_device *dev;
 	int i = 0;
 
-	for_each_netdev(&init_net, dev) {
+	for_each_netdev(net, dev) {
 		if (i >= num)
 			break;
 		if (dev->priv_flags & IFF_EBRIDGE)
@@ -89,7 +89,7 @@
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	dev = dev_get_by_index(&init_net, ifindex);
+	dev = dev_get_by_index(dev_net(br->dev), ifindex);
 	if (dev == NULL)
 		return -EINVAL;
 
@@ -315,7 +315,7 @@
 	return -EOPNOTSUPP;
 }
 
-static int old_deviceless(void __user *uarg)
+static int old_deviceless(struct net *net, void __user *uarg)
 {
 	unsigned long args[3];
 
@@ -337,7 +337,7 @@
 		if (indices == NULL)
 			return -ENOMEM;
 
-		args[2] = get_bridge_ifindices(indices, args[2]);
+		args[2] = get_bridge_ifindices(net, indices, args[2]);
 
 		ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
 			? -EFAULT : args[2];
@@ -360,9 +360,9 @@
 		buf[IFNAMSIZ-1] = 0;
 
 		if (args[0] == BRCTL_ADD_BRIDGE)
-			return br_add_bridge(buf);
+			return br_add_bridge(net, buf);
 
-		return br_del_bridge(buf);
+		return br_del_bridge(net, buf);
 	}
 	}
 
@@ -374,7 +374,7 @@
 	switch (cmd) {
 	case SIOCGIFBR:
 	case SIOCSIFBR:
-		return old_deviceless(uarg);
+		return old_deviceless(net, uarg);
 
 	case SIOCBRADDBR:
 	case SIOCBRDELBR:
@@ -389,9 +389,9 @@
 
 		buf[IFNAMSIZ-1] = 0;
 		if (cmd == SIOCBRADDBR)
-			return br_add_bridge(buf);
+			return br_add_bridge(net, buf);
 
-		return br_del_bridge(buf);
+		return br_del_bridge(net, buf);
 	}
 	}
 	return -EOPNOTSUPP;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f155e6c..ba7be19 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -82,6 +82,7 @@
  */
 void br_ifinfo_notify(int event, struct net_bridge_port *port)
 {
+	struct net *net = dev_net(port->dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
@@ -97,10 +98,10 @@
 		kfree_skb(skb);
 		goto errout;
 	}
-	err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
 errout:
 	if (err < 0)
-		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
+		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 }
 
 /*
@@ -112,11 +113,8 @@
 	struct net_device *dev;
 	int idx;
 
-	if (net != &init_net)
-		return 0;
-
 	idx = 0;
-	for_each_netdev(&init_net, dev) {
+	for_each_netdev(net, dev) {
 		/* not a bridge port */
 		if (dev->br_port == NULL || idx < cb->args[0])
 			goto skip;
@@ -147,9 +145,6 @@
 	struct net_bridge_port *p;
 	u8 new_state;
 
-	if (net != &init_net)
-		return -EINVAL;
-
 	if (nlmsg_len(nlh) < sizeof(*ifm))
 		return -EINVAL;
 
@@ -165,7 +160,7 @@
 	if (new_state > BR_STATE_BLOCKING)
 		return -EINVAL;
 
-	dev = __dev_get_by_index(&init_net, ifm->ifi_index);
+	dev = __dev_get_by_index(net, ifm->ifi_index);
 	if (!dev)
 		return -ENODEV;
 
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 76340bd..763a3ec 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -35,9 +35,6 @@
 	struct net_bridge_port *p = dev->br_port;
 	struct net_bridge *br;
 
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
 	/* not a port of a bridge */
 	if (p == NULL)
 		return NOTIFY_DONE;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c3dc18d..b6c3b71 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -178,9 +178,9 @@
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
-extern int br_add_bridge(const char *name);
-extern int br_del_bridge(const char *name);
-extern void br_cleanup_bridges(void);
+extern int br_add_bridge(struct net *net, const char *name);
+extern int br_del_bridge(struct net *net, const char *name);
+extern void br_net_exit(struct net *net);
 extern int br_add_if(struct net_bridge *br,
 	      struct net_device *dev);
 extern int br_del_if(struct net_bridge *br,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8b200f9..81ae40b 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,9 +140,6 @@
 	struct net_bridge *br;
 	const unsigned char *buf;
 
-	if (!net_eq(dev_net(dev), &init_net))
-		goto err;
-
 	if (!p)
 		goto err;
 
diff --git a/net/core/Makefile b/net/core/Makefile
index b1332f6..26a37cb 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,6 +6,7 @@
 	 gen_stats.o gen_estimator.o net_namespace.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
+obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
 
 obj-y		     += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/dev.c b/net/core/dev.c
index e8eb2b4..7091040 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -891,7 +891,7 @@
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
  */
-int dev_change_name(struct net_device *dev, char *newname)
+int dev_change_name(struct net_device *dev, const char *newname)
 {
 	char oldname[IFNAMSIZ];
 	int err = 0;
@@ -917,7 +917,6 @@
 		err = dev_alloc_name(dev, newname);
 		if (err < 0)
 			return err;
-		strcpy(newname, dev->name);
 	}
 	else if (__dev_get_by_name(net, newname))
 		return -EEXIST;
@@ -955,6 +954,38 @@
 }
 
 /**
+ *	dev_set_alias - change ifalias of a device
+ *	@dev: device
+ *	@alias: name up to IFALIASZ
+ *	@len: limit of bytes to copy from @alias
+ *
+ *	Set ifalias for a device.
+ */
+int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+{
+	ASSERT_RTNL();
+
+	if (len >= IFALIASZ)
+		return -EINVAL;
+
+	if (!len) {
+		if (dev->ifalias) {
+			kfree(dev->ifalias);
+			dev->ifalias = NULL;
+		}
+		return 0;
+	}
+
+	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+	if (!dev->ifalias)
+		return -ENOMEM;
+
+	strlcpy(dev->ifalias, alias, len+1);
+	return len;
+}
+
+
+/**
  *	netdev_features_change - device changes features
  *	@dev: device to cause notification
  *
@@ -1676,14 +1707,14 @@
 	}
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
 			ip_proto = ip_hdr(skb)->protocol;
 		addr1 = ip_hdr(skb)->saddr;
 		addr2 = ip_hdr(skb)->daddr;
 		ihl = ip_hdr(skb)->ihl;
 		break;
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		ip_proto = ipv6_hdr(skb)->nexthdr;
 		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
 		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@ -3302,6 +3333,12 @@
 	netif_addr_unlock_bh(dev);
 }
 
+/**
+ *	dev_get_flags - get flags reported to userspace
+ *	@dev: device
+ *
+ *	Get the combination of flag bits exported through APIs to userspace.
+ */
 unsigned dev_get_flags(const struct net_device *dev)
 {
 	unsigned flags;
@@ -3326,6 +3363,14 @@
 	return flags;
 }
 
+/**
+ *	dev_change_flags - change device settings
+ *	@dev: device
+ *	@flags: device state flags
+ *
+ *	Change settings on a device based on state flags. The flags are
+ *	in the userspace exported format.
+ */
 int dev_change_flags(struct net_device *dev, unsigned flags)
 {
 	int ret, changes;
@@ -3395,6 +3440,13 @@
 	return ret;
 }
 
+/**
+ *	dev_set_mtu - Change maximum transfer unit
+ *	@dev: device
+ *	@new_mtu: new transfer unit
+ *
+ *	Change the maximum transfer size of the network device.
+ */
 int dev_set_mtu(struct net_device *dev, int new_mtu)
 {
 	int err;
@@ -3419,6 +3471,13 @@
 	return err;
 }
 
+/**
+ *	dev_set_mac_address - Change Media Access Control Address
+ *	@dev: device
+ *	@sa: new address
+ *
+ *	Change the hardware (MAC) address of the device.
+ */
 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 {
 	int err;
@@ -4322,7 +4381,12 @@
 	put_device(&dev->dev);
 }
 
-/* Synchronize with packet receive processing. */
+/**
+ *	synchronize_net -  Synchronize with packet receive processing
+ *
+ *	Wait for packets currently being received to be done.
+ *	Does not block later packets from starting.
+ */
 void synchronize_net(void)
 {
 	might_sleep();
@@ -4624,7 +4688,7 @@
 }
 
 /**
- * netdev_dma_regiser - register the networking subsystem as a DMA client
+ * netdev_dma_register - register the networking subsystem as a DMA client
  */
 static int __init netdev_dma_register(void)
 {
@@ -4670,6 +4734,12 @@
 		one |= NETIF_F_GSO_SOFTWARE;
 	one |= NETIF_F_GSO;
 
+	/*
+	 * If even one device supports a GSO protocol with software fallback,
+	 * enable it for all.
+	 */
+	all |= one & NETIF_F_GSO_SOFTWARE;
+
 	/* If even one device supports robust GSO, enable it for all. */
 	if (one & NETIF_F_GSO_ROBUST)
 		all |= NETIF_F_GSO_ROBUST;
@@ -4719,10 +4789,18 @@
 	return -ENOMEM;
 }
 
-char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+/**
+ *	netdev_drivername - network driver for the device
+ *	@dev: network device
+ *	@buffer: buffer for resulting name
+ *	@len: size of buffer
+ *
+ *	Determine network driver for device.
+ */
+char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
 {
-	struct device_driver *driver;
-	struct device *parent;
+	const struct device_driver *driver;
+	const struct device *parent;
 
 	if (len <= 0 || !buffer)
 		return buffer;
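dev_set_alias() stores a free-form description alongside the interface name; callers must hold the RTNL lock (note the ASSERT_RTNL()) and a zero length frees any existing alias. A minimal in-kernel sketch of those semantics (the wrapper is invented; the real consumer is the sysfs handler added later in this patch):

/* Illustrative wrapper, not part of this patch. */
static int label_netdev(struct net_device *dev, const char *label)
{
	int err;

	rtnl_lock();
	err = dev_set_alias(dev, label, strlen(label));	/* "" clears it */
	rtnl_unlock();

	return err < 0 ? err : 0;
}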
diff --git a/net/core/dst.c b/net/core/dst.c
index fe03266..09c1530 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -203,6 +203,7 @@
 	if (dst_garbage.timer_inc > DST_GC_INC) {
 		dst_garbage.timer_inc = DST_GC_INC;
 		dst_garbage.timer_expires = DST_GC_MIN;
+		cancel_delayed_work(&dst_gc_work);
 		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
 	}
 	spin_unlock_bh(&dst_garbage.lock);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9d92e41..1dc728b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -927,8 +927,7 @@
 			if (skb_queue_len(&neigh->arp_queue) >=
 			    neigh->parms->queue_len) {
 				struct sk_buff *buff;
-				buff = neigh->arp_queue.next;
-				__skb_unlink(buff, &neigh->arp_queue);
+				buff = __skb_dequeue(&neigh->arp_queue);
 				kfree_skb(buff);
 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 			}
@@ -1259,24 +1258,20 @@
 	struct neigh_table *tbl = (struct neigh_table *)arg;
 	long sched_next = 0;
 	unsigned long now = jiffies;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *n;
 
 	spin_lock(&tbl->proxy_queue.lock);
 
-	skb = tbl->proxy_queue.next;
+	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
+		long tdif = NEIGH_CB(skb)->sched_next - now;
 
-	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
-		struct sk_buff *back = skb;
-		long tdif = NEIGH_CB(back)->sched_next - now;
-
-		skb = skb->next;
 		if (tdif <= 0) {
-			struct net_device *dev = back->dev;
-			__skb_unlink(back, &tbl->proxy_queue);
+			struct net_device *dev = skb->dev;
+			__skb_unlink(skb, &tbl->proxy_queue);
 			if (tbl->proxy_redo && netif_running(dev))
-				tbl->proxy_redo(back);
+				tbl->proxy_redo(skb);
 			else
-				kfree_skb(back);
+				kfree_skb(skb);
 
 			dev_put(dev);
 		} else if (!sched_next || tdif < sched_next)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c1f4e0d..92d6b94 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -209,9 +209,44 @@
 	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
 }
 
+static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	size_t count = len;
+	ssize_t ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* ignore trailing newline */
+	if (len >  0 && buf[len - 1] == '\n')
+		--count;
+
+	rtnl_lock();
+	ret = dev_set_alias(netdev, buf, count);
+	rtnl_unlock();
+
+	return ret < 0 ? ret : len;
+}
+
+static ssize_t show_ifalias(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	const struct net_device *netdev = to_net_dev(dev);
+	ssize_t ret = 0;
+
+	rtnl_lock();
+	if (netdev->ifalias)
+		ret = sprintf(buf, "%s\n", netdev->ifalias);
+	rtnl_unlock();
+	return ret;
+}
+
 static struct device_attribute net_class_attributes[] = {
 	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
 	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
+	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
 	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
 	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
 	__ATTR(features, S_IRUGO, show_features, NULL),
@@ -418,6 +453,7 @@
 
 	BUG_ON(dev->reg_state != NETREG_RELEASED);
 
+	kfree(dev->ifalias);
 	kfree((char *)dev - dev->padded);
 }
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b..8862498 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -586,6 +586,7 @@
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
 	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
 	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
 	       + nla_total_size(sizeof(struct rtnl_link_stats))
@@ -640,6 +641,9 @@
 	if (txq->qdisc_sleeping)
 		NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
 
+	if (dev->ifalias)
+		NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+
 	if (1) {
 		struct rtnl_link_ifmap map = {
 			.mem_start   = dev->mem_start,
@@ -713,6 +717,7 @@
 	[IFLA_LINKMODE]		= { .type = NLA_U8 },
 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
+	[IFLA_IFALIAS]	        = { .type = NLA_STRING, .len = IFALIASZ-1 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -853,6 +858,14 @@
 		modified = 1;
 	}
 
+	if (tb[IFLA_IFALIAS]) {
+		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
+				    nla_len(tb[IFLA_IFALIAS]));
+		if (err < 0)
+			goto errout;
+		modified = 1;
+	}
+
 	if (tb[IFLA_BROADCAST]) {
 		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
 		send_addr_notify = 1;
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
new file mode 100644
index 0000000..1f49afc
--- /dev/null
+++ b/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@
+/* skb_dma_map.c: DMA mapping helpers for socket buffers.
+ *
+ * Copyright (C) David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+
+int skb_dma_map(struct device *dev, struct sk_buff *skb,
+		enum dma_data_direction dir)
+{
+	struct skb_shared_info *sp = skb_shinfo(skb);
+	dma_addr_t map;
+	int i;
+
+	map = dma_map_single(dev, skb->data,
+			     skb_headlen(skb), dir);
+	if (dma_mapping_error(dev, map))
+		goto out_err;
+
+	sp->dma_maps[0] = map;
+	for (i = 0; i < sp->nr_frags; i++) {
+		skb_frag_t *fp = &sp->frags[i];
+
+		map = dma_map_page(dev, fp->page, fp->page_offset,
+				   fp->size, dir);
+		if (dma_mapping_error(dev, map))
+			goto unwind;
+		sp->dma_maps[i + 1] = map;
+	}
+	sp->num_dma_maps = i + 1;
+
+	return 0;
+
+unwind:
+	while (--i >= 0) {
+		skb_frag_t *fp = &sp->frags[i];
+
+		dma_unmap_page(dev, sp->dma_maps[i + 1],
+			       fp->size, dir);
+	}
+	dma_unmap_single(dev, sp->dma_maps[0],
+			 skb_headlen(skb), dir);
+out_err:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(skb_dma_map);
+
+void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+		   enum dma_data_direction dir)
+{
+	struct skb_shared_info *sp = skb_shinfo(skb);
+	int i;
+
+	dma_unmap_single(dev, sp->dma_maps[0],
+			 skb_headlen(skb), dir);
+	for (i = 0; i < sp->nr_frags; i++) {
+		skb_frag_t *fp = &sp->frags[i];
+
+		dma_unmap_page(dev, sp->dma_maps[i + 1],
+			       fp->size, dir);
+	}
+}
+EXPORT_SYMBOL(skb_dma_unmap);
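skb_dma_map() maps the linear head and every page fragment in one call, parking the handles in skb_shinfo(skb)->dma_maps[], and unwinds everything itself on failure. A driver-side sketch of the intended use (the foo names are invented; only the two helpers come from this file):

/* Sketch only: the foo_* names are placeholders. */
static int foo_xmit_map(struct device *dev, struct sk_buff *skb)
{
	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;		/* nothing left mapped on failure */

	/* post skb_shinfo(skb)->dma_maps[0 .. num_dma_maps - 1]
	 * to the hardware descriptor ring here */
	return 0;
}

static void foo_tx_complete(struct device *dev, struct sk_buff *skb)
{
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
}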
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ca1ccdf..2c218a08 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -363,8 +363,7 @@
 	}
 }
 
-/* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb)
+static void skb_release_head_state(struct sk_buff *skb)
 {
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
@@ -388,6 +387,12 @@
 	skb->tc_verd = 0;
 #endif
 #endif
+}
+
+/* Free everything but the sk_buff shell. */
+static void skb_release_all(struct sk_buff *skb)
+{
+	skb_release_head_state(skb);
 	skb_release_data(skb);
 }
 
@@ -424,6 +429,38 @@
 	__kfree_skb(skb);
 }
 
+int skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+	struct skb_shared_info *shinfo;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return 0;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_pointer(skb) - skb->head < skb_size)
+		return 0;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return 0;
+
+	skb_release_head_state(skb);
+	shinfo = skb_shinfo(skb);
+	atomic_set(&shinfo->dataref, 1);
+	shinfo->nr_frags = 0;
+	shinfo->gso_size = 0;
+	shinfo->gso_segs = 0;
+	shinfo->gso_type = 0;
+	shinfo->ip6_frag_id = 0;
+	shinfo->frag_list = NULL;
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb_reset_tail_pointer(skb);
+	skb->data = skb->head + NET_SKB_PAD;
+
+	return 1;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp		= old->tstamp;
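skb_recycle_check() lets a driver keep an skb it just finished transmitting and put it straight back on its receive ring, provided the buffer is linear, unshared and large enough; on success the skb has already been reset. A hedged sketch of the intended call site (struct foo_priv and foo_rx_refill() are invented):

/* Sketch only: the foo_* names are placeholders. */
static void foo_tx_done(struct foo_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		foo_rx_refill(priv, skb);	/* reuse as an RX buffer */
	else
		dev_kfree_skb(skb);
}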
diff --git a/net/core/sock.c b/net/core/sock.c
index 91f8bbc..2d358dd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -154,7 +154,8 @@
   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
-  "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
+  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
+  "sk_lock-AF_MAX"
 };
 static const char *af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -168,7 +169,8 @@
   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
-  "slock-AF_RXRPC" , "slock-AF_MAX"
+  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
+  "slock-AF_MAX"
 };
 static const char *af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -182,7 +184,8 @@
   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
-  "clock-AF_RXRPC" , "clock-AF_MAX"
+  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
+  "clock-AF_MAX"
 };
 #endif
 
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 8e95808..9a43073 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -783,7 +783,7 @@
 };
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-module_param(ccid2_debug, bool, 0444);
+module_param(ccid2_debug, bool, 0644);
 MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
 #endif
 
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f6756e0..3b8bd7c 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -963,7 +963,7 @@
 };
 
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
-module_param(ccid3_debug, bool, 0444);
+module_param(ccid3_debug, bool, 0644);
 MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
 #endif
 
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index bcd6ac4..5b3ce06 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -67,7 +67,10 @@
 	u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
 	int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */
 
-	for (i=0; i <= k; i++) {
+	if (k <= 0)
+		return;
+
+	for (i = 0; i <= k; i++) {
 		i_i = tfrc_lh_get_interval(lh, i);
 
 		if (i < k) {
@@ -78,7 +81,6 @@
 			i_tot1 += i_i * tfrc_lh_weights[i-1];
 	}
 
-	BUG_ON(w_tot == 0);
 	lh->i_mean = max(i_tot0, i_tot1) / w_tot;
 }
 
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 97ecec0..1859162 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -10,7 +10,7 @@
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
 int tfrc_debug;
-module_param(tfrc_debug, bool, 0444);
+module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
 #endif
 
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 803933a..779d0ed 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -370,7 +370,7 @@
 		goto discard;
 
 	if (dccp_parse_options(sk, NULL, skb))
-		goto discard;
+		return 1;
 
 	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 		dccp_event_ack_recv(sk, skb);
@@ -610,7 +610,7 @@
 		 * Step 8: Process options and mark acknowledgeable
 		 */
 		if (dccp_parse_options(sk, NULL, skb))
-			goto discard;
+			return 1;
 
 		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 			dccp_event_ack_recv(sk, skb);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index dc7c158..0809b63 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -81,11 +81,11 @@
 		/* Check if this isn't a single byte option */
 		if (opt > DCCPO_MAX_RESERVED) {
 			if (opt_ptr == opt_end)
-				goto out_invalid_option;
+				goto out_nonsensical_length;
 
 			len = *opt_ptr++;
-			if (len < 3)
-				goto out_invalid_option;
+			if (len < 2)
+				goto out_nonsensical_length;
 			/*
 			 * Remove the type and len fields, leaving
 			 * just the value size
@@ -95,7 +95,7 @@
 			opt_ptr += len;
 
 			if (opt_ptr > opt_end)
-				goto out_invalid_option;
+				goto out_nonsensical_length;
 		}
 
 		/*
@@ -283,12 +283,17 @@
 	if (mandatory)
 		goto out_invalid_option;
 
+out_nonsensical_length:
+	/* RFC 4340, 5.8: ignore option and all remaining option space */
 	return 0;
 
 out_invalid_option:
 	DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
 	DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len);
+	DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt;
+	DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0;
+	DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0;
 	return -1;
 }
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ca3b26..d0bd348 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -309,7 +309,9 @@
 		sk->sk_err = ECONNRESET;
 
 	dccp_clear_xmit_timers(sk);
+
 	__skb_queue_purge(&sk->sk_receive_queue);
+	__skb_queue_purge(&sk->sk_write_queue);
 	if (sk->sk_send_head != NULL) {
 		__kfree_skb(sk->sk_send_head);
 		sk->sk_send_head = NULL;
@@ -1028,7 +1030,7 @@
 
 #ifdef CONFIG_IP_DCCP_DEBUG
 int dccp_debug;
-module_param(dccp_debug, bool, 0444);
+module_param(dccp_debug, bool, 0644);
 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
 
 EXPORT_SYMBOL_GPL(dccp_debug);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a80839b..647a9ed 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -129,7 +129,7 @@
 
 	switch (eth->h_proto) {
 #ifdef CONFIG_INET
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return arp_find(eth->h_dest, skb);
 #endif
 	default:
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 3bca97f..949772a 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -157,7 +157,7 @@
 	err = ieee80211_networks_allocate(ieee);
 	if (err) {
 		IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
-		goto failed;
+		goto failed_free_netdev;
 	}
 	ieee80211_networks_initialize(ieee);
 
@@ -193,9 +193,9 @@
 
 	return dev;
 
-      failed:
-	if (dev)
-		free_netdev(dev);
+failed_free_netdev:
+	free_netdev(dev);
+failed:
 	return NULL;
 }
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c10036e..89cb047 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -782,11 +782,15 @@
 		struct sock *sk;
 		struct hlist_node *node;
 
+		num = 0;
+
+		if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
+			continue;
+
 		if (i > s_i)
 			s_num = 0;
 
 		read_lock_bh(lock);
-		num = 0;
 		sk_for_each(sk, node, &head->chain) {
 			struct inet_sock *inet = inet_sk(sk);
 
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index 09d0c3f..de6004d 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -24,6 +24,14 @@
 
 if IP_VS
 
+config	IP_VS_IPV6
+	bool "IPv6 support for IPVS (DANGEROUS)"
+	depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
+	---help---
+	  Add IPv6 support to IPVS. This is incomplete and might be dangerous.
+
+	  Say N if unsure.
+
 config	IP_VS_DEBUG
 	bool "IP virtual server debugging"
 	---help---
@@ -33,7 +41,8 @@
 
 config	IP_VS_TAB_BITS
 	int "IPVS connection table size (the Nth power of 2)"
-	default "12" 
+	range 8 20
+	default 12
 	---help---
 	  The IPVS connection hash table uses the chaining scheme to handle
 	  hash collisions. Using a big IPVS connection hash table will greatly
@@ -71,14 +80,20 @@
 	  This option enables support for load balancing UDP transport
 	  protocol. Say Y if unsure.
 
+config	IP_VS_PROTO_AH_ESP
+	bool
+	depends on UNDEFINED
+
 config	IP_VS_PROTO_ESP
 	bool "ESP load balancing support"
+	select IP_VS_PROTO_AH_ESP
 	---help---
 	  This option enables support for load balancing ESP (Encapsulation
 	  Security Payload) transport protocol. Say Y if unsure.
 
 config	IP_VS_PROTO_AH
 	bool "AH load balancing support"
+	select IP_VS_PROTO_AH_ESP
 	---help---
 	  This option enables support for load balancing AH (Authentication
 	  Header) transport protocol. Say Y if unsure.
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index 30e85de..73a46fe 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -6,8 +6,7 @@
 ip_vs_proto-objs-y :=
 ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
 ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
-ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o
-ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
+ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
 
 ip_vs-objs :=	ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o	   \
 		ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o	   		   \
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 44a6872..9a24332 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -114,9 +114,18 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port)
+static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+				       const union nf_inet_addr *addr,
+				       __be16 port)
 {
-	return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd)
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+				    (__force u32)port, proto, ip_vs_conn_rnd)
+			& IP_VS_CONN_TAB_MASK;
+#endif
+	return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+			    ip_vs_conn_rnd)
 		& IP_VS_CONN_TAB_MASK;
 }
 
@@ -131,7 +140,7 @@
 	int ret;
 
 	/* Hash by protocol, client address and port */
-	hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport);
+	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
 	ct_write_lock(hash);
 
@@ -162,7 +171,7 @@
 	int ret;
 
 	/* unhash it and decrease its reference counter */
-	hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport);
+	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
 	ct_write_lock(hash);
 
@@ -187,20 +196,23 @@
  *	d_addr, d_port: pkt dest address (load balancer)
  */
 static inline struct ip_vs_conn *__ip_vs_conn_in_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
 
-	hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);
+	hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
 
 	ct_read_lock(hash);
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
-		if (s_addr==cp->caddr && s_port==cp->cport &&
-		    d_port==cp->vport && d_addr==cp->vaddr &&
+		if (cp->af == af &&
+		    ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
+		    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
+		    s_port == cp->cport && d_port == cp->vport &&
 		    ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
-		    protocol==cp->protocol) {
+		    protocol == cp->protocol) {
 			/* HIT */
 			atomic_inc(&cp->refcnt);
 			ct_read_unlock(hash);
@@ -214,39 +226,44 @@
 }
 
 struct ip_vs_conn *ip_vs_conn_in_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port)
 {
 	struct ip_vs_conn *cp;
 
-	cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port);
+	cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port);
 	if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))
-		cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port);
+		cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr,
+					 d_port);
 
-	IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
-		  ip_vs_proto_name(protocol),
-		  NIPQUAD(s_addr), ntohs(s_port),
-		  NIPQUAD(d_addr), ntohs(d_port),
-		  cp?"hit":"not hit");
+	IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
+		      ip_vs_proto_name(protocol),
+		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
+		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
+		      cp ? "hit" : "not hit");
 
 	return cp;
 }
 
 /* Get reference to connection template */
 struct ip_vs_conn *ip_vs_ct_in_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
 
-	hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);
+	hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
 
 	ct_read_lock(hash);
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
-		if (s_addr==cp->caddr && s_port==cp->cport &&
-		    d_port==cp->vport && d_addr==cp->vaddr &&
+		if (cp->af == af &&
+		    ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
+		    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
+		    s_port == cp->cport && d_port == cp->vport &&
 		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
-		    protocol==cp->protocol) {
+		    protocol == cp->protocol) {
 			/* HIT */
 			atomic_inc(&cp->refcnt);
 			goto out;
@@ -257,11 +274,11 @@
   out:
 	ct_read_unlock(hash);
 
-	IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
-		  ip_vs_proto_name(protocol),
-		  NIPQUAD(s_addr), ntohs(s_port),
-		  NIPQUAD(d_addr), ntohs(d_port),
-		  cp?"hit":"not hit");
+	IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
+		      ip_vs_proto_name(protocol),
+		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
+		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
+		      cp ? "hit" : "not hit");
 
 	return cp;
 }
@@ -273,7 +290,8 @@
  *	d_addr, d_port: pkt dest address (foreign host)
  */
 struct ip_vs_conn *ip_vs_conn_out_get
-(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
+(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
+ const union nf_inet_addr *d_addr, __be16 d_port)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp, *ret=NULL;
@@ -281,13 +299,15 @@
 	/*
 	 *	Check for "full" addressed entries
 	 */
-	hash = ip_vs_conn_hashkey(protocol, d_addr, d_port);
+	hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port);
 
 	ct_read_lock(hash);
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
-		if (d_addr == cp->caddr && d_port == cp->cport &&
-		    s_port == cp->dport && s_addr == cp->daddr &&
+		if (cp->af == af &&
+		    ip_vs_addr_equal(af, d_addr, &cp->caddr) &&
+		    ip_vs_addr_equal(af, s_addr, &cp->daddr) &&
+		    d_port == cp->cport && s_port == cp->dport &&
 		    protocol == cp->protocol) {
 			/* HIT */
 			atomic_inc(&cp->refcnt);
@@ -298,11 +318,11 @@
 
 	ct_read_unlock(hash);
 
-	IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
-		  ip_vs_proto_name(protocol),
-		  NIPQUAD(s_addr), ntohs(s_port),
-		  NIPQUAD(d_addr), ntohs(d_port),
-		  ret?"hit":"not hit");
+	IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
+		      ip_vs_proto_name(protocol),
+		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
+		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
+		      ret ? "hit" : "not hit");
 
 	return ret;
 }
@@ -369,6 +389,33 @@
 	}
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
+{
+	switch (IP_VS_FWD_METHOD(cp)) {
+	case IP_VS_CONN_F_MASQ:
+		cp->packet_xmit = ip_vs_nat_xmit_v6;
+		break;
+
+	case IP_VS_CONN_F_TUNNEL:
+		cp->packet_xmit = ip_vs_tunnel_xmit_v6;
+		break;
+
+	case IP_VS_CONN_F_DROUTE:
+		cp->packet_xmit = ip_vs_dr_xmit_v6;
+		break;
+
+	case IP_VS_CONN_F_LOCALNODE:
+		cp->packet_xmit = ip_vs_null_xmit;
+		break;
+
+	case IP_VS_CONN_F_BYPASS:
+		cp->packet_xmit = ip_vs_bypass_xmit_v6;
+		break;
+	}
+}
+#endif
+
 
 static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
 {
@@ -402,16 +449,16 @@
 		cp->flags |= atomic_read(&dest->conn_flags);
 	cp->dest = dest;
 
-	IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
-		  "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
-		  "dest->refcnt:%d\n",
-		  ip_vs_proto_name(cp->protocol),
-		  NIPQUAD(cp->caddr), ntohs(cp->cport),
-		  NIPQUAD(cp->vaddr), ntohs(cp->vport),
-		  NIPQUAD(cp->daddr), ntohs(cp->dport),
-		  ip_vs_fwd_tag(cp), cp->state,
-		  cp->flags, atomic_read(&cp->refcnt),
-		  atomic_read(&dest->refcnt));
+	IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
+		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
+		      "dest->refcnt:%d\n",
+		      ip_vs_proto_name(cp->protocol),
+		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+		      ip_vs_fwd_tag(cp), cp->state,
+		      cp->flags, atomic_read(&cp->refcnt),
+		      atomic_read(&dest->refcnt));
 
 	/* Update the connection counters */
 	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -444,8 +491,9 @@
 	struct ip_vs_dest *dest;
 
 	if ((cp) && (!cp->dest)) {
-		dest = ip_vs_find_dest(cp->daddr, cp->dport,
-				       cp->vaddr, cp->vport, cp->protocol);
+		dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
+				       &cp->vaddr, cp->vport,
+				       cp->protocol);
 		ip_vs_bind_dest(cp, dest);
 		return dest;
 	} else
@@ -464,16 +512,16 @@
 	if (!dest)
 		return;
 
-	IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
-		  "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
-		  "dest->refcnt:%d\n",
-		  ip_vs_proto_name(cp->protocol),
-		  NIPQUAD(cp->caddr), ntohs(cp->cport),
-		  NIPQUAD(cp->vaddr), ntohs(cp->vport),
-		  NIPQUAD(cp->daddr), ntohs(cp->dport),
-		  ip_vs_fwd_tag(cp), cp->state,
-		  cp->flags, atomic_read(&cp->refcnt),
-		  atomic_read(&dest->refcnt));
+	IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
+		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
+		      "dest->refcnt:%d\n",
+		      ip_vs_proto_name(cp->protocol),
+		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+		      ip_vs_fwd_tag(cp), cp->state,
+		      cp->flags, atomic_read(&cp->refcnt),
+		      atomic_read(&dest->refcnt));
 
 	/* Update the connection counters */
 	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -526,13 +574,16 @@
 	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
 	    (sysctl_ip_vs_expire_quiescent_template &&
 	     (atomic_read(&dest->weight) == 0))) {
-		IP_VS_DBG(9, "check_template: dest not available for "
-			  "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
-			  "-> d:%u.%u.%u.%u:%d\n",
-			  ip_vs_proto_name(ct->protocol),
-			  NIPQUAD(ct->caddr), ntohs(ct->cport),
-			  NIPQUAD(ct->vaddr), ntohs(ct->vport),
-			  NIPQUAD(ct->daddr), ntohs(ct->dport));
+		IP_VS_DBG_BUF(9, "check_template: dest not available for "
+			      "protocol %s s:%s:%d v:%s:%d "
+			      "-> d:%s:%d\n",
+			      ip_vs_proto_name(ct->protocol),
+			      IP_VS_DBG_ADDR(ct->af, &ct->caddr),
+			      ntohs(ct->cport),
+			      IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
+			      ntohs(ct->vport),
+			      IP_VS_DBG_ADDR(ct->af, &ct->daddr),
+			      ntohs(ct->dport));
 
 		/*
 		 * Invalidate the connection template
@@ -625,8 +676,9 @@
  *	Create a new connection entry and hash it into the ip_vs_conn_tab
  */
 struct ip_vs_conn *
-ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport,
-	       __be32 daddr, __be16 dport, unsigned flags,
+ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
+	       const union nf_inet_addr *vaddr, __be16 vport,
+	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
 	       struct ip_vs_dest *dest)
 {
 	struct ip_vs_conn *cp;
@@ -640,12 +692,13 @@
 
 	INIT_LIST_HEAD(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+	cp->af		   = af;
 	cp->protocol	   = proto;
-	cp->caddr	   = caddr;
+	ip_vs_addr_copy(af, &cp->caddr, caddr);
 	cp->cport	   = cport;
-	cp->vaddr	   = vaddr;
+	ip_vs_addr_copy(af, &cp->vaddr, vaddr);
 	cp->vport	   = vport;
-	cp->daddr          = daddr;
+	ip_vs_addr_copy(af, &cp->daddr, daddr);
 	cp->dport          = dport;
 	cp->flags	   = flags;
 	spin_lock_init(&cp->lock);
@@ -672,7 +725,12 @@
 	cp->timeout = 3*HZ;
 
 	/* Bind its packet transmitter */
-	ip_vs_bind_xmit(cp);
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		ip_vs_bind_xmit_v6(cp);
+	else
+#endif
+		ip_vs_bind_xmit(cp);
 
 	if (unlikely(pp && atomic_read(&pp->appcnt)))
 		ip_vs_bind_app(cp, pp);
@@ -760,12 +818,26 @@
 	else {
 		const struct ip_vs_conn *cp = v;
 
-		seq_printf(seq,
-			"%-3s %08X %04X %08X %04X %08X %04X %-11s %7lu\n",
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			seq_printf(seq,
+				"%-3s " NIP6_FMT " %04X " NIP6_FMT
+				" %04X " NIP6_FMT " %04X %-11s %7lu\n",
 				ip_vs_proto_name(cp->protocol),
-				ntohl(cp->caddr), ntohs(cp->cport),
-				ntohl(cp->vaddr), ntohs(cp->vport),
-				ntohl(cp->daddr), ntohs(cp->dport),
+				NIP6(cp->caddr.in6), ntohs(cp->cport),
+				NIP6(cp->vaddr.in6), ntohs(cp->vport),
+				NIP6(cp->daddr.in6), ntohs(cp->dport),
+				ip_vs_state_name(cp->protocol, cp->state),
+				(cp->timer.expires-jiffies)/HZ);
+		else
+#endif
+			seq_printf(seq,
+				"%-3s %08X %04X %08X %04X"
+				" %08X %04X %-11s %7lu\n",
+				ip_vs_proto_name(cp->protocol),
+				ntohl(cp->caddr.ip), ntohs(cp->cport),
+				ntohl(cp->vaddr.ip), ntohs(cp->vport),
+				ntohl(cp->daddr.ip), ntohs(cp->dport),
 				ip_vs_state_name(cp->protocol, cp->state),
 				(cp->timer.expires-jiffies)/HZ);
 	}
@@ -809,12 +881,27 @@
 	else {
 		const struct ip_vs_conn *cp = v;
 
-		seq_printf(seq,
-			"%-3s %08X %04X %08X %04X %08X %04X %-11s %-6s %7lu\n",
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			seq_printf(seq,
+				"%-3s " NIP6_FMT " %04X " NIP6_FMT
+				" %04X " NIP6_FMT " %04X %-11s %-6s %7lu\n",
 				ip_vs_proto_name(cp->protocol),
-				ntohl(cp->caddr), ntohs(cp->cport),
-				ntohl(cp->vaddr), ntohs(cp->vport),
-				ntohl(cp->daddr), ntohs(cp->dport),
+				NIP6(cp->caddr.in6), ntohs(cp->cport),
+				NIP6(cp->vaddr.in6), ntohs(cp->vport),
+				NIP6(cp->daddr.in6), ntohs(cp->dport),
+				ip_vs_state_name(cp->protocol, cp->state),
+				ip_vs_origin_name(cp->flags),
+				(cp->timer.expires-jiffies)/HZ);
+		else
+#endif
+			seq_printf(seq,
+				"%-3s %08X %04X %08X %04X "
+				"%08X %04X %-11s %-6s %7lu\n",
+				ip_vs_proto_name(cp->protocol),
+				ntohl(cp->caddr.ip), ntohs(cp->cport),
+				ntohl(cp->vaddr.ip), ntohs(cp->vport),
+				ntohl(cp->daddr.ip), ntohs(cp->dport),
 				ip_vs_state_name(cp->protocol, cp->state),
 				ip_vs_origin_name(cp->flags),
 				(cp->timer.expires-jiffies)/HZ);
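
With the two /proc show hunks above, IPv4 entries keep the old fixed-width hex
layout while IPv6 entries are printed with NIP6_FMT, so rows are no longer a
constant width. Purely illustrative sample lines (made-up addresses and timers):

TCP C0A8000A D299 C0A8FE01 0050 0A000001 0050 ESTABLISHED      57
TCP 2001:0db8:0000:0000:0000:0000:0000:0001 D29A 2001:0db8:0000:0000:0000:0000:0000:0064 0050 fd00:0000:0000:0000:0000:0000:0000:0001 0050 ESTABLISHED     912

Userspace that parses these files should split on whitespace rather than rely on
fixed column offsets once IPv6 services are configured.
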
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index a7879ea..958abf3 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -39,6 +39,11 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
 
+#ifdef CONFIG_IP_VS_IPV6
+#include <net/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#endif
+
 #include <net/ip_vs.h>
 
 
@@ -60,6 +65,7 @@
 
 /* ID used in ICMP lookups */
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
+#define icmpv6_id(icmph)        ((icmph)->icmp6_dataun.u_echo.identifier)
 
 const char *ip_vs_proto_name(unsigned proto)
 {
@@ -74,6 +80,10 @@
 		return "TCP";
 	case IPPROTO_ICMP:
 		return "ICMP";
+#ifdef CONFIG_IP_VS_IPV6
+	case IPPROTO_ICMPV6:
+		return "ICMPv6";
+#endif
 	default:
 		sprintf(buf, "IP_%d", proto);
 		return buf;
@@ -92,18 +102,18 @@
 	struct ip_vs_dest *dest = cp->dest;
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		spin_lock(&dest->stats.lock);
-		dest->stats.inpkts++;
-		dest->stats.inbytes += skb->len;
+		dest->stats.ustats.inpkts++;
+		dest->stats.ustats.inbytes += skb->len;
 		spin_unlock(&dest->stats.lock);
 
 		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.inpkts++;
-		dest->svc->stats.inbytes += skb->len;
+		dest->svc->stats.ustats.inpkts++;
+		dest->svc->stats.ustats.inbytes += skb->len;
 		spin_unlock(&dest->svc->stats.lock);
 
 		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.inpkts++;
-		ip_vs_stats.inbytes += skb->len;
+		ip_vs_stats.ustats.inpkts++;
+		ip_vs_stats.ustats.inbytes += skb->len;
 		spin_unlock(&ip_vs_stats.lock);
 	}
 }
@@ -115,18 +125,18 @@
 	struct ip_vs_dest *dest = cp->dest;
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		spin_lock(&dest->stats.lock);
-		dest->stats.outpkts++;
-		dest->stats.outbytes += skb->len;
+		dest->stats.ustats.outpkts++;
+		dest->stats.ustats.outbytes += skb->len;
 		spin_unlock(&dest->stats.lock);
 
 		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.outpkts++;
-		dest->svc->stats.outbytes += skb->len;
+		dest->svc->stats.ustats.outpkts++;
+		dest->svc->stats.ustats.outbytes += skb->len;
 		spin_unlock(&dest->svc->stats.lock);
 
 		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.outpkts++;
-		ip_vs_stats.outbytes += skb->len;
+		ip_vs_stats.ustats.outpkts++;
+		ip_vs_stats.ustats.outbytes += skb->len;
 		spin_unlock(&ip_vs_stats.lock);
 	}
 }
@@ -136,15 +146,15 @@
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
 	spin_lock(&cp->dest->stats.lock);
-	cp->dest->stats.conns++;
+	cp->dest->stats.ustats.conns++;
 	spin_unlock(&cp->dest->stats.lock);
 
 	spin_lock(&svc->stats.lock);
-	svc->stats.conns++;
+	svc->stats.ustats.conns++;
 	spin_unlock(&svc->stats.lock);
 
 	spin_lock(&ip_vs_stats.lock);
-	ip_vs_stats.conns++;
+	ip_vs_stats.ustats.conns++;
 	spin_unlock(&ip_vs_stats.lock);
 }
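
The s/stats.foo/stats.ustats.foo/ churn in the three stats helpers above reflects a
split of struct ip_vs_stats into a userspace-visible counter block plus kernel-only
state. The definition lives in include/net/ip_vs.h, not in this diff; roughly:

struct ip_vs_stats {
	struct ip_vs_stats_user	ustats;	/* conns, {in,out}pkts, {in,out}bytes,
					 * rate estimates; copied to userspace
					 * as one block */
	struct ip_vs_estimator	est;	/* rate estimator state */
	spinlock_t		lock;	/* serializes counter updates */
};
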
 
@@ -173,20 +183,28 @@
 		    __be16 ports[2])
 {
 	struct ip_vs_conn *cp = NULL;
-	struct iphdr *iph = ip_hdr(skb);
+	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
 	struct ip_vs_conn *ct;
-	__be16  dport;	 /* destination port to forward */
-	__be32  snet;	 /* source network of the client, after masking */
+	__be16  dport;			/* destination port to forward */
+	union nf_inet_addr snet;	/* source network of the client,
+					   after masking */
+
+	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 
 	/* Mask saddr with the netmask to adjust template granularity */
-	snet = iph->saddr & svc->netmask;
+#ifdef CONFIG_IP_VS_IPV6
+	if (svc->af == AF_INET6)
+		ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
+	else
+#endif
+		snet.ip = iph.saddr.ip & svc->netmask;
 
-	IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u "
-		  "mnet %u.%u.%u.%u\n",
-		  NIPQUAD(iph->saddr), ntohs(ports[0]),
-		  NIPQUAD(iph->daddr), ntohs(ports[1]),
-		  NIPQUAD(snet));
+	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
+		      "mnet %s\n",
+		      IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
+		      IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+		      IP_VS_DBG_ADDR(svc->af, &snet));
 
 	/*
 	 * As far as we know, FTP is a very complicated network protocol, and
@@ -204,11 +222,11 @@
 	if (ports[1] == svc->port) {
 		/* Check if a template already exists */
 		if (svc->port != FTPPORT)
-			ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
-					       iph->daddr, ports[1]);
+			ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
+					     &iph.daddr, ports[1]);
 		else
-			ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
-					       iph->daddr, 0);
+			ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
+					     &iph.daddr, 0);
 
 		if (!ct || !ip_vs_check_template(ct)) {
 			/*
@@ -228,18 +246,18 @@
 			 * for ftp service.
 			 */
 			if (svc->port != FTPPORT)
-				ct = ip_vs_conn_new(iph->protocol,
-						    snet, 0,
-						    iph->daddr,
+				ct = ip_vs_conn_new(svc->af, iph.protocol,
+						    &snet, 0,
+						    &iph.daddr,
 						    ports[1],
-						    dest->addr, dest->port,
+						    &dest->addr, dest->port,
 						    IP_VS_CONN_F_TEMPLATE,
 						    dest);
 			else
-				ct = ip_vs_conn_new(iph->protocol,
-						    snet, 0,
-						    iph->daddr, 0,
-						    dest->addr, 0,
+				ct = ip_vs_conn_new(svc->af, iph.protocol,
+						    &snet, 0,
+						    &iph.daddr, 0,
+						    &dest->addr, 0,
 						    IP_VS_CONN_F_TEMPLATE,
 						    dest);
 			if (ct == NULL)
@@ -258,12 +276,16 @@
 		 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
 		 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
 		 */
-		if (svc->fwmark)
-			ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0,
-					       htonl(svc->fwmark), 0);
-		else
-			ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
-					       iph->daddr, 0);
+		if (svc->fwmark) {
+			union nf_inet_addr fwmark = {
+				.all = { 0, 0, 0, htonl(svc->fwmark) }
+			};
+
+			ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0,
+					     &fwmark, 0);
+		} else
+			ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
+					     &iph.daddr, 0);
 
 		if (!ct || !ip_vs_check_template(ct)) {
 			/*
@@ -282,18 +304,22 @@
 			/*
 			 * Create a template according to the service
 			 */
-			if (svc->fwmark)
-				ct = ip_vs_conn_new(IPPROTO_IP,
-						    snet, 0,
-						    htonl(svc->fwmark), 0,
-						    dest->addr, 0,
+			if (svc->fwmark) {
+				union nf_inet_addr fwmark = {
+					.all = { 0, 0, 0, htonl(svc->fwmark) }
+				};
+
+				ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
+						    &snet, 0,
+						    &fwmark, 0,
+						    &dest->addr, 0,
 						    IP_VS_CONN_F_TEMPLATE,
 						    dest);
-			else
-				ct = ip_vs_conn_new(iph->protocol,
-						    snet, 0,
-						    iph->daddr, 0,
-						    dest->addr, 0,
+			} else
+				ct = ip_vs_conn_new(svc->af, iph.protocol,
+						    &snet, 0,
+						    &iph.daddr, 0,
+						    &dest->addr, 0,
 						    IP_VS_CONN_F_TEMPLATE,
 						    dest);
 			if (ct == NULL)
@@ -310,10 +336,10 @@
 	/*
 	 *    Create a new connection according to the template
 	 */
-	cp = ip_vs_conn_new(iph->protocol,
-			    iph->saddr, ports[0],
-			    iph->daddr, ports[1],
-			    dest->addr, dport,
+	cp = ip_vs_conn_new(svc->af, iph.protocol,
+			    &iph.saddr, ports[0],
+			    &iph.daddr, ports[1],
+			    &dest->addr, dport,
 			    0,
 			    dest);
 	if (cp == NULL) {
@@ -342,12 +368,12 @@
 ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
 	struct ip_vs_conn *cp = NULL;
-	struct iphdr *iph = ip_hdr(skb);
+	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
 	__be16 _ports[2], *pptr;
 
-	pptr = skb_header_pointer(skb, iph->ihl*4,
-				  sizeof(_ports), _ports);
+	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
+	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return NULL;
 
@@ -377,22 +403,22 @@
 	/*
 	 *    Create a connection entry.
 	 */
-	cp = ip_vs_conn_new(iph->protocol,
-			    iph->saddr, pptr[0],
-			    iph->daddr, pptr[1],
-			    dest->addr, dest->port?dest->port:pptr[1],
+	cp = ip_vs_conn_new(svc->af, iph.protocol,
+			    &iph.saddr, pptr[0],
+			    &iph.daddr, pptr[1],
+			    &dest->addr, dest->port ? dest->port : pptr[1],
 			    0,
 			    dest);
 	if (cp == NULL)
 		return NULL;
 
-	IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u "
-		  "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n",
-		  ip_vs_fwd_tag(cp),
-		  NIPQUAD(cp->caddr), ntohs(cp->cport),
-		  NIPQUAD(cp->vaddr), ntohs(cp->vport),
-		  NIPQUAD(cp->daddr), ntohs(cp->dport),
-		  cp->flags, atomic_read(&cp->refcnt));
+	IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
+		      "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
+		      ip_vs_fwd_tag(cp),
+		      IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
+		      IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
+		      cp->flags, atomic_read(&cp->refcnt));
 
 	ip_vs_conn_stats(cp, svc);
 	return cp;
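
ip_vs_sched_persist() and ip_vs_schedule() now parse the packet once into an
af-independent struct ip_vs_iphdr snapshot instead of poking at struct iphdr
directly; iph.len doubles as the transport-header offset passed to
skb_header_pointer(). The helper sits in include/net/ip_vs.h, not in this diff;
roughly:

struct ip_vs_iphdr {
	int len;			/* network header length */
	__u8 protocol;
	union nf_inet_addr saddr;
	union nf_inet_addr daddr;
};

static inline void
ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		const struct ipv6hdr *iph = nh;
		iphdr->len = sizeof(struct ipv6hdr);
		iphdr->protocol = iph->nexthdr;
		ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
		ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
	} else
#endif
	{
		const struct iphdr *iph = nh;
		iphdr->len = iph->ihl * 4;
		iphdr->protocol = iph->protocol;
		iphdr->saddr.ip = iph->saddr;
		iphdr->daddr.ip = iph->daddr;
	}
}
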
@@ -408,31 +434,39 @@
 		struct ip_vs_protocol *pp)
 {
 	__be16 _ports[2], *pptr;
-	struct iphdr *iph = ip_hdr(skb);
+	struct ip_vs_iphdr iph;
+	int unicast;
+	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 
-	pptr = skb_header_pointer(skb, iph->ihl*4,
-				  sizeof(_ports), _ports);
+	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
 	if (pptr == NULL) {
 		ip_vs_service_put(svc);
 		return NF_DROP;
 	}
 
+#ifdef CONFIG_IP_VS_IPV6
+	if (svc->af == AF_INET6)
+		unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
+	else
+#endif
+		unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+
 	/* if it is fwmark-based service, the cache_bypass sysctl is up
-	   and the destination is RTN_UNICAST (and not local), then create
+	   and the destination is a non-local unicast, then create
 	   a cache_bypass connection entry */
-	if (sysctl_ip_vs_cache_bypass && svc->fwmark
-	    && (inet_addr_type(&init_net, iph->daddr) == RTN_UNICAST)) {
+	if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
 		int ret, cs;
 		struct ip_vs_conn *cp;
+		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };
 
 		ip_vs_service_put(svc);
 
 		/* create a new connection entry */
 		IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
-		cp = ip_vs_conn_new(iph->protocol,
-				    iph->saddr, pptr[0],
-				    iph->daddr, pptr[1],
-				    0, 0,
+		cp = ip_vs_conn_new(svc->af, iph.protocol,
+				    &iph.saddr, pptr[0],
+				    &iph.daddr, pptr[1],
+				    &daddr, 0,
 				    IP_VS_CONN_F_BYPASS,
 				    NULL);
 		if (cp == NULL)
@@ -473,7 +507,14 @@
 	 * created, the TCP RST packet cannot be sent, instead that
 	 * ICMP_PORT_UNREACH is sent here no matter whether it is TCP or UDP. --WZ
 	 */
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+#ifdef CONFIG_IP_VS_IPV6
+	if (svc->af == AF_INET6)
+		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
+			    skb->dev);
+	else
+#endif
+		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
 	return NF_DROP;
 }
 
@@ -512,6 +553,14 @@
 	return err;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
+{
+	/* TODO IPv6: Find out what to do here for IPv6 */
+	return 0;
+}
+#endif
+
 /*
  * Packet has been made sufficiently writable in caller
  * - inout: 1=in->out, 0=out->in
@@ -526,14 +575,14 @@
 	struct iphdr *ciph	 = (struct iphdr *)(icmph + 1);
 
 	if (inout) {
-		iph->saddr = cp->vaddr;
+		iph->saddr = cp->vaddr.ip;
 		ip_send_check(iph);
-		ciph->daddr = cp->vaddr;
+		ciph->daddr = cp->vaddr.ip;
 		ip_send_check(ciph);
 	} else {
-		iph->daddr = cp->daddr;
+		iph->daddr = cp->daddr.ip;
 		ip_send_check(iph);
-		ciph->saddr = cp->daddr;
+		ciph->saddr = cp->daddr.ip;
 		ip_send_check(ciph);
 	}
 
@@ -560,21 +609,112 @@
 			"Forwarding altered incoming ICMP");
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		    struct ip_vs_conn *cp, int inout)
+{
+	struct ipv6hdr *iph	 = ipv6_hdr(skb);
+	unsigned int icmp_offset = sizeof(struct ipv6hdr);
+	struct icmp6hdr *icmph	 = (struct icmp6hdr *)(skb_network_header(skb) +
+						      icmp_offset);
+	struct ipv6hdr *ciph	 = (struct ipv6hdr *)(icmph + 1);
+
+	if (inout) {
+		iph->saddr = cp->vaddr.in6;
+		ciph->daddr = cp->vaddr.in6;
+	} else {
+		iph->daddr = cp->daddr.in6;
+		ciph->saddr = cp->daddr.in6;
+	}
+
+	/* the TCP/UDP port */
+	if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) {
+		__be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
+
+		if (inout)
+			ports[1] = cp->vport;
+		else
+			ports[0] = cp->dport;
+	}
+
+	/* And finally the ICMP checksum */
+	icmph->icmp6_cksum = 0;
+	/* TODO IPv6: is this correct for ICMPv6? */
+	ip_vs_checksum_complete(skb, icmp_offset);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (inout)
+		IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
+			"Forwarding altered outgoing ICMPv6");
+	else
+		IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
+			"Forwarding altered incoming ICMPv6");
+}
+#endif
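
ip_vs_nat_icmp_v6() above zeroes icmp6_cksum, calls ip_vs_checksum_complete()
without using its result, and then marks the skb CHECKSUM_UNNECESSARY, with a TODO
asking whether that is right. ICMPv6 checksums also cover an IPv6 pseudo-header, so
a full recomputation would look roughly like the sketch below, reusing iph, icmph
and icmp_offset from the function above (an assumption about a possible follow-up,
not what this patch does):

/* Hypothetical full ICMPv6 checksum recalculation (sketch only). */
icmph->icmp6_cksum = 0;
skb->csum = skb_checksum(skb, icmp_offset, skb->len - icmp_offset, 0);
icmph->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
				     skb->len - icmp_offset,
				     IPPROTO_ICMPV6, skb->csum);
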
+
+/* Handle relevant response ICMP messages - forward to the right
+ * destination host. Used for NAT and local client.
+ */
+static int handle_response_icmp(int af, struct sk_buff *skb,
+				union nf_inet_addr *snet,
+				__u8 protocol, struct ip_vs_conn *cp,
+				struct ip_vs_protocol *pp,
+				unsigned int offset, unsigned int ihl)
+{
+	unsigned int verdict = NF_DROP;
+
+	if (IP_VS_FWD_METHOD(cp) != 0) {
+		IP_VS_ERR("shouldn't reach here, because the box is on the "
+			  "half connection in the tun/dr module.\n");
+	}
+
+	/* Ensure the checksum is correct */
+	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
+		/* Failed checksum! */
+		IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
+			      IP_VS_DBG_ADDR(af, snet));
+		goto out;
+	}
+
+	if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol)
+		offset += 2 * sizeof(__u16);
+	if (!skb_make_writable(skb, offset))
+		goto out;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		ip_vs_nat_icmp_v6(skb, pp, cp, 1);
+	else
+#endif
+		ip_vs_nat_icmp(skb, pp, cp, 1);
+
+	/* do the statistics and put it back */
+	ip_vs_out_stats(cp, skb);
+
+	skb->ipvs_property = 1;
+	verdict = NF_ACCEPT;
+
+out:
+	__ip_vs_conn_put(cp);
+
+	return verdict;
+}
+
 /*
  *	Handle ICMP messages in the inside-to-outside direction (outgoing).
- *	Find any that might be relevant, check against existing connections,
- *	forward to the right destination host if relevant.
+ *	Find any that might be relevant, check against existing connections.
  *	Currently handles error types - unreachable, quench, ttl exceeded.
- *	(Only used in VS/NAT)
  */
 static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
 {
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
+	struct ip_vs_iphdr ciph;
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
-	unsigned int offset, ihl, verdict;
+	unsigned int offset, ihl;
+	union nf_inet_addr snet;
 
 	*related = 1;
 
@@ -627,133 +767,110 @@
 
 	offset += cih->ihl * 4;
 
+	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(skb, pp, cih, offset, 1);
+	cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
 	if (!cp)
 		return NF_ACCEPT;
 
-	verdict = NF_DROP;
-
-	if (IP_VS_FWD_METHOD(cp) != 0) {
-		IP_VS_ERR("shouldn't reach here, because the box is on the "
-			  "half connection in the tun/dr module.\n");
-	}
-
-	/* Ensure the checksum is correct */
-	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
-		/* Failed checksum! */
-		IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n",
-			  NIPQUAD(iph->saddr));
-		goto out;
-	}
-
-	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
-		offset += 2 * sizeof(__u16);
-	if (!skb_make_writable(skb, offset))
-		goto out;
-
-	ip_vs_nat_icmp(skb, pp, cp, 1);
-
-	/* do the statistics and put it back */
-	ip_vs_out_stats(cp, skb);
-
-	skb->ipvs_property = 1;
-	verdict = NF_ACCEPT;
-
-  out:
-	__ip_vs_conn_put(cp);
-
-	return verdict;
+	snet.ip = iph->saddr;
+	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
+				    pp, offset, ihl);
 }
 
-static inline int is_tcp_reset(const struct sk_buff *skb)
+#ifdef CONFIG_IP_VS_IPV6
+static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
+{
+	struct ipv6hdr *iph;
+	struct icmp6hdr	_icmph, *ic;
+	struct ipv6hdr	_ciph, *cih;	/* The ip header contained
+					   within the ICMP */
+	struct ip_vs_iphdr ciph;
+	struct ip_vs_conn *cp;
+	struct ip_vs_protocol *pp;
+	unsigned int offset;
+	union nf_inet_addr snet;
+
+	*related = 1;
+
+	/* reassemble IP fragments */
+	if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
+		if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT))
+			return NF_STOLEN;
+	}
+
+	iph = ipv6_hdr(skb);
+	offset = sizeof(struct ipv6hdr);
+	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
+	if (ic == NULL)
+		return NF_DROP;
+
+	IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
+		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
+		  NIP6(iph->saddr), NIP6(iph->daddr));
+
+	/*
+	 * Work through seeing if this is for us.
+	 * These checks are supposed to be in an order that means easy
+	 * things are checked first to speed up processing.... however
+	 * this means that some packets will manage to get a long way
+	 * down this stack and then be rejected, but that's life.
+	 */
+	if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
+	    (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
+	    (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
+		*related = 0;
+		return NF_ACCEPT;
+	}
+
+	/* Now find the contained IP header */
+	offset += sizeof(_icmph);
+	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+	if (cih == NULL)
+		return NF_ACCEPT; /* The packet looks wrong, ignore */
+
+	pp = ip_vs_proto_get(cih->nexthdr);
+	if (!pp)
+		return NF_ACCEPT;
+
+	/* Is the embedded protocol header present? */
+	/* TODO: we don't support fragmentation at the moment anyways */
+	if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
+		return NF_ACCEPT;
+
+	IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for");
+
+	offset += sizeof(struct ipv6hdr);
+
+	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
+	/* The embedded headers contain source and dest in reverse order */
+	cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+	if (!cp)
+		return NF_ACCEPT;
+
+	ipv6_addr_copy(&snet.in6, &iph->saddr);
+	return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
+				    pp, offset, sizeof(struct ipv6hdr));
+}
+#endif
+
+static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
 {
 	struct tcphdr _tcph, *th;
 
-	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
+	th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
 	if (th == NULL)
 		return 0;
 	return th->rst;
 }
 
-/*
- *	It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
- *	Check if outgoing packet belongs to the established ip_vs_conn,
- *      rewrite addresses of the packet and send it on its way...
+/* Handle response packets: rewrite addresses and send away...
+ * Used for NAT and local client.
  */
 static unsigned int
-ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
-	  const struct net_device *in, const struct net_device *out,
-	  int (*okfn)(struct sk_buff *))
+handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+		struct ip_vs_conn *cp, int ihl)
 {
-	struct iphdr	*iph;
-	struct ip_vs_protocol *pp;
-	struct ip_vs_conn *cp;
-	int ihl;
-
-	EnterFunction(11);
-
-	if (skb->ipvs_property)
-		return NF_ACCEPT;
-
-	iph = ip_hdr(skb);
-	if (unlikely(iph->protocol == IPPROTO_ICMP)) {
-		int related, verdict = ip_vs_out_icmp(skb, &related);
-
-		if (related)
-			return verdict;
-		iph = ip_hdr(skb);
-	}
-
-	pp = ip_vs_proto_get(iph->protocol);
-	if (unlikely(!pp))
-		return NF_ACCEPT;
-
-	/* reassemble IP fragments */
-	if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) &&
-		     !pp->dont_defrag)) {
-		if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
-			return NF_STOLEN;
-		iph = ip_hdr(skb);
-	}
-
-	ihl = iph->ihl << 2;
-
-	/*
-	 * Check if the packet belongs to an existing entry
-	 */
-	cp = pp->conn_out_get(skb, pp, iph, ihl, 0);
-
-	if (unlikely(!cp)) {
-		if (sysctl_ip_vs_nat_icmp_send &&
-		    (pp->protocol == IPPROTO_TCP ||
-		     pp->protocol == IPPROTO_UDP)) {
-			__be16 _ports[2], *pptr;
-
-			pptr = skb_header_pointer(skb, ihl,
-						  sizeof(_ports), _ports);
-			if (pptr == NULL)
-				return NF_ACCEPT;	/* Not for me */
-			if (ip_vs_lookup_real_service(iph->protocol,
-						      iph->saddr, pptr[0])) {
-				/*
-				 * Notify the real server: there is no
-				 * existing entry if it is not RST
-				 * packet or not TCP packet.
-				 */
-				if (iph->protocol != IPPROTO_TCP
-				    || !is_tcp_reset(skb)) {
-					icmp_send(skb,ICMP_DEST_UNREACH,
-						  ICMP_PORT_UNREACH, 0);
-					return NF_DROP;
-				}
-			}
-		}
-		IP_VS_DBG_PKT(12, pp, skb, 0,
-			      "packet continues traversal as normal");
-		return NF_ACCEPT;
-	}
-
 	IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet");
 
 	if (!skb_make_writable(skb, ihl))
@@ -762,8 +879,16 @@
 	/* mangle the packet */
 	if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
 		goto drop;
-	ip_hdr(skb)->saddr = cp->vaddr;
-	ip_send_check(ip_hdr(skb));
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		ipv6_hdr(skb)->saddr = cp->vaddr.in6;
+	else
+#endif
+	{
+		ip_hdr(skb)->saddr = cp->vaddr.ip;
+		ip_send_check(ip_hdr(skb));
+	}
 
 	/* For policy routing, packets originating from this
 	 * machine itself may be routed differently to packets
@@ -771,8 +896,14 @@
 	 * if it came from this machine itself.  So re-compute
 	 * the routing information.
 	 */
-	if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
-		goto drop;
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6) {
+		if (ip6_route_me_harder(skb) != 0)
+			goto drop;
+	} else
+#endif
+		if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
+			goto drop;
 
 	IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
 
@@ -785,12 +916,126 @@
 	LeaveFunction(11);
 	return NF_ACCEPT;
 
-  drop:
+drop:
 	ip_vs_conn_put(cp);
 	kfree_skb(skb);
 	return NF_STOLEN;
 }
 
+/*
+ *	It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
+ *	Check if outgoing packet belongs to the established ip_vs_conn.
+ */
+static unsigned int
+ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
+	  const struct net_device *in, const struct net_device *out,
+	  int (*okfn)(struct sk_buff *))
+{
+	struct ip_vs_iphdr iph;
+	struct ip_vs_protocol *pp;
+	struct ip_vs_conn *cp;
+	int af;
+
+	EnterFunction(11);
+
+	af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
+
+	if (skb->ipvs_property)
+		return NF_ACCEPT;
+
+	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6) {
+		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+			int related, verdict = ip_vs_out_icmp_v6(skb, &related);
+
+			if (related)
+				return verdict;
+			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+		}
+	} else
+#endif
+		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
+			int related, verdict = ip_vs_out_icmp(skb, &related);
+
+			if (related)
+				return verdict;
+			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+		}
+
+	pp = ip_vs_proto_get(iph.protocol);
+	if (unlikely(!pp))
+		return NF_ACCEPT;
+
+	/* reassemble IP fragments */
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6) {
+		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+			int related, verdict = ip_vs_out_icmp_v6(skb, &related);
+
+			if (related)
+				return verdict;
+
+			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+		}
+	} else
+#endif
+		if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) &&
+			     !pp->dont_defrag)) {
+			if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
+				return NF_STOLEN;
+
+			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+		}
+
+	/*
+	 * Check if the packet belongs to an existing entry
+	 */
+	cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+
+	if (unlikely(!cp)) {
+		if (sysctl_ip_vs_nat_icmp_send &&
+		    (pp->protocol == IPPROTO_TCP ||
+		     pp->protocol == IPPROTO_UDP)) {
+			__be16 _ports[2], *pptr;
+
+			pptr = skb_header_pointer(skb, iph.len,
+						  sizeof(_ports), _ports);
+			if (pptr == NULL)
+				return NF_ACCEPT;	/* Not for me */
+			if (ip_vs_lookup_real_service(af, iph.protocol,
+						      &iph.saddr,
+						      pptr[0])) {
+				/*
+				 * Notify the real server: there is no
+				 * existing entry if it is not RST
+				 * packet or not TCP packet.
+				 */
+				if (iph.protocol != IPPROTO_TCP
+				    || !is_tcp_reset(skb, iph.len)) {
+#ifdef CONFIG_IP_VS_IPV6
+					if (af == AF_INET6)
+						icmpv6_send(skb,
+							    ICMPV6_DEST_UNREACH,
+							    ICMPV6_PORT_UNREACH,
+							    0, skb->dev);
+					else
+#endif
+						icmp_send(skb,
+							  ICMP_DEST_UNREACH,
+							  ICMP_PORT_UNREACH, 0);
+					return NF_DROP;
+				}
+			}
+		}
+		IP_VS_DBG_PKT(12, pp, skb, 0,
+			      "packet continues traversal as normal");
+		return NF_ACCEPT;
+	}
+
+	return handle_response(af, skb, pp, cp, iph.len);
+}
+
 
 /*
  *	Handle ICMP messages in the outside-to-inside direction (incoming).
@@ -804,9 +1049,11 @@
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
+	struct ip_vs_iphdr ciph;
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
 	unsigned int offset, ihl, verdict;
+	union nf_inet_addr snet;
 
 	*related = 1;
 
@@ -860,10 +1107,20 @@
 
 	offset += cih->ihl * 4;
 
+	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_in_get(skb, pp, cih, offset, 1);
-	if (!cp)
+	cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
+	if (!cp) {
+		/* The packet could also belong to a local client */
+		cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+		if (cp) {
+			snet.ip = iph->saddr;
+			return handle_response_icmp(AF_INET, skb, &snet,
+						    cih->protocol, cp, pp,
+						    offset, ihl);
+		}
 		return NF_ACCEPT;
+	}
 
 	verdict = NF_DROP;
 
@@ -888,6 +1145,105 @@
 	return verdict;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static int
+ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
+{
+	struct ipv6hdr *iph;
+	struct icmp6hdr	_icmph, *ic;
+	struct ipv6hdr	_ciph, *cih;	/* The ip header contained
+					   within the ICMP */
+	struct ip_vs_iphdr ciph;
+	struct ip_vs_conn *cp;
+	struct ip_vs_protocol *pp;
+	unsigned int offset, verdict;
+	union nf_inet_addr snet;
+
+	*related = 1;
+
+	/* reassemble IP fragments */
+	if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
+		if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ?
+					       IP_DEFRAG_VS_IN :
+					       IP_DEFRAG_VS_FWD))
+			return NF_STOLEN;
+	}
+
+	iph = ipv6_hdr(skb);
+	offset = sizeof(struct ipv6hdr);
+	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
+	if (ic == NULL)
+		return NF_DROP;
+
+	IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
+		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
+		  NIP6(iph->saddr), NIP6(iph->daddr));
+
+	/*
+	 * Work through seeing if this is for us.
+	 * These checks are supposed to be in an order that means easy
+	 * things are checked first to speed up processing.... however
+	 * this means that some packets will manage to get a long way
+	 * down this stack and then be rejected, but that's life.
+	 */
+	if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
+	    (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
+	    (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
+		*related = 0;
+		return NF_ACCEPT;
+	}
+
+	/* Now find the contained IP header */
+	offset += sizeof(_icmph);
+	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+	if (cih == NULL)
+		return NF_ACCEPT; /* The packet looks wrong, ignore */
+
+	pp = ip_vs_proto_get(cih->nexthdr);
+	if (!pp)
+		return NF_ACCEPT;
+
+	/* Is the embedded protocol header present? */
+	/* TODO: we don't support fragmentation at the moment anyways */
+	if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
+		return NF_ACCEPT;
+
+	IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for");
+
+	offset += sizeof(struct ipv6hdr);
+
+	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
+	/* The embedded headers contain source and dest in reverse order */
+	cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
+	if (!cp) {
+		/* The packet could also belong to a local client */
+		cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+		if (cp) {
+			ipv6_addr_copy(&snet.in6, &iph->saddr);
+			return handle_response_icmp(AF_INET6, skb, &snet,
+						    cih->nexthdr,
+						    cp, pp, offset,
+						    sizeof(struct ipv6hdr));
+		}
+		return NF_ACCEPT;
+	}
+
+	verdict = NF_DROP;
+
+	/* do the statistics and put it back */
+	ip_vs_in_stats(cp, skb);
+	if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr)
+		offset += 2 * sizeof(__u16);
+	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
+	/* do not touch skb anymore */
+
+	__ip_vs_conn_put(cp);
+
+	return verdict;
+}
+#endif
+
+
 /*
  *	Check if it's for virtual services, look it up,
  *	and send it on its way...
@@ -897,50 +1253,54 @@
 	 const struct net_device *in, const struct net_device *out,
 	 int (*okfn)(struct sk_buff *))
 {
-	struct iphdr	*iph;
+	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_conn *cp;
-	int ret, restart;
-	int ihl;
+	int ret, restart, af;
+
+	af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
+
+	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 
 	/*
-	 *	Big tappo: only PACKET_HOST (neither loopback nor mcasts)
-	 *	... don't know why 1st test DOES NOT include 2nd (?)
+	 *	Big tappo: only PACKET_HOST, including loopback for local client
+	 *	Don't handle local packets on IPv6 for now
 	 */
-	if (unlikely(skb->pkt_type != PACKET_HOST
-		     || skb->dev->flags & IFF_LOOPBACK || skb->sk)) {
-		IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n",
-			  skb->pkt_type,
-			  ip_hdr(skb)->protocol,
-			  NIPQUAD(ip_hdr(skb)->daddr));
+	if (unlikely(skb->pkt_type != PACKET_HOST)) {
+		IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n",
+			      skb->pkt_type,
+			      iph.protocol,
+			      IP_VS_DBG_ADDR(af, &iph.daddr));
 		return NF_ACCEPT;
 	}
 
-	iph = ip_hdr(skb);
-	if (unlikely(iph->protocol == IPPROTO_ICMP)) {
+	if (unlikely(iph.protocol == IPPROTO_ICMP)) {
 		int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
 
 		if (related)
 			return verdict;
-		iph = ip_hdr(skb);
+		ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 	}
 
 	/* Protocol supported? */
-	pp = ip_vs_proto_get(iph->protocol);
+	pp = ip_vs_proto_get(iph.protocol);
 	if (unlikely(!pp))
 		return NF_ACCEPT;
 
-	ihl = iph->ihl << 2;
-
 	/*
 	 * Check if the packet belongs to an existing connection entry
 	 */
-	cp = pp->conn_in_get(skb, pp, iph, ihl, 0);
+	cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
 
 	if (unlikely(!cp)) {
 		int v;
 
-		if (!pp->conn_schedule(skb, pp, &v, &cp))
+		/* For local client packets, it could be a response */
+		cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+		if (cp)
+			return handle_response(af, skb, pp, cp, iph.len);
+
+		if (!pp->conn_schedule(af, skb, pp, &v, &cp))
 			return v;
 	}
 
@@ -984,7 +1344,8 @@
 	 * encourage the standby servers to update the connections timeout
 	 */
 	atomic_inc(&cp->in_pkts);
-	if ((ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+	if (af == AF_INET &&
+	    (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
 	    (((cp->protocol != IPPROTO_TCP ||
 	       cp->state == IP_VS_TCP_S_ESTABLISHED) &&
 	      (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
@@ -1023,6 +1384,21 @@
 	return ip_vs_in_icmp(skb, &r, hooknum);
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static unsigned int
+ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
+		      const struct net_device *in, const struct net_device *out,
+		      int (*okfn)(struct sk_buff *))
+{
+	int r;
+
+	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+		return NF_ACCEPT;
+
+	return ip_vs_in_icmp_v6(skb, &r, hooknum);
+}
+#endif
+
 
 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
 	/* After packet filtering, forward packet through VS/DR, VS/TUN,
@@ -1060,6 +1436,43 @@
 		.hooknum        = NF_INET_POST_ROUTING,
 		.priority       = NF_IP_PRI_NAT_SRC-1,
 	},
+#ifdef CONFIG_IP_VS_IPV6
+	/* After packet filtering, forward packet through VS/DR, VS/TUN,
+	 * or VS/NAT(change destination), so that filtering rules can be
+	 * applied to IPVS. */
+	{
+		.hook		= ip_vs_in,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum        = NF_INET_LOCAL_IN,
+		.priority       = 100,
+	},
+	/* After packet filtering, change source only for VS/NAT */
+	{
+		.hook		= ip_vs_out,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum        = NF_INET_FORWARD,
+		.priority       = 100,
+	},
+	/* After packet filtering (but before ip_vs_out_icmp_v6), catch
+	 * icmpv6 destined for ::/0, which is for incoming IPVS connections */
+	{
+		.hook		= ip_vs_forward_icmp_v6,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum        = NF_INET_FORWARD,
+		.priority       = 99,
+	},
+	/* Before the netfilter connection tracking, exit from POST_ROUTING */
+	{
+		.hook		= ip_vs_post_routing,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum        = NF_INET_POST_ROUTING,
+		.priority       = NF_IP6_PRI_NAT_SRC-1,
+	},
+#endif
 };
 
 
@@ -1070,10 +1483,12 @@
 {
 	int ret;
 
+	ip_vs_estimator_init();
+
 	ret = ip_vs_control_init();
 	if (ret < 0) {
 		IP_VS_ERR("can't setup control.\n");
-		goto cleanup_nothing;
+		goto cleanup_estimator;
 	}
 
 	ip_vs_protocol_init();
@@ -1106,7 +1521,8 @@
   cleanup_protocol:
 	ip_vs_protocol_cleanup();
 	ip_vs_control_cleanup();
-  cleanup_nothing:
+  cleanup_estimator:
+	ip_vs_estimator_cleanup();
 	return ret;
 }
 
@@ -1117,6 +1533,7 @@
 	ip_vs_app_cleanup();
 	ip_vs_protocol_cleanup();
 	ip_vs_control_cleanup();
+	ip_vs_estimator_cleanup();
 	IP_VS_INFO("ipvs unloaded.\n");
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 6379705..771551d 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -35,8 +35,13 @@
 
 #include <net/net_namespace.h>
 #include <net/ip.h>
+#ifdef CONFIG_IP_VS_IPV6
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#endif
 #include <net/route.h>
 #include <net/sock.h>
+#include <net/genetlink.h>
 
 #include <asm/uaccess.h>
 
@@ -90,6 +95,26 @@
 }
 #endif
 
+#ifdef CONFIG_IP_VS_IPV6
+/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
+static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+{
+	struct rt6_info *rt;
+	struct flowi fl = {
+		.oif = 0,
+		.nl_u = {
+			.ip6_u = {
+				.daddr = *addr,
+				.saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
+	};
+
+	rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+	if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
+		return 1;
+
+	return 0;
+}
+#endif
 /*
  *	update_defense_level is called from keventd and from sysctl,
  *	so it needs to protect itself from softirqs
@@ -281,11 +306,19 @@
  *	Returns hash value for virtual service
  */
 static __inline__ unsigned
-ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port)
+ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
+		  __be16 port)
 {
 	register unsigned porth = ntohs(port);
+	__be32 addr_fold = addr->ip;
 
-	return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		addr_fold = addr->ip6[0]^addr->ip6[1]^
+			    addr->ip6[2]^addr->ip6[3];
+#endif
+
+	return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
 		& IP_VS_SVC_TAB_MASK;
 }
 
@@ -316,7 +349,8 @@
 		/*
 		 *  Hash it by <protocol,addr,port> in ip_vs_svc_table
 		 */
-		hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port);
+		hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
+					 svc->port);
 		list_add(&svc->s_list, &ip_vs_svc_table[hash]);
 	} else {
 		/*
@@ -362,17 +396,19 @@
 /*
  *	Get service by {proto,addr,port} in the service table.
  */
-static __inline__ struct ip_vs_service *
-__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport)
+static inline struct ip_vs_service *
+__ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr,
+		    __be16 vport)
 {
 	unsigned hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
-	hash = ip_vs_svc_hashkey(protocol, vaddr, vport);
+	hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
 
 	list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
-		if ((svc->addr == vaddr)
+		if ((svc->af == af)
+		    && ip_vs_addr_equal(af, &svc->addr, vaddr)
 		    && (svc->port == vport)
 		    && (svc->protocol == protocol)) {
 			/* HIT */
@@ -388,7 +424,8 @@
 /*
  *	Get service by {fwmark} in the service table.
  */
-static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
+static inline struct ip_vs_service *
+__ip_vs_svc_fwm_get(int af, __u32 fwmark)
 {
 	unsigned hash;
 	struct ip_vs_service *svc;
@@ -397,7 +434,7 @@
 	hash = ip_vs_svc_fwm_hashkey(fwmark);
 
 	list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
-		if (svc->fwmark == fwmark) {
+		if (svc->fwmark == fwmark && svc->af == af) {
 			/* HIT */
 			atomic_inc(&svc->usecnt);
 			return svc;
@@ -408,7 +445,8 @@
 }
 
 struct ip_vs_service *
-ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
+ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+		  const union nf_inet_addr *vaddr, __be16 vport)
 {
 	struct ip_vs_service *svc;
 
@@ -417,14 +455,14 @@
 	/*
 	 *	Check the table hashed by fwmark first
 	 */
-	if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark)))
+	if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark)))
 		goto out;
 
 	/*
 	 *	Check the table hashed by <protocol,addr,port>
 	 *	for "full" addressed entries
 	 */
-	svc = __ip_vs_service_get(protocol, vaddr, vport);
+	svc = __ip_vs_service_get(af, protocol, vaddr, vport);
 
 	if (svc == NULL
 	    && protocol == IPPROTO_TCP
@@ -434,7 +472,7 @@
 		 * Check if ftp service entry exists, the packet
 		 * might belong to FTP data connections.
 		 */
-		svc = __ip_vs_service_get(protocol, vaddr, FTPPORT);
+		svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT);
 	}
 
 	if (svc == NULL
@@ -442,16 +480,16 @@
 		/*
 		 * Check if the catch-all port (port zero) exists
 		 */
-		svc = __ip_vs_service_get(protocol, vaddr, 0);
+		svc = __ip_vs_service_get(af, protocol, vaddr, 0);
 	}
 
   out:
 	read_unlock(&__ip_vs_svc_lock);
 
-	IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n",
-		  fwmark, ip_vs_proto_name(protocol),
-		  NIPQUAD(vaddr), ntohs(vport),
-		  svc?"hit":"not hit");
+	IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
+		      fwmark, ip_vs_proto_name(protocol),
+		      IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
+		      svc ? "hit" : "not hit");
 
 	return svc;
 }
@@ -478,11 +516,20 @@
 /*
  *	Returns hash value for real service
  */
-static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port)
+static inline unsigned ip_vs_rs_hashkey(int af,
+					    const union nf_inet_addr *addr,
+					    __be16 port)
 {
 	register unsigned porth = ntohs(port);
+	__be32 addr_fold = addr->ip;
 
-	return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth)
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		addr_fold = addr->ip6[0]^addr->ip6[1]^
+			    addr->ip6[2]^addr->ip6[3];
+#endif
+
+	return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
 		& IP_VS_RTAB_MASK;
 }
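
Both hash key helpers fold an IPv6 address into 32 bits the same way before feeding
it to the existing proto/addr/port mix. Pulled out into a standalone helper purely
for illustration (the name ip_vs_addr_fold is hypothetical; the patch open-codes
the fold in each function):

static inline __be32 ip_vs_addr_fold(int af, const union nf_inet_addr *addr)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return addr->ip6[0] ^ addr->ip6[1] ^
		       addr->ip6[2] ^ addr->ip6[3];
#endif
	return addr->ip;
}
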
 
@@ -502,7 +549,8 @@
 	 *	Hash by proto,addr,port,
 	 *	which are the parameters of the real service.
 	 */
-	hash = ip_vs_rs_hashkey(dest->addr, dest->port);
+	hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
+
 	list_add(&dest->d_list, &ip_vs_rtable[hash]);
 
 	return 1;
@@ -529,7 +577,9 @@
  *	Lookup real service by <proto,addr,port> in the real service table.
  */
 struct ip_vs_dest *
-ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
+ip_vs_lookup_real_service(int af, __u16 protocol,
+			  const union nf_inet_addr *daddr,
+			  __be16 dport)
 {
 	unsigned hash;
 	struct ip_vs_dest *dest;
@@ -538,11 +588,12 @@
 	 *	Check for "full" addressed entries
 	 *	Return the first found entry
 	 */
-	hash = ip_vs_rs_hashkey(daddr, dport);
+	hash = ip_vs_rs_hashkey(af, daddr, dport);
 
 	read_lock(&__ip_vs_rs_lock);
 	list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
-		if ((dest->addr == daddr)
+		if ((dest->af == af)
+		    && ip_vs_addr_equal(af, &dest->addr, daddr)
 		    && (dest->port == dport)
 		    && ((dest->protocol == protocol) ||
 			dest->vfwmark)) {
@@ -560,7 +611,8 @@
  *	Lookup destination by {addr,port} in the given service
  */
 static struct ip_vs_dest *
-ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
+ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
+		  __be16 dport)
 {
 	struct ip_vs_dest *dest;
 
@@ -568,7 +620,9 @@
 	 * Find the destination for the given service
 	 */
 	list_for_each_entry(dest, &svc->destinations, n_list) {
-		if ((dest->addr == daddr) && (dest->port == dport)) {
+		if ((dest->af == svc->af)
+		    && ip_vs_addr_equal(svc->af, &dest->addr, daddr)
+		    && (dest->port == dport)) {
 			/* HIT */
 			return dest;
 		}
@@ -587,13 +641,15 @@
  * ip_vs_lookup_real_service() looked promising, but
  * does not seem to work as expected.
  */
-struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
-				    __be32 vaddr, __be16 vport, __u16 protocol)
+struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+				   __be16 dport,
+				   const union nf_inet_addr *vaddr,
+				   __be16 vport, __u16 protocol)
 {
 	struct ip_vs_dest *dest;
 	struct ip_vs_service *svc;
 
-	svc = ip_vs_service_get(0, protocol, vaddr, vport);
+	svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
 	if (!svc)
 		return NULL;
 	dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -614,7 +670,8 @@
  *  scheduling.
  */
 static struct ip_vs_dest *
-ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
+ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
+		     __be16 dport)
 {
 	struct ip_vs_dest *dest, *nxt;
 
@@ -622,17 +679,19 @@
 	 * Find the destination in trash
 	 */
 	list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
-		IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, "
-			  "dest->refcnt=%d\n",
-			  dest->vfwmark,
-			  NIPQUAD(dest->addr), ntohs(dest->port),
-			  atomic_read(&dest->refcnt));
-		if (dest->addr == daddr &&
+		IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
+			      "dest->refcnt=%d\n",
+			      dest->vfwmark,
+			      IP_VS_DBG_ADDR(svc->af, &dest->addr),
+			      ntohs(dest->port),
+			      atomic_read(&dest->refcnt));
+		if (dest->af == svc->af &&
+		    ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
 		    dest->port == dport &&
 		    dest->vfwmark == svc->fwmark &&
 		    dest->protocol == svc->protocol &&
 		    (svc->fwmark ||
-		     (dest->vaddr == svc->addr &&
+		     (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
 		      dest->vport == svc->port))) {
 			/* HIT */
 			return dest;
@@ -642,10 +701,11 @@
 		 * Try to purge the destination from trash if not referenced
 		 */
 		if (atomic_read(&dest->refcnt) == 1) {
-			IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u "
-				  "from trash\n",
-				  dest->vfwmark,
-				  NIPQUAD(dest->addr), ntohs(dest->port));
+			IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u "
+				      "from trash\n",
+				      dest->vfwmark,
+				      IP_VS_DBG_ADDR(svc->af, &dest->addr),
+				      ntohs(dest->port));
 			list_del(&dest->n_list);
 			ip_vs_dst_reset(dest);
 			__ip_vs_unbind_svc(dest);
@@ -684,18 +744,7 @@
 {
 	spin_lock_bh(&stats->lock);
 
-	stats->conns = 0;
-	stats->inpkts = 0;
-	stats->outpkts = 0;
-	stats->inbytes = 0;
-	stats->outbytes = 0;
-
-	stats->cps = 0;
-	stats->inpps = 0;
-	stats->outpps = 0;
-	stats->inbps = 0;
-	stats->outbps = 0;
-
+	memset(&stats->ustats, 0, sizeof(stats->ustats));
 	ip_vs_zero_estimator(stats);
 
 	spin_unlock_bh(&stats->lock);
@@ -706,7 +755,7 @@
  */
 static void
 __ip_vs_update_dest(struct ip_vs_service *svc,
-		    struct ip_vs_dest *dest, struct ip_vs_dest_user *udest)
+		    struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest)
 {
 	int conn_flags;
 
@@ -715,10 +764,18 @@
 	conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;
 
 	/* check if local node and update the flags */
-	if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) {
-		conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
-			| IP_VS_CONN_F_LOCALNODE;
-	}
+#ifdef CONFIG_IP_VS_IPV6
+	if (svc->af == AF_INET6) {
+		if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) {
+			conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
+				| IP_VS_CONN_F_LOCALNODE;
+		}
+	} else
+#endif
+		if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) {
+			conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
+				| IP_VS_CONN_F_LOCALNODE;
+		}
 
 	/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
 	if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
@@ -759,7 +816,7 @@
  *	Create a destination for the given service
  */
 static int
-ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
+ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 	       struct ip_vs_dest **dest_p)
 {
 	struct ip_vs_dest *dest;
@@ -767,9 +824,20 @@
 
 	EnterFunction(2);
 
-	atype = inet_addr_type(&init_net, udest->addr);
-	if (atype != RTN_LOCAL && atype != RTN_UNICAST)
-		return -EINVAL;
+#ifdef CONFIG_IP_VS_IPV6
+	if (svc->af == AF_INET6) {
+		atype = ipv6_addr_type(&udest->addr.in6);
+		if ((!(atype & IPV6_ADDR_UNICAST) ||
+			atype & IPV6_ADDR_LINKLOCAL) &&
+			!__ip_vs_addr_is_local_v6(&udest->addr.in6))
+			return -EINVAL;
+	} else
+#endif
+	{
+		atype = inet_addr_type(&init_net, udest->addr.ip);
+		if (atype != RTN_LOCAL && atype != RTN_UNICAST)
+			return -EINVAL;
+	}
 
 	dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
 	if (dest == NULL) {
@@ -777,11 +845,12 @@
 		return -ENOMEM;
 	}
 
+	dest->af = svc->af;
 	dest->protocol = svc->protocol;
 	dest->vaddr = svc->addr;
 	dest->vport = svc->port;
 	dest->vfwmark = svc->fwmark;
-	dest->addr = udest->addr;
+	ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
 	dest->port = udest->port;
 
 	atomic_set(&dest->activeconns, 0);
@@ -806,10 +875,10 @@
  *	Add a destination into an existing service
  */
 static int
-ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
+ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 {
 	struct ip_vs_dest *dest;
-	__be32 daddr = udest->addr;
+	union nf_inet_addr daddr;
 	__be16 dport = udest->port;
 	int ret;
 
@@ -826,10 +895,13 @@
 		return -ERANGE;
 	}
 
+	ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
+
 	/*
 	 * Check if the dest already exists in the list
 	 */
-	dest = ip_vs_lookup_dest(svc, daddr, dport);
+	dest = ip_vs_lookup_dest(svc, &daddr, dport);
+
 	if (dest != NULL) {
 		IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
 		return -EEXIST;
@@ -839,15 +911,17 @@
 	 * Check if the dest already exists in the trash and
 	 * is from the same service
 	 */
-	dest = ip_vs_trash_get_dest(svc, daddr, dport);
+	dest = ip_vs_trash_get_dest(svc, &daddr, dport);
+
 	if (dest != NULL) {
-		IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
-			  "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
-			  NIPQUAD(daddr), ntohs(dport),
-			  atomic_read(&dest->refcnt),
-			  dest->vfwmark,
-			  NIPQUAD(dest->vaddr),
-			  ntohs(dest->vport));
+		IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
+			      "dest->refcnt=%d, service %u/%s:%u\n",
+			      IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
+			      atomic_read(&dest->refcnt),
+			      dest->vfwmark,
+			      IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
+			      ntohs(dest->vport));
+
 		__ip_vs_update_dest(svc, dest, udest);
 
 		/*
@@ -868,7 +942,8 @@
 		svc->num_dests++;
 
 		/* call the update_service function of its scheduler */
-		svc->scheduler->update_service(svc);
+		if (svc->scheduler->update_service)
+			svc->scheduler->update_service(svc);
 
 		write_unlock_bh(&__ip_vs_svc_lock);
 		return 0;
@@ -898,7 +973,8 @@
 	svc->num_dests++;
 
 	/* call the update_service function of its scheduler */
-	svc->scheduler->update_service(svc);
+	if (svc->scheduler->update_service)
+		svc->scheduler->update_service(svc);
 
 	write_unlock_bh(&__ip_vs_svc_lock);
 
@@ -912,10 +988,10 @@
  *	Edit a destination in the given service
  */
 static int
-ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
+ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 {
 	struct ip_vs_dest *dest;
-	__be32 daddr = udest->addr;
+	union nf_inet_addr daddr;
 	__be16 dport = udest->port;
 
 	EnterFunction(2);
@@ -931,10 +1007,13 @@
 		return -ERANGE;
 	}
 
+	ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
+
 	/*
 	 *  Lookup the destination list
 	 */
-	dest = ip_vs_lookup_dest(svc, daddr, dport);
+	dest = ip_vs_lookup_dest(svc, &daddr, dport);
+
 	if (dest == NULL) {
 		IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
 		return -ENOENT;
@@ -948,7 +1027,8 @@
 	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
 
 	/* call the update_service, because server weight may be changed */
-	svc->scheduler->update_service(svc);
+	if (svc->scheduler->update_service)
+		svc->scheduler->update_service(svc);
 
 	write_unlock_bh(&__ip_vs_svc_lock);
 
@@ -987,10 +1067,11 @@
 		atomic_dec(&dest->svc->refcnt);
 		kfree(dest);
 	} else {
-		IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
-			  "dest->refcnt=%d\n",
-			  NIPQUAD(dest->addr), ntohs(dest->port),
-			  atomic_read(&dest->refcnt));
+		IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
+			      "dest->refcnt=%d\n",
+			      IP_VS_DBG_ADDR(dest->af, &dest->addr),
+			      ntohs(dest->port),
+			      atomic_read(&dest->refcnt));
 		list_add(&dest->n_list, &ip_vs_dest_trash);
 		atomic_inc(&dest->refcnt);
 	}
@@ -1011,12 +1092,12 @@
 	 */
 	list_del(&dest->n_list);
 	svc->num_dests--;
-	if (svcupd) {
-		/*
-		 *  Call the update_service function of its scheduler
-		 */
-		svc->scheduler->update_service(svc);
-	}
+
+	/*
+	 *  Call the update_service function of its scheduler
+	 */
+	if (svcupd && svc->scheduler->update_service)
+			svc->scheduler->update_service(svc);
 }
 
 
@@ -1024,15 +1105,15 @@
  *	Delete a destination server in the given service
  */
 static int
-ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
+ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 {
 	struct ip_vs_dest *dest;
-	__be32 daddr = udest->addr;
 	__be16 dport = udest->port;
 
 	EnterFunction(2);
 
-	dest = ip_vs_lookup_dest(svc, daddr, dport);
+	dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
+
 	if (dest == NULL) {
 		IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
 		return -ENOENT;
@@ -1067,7 +1148,8 @@
  *	Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
+ip_vs_add_service(struct ip_vs_service_user_kern *u,
+		  struct ip_vs_service **svc_p)
 {
 	int ret = 0;
 	struct ip_vs_scheduler *sched = NULL;
@@ -1085,6 +1167,19 @@
 		goto out_mod_dec;
 	}
 
+#ifdef CONFIG_IP_VS_IPV6
+	if (u->af == AF_INET6) {
+		if (!sched->supports_ipv6) {
+			ret = -EAFNOSUPPORT;
+			goto out_err;
+		}
+		if ((u->netmask < 1) || (u->netmask > 128)) {
+			ret = -EINVAL;
+			goto out_err;
+		}
+	}
+#endif
+
 	svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
 	if (svc == NULL) {
 		IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
@@ -1096,8 +1191,9 @@
 	atomic_set(&svc->usecnt, 1);
 	atomic_set(&svc->refcnt, 0);
 
+	svc->af = u->af;
 	svc->protocol = u->protocol;
-	svc->addr = u->addr;
+	ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
 	svc->port = u->port;
 	svc->fwmark = u->fwmark;
 	svc->flags = u->flags;
@@ -1121,7 +1217,10 @@
 		atomic_inc(&ip_vs_nullsvc_counter);
 
 	ip_vs_new_estimator(&svc->stats);
-	ip_vs_num_services++;
+
+	/* Count only IPv4 services for old get/setsockopt interface */
+	if (svc->af == AF_INET)
+		ip_vs_num_services++;
 
 	/* Hash the service into the service table */
 	write_lock_bh(&__ip_vs_svc_lock);
@@ -1156,7 +1255,7 @@
  *	Edit a service and bind it with a new scheduler
  */
 static int
-ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
+ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 {
 	struct ip_vs_scheduler *sched, *old_sched;
 	int ret = 0;
@@ -1172,6 +1271,19 @@
 	}
 	old_sched = sched;
 
+#ifdef CONFIG_IP_VS_IPV6
+	if (u->af == AF_INET6) {
+		if (!sched->supports_ipv6) {
+			ret = -EAFNOSUPPORT;
+			goto out;
+		}
+		if ((u->netmask < 1) || (u->netmask > 128)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+#endif
+
 	write_lock_bh(&__ip_vs_svc_lock);
 
 	/*
@@ -1193,7 +1305,7 @@
 		 */
 		if ((ret = ip_vs_unbind_scheduler(svc))) {
 			old_sched = sched;
-			goto out;
+			goto out_unlock;
 		}
 
 		/*
@@ -1212,12 +1324,13 @@
 			 */
 			ip_vs_bind_scheduler(svc, old_sched);
 			old_sched = sched;
-			goto out;
+			goto out_unlock;
 		}
 	}
 
-  out:
+  out_unlock:
 	write_unlock_bh(&__ip_vs_svc_lock);
+  out:
 
 	if (old_sched)
 		ip_vs_scheduler_put(old_sched);
@@ -1236,7 +1349,10 @@
 	struct ip_vs_dest *dest, *nxt;
 	struct ip_vs_scheduler *old_sched;
 
-	ip_vs_num_services--;
+	/* Count only IPv4 services for old get/setsockopt interface */
+	if (svc->af == AF_INET)
+		ip_vs_num_services--;
+
 	ip_vs_kill_estimator(&svc->stats);
 
 	/* Unbind scheduler */
@@ -1671,6 +1787,7 @@
 }
 
 static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
+__acquires(__ip_vs_svc_lock)
 {
 
 	read_lock_bh(&__ip_vs_svc_lock);
@@ -1724,6 +1841,7 @@
 }
 
 static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
+__releases(__ip_vs_svc_lock)
 {
 	read_unlock_bh(&__ip_vs_svc_lock);
 }
@@ -1744,15 +1862,25 @@
 		const struct ip_vs_iter *iter = seq->private;
 		const struct ip_vs_dest *dest;
 
-		if (iter->table == ip_vs_svc_table)
-			seq_printf(seq, "%s  %08X:%04X %s ",
-				   ip_vs_proto_name(svc->protocol),
-				   ntohl(svc->addr),
-				   ntohs(svc->port),
-				   svc->scheduler->name);
-		else
+		if (iter->table == ip_vs_svc_table) {
+#ifdef CONFIG_IP_VS_IPV6
+			if (svc->af == AF_INET6)
+				seq_printf(seq, "%s  [" NIP6_FMT "]:%04X %s ",
+					   ip_vs_proto_name(svc->protocol),
+					   NIP6(svc->addr.in6),
+					   ntohs(svc->port),
+					   svc->scheduler->name);
+			else
+#endif
+				seq_printf(seq, "%s  %08X:%04X %s ",
+					   ip_vs_proto_name(svc->protocol),
+					   ntohl(svc->addr.ip),
+					   ntohs(svc->port),
+					   svc->scheduler->name);
+		} else {
 			seq_printf(seq, "FWM  %08X %s ",
 				   svc->fwmark, svc->scheduler->name);
+		}
 
 		if (svc->flags & IP_VS_SVC_F_PERSISTENT)
 			seq_printf(seq, "persistent %d %08X\n",
@@ -1762,13 +1890,29 @@
 			seq_putc(seq, '\n');
 
 		list_for_each_entry(dest, &svc->destinations, n_list) {
-			seq_printf(seq,
-				   "  -> %08X:%04X      %-7s %-6d %-10d %-10d\n",
-				   ntohl(dest->addr), ntohs(dest->port),
-				   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
-				   atomic_read(&dest->weight),
-				   atomic_read(&dest->activeconns),
-				   atomic_read(&dest->inactconns));
+#ifdef CONFIG_IP_VS_IPV6
+			if (dest->af == AF_INET6)
+				seq_printf(seq,
+					   "  -> [" NIP6_FMT "]:%04X"
+					   "      %-7s %-6d %-10d %-10d\n",
+					   NIP6(dest->addr.in6),
+					   ntohs(dest->port),
+					   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+					   atomic_read(&dest->weight),
+					   atomic_read(&dest->activeconns),
+					   atomic_read(&dest->inactconns));
+			else
+#endif
+				seq_printf(seq,
+					   "  -> %08X:%04X      "
+					   "%-7s %-6d %-10d %-10d\n",
+					   ntohl(dest->addr.ip),
+					   ntohs(dest->port),
+					   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+					   atomic_read(&dest->weight),
+					   atomic_read(&dest->activeconns),
+					   atomic_read(&dest->inactconns));
+
 		}
 	}
 	return 0;
@@ -1812,20 +1956,20 @@
 		   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
 	spin_lock_bh(&ip_vs_stats.lock);
-	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns,
-		   ip_vs_stats.inpkts, ip_vs_stats.outpkts,
-		   (unsigned long long) ip_vs_stats.inbytes,
-		   (unsigned long long) ip_vs_stats.outbytes);
+	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
+		   ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
+		   (unsigned long long) ip_vs_stats.ustats.inbytes,
+		   (unsigned long long) ip_vs_stats.ustats.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
 		   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
 	seq_printf(seq,"%8X %8X %8X %16X %16X\n",
-			ip_vs_stats.cps,
-			ip_vs_stats.inpps,
-			ip_vs_stats.outpps,
-			ip_vs_stats.inbps,
-			ip_vs_stats.outbps);
+			ip_vs_stats.ustats.cps,
+			ip_vs_stats.ustats.inpps,
+			ip_vs_stats.ustats.outpps,
+			ip_vs_stats.ustats.inbps,
+			ip_vs_stats.ustats.outbps);
 	spin_unlock_bh(&ip_vs_stats.lock);
 
 	return 0;
@@ -1900,14 +2044,44 @@
 	[SET_CMDID(IP_VS_SO_SET_ZERO)]		= SERVICE_ARG_LEN,
 };
 
+static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
+				  struct ip_vs_service_user *usvc_compat)
+{
+	usvc->af		= AF_INET;
+	usvc->protocol		= usvc_compat->protocol;
+	usvc->addr.ip		= usvc_compat->addr;
+	usvc->port		= usvc_compat->port;
+	usvc->fwmark		= usvc_compat->fwmark;
+
+	/* Deep copy of sched_name is not needed here */
+	usvc->sched_name	= usvc_compat->sched_name;
+
+	usvc->flags		= usvc_compat->flags;
+	usvc->timeout		= usvc_compat->timeout;
+	usvc->netmask		= usvc_compat->netmask;
+}
+
+static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
+				   struct ip_vs_dest_user *udest_compat)
+{
+	udest->addr.ip		= udest_compat->addr;
+	udest->port		= udest_compat->port;
+	udest->conn_flags	= udest_compat->conn_flags;
+	udest->weight		= udest_compat->weight;
+	udest->u_threshold	= udest_compat->u_threshold;
+	udest->l_threshold	= udest_compat->l_threshold;
+}
+
 static int
 do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 {
 	int ret;
 	unsigned char arg[MAX_ARG_LEN];
-	struct ip_vs_service_user *usvc;
+	struct ip_vs_service_user *usvc_compat;
+	struct ip_vs_service_user_kern usvc;
 	struct ip_vs_service *svc;
-	struct ip_vs_dest_user *udest;
+	struct ip_vs_dest_user *udest_compat;
+	struct ip_vs_dest_user_kern udest;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
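The hunk below keeps accepting the old IPv4-only ip_vs_service_user/ip_vs_dest_user layouts and immediately converts them with the copy helpers added above. The af-aware kernel struct can also be filled directly; a hedged sketch (field names are taken from the copy helpers, everything else -- the scheduler name, port and address -- is an arbitrary example):

/* Sketch only: an AF_INET6 service request expressed in the new
 * kernel-side struct, something the legacy sockopt struct cannot carry. */
struct ip_vs_service_user_kern usvc = {
	.af		= AF_INET6,
	.protocol	= IPPROTO_TCP,
	.port		= htons(80),
	.sched_name	= "rr",		/* shallow pointer, as in the helper  */
	.netmask	= 128,		/* prefix length for AF_INET6 services */
};
usvc.addr.in6 = in6addr_loopback;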
@@ -1947,35 +2121,40 @@
 		goto out_unlock;
 	}
 
-	usvc = (struct ip_vs_service_user *)arg;
-	udest = (struct ip_vs_dest_user *)(usvc + 1);
+	usvc_compat = (struct ip_vs_service_user *)arg;
+	udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
+
+	/* We only use the new structs internally, so copy userspace compat
+	 * structs to extended internal versions */
+	ip_vs_copy_usvc_compat(&usvc, usvc_compat);
+	ip_vs_copy_udest_compat(&udest, udest_compat);
 
 	if (cmd == IP_VS_SO_SET_ZERO) {
 		/* if no service address is set, zero counters in all */
-		if (!usvc->fwmark && !usvc->addr && !usvc->port) {
+		if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
 			ret = ip_vs_zero_all();
 			goto out_unlock;
 		}
 	}
 
 	/* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
-	if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) {
+	if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
 		IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n",
-			  usvc->protocol, NIPQUAD(usvc->addr),
-			  ntohs(usvc->port), usvc->sched_name);
+			  usvc.protocol, NIPQUAD(usvc.addr.ip),
+			  ntohs(usvc.port), usvc.sched_name);
 		ret = -EFAULT;
 		goto out_unlock;
 	}
 
 	/* Lookup the exact service by <protocol, addr, port> or fwmark */
-	if (usvc->fwmark == 0)
-		svc = __ip_vs_service_get(usvc->protocol,
-					  usvc->addr, usvc->port);
+	if (usvc.fwmark == 0)
+		svc = __ip_vs_service_get(usvc.af, usvc.protocol,
+					  &usvc.addr, usvc.port);
 	else
-		svc = __ip_vs_svc_fwm_get(usvc->fwmark);
+		svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
 
 	if (cmd != IP_VS_SO_SET_ADD
-	    && (svc == NULL || svc->protocol != usvc->protocol)) {
+	    && (svc == NULL || svc->protocol != usvc.protocol)) {
 		ret = -ESRCH;
 		goto out_unlock;
 	}
@@ -1985,10 +2164,10 @@
 		if (svc != NULL)
 			ret = -EEXIST;
 		else
-			ret = ip_vs_add_service(usvc, &svc);
+			ret = ip_vs_add_service(&usvc, &svc);
 		break;
 	case IP_VS_SO_SET_EDIT:
-		ret = ip_vs_edit_service(svc, usvc);
+		ret = ip_vs_edit_service(svc, &usvc);
 		break;
 	case IP_VS_SO_SET_DEL:
 		ret = ip_vs_del_service(svc);
@@ -1999,13 +2178,13 @@
 		ret = ip_vs_zero_service(svc);
 		break;
 	case IP_VS_SO_SET_ADDDEST:
-		ret = ip_vs_add_dest(svc, udest);
+		ret = ip_vs_add_dest(svc, &udest);
 		break;
 	case IP_VS_SO_SET_EDITDEST:
-		ret = ip_vs_edit_dest(svc, udest);
+		ret = ip_vs_edit_dest(svc, &udest);
 		break;
 	case IP_VS_SO_SET_DELDEST:
-		ret = ip_vs_del_dest(svc, udest);
+		ret = ip_vs_del_dest(svc, &udest);
 		break;
 	default:
 		ret = -EINVAL;
@@ -2028,7 +2207,7 @@
 ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
 {
 	spin_lock_bh(&src->lock);
-	memcpy(dst, src, (char*)&src->lock - (char*)src);
+	memcpy(dst, &src->ustats, sizeof(*dst));
 	spin_unlock_bh(&src->lock);
 }
 
@@ -2036,7 +2215,7 @@
 ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
 	dst->protocol = src->protocol;
-	dst->addr = src->addr;
+	dst->addr = src->addr.ip;
 	dst->port = src->port;
 	dst->fwmark = src->fwmark;
 	strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
@@ -2058,6 +2237,10 @@
 
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
+			/* Only expose IPv4 entries to old interface */
+			if (svc->af != AF_INET)
+				continue;
+
 			if (count >= get->num_services)
 				goto out;
 			memset(&entry, 0, sizeof(entry));
@@ -2073,6 +2256,10 @@
 
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
+			/* Only expose IPv4 entries to old interface */
+			if (svc->af != AF_INET)
+				continue;
+
 			if (count >= get->num_services)
 				goto out;
 			memset(&entry, 0, sizeof(entry));
@@ -2094,13 +2281,15 @@
 			 struct ip_vs_get_dests __user *uptr)
 {
 	struct ip_vs_service *svc;
+	union nf_inet_addr addr = { .ip = get->addr };
 	int ret = 0;
 
 	if (get->fwmark)
-		svc = __ip_vs_svc_fwm_get(get->fwmark);
+		svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark);
 	else
-		svc = __ip_vs_service_get(get->protocol,
-					  get->addr, get->port);
+		svc = __ip_vs_service_get(AF_INET, get->protocol, &addr,
+					  get->port);
+
 	if (svc) {
 		int count = 0;
 		struct ip_vs_dest *dest;
@@ -2110,7 +2299,7 @@
 			if (count >= get->num_dests)
 				break;
 
-			entry.addr = dest->addr;
+			entry.addr = dest->addr.ip;
 			entry.port = dest->port;
 			entry.conn_flags = atomic_read(&dest->conn_flags);
 			entry.weight = atomic_read(&dest->weight);
@@ -2235,13 +2424,15 @@
 	{
 		struct ip_vs_service_entry *entry;
 		struct ip_vs_service *svc;
+		union nf_inet_addr addr;
 
 		entry = (struct ip_vs_service_entry *)arg;
+		addr.ip = entry->addr;
 		if (entry->fwmark)
-			svc = __ip_vs_svc_fwm_get(entry->fwmark);
+			svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
 		else
-			svc = __ip_vs_service_get(entry->protocol,
-						  entry->addr, entry->port);
+			svc = __ip_vs_service_get(AF_INET, entry->protocol,
+						  &addr, entry->port);
 		if (svc) {
 			ip_vs_copy_service(entry, svc);
 			if (copy_to_user(user, entry, sizeof(*entry)) != 0)
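The legacy get/setsockopt handlers above stay IPv4-only: incoming __be32 addresses are wrapped into a union nf_inet_addr (only the .ip member is populated) before calling the af-aware lookups, non-AF_INET services are skipped, and addresses are unwrapped with .addr.ip on the way out. A hedged one-line illustration of the wrapping, using the ip_vs_service_entry from the hunk above:

union nf_inet_addr addr = { .ip = entry->addr };	/* IPv4 only */
svc = __ip_vs_service_get(AF_INET, entry->protocol, &addr, entry->port);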
@@ -2320,6 +2511,875 @@
 	.owner		= THIS_MODULE,
 };
 
+/*
+ * Generic Netlink interface
+ */
+
+/* IPVS genetlink family */
+static struct genl_family ip_vs_genl_family = {
+	.id		= GENL_ID_GENERATE,
+	.hdrsize	= 0,
+	.name		= IPVS_GENL_NAME,
+	.version	= IPVS_GENL_VERSION,
+	.maxattr	= IPVS_CMD_MAX,
+};
+
+/* Policy used for first-level command attributes */
+static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
+	[IPVS_CMD_ATTR_SERVICE]		= { .type = NLA_NESTED },
+	[IPVS_CMD_ATTR_DEST]		= { .type = NLA_NESTED },
+	[IPVS_CMD_ATTR_DAEMON]		= { .type = NLA_NESTED },
+	[IPVS_CMD_ATTR_TIMEOUT_TCP]	= { .type = NLA_U32 },
+	[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]	= { .type = NLA_U32 },
+	[IPVS_CMD_ATTR_TIMEOUT_UDP]	= { .type = NLA_U32 },
+};
+
+/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
+static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
+	[IPVS_DAEMON_ATTR_STATE]	= { .type = NLA_U32 },
+	[IPVS_DAEMON_ATTR_MCAST_IFN]	= { .type = NLA_NUL_STRING,
+					    .len = IP_VS_IFNAME_MAXLEN },
+	[IPVS_DAEMON_ATTR_SYNC_ID]	= { .type = NLA_U32 },
+};
+
+/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
+static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
+	[IPVS_SVC_ATTR_AF]		= { .type = NLA_U16 },
+	[IPVS_SVC_ATTR_PROTOCOL]	= { .type = NLA_U16 },
+	[IPVS_SVC_ATTR_ADDR]		= { .type = NLA_BINARY,
+					    .len = sizeof(union nf_inet_addr) },
+	[IPVS_SVC_ATTR_PORT]		= { .type = NLA_U16 },
+	[IPVS_SVC_ATTR_FWMARK]		= { .type = NLA_U32 },
+	[IPVS_SVC_ATTR_SCHED_NAME]	= { .type = NLA_NUL_STRING,
+					    .len = IP_VS_SCHEDNAME_MAXLEN },
+	[IPVS_SVC_ATTR_FLAGS]		= { .type = NLA_BINARY,
+					    .len = sizeof(struct ip_vs_flags) },
+	[IPVS_SVC_ATTR_TIMEOUT]		= { .type = NLA_U32 },
+	[IPVS_SVC_ATTR_NETMASK]		= { .type = NLA_U32 },
+	[IPVS_SVC_ATTR_STATS]		= { .type = NLA_NESTED },
+};
+
+/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
+static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
+	[IPVS_DEST_ATTR_ADDR]		= { .type = NLA_BINARY,
+					    .len = sizeof(union nf_inet_addr) },
+	[IPVS_DEST_ATTR_PORT]		= { .type = NLA_U16 },
+	[IPVS_DEST_ATTR_FWD_METHOD]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_WEIGHT]		= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_U_THRESH]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_L_THRESH]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_ACTIVE_CONNS]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_INACT_CONNS]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_PERSIST_CONNS]	= { .type = NLA_U32 },
+	[IPVS_DEST_ATTR_STATS]		= { .type = NLA_NESTED },
+};
+
+static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
+				 struct ip_vs_stats *stats)
+{
+	struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+	if (!nl_stats)
+		return -EMSGSIZE;
+
+	spin_lock_bh(&stats->lock);
+
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
+	NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
+	NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
+	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
+
+	spin_unlock_bh(&stats->lock);
+
+	nla_nest_end(skb, nl_stats);
+
+	return 0;
+
+nla_put_failure:
+	spin_unlock_bh(&stats->lock);
+	nla_nest_cancel(skb, nl_stats);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_fill_service(struct sk_buff *skb,
+				   struct ip_vs_service *svc)
+{
+	struct nlattr *nl_service;
+	struct ip_vs_flags flags = { .flags = svc->flags,
+				     .mask = ~0 };
+
+	nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
+	if (!nl_service)
+		return -EMSGSIZE;
+
+	NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
+
+	if (svc->fwmark) {
+		NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
+	} else {
+		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
+		NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
+		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
+	}
+
+	NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
+	NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
+	NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
+	NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
+
+	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nl_service);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nl_service);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_service(struct sk_buff *skb,
+				   struct ip_vs_service *svc,
+				   struct netlink_callback *cb)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+			  &ip_vs_genl_family, NLM_F_MULTI,
+			  IPVS_CMD_NEW_SERVICE);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (ip_vs_genl_fill_service(skb, svc) < 0)
+		goto nla_put_failure;
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_services(struct sk_buff *skb,
+				    struct netlink_callback *cb)
+{
+	int idx = 0, i;
+	int start = cb->args[0];
+	struct ip_vs_service *svc;
+
+	mutex_lock(&__ip_vs_mutex);
+	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
+		list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
+			if (++idx <= start)
+				continue;
+			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
+				idx--;
+				goto nla_put_failure;
+			}
+		}
+	}
+
+	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
+		list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
+			if (++idx <= start)
+				continue;
+			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
+				idx--;
+				goto nla_put_failure;
+			}
+		}
+	}
+
+nla_put_failure:
+	mutex_unlock(&__ip_vs_mutex);
+	cb->args[0] = idx;
+
+	return skb->len;
+}
+
+static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+				    struct nlattr *nla, int full_entry)
+{
+	struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
+	struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
+
+	/* Parse mandatory identifying service fields first */
+	if (nla == NULL ||
+	    nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
+		return -EINVAL;
+
+	nla_af		= attrs[IPVS_SVC_ATTR_AF];
+	nla_protocol	= attrs[IPVS_SVC_ATTR_PROTOCOL];
+	nla_addr	= attrs[IPVS_SVC_ATTR_ADDR];
+	nla_port	= attrs[IPVS_SVC_ATTR_PORT];
+	nla_fwmark	= attrs[IPVS_SVC_ATTR_FWMARK];
+
+	if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
+		return -EINVAL;
+
+	usvc->af = nla_get_u16(nla_af);
+#ifdef CONFIG_IP_VS_IPV6
+	if (usvc->af != AF_INET && usvc->af != AF_INET6)
+#else
+	if (usvc->af != AF_INET)
+#endif
+		return -EAFNOSUPPORT;
+
+	if (nla_fwmark) {
+		usvc->protocol = IPPROTO_TCP;
+		usvc->fwmark = nla_get_u32(nla_fwmark);
+	} else {
+		usvc->protocol = nla_get_u16(nla_protocol);
+		nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
+		usvc->port = nla_get_u16(nla_port);
+		usvc->fwmark = 0;
+	}
+
+	/* If a full entry was requested, check for the additional fields */
+	if (full_entry) {
+		struct nlattr *nla_sched, *nla_flags, *nla_timeout,
+			      *nla_netmask;
+		struct ip_vs_flags flags;
+		struct ip_vs_service *svc;
+
+		nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
+		nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
+		nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
+		nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
+
+		if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
+			return -EINVAL;
+
+		nla_memcpy(&flags, nla_flags, sizeof(flags));
+
+		/* prefill flags from service if it already exists */
+		if (usvc->fwmark)
+			svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark);
+		else
+			svc = __ip_vs_service_get(usvc->af, usvc->protocol,
+						  &usvc->addr, usvc->port);
+		if (svc) {
+			usvc->flags = svc->flags;
+			ip_vs_service_put(svc);
+		} else
+			usvc->flags = 0;
+
+		/* set new flags from userland */
+		usvc->flags = (usvc->flags & ~flags.mask) |
+			      (flags.flags & flags.mask);
+		usvc->sched_name = nla_data(nla_sched);
+		usvc->timeout = nla_get_u32(nla_timeout);
+		usvc->netmask = nla_get_u32(nla_netmask);
+	}
+
+	return 0;
+}
+
+static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+{
+	struct ip_vs_service_user_kern usvc;
+	int ret;
+
+	ret = ip_vs_genl_parse_service(&usvc, nla, 0);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (usvc.fwmark)
+		return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
+	else
+		return __ip_vs_service_get(usvc.af, usvc.protocol,
+					   &usvc.addr, usvc.port);
+}
+
+static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
+{
+	struct nlattr *nl_dest;
+
+	nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
+	if (!nl_dest)
+		return -EMSGSIZE;
+
+	NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
+	NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
+
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+		    atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
+		    atomic_read(&dest->activeconns));
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
+		    atomic_read(&dest->inactconns));
+	NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
+		    atomic_read(&dest->persistconns));
+
+	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nl_dest);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nl_dest);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
+				struct netlink_callback *cb)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+			  &ip_vs_genl_family, NLM_F_MULTI,
+			  IPVS_CMD_NEW_DEST);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (ip_vs_genl_fill_dest(skb, dest) < 0)
+		goto nla_put_failure;
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_dests(struct sk_buff *skb,
+				 struct netlink_callback *cb)
+{
+	int idx = 0;
+	int start = cb->args[0];
+	struct ip_vs_service *svc;
+	struct ip_vs_dest *dest;
+	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+
+	mutex_lock(&__ip_vs_mutex);
+
+	/* Try to find the service for which to dump destinations */
+	if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
+			IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
+		goto out_err;
+
+	svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+	if (IS_ERR(svc) || svc == NULL)
+		goto out_err;
+
+	/* Dump the destinations */
+	list_for_each_entry(dest, &svc->destinations, n_list) {
+		if (++idx <= start)
+			continue;
+		if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
+			idx--;
+			goto nla_put_failure;
+		}
+	}
+
+nla_put_failure:
+	cb->args[0] = idx;
+	ip_vs_service_put(svc);
+
+out_err:
+	mutex_unlock(&__ip_vs_mutex);
+
+	return skb->len;
+}
+
+static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
+				 struct nlattr *nla, int full_entry)
+{
+	struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
+	struct nlattr *nla_addr, *nla_port;
+
+	/* Parse mandatory identifying destination fields first */
+	if (nla == NULL ||
+	    nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
+		return -EINVAL;
+
+	nla_addr	= attrs[IPVS_DEST_ATTR_ADDR];
+	nla_port	= attrs[IPVS_DEST_ATTR_PORT];
+
+	if (!(nla_addr && nla_port))
+		return -EINVAL;
+
+	nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
+	udest->port = nla_get_u16(nla_port);
+
+	/* If a full entry was requested, check for the additional fields */
+	if (full_entry) {
+		struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
+			      *nla_l_thresh;
+
+		nla_fwd		= attrs[IPVS_DEST_ATTR_FWD_METHOD];
+		nla_weight	= attrs[IPVS_DEST_ATTR_WEIGHT];
+		nla_u_thresh	= attrs[IPVS_DEST_ATTR_U_THRESH];
+		nla_l_thresh	= attrs[IPVS_DEST_ATTR_L_THRESH];
+
+		if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
+			return -EINVAL;
+
+		udest->conn_flags = nla_get_u32(nla_fwd)
+				    & IP_VS_CONN_F_FWD_MASK;
+		udest->weight = nla_get_u32(nla_weight);
+		udest->u_threshold = nla_get_u32(nla_u_thresh);
+		udest->l_threshold = nla_get_u32(nla_l_thresh);
+	}
+
+	return 0;
+}
+
+static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
+				  const char *mcast_ifn, __be32 syncid)
+{
+	struct nlattr *nl_daemon;
+
+	nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
+	if (!nl_daemon)
+		return -EMSGSIZE;
+
+	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
+	NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
+	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
+
+	nla_nest_end(skb, nl_daemon);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nl_daemon);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
+				  const char *mcast_ifn, __be32 syncid,
+				  struct netlink_callback *cb)
+{
+	void *hdr;
+	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+			  &ip_vs_genl_family, NLM_F_MULTI,
+			  IPVS_CMD_NEW_DAEMON);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
+		goto nla_put_failure;
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	mutex_lock(&__ip_vs_mutex);
+	if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
+					   ip_vs_master_mcast_ifn,
+					   ip_vs_master_syncid, cb) < 0)
+			goto nla_put_failure;
+
+		cb->args[0] = 1;
+	}
+
+	if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
+					   ip_vs_backup_mcast_ifn,
+					   ip_vs_backup_syncid, cb) < 0)
+			goto nla_put_failure;
+
+		cb->args[1] = 1;
+	}
+
+nla_put_failure:
+	mutex_unlock(&__ip_vs_mutex);
+
+	return skb->len;
+}
+
+static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+{
+	if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
+	      attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
+	      attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
+		return -EINVAL;
+
+	return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+				 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
+				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
+}
+
+static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+{
+	if (!attrs[IPVS_DAEMON_ATTR_STATE])
+		return -EINVAL;
+
+	return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+}
+
+static int ip_vs_genl_set_config(struct nlattr **attrs)
+{
+	struct ip_vs_timeout_user t;
+
+	__ip_vs_get_timeouts(&t);
+
+		svc->scheduler->update_service(svc);
+		t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
+
+	if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
+		t.tcp_fin_timeout =
+			nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
+
+	if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
+		t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
+
+	return ip_vs_set_timeout(&t);
+}
+
+static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	struct ip_vs_service *svc = NULL;
+	struct ip_vs_service_user_kern usvc;
+	struct ip_vs_dest_user_kern udest;
+	int ret = 0, cmd;
+	int need_full_svc = 0, need_full_dest = 0;
+
+	cmd = info->genlhdr->cmd;
+
+	mutex_lock(&__ip_vs_mutex);
+
+	if (cmd == IPVS_CMD_FLUSH) {
+		ret = ip_vs_flush();
+		goto out;
+	} else if (cmd == IPVS_CMD_SET_CONFIG) {
+		ret = ip_vs_genl_set_config(info->attrs);
+		goto out;
+	} else if (cmd == IPVS_CMD_NEW_DAEMON ||
+		   cmd == IPVS_CMD_DEL_DAEMON) {
+
+		struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
+
+		if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
+		    nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
+				     info->attrs[IPVS_CMD_ATTR_DAEMON],
+				     ip_vs_daemon_policy)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (cmd == IPVS_CMD_NEW_DAEMON)
+			ret = ip_vs_genl_new_daemon(daemon_attrs);
+		else
+			ret = ip_vs_genl_del_daemon(daemon_attrs);
+		goto out;
+	} else if (cmd == IPVS_CMD_ZERO &&
+		   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
+		ret = ip_vs_zero_all();
+		goto out;
+	}
+
+	/* All following commands require a service argument, so check if we
+	 * received a valid one. We need a full service specification when
+	 * adding / editing a service. Only identifying members otherwise. */
+	if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
+		need_full_svc = 1;
+
+	ret = ip_vs_genl_parse_service(&usvc,
+				       info->attrs[IPVS_CMD_ATTR_SERVICE],
+				       need_full_svc);
+	if (ret)
+		goto out;
+
+	/* Lookup the exact service by <protocol, addr, port> or fwmark */
+	if (usvc.fwmark == 0)
+		svc = __ip_vs_service_get(usvc.af, usvc.protocol,
+					  &usvc.addr, usvc.port);
+	else
+		svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
+
+	/* Unless we're adding a new service, the service must already exist */
+	if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	/* Destination commands require a valid destination argument. For
+	 * adding / editing a destination, we need a full destination
+	 * specification. */
+	if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
+	    cmd == IPVS_CMD_DEL_DEST) {
+		if (cmd != IPVS_CMD_DEL_DEST)
+			need_full_dest = 1;
+
+		ret = ip_vs_genl_parse_dest(&udest,
+					    info->attrs[IPVS_CMD_ATTR_DEST],
+					    need_full_dest);
+		if (ret)
+			goto out;
+	}
+
+	switch (cmd) {
+	case IPVS_CMD_NEW_SERVICE:
+		if (svc == NULL)
+			ret = ip_vs_add_service(&usvc, &svc);
+		else
+			ret = -EEXIST;
+		break;
+	case IPVS_CMD_SET_SERVICE:
+		ret = ip_vs_edit_service(svc, &usvc);
+		break;
+	case IPVS_CMD_DEL_SERVICE:
+		ret = ip_vs_del_service(svc);
+		break;
+	case IPVS_CMD_NEW_DEST:
+		ret = ip_vs_add_dest(svc, &udest);
+		break;
+	case IPVS_CMD_SET_DEST:
+		ret = ip_vs_edit_dest(svc, &udest);
+		break;
+	case IPVS_CMD_DEL_DEST:
+		ret = ip_vs_del_dest(svc, &udest);
+		break;
+	case IPVS_CMD_ZERO:
+		ret = ip_vs_zero_service(svc);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+out:
+	if (svc)
+		ip_vs_service_put(svc);
+	mutex_unlock(&__ip_vs_mutex);
+
+	return ret;
+}
+
+static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	void *reply;
+	int ret, cmd, reply_cmd;
+
+	cmd = info->genlhdr->cmd;
+
+	if (cmd == IPVS_CMD_GET_SERVICE)
+		reply_cmd = IPVS_CMD_NEW_SERVICE;
+	else if (cmd == IPVS_CMD_GET_INFO)
+		reply_cmd = IPVS_CMD_SET_INFO;
+	else if (cmd == IPVS_CMD_GET_CONFIG)
+		reply_cmd = IPVS_CMD_SET_CONFIG;
+	else {
+		IP_VS_ERR("unknown Generic Netlink command\n");
+		return -EINVAL;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&__ip_vs_mutex);
+
+	reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
+	if (reply == NULL)
+		goto nla_put_failure;
+
+	switch (cmd) {
+	case IPVS_CMD_GET_SERVICE:
+	{
+		struct ip_vs_service *svc;
+
+		svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+		if (IS_ERR(svc)) {
+			ret = PTR_ERR(svc);
+			goto out_err;
+		} else if (svc) {
+			ret = ip_vs_genl_fill_service(msg, svc);
+			ip_vs_service_put(svc);
+			if (ret)
+				goto nla_put_failure;
+		} else {
+			ret = -ESRCH;
+			goto out_err;
+		}
+
+		break;
+	}
+
+	case IPVS_CMD_GET_CONFIG:
+	{
+		struct ip_vs_timeout_user t;
+
+		__ip_vs_get_timeouts(&t);
+#ifdef CONFIG_IP_VS_PROTO_TCP
+		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
+		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+			    t.tcp_fin_timeout);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
+#endif
+
+		break;
+	}
+
+	case IPVS_CMD_GET_INFO:
+		NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
+		NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
+			    IP_VS_CONN_TAB_SIZE);
+		break;
+	}
+
+	genlmsg_end(msg, reply);
+	ret = genlmsg_unicast(msg, info->snd_pid);
+	goto out;
+
+nla_put_failure:
+	IP_VS_ERR("not enough space in Netlink message\n");
+	ret = -EMSGSIZE;
+
+out_err:
+	nlmsg_free(msg);
+out:
+	mutex_unlock(&__ip_vs_mutex);
+
+	return ret;
+}
+
+
+static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
+	{
+		.cmd	= IPVS_CMD_NEW_SERVICE,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_SET_SERVICE,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_DEL_SERVICE,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_GET_SERVICE,
+		.flags	= GENL_ADMIN_PERM,
+		.doit	= ip_vs_genl_get_cmd,
+		.dumpit	= ip_vs_genl_dump_services,
+		.policy	= ip_vs_cmd_policy,
+	},
+	{
+		.cmd	= IPVS_CMD_NEW_DEST,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_SET_DEST,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_DEL_DEST,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_GET_DEST,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.dumpit	= ip_vs_genl_dump_dests,
+	},
+	{
+		.cmd	= IPVS_CMD_NEW_DAEMON,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_DEL_DAEMON,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_GET_DAEMON,
+		.flags	= GENL_ADMIN_PERM,
+		.dumpit	= ip_vs_genl_dump_daemons,
+	},
+	{
+		.cmd	= IPVS_CMD_SET_CONFIG,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_GET_CONFIG,
+		.flags	= GENL_ADMIN_PERM,
+		.doit	= ip_vs_genl_get_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_GET_INFO,
+		.flags	= GENL_ADMIN_PERM,
+		.doit	= ip_vs_genl_get_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_ZERO,
+		.flags	= GENL_ADMIN_PERM,
+		.policy	= ip_vs_cmd_policy,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+	{
+		.cmd	= IPVS_CMD_FLUSH,
+		.flags	= GENL_ADMIN_PERM,
+		.doit	= ip_vs_genl_set_cmd,
+	},
+};
+
+static int __init ip_vs_genl_register(void)
+{
+	int ret, i;
+
+	ret = genl_register_family(&ip_vs_genl_family);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
+		ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
+		if (ret)
+			goto err_out;
+	}
+	return 0;
+
+err_out:
+	genl_unregister_family(&ip_vs_genl_family);
+	return ret;
+}
+
+static void ip_vs_genl_unregister(void)
+{
+	genl_unregister_family(&ip_vs_genl_family);
+}
+
+/* End of Generic Netlink interface definitions */
+
 
 int __init ip_vs_control_init(void)
 {
@@ -2334,6 +3394,13 @@
 		return ret;
 	}
 
+	ret = ip_vs_genl_register();
+	if (ret) {
+		IP_VS_ERR("cannot register Generic Netlink interface.\n");
+		nf_unregister_sockopt(&ip_vs_sockopts);
+		return ret;
+	}
+
 	proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
 	proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
 
@@ -2368,6 +3435,7 @@
 	unregister_sysctl_table(sysctl_header);
 	proc_net_remove(&init_net, "ip_vs_stats");
 	proc_net_remove(&init_net, "ip_vs");
+	ip_vs_genl_unregister();
 	nf_unregister_sockopt(&ip_vs_sockopts);
 	LeaveFunction(2);
 }
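All of the ip_vs_genl_fill_*() functions added above rely on the same netlink idiom: the NLA_PUT_* macros jump to a local nla_put_failure label when the message runs out of tailroom, and the partially built nest is cancelled there. A hedged, self-contained sketch of that idiom (example_fill() and the chosen attribute are illustrative only):

static int example_fill(struct sk_buff *skb, u32 timeout)
{
	struct nlattr *nest = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);

	if (!nest)
		return -EMSGSIZE;

	/* NLA_PUT_U32() does "goto nla_put_failure" on lack of tailroom */
	NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, timeout);

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}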
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index fa66824..a16943f 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -218,7 +218,7 @@
 	IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u "
 		  "--> server %u.%u.%u.%u:%d\n",
 		  NIPQUAD(iph->daddr),
-		  NIPQUAD(dest->addr),
+		  NIPQUAD(dest->addr.ip),
 		  ntohs(dest->port));
 
 	return dest;
@@ -234,6 +234,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	0,
+#endif
 	.init_service =		ip_vs_dh_init_svc,
 	.done_service =		ip_vs_dh_done_svc,
 	.update_service =	ip_vs_dh_update_svc,
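With schedulers now carrying a supports_ipv6 flag, ip_vs_add_service()/ip_vs_edit_service() refuse AF_INET6 services whose scheduler leaves it at 0, as DH does here. A hedged sketch of what an IPv6-capable scheduler declaration looks like; the name and callback are hypothetical, the field list mirrors ip_vs_dh_scheduler:

static struct ip_vs_scheduler ip_vs_example_scheduler = {
	.name		= "example",
	.refcnt		= ATOMIC_INIT(0),
	.module		= THIS_MODULE,
	.n_list		= LIST_HEAD_INIT(ip_vs_example_scheduler.n_list),
#ifdef CONFIG_IP_VS_IPV6
	.supports_ipv6	= 1,			/* accepts AF_INET6 services  */
#endif
	.schedule	= ip_vs_example_schedule,	/* other callbacks omitted */
};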
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 5a20f93..2eb2860 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -65,37 +65,37 @@
 		s = container_of(e, struct ip_vs_stats, est);
 
 		spin_lock(&s->lock);
-		n_conns = s->conns;
-		n_inpkts = s->inpkts;
-		n_outpkts = s->outpkts;
-		n_inbytes = s->inbytes;
-		n_outbytes = s->outbytes;
+		n_conns = s->ustats.conns;
+		n_inpkts = s->ustats.inpkts;
+		n_outpkts = s->ustats.outpkts;
+		n_inbytes = s->ustats.inbytes;
+		n_outbytes = s->ustats.outbytes;
 
 		/* scaled by 2^10, but divided 2 seconds */
 		rate = (n_conns - e->last_conns)<<9;
 		e->last_conns = n_conns;
 		e->cps += ((long)rate - (long)e->cps)>>2;
-		s->cps = (e->cps+0x1FF)>>10;
+		s->ustats.cps = (e->cps+0x1FF)>>10;
 
 		rate = (n_inpkts - e->last_inpkts)<<9;
 		e->last_inpkts = n_inpkts;
 		e->inpps += ((long)rate - (long)e->inpps)>>2;
-		s->inpps = (e->inpps+0x1FF)>>10;
+		s->ustats.inpps = (e->inpps+0x1FF)>>10;
 
 		rate = (n_outpkts - e->last_outpkts)<<9;
 		e->last_outpkts = n_outpkts;
 		e->outpps += ((long)rate - (long)e->outpps)>>2;
-		s->outpps = (e->outpps+0x1FF)>>10;
+		s->ustats.outpps = (e->outpps+0x1FF)>>10;
 
 		rate = (n_inbytes - e->last_inbytes)<<4;
 		e->last_inbytes = n_inbytes;
 		e->inbps += ((long)rate - (long)e->inbps)>>2;
-		s->inbps = (e->inbps+0xF)>>5;
+		s->ustats.inbps = (e->inbps+0xF)>>5;
 
 		rate = (n_outbytes - e->last_outbytes)<<4;
 		e->last_outbytes = n_outbytes;
 		e->outbps += ((long)rate - (long)e->outbps)>>2;
-		s->outbps = (e->outbps+0xF)>>5;
+		s->ustats.outbps = (e->outbps+0xF)>>5;
 		spin_unlock(&s->lock);
 	}
 	spin_unlock(&est_lock);
@@ -108,24 +108,22 @@
 
 	INIT_LIST_HEAD(&est->list);
 
-	est->last_conns = stats->conns;
-	est->cps = stats->cps<<10;
+	est->last_conns = stats->ustats.conns;
+	est->cps = stats->ustats.cps<<10;
 
-	est->last_inpkts = stats->inpkts;
-	est->inpps = stats->inpps<<10;
+	est->last_inpkts = stats->ustats.inpkts;
+	est->inpps = stats->ustats.inpps<<10;
 
-	est->last_outpkts = stats->outpkts;
-	est->outpps = stats->outpps<<10;
+	est->last_outpkts = stats->ustats.outpkts;
+	est->outpps = stats->ustats.outpps<<10;
 
-	est->last_inbytes = stats->inbytes;
-	est->inbps = stats->inbps<<5;
+	est->last_inbytes = stats->ustats.inbytes;
+	est->inbps = stats->ustats.inbps<<5;
 
-	est->last_outbytes = stats->outbytes;
-	est->outbps = stats->outbps<<5;
+	est->last_outbytes = stats->ustats.outbytes;
+	est->outbps = stats->ustats.outbps<<5;
 
 	spin_lock_bh(&est_lock);
-	if (list_empty(&est_list))
-		mod_timer(&est_timer, jiffies + 2 * HZ);
 	list_add(&est->list, &est_list);
 	spin_unlock_bh(&est_lock);
 }
@@ -136,11 +134,6 @@
 
 	spin_lock_bh(&est_lock);
 	list_del(&est->list);
-	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
-		spin_unlock_bh(&est_lock);
-		cpu_relax();
-		spin_lock_bh(&est_lock);
-	}
 	spin_unlock_bh(&est_lock);
 }
 
@@ -160,3 +153,14 @@
 	est->inbps = 0;
 	est->outbps = 0;
 }
+
+int __init ip_vs_estimator_init(void)
+{
+	mod_timer(&est_timer, jiffies + 2 * HZ);
+	return 0;
+}
+
+void ip_vs_estimator_cleanup(void)
+{
+	del_timer_sync(&est_timer);
+}
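The estimator keeps its rates in fixed point: packet and connection rates are scaled by 2^10, byte rates by 2^5, samples are taken every 2 seconds (hence the <<9 and <<4 applied to the 2-second deltas), and smoothing is new = old + (sample - old)/4. A hedged, self-contained userspace illustration of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t conns = 0, last_conns = 0;
	long cps = 0;				/* connections/s, scaled by 2^10 */
	long rate;
	int tick;

	for (tick = 1; tick <= 10; tick++) {
		conns += 500;				/* 500 new conns per 2 s tick */
		rate = (long)(conns - last_conns) << 9;	/* per second, scaled by 2^10 */
		last_conns = conns;
		cps += (rate - cps) >> 2;		/* EWMA, alpha = 1/4          */
		printf("tick %2d: cps = %ld\n", tick,
		       (cps + 0x1FF) >> 10);		/* round, drop the 2^10 scale */
	}
	return 0;					/* output converges to 250    */
}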
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index c1c758e..2e7dbd8 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -140,13 +140,21 @@
 	struct tcphdr *th;
 	char *data, *data_limit;
 	char *start, *end;
-	__be32 from;
+	union nf_inet_addr from;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
 	char buf[24];		/* xxx.xxx.xxx.xxx,ppp,ppp\000 */
 	unsigned buf_len;
 	int ret;
 
+#ifdef CONFIG_IP_VS_IPV6
+	/* This application helper doesn't work with IPv6 yet,
+	 * so turn this into a no-op for IPv6 packets
+	 */
+	if (cp->af == AF_INET6)
+		return 1;
+#endif
+
 	*diff = 0;
 
 	/* Only useful for established sessions */
@@ -166,24 +174,25 @@
 		if (ip_vs_ftp_get_addrport(data, data_limit,
 					   SERVER_STRING,
 					   sizeof(SERVER_STRING)-1, ')',
-					   &from, &port,
+					   &from.ip, &port,
 					   &start, &end) != 1)
 			return 1;
 
 		IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> "
 			  "%u.%u.%u.%u:%d detected\n",
-			  NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0);
+			  NIPQUAD(from.ip), ntohs(port),
+			  NIPQUAD(cp->caddr.ip), 0);
 
 		/*
 		 * Now update or create a connection entry for it
 		 */
-		n_cp = ip_vs_conn_out_get(iph->protocol, from, port,
-					  cp->caddr, 0);
+		n_cp = ip_vs_conn_out_get(AF_INET, iph->protocol, &from, port,
+					  &cp->caddr, 0);
 		if (!n_cp) {
-			n_cp = ip_vs_conn_new(IPPROTO_TCP,
-					      cp->caddr, 0,
-					      cp->vaddr, port,
-					      from, port,
+			n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
+					      &cp->caddr, 0,
+					      &cp->vaddr, port,
+					      &from, port,
 					      IP_VS_CONN_F_NO_CPORT,
 					      cp->dest);
 			if (!n_cp)
@@ -196,9 +205,9 @@
 		/*
 		 * Replace the old passive address with the new one
 		 */
-		from = n_cp->vaddr;
+		from.ip = n_cp->vaddr.ip;
 		port = n_cp->vport;
-		sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from),
+		sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip),
 			(ntohs(port)>>8)&255, ntohs(port)&255);
 		buf_len = strlen(buf);
 
@@ -243,10 +252,18 @@
 	struct tcphdr *th;
 	char *data, *data_start, *data_limit;
 	char *start, *end;
-	__be32 to;
+	union nf_inet_addr to;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
 
+#ifdef CONFIG_IP_VS_IPV6
+	/* This application helper doesn't work with IPv6 yet,
+	 * so turn this into a no-op for IPv6 packets
+	 */
+	if (cp->af == AF_INET6)
+		return 1;
+#endif
+
 	/* no diff required for incoming packets */
 	*diff = 0;
 
@@ -291,12 +308,12 @@
 	 */
 	if (ip_vs_ftp_get_addrport(data_start, data_limit,
 				   CLIENT_STRING, sizeof(CLIENT_STRING)-1,
-				   '\r', &to, &port,
+				   '\r', &to.ip, &port,
 				   &start, &end) != 1)
 		return 1;
 
 	IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n",
-		  NIPQUAD(to), ntohs(port));
+		  NIPQUAD(to.ip), ntohs(port));
 
 	/* Passive mode off */
 	cp->app_data = NULL;
@@ -306,16 +323,16 @@
 	 */
 	IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n",
 		  ip_vs_proto_name(iph->protocol),
-		  NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0);
+		  NIPQUAD(to.ip), ntohs(port), NIPQUAD(cp->vaddr.ip), 0);
 
-	n_cp = ip_vs_conn_in_get(iph->protocol,
-				 to, port,
-				 cp->vaddr, htons(ntohs(cp->vport)-1));
+	n_cp = ip_vs_conn_in_get(AF_INET, iph->protocol,
+				 &to, port,
+				 &cp->vaddr, htons(ntohs(cp->vport)-1));
 	if (!n_cp) {
-		n_cp = ip_vs_conn_new(IPPROTO_TCP,
-				      to, port,
-				      cp->vaddr, htons(ntohs(cp->vport)-1),
-				      cp->daddr, htons(ntohs(cp->dport)-1),
+		n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
+				      &to, port,
+				      &cp->vaddr, htons(ntohs(cp->vport)-1),
+				      &cp->daddr, htons(ntohs(cp->dport)-1),
 				      0,
 				      cp->dest);
 		if (!n_cp)
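ip_vs_conn_in_get()/ip_vs_conn_out_get()/ip_vs_conn_new() now take an address family plus pointers to union nf_inet_addr. The FTP helper's lookup-or-create step, annotated (a hedged reading of the PASV call above: the three address/port pairs are the client, virtual and real-server endpoints):

n_cp = ip_vs_conn_out_get(AF_INET, IPPROTO_TCP, &from, port,
			  &cp->caddr, 0);
if (!n_cp) {
	n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
			      &cp->caddr, 0,	/* client              */
			      &cp->vaddr, port,	/* virtual service     */
			      &from, port,	/* real server (PASV)  */
			      IP_VS_CONN_F_NO_CPORT, cp->dest);
	if (!n_cp)
		return 0;
}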
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 7a6a319..6ecef35 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -96,7 +96,6 @@
  *      IPVS lblc hash table
  */
 struct ip_vs_lblc_table {
-	rwlock_t	        lock;           /* lock for this table */
 	struct list_head        bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
 	atomic_t                entries;        /* number of entries */
 	int                     max_size;       /* maximum size of entries */
@@ -123,31 +122,6 @@
 
 static struct ctl_table_header * sysctl_header;
 
-/*
- *      new/free a ip_vs_lblc_entry, which is a mapping of a destionation
- *      IP address to a server.
- */
-static inline struct ip_vs_lblc_entry *
-ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
-{
-	struct ip_vs_lblc_entry *en;
-
-	en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
-	if (en == NULL) {
-		IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&en->list);
-	en->addr = daddr;
-
-	atomic_inc(&dest->refcnt);
-	en->dest = dest;
-
-	return en;
-}
-
-
 static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
 	list_del(&en->list);
@@ -173,87 +147,97 @@
  *	Hash an entry in the ip_vs_lblc_table.
  *	returns bool success.
  */
-static int
+static void
 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
 {
-	unsigned hash;
+	unsigned hash = ip_vs_lblc_hashkey(en->addr);
 
-	if (!list_empty(&en->list)) {
-		IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
-			  "called from %p\n", __builtin_return_address(0));
-		return 0;
-	}
-
-	/*
-	 *	Hash by destination IP address
-	 */
-	hash = ip_vs_lblc_hashkey(en->addr);
-
-	write_lock(&tbl->lock);
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
-	write_unlock(&tbl->lock);
-
-	return 1;
 }
 
 
 /*
- *  Get ip_vs_lblc_entry associated with supplied parameters.
+ *  Get ip_vs_lblc_entry associated with supplied parameters. Called under read
+ *  lock
  */
 static inline struct ip_vs_lblc_entry *
 ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
 {
-	unsigned hash;
+	unsigned hash = ip_vs_lblc_hashkey(addr);
 	struct ip_vs_lblc_entry *en;
 
-	hash = ip_vs_lblc_hashkey(addr);
-
-	read_lock(&tbl->lock);
-
-	list_for_each_entry(en, &tbl->bucket[hash], list) {
-		if (en->addr == addr) {
-			/* HIT */
-			read_unlock(&tbl->lock);
+	list_for_each_entry(en, &tbl->bucket[hash], list)
+		if (en->addr == addr)
 			return en;
-		}
-	}
-
-	read_unlock(&tbl->lock);
 
 	return NULL;
 }
 
 
 /*
+ * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
+ * address to a server. Called under write lock.
+ */
+static inline struct ip_vs_lblc_entry *
+ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
+	       struct ip_vs_dest *dest)
+{
+	struct ip_vs_lblc_entry *en;
+
+	en = ip_vs_lblc_get(tbl, daddr);
+	if (!en) {
+		en = kmalloc(sizeof(*en), GFP_ATOMIC);
+		if (!en) {
+			IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
+			return NULL;
+		}
+
+		en->addr = daddr;
+		en->lastuse = jiffies;
+
+		atomic_inc(&dest->refcnt);
+		en->dest = dest;
+
+		ip_vs_lblc_hash(tbl, en);
+	} else if (en->dest != dest) {
+		atomic_dec(&en->dest->refcnt);
+		atomic_inc(&dest->refcnt);
+		en->dest = dest;
+	}
+
+	return en;
+}
+
+
+/*
  *      Flush all the entries of the specified table.
  */
 static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
 {
-	int i;
 	struct ip_vs_lblc_entry *en, *nxt;
+	int i;
 
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
-		write_lock(&tbl->lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
 			ip_vs_lblc_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&tbl->lock);
 	}
 }
 
 
-static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
+static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 {
+	struct ip_vs_lblc_table *tbl = svc->sched_data;
+	struct ip_vs_lblc_entry *en, *nxt;
 	unsigned long now = jiffies;
 	int i, j;
-	struct ip_vs_lblc_entry *en, *nxt;
 
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-		write_lock(&tbl->lock);
+		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
 			if (time_before(now,
 					en->lastuse + sysctl_ip_vs_lblc_expiration))
@@ -262,7 +246,7 @@
 			ip_vs_lblc_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&tbl->lock);
+		write_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
@@ -281,17 +265,16 @@
  */
 static void ip_vs_lblc_check_expire(unsigned long data)
 {
-	struct ip_vs_lblc_table *tbl;
+	struct ip_vs_service *svc = (struct ip_vs_service *) data;
+	struct ip_vs_lblc_table *tbl = svc->sched_data;
 	unsigned long now = jiffies;
 	int goal;
 	int i, j;
 	struct ip_vs_lblc_entry *en, *nxt;
 
-	tbl = (struct ip_vs_lblc_table *)data;
-
 	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
 		/* do full expiration check */
-		ip_vs_lblc_full_check(tbl);
+		ip_vs_lblc_full_check(svc);
 		tbl->counter = 1;
 		goto out;
 	}
@@ -308,7 +291,7 @@
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-		write_lock(&tbl->lock);
+		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
 				continue;
@@ -317,7 +300,7 @@
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&tbl->lock);
+		write_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -336,15 +319,14 @@
 	/*
 	 *    Allocate the ip_vs_lblc_table for this service
 	 */
-	tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC);
+	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
 	if (tbl == NULL) {
 		IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
 		return -ENOMEM;
 	}
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
-		  "current service\n",
-		  sizeof(struct ip_vs_lblc_table));
+		  "current service\n", sizeof(*tbl));
 
 	/*
 	 *    Initialize the hash buckets
@@ -352,7 +334,6 @@
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;
@@ -361,9 +342,8 @@
 	 *    Hook periodic timer for garbage collection
 	 */
 	setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
-			(unsigned long)tbl);
-	tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
-	add_timer(&tbl->periodic_timer);
+			(unsigned long)svc);
+	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
 
 	return 0;
 }
@@ -380,22 +360,16 @@
 	ip_vs_lblc_flush(tbl);
 
 	/* release the table itself */
-	kfree(svc->sched_data);
+	kfree(tbl);
 	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
-		  sizeof(struct ip_vs_lblc_table));
+		  sizeof(*tbl));
 
 	return 0;
 }
 
 
-static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline struct ip_vs_dest *
-__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
+__ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
 {
 	struct ip_vs_dest *dest, *least;
 	int loh, doh;
@@ -448,7 +422,7 @@
 
 	IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d "
 		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
+		  NIPQUAD(least->addr.ip), ntohs(least->port),
 		  atomic_read(&least->activeconns),
 		  atomic_read(&least->refcnt),
 		  atomic_read(&least->weight), loh);
@@ -484,47 +458,55 @@
 static struct ip_vs_dest *
 ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
-	struct ip_vs_dest *dest;
-	struct ip_vs_lblc_table *tbl;
-	struct ip_vs_lblc_entry *en;
+	struct ip_vs_lblc_table *tbl = svc->sched_data;
 	struct iphdr *iph = ip_hdr(skb);
+	struct ip_vs_dest *dest = NULL;
+	struct ip_vs_lblc_entry *en;
 
 	IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
 
-	tbl = (struct ip_vs_lblc_table *)svc->sched_data;
+	/* First look in our cache */
+	read_lock(&svc->sched_lock);
 	en = ip_vs_lblc_get(tbl, iph->daddr);
-	if (en == NULL) {
-		dest = __ip_vs_wlc_schedule(svc, iph);
-		if (dest == NULL) {
-			IP_VS_DBG(1, "no destination available\n");
-			return NULL;
-		}
-		en = ip_vs_lblc_new(iph->daddr, dest);
-		if (en == NULL) {
-			return NULL;
-		}
-		ip_vs_lblc_hash(tbl, en);
-	} else {
-		dest = en->dest;
-		if (!(dest->flags & IP_VS_DEST_F_AVAILABLE)
-		    || atomic_read(&dest->weight) <= 0
-		    || is_overloaded(dest, svc)) {
-			dest = __ip_vs_wlc_schedule(svc, iph);
-			if (dest == NULL) {
-				IP_VS_DBG(1, "no destination available\n");
-				return NULL;
-			}
-			atomic_dec(&en->dest->refcnt);
-			atomic_inc(&dest->refcnt);
-			en->dest = dest;
-		}
-	}
-	en->lastuse = jiffies;
+	if (en) {
+		/* We only hold a read lock, but this is atomic */
+		en->lastuse = jiffies;
 
+		/*
+		 * If the destination is not available, i.e. it's in the trash,
+		 * we must ignore it, as it may be removed from under our feet,
+		 * if someone drops our reference count. Our caller only makes
+		 * sure that destinations that are not in the trash are not
+		 * moved to the trash while we are scheduling. But anyone can
+		 * free up entries from the trash at any time.
+		 */
+
+		if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
+			dest = en->dest;
+	}
+	read_unlock(&svc->sched_lock);
+
+	/* If the destination has a weight and is not overloaded, use it */
+	if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
+		goto out;
+
+	/* No cache entry or it is invalid, time to schedule */
+	dest = __ip_vs_lblc_schedule(svc, iph);
+	if (!dest) {
+		IP_VS_DBG(1, "no destination available\n");
+		return NULL;
+	}
+
+	/* If we fail to create a cache entry, we'll just use the valid dest */
+	write_lock(&svc->sched_lock);
+	ip_vs_lblc_new(tbl, iph->daddr, dest);
+	write_unlock(&svc->sched_lock);
+
+out:
 	IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
 		  "--> server %u.%u.%u.%u:%d\n",
-		  NIPQUAD(en->addr),
-		  NIPQUAD(dest->addr),
+		  NIPQUAD(iph->daddr),
+		  NIPQUAD(dest->addr.ip),
 		  ntohs(dest->port));
 
 	return dest;
@@ -540,9 +522,11 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	0,
+#endif
 	.init_service =		ip_vs_lblc_init_svc,
 	.done_service =		ip_vs_lblc_done_svc,
-	.update_service =	ip_vs_lblc_update_svc,
 	.schedule =		ip_vs_lblc_schedule,
 };
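The rewritten ip_vs_lblc_schedule() above takes svc->sched_lock for reading while it probes the per-service cache, drops the lock, falls back to __ip_vs_lblc_schedule() when the cached destination is unusable, and only re-takes the lock for writing in order to insert or refresh the cache entry. A rough userspace sketch of that lookup-then-insert discipline with a pthreads rwlock; the table and entry types are stand-ins, not the IPVS structures:

#include <pthread.h>
#include <stdio.h>

#define TAB_SIZE 16

struct entry { unsigned daddr; int dest; int used; };

static struct entry tab[TAB_SIZE];
static pthread_rwlock_t sched_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *cache_get(unsigned daddr)	/* caller holds the lock */
{
	struct entry *e = &tab[daddr % TAB_SIZE];
	return (e->used && e->daddr == daddr) ? e : NULL;
}

static int fallback_schedule(unsigned daddr)	/* stand-in for __ip_vs_lblc_schedule() */
{
	return (int)(daddr % 3);		/* pretend server id */
}

static int schedule(unsigned daddr)
{
	struct entry *e;
	int dest = -1;

	pthread_rwlock_rdlock(&sched_lock);	/* cheap, concurrent lookups */
	e = cache_get(daddr);
	if (e)
		dest = e->dest;
	pthread_rwlock_unlock(&sched_lock);

	if (dest >= 0)
		return dest;			/* cache hit, nothing to write */

	dest = fallback_schedule(daddr);	/* decide outside any lock */

	pthread_rwlock_wrlock(&sched_lock);	/* short write section: insert only */
	e = &tab[daddr % TAB_SIZE];
	e->daddr = daddr;
	e->dest = dest;
	e->used = 1;
	pthread_rwlock_unlock(&sched_lock);

	return dest;
}

int main(void)
{
	printf("first:  %d\n", schedule(0x0a000001));	/* miss: schedules and caches */
	printf("second: %d\n", schedule(0x0a000001));	/* hit: served under the read lock */
	return 0;
}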
 
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index c234e73..1f75ea8 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -106,7 +106,7 @@
 			return NULL;
 	}
 
-	e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
+	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 	if (e == NULL) {
 		IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
 		return NULL;
@@ -116,11 +116,9 @@
 	e->dest = dest;
 
 	/* link it to the list */
-	write_lock(&set->lock);
 	e->next = set->list;
 	set->list = e;
 	atomic_inc(&set->size);
-	write_unlock(&set->lock);
 
 	set->lastmod = jiffies;
 	return e;
@@ -131,7 +129,6 @@
 {
 	struct ip_vs_dest_list *e, **ep;
 
-	write_lock(&set->lock);
 	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
 		if (e->dest == dest) {
 			/* HIT */
@@ -144,7 +141,6 @@
 		}
 		ep = &e->next;
 	}
-	write_unlock(&set->lock);
 }
 
 static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
@@ -174,7 +170,6 @@
 	if (set == NULL)
 		return NULL;
 
-	read_lock(&set->lock);
 	/* select the first destination server, whose weight > 0 */
 	for (e=set->list; e!=NULL; e=e->next) {
 		least = e->dest;
@@ -188,7 +183,6 @@
 			goto nextstage;
 		}
 	}
-	read_unlock(&set->lock);
 	return NULL;
 
 	/* find the destination with the weighted least load */
@@ -207,11 +201,10 @@
 			loh = doh;
 		}
 	}
-	read_unlock(&set->lock);
 
 	IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
 		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
+		  NIPQUAD(least->addr.ip), ntohs(least->port),
 		  atomic_read(&least->activeconns),
 		  atomic_read(&least->refcnt),
 		  atomic_read(&least->weight), loh);
@@ -229,7 +222,6 @@
 	if (set == NULL)
 		return NULL;
 
-	read_lock(&set->lock);
 	/* select the first destination server, whose weight > 0 */
 	for (e=set->list; e!=NULL; e=e->next) {
 		most = e->dest;
@@ -239,7 +231,6 @@
 			goto nextstage;
 		}
 	}
-	read_unlock(&set->lock);
 	return NULL;
 
 	/* find the destination with the weighted most load */
@@ -256,11 +247,10 @@
 			moh = doh;
 		}
 	}
-	read_unlock(&set->lock);
 
 	IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
 		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(most->addr), ntohs(most->port),
+		  NIPQUAD(most->addr.ip), ntohs(most->port),
 		  atomic_read(&most->activeconns),
 		  atomic_read(&most->refcnt),
 		  atomic_read(&most->weight), moh);
@@ -284,7 +274,6 @@
  *      IPVS lblcr hash table
  */
 struct ip_vs_lblcr_table {
-	rwlock_t	        lock;           /* lock for this table */
 	struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
 	atomic_t                entries;        /* number of entries */
 	int                     max_size;       /* maximum size of entries */
@@ -311,32 +300,6 @@
 
 static struct ctl_table_header * sysctl_header;
 
-/*
- *      new/free a ip_vs_lblcr_entry, which is a mapping of a destination
- *      IP address to a server.
- */
-static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
-{
-	struct ip_vs_lblcr_entry *en;
-
-	en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
-	if (en == NULL) {
-		IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&en->list);
-	en->addr = daddr;
-
-	/* initilize its dest set */
-	atomic_set(&(en->set.size), 0);
-	en->set.list = NULL;
-	rwlock_init(&en->set.lock);
-
-	return en;
-}
-
-
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
 	list_del(&en->list);
@@ -358,59 +321,72 @@
  *	Hash an entry in the ip_vs_lblcr_table.
  *	returns bool success.
  */
-static int
+static void
 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
 {
-	unsigned hash;
+	unsigned hash = ip_vs_lblcr_hashkey(en->addr);
 
-	if (!list_empty(&en->list)) {
-		IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
-			  "called from %p\n", __builtin_return_address(0));
-		return 0;
-	}
-
-	/*
-	 *	Hash by destination IP address
-	 */
-	hash = ip_vs_lblcr_hashkey(en->addr);
-
-	write_lock(&tbl->lock);
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
-	write_unlock(&tbl->lock);
-
-	return 1;
 }
 
 
 /*
- *  Get ip_vs_lblcr_entry associated with supplied parameters.
+ *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
+ *  read lock.
  */
 static inline struct ip_vs_lblcr_entry *
 ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
 {
-	unsigned hash;
+	unsigned hash = ip_vs_lblcr_hashkey(addr);
 	struct ip_vs_lblcr_entry *en;
 
-	hash = ip_vs_lblcr_hashkey(addr);
-
-	read_lock(&tbl->lock);
-
-	list_for_each_entry(en, &tbl->bucket[hash], list) {
-		if (en->addr == addr) {
-			/* HIT */
-			read_unlock(&tbl->lock);
+	list_for_each_entry(en, &tbl->bucket[hash], list)
+		if (en->addr == addr)
 			return en;
-		}
-	}
-
-	read_unlock(&tbl->lock);
 
 	return NULL;
 }
 
 
 /*
+ * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
+ * IP address to a server. Called under write lock.
+ */
+static inline struct ip_vs_lblcr_entry *
+ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl,  __be32 daddr,
+		struct ip_vs_dest *dest)
+{
+	struct ip_vs_lblcr_entry *en;
+
+	en = ip_vs_lblcr_get(tbl, daddr);
+	if (!en) {
+		en = kmalloc(sizeof(*en), GFP_ATOMIC);
+		if (!en) {
+			IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
+			return NULL;
+		}
+
+		en->addr = daddr;
+		en->lastuse = jiffies;
+
+		/* initialize its dest set */
+		atomic_set(&(en->set.size), 0);
+		en->set.list = NULL;
+		rwlock_init(&en->set.lock);
+
+		ip_vs_lblcr_hash(tbl, en);
+	}
+
+	write_lock(&en->set.lock);
+	ip_vs_dest_set_insert(&en->set, dest);
+	write_unlock(&en->set.lock);
+
+	return en;
+}
+
+
+/*
  *      Flush all the entries of the specified table.
  */
 static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
@@ -418,19 +394,18 @@
 	int i;
 	struct ip_vs_lblcr_entry *en, *nxt;
 
+	/* No locking required, only called during cleanup. */
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
-		write_lock(&tbl->lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
 			ip_vs_lblcr_free(en);
-			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&tbl->lock);
 	}
 }
 
 
-static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
+static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 {
+	struct ip_vs_lblcr_table *tbl = svc->sched_data;
 	unsigned long now = jiffies;
 	int i, j;
 	struct ip_vs_lblcr_entry *en, *nxt;
@@ -438,7 +413,7 @@
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-		write_lock(&tbl->lock);
+		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
 			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
 				       now))
@@ -447,7 +422,7 @@
 			ip_vs_lblcr_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&tbl->lock);
+		write_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
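ip_vs_lblcr_full_check() above walks the hash table starting at tbl->rover, wrapping the index with IP_VS_LBLCR_TAB_MASK and saving the position for the next pass, so each timer tick inspects the buckets in a bounded, resumable sweep instead of scanning everything at once. A small standalone sketch of that rover pattern (table size, budget, and the expiry rule are made up for the demo):

#include <stdio.h>

#define TAB_SIZE  8			/* must be a power of two */
#define TAB_MASK  (TAB_SIZE - 1)

static long bucket_age[TAB_SIZE] = { 9, 1, 8, 2, 7, 3, 6, 4 };
static int  rover;

/* Expire at most 'budget' buckets per call, resuming where we left off. */
static void partial_sweep(int budget, long expire_after)
{
	int i, j;

	for (i = 0, j = rover; i < budget; i++) {
		j = (j + 1) & TAB_MASK;		/* wrap, like (j + 1) & IP_VS_LBLCR_TAB_MASK */
		if (bucket_age[j] > expire_after) {
			printf("expiring bucket %d (age %ld)\n", j, bucket_age[j]);
			bucket_age[j] = 0;
		}
	}
	rover = j;				/* remember where to resume */
}

int main(void)
{
	partial_sweep(3, 5);	/* first tick covers buckets 1..3 */
	partial_sweep(3, 5);	/* next tick resumes at bucket 4  */
	return 0;
}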
@@ -466,17 +441,16 @@
  */
 static void ip_vs_lblcr_check_expire(unsigned long data)
 {
-	struct ip_vs_lblcr_table *tbl;
+	struct ip_vs_service *svc = (struct ip_vs_service *) data;
+	struct ip_vs_lblcr_table *tbl = svc->sched_data;
 	unsigned long now = jiffies;
 	int goal;
 	int i, j;
 	struct ip_vs_lblcr_entry *en, *nxt;
 
-	tbl = (struct ip_vs_lblcr_table *)data;
-
 	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
 		/* do full expiration check */
-		ip_vs_lblcr_full_check(tbl);
+		ip_vs_lblcr_full_check(svc);
 		tbl->counter = 1;
 		goto out;
 	}
@@ -493,7 +467,7 @@
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-		write_lock(&tbl->lock);
+		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
 				continue;
@@ -502,7 +476,7 @@
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&tbl->lock);
+		write_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -520,15 +494,14 @@
 	/*
 	 *    Allocate the ip_vs_lblcr_table for this service
 	 */
-	tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
+	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
 	if (tbl == NULL) {
 		IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
 		return -ENOMEM;
 	}
 	svc->sched_data = tbl;
 	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
-		  "current service\n",
-		  sizeof(struct ip_vs_lblcr_table));
+		  "current service\n", sizeof(*tbl));
 
 	/*
 	 *    Initialize the hash buckets
@@ -536,7 +509,6 @@
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;
@@ -545,9 +517,8 @@
 	 *    Hook periodic timer for garbage collection
 	 */
 	setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
-			(unsigned long)tbl);
-	tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
-	add_timer(&tbl->periodic_timer);
+			(unsigned long)svc);
+	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
 
 	return 0;
 }
@@ -564,22 +535,16 @@
 	ip_vs_lblcr_flush(tbl);
 
 	/* release the table itself */
-	kfree(svc->sched_data);
+	kfree(tbl);
 	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
-		  sizeof(struct ip_vs_lblcr_table));
+		  sizeof(*tbl));
 
 	return 0;
 }
 
 
-static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline struct ip_vs_dest *
-__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
+__ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
 {
 	struct ip_vs_dest *dest, *least;
 	int loh, doh;
@@ -633,7 +598,7 @@
 
 	IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
 		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
+		  NIPQUAD(least->addr.ip), ntohs(least->port),
 		  atomic_read(&least->activeconns),
 		  atomic_read(&least->refcnt),
 		  atomic_read(&least->weight), loh);
@@ -669,51 +634,79 @@
 static struct ip_vs_dest *
 ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
-	struct ip_vs_dest *dest;
-	struct ip_vs_lblcr_table *tbl;
-	struct ip_vs_lblcr_entry *en;
+	struct ip_vs_lblcr_table *tbl = svc->sched_data;
 	struct iphdr *iph = ip_hdr(skb);
+	struct ip_vs_dest *dest = NULL;
+	struct ip_vs_lblcr_entry *en;
 
 	IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
 
-	tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
+	/* First look in our cache */
+	read_lock(&svc->sched_lock);
 	en = ip_vs_lblcr_get(tbl, iph->daddr);
-	if (en == NULL) {
-		dest = __ip_vs_wlc_schedule(svc, iph);
-		if (dest == NULL) {
-			IP_VS_DBG(1, "no destination available\n");
-			return NULL;
-		}
-		en = ip_vs_lblcr_new(iph->daddr);
-		if (en == NULL) {
-			return NULL;
-		}
-		ip_vs_dest_set_insert(&en->set, dest);
-		ip_vs_lblcr_hash(tbl, en);
-	} else {
+	if (en) {
+		/* We only hold a read lock, but this is atomic */
+		en->lastuse = jiffies;
+
+		/* Get the least loaded destination */
+		read_lock(&en->set.lock);
 		dest = ip_vs_dest_set_min(&en->set);
-		if (!dest || is_overloaded(dest, svc)) {
-			dest = __ip_vs_wlc_schedule(svc, iph);
-			if (dest == NULL) {
-				IP_VS_DBG(1, "no destination available\n");
-				return NULL;
-			}
-			ip_vs_dest_set_insert(&en->set, dest);
-		}
+		read_unlock(&en->set.lock);
+
+		/* More than one destination and enough time has passed: clean up */
 		if (atomic_read(&en->set.size) > 1 &&
-		    jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
+				time_after(jiffies, en->set.lastmod +
+				sysctl_ip_vs_lblcr_expiration)) {
 			struct ip_vs_dest *m;
+
+			write_lock(&en->set.lock);
 			m = ip_vs_dest_set_max(&en->set);
 			if (m)
 				ip_vs_dest_set_erase(&en->set, m);
+			write_unlock(&en->set.lock);
 		}
-	}
-	en->lastuse = jiffies;
 
+		/* If the destination is not overloaded, use it */
+		if (dest && !is_overloaded(dest, svc)) {
+			read_unlock(&svc->sched_lock);
+			goto out;
+		}
+
+		/* The cache entry is invalid, time to schedule */
+		dest = __ip_vs_lblcr_schedule(svc, iph);
+		if (!dest) {
+			IP_VS_DBG(1, "no destination available\n");
+			read_unlock(&svc->sched_lock);
+			return NULL;
+		}
+
+		/* Update our cache entry */
+		write_lock(&en->set.lock);
+		ip_vs_dest_set_insert(&en->set, dest);
+		write_unlock(&en->set.lock);
+	}
+	read_unlock(&svc->sched_lock);
+
+	if (dest)
+		goto out;
+
+	/* No cache entry, time to schedule */
+	dest = __ip_vs_lblcr_schedule(svc, iph);
+	if (!dest) {
+		IP_VS_DBG(1, "no destination available\n");
+		return NULL;
+	}
+
+	/* If we fail to create a cache entry, we'll just use the valid dest */
+	write_lock(&svc->sched_lock);
+	ip_vs_lblcr_new(tbl, iph->daddr, dest);
+	write_unlock(&svc->sched_lock);
+
+out:
 	IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
 		  "--> server %u.%u.%u.%u:%d\n",
-		  NIPQUAD(en->addr),
-		  NIPQUAD(dest->addr),
+		  NIPQUAD(iph->daddr),
+		  NIPQUAD(dest->addr.ip),
 		  ntohs(dest->port));
 
 	return dest;
@@ -729,9 +722,11 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	0,
+#endif
 	.init_service =		ip_vs_lblcr_init_svc,
 	.done_service =		ip_vs_lblcr_done_svc,
-	.update_service =	ip_vs_lblcr_update_svc,
 	.schedule =		ip_vs_lblcr_schedule,
 };
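The LBLCR scheduler above keeps a small set of servers per destination IP: it serves from the least-loaded member (ip_vs_dest_set_min) and, once the set holds more than one member and has gone unmodified for longer than the expiration period, evicts the most-loaded member (ip_vs_dest_set_max followed by ip_vs_dest_set_erase). The sketch below mimics that min/max policy over a plain array; the load metric is a bare counter rather than the weighted overhead IPVS actually uses:

#include <stdio.h>

struct dest { const char *name; int load; };

static int set_min(const struct dest *set, int n)	/* index of least-loaded member */
{
	int i, best = 0;
	for (i = 1; i < n; i++)
		if (set[i].load < set[best].load)
			best = i;
	return best;
}

static int set_max(const struct dest *set, int n)	/* index of most-loaded member */
{
	int i, worst = 0;
	for (i = 1; i < n; i++)
		if (set[i].load > set[worst].load)
			worst = i;
	return worst;
}

int main(void)
{
	struct dest set[] = { { "rs1", 12 }, { "rs2", 3 }, { "rs3", 25 } };
	int n = 3;

	/* schedule: always pick the least-loaded member of the set */
	printf("schedule to %s\n", set[set_min(set, n)].name);

	/* periodic trim: with more than one member and the set idle long
	 * enough, drop the most-loaded member (rs3 here) */
	if (n > 1) {
		int victim = set_max(set, n);
		printf("trimming %s\n", set[victim].name);
		set[victim] = set[n - 1];
		n--;
	}
	return 0;
}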
 
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ebcdbf7..b69f808 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -20,24 +20,6 @@
 #include <net/ip_vs.h>
 
 
-static int ip_vs_lc_init_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int ip_vs_lc_done_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int ip_vs_lc_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline unsigned int
 ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
 {
@@ -85,10 +67,10 @@
 	}
 
 	if (least)
-	IP_VS_DBG(6, "LC: server %u.%u.%u.%u:%u activeconns %d inactconns %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
-		  atomic_read(&least->activeconns),
-		  atomic_read(&least->inactconns));
+	IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+		      atomic_read(&least->activeconns),
+		      atomic_read(&least->inactconns));
 
 	return least;
 }
@@ -99,9 +81,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
-	.init_service =		ip_vs_lc_init_svc,
-	.done_service =		ip_vs_lc_done_svc,
-	.update_service =	ip_vs_lc_update_svc,
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.schedule =		ip_vs_lc_schedule,
 };
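ip_vs_lc.c above drops its empty init_service/done_service/update_service stubs and simply leaves those ip_vs_scheduler fields unset, which only works if the core treats the callbacks as optional. A NULL-check dispatch along the following lines is presumably what makes that safe; this is a sketch of the idea, not the actual ip_vs_ctl.c code:

#include <stdio.h>

struct service;

struct scheduler {
	const char *name;
	int (*init_service)(struct service *svc);	/* optional */
	int (*done_service)(struct service *svc);	/* optional */
};

struct service { const struct scheduler *sched; };

static int bind_scheduler(struct service *svc, const struct scheduler *sched)
{
	svc->sched = sched;
	/* Only call the hook when the scheduler provides one. */
	if (sched->init_service)
		return sched->init_service(svc);
	return 0;
}

/* A scheduler with no per-service state leaves both hooks NULL. */
static const struct scheduler lc_like = { .name = "lc" };

int main(void)
{
	struct service svc;

	printf("bind: %d (no init hook needed for %s)\n",
	       bind_scheduler(&svc, &lc_like), lc_like.name);
	return 0;
}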
 
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index 92f3a67..9a2d803 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -37,27 +37,6 @@
 #include <net/ip_vs.h>
 
 
-static int
-ip_vs_nq_init_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_nq_done_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_nq_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline unsigned int
 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
 {
@@ -120,12 +99,12 @@
 		return NULL;
 
   out:
-	IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u "
-		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
-		  atomic_read(&least->activeconns),
-		  atomic_read(&least->refcnt),
-		  atomic_read(&least->weight), loh);
+	IP_VS_DBG_BUF(6, "NQ: server %s:%u "
+		      "activeconns %d refcnt %d weight %d overhead %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+		      atomic_read(&least->activeconns),
+		      atomic_read(&least->refcnt),
+		      atomic_read(&least->weight), loh);
 
 	return least;
 }
@@ -137,9 +116,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
-	.init_service =		ip_vs_nq_init_svc,
-	.done_service =		ip_vs_nq_done_svc,
-	.update_service =	ip_vs_nq_update_svc,
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.schedule =		ip_vs_nq_schedule,
 };
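Several hunks above replace NIPQUAD-style debug output with IP_VS_DBG_BUF/IP_VS_DBG_ADDR, which format an address according to the address family of the service or connection. Outside the kernel the same idea is simply inet_ntop() keyed on the family; a small sketch, with the union playing the role of nf_inet_addr:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

union addr {
	struct in_addr  ip;	/* IPv4 */
	struct in6_addr ip6;	/* IPv6 */
};

/* Format an address into buf according to the address family. */
static const char *fmt_addr(int af, const union addr *a, char *buf, size_t len)
{
	const void *src = (af == AF_INET6) ? (const void *)&a->ip6
					   : (const void *)&a->ip;
	return inet_ntop(af, src, buf, len) ? buf : "(bad address)";
}

int main(void)
{
	union addr a4, a6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET,  "192.0.2.1",   &a4.ip);
	inet_pton(AF_INET6, "2001:db8::1", &a6.ip6);

	printf("v4: %s\n", fmt_addr(AF_INET,  &a4, buf, sizeof(buf)));
	printf("v6: %s\n", fmt_addr(AF_INET6, &a6, buf, sizeof(buf)));
	return 0;
}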
 
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 6099a88..0791f9e 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -151,11 +151,11 @@
 }
 
 
-void
-ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
-			  const struct sk_buff *skb,
-			  int offset,
-			  const char *msg)
+static void
+ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
+			     const struct sk_buff *skb,
+			     int offset,
+			     const char *msg)
 {
 	char buf[128];
 	struct iphdr _iph, *ih;
@@ -189,6 +189,61 @@
 	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static void
+ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
+			     const struct sk_buff *skb,
+			     int offset,
+			     const char *msg)
+{
+	char buf[192];
+	struct ipv6hdr _iph, *ih;
+
+	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
+	if (ih == NULL)
+		sprintf(buf, "%s TRUNCATED", pp->name);
+	else if (ih->nexthdr == IPPROTO_FRAGMENT)
+		sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT " frag",
+			pp->name, NIP6(ih->saddr),
+			NIP6(ih->daddr));
+	else {
+		__be16 _ports[2], *pptr;
+
+		pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
+					  sizeof(_ports), _ports);
+		if (pptr == NULL)
+			sprintf(buf, "%s TRUNCATED " NIP6_FMT "->" NIP6_FMT,
+				pp->name,
+				NIP6(ih->saddr),
+				NIP6(ih->daddr));
+		else
+			sprintf(buf, "%s " NIP6_FMT ":%u->" NIP6_FMT ":%u",
+				pp->name,
+				NIP6(ih->saddr),
+				ntohs(pptr[0]),
+				NIP6(ih->daddr),
+				ntohs(pptr[1]));
+	}
+
+	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+}
+#endif
+
+
+void
+ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
+			  const struct sk_buff *skb,
+			  int offset,
+			  const char *msg)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (skb->protocol == htons(ETH_P_IPV6))
+		ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
+	else
+#endif
+		ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
+}
+
 
 int __init ip_vs_protocol_init(void)
 {
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
deleted file mode 100644
index 73e0ea8..0000000
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * ip_vs_proto_ah.c:	AH IPSec load balancing support for IPVS
- *
- * Authors:	Julian Anastasov <ja@ssi.bg>, February 2002
- *		Wensong Zhang <wensong@linuxvirtualserver.org>
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		version 2 as published by the Free Software Foundation;
- *
- */
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-
-#include <net/ip_vs.h>
-
-
-/* TODO:
-
-struct isakmp_hdr {
-	__u8		icookie[8];
-	__u8		rcookie[8];
-	__u8		np;
-	__u8		version;
-	__u8		xchgtype;
-	__u8		flags;
-	__u32		msgid;
-	__u32		length;
-};
-
-*/
-
-#define PORT_ISAKMP	500
-
-
-static struct ip_vs_conn *
-ah_conn_in_get(const struct sk_buff *skb,
-	       struct ip_vs_protocol *pp,
-	       const struct iphdr *iph,
-	       unsigned int proto_off,
-	       int inverse)
-{
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_in_get(IPPROTO_UDP,
-				       iph->saddr,
-				       htons(PORT_ISAKMP),
-				       iph->daddr,
-				       htons(PORT_ISAKMP));
-	} else {
-		cp = ip_vs_conn_in_get(IPPROTO_UDP,
-				       iph->daddr,
-				       htons(PORT_ISAKMP),
-				       iph->saddr,
-				       htons(PORT_ISAKMP));
-	}
-
-	if (!cp) {
-		/*
-		 * We are not sure if the packet is from our
-		 * service, so our conn_schedule hook should return NF_ACCEPT
-		 */
-		IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
-			  "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
-			  inverse ? "ICMP+" : "",
-			  pp->name,
-			  NIPQUAD(iph->saddr),
-			  NIPQUAD(iph->daddr));
-	}
-
-	return cp;
-}
-
-
-static struct ip_vs_conn *
-ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		const struct iphdr *iph, unsigned int proto_off, int inverse)
-{
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_out_get(IPPROTO_UDP,
-					iph->saddr,
-					htons(PORT_ISAKMP),
-					iph->daddr,
-					htons(PORT_ISAKMP));
-	} else {
-		cp = ip_vs_conn_out_get(IPPROTO_UDP,
-					iph->daddr,
-					htons(PORT_ISAKMP),
-					iph->saddr,
-					htons(PORT_ISAKMP));
-	}
-
-	if (!cp) {
-		IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
-			  "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
-			  inverse ? "ICMP+" : "",
-			  pp->name,
-			  NIPQUAD(iph->saddr),
-			  NIPQUAD(iph->daddr));
-	}
-
-	return cp;
-}
-
-
-static int
-ah_conn_schedule(struct sk_buff *skb,
-		 struct ip_vs_protocol *pp,
-		 int *verdict, struct ip_vs_conn **cpp)
-{
-	/*
-	 * AH is only related traffic. Pass the packet to IP stack.
-	 */
-	*verdict = NF_ACCEPT;
-	return 0;
-}
-
-
-static void
-ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
-		int offset, const char *msg)
-{
-	char buf[256];
-	struct iphdr _iph, *ih;
-
-	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
-	if (ih == NULL)
-		sprintf(buf, "%s TRUNCATED", pp->name);
-	else
-		sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
-			pp->name, NIPQUAD(ih->saddr),
-			NIPQUAD(ih->daddr));
-
-	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
-}
-
-
-static void ah_init(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
-static void ah_exit(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
-struct ip_vs_protocol ip_vs_protocol_ah = {
-	.name =			"AH",
-	.protocol =		IPPROTO_AH,
-	.num_states =		1,
-	.dont_defrag =		1,
-	.init =			ah_init,
-	.exit =			ah_exit,
-	.conn_schedule =	ah_conn_schedule,
-	.conn_in_get =		ah_conn_in_get,
-	.conn_out_get =		ah_conn_out_get,
-	.snat_handler =		NULL,
-	.dnat_handler =		NULL,
-	.csum_check =		NULL,
-	.state_transition =	NULL,
-	.register_app =		NULL,
-	.unregister_app =	NULL,
-	.app_conn_bind =	NULL,
-	.debug_packet =		ah_debug_packet,
-	.timeout_change =	NULL,		/* ISAKMP */
-	.set_state_timeout =	NULL,
-};
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah_esp.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
new file mode 100644
index 0000000..80ab0c8
--- /dev/null
+++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
@@ -0,0 +1,235 @@
+/*
+ * ip_vs_proto_ah_esp.c:	AH/ESP IPSec load balancing support for IPVS
+ *
+ * Authors:	Julian Anastasov <ja@ssi.bg>, February 2002
+ *		Wensong Zhang <wensong@linuxvirtualserver.org>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		version 2 as published by the Free Software Foundation;
+ *
+ */
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+
+#include <net/ip_vs.h>
+
+
+/* TODO:
+
+struct isakmp_hdr {
+	__u8		icookie[8];
+	__u8		rcookie[8];
+	__u8		np;
+	__u8		version;
+	__u8		xchgtype;
+	__u8		flags;
+	__u32		msgid;
+	__u32		length;
+};
+
+*/
+
+#define PORT_ISAKMP	500
+
+
+static struct ip_vs_conn *
+ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+		   const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		   int inverse)
+{
+	struct ip_vs_conn *cp;
+
+	if (likely(!inverse)) {
+		cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
+				       &iph->saddr,
+				       htons(PORT_ISAKMP),
+				       &iph->daddr,
+				       htons(PORT_ISAKMP));
+	} else {
+		cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
+				       &iph->daddr,
+				       htons(PORT_ISAKMP),
+				       &iph->saddr,
+				       htons(PORT_ISAKMP));
+	}
+
+	if (!cp) {
+		/*
+		 * We are not sure if the packet is from our
+		 * service, so our conn_schedule hook should return NF_ACCEPT
+		 */
+		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
+			      "%s%s %s->%s\n",
+			      inverse ? "ICMP+" : "",
+			      pp->name,
+			      IP_VS_DBG_ADDR(af, &iph->saddr),
+			      IP_VS_DBG_ADDR(af, &iph->daddr));
+	}
+
+	return cp;
+}
+
+
+static struct ip_vs_conn *
+ah_esp_conn_out_get(int af, const struct sk_buff *skb,
+		    struct ip_vs_protocol *pp,
+		    const struct ip_vs_iphdr *iph,
+		    unsigned int proto_off,
+		    int inverse)
+{
+	struct ip_vs_conn *cp;
+
+	if (likely(!inverse)) {
+		cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
+					&iph->saddr,
+					htons(PORT_ISAKMP),
+					&iph->daddr,
+					htons(PORT_ISAKMP));
+	} else {
+		cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
+					&iph->daddr,
+					htons(PORT_ISAKMP),
+					&iph->saddr,
+					htons(PORT_ISAKMP));
+	}
+
+	if (!cp) {
+		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
+			      "%s%s %s->%s\n",
+			      inverse ? "ICMP+" : "",
+			      pp->name,
+			      IP_VS_DBG_ADDR(af, &iph->saddr),
+			      IP_VS_DBG_ADDR(af, &iph->daddr));
+	}
+
+	return cp;
+}
+
+
+static int
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+		     int *verdict, struct ip_vs_conn **cpp)
+{
+	/*
+	 * AH/ESP is only related traffic. Pass the packet to the IP stack.
+	 */
+	*verdict = NF_ACCEPT;
+	return 0;
+}
+
+
+static void
+ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
+		       int offset, const char *msg)
+{
+	char buf[256];
+	struct iphdr _iph, *ih;
+
+	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
+	if (ih == NULL)
+		sprintf(buf, "%s TRUNCATED", pp->name);
+	else
+		sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
+			pp->name, NIPQUAD(ih->saddr),
+			NIPQUAD(ih->daddr));
+
+	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+}
+
+#ifdef CONFIG_IP_VS_IPV6
+static void
+ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
+		       int offset, const char *msg)
+{
+	char buf[256];
+	struct ipv6hdr _iph, *ih;
+
+	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
+	if (ih == NULL)
+		sprintf(buf, "%s TRUNCATED", pp->name);
+	else
+		sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT,
+			pp->name, NIP6(ih->saddr),
+			NIP6(ih->daddr));
+
+	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+}
+#endif
+
+static void
+ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
+		    int offset, const char *msg)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (skb->protocol == htons(ETH_P_IPV6))
+		ah_esp_debug_packet_v6(pp, skb, offset, msg);
+	else
+#endif
+		ah_esp_debug_packet_v4(pp, skb, offset, msg);
+}
+
+
+static void ah_esp_init(struct ip_vs_protocol *pp)
+{
+	/* nothing to do now */
+}
+
+
+static void ah_esp_exit(struct ip_vs_protocol *pp)
+{
+	/* nothing to do now */
+}
+
+
+#ifdef CONFIG_IP_VS_PROTO_AH
+struct ip_vs_protocol ip_vs_protocol_ah = {
+	.name =			"AH",
+	.protocol =		IPPROTO_AH,
+	.num_states =		1,
+	.dont_defrag =		1,
+	.init =			ah_esp_init,
+	.exit =			ah_esp_exit,
+	.conn_schedule =	ah_esp_conn_schedule,
+	.conn_in_get =		ah_esp_conn_in_get,
+	.conn_out_get =		ah_esp_conn_out_get,
+	.snat_handler =		NULL,
+	.dnat_handler =		NULL,
+	.csum_check =		NULL,
+	.state_transition =	NULL,
+	.register_app =		NULL,
+	.unregister_app =	NULL,
+	.app_conn_bind =	NULL,
+	.debug_packet =		ah_esp_debug_packet,
+	.timeout_change =	NULL,		/* ISAKMP */
+	.set_state_timeout =	NULL,
+};
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_ESP
+struct ip_vs_protocol ip_vs_protocol_esp = {
+	.name =			"ESP",
+	.protocol =		IPPROTO_ESP,
+	.num_states =		1,
+	.dont_defrag =		1,
+	.init =			ah_esp_init,
+	.exit =			ah_esp_exit,
+	.conn_schedule =	ah_esp_conn_schedule,
+	.conn_in_get =		ah_esp_conn_in_get,
+	.conn_out_get =		ah_esp_conn_out_get,
+	.snat_handler =		NULL,
+	.dnat_handler =		NULL,
+	.csum_check =		NULL,
+	.state_transition =	NULL,
+	.register_app =		NULL,
+	.unregister_app =	NULL,
+	.app_conn_bind =	NULL,
+	.debug_packet =		ah_esp_debug_packet,
+	.timeout_change =	NULL,		/* ISAKMP */
+};
+#endif
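The new ip_vs_proto_ah_esp.c above registers two ip_vs_protocol instances, AH and ESP, that differ only in .name and .protocol while sharing every handler, each compiled in under its own CONFIG_IP_VS_PROTO_* option. The shape of that "one implementation, two descriptors" arrangement, reduced to standalone C (the struct here is a toy, not the kernel's ip_vs_protocol):

#include <stdio.h>

#define IPPROTO_AH  51
#define IPPROTO_ESP 50

struct protocol {
	const char *name;
	int         protocol;
	void      (*debug_packet)(const struct protocol *pp, const char *msg);
};

/* One handler serves both descriptors, exactly as ah_esp_debug_packet() does. */
static void ah_esp_debug_packet(const struct protocol *pp, const char *msg)
{
	printf("IPVS: %s: %s\n", msg, pp->name);
}

static const struct protocol proto_ah = {
	.name = "AH",  .protocol = IPPROTO_AH,  .debug_packet = ah_esp_debug_packet,
};
static const struct protocol proto_esp = {
	.name = "ESP", .protocol = IPPROTO_ESP, .debug_packet = ah_esp_debug_packet,
};

int main(void)
{
	proto_ah.debug_packet(&proto_ah,   "sample AH packet");
	proto_esp.debug_packet(&proto_esp, "sample ESP packet");
	return 0;
}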
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
deleted file mode 100644
index 21d70c8..0000000
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * ip_vs_proto_esp.c:	ESP IPSec load balancing support for IPVS
- *
- * Authors:	Julian Anastasov <ja@ssi.bg>, February 2002
- *		Wensong Zhang <wensong@linuxvirtualserver.org>
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		version 2 as published by the Free Software Foundation;
- *
- */
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-
-#include <net/ip_vs.h>
-
-
-/* TODO:
-
-struct isakmp_hdr {
-	__u8		icookie[8];
-	__u8		rcookie[8];
-	__u8		np;
-	__u8		version;
-	__u8		xchgtype;
-	__u8		flags;
-	__u32		msgid;
-	__u32		length;
-};
-
-*/
-
-#define PORT_ISAKMP	500
-
-
-static struct ip_vs_conn *
-esp_conn_in_get(const struct sk_buff *skb,
-		struct ip_vs_protocol *pp,
-		const struct iphdr *iph,
-		unsigned int proto_off,
-		int inverse)
-{
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_in_get(IPPROTO_UDP,
-				       iph->saddr,
-				       htons(PORT_ISAKMP),
-				       iph->daddr,
-				       htons(PORT_ISAKMP));
-	} else {
-		cp = ip_vs_conn_in_get(IPPROTO_UDP,
-				       iph->daddr,
-				       htons(PORT_ISAKMP),
-				       iph->saddr,
-				       htons(PORT_ISAKMP));
-	}
-
-	if (!cp) {
-		/*
-		 * We are not sure if the packet is from our
-		 * service, so our conn_schedule hook should return NF_ACCEPT
-		 */
-		IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
-			  "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
-			  inverse ? "ICMP+" : "",
-			  pp->name,
-			  NIPQUAD(iph->saddr),
-			  NIPQUAD(iph->daddr));
-	}
-
-	return cp;
-}
-
-
-static struct ip_vs_conn *
-esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		 const struct iphdr *iph, unsigned int proto_off, int inverse)
-{
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_out_get(IPPROTO_UDP,
-					iph->saddr,
-					htons(PORT_ISAKMP),
-					iph->daddr,
-					htons(PORT_ISAKMP));
-	} else {
-		cp = ip_vs_conn_out_get(IPPROTO_UDP,
-					iph->daddr,
-					htons(PORT_ISAKMP),
-					iph->saddr,
-					htons(PORT_ISAKMP));
-	}
-
-	if (!cp) {
-		IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
-			  "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
-			  inverse ? "ICMP+" : "",
-			  pp->name,
-			  NIPQUAD(iph->saddr),
-			  NIPQUAD(iph->daddr));
-	}
-
-	return cp;
-}
-
-
-static int
-esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
-		  int *verdict, struct ip_vs_conn **cpp)
-{
-	/*
-	 * ESP is only related traffic. Pass the packet to IP stack.
-	 */
-	*verdict = NF_ACCEPT;
-	return 0;
-}
-
-
-static void
-esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
-		 int offset, const char *msg)
-{
-	char buf[256];
-	struct iphdr _iph, *ih;
-
-	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
-	if (ih == NULL)
-		sprintf(buf, "%s TRUNCATED", pp->name);
-	else
-		sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
-			pp->name, NIPQUAD(ih->saddr),
-			NIPQUAD(ih->daddr));
-
-	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
-}
-
-
-static void esp_init(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
-static void esp_exit(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
-struct ip_vs_protocol ip_vs_protocol_esp = {
-	.name =			"ESP",
-	.protocol =		IPPROTO_ESP,
-	.num_states =		1,
-	.dont_defrag =		1,
-	.init =			esp_init,
-	.exit =			esp_exit,
-	.conn_schedule =	esp_conn_schedule,
-	.conn_in_get =		esp_conn_in_get,
-	.conn_out_get =		esp_conn_out_get,
-	.snat_handler =		NULL,
-	.dnat_handler =		NULL,
-	.csum_check =		NULL,
-	.state_transition =	NULL,
-	.register_app =		NULL,
-	.unregister_app =	NULL,
-	.app_conn_bind =	NULL,
-	.debug_packet =		esp_debug_packet,
-	.timeout_change =	NULL,		/* ISAKMP */
-};
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index d0ea467..dd4566e 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -18,6 +18,7 @@
 #include <linux/tcp.h>                  /* for tcphdr */
 #include <net/ip.h>
 #include <net/tcp.h>                    /* for csum_tcpudp_magic */
+#include <net/ip6_checksum.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
 
@@ -25,8 +26,9 @@
 
 
 static struct ip_vs_conn *
-tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		const struct iphdr *iph, unsigned int proto_off, int inverse)
+tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+		const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		int inverse)
 {
 	__be16 _ports[2], *pptr;
 
@@ -35,19 +37,20 @@
 		return NULL;
 
 	if (likely(!inverse)) {
-		return ip_vs_conn_in_get(iph->protocol,
-					 iph->saddr, pptr[0],
-					 iph->daddr, pptr[1]);
+		return ip_vs_conn_in_get(af, iph->protocol,
+					 &iph->saddr, pptr[0],
+					 &iph->daddr, pptr[1]);
 	} else {
-		return ip_vs_conn_in_get(iph->protocol,
-					 iph->daddr, pptr[1],
-					 iph->saddr, pptr[0]);
+		return ip_vs_conn_in_get(af, iph->protocol,
+					 &iph->daddr, pptr[1],
+					 &iph->saddr, pptr[0]);
 	}
 }
 
 static struct ip_vs_conn *
-tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		 const struct iphdr *iph, unsigned int proto_off, int inverse)
+tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		 int inverse)
 {
 	__be16 _ports[2], *pptr;
 
@@ -56,34 +59,36 @@
 		return NULL;
 
 	if (likely(!inverse)) {
-		return ip_vs_conn_out_get(iph->protocol,
-					  iph->saddr, pptr[0],
-					  iph->daddr, pptr[1]);
+		return ip_vs_conn_out_get(af, iph->protocol,
+					  &iph->saddr, pptr[0],
+					  &iph->daddr, pptr[1]);
 	} else {
-		return ip_vs_conn_out_get(iph->protocol,
-					  iph->daddr, pptr[1],
-					  iph->saddr, pptr[0]);
+		return ip_vs_conn_out_get(af, iph->protocol,
+					  &iph->daddr, pptr[1],
+					  &iph->saddr, pptr[0]);
 	}
 }
 
 
 static int
-tcp_conn_schedule(struct sk_buff *skb,
-		  struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 		  int *verdict, struct ip_vs_conn **cpp)
 {
 	struct ip_vs_service *svc;
 	struct tcphdr _tcph, *th;
+	struct ip_vs_iphdr iph;
 
-	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
+	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+
+	th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
 	if (th == NULL) {
 		*verdict = NF_DROP;
 		return 0;
 	}
 
 	if (th->syn &&
-	    (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
-				     ip_hdr(skb)->daddr, th->dest))) {
+	    (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
+				     th->dest))) {
 		if (ip_vs_todrop()) {
 			/*
 			 * It seems that we are very loaded.
@@ -110,22 +115,62 @@
 
 
 static inline void
-tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip,
+tcp_fast_csum_update(int af, struct tcphdr *tcph,
+		     const union nf_inet_addr *oldip,
+		     const union nf_inet_addr *newip,
 		     __be16 oldport, __be16 newport)
 {
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		tcph->check =
+			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
+					 ip_vs_check_diff2(oldport, newport,
+						~csum_unfold(tcph->check))));
+	else
+#endif
 	tcph->check =
-		csum_fold(ip_vs_check_diff4(oldip, newip,
+		csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
 				 ip_vs_check_diff2(oldport, newport,
 						~csum_unfold(tcph->check))));
 }
 
 
+static inline void
+tcp_partial_csum_update(int af, struct tcphdr *tcph,
+		     const union nf_inet_addr *oldip,
+		     const union nf_inet_addr *newip,
+		     __be16 oldlen, __be16 newlen)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		tcph->check =
+			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
+					 ip_vs_check_diff2(oldlen, newlen,
+						~csum_unfold(tcph->check))));
+	else
+#endif
+	tcph->check =
+		csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
+				ip_vs_check_diff2(oldlen, newlen,
+						~csum_unfold(tcph->check))));
+}
+
+
 static int
 tcp_snat_handler(struct sk_buff *skb,
 		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
 	struct tcphdr *tcph;
-	const unsigned int tcphoff = ip_hdrlen(skb);
+	unsigned int tcphoff;
+	int oldlen;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6)
+		tcphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		tcphoff = ip_hdrlen(skb);
+	oldlen = skb->len - tcphoff;
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -133,7 +178,7 @@
 
 	if (unlikely(cp->app != NULL)) {
 		/* Some checks before mangling */
-		if (pp->csum_check && !pp->csum_check(skb, pp))
+		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/* Call application helper if needed */
@@ -141,13 +186,17 @@
 			return 0;
 	}
 
-	tcph = (void *)ip_hdr(skb) + tcphoff;
+	tcph = (void *)skb_network_header(skb) + tcphoff;
 	tcph->source = cp->vport;
 
 	/* Adjust TCP checksums */
-	if (!cp->app) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
+					htonl(oldlen),
+					htonl(skb->len - tcphoff));
+	} else if (!cp->app) {
 		/* Only port and addr are changed, do fast csum update */
-		tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr,
+		tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
 				     cp->dport, cp->vport);
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
@@ -155,9 +204,20 @@
 		/* full checksum calculation */
 		tcph->check = 0;
 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
-		tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
-						skb->len - tcphoff,
-						cp->protocol, skb->csum);
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
+						      &cp->caddr.in6,
+						      skb->len - tcphoff,
+						      cp->protocol, skb->csum);
+		else
+#endif
+			tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
+							cp->caddr.ip,
+							skb->len - tcphoff,
+							cp->protocol,
+							skb->csum);
+
 		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
 			  pp->name, tcph->check,
 			  (char*)&(tcph->check) - (char*)tcph);
@@ -171,7 +231,16 @@
 		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
 	struct tcphdr *tcph;
-	const unsigned int tcphoff = ip_hdrlen(skb);
+	unsigned int tcphoff;
+	int oldlen;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6)
+		tcphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		tcphoff = ip_hdrlen(skb);
+	oldlen = skb->len - tcphoff;
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -179,7 +248,7 @@
 
 	if (unlikely(cp->app != NULL)) {
 		/* Some checks before mangling */
-		if (pp->csum_check && !pp->csum_check(skb, pp))
+		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/*
@@ -190,15 +259,19 @@
 			return 0;
 	}
 
-	tcph = (void *)ip_hdr(skb) + tcphoff;
+	tcph = (void *)skb_network_header(skb) + tcphoff;
 	tcph->dest = cp->dport;
 
 	/*
 	 *	Adjust TCP checksums
 	 */
-	if (!cp->app) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
+					htonl(oldlen),
+					htonl(skb->len - tcphoff));
+	} else if (!cp->app) {
 		/* Only port and addr are changed, do fast csum update */
-		tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr,
+		tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
 				     cp->vport, cp->dport);
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
@@ -206,9 +279,19 @@
 		/* full checksum calculation */
 		tcph->check = 0;
 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
-		tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
-						skb->len - tcphoff,
-						cp->protocol, skb->csum);
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			tcph->check = csum_ipv6_magic(&cp->caddr.in6,
+						      &cp->daddr.in6,
+						      skb->len - tcphoff,
+						      cp->protocol, skb->csum);
+		else
+#endif
+			tcph->check = csum_tcpudp_magic(cp->caddr.ip,
+							cp->daddr.ip,
+							skb->len - tcphoff,
+							cp->protocol,
+							skb->csum);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	return 1;
@@ -216,21 +299,43 @@
 
 
 static int
-tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
+tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 {
-	const unsigned int tcphoff = ip_hdrlen(skb);
+	unsigned int tcphoff;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		tcphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		tcphoff = ip_hdrlen(skb);
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_NONE:
 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
 	case CHECKSUM_COMPLETE:
-		if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				      skb->len - tcphoff,
-				      ip_hdr(skb)->protocol, skb->csum)) {
-			IP_VS_DBG_RL_PKT(0, pp, skb, 0,
-					 "Failed checksum for");
-			return 0;
-		}
+#ifdef CONFIG_IP_VS_IPV6
+		if (af == AF_INET6) {
+			if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					    &ipv6_hdr(skb)->daddr,
+					    skb->len - tcphoff,
+					    ipv6_hdr(skb)->nexthdr,
+					    skb->csum)) {
+				IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+						 "Failed checksum for");
+				return 0;
+			}
+		} else
+#endif
+			if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
+					      ip_hdr(skb)->daddr,
+					      skb->len - tcphoff,
+					      ip_hdr(skb)->protocol,
+					      skb->csum)) {
+				IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+						 "Failed checksum for");
+				return 0;
+			}
 		break;
 	default:
 		/* No need to checksum. */
@@ -419,19 +524,23 @@
 	if (new_state != cp->state) {
 		struct ip_vs_dest *dest = cp->dest;
 
-		IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->"
-			  "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n",
-			  pp->name,
-			  (state_off==TCP_DIR_OUTPUT)?"output ":"input ",
-			  th->syn? 'S' : '.',
-			  th->fin? 'F' : '.',
-			  th->ack? 'A' : '.',
-			  th->rst? 'R' : '.',
-			  NIPQUAD(cp->daddr), ntohs(cp->dport),
-			  NIPQUAD(cp->caddr), ntohs(cp->cport),
-			  tcp_state_name(cp->state),
-			  tcp_state_name(new_state),
-			  atomic_read(&cp->refcnt));
+		IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
+			      "%s:%d state: %s->%s conn->refcnt:%d\n",
+			      pp->name,
+			      ((state_off == TCP_DIR_OUTPUT) ?
+			       "output " : "input "),
+			      th->syn ? 'S' : '.',
+			      th->fin ? 'F' : '.',
+			      th->ack ? 'A' : '.',
+			      th->rst ? 'R' : '.',
+			      IP_VS_DBG_ADDR(cp->af, &cp->daddr),
+			      ntohs(cp->dport),
+			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+			      ntohs(cp->cport),
+			      tcp_state_name(cp->state),
+			      tcp_state_name(new_state),
+			      atomic_read(&cp->refcnt));
+
 		if (dest) {
 			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
 			    (new_state != IP_VS_TCP_S_ESTABLISHED)) {
@@ -461,7 +570,13 @@
 {
 	struct tcphdr _tcph, *th;
 
-	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
+#ifdef CONFIG_IP_VS_IPV6
+	int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
+#else
+	int ihl = ip_hdrlen(skb);
+#endif
+
+	th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
 	if (th == NULL)
 		return 0;
 
@@ -546,12 +661,15 @@
 				break;
 			spin_unlock(&tcp_app_lock);
 
-			IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
-				  "%u.%u.%u.%u:%u to app %s on port %u\n",
-				  __func__,
-				  NIPQUAD(cp->caddr), ntohs(cp->cport),
-				  NIPQUAD(cp->vaddr), ntohs(cp->vport),
-				  inc->name, ntohs(inc->port));
+			IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
+				      "%s:%u to app %s on port %u\n",
+				      __func__,
+				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+				      ntohs(cp->cport),
+				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+				      ntohs(cp->vport),
+				      inc->name, ntohs(inc->port));
+
 			cp->app = inc;
 			if (inc->init_conn)
 				result = inc->init_conn(inc, cp);
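tcp_fast_csum_update() and tcp_partial_csum_update() above avoid recomputing the whole TCP checksum: they fold only the difference between the old and new address/port (or length) into the existing value, which is the incremental update of RFC 1624 (HC' = ~(~HC + ~m + m')). A self-contained demo of that identity on 16-bit words; the helper names below are invented for the example:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement 16-bit add with end-around carry. */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* Full Internet checksum over 16-bit words (host order, for the demo). */
static uint16_t csum_full(const uint16_t *w, size_t n)
{
	uint16_t sum = 0;
	for (size_t i = 0; i < n; i++)
		sum = csum16_add(sum, w[i]);
	return (uint16_t)~sum;
}

/* RFC 1624, eq. 3: HC' = ~(~HC + ~m + m') -- update for one changed word. */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_val)
{
	return (uint16_t)~csum16_add(csum16_add((uint16_t)~check, (uint16_t)~old),
				     new_val);
}

int main(void)
{
	uint16_t words[] = { 0xc0a8, 0x0001, 0x1f90, 0x0050, 0x0400 };
	uint16_t before = csum_full(words, 5);

	uint16_t old = words[3];
	words[3] = 0x01bb;			/* e.g. port 80 -> 443 */

	uint16_t incremental = csum_update16(before, old, words[3]);
	uint16_t recomputed  = csum_full(words, 5);

	printf("incremental 0x%04x, recomputed 0x%04x\n", incremental, recomputed);
	return 0;
}

The two printed values match, which is the whole point: only the changed word has to be revisited, not the packet payload.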
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index c6be5d5..6eb6039 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -22,10 +22,12 @@
 
 #include <net/ip_vs.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 
 static struct ip_vs_conn *
-udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		const struct iphdr *iph, unsigned int proto_off, int inverse)
+udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+		const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		int inverse)
 {
 	struct ip_vs_conn *cp;
 	__be16 _ports[2], *pptr;
@@ -35,13 +37,13 @@
 		return NULL;
 
 	if (likely(!inverse)) {
-		cp = ip_vs_conn_in_get(iph->protocol,
-				       iph->saddr, pptr[0],
-				       iph->daddr, pptr[1]);
+		cp = ip_vs_conn_in_get(af, iph->protocol,
+				       &iph->saddr, pptr[0],
+				       &iph->daddr, pptr[1]);
 	} else {
-		cp = ip_vs_conn_in_get(iph->protocol,
-				       iph->daddr, pptr[1],
-				       iph->saddr, pptr[0]);
+		cp = ip_vs_conn_in_get(af, iph->protocol,
+				       &iph->daddr, pptr[1],
+				       &iph->saddr, pptr[0]);
 	}
 
 	return cp;
@@ -49,25 +51,25 @@
 
 
 static struct ip_vs_conn *
-udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		 const struct iphdr *iph, unsigned int proto_off, int inverse)
+udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+		 const struct ip_vs_iphdr *iph, unsigned int proto_off,
+		 int inverse)
 {
 	struct ip_vs_conn *cp;
 	__be16 _ports[2], *pptr;
 
-	pptr = skb_header_pointer(skb, ip_hdrlen(skb),
-				  sizeof(_ports), _ports);
+	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return NULL;
 
 	if (likely(!inverse)) {
-		cp = ip_vs_conn_out_get(iph->protocol,
-					iph->saddr, pptr[0],
-					iph->daddr, pptr[1]);
+		cp = ip_vs_conn_out_get(af, iph->protocol,
+					&iph->saddr, pptr[0],
+					&iph->daddr, pptr[1]);
 	} else {
-		cp = ip_vs_conn_out_get(iph->protocol,
-					iph->daddr, pptr[1],
-					iph->saddr, pptr[0]);
+		cp = ip_vs_conn_out_get(af, iph->protocol,
+					&iph->daddr, pptr[1],
+					&iph->saddr, pptr[0]);
 	}
 
 	return cp;
@@ -75,21 +77,24 @@
 
 
 static int
-udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 		  int *verdict, struct ip_vs_conn **cpp)
 {
 	struct ip_vs_service *svc;
 	struct udphdr _udph, *uh;
+	struct ip_vs_iphdr iph;
 
-	uh = skb_header_pointer(skb, ip_hdrlen(skb),
-				sizeof(_udph), &_udph);
+	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+
+	uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
 	if (uh == NULL) {
 		*verdict = NF_DROP;
 		return 0;
 	}
 
-	if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
-				     ip_hdr(skb)->daddr, uh->dest))) {
+	svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+				&iph.daddr, uh->dest);
+	if (svc) {
 		if (ip_vs_todrop()) {
 			/*
 			 * It seems that we are very loaded.
@@ -116,23 +121,63 @@
 
 
 static inline void
-udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip,
+udp_fast_csum_update(int af, struct udphdr *uhdr,
+		     const union nf_inet_addr *oldip,
+		     const union nf_inet_addr *newip,
 		     __be16 oldport, __be16 newport)
 {
-	uhdr->check =
-		csum_fold(ip_vs_check_diff4(oldip, newip,
-				 ip_vs_check_diff2(oldport, newport,
-					~csum_unfold(uhdr->check))));
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		uhdr->check =
+			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
+					 ip_vs_check_diff2(oldport, newport,
+						~csum_unfold(uhdr->check))));
+	else
+#endif
+		uhdr->check =
+			csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
+					 ip_vs_check_diff2(oldport, newport,
+						~csum_unfold(uhdr->check))));
 	if (!uhdr->check)
 		uhdr->check = CSUM_MANGLED_0;
 }
 
+static inline void
+udp_partial_csum_update(int af, struct udphdr *uhdr,
+		     const union nf_inet_addr *oldip,
+		     const union nf_inet_addr *newip,
+		     __be16 oldlen, __be16 newlen)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		uhdr->check =
+			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
+					 ip_vs_check_diff2(oldlen, newlen,
+						~csum_unfold(uhdr->check))));
+	else
+#endif
+	uhdr->check =
+		csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
+				ip_vs_check_diff2(oldlen, newlen,
+						~csum_unfold(uhdr->check))));
+}
+
+
 static int
 udp_snat_handler(struct sk_buff *skb,
 		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
 	struct udphdr *udph;
-	const unsigned int udphoff = ip_hdrlen(skb);
+	unsigned int udphoff;
+	int oldlen;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6)
+		udphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		udphoff = ip_hdrlen(skb);
+	oldlen = skb->len - udphoff;
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -140,7 +185,7 @@
 
 	if (unlikely(cp->app != NULL)) {
 		/* Some checks before mangling */
-		if (pp->csum_check && !pp->csum_check(skb, pp))
+		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/*
@@ -150,15 +195,19 @@
 			return 0;
 	}
 
-	udph = (void *)ip_hdr(skb) + udphoff;
+	udph = (void *)skb_network_header(skb) + udphoff;
 	udph->source = cp->vport;
 
 	/*
 	 *	Adjust UDP checksums
 	 */
-	if (!cp->app && (udph->check != 0)) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
+					htonl(oldlen),
+					htonl(skb->len - udphoff));
+	} else if (!cp->app && (udph->check != 0)) {
 		/* Only port and addr are changed, do fast csum update */
-		udp_fast_csum_update(udph, cp->daddr, cp->vaddr,
+		udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
 				     cp->dport, cp->vport);
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
@@ -166,9 +215,19 @@
 		/* full checksum calculation */
 		udph->check = 0;
 		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
-		udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
-						skb->len - udphoff,
-						cp->protocol, skb->csum);
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			udph->check = csum_ipv6_magic(&cp->vaddr.in6,
+						      &cp->caddr.in6,
+						      skb->len - udphoff,
+						      cp->protocol, skb->csum);
+		else
+#endif
+			udph->check = csum_tcpudp_magic(cp->vaddr.ip,
+							cp->caddr.ip,
+							skb->len - udphoff,
+							cp->protocol,
+							skb->csum);
 		if (udph->check == 0)
 			udph->check = CSUM_MANGLED_0;
 		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
@@ -184,7 +243,16 @@
 		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
 	struct udphdr *udph;
-	unsigned int udphoff = ip_hdrlen(skb);
+	unsigned int udphoff;
+	int oldlen;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6)
+		udphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		udphoff = ip_hdrlen(skb);
+	oldlen = skb->len - udphoff;
 
 	/* csum_check requires unshared skb */
 	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -192,7 +260,7 @@
 
 	if (unlikely(cp->app != NULL)) {
 		/* Some checks before mangling */
-		if (pp->csum_check && !pp->csum_check(skb, pp))
+		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/*
@@ -203,15 +271,19 @@
 			return 0;
 	}
 
-	udph = (void *)ip_hdr(skb) + udphoff;
+	udph = (void *)skb_network_header(skb) + udphoff;
 	udph->dest = cp->dport;
 
 	/*
 	 *	Adjust UDP checksums
 	 */
-	if (!cp->app && (udph->check != 0)) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
+					htonl(oldlen),
+					htonl(skb->len - udphoff));
+	} else if (!cp->app && (udph->check != 0)) {
 		/* Only port and addr are changed, do fast csum update */
-		udp_fast_csum_update(udph, cp->vaddr, cp->daddr,
+		udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
 				     cp->vport, cp->dport);
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
@@ -219,9 +291,19 @@
 		/* full checksum calculation */
 		udph->check = 0;
 		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
-		udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
-						skb->len - udphoff,
-						cp->protocol, skb->csum);
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			udph->check = csum_ipv6_magic(&cp->caddr.in6,
+						      &cp->daddr.in6,
+						      skb->len - udphoff,
+						      cp->protocol, skb->csum);
+		else
+#endif
+			udph->check = csum_tcpudp_magic(cp->caddr.ip,
+							cp->daddr.ip,
+							skb->len - udphoff,
+							cp->protocol,
+							skb->csum);
 		if (udph->check == 0)
 			udph->check = CSUM_MANGLED_0;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -231,10 +313,17 @@
 
 
 static int
-udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
+udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 {
 	struct udphdr _udph, *uh;
-	const unsigned int udphoff = ip_hdrlen(skb);
+	unsigned int udphoff;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		udphoff = sizeof(struct ipv6hdr);
+	else
+#endif
+		udphoff = ip_hdrlen(skb);
 
 	uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
 	if (uh == NULL)
@@ -246,15 +335,28 @@
 			skb->csum = skb_checksum(skb, udphoff,
 						 skb->len - udphoff, 0);
 		case CHECKSUM_COMPLETE:
-			if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
-					      ip_hdr(skb)->daddr,
-					      skb->len - udphoff,
-					      ip_hdr(skb)->protocol,
-					      skb->csum)) {
-				IP_VS_DBG_RL_PKT(0, pp, skb, 0,
-						 "Failed checksum for");
-				return 0;
-			}
+#ifdef CONFIG_IP_VS_IPV6
+			if (af == AF_INET6) {
+				if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						    &ipv6_hdr(skb)->daddr,
+						    skb->len - udphoff,
+						    ipv6_hdr(skb)->nexthdr,
+						    skb->csum)) {
+					IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+							 "Failed checksum for");
+					return 0;
+				}
+			} else
+#endif
+				if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						      ip_hdr(skb)->daddr,
+						      skb->len - udphoff,
+						      ip_hdr(skb)->protocol,
+						      skb->csum)) {
+					IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+							 "Failed checksum for");
+					return 0;
+				}
 			break;
 		default:
 			/* No need to checksum. */
@@ -340,12 +442,15 @@
 				break;
 			spin_unlock(&udp_app_lock);
 
-			IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
-				  "%u.%u.%u.%u:%u to app %s on port %u\n",
-				  __func__,
-				  NIPQUAD(cp->caddr), ntohs(cp->cport),
-				  NIPQUAD(cp->vaddr), ntohs(cp->vport),
-				  inc->name, ntohs(inc->port));
+			IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
+				      "%s:%u to app %s on port %u\n",
+				      __func__,
+				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+				      ntohs(cp->cport),
+				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+				      ntohs(cp->vport),
+				      inc->name, ntohs(inc->port));
+
 			cp->app = inc;
 			if (inc->init_conn)
 				result = inc->init_conn(inc, cp);
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 358110d..a22195f 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -32,12 +32,6 @@
 }
 
 
-static int ip_vs_rr_done_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
 {
 	svc->sched_data = &svc->destinations;
@@ -80,11 +74,11 @@
   out:
 	svc->sched_data = q;
 	write_unlock(&svc->sched_lock);
-	IP_VS_DBG(6, "RR: server %u.%u.%u.%u:%u "
-		  "activeconns %d refcnt %d weight %d\n",
-		  NIPQUAD(dest->addr), ntohs(dest->port),
-		  atomic_read(&dest->activeconns),
-		  atomic_read(&dest->refcnt), atomic_read(&dest->weight));
+	IP_VS_DBG_BUF(6, "RR: server %s:%u "
+		      "activeconns %d refcnt %d weight %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
+		      atomic_read(&dest->activeconns),
+		      atomic_read(&dest->refcnt), atomic_read(&dest->weight));
 
 	return dest;
 }
@@ -95,8 +89,10 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.init_service =		ip_vs_rr_init_svc,
-	.done_service =		ip_vs_rr_done_svc,
 	.update_service =	ip_vs_rr_update_svc,
 	.schedule =		ip_vs_rr_schedule,
 };
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 77663d8..7d2f22f 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -41,27 +41,6 @@
 #include <net/ip_vs.h>
 
 
-static int
-ip_vs_sed_init_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_sed_done_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_sed_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline unsigned int
 ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
 {
@@ -122,12 +101,12 @@
 		}
 	}
 
-	IP_VS_DBG(6, "SED: server %u.%u.%u.%u:%u "
-		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
-		  atomic_read(&least->activeconns),
-		  atomic_read(&least->refcnt),
-		  atomic_read(&least->weight), loh);
+	IP_VS_DBG_BUF(6, "SED: server %s:%u "
+		      "activeconns %d refcnt %d weight %d overhead %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+		      atomic_read(&least->activeconns),
+		      atomic_read(&least->refcnt),
+		      atomic_read(&least->weight), loh);
 
 	return least;
 }
@@ -139,9 +118,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
-	.init_service =		ip_vs_sed_init_svc,
-	.done_service =		ip_vs_sed_done_svc,
-	.update_service =	ip_vs_sed_update_svc,
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.schedule =		ip_vs_sed_schedule,
 };
 
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 7b979e2..1d96de2 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -215,7 +215,7 @@
 	IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u "
 		  "--> server %u.%u.%u.%u:%d\n",
 		  NIPQUAD(iph->saddr),
-		  NIPQUAD(dest->addr),
+		  NIPQUAD(dest->addr.ip),
 		  ntohs(dest->port));
 
 	return dest;
@@ -231,6 +231,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list	 =		LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	0,
+#endif
 	.init_service =		ip_vs_sh_init_svc,
 	.done_service =		ip_vs_sh_done_svc,
 	.update_service =	ip_vs_sh_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index a652da2..28237a5 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -256,9 +256,9 @@
 	s->cport = cp->cport;
 	s->vport = cp->vport;
 	s->dport = cp->dport;
-	s->caddr = cp->caddr;
-	s->vaddr = cp->vaddr;
-	s->daddr = cp->daddr;
+	s->caddr = cp->caddr.ip;
+	s->vaddr = cp->vaddr.ip;
+	s->daddr = cp->daddr.ip;
 	s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
 	s->state = htons(cp->state);
 	if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
@@ -366,21 +366,28 @@
 		}
 
 		if (!(flags & IP_VS_CONN_F_TEMPLATE))
-			cp = ip_vs_conn_in_get(s->protocol,
-					       s->caddr, s->cport,
-					       s->vaddr, s->vport);
+			cp = ip_vs_conn_in_get(AF_INET, s->protocol,
+					       (union nf_inet_addr *)&s->caddr,
+					       s->cport,
+					       (union nf_inet_addr *)&s->vaddr,
+					       s->vport);
 		else
-			cp = ip_vs_ct_in_get(s->protocol,
-					       s->caddr, s->cport,
-					       s->vaddr, s->vport);
+			cp = ip_vs_ct_in_get(AF_INET, s->protocol,
+					     (union nf_inet_addr *)&s->caddr,
+					     s->cport,
+					     (union nf_inet_addr *)&s->vaddr,
+					     s->vport);
 		if (!cp) {
 			/*
 			 * Find the appropriate destination for the connection.
 			 * If it is not found the connection will remain unbound
 			 * but still handled.
 			 */
-			dest = ip_vs_find_dest(s->daddr, s->dport,
-					       s->vaddr, s->vport,
+			dest = ip_vs_find_dest(AF_INET,
+					       (union nf_inet_addr *)&s->daddr,
+					       s->dport,
+					       (union nf_inet_addr *)&s->vaddr,
+					       s->vport,
 					       s->protocol);
 			/*  Set the appropriate activity flag */
 			if (s->protocol == IPPROTO_TCP) {
@@ -389,10 +396,13 @@
 				else
 					flags &= ~IP_VS_CONN_F_INACTIVE;
 			}
-			cp = ip_vs_conn_new(s->protocol,
-					    s->caddr, s->cport,
-					    s->vaddr, s->vport,
-					    s->daddr, s->dport,
+			cp = ip_vs_conn_new(AF_INET, s->protocol,
+					    (union nf_inet_addr *)&s->caddr,
+					    s->cport,
+					    (union nf_inet_addr *)&s->vaddr,
+					    s->vport,
+					    (union nf_inet_addr *)&s->daddr,
+					    s->dport,
 					    flags, dest);
 			if (dest)
 				atomic_dec(&dest->refcnt);
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 9b0ef86..8c596e7 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -25,27 +25,6 @@
 #include <net/ip_vs.h>
 
 
-static int
-ip_vs_wlc_init_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_wlc_done_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
-static int
-ip_vs_wlc_update_svc(struct ip_vs_service *svc)
-{
-	return 0;
-}
-
-
 static inline unsigned int
 ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
 {
@@ -110,12 +89,12 @@
 		}
 	}
 
-	IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u "
-		  "activeconns %d refcnt %d weight %d overhead %d\n",
-		  NIPQUAD(least->addr), ntohs(least->port),
-		  atomic_read(&least->activeconns),
-		  atomic_read(&least->refcnt),
-		  atomic_read(&least->weight), loh);
+	IP_VS_DBG_BUF(6, "WLC: server %s:%u "
+		      "activeconns %d refcnt %d weight %d overhead %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+		      atomic_read(&least->activeconns),
+		      atomic_read(&least->refcnt),
+		      atomic_read(&least->weight), loh);
 
 	return least;
 }
@@ -127,9 +106,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
-	.init_service =		ip_vs_wlc_init_svc,
-	.done_service =		ip_vs_wlc_done_svc,
-	.update_service =	ip_vs_wlc_update_svc,
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.schedule =		ip_vs_wlc_schedule,
 };
 
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 0d86a79..7ea92fe 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -195,12 +195,12 @@
 		}
 	}
 
-	IP_VS_DBG(6, "WRR: server %u.%u.%u.%u:%u "
-		  "activeconns %d refcnt %d weight %d\n",
-		  NIPQUAD(dest->addr), ntohs(dest->port),
-		  atomic_read(&dest->activeconns),
-		  atomic_read(&dest->refcnt),
-		  atomic_read(&dest->weight));
+	IP_VS_DBG_BUF(6, "WRR: server %s:%u "
+		      "activeconns %d refcnt %d weight %d\n",
+		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
+		      atomic_read(&dest->activeconns),
+		      atomic_read(&dest->refcnt),
+		      atomic_read(&dest->weight));
 
   out:
 	write_unlock(&svc->sched_lock);
@@ -213,6 +213,9 @@
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
 	.n_list =		LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
+#ifdef CONFIG_IP_VS_IPV6
+	.supports_ipv6 =	1,
+#endif
 	.init_service =		ip_vs_wrr_init_svc,
 	.done_service =		ip_vs_wrr_done_svc,
 	.update_service =	ip_vs_wrr_update_svc,
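
The scheduler conversions above all route their debug output through an address-family-aware formatter instead of NIPQUAD. A minimal userspace sketch of that idea follows; addr_union and fmt_addr() are illustrative stand-ins, not the kernel's nf_inet_addr or IP_VS_DBG_ADDR.

/* Sketch of af-aware address formatting for dual-stack debug output. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>

union addr_union {
	struct in_addr  ip;	/* AF_INET */
	struct in6_addr in6;	/* AF_INET6 */
};

static const char *fmt_addr(int af, const union addr_union *a,
			    char *buf, socklen_t len)
{
	return inet_ntop(af, af == AF_INET6 ? (const void *)&a->in6
					    : (const void *)&a->ip, buf, len);
}

int main(void)
{
	union addr_union a;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.10", &a.ip);
	printf("v4 server %s:%u\n", fmt_addr(AF_INET, &a, buf, sizeof(buf)), 80);

	inet_pton(AF_INET6, "2001:db8::1", &a.in6);
	printf("v6 server %s:%u\n", fmt_addr(AF_INET6, &a, buf, sizeof(buf)), 80);
	return 0;
}
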
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 9892d4a..02ddc2b 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -20,6 +20,9 @@
 #include <net/udp.h>
 #include <net/icmp.h>                   /* for icmp_send */
 #include <net/route.h>                  /* for ip_route_output */
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <linux/icmpv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
 
@@ -47,7 +50,8 @@
 
 	if (!dst)
 		return NULL;
-	if ((dst->obsolete || rtos != dest->dst_rtos) &&
+	if ((dst->obsolete
+	     || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
 	    dst->ops->check(dst, cookie) == NULL) {
 		dest->dst_cache = NULL;
 		dst_release(dst);
@@ -71,7 +75,7 @@
 				.oif = 0,
 				.nl_u = {
 					.ip4_u = {
-						.daddr = dest->addr,
+						.daddr = dest->addr.ip,
 						.saddr = 0,
 						.tos = rtos, } },
 			};
@@ -80,12 +84,12 @@
 				spin_unlock(&dest->dst_lock);
 				IP_VS_DBG_RL("ip_route_output error, "
 					     "dest: %u.%u.%u.%u\n",
-					     NIPQUAD(dest->addr));
+					     NIPQUAD(dest->addr.ip));
 				return NULL;
 			}
 			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
 			IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n",
-				  NIPQUAD(dest->addr),
+				  NIPQUAD(dest->addr.ip),
 				  atomic_read(&rt->u.dst.__refcnt), rtos);
 		}
 		spin_unlock(&dest->dst_lock);
@@ -94,14 +98,14 @@
 			.oif = 0,
 			.nl_u = {
 				.ip4_u = {
-					.daddr = cp->daddr,
+					.daddr = cp->daddr.ip,
 					.saddr = 0,
 					.tos = rtos, } },
 		};
 
 		if (ip_route_output_key(&init_net, &rt, &fl)) {
 			IP_VS_DBG_RL("ip_route_output error, dest: "
-				     "%u.%u.%u.%u\n", NIPQUAD(cp->daddr));
+				     "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip));
 			return NULL;
 		}
 	}
@@ -109,6 +113,70 @@
 	return rt;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static struct rt6_info *
+__ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
+{
+	struct rt6_info *rt;			/* Route to the other host */
+	struct ip_vs_dest *dest = cp->dest;
+
+	if (dest) {
+		spin_lock(&dest->dst_lock);
+		rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
+		if (!rt) {
+			struct flowi fl = {
+				.oif = 0,
+				.nl_u = {
+					.ip6_u = {
+						.daddr = dest->addr.in6,
+						.saddr = {
+							.s6_addr32 =
+								{ 0, 0, 0, 0 },
+						},
+					},
+				},
+			};
+
+			rt = (struct rt6_info *)ip6_route_output(&init_net,
+								 NULL, &fl);
+			if (!rt) {
+				spin_unlock(&dest->dst_lock);
+				IP_VS_DBG_RL("ip6_route_output error, "
+					     "dest: " NIP6_FMT "\n",
+					     NIP6(dest->addr.in6));
+				return NULL;
+			}
+			__ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
+			IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n",
+				  NIP6(dest->addr.in6),
+				  atomic_read(&rt->u.dst.__refcnt));
+		}
+		spin_unlock(&dest->dst_lock);
+	} else {
+		struct flowi fl = {
+			.oif = 0,
+			.nl_u = {
+				.ip6_u = {
+					.daddr = cp->daddr.in6,
+					.saddr = {
+						.s6_addr32 = { 0, 0, 0, 0 },
+					},
+				},
+			},
+		};
+
+		rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+		if (!rt) {
+			IP_VS_DBG_RL("ip6_route_output error, dest: "
+				     NIP6_FMT "\n", NIP6(cp->daddr.in6));
+			return NULL;
+		}
+	}
+
+	return rt;
+}
+#endif
+
 
 /*
  *	Release dest->dst_cache before a dest is removed
@@ -123,11 +191,11 @@
 	dst_release(old_dst);
 }
 
-#define IP_VS_XMIT(skb, rt)				\
+#define IP_VS_XMIT(pf, skb, rt)				\
 do {							\
 	(skb)->ipvs_property = 1;			\
 	skb_forward_csum(skb);				\
-	NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, (skb), NULL,	\
+	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
 		(rt)->u.dst.dev, dst_output);		\
 } while (0)
 
@@ -200,7 +268,7 @@
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
 
-	IP_VS_XMIT(skb, rt);
+	IP_VS_XMIT(PF_INET, skb, rt);
 
 	LeaveFunction(10);
 	return NF_STOLEN;
@@ -213,6 +281,70 @@
 	return NF_STOLEN;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+int
+ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		     struct ip_vs_protocol *pp)
+{
+	struct rt6_info *rt;			/* Route to the other host */
+	struct ipv6hdr  *iph = ipv6_hdr(skb);
+	int    mtu;
+	struct flowi fl = {
+		.oif = 0,
+		.nl_u = {
+			.ip6_u = {
+				.daddr = iph->daddr,
+				.saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
+	};
+
+	EnterFunction(10);
+
+	rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+	if (!rt) {
+		IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, "
+			     "dest: " NIP6_FMT "\n", NIP6(iph->daddr));
+		goto tx_error_icmp;
+	}
+
+	/* MTU checking */
+	mtu = dst_mtu(&rt->u.dst);
+	if (skb->len > mtu) {
+		dst_release(&rt->u.dst);
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+		IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n");
+		goto tx_error;
+	}
+
+	/*
+	 * Call ip_send_check because we are not sure it is called
+	 * after ip_defrag. Is copy-on-write needed?
+	 */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(skb == NULL)) {
+		dst_release(&rt->u.dst);
+		return NF_STOLEN;
+	}
+
+	/* drop old route */
+	dst_release(skb->dst);
+	skb->dst = &rt->u.dst;
+
+	/* Another hack: avoid icmp_send in ip_fragment */
+	skb->local_df = 1;
+
+	IP_VS_XMIT(PF_INET6, skb, rt);
+
+	LeaveFunction(10);
+	return NF_STOLEN;
+
+ tx_error_icmp:
+	dst_link_failure(skb);
+ tx_error:
+	kfree_skb(skb);
+	LeaveFunction(10);
+	return NF_STOLEN;
+}
+#endif
 
 /*
  *      NAT transmitter (only for outside-to-inside nat forwarding)
@@ -264,7 +396,7 @@
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
 		goto tx_error;
-	ip_hdr(skb)->daddr = cp->daddr;
+	ip_hdr(skb)->daddr = cp->daddr.ip;
 	ip_send_check(ip_hdr(skb));
 
 	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
@@ -276,7 +408,7 @@
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
 
-	IP_VS_XMIT(skb, rt);
+	IP_VS_XMIT(PF_INET, skb, rt);
 
 	LeaveFunction(10);
 	return NF_STOLEN;
@@ -292,6 +424,83 @@
 	goto tx_error;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+int
+ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		  struct ip_vs_protocol *pp)
+{
+	struct rt6_info *rt;		/* Route to the other host */
+	int mtu;
+
+	EnterFunction(10);
+
+	/* check if it is a connection of no-client-port */
+	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
+		__be16 _pt, *p;
+		p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
+				       sizeof(_pt), &_pt);
+		if (p == NULL)
+			goto tx_error;
+		ip_vs_conn_fill_cport(cp, *p);
+		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
+	}
+
+	rt = __ip_vs_get_out_rt_v6(cp);
+	if (!rt)
+		goto tx_error_icmp;
+
+	/* MTU checking */
+	mtu = dst_mtu(&rt->u.dst);
+	if (skb->len > mtu) {
+		dst_release(&rt->u.dst);
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+		IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+				 "ip_vs_nat_xmit_v6(): frag needed for");
+		goto tx_error;
+	}
+
+	/* copy-on-write the packet before mangling it */
+	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+		goto tx_error_put;
+
+	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+		goto tx_error_put;
+
+	/* drop old route */
+	dst_release(skb->dst);
+	skb->dst = &rt->u.dst;
+
+	/* mangle the packet */
+	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
+		goto tx_error;
+	ipv6_hdr(skb)->daddr = cp->daddr.in6;
+
+	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
+
+	/* FIXME: when the application helper enlarges the packet and the
+	   length is larger than the MTU of the outgoing device, there will
+	   still be an MTU problem. */
+
+	/* Another hack: avoid icmp_send in ip_fragment */
+	skb->local_df = 1;
+
+	IP_VS_XMIT(PF_INET6, skb, rt);
+
+	LeaveFunction(10);
+	return NF_STOLEN;
+
+tx_error_icmp:
+	dst_link_failure(skb);
+tx_error:
+	LeaveFunction(10);
+	kfree_skb(skb);
+	return NF_STOLEN;
+tx_error_put:
+	dst_release(&rt->u.dst);
+	goto tx_error;
+}
+#endif
+
 
 /*
  *   IP Tunneling transmitter
@@ -423,6 +632,112 @@
 	return NF_STOLEN;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+int
+ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		     struct ip_vs_protocol *pp)
+{
+	struct rt6_info *rt;		/* Route to the other host */
+	struct net_device *tdev;	/* Device to other host */
+	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
+	sk_buff_data_t old_transport_header = skb->transport_header;
+	struct ipv6hdr  *iph;		/* Our new IP header */
+	unsigned int max_headroom;	/* The extra header space needed */
+	int    mtu;
+
+	EnterFunction(10);
+
+	if (skb->protocol != htons(ETH_P_IPV6)) {
+		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, "
+			     "ETH_P_IPV6: %d, skb protocol: %d\n",
+			     htons(ETH_P_IPV6), skb->protocol);
+		goto tx_error;
+	}
+
+	rt = __ip_vs_get_out_rt_v6(cp);
+	if (!rt)
+		goto tx_error_icmp;
+
+	tdev = rt->u.dst.dev;
+
+	mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
+	/* TODO IPv6: do we need this check in IPv6? */
+	if (mtu < 1280) {
+		dst_release(&rt->u.dst);
+		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
+		goto tx_error;
+	}
+	if (skb->dst)
+		skb->dst->ops->update_pmtu(skb->dst, mtu);
+
+	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+		dst_release(&rt->u.dst);
+		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n");
+		goto tx_error;
+	}
+
+	/*
+	 * Okay, now see if we can stuff it in the buffer as-is.
+	 */
+	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
+
+	if (skb_headroom(skb) < max_headroom
+	    || skb_cloned(skb) || skb_shared(skb)) {
+		struct sk_buff *new_skb =
+			skb_realloc_headroom(skb, max_headroom);
+		if (!new_skb) {
+			dst_release(&rt->u.dst);
+			kfree_skb(skb);
+			IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n");
+			return NF_STOLEN;
+		}
+		kfree_skb(skb);
+		skb = new_skb;
+		old_iph = ipv6_hdr(skb);
+	}
+
+	skb->transport_header = old_transport_header;
+
+	skb_push(skb, sizeof(struct ipv6hdr));
+	skb_reset_network_header(skb);
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
+	/* drop old route */
+	dst_release(skb->dst);
+	skb->dst = &rt->u.dst;
+
+	/*
+	 *	Push down and install the IPv6 tunnel header.
+	 */
+	iph			=	ipv6_hdr(skb);
+	iph->version		=	6;
+	iph->nexthdr		=	IPPROTO_IPV6;
+	iph->payload_len	=	htons(ntohs(old_iph->payload_len) +
+						      sizeof(*old_iph));
+	iph->priority		=	old_iph->priority;
+	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
+	iph->daddr		=	rt->rt6i_dst.addr;
+	iph->saddr		=	cp->vaddr.in6; /* rt->rt6i_src.addr; */
+	iph->hop_limit		=	old_iph->hop_limit;
+
+	/* Another hack: avoid icmp_send in ip_fragment */
+	skb->local_df = 1;
+
+	ip6_local_out(skb);
+
+	LeaveFunction(10);
+
+	return NF_STOLEN;
+
+tx_error_icmp:
+	dst_link_failure(skb);
+tx_error:
+	kfree_skb(skb);
+	LeaveFunction(10);
+	return NF_STOLEN;
+}
+#endif
+
 
 /*
  *      Direct Routing transmitter
@@ -467,7 +782,7 @@
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
 
-	IP_VS_XMIT(skb, rt);
+	IP_VS_XMIT(PF_INET, skb, rt);
 
 	LeaveFunction(10);
 	return NF_STOLEN;
@@ -480,6 +795,60 @@
 	return NF_STOLEN;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+int
+ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		 struct ip_vs_protocol *pp)
+{
+	struct rt6_info *rt;			/* Route to the other host */
+	int    mtu;
+
+	EnterFunction(10);
+
+	rt = __ip_vs_get_out_rt_v6(cp);
+	if (!rt)
+		goto tx_error_icmp;
+
+	/* MTU checking */
+	mtu = dst_mtu(&rt->u.dst);
+	if (skb->len > mtu) {
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+		dst_release(&rt->u.dst);
+		IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n");
+		goto tx_error;
+	}
+
+	/*
+	 * Call ip_send_check because we are not sure it is called
+	 * after ip_defrag. Is copy-on-write needed?
+	 */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(skb == NULL)) {
+		dst_release(&rt->u.dst);
+		return NF_STOLEN;
+	}
+
+	/* drop old route */
+	dst_release(skb->dst);
+	skb->dst = &rt->u.dst;
+
+	/* Another hack: avoid icmp_send in ip_fragment */
+	skb->local_df = 1;
+
+	IP_VS_XMIT(PF_INET6, skb, rt);
+
+	LeaveFunction(10);
+	return NF_STOLEN;
+
+tx_error_icmp:
+	dst_link_failure(skb);
+tx_error:
+	kfree_skb(skb);
+	LeaveFunction(10);
+	return NF_STOLEN;
+}
+#endif
+
 
 /*
  *	ICMP packet transmitter
@@ -540,7 +909,7 @@
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
 
-	IP_VS_XMIT(skb, rt);
+	IP_VS_XMIT(PF_INET, skb, rt);
 
 	rc = NF_STOLEN;
 	goto out;
@@ -557,3 +926,79 @@
 	ip_rt_put(rt);
 	goto tx_error;
 }
+
+#ifdef CONFIG_IP_VS_IPV6
+int
+ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		struct ip_vs_protocol *pp, int offset)
+{
+	struct rt6_info	*rt;	/* Route to the other host */
+	int mtu;
+	int rc;
+
+	EnterFunction(10);
+
+	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
+	   forwarded directly here, because there is no need to
+	   translate address/port back */
+	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
+		if (cp->packet_xmit)
+			rc = cp->packet_xmit(skb, cp, pp);
+		else
+			rc = NF_ACCEPT;
+		/* do not touch skb anymore */
+		atomic_inc(&cp->in_pkts);
+		goto out;
+	}
+
+	/*
+	 * mangle and send the packet here (only for VS/NAT)
+	 */
+
+	rt = __ip_vs_get_out_rt_v6(cp);
+	if (!rt)
+		goto tx_error_icmp;
+
+	/* MTU checking */
+	mtu = dst_mtu(&rt->u.dst);
+	if (skb->len > mtu) {
+		dst_release(&rt->u.dst);
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+		IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
+		goto tx_error;
+	}
+
+	/* copy-on-write the packet before mangling it */
+	if (!skb_make_writable(skb, offset))
+		goto tx_error_put;
+
+	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+		goto tx_error_put;
+
+	/* drop the old route when skb is not shared */
+	dst_release(skb->dst);
+	skb->dst = &rt->u.dst;
+
+	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
+
+	/* Another hack: avoid icmp_send in ip_fragment */
+	skb->local_df = 1;
+
+	IP_VS_XMIT(PF_INET6, skb, rt);
+
+	rc = NF_STOLEN;
+	goto out;
+
+tx_error_icmp:
+	dst_link_failure(skb);
+tx_error:
+	dev_kfree_skb(skb);
+	rc = NF_STOLEN;
+out:
+	LeaveFunction(10);
+	return rc;
+tx_error_put:
+	dst_release(&rt->u.dst);
+	goto tx_error;
+}
+#endif
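
The transmitters above follow one pattern: consult the connection's address family once, build the matching IPv4 or IPv6 route, then hand the skb to NF_HOOK() with PF_INET or PF_INET6. The standalone sketch below shows the same dispatch-on-af step in userspace terms, building a socket address instead of a route; fill_sockaddr() is a hypothetical helper.

/* Sketch of dispatching on an address family stored next to a union. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

union addr_union {
	struct in_addr  ip;
	struct in6_addr in6;
};

static socklen_t fill_sockaddr(int af, const union addr_union *a, uint16_t port,
			       struct sockaddr_storage *ss)
{
	memset(ss, 0, sizeof(*ss));
	if (af == AF_INET6) {
		struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)ss;

		s6->sin6_family = AF_INET6;
		s6->sin6_addr = a->in6;
		s6->sin6_port = htons(port);
		return sizeof(*s6);
	} else {
		struct sockaddr_in *s4 = (struct sockaddr_in *)ss;

		s4->sin_family = AF_INET;
		s4->sin_addr = a->ip;
		s4->sin_port = htons(port);
		return sizeof(*s4);
	}
}

int main(void)
{
	union addr_union a;
	struct sockaddr_storage ss;
	socklen_t len;

	inet_pton(AF_INET6, "2001:db8::2", &a.in6);
	len = fill_sockaddr(AF_INET6, &a, 8080, &ss);
	printf("family %d, sockaddr length %u\n", ss.ss_family, (unsigned)len);
	return 0;
}
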
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ee5354..f62187b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -282,6 +282,8 @@
 	struct rtable *r = NULL;
 
 	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
+		if (!rt_hash_table[st->bucket].chain)
+			continue;
 		rcu_read_lock_bh();
 		r = rcu_dereference(rt_hash_table[st->bucket].chain);
 		while (r) {
@@ -299,11 +301,14 @@
 					  struct rtable *r)
 {
 	struct rt_cache_iter_state *st = seq->private;
+
 	r = r->u.dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
-		if (--st->bucket < 0)
-			break;
+		do {
+			if (--st->bucket < 0)
+				return NULL;
+		} while (!rt_hash_table[st->bucket].chain);
 		rcu_read_lock_bh();
 		r = rt_hash_table[st->bucket].chain;
 	}
@@ -2840,7 +2845,9 @@
 	if (s_h < 0)
 		s_h = 0;
 	s_idx = idx = cb->args[1];
-	for (h = s_h; h <= rt_hash_mask; h++) {
+	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
+		if (!rt_hash_table[h].chain)
+			continue;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
 		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
@@ -2859,7 +2866,6 @@
 			dst_release(xchg(&skb->dst, NULL));
 		}
 		rcu_read_unlock_bh();
-		s_idx = 0;
 	}
 
 done:
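
Both route.c hunks above share one concern: a dump or seq walk over the hash table must skip empty buckets cheaply and, when it resumes, must reset the per-bucket index so entries are neither skipped nor repeated. The sketch below reproduces that cursor logic over a toy chained hash table; all names are illustrative.

/* Resumable dump over a chained hash table with a (bucket, idx) cursor. */
#include <stdio.h>

#define NBUCKETS 8

struct node { int value; struct node *next; };

static struct node *table[NBUCKETS];

/* Emit up to 'budget' entries starting at cursor (*h, *idx); returns the
 * number emitted and leaves the cursor at the next entry to dump. */
static int dump_some(int *h, int *idx, int budget)
{
	int done = 0;

	for (; *h < NBUCKETS; (*h)++, *idx = 0) {	/* reset idx per bucket */
		struct node *n = table[*h];
		int i = 0;

		if (!n)					/* skip empty buckets */
			continue;
		for (; n; n = n->next, i++) {
			if (i < *idx)			/* dumped in an earlier batch */
				continue;
			if (done == budget) {
				*idx = i;		/* resume point */
				return done;
			}
			printf("bucket %d entry %d value %d\n", *h, i, n->value);
			done++;
		}
	}
	return done;
}

int main(void)
{
	struct node a = { 1, 0 }, b = { 2, 0 }, c = { 3, 0 };
	int h = 0, idx = 0, n;

	table[2] = &a; a.next = &b;		/* two entries in bucket 2 */
	table[5] = &c;				/* one entry in bucket 5 */

	while ((n = dump_some(&h, &idx, 2)) > 0)	/* dump in batches of two */
		printf("-- batch of %d --\n", n);
	return 0;
}
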
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2..3b76bce 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -979,6 +979,39 @@
 	}
 }
 
+/* This must be called before lost_out is incremented */
+static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
+{
+	if ((tp->retransmit_skb_hint == NULL) ||
+	    before(TCP_SKB_CB(skb)->seq,
+		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+		tp->retransmit_skb_hint = skb;
+
+	if (!tp->lost_out ||
+	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
+		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
+}
+
+static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
+{
+	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
+		tcp_verify_retransmit_hint(tp, skb);
+
+		tp->lost_out += tcp_skb_pcount(skb);
+		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+	}
+}
+
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
+{
+	tcp_verify_retransmit_hint(tp, skb);
+
+	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
+		tp->lost_out += tcp_skb_pcount(skb);
+		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+	}
+}
+
 /* This procedure tags the retransmission queue when SACKs arrive.
  *
  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1155,13 +1188,7 @@
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 			tp->retrans_out -= tcp_skb_pcount(skb);
 
-			/* clear lost hint */
-			tp->retransmit_skb_hint = NULL;
-
-			if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
-				tp->lost_out += tcp_skb_pcount(skb);
-				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-			}
+			tcp_skb_mark_lost_uncond_verify(tp, skb);
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
 			if (before(ack_seq, new_low_seq))
@@ -1271,9 +1298,6 @@
 					~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
 				tp->lost_out -= tcp_skb_pcount(skb);
 				tp->retrans_out -= tcp_skb_pcount(skb);
-
-				/* clear lost hint */
-				tp->retransmit_skb_hint = NULL;
 			}
 		} else {
 			if (!(sacked & TCPCB_RETRANS)) {
@@ -1292,9 +1316,6 @@
 			if (sacked & TCPCB_LOST) {
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 				tp->lost_out -= tcp_skb_pcount(skb);
-
-				/* clear lost hint */
-				tp->retransmit_skb_hint = NULL;
 			}
 		}
 
@@ -1324,7 +1345,6 @@
 	if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		tp->retrans_out -= tcp_skb_pcount(skb);
-		tp->retransmit_skb_hint = NULL;
 	}
 
 	return flag;
@@ -1726,6 +1746,8 @@
 		return 0;
 
 	skb = tcp_write_queue_head(sk);
+	if (tcp_skb_is_last(sk, skb))
+		return 1;
 	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -1867,6 +1889,7 @@
 		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
+			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 		}
 	}
 	tcp_verify_left_out(tp);
@@ -1883,7 +1906,7 @@
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 
-	tcp_clear_retrans_hints_partial(tp);
+	tcp_clear_all_retrans_hints(tp);
 }
 
 static void tcp_clear_retrans_partial(struct tcp_sock *tp)
@@ -1934,12 +1957,11 @@
 		/* Push undo marker, if it was plain RTO and nothing
 		 * was retransmitted. */
 		tp->undo_marker = tp->snd_una;
-		tcp_clear_retrans_hints_partial(tp);
 	} else {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
-		tcp_clear_all_retrans_hints(tp);
 	}
+	tcp_clear_all_retrans_hints(tp);
 
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -1952,6 +1974,7 @@
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
+			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 		}
 	}
 	tcp_verify_left_out(tp);
@@ -2157,19 +2180,6 @@
 	return 0;
 }
 
-/* RFC: This is from the original, I doubt that this is necessary at all:
- * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
- * retransmitted past LOST markings in the first place? I'm not fully sure
- * about undo and end of connection cases, which can cause R without L?
- */
-static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
-{
-	if ((tp->retransmit_skb_hint != NULL) &&
-	    before(TCP_SKB_CB(skb)->seq,
-		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
-		tp->retransmit_skb_hint = NULL;
-}
-
 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
@@ -2217,11 +2227,7 @@
 			cnt = packets;
 		}
 
-		if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
-			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-			tp->lost_out += tcp_skb_pcount(skb);
-			tcp_verify_retransmit_hint(tp, skb);
-		}
+		tcp_skb_mark_lost(tp, skb);
 	}
 	tcp_verify_left_out(tp);
 }
@@ -2263,11 +2269,7 @@
 			if (!tcp_skb_timedout(sk, skb))
 				break;
 
-			if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
-				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-				tp->lost_out += tcp_skb_pcount(skb);
-				tcp_verify_retransmit_hint(tp, skb);
-			}
+			tcp_skb_mark_lost(tp, skb);
 		}
 
 		tp->scoreboard_skb_hint = skb;
@@ -2378,10 +2380,6 @@
 	}
 	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
-
-	/* There is something screwy going on with the retrans hints after
-	   an undo */
-	tcp_clear_all_retrans_hints(tp);
 }
 
 static inline int tcp_may_undo(struct tcp_sock *tp)
@@ -2848,6 +2846,7 @@
 	int flag = 0;
 	u32 pkts_acked = 0;
 	u32 reord = tp->packets_out;
+	u32 prior_sacked = tp->sacked_out;
 	s32 seq_rtt = -1;
 	s32 ca_seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
@@ -2929,7 +2928,11 @@
 
 		tcp_unlink_write_queue(skb, sk);
 		sk_wmem_free_skb(sk, skb);
-		tcp_clear_all_retrans_hints(tp);
+		tp->scoreboard_skb_hint = NULL;
+		if (skb == tp->retransmit_skb_hint)
+			tp->retransmit_skb_hint = NULL;
+		if (skb == tp->lost_skb_hint)
+			tp->lost_skb_hint = NULL;
 	}
 
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
@@ -2948,6 +2951,15 @@
 			/* Non-retransmitted hole got filled? That's reordering */
 			if (reord < prior_fackets)
 				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+
+			/* No need to care for underflows here because
+			 * the lost_skb_hint gets NULLed if we're past it
+			 * (or something non-trivial happened)
+			 */
+			if (tcp_is_fack(tp))
+				tp->lost_cnt_hint -= pkts_acked;
+			else
+				tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
 		}
 
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@ -3442,6 +3454,22 @@
 	}
 }
 
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+{
+	__be32 *ptr = (__be32 *)(th + 1);
+
+	if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+			  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
+		tp->rx_opt.saw_tstamp = 1;
+		++ptr;
+		tp->rx_opt.rcv_tsval = ntohl(*ptr);
+		++ptr;
+		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+		return 1;
+	}
+	return 0;
+}
+
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
@@ -3453,16 +3481,8 @@
 		return 0;
 	} else if (tp->rx_opt.tstamp_ok &&
 		   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
-		__be32 *ptr = (__be32 *)(th + 1);
-		if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
-				  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
-			tp->rx_opt.saw_tstamp = 1;
-			++ptr;
-			tp->rx_opt.rcv_tsval = ntohl(*ptr);
-			++ptr;
-			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+		if (tcp_parse_aligned_timestamp(tp, th))
 			return 1;
-		}
 	}
 	tcp_parse_options(skb, &tp->rx_opt, 1);
 	return 1;
@@ -4138,7 +4158,7 @@
 				skb1 = skb1->prev;
 			}
 		}
-		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
+		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
 		/* And clean segments covered by new one as whole. */
 		while ((skb1 = skb->next) !=
@@ -4161,6 +4181,18 @@
 	}
 }
 
+static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+					struct sk_buff_head *list)
+{
+	struct sk_buff *next = skb->next;
+
+	__skb_unlink(skb, list);
+	__kfree_skb(skb);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+
+	return next;
+}
+
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
  * Segments with FIN/SYN are not collapsed (only because this
@@ -4178,11 +4210,7 @@
 	for (skb = head; skb != tail;) {
 		/* No new bits? It is possible on ofo queue. */
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-			struct sk_buff *next = skb->next;
-			__skb_unlink(skb, list);
-			__kfree_skb(skb);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
-			skb = next;
+			skb = tcp_collapse_one(sk, skb, list);
 			continue;
 		}
 
@@ -4228,7 +4256,7 @@
 		memcpy(nskb->head, skb->head, header);
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-		__skb_insert(nskb, skb->prev, skb, list);
+		__skb_queue_before(list, skb, nskb);
 		skb_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
@@ -4246,11 +4274,7 @@
 				start += size;
 			}
 			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-				struct sk_buff *next = skb->next;
-				__skb_unlink(skb, list);
-				__kfree_skb(skb);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
-				skb = next;
+				skb = tcp_collapse_one(sk, skb, list);
 				if (skb == tail ||
 				    tcp_hdr(skb)->syn ||
 				    tcp_hdr(skb)->fin)
@@ -4691,6 +4715,67 @@
 }
 #endif /* CONFIG_NET_DMA */
 
+/* Does PAWS and seqno-based validation of an incoming segment; flags play
+ * a significant role here.
+ */
+static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+			      struct tcphdr *th, int syn_inerr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* RFC1323: H1. Apply PAWS check first. */
+	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+	    tcp_paws_discard(sk, skb)) {
+		if (!th->rst) {
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			tcp_send_dupack(sk, skb);
+			goto discard;
+		}
+		/* Reset is accepted even if it did not pass PAWS. */
+	}
+
+	/* Step 1: check sequence number */
+	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
+		/* RFC793, page 37: "In all states except SYN-SENT, all reset
+		 * (RST) segments are validated by checking their SEQ-fields."
+		 * And page 69: "If an incoming segment is not acceptable,
+		 * an acknowledgment should be sent in reply (unless the RST
+		 * bit is set, if so drop the segment and return)".
+		 */
+		if (!th->rst)
+			tcp_send_dupack(sk, skb);
+		goto discard;
+	}
+
+	/* Step 2: check RST bit */
+	if (th->rst) {
+		tcp_reset(sk);
+		goto discard;
+	}
+
+	/* ts_recent update must be made after we are sure that the packet
+	 * is in window.
+	 */
+	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
+	/* step 3: check security and precedence [ignored] */
+
+	/* step 4: Check for a SYN in window. */
+	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+		if (syn_inerr)
+			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
+		tcp_reset(sk);
+		return -1;
+	}
+
+	return 1;
+
+discard:
+	__kfree_skb(skb);
+	return 0;
+}
+
 /*
  *	TCP receive function for the ESTABLISHED state.
  *
@@ -4718,6 +4803,7 @@
 			struct tcphdr *th, unsigned len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	int res;
 
 	/*
 	 *	Header prediction.
@@ -4756,19 +4842,10 @@
 
 		/* Check timestamp */
 		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
-			__be32 *ptr = (__be32 *)(th + 1);
-
 			/* No? Slow path! */
-			if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
-					  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
+			if (!tcp_parse_aligned_timestamp(tp, th))
 				goto slow_path;
 
-			tp->rx_opt.saw_tstamp = 1;
-			++ptr;
-			tp->rx_opt.rcv_tsval = ntohl(*ptr);
-			++ptr;
-			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
-
 			/* If PAWS failed, check it more carefully in slow path */
 			if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
 				goto slow_path;
@@ -4899,51 +4976,12 @@
 		goto csum_error;
 
 	/*
-	 * RFC1323: H1. Apply PAWS check first.
-	 */
-	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-	    tcp_paws_discard(sk, skb)) {
-		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
-			tcp_send_dupack(sk, skb);
-			goto discard;
-		}
-		/* Resets are accepted even if PAWS failed.
-
-		   ts_recent update must be made after we are sure
-		   that the packet is in window.
-		 */
-	}
-
-	/*
 	 *	Standard slow path.
 	 */
 
-	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
-		/* RFC793, page 37: "In all states except SYN-SENT, all reset
-		 * (RST) segments are validated by checking their SEQ-fields."
-		 * And page 69: "If an incoming segment is not acceptable,
-		 * an acknowledgment should be sent in reply (unless the RST bit
-		 * is set, if so drop the segment and return)".
-		 */
-		if (!th->rst)
-			tcp_send_dupack(sk, skb);
-		goto discard;
-	}
-
-	if (th->rst) {
-		tcp_reset(sk);
-		goto discard;
-	}
-
-	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
-	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
-		tcp_reset(sk);
-		return 1;
-	}
+	res = tcp_validate_incoming(sk, skb, th, 1);
+	if (res <= 0)
+		return -res;
 
 step5:
 	if (th->ack)
@@ -5225,6 +5263,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int queued = 0;
+	int res;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5277,42 +5316,9 @@
 		return 0;
 	}
 
-	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-	    tcp_paws_discard(sk, skb)) {
-		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
-			tcp_send_dupack(sk, skb);
-			goto discard;
-		}
-		/* Reset is accepted even if it did not pass PAWS. */
-	}
-
-	/* step 1: check sequence number */
-	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
-		if (!th->rst)
-			tcp_send_dupack(sk, skb);
-		goto discard;
-	}
-
-	/* step 2: check RST bit */
-	if (th->rst) {
-		tcp_reset(sk);
-		goto discard;
-	}
-
-	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
-	/* step 3: check security and precedence [ignored] */
-
-	/*	step 4:
-	 *
-	 *	Check for a SYN in window.
-	 */
-	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
-		tcp_reset(sk);
-		return 1;
-	}
+	res = tcp_validate_incoming(sk, skb, th, 0);
+	if (res <= 0)
+		return -res;
 
 	/* step 5: check the ACK field */
 	if (th->ack) {
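
tcp_parse_aligned_timestamp() above factors out the fast path that both callers relied on: if the option area is exactly NOP, NOP, TIMESTAMP, length 10, take tsval/tsecr directly and skip the generic option walk. A userspace sketch of the same check, with the option constants redefined locally:

/* Sketch of the aligned TCP timestamp fast-parse. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_NOP        1
#define TCPOPT_TIMESTAMP  8
#define TCPOLEN_TIMESTAMP 10

/* opts points at the 12 option bytes right after the fixed TCP header. */
static int parse_aligned_timestamp(const uint8_t *opts,
				   uint32_t *tsval, uint32_t *tsecr)
{
	uint32_t word;

	memcpy(&word, opts, 4);
	if (word != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
		return 0;			/* not the aligned layout */

	memcpy(&word, opts + 4, 4);
	*tsval = ntohl(word);
	memcpy(&word, opts + 8, 4);
	*tsecr = ntohl(word);
	return 1;
}

int main(void)
{
	uint8_t opts[12] = { 1, 1, 8, 10,
			     0x00, 0x00, 0x30, 0x39,	/* tsval = 12345 */
			     0x00, 0x00, 0x00, 0x07 };	/* tsecr = 7 */
	uint32_t tsval, tsecr;

	if (parse_aligned_timestamp(opts, &tsval, &tsecr))
		printf("tsval=%u tsecr=%u\n", tsval, tsecr);
	return 0;
}
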
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 011478e..d13688e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1364,6 +1364,10 @@
 	tcp_mtup_init(newsk);
 	tcp_sync_mss(newsk, dst_mtu(dst));
 	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+	if (tcp_sk(sk)->rx_opt.user_mss &&
+	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
+		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+
 	tcp_initialize_rcv_mss(newsk);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1946,6 +1950,12 @@
 	return rc;
 }
 
+static inline int empty_bucket(struct tcp_iter_state *st)
+{
+	return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
+		hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+}
+
 static void *established_get_first(struct seq_file *seq)
 {
 	struct tcp_iter_state* st = seq->private;
@@ -1958,6 +1968,10 @@
 		struct inet_timewait_sock *tw;
 		rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
+		/* Lockless fast path for the common case of empty buckets */
+		if (empty_bucket(st))
+			continue;
+
 		read_lock_bh(lock);
 		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family ||
@@ -2008,13 +2022,15 @@
 		read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
-		if (++st->bucket < tcp_hashinfo.ehash_size) {
-			read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
-		} else {
-			cur = NULL;
-			goto out;
-		}
+		/* Look for the next non-empty bucket */
+		while (++st->bucket < tcp_hashinfo.ehash_size &&
+				empty_bucket(st))
+			;
+		if (st->bucket >= tcp_hashinfo.ehash_size)
+			return NULL;
+
+		read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+		sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
 	} else
 		sk = sk_next(sk);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8165f5a..a8499ef 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1824,6 +1824,8 @@
 
 	/* changed transmit queue under us so clear hints */
 	tcp_clear_retrans_hints_partial(tp);
+	if (next_skb == tp->retransmit_skb_hint)
+		tp->retransmit_skb_hint = skb;
 
 	sk_wmem_free_skb(sk, next_skb);
 }
@@ -1838,7 +1840,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	unsigned int mss = tcp_current_mss(sk, 0);
-	int lost = 0;
+	u32 prior_lost = tp->lost_out;
 
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -1849,17 +1851,13 @@
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 				tp->retrans_out -= tcp_skb_pcount(skb);
 			}
-			if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) {
-				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-				tp->lost_out += tcp_skb_pcount(skb);
-				lost = 1;
-			}
+			tcp_skb_mark_lost_uncond_verify(tp, skb);
 		}
 	}
 
-	tcp_clear_all_retrans_hints(tp);
+	tcp_clear_retrans_hints_partial(tp);
 
-	if (!lost)
+	if (prior_lost == tp->lost_out)
 		return;
 
 	if (tcp_is_reno(tp))
@@ -1934,8 +1932,8 @@
 	/* Collapse two adjacent packets if worthwhile and we can. */
 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
 	    (skb->len < (cur_mss >> 1)) &&
-	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
 	    (!tcp_skb_is_last(sk, skb)) &&
+	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
 	    (skb_shinfo(skb)->nr_frags == 0 &&
 	     skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
 	    (tcp_skb_pcount(skb) == 1 &&
@@ -1996,6 +1994,33 @@
 	return err;
 }
 
+static int tcp_can_forward_retransmit(struct sock *sk)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Forward retransmissions are possible only during Recovery. */
+	if (icsk->icsk_ca_state != TCP_CA_Recovery)
+		return 0;
+
+	/* No forward retransmissions in Reno are possible. */
+	if (tcp_is_reno(tp))
+		return 0;
+
+	/* Yeah, we have to make a difficult choice between forward transmission
+	 * and retransmission... Both ways have their merits...
+	 *
+	 * For now we do not retransmit anything while we have some new
+	 * segments to send. In the other cases, follow rule 3 for
+	 * NextSeg() specified in RFC3517.
+	 */
+
+	if (tcp_may_send_now(sk))
+		return 0;
+
+	return 1;
+}
+
 /* This gets called after a retransmit timeout, and the initially
  * retransmitted data is acknowledged.  It tries to continue
  * resending the rest of the retransmit queue, until either
@@ -2009,120 +2034,86 @@
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int packet_cnt;
+	struct sk_buff *hole = NULL;
+	u32 last_lost;
+	int mib_idx;
+	int fwd_rexmitting = 0;
+
+	if (!tp->lost_out)
+		tp->retransmit_high = tp->snd_una;
 
 	if (tp->retransmit_skb_hint) {
 		skb = tp->retransmit_skb_hint;
-		packet_cnt = tp->retransmit_cnt_hint;
+		last_lost = TCP_SKB_CB(skb)->end_seq;
+		if (after(last_lost, tp->retransmit_high))
+			last_lost = tp->retransmit_high;
 	} else {
 		skb = tcp_write_queue_head(sk);
-		packet_cnt = 0;
+		last_lost = tp->snd_una;
 	}
 
 	/* First pass: retransmit lost packets. */
-	if (tp->lost_out) {
-		tcp_for_write_queue_from(skb, sk) {
-			__u8 sacked = TCP_SKB_CB(skb)->sacked;
-
-			if (skb == tcp_send_head(sk))
-				break;
-			/* we could do better than to assign each time */
-			tp->retransmit_skb_hint = skb;
-			tp->retransmit_cnt_hint = packet_cnt;
-
-			/* Assume this retransmit will generate
-			 * only one packet for congestion window
-			 * calculation purposes.  This works because
-			 * tcp_retransmit_skb() will chop up the
-			 * packet to be MSS sized and all the
-			 * packet counting works out.
-			 */
-			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
-				return;
-
-			if (sacked & TCPCB_LOST) {
-				if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
-					int mib_idx;
-
-					if (tcp_retransmit_skb(sk, skb)) {
-						tp->retransmit_skb_hint = NULL;
-						return;
-					}
-					if (icsk->icsk_ca_state != TCP_CA_Loss)
-						mib_idx = LINUX_MIB_TCPFASTRETRANS;
-					else
-						mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-					NET_INC_STATS_BH(sock_net(sk), mib_idx);
-
-					if (skb == tcp_write_queue_head(sk))
-						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-									  inet_csk(sk)->icsk_rto,
-									  TCP_RTO_MAX);
-				}
-
-				packet_cnt += tcp_skb_pcount(skb);
-				if (packet_cnt >= tp->lost_out)
-					break;
-			}
-		}
-	}
-
-	/* OK, demanded retransmission is finished. */
-
-	/* Forward retransmissions are possible only during Recovery. */
-	if (icsk->icsk_ca_state != TCP_CA_Recovery)
-		return;
-
-	/* No forward retransmissions in Reno are possible. */
-	if (tcp_is_reno(tp))
-		return;
-
-	/* Yeah, we have to make difficult choice between forward transmission
-	 * and retransmission... Both ways have their merits...
-	 *
-	 * For now we do not retransmit anything, while we have some new
-	 * segments to send. In the other cases, follow rule 3 for
-	 * NextSeg() specified in RFC3517.
-	 */
-
-	if (tcp_may_send_now(sk))
-		return;
-
-	/* If nothing is SACKed, highest_sack in the loop won't be valid */
-	if (!tp->sacked_out)
-		return;
-
-	if (tp->forward_skb_hint)
-		skb = tp->forward_skb_hint;
-	else
-		skb = tcp_write_queue_head(sk);
-
 	tcp_for_write_queue_from(skb, sk) {
+		__u8 sacked = TCP_SKB_CB(skb)->sacked;
+
 		if (skb == tcp_send_head(sk))
 			break;
-		tp->forward_skb_hint = skb;
+		/* we could do better than to assign each time */
+		if (hole == NULL)
+			tp->retransmit_skb_hint = skb;
 
-		if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
-			break;
-
+		/* Assume this retransmit will generate
+		 * only one packet for congestion window
+		 * calculation purposes.  This works because
+		 * tcp_retransmit_skb() will chop up the
+		 * packet to be MSS sized and all the
+		 * packet counting works out.
+		 */
 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
-			break;
+			return;
 
-		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
+		if (fwd_rexmitting) {
+begin_fwd:
+			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
+				break;
+			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
+
+		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
+			tp->retransmit_high = last_lost;
+			if (!tcp_can_forward_retransmit(sk))
+				break;
+			/* Backtrack if necessary to non-L'ed skb */
+			if (hole != NULL) {
+				skb = hole;
+				hole = NULL;
+			}
+			fwd_rexmitting = 1;
+			goto begin_fwd;
+
+		} else if (!(sacked & TCPCB_LOST)) {
+			if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
+				hole = skb;
 			continue;
 
-		/* Ok, retransmit it. */
-		if (tcp_retransmit_skb(sk, skb)) {
-			tp->forward_skb_hint = NULL;
-			break;
+		} else {
+			last_lost = TCP_SKB_CB(skb)->end_seq;
+			if (icsk->icsk_ca_state != TCP_CA_Loss)
+				mib_idx = LINUX_MIB_TCPFASTRETRANS;
+			else
+				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
 		}
 
+		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
+			continue;
+
+		if (tcp_retransmit_skb(sk, skb))
+			return;
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+
 		if (skb == tcp_write_queue_head(sk))
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  inet_csk(sk)->icsk_rto,
 						  TCP_RTO_MAX);
-
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
 	}
 }
 
@@ -2241,6 +2232,7 @@
 	struct sk_buff *skb;
 	struct tcp_md5sig_key *md5;
 	__u8 *md5_hash_location;
+	int mss;
 
 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (skb == NULL)
@@ -2251,13 +2243,17 @@
 
 	skb->dst = dst_clone(dst);
 
+	mss = dst_metric(dst, RTAX_ADVMSS);
+	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
+		mss = tp->rx_opt.user_mss;
+
 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
 		__u8 rcv_wscale;
 		/* Set this up on the first call only */
 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
 		/* tcp_full_space because it is guaranteed to be the first packet */
 		tcp_select_initial_window(tcp_full_space(sk),
-			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
 			&req->rcv_wnd,
 			&req->window_clamp,
 			ireq->wscale_ok,
@@ -2267,8 +2263,7 @@
 
 	memset(&opts, 0, sizeof(opts));
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
-	tcp_header_size = tcp_synack_options(sk, req,
-					     dst_metric(dst, RTAX_ADVMSS),
+	tcp_header_size = tcp_synack_options(sk, req, mss,
 					     skb, &opts, &md5) +
 			  sizeof(struct tcphdr);
 
@@ -2342,6 +2337,9 @@
 	if (!tp->window_clamp)
 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
+	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
+		tp->advmss = tp->rx_opt.user_mss;
+
 	tcp_initialize_rcv_mss(sk);
 
 	tcp_select_initial_window(tcp_full_space(sk),
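
The rewritten retransmit loop above keys everything off wrap-safe sequence comparisons (before()/after() against retransmit_high and last_lost). The idiom is a signed 32-bit subtraction; a standalone sketch:

/* Wrap-safe 32-bit sequence number comparison. */
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static int seq_after(uint32_t a, uint32_t b)
{
	return seq_before(b, a);
}

int main(void)
{
	uint32_t high = 0xfffffff0u;		/* close to wrap */
	uint32_t next = high + 0x100;		/* wrapped past zero */

	printf("next after high:  %d\n", seq_after(next, high));	/* 1 */
	printf("high before next: %d\n", seq_before(high, next));	/* 1 */
	printf("high after next:  %d\n", seq_after(high, next));	/* 0 */
	return 0;
}
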
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 17c7b09..64ce3d3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1050,10 +1050,10 @@
 	}
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		ret = ip4ip6_tnl_xmit(skb, dev);
 		break;
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		ret = ip6ip6_tnl_xmit(skb, dev);
 		break;
 	default:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 52d06dd..9967ac7 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -27,7 +27,6 @@
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
 
 #include <net/sock.h>
 #include <net/snmp.h>
@@ -103,39 +102,12 @@
 };
 #endif
 
-static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
-			       const struct in6_addr *daddr)
-{
-	u32 a, b, c;
-
-	a = (__force u32)saddr->s6_addr32[0];
-	b = (__force u32)saddr->s6_addr32[1];
-	c = (__force u32)saddr->s6_addr32[2];
-
-	a += JHASH_GOLDEN_RATIO;
-	b += JHASH_GOLDEN_RATIO;
-	c += nf_frags.rnd;
-	__jhash_mix(a, b, c);
-
-	a += (__force u32)saddr->s6_addr32[3];
-	b += (__force u32)daddr->s6_addr32[0];
-	c += (__force u32)daddr->s6_addr32[1];
-	__jhash_mix(a, b, c);
-
-	a += (__force u32)daddr->s6_addr32[2];
-	b += (__force u32)daddr->s6_addr32[3];
-	c += (__force u32)id;
-	__jhash_mix(a, b, c);
-
-	return c & (INETFRAGS_HASHSZ - 1);
-}
-
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
 	const struct nf_ct_frag6_queue *nq;
 
 	nq = container_of(q, struct nf_ct_frag6_queue, q);
-	return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
+	return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
 }
 
 static void nf_skb_free(struct sk_buff *skb)
@@ -209,7 +181,7 @@
 	arg.dst = dst;
 
 	read_lock_bh(&nf_frags.lock);
-	hash = ip6qhashfn(id, src, dst);
+	hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
 
 	q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
 	local_bh_enable();
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 89184b5..2eeadfa 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -99,8 +99,8 @@
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
  */
-static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
-			       struct in6_addr *daddr)
+unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+			     const struct in6_addr *daddr, u32 rnd)
 {
 	u32 a, b, c;
 
@@ -110,7 +110,7 @@
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
-	c += ip6_frags.rnd;
+	c += rnd;
 	__jhash_mix(a, b, c);
 
 	a += (__force u32)saddr->s6_addr32[3];
@@ -125,13 +125,14 @@
 
 	return c & (INETFRAGS_HASHSZ - 1);
 }
+EXPORT_SYMBOL_GPL(inet6_hash_frag);
 
 static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 {
 	struct frag_queue *fq;
 
 	fq = container_of(q, struct frag_queue, q);
-	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
+	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
 }
 
 int ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -247,7 +248,7 @@
 	arg.dst = dst;
 
 	read_lock(&ip6_frags.lock);
-	hash = ip6qhashfn(id, src, dst);
+	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
 
 	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
 	if (q == NULL)
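
With the rename, nf_conntrack_reasm.c and reassembly.c now share one fragment hash instead of two copies. The function body is visible above; for reference, here is a self-contained userspace version of the same Jenkins-style mix, with the golden-ratio constant and the 64-bucket table size stated as local assumptions rather than taken from kernel headers:

/* Userspace sketch of the shared IPv6 fragment hash. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define GOLDEN_RATIO 0x9e3779b9u	/* assumed JHASH_GOLDEN_RATIO */
#define HASHSZ       64u		/* assumed INETFRAGS_HASHSZ */

#define mix(a, b, c)					\
do {							\
	a -= b; a -= c; a ^= (c >> 13);			\
	b -= c; b -= a; b ^= (a << 8);			\
	c -= a; c -= b; c ^= (b >> 13);			\
	a -= b; a -= c; a ^= (c >> 12);			\
	b -= c; b -= a; b ^= (a << 16);			\
	c -= a; c -= b; c ^= (b >> 5);			\
	a -= b; a -= c; a ^= (c >> 3);			\
	b -= c; b -= a; b ^= (a << 10);			\
	c -= a; c -= b; c ^= (b >> 15);			\
} while (0)

static unsigned int hash_frag(uint32_t id, const struct in6_addr *saddr,
			      const struct in6_addr *daddr, uint32_t rnd)
{
	uint32_t s[4], d[4], a, b, c;

	memcpy(s, saddr, sizeof(s));	/* raw 32-bit words, wire byte order */
	memcpy(d, daddr, sizeof(d));

	a = s[0]; b = s[1]; c = s[2];
	a += GOLDEN_RATIO;
	b += GOLDEN_RATIO;
	c += rnd;
	mix(a, b, c);

	a += s[3]; b += d[0]; c += d[1];
	mix(a, b, c);

	a += d[2]; b += d[3]; c += id;
	mix(a, b, c);

	return c & (HASHSZ - 1);
}

int main(void)
{
	struct in6_addr src, dst;

	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);
	printf("bucket %u\n", hash_frag(htonl(0x1234), &src, &dst, 0xdeadbeef));
	return 0;
}
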
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 63442a1..f4385a6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1003,6 +1003,25 @@
 	return more;
 }
 
+static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
+			    void *arg)
+{
+	struct dst_entry *dst, **pprev;
+
+	spin_lock_bh(&icmp6_dst_lock);
+	pprev = &icmp6_dst_gc_list;
+	while ((dst = *pprev) != NULL) {
+		struct rt6_info *rt = (struct rt6_info *) dst;
+		if (func(rt, arg)) {
+			*pprev = dst->next;
+			dst_free(dst);
+		} else {
+			pprev = &dst->next;
+		}
+	}
+	spin_unlock_bh(&icmp6_dst_lock);
+}
+
 static int ip6_dst_gc(struct dst_ops *ops)
 {
 	unsigned long now = jiffies;
@@ -1930,6 +1949,7 @@
 	};
 
 	fib6_clean_all(net, fib6_ifdown, 0, &adn);
+	icmp6_clean_all(fib6_ifdown, &adn);
 }
 
 struct rt6_mtu_change_arg
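
icmp6_clean_all() added above walks icmp6_dst_gc_list with a pointer-to-pointer cursor so that matching entries can be unlinked without restarting the walk. The same idiom in a standalone sketch (names illustrative; the kernel additionally dst_free()s what it unlinks):

/* Unlink matching entries from a singly linked list in one pass. */
#include <stdio.h>

struct entry { int key; struct entry *next; };

/* Remove every entry for which match() returns nonzero; returns the count. */
static int clean_all(struct entry **head, int (*match)(const struct entry *))
{
	struct entry **pprev = head, *e;
	int removed = 0;

	while ((e = *pprev) != NULL) {
		if (match(e)) {
			*pprev = e->next;	/* unlink without losing our place */
			removed++;		/* (the kernel also frees the dst) */
		} else {
			pprev = &e->next;	/* advance only when keeping it */
		}
	}
	return removed;
}

static int is_odd(const struct entry *e) { return e->key & 1; }

int main(void)
{
	struct entry c = { 3, 0 }, b = { 2, &c }, a = { 1, &b };
	struct entry *head = &a;

	printf("removed %d\n", clean_all(&head, is_odd));	/* removes 1 and 3 */
	for (struct entry *e = head; e; e = e->next)
		printf("kept %d\n", e->key);			/* keeps 2 */
	return 0;
}
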
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 10e22fd..df16b68 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1286,7 +1286,7 @@
 					  struct request_sock *req,
 					  struct dst_entry *dst)
 {
-	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct inet6_request_sock *treq;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
@@ -1350,6 +1350,7 @@
 		return newsk;
 	}
 
+	treq = inet6_rsk(req);
 	opt = np->opt;
 
 	if (sk_acceptq_is_full(sk))
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 80d6933..8427518 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -179,19 +179,6 @@
 
 	  Do not select this option.
 
-config MAC80211_LOWTX_FRAME_DUMP
-	bool "Debug frame dumping"
-	depends on MAC80211_DEBUG_MENU
-	---help---
-	  Selecting this option will cause the stack to
-	  print a message for each frame that is handed
-	  to the lowlevel driver for transmission. This
-	  message includes all MAC addresses and the
-	  frame control field.
-
-	  If unsure, say N and insert the debugging code
-	  you require into the driver you are debugging.
-
 config MAC80211_DEBUG_COUNTERS
 	bool "Extra statistics for TX/RX debugging"
 	depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index a169b02..2dc8f2b 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -7,6 +7,8 @@
 	sta_info.o \
 	wep.o \
 	wpa.o \
+	scan.o \
+	ht.o \
 	mlme.o \
 	iface.o \
 	rate.o \
@@ -15,6 +17,7 @@
 	aes_ccm.o \
 	cfg.o \
 	rx.o \
+	spectmgmt.o \
 	tx.o \
 	key.o \
 	util.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 297c257..855126a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,26 +17,26 @@
 #include "rate.h"
 #include "mesh.h"
 
-static enum ieee80211_if_types
-nl80211_type_to_mac80211_type(enum nl80211_iftype type)
+struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+	return &local->hw;
+}
+EXPORT_SYMBOL(wiphy_to_hw);
+
+static bool nl80211_type_check(enum nl80211_iftype type)
 {
 	switch (type) {
-	case NL80211_IFTYPE_UNSPECIFIED:
-		return IEEE80211_IF_TYPE_STA;
 	case NL80211_IFTYPE_ADHOC:
-		return IEEE80211_IF_TYPE_IBSS;
 	case NL80211_IFTYPE_STATION:
-		return IEEE80211_IF_TYPE_STA;
 	case NL80211_IFTYPE_MONITOR:
-		return IEEE80211_IF_TYPE_MNTR;
 #ifdef CONFIG_MAC80211_MESH
 	case NL80211_IFTYPE_MESH_POINT:
-		return IEEE80211_IF_TYPE_MESH_POINT;
 #endif
 	case NL80211_IFTYPE_WDS:
-		return IEEE80211_IF_TYPE_WDS;
+		return true;
 	default:
-		return IEEE80211_IF_TYPE_INVALID;
+		return false;
 	}
 }
 
@@ -45,17 +45,15 @@
 			       struct vif_params *params)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	enum ieee80211_if_types itype;
 	struct net_device *dev;
 	struct ieee80211_sub_if_data *sdata;
 	int err;
 
-	itype = nl80211_type_to_mac80211_type(type);
-	if (itype == IEEE80211_IF_TYPE_INVALID)
+	if (!nl80211_type_check(type))
 		return -EINVAL;
 
-	err = ieee80211_if_add(local, name, &dev, itype, params);
-	if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags)
+	err = ieee80211_if_add(local, name, &dev, type, params);
+	if (err || type != NL80211_IFTYPE_MONITOR || !flags)
 		return err;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -66,13 +64,16 @@
 static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
 {
 	struct net_device *dev;
+	struct ieee80211_sub_if_data *sdata;
 
 	/* we're under RTNL */
 	dev = __dev_get_by_index(&init_net, ifindex);
 	if (!dev)
 		return -ENODEV;
 
-	ieee80211_if_remove(dev);
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	ieee80211_if_remove(sdata);
 
 	return 0;
 }
@@ -81,9 +82,7 @@
 				  enum nl80211_iftype type, u32 *flags,
 				  struct vif_params *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct net_device *dev;
-	enum ieee80211_if_types itype;
 	struct ieee80211_sub_if_data *sdata;
 	int ret;
 
@@ -92,25 +91,24 @@
 	if (!dev)
 		return -ENODEV;
 
-	itype = nl80211_type_to_mac80211_type(type);
-	if (itype == IEEE80211_IF_TYPE_INVALID)
+	if (!nl80211_type_check(type))
 		return -EINVAL;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	ret = ieee80211_if_change_type(sdata, itype);
+	ret = ieee80211_if_change_type(sdata, type);
 	if (ret)
 		return ret;
 
-	if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
-		ieee80211_if_sta_set_mesh_id(&sdata->u.sta,
-					     params->mesh_id_len,
-					     params->mesh_id);
+	if (netif_running(sdata->dev))
+		return -EBUSY;
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags)
+	if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
+		ieee80211_sdata_set_mesh_id(sdata,
+					    params->mesh_id_len,
+					    params->mesh_id);
+
+	if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
 		return 0;
 
 	sdata->u.mntr_flags = *flags;
@@ -121,16 +119,12 @@
 			     u8 key_idx, u8 *mac_addr,
 			     struct key_params *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta = NULL;
 	enum ieee80211_key_alg alg;
 	struct ieee80211_key *key;
 	int err;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	switch (params->cipher) {
@@ -175,14 +169,10 @@
 static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
 			     u8 key_idx, u8 *mac_addr)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 	int ret;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	rcu_read_lock();
@@ -223,7 +213,6 @@
 			     void (*callback)(void *cookie,
 					      struct key_params *params))
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta = NULL;
 	u8 seq[6] = {0};
@@ -233,9 +222,6 @@
 	u16 iv16;
 	int err = -ENOENT;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	rcu_read_lock();
@@ -311,12 +297,8 @@
 					struct net_device *dev,
 					u8 key_idx)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	rcu_read_lock();
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -365,7 +347,7 @@
 	sta = sta_info_get_by_idx(local, idx, dev);
 	if (sta) {
 		ret = 0;
-		memcpy(mac, sta->addr, ETH_ALEN);
+		memcpy(mac, sta->sta.addr, ETH_ALEN);
 		sta_set_sinfo(sta, sinfo);
 	}
 
@@ -497,16 +479,12 @@
 static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
 				struct beacon_parameters *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct beacon_data *old;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EINVAL;
 
 	old = sdata->u.ap.beacon;
@@ -520,16 +498,12 @@
 static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
 				struct beacon_parameters *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct beacon_data *old;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EINVAL;
 
 	old = sdata->u.ap.beacon;
@@ -542,16 +516,12 @@
 
 static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct beacon_data *old;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EINVAL;
 
 	old = sdata->u.ap.beacon;
@@ -594,7 +564,7 @@
 	 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
 
 	memset(msg->da, 0xff, ETH_ALEN);
-	memcpy(msg->sa, sta->addr, ETH_ALEN);
+	memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
 	msg->len = htons(6);
 	msg->dsap = 0;
 	msg->ssap = 0x01;	/* NULL LSAP, CR Bit: Response */
@@ -649,9 +619,9 @@
 	 */
 
 	if (params->aid) {
-		sta->aid = params->aid;
-		if (sta->aid > IEEE80211_MAX_AID)
-			sta->aid = 0; /* XXX: should this be an error? */
+		sta->sta.aid = params->aid;
+		if (sta->sta.aid > IEEE80211_MAX_AID)
+			sta->sta.aid = 0; /* XXX: should this be an error? */
 	}
 
 	if (params->listen_interval >= 0)
@@ -668,7 +638,12 @@
 					rates |= BIT(j);
 			}
 		}
-		sta->supp_rates[local->oper_channel->band] = rates;
+		sta->sta.supp_rates[local->oper_channel->band] = rates;
+	}
+
+	if (params->ht_capa) {
+		ieee80211_ht_cap_ie_to_ht_info(params->ht_capa,
+					       &sta->sta.ht_info);
 	}
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
@@ -691,9 +666,6 @@
 	struct ieee80211_sub_if_data *sdata;
 	int err;
 
-	if (dev == local->mdev || params->vlan == local->mdev)
-		return -EOPNOTSUPP;
-
 	/* Prevent a race with changing the rate control algorithm */
 	if (!netif_running(dev))
 		return -ENETDOWN;
@@ -701,8 +673,8 @@
 	if (params->vlan) {
 		sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
 
-		if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN &&
-		    sdata->vif.type != IEEE80211_IF_TYPE_AP)
+		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+		    sdata->vif.type != NL80211_IFTYPE_AP)
 			return -EINVAL;
 	} else
 		sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -721,7 +693,7 @@
 
 	sta_apply_parameters(local, sta, params);
 
-	rate_control_rate_init(sta, local);
+	rate_control_rate_init(sta);
 
 	rcu_read_lock();
 
@@ -732,8 +704,8 @@
 		return err;
 	}
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_AP)
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+	    sdata->vif.type == NL80211_IFTYPE_AP)
 		ieee80211_send_layer2_update(sta);
 
 	rcu_read_unlock();
@@ -748,9 +720,6 @@
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	if (mac) {
@@ -782,9 +751,6 @@
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *vlansdata;
 
-	if (dev == local->mdev || params->vlan == local->mdev)
-		return -EOPNOTSUPP;
-
 	rcu_read_lock();
 
 	/* XXX: get sta belonging to dev */
@@ -797,8 +763,8 @@
 	if (params->vlan && params->vlan != sta->sdata->dev) {
 		vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
 
-		if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN &&
-		    vlansdata->vif.type != IEEE80211_IF_TYPE_AP) {
+		if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+		    vlansdata->vif.type != NL80211_IFTYPE_AP) {
 			rcu_read_unlock();
 			return -EINVAL;
 		}
@@ -824,15 +790,12 @@
 	struct sta_info *sta;
 	int err;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	if (!netif_running(dev))
 		return -ENETDOWN;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
+	if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 		return -ENOTSUPP;
 
 	rcu_read_lock();
@@ -842,13 +805,13 @@
 		return -ENOENT;
 	}
 
-	err = mesh_path_add(dst, dev);
+	err = mesh_path_add(dst, sdata);
 	if (err) {
 		rcu_read_unlock();
 		return err;
 	}
 
-	mpath = mesh_path_lookup(dst, dev);
+	mpath = mesh_path_lookup(dst, sdata);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENXIO;
@@ -862,10 +825,12 @@
 static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
 				 u8 *dst)
 {
-	if (dst)
-		return mesh_path_del(dst, dev);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	mesh_path_flush(dev);
+	if (dst)
+		return mesh_path_del(dst, sdata);
+
+	mesh_path_flush(sdata);
 	return 0;
 }
 
@@ -878,15 +843,12 @@
 	struct mesh_path *mpath;
 	struct sta_info *sta;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	if (!netif_running(dev))
 		return -ENETDOWN;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
+	if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 		return -ENOTSUPP;
 
 	rcu_read_lock();
@@ -897,7 +859,7 @@
 		return -ENOENT;
 	}
 
-	mpath = mesh_path_lookup(dst, dev);
+	mpath = mesh_path_lookup(dst, sdata);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -913,7 +875,7 @@
 			    struct mpath_info *pinfo)
 {
 	if (mpath->next_hop)
-		memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
+		memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
 	else
 		memset(next_hop, 0, ETH_ALEN);
 
@@ -952,20 +914,16 @@
 			       u8 *dst, u8 *next_hop, struct mpath_info *pinfo)
 
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct mesh_path *mpath;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
+	if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 		return -ENOTSUPP;
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(dst, dev);
+	mpath = mesh_path_lookup(dst, sdata);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -980,20 +938,16 @@
 				 int idx, u8 *dst, u8 *next_hop,
 				 struct mpath_info *pinfo)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	struct mesh_path *mpath;
 
-	if (dev == local->mdev)
-		return -EOPNOTSUPP;
-
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
+	if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 		return -ENOTSUPP;
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup_by_idx(idx, dev);
+	mpath = mesh_path_lookup_by_idx(idx, sdata);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -1005,6 +959,38 @@
 }
 #endif
 
+static int ieee80211_change_bss(struct wiphy *wiphy,
+				struct net_device *dev,
+				struct bss_parameters *params)
+{
+	struct ieee80211_sub_if_data *sdata;
+	u32 changed = 0;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
+		return -EINVAL;
+
+	if (params->use_cts_prot >= 0) {
+		sdata->bss_conf.use_cts_prot = params->use_cts_prot;
+		changed |= BSS_CHANGED_ERP_CTS_PROT;
+	}
+	if (params->use_short_preamble >= 0) {
+		sdata->bss_conf.use_short_preamble =
+			params->use_short_preamble;
+		changed |= BSS_CHANGED_ERP_PREAMBLE;
+	}
+	if (params->use_short_slot_time >= 0) {
+		sdata->bss_conf.use_short_slot =
+			params->use_short_slot_time;
+		changed |= BSS_CHANGED_ERP_SLOT;
+	}
+
+	ieee80211_bss_info_change_notify(sdata, changed);
+
+	return 0;
+}
+
 struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -1028,4 +1014,5 @@
 	.get_mpath = ieee80211_get_mpath,
 	.dump_mpath = ieee80211_dump_mpath,
 #endif
+	.change_bss = ieee80211_change_bss,
 };
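The change_bss handler added above follows a pattern used throughout mac80211: each ERP parameter that actually changed sets a BSS_CHANGED_* bit, and the driver is notified once with the accumulated mask via ieee80211_bss_info_change_notify(). A minimal sketch of how a driver might consume that mask in its bss_info_changed callback; the my_set_* helpers are hypothetical:

/* Hypothetical driver callback; only fields whose bit is set in
 * 'changed' are guaranteed to be up to date. */
static void my_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *info,
				u32 changed)
{
	if (changed & BSS_CHANGED_ERP_CTS_PROT)
		my_set_cts_protection(hw, info->use_cts_prot);
	if (changed & BSS_CHANGED_ERP_PREAMBLE)
		my_set_preamble(hw, info->use_short_preamble);
	if (changed & BSS_CHANGED_ERP_SLOT)
		my_set_slot_time(hw, info->use_short_slot);
}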
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ee509f1..24ce544 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -51,8 +51,6 @@
 		      local->hw.conf.antenna_sel_tx);
 DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d",
 		      local->hw.conf.antenna_sel_rx);
-DEBUGFS_READONLY_FILE(bridge_packets, 20, "%d",
-		      local->bridge_packets);
 DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
 		      local->rts_threshold);
 DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
@@ -206,7 +204,6 @@
 	DEBUGFS_ADD(frequency);
 	DEBUGFS_ADD(antenna_sel_tx);
 	DEBUGFS_ADD(antenna_sel_rx);
-	DEBUGFS_ADD(bridge_packets);
 	DEBUGFS_ADD(rts_threshold);
 	DEBUGFS_ADD(fragmentation_threshold);
 	DEBUGFS_ADD(short_retry_limit);
@@ -263,7 +260,6 @@
 	DEBUGFS_DEL(frequency);
 	DEBUGFS_DEL(antenna_sel_tx);
 	DEBUGFS_DEL(antenna_sel_rx);
-	DEBUGFS_DEL(bridge_packets);
 	DEBUGFS_DEL(rts_threshold);
 	DEBUGFS_DEL(fragmentation_threshold);
 	DEBUGFS_DEL(short_retry_limit);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index cf82ace..a3294d1 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -206,7 +206,8 @@
 	rcu_read_lock();
 	sta = rcu_dereference(key->sta);
 	if (sta)
-		sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr));
+		sprintf(buf, "../../stations/%s",
+			print_mac(mac, sta->sta.addr));
 	rcu_read_unlock();
 
 	/* using sta as a boolean is fine outside RCU lock */
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 8165df5..2a45156 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -173,7 +173,6 @@
 IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX);
 IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC);
 IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC);
-IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC);
 
 static ssize_t ieee80211_if_fmt_flags(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -192,7 +191,6 @@
 /* AP attributes */
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
-IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC);
 
 static ssize_t ieee80211_if_fmt_num_buffered_multicast(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -207,37 +205,37 @@
 
 #ifdef CONFIG_MAC80211_MESH
 /* Mesh stats attributes */
-IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC);
-IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC);
+IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
+IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
 IEEE80211_IF_FILE(dropped_frames_no_route,
-		u.sta.mshstats.dropped_frames_no_route, DEC);
-IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC);
+		u.mesh.mshstats.dropped_frames_no_route, DEC);
+IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
 
 /* Mesh parameters */
 IEEE80211_IF_WFILE(dot11MeshMaxRetries,
-		u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8);
+		u.mesh.mshcfg.dot11MeshMaxRetries, DEC, u8);
 IEEE80211_IF_WFILE(dot11MeshRetryTimeout,
-		u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16);
+		u.mesh.mshcfg.dot11MeshRetryTimeout, DEC, u16);
 IEEE80211_IF_WFILE(dot11MeshConfirmTimeout,
-		u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16);
+		u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC, u16);
 IEEE80211_IF_WFILE(dot11MeshHoldingTimeout,
-		u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16);
-IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8);
-IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, u8);
+		u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC, u16);
+IEEE80211_IF_WFILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC, u8);
+IEEE80211_IF_WFILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC, u8);
 IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks,
-		u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16);
+		u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC, u16);
 IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout,
-		u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32);
+		u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32);
 IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval,
-		u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16);
+		u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16);
 IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime,
-		u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16);
+		u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16);
 IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries,
-		u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8);
+		u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8);
 IEEE80211_IF_WFILE(path_refresh_time,
-		u.sta.mshcfg.path_refresh_time, DEC, u32);
+		u.mesh.mshcfg.path_refresh_time, DEC, u32);
 IEEE80211_IF_WFILE(min_discovery_timeout,
-		u.sta.mshcfg.min_discovery_timeout, DEC, u16);
+		u.mesh.mshcfg.min_discovery_timeout, DEC, u16);
 #endif
 
 
@@ -265,7 +263,6 @@
 	DEBUGFS_ADD(auth_alg, sta);
 	DEBUGFS_ADD(auth_transaction, sta);
 	DEBUGFS_ADD(flags, sta);
-	DEBUGFS_ADD(num_beacons_sta, sta);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -276,7 +273,6 @@
 
 	DEBUGFS_ADD(num_sta_ps, ap);
 	DEBUGFS_ADD(dtim_count, ap);
-	DEBUGFS_ADD(num_beacons, ap);
 	DEBUGFS_ADD(num_buffered_multicast, ap);
 }
 
@@ -345,26 +341,26 @@
 		return;
 
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_MESH_POINT:
+	case NL80211_IFTYPE_MESH_POINT:
 #ifdef CONFIG_MAC80211_MESH
 		add_mesh_stats(sdata);
 		add_mesh_config(sdata);
 #endif
-		/* fall through */
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
 		add_sta_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		add_ap_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_WDS:
+	case NL80211_IFTYPE_WDS:
 		add_wds_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		add_monitor_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_VLAN:
+	case NL80211_IFTYPE_AP_VLAN:
 		add_vlan_files(sdata);
 		break;
 	default:
@@ -398,7 +394,6 @@
 	DEBUGFS_DEL(auth_alg, sta);
 	DEBUGFS_DEL(auth_transaction, sta);
 	DEBUGFS_DEL(flags, sta);
-	DEBUGFS_DEL(num_beacons_sta, sta);
 }
 
 static void del_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -409,7 +404,6 @@
 
 	DEBUGFS_DEL(num_sta_ps, ap);
 	DEBUGFS_DEL(dtim_count, ap);
-	DEBUGFS_DEL(num_beacons, ap);
 	DEBUGFS_DEL(num_buffered_multicast, ap);
 }
 
@@ -482,26 +476,26 @@
 		return;
 
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_MESH_POINT:
+	case NL80211_IFTYPE_MESH_POINT:
 #ifdef CONFIG_MAC80211_MESH
 		del_mesh_stats(sdata);
 		del_mesh_config(sdata);
 #endif
-		/* fall through */
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
 		del_sta_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		del_ap_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_WDS:
+	case NL80211_IFTYPE_WDS:
 		del_wds_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		del_monitor_files(sdata);
 		break;
-	case IEEE80211_IF_TYPE_VLAN:
+	case NL80211_IFTYPE_AP_VLAN:
 		del_vlan_files(sdata);
 		break;
 	default:
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 79a0627..b9902e4 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,7 @@
 		STA_READ_##format(name, field)				\
 		STA_OPS(name)
 
-STA_FILE(aid, aid, D);
+STA_FILE(aid, sta.aid, D);
 STA_FILE(dev, sdata->dev->name, S);
 STA_FILE(rx_packets, rx_packets, LU);
 STA_FILE(tx_packets, tx_packets, LU);
@@ -173,10 +173,9 @@
 		const char __user *user_buf, size_t count, loff_t *ppos)
 {
 	struct sta_info *sta = file->private_data;
-	struct net_device *dev = sta->sdata->dev;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sta->sdata->local;
 	struct ieee80211_hw *hw = &local->hw;
-	u8 *da = sta->addr;
+	u8 *da = sta->sta.addr;
 	static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0,
 					0, 0, 0, 0, 0, 0, 0, 0};
 	static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1,
@@ -201,7 +200,7 @@
 		tid_num = tid_num - 100;
 		if (tid_static_rx[tid_num] == 1) {
 			strcpy(state, "off ");
-			ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0,
+			ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0,
 					WLAN_REASON_QSTA_REQUIRE_SETUP);
 			sta->ampdu_mlme.tid_state_rx[tid_num] |=
 					HT_AGG_STATE_DEBUGFS_CTL;
@@ -253,7 +252,7 @@
 	if (!stations_dir)
 		return;
 
-	mac = print_mac(mbuf, sta->addr);
+	mac = print_mac(mbuf, sta->sta.addr);
 
 	sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
 	if (!sta->debugfs.dir)
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 2280f40..8de60de 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -8,7 +8,6 @@
  * mac80211 - events
  */
 
-#include <linux/netdevice.h>
 #include <net/iw_handler.h>
 #include "ieee80211_i.h"
 
@@ -17,7 +16,7 @@
  * (in the variable hdr) must be long enough to extract the TKIP
  * fields like TSC
  */
-void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
+void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
 				     struct ieee80211_hdr *hdr)
 {
 	union iwreq_data wrqu;
@@ -32,7 +31,7 @@
 			print_mac(mac, hdr->addr2));
 		memset(&wrqu, 0, sizeof(wrqu));
 		wrqu.data.length = strlen(buf);
-		wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
+		wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
 		kfree(buf);
 	}
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
new file mode 100644
index 0000000..dc7d9a3
--- /dev/null
+++ b/net/mac80211/ht.c
@@ -0,0 +1,992 @@
+/*
+ * HT handling
+ *
+ * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007-2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ieee80211.h>
+#include <net/wireless.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "sta_info.h"
+#include "wme.h"
+
+int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
+				   struct ieee80211_ht_info *ht_info)
+{
+	if (ht_info == NULL)
+		return -EINVAL;
+
+	memset(ht_info, 0, sizeof(*ht_info));
+
+	if (ht_cap_ie) {
+		u8 ampdu_info = ht_cap_ie->ampdu_params_info;
+
+		ht_info->ht_supported = 1;
+		ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
+		ht_info->ampdu_factor =
+			ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
+		ht_info->ampdu_density =
+			(ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
+		memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
+	} else
+		ht_info->ht_supported = 0;
+
+	return 0;
+}
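The helper above reduces the HT capability IE to a driver-friendly struct; the A-MPDU parameters octet packs the maximum length exponent into bits 1:0 and the minimum MPDU start spacing into bits 4:2, which is what the two masks and the shift by 2 express. A small illustrative decode, with the worked numbers in the comment given only as an example:

/* Illustrative decode of an A-MPDU parameters octet, mirroring the
 * masks and shift used in ieee80211_ht_cap_ie_to_ht_info() above. */
static void example_decode_ampdu_params(u8 ampdu_info)
{
	u8 factor = ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;	/* bits 1:0 */
	u8 density = (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2; /* bits 4:2 */

	/* e.g. ampdu_info == 0x17: factor == 3, i.e. the peer accepts
	 * A-MPDUs up to 2^(13 + 3) - 1 = 65535 octets; density == 5,
	 * i.e. a minimum MPDU start spacing of 4 microseconds. */
	printk(KERN_DEBUG "A-MPDU factor %d, density %d\n", factor, density);
}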
+
+int ieee80211_ht_addt_info_ie_to_ht_bss_info(
+			struct ieee80211_ht_addt_info *ht_add_info_ie,
+			struct ieee80211_ht_bss_info *bss_info)
+{
+	if (bss_info == NULL)
+		return -EINVAL;
+
+	memset(bss_info, 0, sizeof(*bss_info));
+
+	if (ht_add_info_ie) {
+		u16 op_mode;
+		op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
+
+		bss_info->primary_channel = ht_add_info_ie->control_chan;
+		bss_info->bss_cap = ht_add_info_ie->ht_param;
+		bss_info->bss_op_mode = (u8)(op_mode & 0xff);
+	}
+
+	return 0;
+}
+
+static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
+					 const u8 *da, u16 tid,
+					 u8 dialog_token, u16 start_seq_num,
+					 u16 agg_size, u16 timeout)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	u16 capab;
+
+	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
+
+	if (!skb) {
+		printk(KERN_ERR "%s: failed to allocate buffer "
+				"for addba request frame\n", sdata->dev->name);
+		return;
+	}
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+	memset(mgmt, 0, 24);
+	memcpy(mgmt->da, da, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+	else
+		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
+
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+
+	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
+
+	mgmt->u.action.category = WLAN_CATEGORY_BACK;
+	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
+
+	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
+	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
+	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
+	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */
+
+	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
+
+	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
+	mgmt->u.action.u.addba_req.start_seq_num =
+					cpu_to_le16(start_seq_num << 4);
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
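The capab word built above is the Block Ack Parameter Set of the ADDBA request: bit 0 is A-MSDU support (left clear here), bit 1 the BA policy, bits 5:2 the TID and bits 15:6 the buffer size. A hypothetical round trip showing the same layout being unpacked, as the receive side does later in ieee80211_process_addba_request():

/* Illustrative pack/unpack of the ADDBA Block Ack Parameter Set;
 * the bit layout matches the shifts used in this file. */
static u16 example_pack_addba_capab(u16 policy, u16 tid, u16 buf_size)
{
	return (policy << 1) | (tid << 2) | (buf_size << 6);
}

static void example_unpack_addba_capab(u16 capab)
{
	u16 policy = (capab >> 1) & 0x1;	/* bit 1 */
	u16 tid = (capab >> 2) & 0xf;		/* bits 5:2 */
	u16 buf_size = (capab >> 6) & 0x3ff;	/* bits 15:6 */

	/* e.g. policy 1, tid 6, buf_size 0x40 packs to 0x101a and
	 * unpacks back to the same three values */
	printk(KERN_DEBUG "policy %d tid %d buf_size %d\n",
	       policy, tid, buf_size);
}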
+
+static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
+				      u8 dialog_token, u16 status, u16 policy,
+				      u16 buf_size, u16 timeout)
+{
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	u16 capab;
+
+	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
+
+	if (!skb) {
+		printk(KERN_DEBUG "%s: failed to allocate buffer "
+		       "for addba resp frame\n", sdata->dev->name);
+		return;
+	}
+
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+	memset(mgmt, 0, 24);
+	memcpy(mgmt->da, da, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+	else
+		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+
+	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
+	mgmt->u.action.category = WLAN_CATEGORY_BACK;
+	mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
+	mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
+
+	capab = (u16)(policy << 1);	/* bit 1 aggregation policy */
+	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
+	capab |= (u16)(buf_size << 6);	/* bit 15:6 max size of aggregation */
+
+	mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
+	mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
+	mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
+				 const u8 *da, u16 tid,
+				 u16 initiator, u16 reason_code)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	u16 params;
+
+	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
+
+	if (!skb) {
+		printk(KERN_ERR "%s: failed to allocate buffer "
+					"for delba frame\n", sdata->dev->name);
+		return;
+	}
+
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+	memset(mgmt, 0, 24);
+	memcpy(mgmt->da, da, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+	else
+		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+
+	skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
+
+	mgmt->u.action.category = WLAN_CATEGORY_BACK;
+	mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
+	params = (u16)(initiator << 11); 	/* bit 11 initiator */
+	params |= (u16)(tid << 12); 		/* bit 15:12 TID number */
+
+	mgmt->u.action.u.delba.params = cpu_to_le16(params);
+	mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct ieee80211_bar *bar;
+	u16 bar_control = 0;
+
+	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
+	if (!skb) {
+		printk(KERN_ERR "%s: failed to allocate buffer for "
+			"bar frame\n", sdata->dev->name);
+		return;
+	}
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
+	memset(bar, 0, sizeof(*bar));
+	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+					 IEEE80211_STYPE_BACK_REQ);
+	memcpy(bar->ra, ra, ETH_ALEN);
+	memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
+	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
+	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
+	bar_control |= (u16)(tid << 12);
+	bar->control = cpu_to_le16(bar_control);
+	bar->start_seq_num = cpu_to_le16(ssn);
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
+					u16 initiator, u16 reason)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_hw *hw = &local->hw;
+	struct sta_info *sta;
+	int ret, i;
+	DECLARE_MAC_BUF(mac);
+
+	rcu_read_lock();
+
+	sta = sta_info_get(local, ra);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+
+	/* check if TID is in operational state */
+	spin_lock_bh(&sta->lock);
+	if (sta->ampdu_mlme.tid_state_rx[tid]
+				!= HT_AGG_STATE_OPERATIONAL) {
+		spin_unlock_bh(&sta->lock);
+		rcu_read_unlock();
+		return;
+	}
+	sta->ampdu_mlme.tid_state_rx[tid] =
+		HT_AGG_STATE_REQ_STOP_BA_MSK |
+		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+	spin_unlock_bh(&sta->lock);
+
+	/* stop HW Rx aggregation. ampdu_action existence
+	 * already verified in session init so we add the BUG_ON */
+	BUG_ON(!local->ops->ampdu_action);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
+				print_mac(mac, ra), tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
+				       &sta->sta, tid, NULL);
+	if (ret)
+		printk(KERN_DEBUG "HW problem - can not stop rx "
+				"aggregation for tid %d\n", tid);
+
+	/* shutdown timer has not expired */
+	if (initiator != WLAN_BACK_TIMER)
+		del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
+
+	/* check if this is a self generated aggregation halt */
+	if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
+		ieee80211_send_delba(sdata, ra, tid, 0, reason);
+
+	/* free the reordering buffer */
+	for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
+		if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
+			/* release the reordered frames */
+			dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
+			sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
+			sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
+		}
+	}
+	/* free resources */
+	kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
+	kfree(sta->ampdu_mlme.tid_rx[tid]);
+	sta->ampdu_mlme.tid_rx[tid] = NULL;
+	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
+
+	rcu_read_unlock();
+}
+
+
+/*
+ * After sending an ADDBA request we activate a timer that runs until
+ * the ADDBA response arrives from the recipient.
+ * If this timer expires, sta_addba_resp_timer_expired() is executed.
+ */
+static void sta_addba_resp_timer_expired(unsigned long data)
+{
+	/* not an elegant detour, but there is no choice as the timer passes
+	 * only one argument, and both the sta_info and the TID are needed, so
+	 * the init flow in sta_info_create gives the TID as data, while the
+	 * timer_to_tid array gives the sta through container_of */
+	u16 tid = *(u8 *)data;
+	struct sta_info *temp_sta = container_of((void *)data,
+		struct sta_info, timer_to_tid[tid]);
+
+	struct ieee80211_local *local = temp_sta->local;
+	struct ieee80211_hw *hw = &local->hw;
+	struct sta_info *sta;
+	u8 *state;
+
+	rcu_read_lock();
+
+	sta = sta_info_get(local, temp_sta->sta.addr);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	/* check if the TID waits for addBA response */
+	spin_lock_bh(&sta->lock);
+	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+		spin_unlock_bh(&sta->lock);
+		*state = HT_AGG_STATE_IDLE;
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "timer expired on tid %d but we are not "
+				"expecting addBA response there", tid);
+#endif
+		goto timer_expired_exit;
+	}
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
+#endif
+
+	/* go through the state check in stop_BA_session */
+	*state = HT_AGG_STATE_OPERATIONAL;
+	spin_unlock_bh(&sta->lock);
+	ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid,
+				     WLAN_BACK_INITIATOR);
+
+timer_expired_exit:
+	rcu_read_unlock();
+}
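The container_of() detour in the timer handler above deserves a plainer illustration: every entry of sta->timer_to_tid[] holds its own index, the timer's data pointer is aimed at one of those entries, and walking back from the array recovers the enclosing sta_info. A stripped-down sketch of the same pattern, with struct foo and its fields invented purely for illustration:

#include <linux/kernel.h>	/* container_of() */

struct foo {
	int other_state;
	u8 slot_to_id[8];	/* slot_to_id[i] == i, filled in at init time */
};

static void example_timer_fn(unsigned long data)
{
	u8 *slot = (u8 *)data;		/* points into slot_to_id[] */
	u8 id = *slot;			/* the array index itself */
	struct foo *f = container_of(slot - id, struct foo, slot_to_id[0]);

	/* both the owning object and the index are now available */
	(void)f;
	(void)id;
}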
+
+void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
+{
+	struct ieee80211_local *local = sdata->local;
+	int i;
+
+	for (i = 0; i <  STA_TID_NUM; i++) {
+		ieee80211_stop_tx_ba_session(&local->hw, addr, i,
+					     WLAN_BACK_INITIATOR);
+		ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
+						 WLAN_BACK_RECIPIENT,
+						 WLAN_REASON_QSTA_LEAVE_QBSS);
+	}
+}
+
+int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct sta_info *sta;
+	struct ieee80211_sub_if_data *sdata;
+	u16 start_seq_num;
+	u8 *state;
+	int ret;
+	DECLARE_MAC_BUF(mac);
+
+	if (tid >= STA_TID_NUM)
+		return -EINVAL;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
+				print_mac(mac, ra), tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	rcu_read_lock();
+
+	sta = sta_info_get(local, ra);
+	if (!sta) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Could not find the station\n");
+#endif
+		ret = -ENOENT;
+		goto exit;
+	}
+
+	spin_lock_bh(&sta->lock);
+
+	/* we have tried too many times, receiver does not want A-MPDU */
+	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
+		ret = -EBUSY;
+		goto err_unlock_sta;
+	}
+
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	/* check if the TID is not in aggregation flow already */
+	if (*state != HT_AGG_STATE_IDLE) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - session is not "
+				 "idle on tid %u\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ret = -EAGAIN;
+		goto err_unlock_sta;
+	}
+
+	/* prepare A-MPDU MLME for Tx aggregation */
+	sta->ampdu_mlme.tid_tx[tid] =
+			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+	if (!sta->ampdu_mlme.tid_tx[tid]) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
+					tid);
+#endif
+		ret = -ENOMEM;
+		goto err_unlock_sta;
+	}
+	/* Tx timer */
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
+			sta_addba_resp_timer_expired;
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
+			(unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
+	/* create a new queue for this aggregation */
+	ret = ieee80211_ht_agg_queue_add(local, sta, tid);
+
+	/* in case no queue is available for aggregation,
+	 * don't switch to aggregation */
+	if (ret) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - queue unavailable for"
+					" tid %d\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		goto err_unlock_queue;
+	}
+	sdata = sta->sdata;
+
+	/* Ok, the ADDBA frame hasn't been sent yet, but if the driver calls the
+	 * callback right away, it must see that the flow has begun */
+	*state |= HT_ADDBA_REQUESTED_MSK;
+
+	/* This is slightly racy because the queue isn't stopped */
+	start_seq_num = sta->tid_seq[tid];
+
+	if (local->ops->ampdu_action)
+		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
+					       &sta->sta, tid, &start_seq_num);
+
+	if (ret) {
+		/* No need to requeue the packets in the agg queue, since we
+		 * held the tx lock: no packet could be enqueued to the newly
+		 * allocated queue */
+		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - HW unavailable for"
+					" tid %d\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		*state = HT_AGG_STATE_IDLE;
+		goto err_unlock_queue;
+	}
+
+	/* Will put all the packets in the new SW queue */
+	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
+	spin_unlock_bh(&sta->lock);
+
+	/* send an addBA request */
+	sta->ampdu_mlme.dialog_token_allocator++;
+	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
+			sta->ampdu_mlme.dialog_token_allocator;
+	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+
+	ieee80211_send_addba_request(sta->sdata, ra, tid,
+			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
+			 sta->ampdu_mlme.tid_tx[tid]->ssn,
+			 0x40, 5000);
+	/* activate the timer for the recipient's addBA response */
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
+				jiffies + ADDBA_RESP_INTERVAL;
+	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+	goto exit;
+
+err_unlock_queue:
+	kfree(sta->ampdu_mlme.tid_tx[tid]);
+	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	ret = -EBUSY;
+err_unlock_sta:
+	spin_unlock_bh(&sta->lock);
+exit:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
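The TX half of the aggregation state machine above only completes once the driver reports readiness: mac80211 issues IEEE80211_AMPDU_TX_START through ops->ampdu_action, and the driver answers with ieee80211_start_tx_ba_cb_irqsafe(), which sets HT_ADDBA_DRV_READY_MSK. A hedged sketch of the driver side, where the my_hw_* helpers are hypothetical:

/* Hypothetical driver implementation; the signature matches the
 * ops->ampdu_action calls made from this file. */
static int my_ampdu_action(struct ieee80211_hw *hw,
			   enum ieee80211_ampdu_mlme_action action,
			   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		my_hw_setup_tx_agg(hw, sta->addr, tid, *ssn);
		/* tell mac80211 the session may proceed */
		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		my_hw_teardown_tx_agg(hw, sta->addr, tid);
		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}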
+
+int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
+				 u8 *ra, u16 tid,
+				 enum ieee80211_back_parties initiator)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct sta_info *sta;
+	u8 *state;
+	int ret = 0;
+	DECLARE_MAC_BUF(mac);
+
+	if (tid >= STA_TID_NUM)
+		return -EINVAL;
+
+	rcu_read_lock();
+	sta = sta_info_get(local, ra);
+	if (!sta) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
+
+	/* check if the TID is in aggregation */
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	spin_lock_bh(&sta->lock);
+
+	if (*state != HT_AGG_STATE_OPERATIONAL) {
+		ret = -ENOENT;
+		goto stop_BA_exit;
+	}
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
+				print_mac(mac, ra), tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
+
+	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
+		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+
+	if (local->ops->ampdu_action)
+		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
+					       &sta->sta, tid, NULL);
+
+	/* in case the HW denied going back to legacy */
+	if (ret) {
+		WARN_ON(ret != -EBUSY);
+		*state = HT_AGG_STATE_OPERATIONAL;
+		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
+		goto stop_BA_exit;
+	}
+
+stop_BA_exit:
+	spin_unlock_bh(&sta->lock);
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
+
+void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct sta_info *sta;
+	u8 *state;
+	DECLARE_MAC_BUF(mac);
+
+	if (tid >= STA_TID_NUM) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
+				tid, STA_TID_NUM);
+#endif
+		return;
+	}
+
+	rcu_read_lock();
+	sta = sta_info_get(local, ra);
+	if (!sta) {
+		rcu_read_unlock();
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Could not find station: %s\n",
+				print_mac(mac, ra));
+#endif
+		return;
+	}
+
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	spin_lock_bh(&sta->lock);
+
+	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
+				*state);
+#endif
+		spin_unlock_bh(&sta->lock);
+		rcu_read_unlock();
+		return;
+	}
+
+	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
+
+	*state |= HT_ADDBA_DRV_READY_MSK;
+
+	if (*state == HT_AGG_STATE_OPERATIONAL) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
+#endif
+		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
+	}
+	spin_unlock_bh(&sta->lock);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
+
+void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct sta_info *sta;
+	u8 *state;
+	int agg_queue;
+	DECLARE_MAC_BUF(mac);
+
+	if (tid >= STA_TID_NUM) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
+				tid, STA_TID_NUM);
+#endif
+		return;
+	}
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
+				print_mac(mac, ra), tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	rcu_read_lock();
+	sta = sta_info_get(local, ra);
+	if (!sta) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "Could not find station: %s\n",
+				print_mac(mac, ra));
+#endif
+		rcu_read_unlock();
+		return;
+	}
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+
+	/* NOTE: no need to use sta->lock in this state check, as
+	 * ieee80211_stop_tx_ba_session will let only one stop call to
+	 * pass through per sta/tid
+	 */
+	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
+#endif
+		rcu_read_unlock();
+		return;
+	}
+
+	if (*state & HT_AGG_STATE_INITIATOR_MSK)
+		ieee80211_send_delba(sta->sdata, ra, tid,
+			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
+
+	agg_queue = sta->tid_to_tx_q[tid];
+
+	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
+
+	/* We just requeued all the frames that were in the
+	 * removed queue, and since we might miss a softirq we do
+	 * netif_schedule_queue.  ieee80211_wake_queue is not used
+	 * here as this queue is not necessarily stopped
+	 */
+	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
+	spin_lock_bh(&sta->lock);
+	*state = HT_AGG_STATE_IDLE;
+	sta->ampdu_mlme.addba_req_num[tid] = 0;
+	kfree(sta->ampdu_mlme.tid_tx[tid]);
+	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	spin_unlock_bh(&sta->lock);
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
+
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
+				      const u8 *ra, u16 tid)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_ra_tid *ra_tid;
+	struct sk_buff *skb = dev_alloc_skb(0);
+
+	if (unlikely(!skb)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s: Not enough memory, "
+			       "dropping start BA session", skb->dev->name);
+#endif
+		return;
+	}
+	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
+	memcpy(&ra_tid->ra, ra, ETH_ALEN);
+	ra_tid->tid = tid;
+
+	skb->pkt_type = IEEE80211_ADDBA_MSG;
+	skb_queue_tail(&local->skb_queue, skb);
+	tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
+
+void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
+				     const u8 *ra, u16 tid)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_ra_tid *ra_tid;
+	struct sk_buff *skb = dev_alloc_skb(0);
+
+	if (unlikely(!skb)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s: Not enough memory, "
+			       "dropping stop BA session", skb->dev->name);
+#endif
+		return;
+	}
+	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
+	memcpy(&ra_tid->ra, ra, ETH_ALEN);
+	ra_tid->tid = tid;
+
+	skb->pkt_type = IEEE80211_DELBA_MSG;
+	skb_queue_tail(&local->skb_queue, skb);
+	tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
+
+/*
+ * After accepting the AddBA Request we activate a timer and reset it
+ * after each frame that arrives from the originator.
+ * If this timer expires, ieee80211_sta_stop_rx_ba_session() is executed.
+ */
+static void sta_rx_agg_session_timer_expired(unsigned long data)
+{
+	/* not an elegant detour, but there is no choice as the timer passes
+	 * only one argument, and both the sta_info and the TID are needed
+	 * here, so the init flow in sta_info_create gives the TID as data,
+	 * while the timer_to_tid array gives the sta through container_of */
+	u8 *ptid = (u8 *)data;
+	u8 *timer_to_id = ptid - *ptid;
+	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+					 timer_to_tid[0]);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
+#endif
+	ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
+					 (u16)*ptid, WLAN_BACK_TIMER,
+					 WLAN_REASON_QSTA_TIMEOUT);
+}
+
+void ieee80211_process_addba_request(struct ieee80211_local *local,
+				     struct sta_info *sta,
+				     struct ieee80211_mgmt *mgmt,
+				     size_t len)
+{
+	struct ieee80211_hw *hw = &local->hw;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct tid_ampdu_rx *tid_agg_rx;
+	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
+	u8 dialog_token;
+	int ret = -EOPNOTSUPP;
+	DECLARE_MAC_BUF(mac);
+
+	/* extract session parameters from addba request frame */
+	dialog_token = mgmt->u.action.u.addba_req.dialog_token;
+	timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+	start_seq_num =
+		le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
+
+	capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
+	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+
+	status = WLAN_STATUS_REQUEST_DECLINED;
+
+	/* sanity check for incoming parameters:
+	 * check if the configuration can support the BA policy
+	 * and if the buffer size does not exceed the max value */
+	if (((ba_policy != 1)
+		&& (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
+		|| (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
+		status = WLAN_STATUS_INVALID_QOS_PARAM;
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_DEBUG "AddBA Req with bad params from "
+				"%s on tid %u. policy %d, buffer size %d\n",
+				print_mac(mac, mgmt->sa), tid, ba_policy,
+				buf_size);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		goto end_no_lock;
+	}
+	/* determine default buffer size */
+	if (buf_size == 0) {
+		struct ieee80211_supported_band *sband;
+
+		sband = local->hw.wiphy->bands[conf->channel->band];
+		buf_size = IEEE80211_MIN_AMPDU_BUF;
+		buf_size = buf_size << sband->ht_info.ampdu_factor;
+	}
+
+
+	spin_lock_bh(&sta->lock);
+
+	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_DEBUG "unexpected AddBA Req from "
+				"%s on tid %u\n",
+				print_mac(mac, mgmt->sa), tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		goto end;
+	}
+
+	/* prepare A-MPDU MLME for Rx aggregation */
+	sta->ampdu_mlme.tid_rx[tid] =
+			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+	if (!sta->ampdu_mlme.tid_rx[tid]) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
+					tid);
+#endif
+		goto end;
+	}
+	/* rx timer */
+	sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
+				sta_rx_agg_session_timer_expired;
+	sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
+				(unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
+
+	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
+	/* prepare reordering buffer */
+	tid_agg_rx->reorder_buf =
+		kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
+	if (!tid_agg_rx->reorder_buf) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		if (net_ratelimit())
+			printk(KERN_ERR "can not allocate reordering buffer "
+			       "to tid %d\n", tid);
+#endif
+		kfree(sta->ampdu_mlme.tid_rx[tid]);
+		goto end;
+	}
+	memset(tid_agg_rx->reorder_buf, 0,
+		buf_size * sizeof(struct sk_buff *));
+
+	if (local->ops->ampdu_action)
+		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
+					       &sta->sta, tid, &start_seq_num);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	if (ret) {
+		kfree(tid_agg_rx->reorder_buf);
+		kfree(tid_agg_rx);
+		sta->ampdu_mlme.tid_rx[tid] = NULL;
+		goto end;
+	}
+
+	/* change state and send addba resp */
+	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
+	tid_agg_rx->dialog_token = dialog_token;
+	tid_agg_rx->ssn = start_seq_num;
+	tid_agg_rx->head_seq_num = start_seq_num;
+	tid_agg_rx->buf_size = buf_size;
+	tid_agg_rx->timeout = timeout;
+	tid_agg_rx->stored_mpdu_num = 0;
+	status = WLAN_STATUS_SUCCESS;
+end:
+	spin_unlock_bh(&sta->lock);
+
+end_no_lock:
+	ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
+				  dialog_token, status, 1, buf_size, timeout);
+}
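When the originator leaves the buffer size at zero, the code above derives a default from the local band's A-MPDU factor, so the reorder window scales with what the hardware advertised. A small sketch of that computation; the concrete frame counts in the comments follow the IEEE80211_MIN_AMPDU_BUF/IEEE80211_MAX_AMPDU_BUF definitions and are only illustrative:

/* Default reorder-buffer sizing, as done above for buf_size == 0. */
static u16 example_default_ba_buf_size(u8 ampdu_factor)
{
	u16 buf_size = IEEE80211_MIN_AMPDU_BUF;	/* minimum window, 8 frames */

	/* ampdu_factor is a 2-bit field (0..3), so the result never
	 * exceeds IEEE80211_MAX_AMPDU_BUF (e.g. factor 2 -> 32 frames) */
	return buf_size << ampdu_factor;
}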
+
+void ieee80211_process_addba_resp(struct ieee80211_local *local,
+				  struct sta_info *sta,
+				  struct ieee80211_mgmt *mgmt,
+				  size_t len)
+{
+	struct ieee80211_hw *hw = &local->hw;
+	u16 capab;
+	u16 tid;
+	u8 *state;
+
+	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
+
+	spin_lock_bh(&sta->lock);
+
+	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+		spin_unlock_bh(&sta->lock);
+		return;
+	}
+
+	if (mgmt->u.action.u.addba_resp.dialog_token !=
+		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
+		spin_unlock_bh(&sta->lock);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		return;
+	}
+
+	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
+			== WLAN_STATUS_SUCCESS) {
+		*state |= HT_ADDBA_RECEIVED_MSK;
+		sta->ampdu_mlme.addba_req_num[tid] = 0;
+
+		if (*state == HT_AGG_STATE_OPERATIONAL)
+			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
+
+		spin_unlock_bh(&sta->lock);
+	} else {
+		sta->ampdu_mlme.addba_req_num[tid]++;
+		/* this will allow the state check in stop_BA_session */
+		*state = HT_AGG_STATE_OPERATIONAL;
+		spin_unlock_bh(&sta->lock);
+		ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid,
+					     WLAN_BACK_INITIATOR);
+	}
+}
+
+void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
+			     struct sta_info *sta,
+			     struct ieee80211_mgmt *mgmt, size_t len)
+{
+	struct ieee80211_local *local = sdata->local;
+	u16 tid, params;
+	u16 initiator;
+	DECLARE_MAC_BUF(mac);
+
+	params = le16_to_cpu(mgmt->u.action.u.delba.params);
+	tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
+	initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	if (net_ratelimit())
+		printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
+			print_mac(mac, mgmt->sa),
+			initiator ? "initiator" : "recipient", tid,
+			mgmt->u.action.u.delba.reason_code);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	if (initiator == WLAN_BACK_INITIATOR)
+		ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
+						 WLAN_BACK_INITIATOR, 0);
+	else { /* WLAN_BACK_RECIPIENT */
+		spin_lock_bh(&sta->lock);
+		sta->ampdu_mlme.tid_state_tx[tid] =
+				HT_AGG_STATE_OPERATIONAL;
+		spin_unlock_bh(&sta->lock);
+		ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
+					     WLAN_BACK_RECIPIENT);
+	}
+}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4498d87..8025b29 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -29,17 +29,6 @@
 #include "key.h"
 #include "sta_info.h"
 
-/* ieee80211.o internal definitions, etc. These are not included into
- * low-level drivers. */
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08)
-
-#define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype)
-
 struct ieee80211_local;
 
 /* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -61,6 +50,12 @@
  * increased memory use (about 2 kB of RAM per entry). */
 #define IEEE80211_FRAGMENT_MAX 4
 
+/*
+ * Time after which we ignore scan results and no longer report/use
+ * them in any way.
+ */
+#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
+
 struct ieee80211_fragment_entry {
 	unsigned long first_frag_time;
 	unsigned int seq;
@@ -73,9 +68,9 @@
 };
 
 
-struct ieee80211_sta_bss {
+struct ieee80211_bss {
 	struct list_head list;
-	struct ieee80211_sta_bss *hnext;
+	struct ieee80211_bss *hnext;
 	size_t ssid_len;
 
 	atomic_t users;
@@ -87,16 +82,11 @@
 	enum ieee80211_band band;
 	int freq;
 	int signal, noise, qual;
-	u8 *wpa_ie;
-	size_t wpa_ie_len;
-	u8 *rsn_ie;
-	size_t rsn_ie_len;
-	u8 *wmm_ie;
-	size_t wmm_ie_len;
-	u8 *ht_ie;
-	size_t ht_ie_len;
-	u8 *ht_add_ie;
-	size_t ht_add_ie_len;
+	u8 *ies; /* all information elements from the last Beacon or Probe
+		  * Response frame; note that a Beacon frame is not allowed
+		  * to override values from a Probe Response */
+	size_t ies_len;
+	bool wmm_used;
 #ifdef CONFIG_MAC80211_MESH
 	u8 *mesh_id;
 	size_t mesh_id_len;
@@ -108,7 +98,7 @@
 	u64 timestamp;
 	int beacon_int;
 
-	bool probe_resp;
+	unsigned long last_probe_resp;
 	unsigned long last_update;
 
 	/* during association, we save an ERP value from a probe response so
@@ -119,7 +109,7 @@
 	u8 erp_value;
 };
 
-static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss)
+static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
 {
 #ifdef CONFIG_MAC80211_MESH
 	return bss->mesh_cfg;
@@ -127,7 +117,7 @@
 	return NULL;
 }
 
-static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss)
+static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
 {
 #ifdef CONFIG_MAC80211_MESH
 	return bss->mesh_id;
@@ -135,7 +125,7 @@
 	return NULL;
 }
 
-static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss)
+static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
 {
 #ifdef CONFIG_MAC80211_MESH
 	return bss->mesh_id_len;
@@ -174,7 +164,7 @@
 	struct sk_buff **extra_frag;
 	int num_extra_frag;
 
-	u16 fc, ethertype;
+	u16 ethertype;
 	unsigned int flags;
 };
 
@@ -202,7 +192,7 @@
 	struct ieee80211_rx_status *status;
 	struct ieee80211_rate *rate;
 
-	u16 fc, ethertype;
+	u16 ethertype;
 	unsigned int flags;
 	int sent_ps_buffered;
 	int queue;
@@ -239,7 +229,6 @@
 	struct sk_buff_head ps_bc_buf;
 	atomic_t num_sta_ps; /* number of stations in PS mode */
 	int dtim_count;
-	int num_beacons; /* number of TXed beacon frames for this BSS */
 };
 
 struct ieee80211_if_wds {
@@ -300,22 +289,78 @@
 #define IEEE80211_STA_AUTO_BSSID_SEL	BIT(11)
 #define IEEE80211_STA_AUTO_CHANNEL_SEL	BIT(12)
 #define IEEE80211_STA_PRIVACY_INVOKED	BIT(13)
+/* flags for MLME request */
+#define IEEE80211_STA_REQ_SCAN 0
+#define IEEE80211_STA_REQ_DIRECT_PROBE 1
+#define IEEE80211_STA_REQ_AUTH 2
+#define IEEE80211_STA_REQ_RUN  3
+
+/* STA/IBSS MLME states */
+enum ieee80211_sta_mlme_state {
+	IEEE80211_STA_MLME_DISABLED,
+	IEEE80211_STA_MLME_DIRECT_PROBE,
+	IEEE80211_STA_MLME_AUTHENTICATE,
+	IEEE80211_STA_MLME_ASSOCIATE,
+	IEEE80211_STA_MLME_ASSOCIATED,
+	IEEE80211_STA_MLME_IBSS_SEARCH,
+	IEEE80211_STA_MLME_IBSS_JOINED,
+};
+
+/* bitfield of allowed auth algs */
+#define IEEE80211_AUTH_ALG_OPEN BIT(0)
+#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
+#define IEEE80211_AUTH_ALG_LEAP BIT(2)
+
 struct ieee80211_if_sta {
 	struct timer_list timer;
 	struct work_struct work;
 	u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
-	enum {
-		IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
-		IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
-		IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED,
-		IEEE80211_MESH_UP
-	} state;
+	enum ieee80211_sta_mlme_state state;
 	size_t ssid_len;
 	u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
 	size_t scan_ssid_len;
-#ifdef CONFIG_MAC80211_MESH
+	u16 aid;
+	u16 ap_capab, capab;
+	u8 *extra_ie; /* to be added to the end of AssocReq */
+	size_t extra_ie_len;
+
+	/* The last AssocReq/Resp IEs */
+	u8 *assocreq_ies, *assocresp_ies;
+	size_t assocreq_ies_len, assocresp_ies_len;
+
+	struct sk_buff_head skb_queue;
+
+	int assoc_scan_tries; /* number of scans done pre-association */
+	int direct_probe_tries; /* retries for direct probes */
+	int auth_tries; /* retries for auth req */
+	int assoc_tries; /* retries for assoc req */
+
+	unsigned long request;
+
+	unsigned long last_probe;
+
+	unsigned int flags;
+
+	unsigned int auth_algs; /* bitfield of allowed auth algs */
+	int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
+	int auth_transaction;
+
+	unsigned long ibss_join_req;
+	struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
+	u32 supp_rates_bits[IEEE80211_NUM_BANDS];
+
+	int wmm_last_param_set;
+};
+
+struct ieee80211_if_mesh {
+	struct work_struct work;
+	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
+	struct sk_buff_head skb_queue;
+
+	bool housekeeping;
+
 	u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
 	size_t mesh_id_len;
 	/* Active Path Selection Protocol Identifier */
@@ -341,66 +386,35 @@
 	struct mesh_config mshcfg;
 	u32 mesh_seqnum;
 	bool accepting_plinks;
-#endif
-	u16 aid;
-	u16 ap_capab, capab;
-	u8 *extra_ie; /* to be added to the end of AssocReq */
-	size_t extra_ie_len;
-
-	/* The last AssocReq/Resp IEs */
-	u8 *assocreq_ies, *assocresp_ies;
-	size_t assocreq_ies_len, assocresp_ies_len;
-
-	struct sk_buff_head skb_queue;
-
-	int auth_tries, assoc_tries;
-
-	unsigned long request;
-
-	unsigned long last_probe;
-
-	unsigned int flags;
-#define IEEE80211_STA_REQ_SCAN 0
-#define IEEE80211_STA_REQ_AUTH 1
-#define IEEE80211_STA_REQ_RUN  2
-
-#define IEEE80211_AUTH_ALG_OPEN BIT(0)
-#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
-#define IEEE80211_AUTH_ALG_LEAP BIT(2)
-	unsigned int auth_algs; /* bitfield of allowed auth algs */
-	int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
-	int auth_transaction;
-
-	unsigned long ibss_join_req;
-	struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
-	u32 supp_rates_bits[IEEE80211_NUM_BANDS];
-
-	int wmm_last_param_set;
-	int num_beacons; /* number of TXed beacon frames by this STA */
 };
 
-static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta,
-						u8 mesh_id_len, u8 *mesh_id)
-{
 #ifdef CONFIG_MAC80211_MESH
-	ifsta->mesh_id_len = mesh_id_len;
-	memcpy(ifsta->mesh_id, mesh_id, mesh_id_len);
-#endif
-}
-
-#ifdef CONFIG_MAC80211_MESH
-#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name)	\
-	do { (sta)->mshstats.name++; } while (0)
+#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name)	\
+	do { (msh)->mshstats.name++; } while (0)
 #else
-#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \
+#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
 	do { } while (0)
 #endif
 
-/* flags used in struct ieee80211_sub_if_data.flags */
-#define IEEE80211_SDATA_ALLMULTI	BIT(0)
-#define IEEE80211_SDATA_PROMISC		BIT(1)
-#define IEEE80211_SDATA_USERSPACE_MLME	BIT(2)
-#define IEEE80211_SDATA_OPERATING_GMODE	BIT(3)
+/**
+ * enum ieee80211_sub_if_data_flags - virtual interface flags
+ *
+ * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
+ * @IEEE80211_SDATA_PROMISC: interface is promisc
+ * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
+ * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
+ * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: do not bridge packets between
+ *	associated stations and do not deliver multicast frames back
+ *	to the wireless media (deliver them to the local net stack only).
+ */
+enum ieee80211_sub_if_data_flags {
+	IEEE80211_SDATA_ALLMULTI		= BIT(0),
+	IEEE80211_SDATA_PROMISC			= BIT(1),
+	IEEE80211_SDATA_USERSPACE_MLME		= BIT(2),
+	IEEE80211_SDATA_OPERATING_GMODE		= BIT(3),
+	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(4),
+};
+
 struct ieee80211_sub_if_data {
 	struct list_head list;
 
@@ -416,11 +430,6 @@
 
 	int drop_unencrypted;
 
-	/*
-	 * basic rates of this AP or the AP we're associated to
-	 */
-	u64 basic_rates;
-
 	/* Fragment table for host-based reassembly */
 	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
@@ -447,6 +456,9 @@
 		struct ieee80211_if_wds wds;
 		struct ieee80211_if_vlan vlan;
 		struct ieee80211_if_sta sta;
+#ifdef CONFIG_MAC80211_MESH
+		struct ieee80211_if_mesh mesh;
+#endif
 		u32 mntr_flags;
 	} u;
 
@@ -469,7 +481,6 @@
 			struct dentry *auth_alg;
 			struct dentry *auth_transaction;
 			struct dentry *flags;
-			struct dentry *num_beacons_sta;
 			struct dentry *force_unicast_rateidx;
 			struct dentry *max_ratectrl_rateidx;
 		} sta;
@@ -477,7 +488,6 @@
 			struct dentry *drop_unencrypted;
 			struct dentry *num_sta_ps;
 			struct dentry *dtim_count;
-			struct dentry *num_beacons;
 			struct dentry *force_unicast_rateidx;
 			struct dentry *max_ratectrl_rateidx;
 			struct dentry *num_buffered_multicast;
@@ -540,6 +550,19 @@
 	return container_of(p, struct ieee80211_sub_if_data, vif);
 }
 
+static inline void
+ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
+			    u8 mesh_id_len, u8 *mesh_id)
+{
+#ifdef CONFIG_MAC80211_MESH
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	ifmsh->mesh_id_len = mesh_id_len;
+	memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len);
+#else
+	WARN_ON(1);
+#endif
+}
+
 enum {
 	IEEE80211_RX_MSG	= 1,
 	IEEE80211_TX_STATUS_MSG	= 2,
@@ -550,6 +573,10 @@
 /* maximum number of hardware queues we support. */
 #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
 
+struct ieee80211_master_priv {
+	struct ieee80211_local *local;
+};
+
 struct ieee80211_local {
 	/* embed the driver visible part.
 	 * don't cast (use the static inlines below), but we keep
@@ -613,10 +640,6 @@
 	struct crypto_blkcipher *wep_rx_tfm;
 	u32 wep_iv;
 
-	int bridge_packets; /* bridge packets between associated stations and
-			     * deliver multicast frames both back to wireless
-			     * media and to the local net stack */
-
 	struct list_head interfaces;
 
 	/*
@@ -626,21 +649,21 @@
 	spinlock_t key_lock;
 
 
-	bool sta_sw_scanning;
-	bool sta_hw_scanning;
+	/* Scanning and BSS list */
+	bool sw_scanning, hw_scanning;
 	int scan_channel_idx;
 	enum ieee80211_band scan_band;
 
 	enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
 	unsigned long last_scan_completed;
 	struct delayed_work scan_work;
-	struct net_device *scan_dev;
+	struct ieee80211_sub_if_data *scan_sdata;
 	struct ieee80211_channel *oper_channel, *scan_channel;
 	u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
 	size_t scan_ssid_len;
-	struct list_head sta_bss_list;
-	struct ieee80211_sta_bss *sta_bss_hash[STA_HASH_SIZE];
-	spinlock_t sta_bss_lock;
+	struct list_head bss_list;
+	struct ieee80211_bss *bss_hash[STA_HASH_SIZE];
+	spinlock_t bss_lock;
 
 	/* SNMP counters */
 	/* dot11CountersTable */
@@ -701,10 +724,11 @@
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct local_debugfsdentries {
+		struct dentry *rcdir;
+		struct dentry *rcname;
 		struct dentry *frequency;
 		struct dentry *antenna_sel_tx;
 		struct dentry *antenna_sel_rx;
-		struct dentry *bridge_packets;
 		struct dentry *rts_threshold;
 		struct dentry *fragmentation_threshold;
 		struct dentry *short_retry_limit;
@@ -774,6 +798,9 @@
 
 /* Parsed Information Elements */
 struct ieee802_11_elems {
+	u8 *ie_start;
+	size_t total_len;
+
 	/* pointers to IEs */
 	u8 *ssid;
 	u8 *supp_rates;
@@ -857,86 +884,82 @@
 }
 
 
-/* ieee80211.c */
 int ieee80211_hw_config(struct ieee80211_local *local);
 int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
 u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
 			struct ieee80211_ht_info *req_ht_cap,
 			struct ieee80211_ht_bss_info *req_bss_cap);
-
-/* ieee80211_ioctl.c */
-extern const struct iw_handler_def ieee80211_iw_handler_def;
-int ieee80211_set_freq(struct net_device *dev, int freq);
-
-/* ieee80211_sta.c */
-void ieee80211_sta_timer(unsigned long data);
-void ieee80211_sta_work(struct work_struct *work);
-void ieee80211_sta_scan_work(struct work_struct *work);
-void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
-			   struct ieee80211_rx_status *rx_status);
-int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len);
-int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len);
-int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid);
-int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len);
-void ieee80211_sta_req_auth(struct net_device *dev,
-			    struct ieee80211_if_sta *ifsta);
-int ieee80211_sta_scan_results(struct net_device *dev,
-			       struct iw_request_info *info,
-			       char *buf, size_t len);
-ieee80211_rx_result ieee80211_sta_rx_scan(
-	struct net_device *dev, struct sk_buff *skb,
-	struct ieee80211_rx_status *rx_status);
-void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
-void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
-int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
-struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
-					struct sk_buff *skb, u8 *bssid,
-					u8 *addr, u64 supp_rates);
-int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason);
-int ieee80211_sta_disassociate(struct net_device *dev, u16 reason);
 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 				      u32 changed);
-u32 ieee80211_reset_erp_info(struct net_device *dev);
-int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
-				   struct ieee80211_ht_info *ht_info);
-int ieee80211_ht_addt_info_ie_to_ht_bss_info(
-			struct ieee80211_ht_addt_info *ht_add_info_ie,
-			struct ieee80211_ht_bss_info *bss_info);
-void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
-				  u16 tid, u8 dialog_token, u16 start_seq_num,
-				  u16 agg_size, u16 timeout);
-void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
-				u16 initiator, u16 reason_code);
-void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn);
+void ieee80211_configure_filter(struct ieee80211_local *local);
 
-void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
-				u16 tid, u16 initiator, u16 reason);
-void sta_addba_resp_timer_expired(unsigned long data);
-void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
+/* wireless extensions */
+extern const struct iw_handler_def ieee80211_iw_handler_def;
+
+/* STA/IBSS code */
+void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
+void ieee80211_scan_work(struct work_struct *work);
+void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+			   struct ieee80211_rx_status *rx_status);
+int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
+int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
+int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
+void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
+			    struct ieee80211_if_sta *ifsta);
+struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
+					struct sk_buff *skb, u8 *bssid,
+					u8 *addr, u64 supp_rates);
+int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
+int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
+u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
 			    struct ieee802_11_elems *elems,
 			    enum ieee80211_band band);
-void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
-		int encrypt);
-void ieee802_11_parse_elems(u8 *start, size_t len,
-				   struct ieee802_11_elems *elems);
+void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
+			      u8 *ssid, size_t ssid_len);
 
-#ifdef CONFIG_MAC80211_MESH
-void ieee80211_start_mesh(struct net_device *dev);
-#else
-static inline void ieee80211_start_mesh(struct net_device *dev)
-{}
-#endif
+/* scan/BSS handling */
+int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
+			   u8 *ssid, size_t ssid_len);
+int ieee80211_scan_results(struct ieee80211_local *local,
+			   struct iw_request_info *info,
+			   char *buf, size_t len);
+ieee80211_rx_result
+ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
+		  struct sk_buff *skb,
+		  struct ieee80211_rx_status *rx_status);
+void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
+void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
+int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
+			       char *ie, size_t len);
+
+void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
+int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
+			 u8 *ssid, size_t ssid_len);
+struct ieee80211_bss *
+ieee80211_bss_info_update(struct ieee80211_local *local,
+			  struct ieee80211_rx_status *rx_status,
+			  struct ieee80211_mgmt *mgmt,
+			  size_t len,
+			  struct ieee802_11_elems *elems,
+			  int freq, bool beacon);
+struct ieee80211_bss *
+ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
+		     u8 *ssid, u8 ssid_len);
+struct ieee80211_bss *
+ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
+		     u8 *ssid, u8 ssid_len);
+void ieee80211_rx_bss_put(struct ieee80211_local *local,
+			  struct ieee80211_bss *bss);
 
 /* interface handling */
-void ieee80211_if_setup(struct net_device *dev);
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
-		     struct net_device **new_dev, enum ieee80211_if_types type,
+		     struct net_device **new_dev, enum nl80211_iftype type,
 		     struct vif_params *params);
 int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
-			     enum ieee80211_if_types type);
-void ieee80211_if_remove(struct net_device *dev);
+			     enum nl80211_iftype type);
+void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
 void ieee80211_remove_interfaces(struct ieee80211_local *local);
 
 /* tx handling */
@@ -946,16 +969,52 @@
 int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
 int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
+/* HT */
+int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
+				   struct ieee80211_ht_info *ht_info);
+int ieee80211_ht_addt_info_ie_to_ht_bss_info(
+			struct ieee80211_ht_addt_info *ht_add_info_ie,
+			struct ieee80211_ht_bss_info *bss_info);
+void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
+
+void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
+				u16 tid, u16 initiator, u16 reason);
+void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr);
+void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
+			     struct sta_info *sta,
+			     struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_addba_resp(struct ieee80211_local *local,
+				  struct sta_info *sta,
+				  struct ieee80211_mgmt *mgmt,
+				  size_t len);
+void ieee80211_process_addba_request(struct ieee80211_local *local,
+				     struct sta_info *sta,
+				     struct ieee80211_mgmt *mgmt,
+				     size_t len);
+
+/* Spectrum management */
+void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_mgmt *mgmt,
+				       size_t len);
+
 /* utility functions/constants */
 extern void *mac80211_wiphy_privid; /* for wiphy privid */
 extern const unsigned char rfc1042_header[6];
 extern const unsigned char bridge_tunnel_header[6];
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum ieee80211_if_types type);
+			enum nl80211_iftype type);
 int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
 			     int rate, int erp, int short_preamble);
-void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
+void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
 				     struct ieee80211_hdr *hdr);
+void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		      int encrypt);
+void ieee802_11_parse_elems(u8 *start, size_t len,
+			    struct ieee802_11_elems *elems);
+int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
+u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
+			      enum ieee80211_band band);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 610ed1d..8336fee 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1,4 +1,6 @@
 /*
+ * Interface handling (except master interface)
+ *
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
@@ -17,7 +19,539 @@
 #include "sta_info.h"
 #include "debugfs_netdev.h"
 #include "mesh.h"
+#include "led.h"
 
+static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int meshhdrlen;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
+
+	/* FIX: what would be proper limits for MTU?
+	 * This interface uses 802.3 frames. */
+	if (new_mtu < 256 ||
+	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
+#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+	dev->mtu = new_mtu;
+	return 0;
+}
+
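+/*
+ * Check whether two interface types may use the same MAC address:
+ * monitor interfaces always may, AP may share with WDS and AP_VLAN,
+ * and multiple WDS or multiple AP_VLAN interfaces may share one address.
+ */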
+static inline int identical_mac_addr_allowed(int type1, int type2)
+{
+	return type1 == NL80211_IFTYPE_MONITOR ||
+		type2 == NL80211_IFTYPE_MONITOR ||
+		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
+		(type1 == NL80211_IFTYPE_WDS &&
+			(type2 == NL80211_IFTYPE_WDS ||
+			 type2 == NL80211_IFTYPE_AP)) ||
+		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) ||
+		(type1 == NL80211_IFTYPE_AP_VLAN &&
+			(type2 == NL80211_IFTYPE_AP ||
+			 type2 == NL80211_IFTYPE_AP_VLAN));
+}
+
+static int ieee80211_open(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *nsdata;
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	struct ieee80211_if_init_conf conf;
+	u32 changed = 0;
+	int res;
+	bool need_hw_reconfig = 0;
+	u8 null_addr[ETH_ALEN] = {0};
+
+	/* fail early if user set an invalid address */
+	if (compare_ether_addr(dev->dev_addr, null_addr) &&
+	    !is_valid_ether_addr(dev->dev_addr))
+		return -EADDRNOTAVAIL;
+
+	/* we hold the RTNL here so can safely walk the list */
+	list_for_each_entry(nsdata, &local->interfaces, list) {
+		struct net_device *ndev = nsdata->dev;
+
+		if (ndev != dev && netif_running(ndev)) {
+			/*
+			 * Allow only a single IBSS interface to be up at any
+			 * time. This is restricted because beacon distribution
+			 * cannot work properly if both are in the same IBSS.
+			 *
+			 * To remove this restriction we'd have to disallow them
+			 * from setting the same SSID on different IBSS interfaces
+			 * belonging to the same hardware. Then, however, we're
+			 * faced with having to adopt two different TSF timers...
+			 */
+			if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+			    nsdata->vif.type == NL80211_IFTYPE_ADHOC)
+				return -EBUSY;
+
+			/*
+			 * The remaining checks are only performed for interfaces
+			 * with the same MAC address.
+			 */
+			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
+				continue;
+
+			/*
+			 * check whether it may have the same address
+			 */
+			if (!identical_mac_addr_allowed(sdata->vif.type,
+							nsdata->vif.type))
+				return -ENOTUNIQ;
+
+			/*
+			 * can only add VLANs to enabled APs
+			 */
+			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+			    nsdata->vif.type == NL80211_IFTYPE_AP)
+				sdata->bss = &nsdata->u.ap;
+		}
+	}
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_WDS:
+		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
+			return -ENOLINK;
+		break;
+	case NL80211_IFTYPE_AP_VLAN:
+		if (!sdata->bss)
+			return -ENOLINK;
+		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+		break;
+	case NL80211_IFTYPE_AP:
+		sdata->bss = &sdata->u.ap;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!ieee80211_vif_is_mesh(&sdata->vif))
+			break;
+		/* mesh ifaces must set allmulti to forward mcast traffic */
+		atomic_inc(&local->iff_allmultis);
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_ADHOC:
+		/* no special treatment */
+		break;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case __NL80211_IFTYPE_AFTER_LAST:
+		/* cannot happen */
+		WARN_ON(1);
+		break;
+	}
+
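+	/* if this is the first interface to come up, power up the hardware */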
+	if (local->open_count == 0) {
+		res = 0;
+		if (local->ops->start)
+			res = local->ops->start(local_to_hw(local));
+		if (res)
+			goto err_del_bss;
+		need_hw_reconfig = 1;
+		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
+	}
+
+	/*
+	 * Check all interfaces and copy the hopefully now-present
+	 * MAC address to those that have the special null one.
+	 */
+	list_for_each_entry(nsdata, &local->interfaces, list) {
+		struct net_device *ndev = nsdata->dev;
+
+		/*
+		 * No need to check netif_running since we do not allow
+		 * it to start up with this invalid address.
+		 */
+		if (compare_ether_addr(null_addr, ndev->dev_addr) == 0)
+			memcpy(ndev->dev_addr,
+			       local->hw.wiphy->perm_addr,
+			       ETH_ALEN);
+	}
+
+	if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
+		memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
+		       ETH_ALEN);
+
+	/*
+	 * Validate the MAC address for this device.
+	 */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		if (!local->open_count && local->ops->stop)
+			local->ops->stop(local_to_hw(local));
+		return -EADDRNOTAVAIL;
+	}
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_AP_VLAN:
+		/* no need to tell driver */
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+			local->cooked_mntrs++;
+			break;
+		}
+
+		/* must be before the call to ieee80211_configure_filter */
+		local->monitors++;
+		if (local->monitors == 1)
+			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
+
+		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
+			local->fif_fcsfail++;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
+			local->fif_plcpfail++;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
+			local->fif_control++;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
+			local->fif_other_bss++;
+
+		netif_addr_lock_bh(local->mdev);
+		ieee80211_configure_filter(local);
+		netif_addr_unlock_bh(local->mdev);
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
+		/* fall through */
+	default:
+		conf.vif = &sdata->vif;
+		conf.type = sdata->vif.type;
+		conf.mac_addr = dev->dev_addr;
+		res = local->ops->add_interface(local_to_hw(local), &conf);
+		if (res)
+			goto err_stop;
+
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			ieee80211_start_mesh(sdata);
+		changed |= ieee80211_reset_erp_info(sdata);
+		ieee80211_bss_info_change_notify(sdata, changed);
+		ieee80211_enable_keys(sdata);
+
+		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
+			netif_carrier_off(dev);
+		else
+			netif_carrier_on(dev);
+	}
+
+	if (sdata->vif.type == NL80211_IFTYPE_WDS) {
+		/* Create STA entry for the WDS peer */
+		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
+				     GFP_KERNEL);
+		if (!sta) {
+			res = -ENOMEM;
+			goto err_del_interface;
+		}
+
+		/* no locking required since STA is not live yet */
+		sta->flags |= WLAN_STA_AUTHORIZED;
+
+		res = sta_info_insert(sta);
+		if (res) {
+			/* STA has been freed */
+			goto err_del_interface;
+		}
+	}
+
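+	/* the master device comes up together with the first interface */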
+	if (local->open_count == 0) {
+		res = dev_open(local->mdev);
+		WARN_ON(res);
+		if (res)
+			goto err_del_interface;
+		tasklet_enable(&local->tx_pending_tasklet);
+		tasklet_enable(&local->tasklet);
+	}
+
+	/*
+	 * set_multicast_list will be invoked by the networking core
+	 * which will check whether any increments here were done in
+	 * error and sync them down to the hardware as filter flags.
+	 */
+	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
+		atomic_inc(&local->iff_allmultis);
+
+	if (sdata->flags & IEEE80211_SDATA_PROMISC)
+		atomic_inc(&local->iff_promiscs);
+
+	local->open_count++;
+	if (need_hw_reconfig) {
+		ieee80211_hw_config(local);
+		/*
+		 * set default queue parameters so drivers don't
+		 * need to initialise the hardware if the hardware
+		 * doesn't start up with sane defaults
+		 */
+		ieee80211_set_wmm_default(sdata);
+	}
+
+	/*
+	 * ieee80211_sta_work is disabled while network interface
+	 * is down. Therefore, some configuration changes may not
+	 * yet be effective. Trigger execution of ieee80211_sta_work
+	 * to fix this.
+	 */
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+		queue_work(local->hw.workqueue, &ifsta->work);
+	}
+
+	netif_tx_start_all_queues(dev);
+
+	return 0;
+ err_del_interface:
+	local->ops->remove_interface(local_to_hw(local), &conf);
+ err_stop:
+	if (!local->open_count && local->ops->stop)
+		local->ops->stop(local_to_hw(local));
+ err_del_bss:
+	sdata->bss = NULL;
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		list_del(&sdata->u.vlan.list);
+	return res;
+}
+
+static int ieee80211_stop(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_init_conf conf;
+	struct sta_info *sta;
+
+	/*
+	 * Stop TX on this interface first.
+	 */
+	netif_tx_stop_all_queues(dev);
+
+	/*
+	 * Now delete all active aggregation sessions.
+	 */
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sta->sdata == sdata)
+			ieee80211_sta_tear_down_BA_sessions(sdata,
+							    sta->sta.addr);
+	}
+
+	rcu_read_unlock();
+
+	/*
+	 * Remove all stations associated with this interface.
+	 *
+	 * This must be done before calling ops->remove_interface()
+	 * because otherwise we can later invoke ops->sta_notify()
+	 * whenever the STAs are removed, and that invalidates driver
+	 * assumptions about always getting a vif pointer that is valid
+	 * (because if we remove a STA after ops->remove_interface()
+	 * the driver will have removed the vif info already!)
+	 *
+	 * We could relax this and only unlink the stations from the
+	 * hash table and list but keep them on a per-sdata list that
+	 * will be inserted back again when the interface is brought
+	 * up again, but I don't currently see a use case for that,
+	 * except with WDS which gets a STA entry created when it is
+	 * brought up.
+	 */
+	sta_info_flush(local, sdata);
+
+	/*
+	 * Don't count this interface for promisc/allmulti while it
+	 * is down. dev_mc_unsync() will invoke set_multicast_list
+	 * on the master interface which will sync these down to the
+	 * hardware as filter flags.
+	 */
+	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
+		atomic_dec(&local->iff_allmultis);
+
+	if (sdata->flags & IEEE80211_SDATA_PROMISC)
+		atomic_dec(&local->iff_promiscs);
+
+	dev_mc_unsync(local->mdev, dev);
+
+	/* APs need special treatment */
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		struct ieee80211_sub_if_data *vlan, *tmp;
+		struct beacon_data *old_beacon = sdata->u.ap.beacon;
+
+		/* remove beacon */
+		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+		synchronize_rcu();
+		kfree(old_beacon);
+
+		/* down all dependent devices, that is VLANs */
+		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
+					 u.vlan.list)
+			dev_close(vlan->dev);
+		WARN_ON(!list_empty(&sdata->u.ap.vlans));
+	}
+
+	local->open_count--;
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_AP_VLAN:
+		list_del(&sdata->u.vlan.list);
+		/* no need to tell driver */
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+			local->cooked_mntrs--;
+			break;
+		}
+
+		local->monitors--;
+		if (local->monitors == 0)
+			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
+
+		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
+			local->fif_fcsfail--;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
+			local->fif_plcpfail--;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
+			local->fif_control--;
+		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
+			local->fif_other_bss--;
+
+		netif_addr_lock_bh(local->mdev);
+		ieee80211_configure_filter(local);
+		netif_addr_unlock_bh(local->mdev);
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+		sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED;
+		memset(sdata->u.sta.bssid, 0, ETH_ALEN);
+		del_timer_sync(&sdata->u.sta.timer);
+		/*
+		 * If the timer fired while we waited for it, it will have
+		 * requeued the work. Now the work will be running again
+		 * but will not rearm the timer again because it checks
+		 * whether the interface is running, which, at this point,
+		 * it no longer is.
+		 */
+		cancel_work_sync(&sdata->u.sta.work);
+		/*
+		 * When we get here, the interface is marked down.
+		 * Call synchronize_rcu() to wait for the RX path
+		 * should it be using the interface and enqueuing
+		 * frames at this very time on another CPU.
+		 */
+		synchronize_rcu();
+		skb_queue_purge(&sdata->u.sta.skb_queue);
+
+		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
+		kfree(sdata->u.sta.extra_ie);
+		sdata->u.sta.extra_ie = NULL;
+		sdata->u.sta.extra_ie_len = 0;
+		/* fall through */
+	case NL80211_IFTYPE_MESH_POINT:
+		if (ieee80211_vif_is_mesh(&sdata->vif)) {
+			/* allmulti is always set on mesh ifaces */
+			atomic_dec(&local->iff_allmultis);
+			ieee80211_stop_mesh(sdata);
+		}
+		/* fall through */
+	default:
+		if (local->scan_sdata == sdata) {
+			if (!local->ops->hw_scan)
+				cancel_delayed_work_sync(&local->scan_work);
+			/*
+			 * The software scan can no longer run now, so we can
+			 * clear out the scan_sdata reference. However, the
+			 * hardware scan may still be running. The complete
+			 * function must be prepared to handle a NULL value.
+			 */
+			local->scan_sdata = NULL;
+			/*
+			 * The memory barrier guarantees that another CPU
+			 * that is hardware-scanning will now see the fact
+			 * that this interface is gone.
+			 */
+			smp_mb();
+			/*
+			 * If software scanning, complete the scan but since
+			 * the scan_sdata is NULL already don't send out a
+			 * scan event to userspace -- the scan is incomplete.
+			 */
+			if (local->sw_scanning)
+				ieee80211_scan_completed(&local->hw);
+		}
+
+		conf.vif = &sdata->vif;
+		conf.type = sdata->vif.type;
+		conf.mac_addr = dev->dev_addr;
+		/* disable all keys for as long as this netdev is down */
+		ieee80211_disable_keys(sdata);
+		local->ops->remove_interface(local_to_hw(local), &conf);
+	}
+
+	sdata->bss = NULL;
+
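+	/* if this was the last interface, close the master device and stop the hardware */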
+	if (local->open_count == 0) {
+		if (netif_running(local->mdev))
+			dev_close(local->mdev);
+
+		if (local->ops->stop)
+			local->ops->stop(local_to_hw(local));
+
+		ieee80211_led_radio(local, 0);
+
+		flush_workqueue(local->hw.workqueue);
+
+		tasklet_disable(&local->tx_pending_tasklet);
+		tasklet_disable(&local->tasklet);
+	}
+
+	return 0;
+}
+
+static void ieee80211_set_multicast_list(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	int allmulti, promisc, sdata_allmulti, sdata_promisc;
+
+	allmulti = !!(dev->flags & IFF_ALLMULTI);
+	promisc = !!(dev->flags & IFF_PROMISC);
+	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
+	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
+
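+	/* keep the shared allmulti/promisc counters on the master device
+	 * in sync with this interface's netdev flags */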
+	if (allmulti != sdata_allmulti) {
+		if (dev->flags & IFF_ALLMULTI)
+			atomic_inc(&local->iff_allmultis);
+		else
+			atomic_dec(&local->iff_allmultis);
+		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
+	}
+
+	if (promisc != sdata_promisc) {
+		if (dev->flags & IFF_PROMISC)
+			atomic_inc(&local->iff_promiscs);
+		else
+			atomic_dec(&local->iff_promiscs);
+		sdata->flags ^= IEEE80211_SDATA_PROMISC;
+	}
+
+	dev_mc_sync(local->mdev, dev);
+}
+
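+/* netdev setup callback for mac80211 virtual interfaces */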
+static void ieee80211_if_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+	dev->hard_start_xmit = ieee80211_subif_start_xmit;
+	dev->wireless_handlers = &ieee80211_iw_handler_def;
+	dev->set_multicast_list = ieee80211_set_multicast_list;
+	dev->change_mtu = ieee80211_change_mtu;
+	dev->open = ieee80211_open;
+	dev->stop = ieee80211_stop;
+	dev->destructor = free_netdev;
+	/* we will validate the address ourselves in ->open */
+	dev->validate_addr = NULL;
+}
+
 /*
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
@@ -31,17 +565,17 @@
 	int flushed;
 	int i;
 
-	ieee80211_debugfs_remove_netdev(sdata);
-
 	/* free extra data */
 	ieee80211_free_keys(sdata);
 
+	ieee80211_debugfs_remove_netdev(sdata);
+
 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
 		__skb_queue_purge(&sdata->fragments[i].skb_list);
 	sdata->fragment_next = 0;
 
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		beacon = sdata->u.ap.beacon;
 		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
 		synchronize_rcu();
@@ -53,23 +587,23 @@
 		}
 
 		break;
-	case IEEE80211_IF_TYPE_MESH_POINT:
-		/* Allow compiler to elide mesh_rmc_free call. */
+	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif))
-			mesh_rmc_free(dev);
-		/* fall through */
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
+			mesh_rmc_free(sdata);
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
 		kfree(sdata->u.sta.extra_ie);
 		kfree(sdata->u.sta.assocreq_ies);
 		kfree(sdata->u.sta.assocresp_ies);
 		kfree_skb(sdata->u.sta.probe_resp);
 		break;
-	case IEEE80211_IF_TYPE_WDS:
-	case IEEE80211_IF_TYPE_VLAN:
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_MONITOR:
 		break;
-	case IEEE80211_IF_TYPE_INVALID:
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case __NL80211_IFTYPE_AFTER_LAST:
 		BUG();
 		break;
 	}
@@ -82,55 +616,43 @@
  * Helper function to initialise an interface to a specific type.
  */
 static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
-				  enum ieee80211_if_types type)
+				  enum nl80211_iftype type)
 {
-	struct ieee80211_if_sta *ifsta;
-
 	/* clear type-dependent union */
 	memset(&sdata->u, 0, sizeof(sdata->u));
 
 	/* and set some type-dependent values */
 	sdata->vif.type = type;
+	sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit;
+	sdata->wdev.iftype = type;
 
 	/* only monitor differs */
 	sdata->dev->type = ARPHRD_ETHER;
 
 	switch (type) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
 		INIT_LIST_HEAD(&sdata->u.ap.vlans);
 		break;
-	case IEEE80211_IF_TYPE_MESH_POINT:
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
-		ifsta = &sdata->u.sta;
-		INIT_WORK(&ifsta->work, ieee80211_sta_work);
-		setup_timer(&ifsta->timer, ieee80211_sta_timer,
-			    (unsigned long) sdata);
-		skb_queue_head_init(&ifsta->skb_queue);
-
-		ifsta->capab = WLAN_CAPABILITY_ESS;
-		ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
-			IEEE80211_AUTH_ALG_SHARED_KEY;
-		ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
-			IEEE80211_STA_AUTO_BSSID_SEL |
-			IEEE80211_STA_AUTO_CHANNEL_SEL;
-		if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
-			ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
-
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+		ieee80211_sta_setup_sdata(sdata);
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif))
 			ieee80211_mesh_init_sdata(sdata);
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
 		sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
 		sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
 				      MONITOR_FLAG_OTHER_BSS;
 		break;
-	case IEEE80211_IF_TYPE_WDS:
-	case IEEE80211_IF_TYPE_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_AP_VLAN:
 		break;
-	case IEEE80211_IF_TYPE_INVALID:
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case __NL80211_IFTYPE_AFTER_LAST:
 		BUG();
 		break;
 	}
@@ -139,7 +661,7 @@
 }
 
 int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
-			     enum ieee80211_if_types type)
+			     enum nl80211_iftype type)
 {
 	ASSERT_RTNL();
 
@@ -160,14 +682,16 @@
 	ieee80211_setup_sdata(sdata, type);
 
 	/* reset some values that shouldn't be kept across type changes */
-	sdata->basic_rates = 0;
+	sdata->bss_conf.basic_rates =
+		ieee80211_mandatory_rates(sdata->local,
+			sdata->local->hw.conf.channel->band);
 	sdata->drop_unencrypted = 0;
 
 	return 0;
 }
 
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
-		     struct net_device **new_dev, enum ieee80211_if_types type,
+		     struct net_device **new_dev, enum nl80211_iftype type,
 		     struct vif_params *params)
 {
 	struct net_device *ndev;
@@ -225,9 +749,9 @@
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
 	    params && params->mesh_id_len)
-		ieee80211_if_sta_set_mesh_id(&sdata->u.sta,
-					     params->mesh_id_len,
-					     params->mesh_id);
+		ieee80211_sdata_set_mesh_id(sdata,
+					    params->mesh_id_len,
+					    params->mesh_id);
 
 	list_add_tail_rcu(&sdata->list, &local->interfaces);
 
@@ -241,15 +765,13 @@
 	return ret;
 }
 
-void ieee80211_if_remove(struct net_device *dev)
+void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
 	ASSERT_RTNL();
 
 	list_del_rcu(&sdata->list);
 	synchronize_rcu();
-	unregister_netdevice(dev);
+	unregister_netdevice(sdata->dev);
 }
 
 /*
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6597c77..57afcd3 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -118,12 +118,12 @@
 	 * address to indicate a transmit-only key.
 	 */
 	if (key->conf.alg != ALG_WEP &&
-	    (key->sdata->vif.type == IEEE80211_IF_TYPE_AP ||
-	     key->sdata->vif.type == IEEE80211_IF_TYPE_VLAN))
+	    (key->sdata->vif.type == NL80211_IFTYPE_AP ||
+	     key->sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
 		addr = zero_addr;
 
 	if (key->sta)
-		addr = key->sta->addr;
+		addr = key->sta->sta.addr;
 
 	return addr;
 }
@@ -331,7 +331,7 @@
 		 */
 		key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
 	} else {
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 			struct sta_info *ap;
 
 			/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aa5a191..d608c44 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -45,16 +45,9 @@
 	u8 data_retries;
 } __attribute__ ((packed));
 
-/* common interface routines */
-
-static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
-{
-	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
-	return ETH_ALEN;
-}
 
 /* must be called under mdev tx lock */
-static void ieee80211_configure_filter(struct ieee80211_local *local)
+void ieee80211_configure_filter(struct ieee80211_local *local)
 {
 	unsigned int changed_flags;
 	unsigned int new_flags = 0;
@@ -97,9 +90,24 @@
 
 /* master interface */
 
+static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
+{
+	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
+	return ETH_ALEN;
+}
+
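+/*
+ * Header ops for the 802.11 master device: Ethernet-style headers
+ * towards the stack, with the hardware address parsed from addr2 of
+ * the 802.11 header.
+ */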
+static const struct header_ops ieee80211_header_ops = {
+	.create		= eth_header,
+	.parse		= header_parse_80211,
+	.rebuild	= eth_rebuild_header,
+	.cache		= eth_header_cache,
+	.cache_update	= eth_header_cache_update,
+};
+
 static int ieee80211_master_open(struct net_device *dev)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
+	struct ieee80211_local *local = mpriv->local;
 	struct ieee80211_sub_if_data *sdata;
 	int res = -EOPNOTSUPP;
 
@@ -121,7 +129,8 @@
 
 static int ieee80211_master_stop(struct net_device *dev)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
+	struct ieee80211_local *local = mpriv->local;
 	struct ieee80211_sub_if_data *sdata;
 
 	/* we hold the RTNL here so can safely walk the list */
@@ -134,849 +143,12 @@
 
 static void ieee80211_master_set_multicast_list(struct net_device *dev)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
+	struct ieee80211_local *local = mpriv->local;
 
 	ieee80211_configure_filter(local);
 }
 
-/* regular interfaces */
-
-static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
-{
-	int meshhdrlen;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
-
-	/* FIX: what would be proper limits for MTU?
-	 * This interface uses 802.3 frames. */
-	if (new_mtu < 256 ||
-	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
-		return -EINVAL;
-	}
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-	dev->mtu = new_mtu;
-	return 0;
-}
-
-static inline int identical_mac_addr_allowed(int type1, int type2)
-{
-	return (type1 == IEEE80211_IF_TYPE_MNTR ||
-		type2 == IEEE80211_IF_TYPE_MNTR ||
-		(type1 == IEEE80211_IF_TYPE_AP &&
-		 type2 == IEEE80211_IF_TYPE_WDS) ||
-		(type1 == IEEE80211_IF_TYPE_WDS &&
-		 (type2 == IEEE80211_IF_TYPE_WDS ||
-		  type2 == IEEE80211_IF_TYPE_AP)) ||
-		(type1 == IEEE80211_IF_TYPE_AP &&
-		 type2 == IEEE80211_IF_TYPE_VLAN) ||
-		(type1 == IEEE80211_IF_TYPE_VLAN &&
-		 (type2 == IEEE80211_IF_TYPE_AP ||
-		  type2 == IEEE80211_IF_TYPE_VLAN)));
-}
-
-static int ieee80211_open(struct net_device *dev)
-{
-	struct ieee80211_sub_if_data *sdata, *nsdata;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta;
-	struct ieee80211_if_init_conf conf;
-	u32 changed = 0;
-	int res;
-	bool need_hw_reconfig = 0;
-
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	/* we hold the RTNL here so can safely walk the list */
-	list_for_each_entry(nsdata, &local->interfaces, list) {
-		struct net_device *ndev = nsdata->dev;
-
-		if (ndev != dev && netif_running(ndev)) {
-			/*
-			 * Allow only a single IBSS interface to be up at any
-			 * time. This is restricted because beacon distribution
-			 * cannot work properly if both are in the same IBSS.
-			 *
-			 * To remove this restriction we'd have to disallow them
-			 * from setting the same SSID on different IBSS interfaces
-			 * belonging to the same hardware. Then, however, we're
-			 * faced with having to adopt two different TSF timers...
-			 */
-			if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
-			    nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
-				return -EBUSY;
-
-			/*
-			 * The remaining checks are only performed for interfaces
-			 * with the same MAC address.
-			 */
-			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
-				continue;
-
-			/*
-			 * check whether it may have the same address
-			 */
-			if (!identical_mac_addr_allowed(sdata->vif.type,
-							nsdata->vif.type))
-				return -ENOTUNIQ;
-
-			/*
-			 * can only add VLANs to enabled APs
-			 */
-			if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
-			    nsdata->vif.type == IEEE80211_IF_TYPE_AP)
-				sdata->bss = &nsdata->u.ap;
-		}
-	}
-
-	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_WDS:
-		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
-			return -ENOLINK;
-		break;
-	case IEEE80211_IF_TYPE_VLAN:
-		if (!sdata->bss)
-			return -ENOLINK;
-		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
-		break;
-	case IEEE80211_IF_TYPE_AP:
-		sdata->bss = &sdata->u.ap;
-		break;
-	case IEEE80211_IF_TYPE_MESH_POINT:
-		/* mesh ifaces must set allmulti to forward mcast traffic */
-		atomic_inc(&local->iff_allmultis);
-		break;
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_MNTR:
-	case IEEE80211_IF_TYPE_IBSS:
-		/* no special treatment */
-		break;
-	case IEEE80211_IF_TYPE_INVALID:
-		/* cannot happen */
-		WARN_ON(1);
-		break;
-	}
-
-	if (local->open_count == 0) {
-		res = 0;
-		if (local->ops->start)
-			res = local->ops->start(local_to_hw(local));
-		if (res)
-			goto err_del_bss;
-		need_hw_reconfig = 1;
-		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
-	}
-
-	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_VLAN:
-		/* no need to tell driver */
-		break;
-	case IEEE80211_IF_TYPE_MNTR:
-		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
-			local->cooked_mntrs++;
-			break;
-		}
-
-		/* must be before the call to ieee80211_configure_filter */
-		local->monitors++;
-		if (local->monitors == 1)
-			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
-
-		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
-			local->fif_fcsfail++;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
-			local->fif_plcpfail++;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
-			local->fif_control++;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
-			local->fif_other_bss++;
-
-		netif_addr_lock_bh(local->mdev);
-		ieee80211_configure_filter(local);
-		netif_addr_unlock_bh(local->mdev);
-		break;
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
-		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
-		/* fall through */
-	default:
-		conf.vif = &sdata->vif;
-		conf.type = sdata->vif.type;
-		conf.mac_addr = dev->dev_addr;
-		res = local->ops->add_interface(local_to_hw(local), &conf);
-		if (res)
-			goto err_stop;
-
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			ieee80211_start_mesh(sdata->dev);
-		changed |= ieee80211_reset_erp_info(dev);
-		ieee80211_bss_info_change_notify(sdata, changed);
-		ieee80211_enable_keys(sdata);
-
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
-		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
-			netif_carrier_off(dev);
-		else
-			netif_carrier_on(dev);
-	}
-
-	if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
-		/* Create STA entry for the WDS peer */
-		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
-				     GFP_KERNEL);
-		if (!sta) {
-			res = -ENOMEM;
-			goto err_del_interface;
-		}
-
-		/* no locking required since STA is not live yet */
-		sta->flags |= WLAN_STA_AUTHORIZED;
-
-		res = sta_info_insert(sta);
-		if (res) {
-			/* STA has been freed */
-			goto err_del_interface;
-		}
-	}
-
-	if (local->open_count == 0) {
-		res = dev_open(local->mdev);
-		WARN_ON(res);
-		if (res)
-			goto err_del_interface;
-		tasklet_enable(&local->tx_pending_tasklet);
-		tasklet_enable(&local->tasklet);
-	}
-
-	/*
-	 * set_multicast_list will be invoked by the networking core
-	 * which will check whether any increments here were done in
-	 * error and sync them down to the hardware as filter flags.
-	 */
-	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
-		atomic_inc(&local->iff_allmultis);
-
-	if (sdata->flags & IEEE80211_SDATA_PROMISC)
-		atomic_inc(&local->iff_promiscs);
-
-	local->open_count++;
-	if (need_hw_reconfig)
-		ieee80211_hw_config(local);
-
-	/*
-	 * ieee80211_sta_work is disabled while network interface
-	 * is down. Therefore, some configuration changes may not
-	 * yet be effective. Trigger execution of ieee80211_sta_work
-	 * to fix this.
-	 */
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
-		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-		queue_work(local->hw.workqueue, &ifsta->work);
-	}
-
-	netif_tx_start_all_queues(dev);
-
-	return 0;
- err_del_interface:
-	local->ops->remove_interface(local_to_hw(local), &conf);
- err_stop:
-	if (!local->open_count && local->ops->stop)
-		local->ops->stop(local_to_hw(local));
- err_del_bss:
-	sdata->bss = NULL;
-	if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
-		list_del(&sdata->u.vlan.list);
-	return res;
-}
-
-static int ieee80211_stop(struct net_device *dev)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_init_conf conf;
-	struct sta_info *sta;
-
-	/*
-	 * Stop TX on this interface first.
-	 */
-	netif_tx_stop_all_queues(dev);
-
-	/*
-	 * Now delete all active aggregation sessions.
-	 */
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(sta, &local->sta_list, list) {
-		if (sta->sdata == sdata)
-			ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
-	}
-
-	rcu_read_unlock();
-
-	/*
-	 * Remove all stations associated with this interface.
-	 *
-	 * This must be done before calling ops->remove_interface()
-	 * because otherwise we can later invoke ops->sta_notify()
-	 * whenever the STAs are removed, and that invalidates driver
-	 * assumptions about always getting a vif pointer that is valid
-	 * (because if we remove a STA after ops->remove_interface()
-	 * the driver will have removed the vif info already!)
-	 *
-	 * We could relax this and only unlink the stations from the
-	 * hash table and list but keep them on a per-sdata list that
-	 * will be inserted back again when the interface is brought
-	 * up again, but I don't currently see a use case for that,
-	 * except with WDS which gets a STA entry created when it is
-	 * brought up.
-	 */
-	sta_info_flush(local, sdata);
-
-	/*
-	 * Don't count this interface for promisc/allmulti while it
-	 * is down. dev_mc_unsync() will invoke set_multicast_list
-	 * on the master interface which will sync these down to the
-	 * hardware as filter flags.
-	 */
-	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
-		atomic_dec(&local->iff_allmultis);
-
-	if (sdata->flags & IEEE80211_SDATA_PROMISC)
-		atomic_dec(&local->iff_promiscs);
-
-	dev_mc_unsync(local->mdev, dev);
-
-	/* APs need special treatment */
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
-		struct ieee80211_sub_if_data *vlan, *tmp;
-		struct beacon_data *old_beacon = sdata->u.ap.beacon;
-
-		/* remove beacon */
-		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
-		synchronize_rcu();
-		kfree(old_beacon);
-
-		/* down all dependent devices, that is VLANs */
-		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
-					 u.vlan.list)
-			dev_close(vlan->dev);
-		WARN_ON(!list_empty(&sdata->u.ap.vlans));
-	}
-
-	local->open_count--;
-
-	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_VLAN:
-		list_del(&sdata->u.vlan.list);
-		/* no need to tell driver */
-		break;
-	case IEEE80211_IF_TYPE_MNTR:
-		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
-			local->cooked_mntrs--;
-			break;
-		}
-
-		local->monitors--;
-		if (local->monitors == 0)
-			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
-
-		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
-			local->fif_fcsfail--;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
-			local->fif_plcpfail--;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
-			local->fif_control--;
-		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
-			local->fif_other_bss--;
-
-		netif_addr_lock_bh(local->mdev);
-		ieee80211_configure_filter(local);
-		netif_addr_unlock_bh(local->mdev);
-		break;
-	case IEEE80211_IF_TYPE_MESH_POINT:
-		/* allmulti is always set on mesh ifaces */
-		atomic_dec(&local->iff_allmultis);
-		/* fall through */
-	case IEEE80211_IF_TYPE_STA:
-	case IEEE80211_IF_TYPE_IBSS:
-		sdata->u.sta.state = IEEE80211_DISABLED;
-		memset(sdata->u.sta.bssid, 0, ETH_ALEN);
-		del_timer_sync(&sdata->u.sta.timer);
-		/*
-		 * When we get here, the interface is marked down.
-		 * Call synchronize_rcu() to wait for the RX path
-		 * should it be using the interface and enqueuing
-		 * frames at this very time on another CPU.
-		 */
-		synchronize_rcu();
-		skb_queue_purge(&sdata->u.sta.skb_queue);
-
-		if (local->scan_dev == sdata->dev) {
-			if (!local->ops->hw_scan) {
-				local->sta_sw_scanning = 0;
-				cancel_delayed_work(&local->scan_work);
-			} else
-				local->sta_hw_scanning = 0;
-		}
-
-		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
-		kfree(sdata->u.sta.extra_ie);
-		sdata->u.sta.extra_ie = NULL;
-		sdata->u.sta.extra_ie_len = 0;
-		/* fall through */
-	default:
-		conf.vif = &sdata->vif;
-		conf.type = sdata->vif.type;
-		conf.mac_addr = dev->dev_addr;
-		/* disable all keys for as long as this netdev is down */
-		ieee80211_disable_keys(sdata);
-		local->ops->remove_interface(local_to_hw(local), &conf);
-	}
-
-	sdata->bss = NULL;
-
-	if (local->open_count == 0) {
-		if (netif_running(local->mdev))
-			dev_close(local->mdev);
-
-		if (local->ops->stop)
-			local->ops->stop(local_to_hw(local));
-
-		ieee80211_led_radio(local, 0);
-
-		flush_workqueue(local->hw.workqueue);
-
-		tasklet_disable(&local->tx_pending_tasklet);
-		tasklet_disable(&local->tasklet);
-	}
-
-	return 0;
-}
-
-int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct sta_info *sta;
-	struct ieee80211_sub_if_data *sdata;
-	u16 start_seq_num = 0;
-	u8 *state;
-	int ret;
-	DECLARE_MAC_BUF(mac);
-
-	if (tid >= STA_TID_NUM)
-		return -EINVAL;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
-				print_mac(mac, ra), tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, ra);
-	if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Could not find the station\n");
-#endif
-		ret = -ENOENT;
-		goto exit;
-	}
-
-	spin_lock_bh(&sta->lock);
-
-	/* we have tried too many times, receiver does not want A-MPDU */
-	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
-		ret = -EBUSY;
-		goto err_unlock_sta;
-	}
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	/* check if the TID is not in aggregation flow already */
-	if (*state != HT_AGG_STATE_IDLE) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - session is not "
-				 "idle on tid %u\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		ret = -EAGAIN;
-		goto err_unlock_sta;
-	}
-
-	/* prepare A-MPDU MLME for Tx aggregation */
-	sta->ampdu_mlme.tid_tx[tid] =
-			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
-	if (!sta->ampdu_mlme.tid_tx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
-					tid);
-#endif
-		ret = -ENOMEM;
-		goto err_unlock_sta;
-	}
-	/* Tx timer */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
-			sta_addba_resp_timer_expired;
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
-			(unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
-	/* create a new queue for this aggregation */
-	ret = ieee80211_ht_agg_queue_add(local, sta, tid);
-
-	/* case no queue is available to aggregation
-	 * don't switch to aggregation */
-	if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - queue unavailable for"
-					" tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		goto err_unlock_queue;
-	}
-	sdata = sta->sdata;
-
-	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
-	 * call back right away, it must see that the flow has begun */
-	*state |= HT_ADDBA_REQUESTED_MSK;
-
-	if (local->ops->ampdu_action)
-		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
-						ra, tid, &start_seq_num);
-
-	if (ret) {
-		/* No need to requeue the packets in the agg queue, since we
-		 * held the tx lock: no packet could be enqueued to the newly
-		 * allocated queue */
-		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - HW unavailable for"
-					" tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		*state = HT_AGG_STATE_IDLE;
-		goto err_unlock_queue;
-	}
-
-	/* Will put all the packets in the new SW queue */
-	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
-	spin_unlock_bh(&sta->lock);
-
-	/* send an addBA request */
-	sta->ampdu_mlme.dialog_token_allocator++;
-	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
-			sta->ampdu_mlme.dialog_token_allocator;
-	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
-
-
-	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
-			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
-			 sta->ampdu_mlme.tid_tx[tid]->ssn,
-			 0x40, 5000);
-	/* activate the timer for the recipient's addBA response */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
-				jiffies + ADDBA_RESP_INTERVAL;
-	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
-	goto exit;
-
-err_unlock_queue:
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
-	ret = -EBUSY;
-err_unlock_sta:
-	spin_unlock_bh(&sta->lock);
-exit:
-	rcu_read_unlock();
-	return ret;
-}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-
-int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
-				 u8 *ra, u16 tid,
-				 enum ieee80211_back_parties initiator)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct sta_info *sta;
-	u8 *state;
-	int ret = 0;
-	DECLARE_MAC_BUF(mac);
-
-	if (tid >= STA_TID_NUM)
-		return -EINVAL;
-
-	rcu_read_lock();
-	sta = sta_info_get(local, ra);
-	if (!sta) {
-		rcu_read_unlock();
-		return -ENOENT;
-	}
-
-	/* check if the TID is in aggregation */
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	spin_lock_bh(&sta->lock);
-
-	if (*state != HT_AGG_STATE_OPERATIONAL) {
-		ret = -ENOENT;
-		goto stop_BA_exit;
-	}
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
-				print_mac(mac, ra), tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
-
-	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
-		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
-
-	if (local->ops->ampdu_action)
-		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
-						ra, tid, NULL);
-
-	/* case HW denied going back to legacy */
-	if (ret) {
-		WARN_ON(ret != -EBUSY);
-		*state = HT_AGG_STATE_OPERATIONAL;
-		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
-		goto stop_BA_exit;
-	}
-
-stop_BA_exit:
-	spin_unlock_bh(&sta->lock);
-	rcu_read_unlock();
-	return ret;
-}
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
-
-void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct sta_info *sta;
-	u8 *state;
-	DECLARE_MAC_BUF(mac);
-
-	if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
-				tid, STA_TID_NUM);
-#endif
-		return;
-	}
-
-	rcu_read_lock();
-	sta = sta_info_get(local, ra);
-	if (!sta) {
-		rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Could not find station: %s\n",
-				print_mac(mac, ra));
-#endif
-		return;
-	}
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	spin_lock_bh(&sta->lock);
-
-	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
-				*state);
-#endif
-		spin_unlock_bh(&sta->lock);
-		rcu_read_unlock();
-		return;
-	}
-
-	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
-
-	*state |= HT_ADDBA_DRV_READY_MSK;
-
-	if (*state == HT_AGG_STATE_OPERATIONAL) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
-#endif
-		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
-	}
-	spin_unlock_bh(&sta->lock);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
-
-void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct sta_info *sta;
-	u8 *state;
-	int agg_queue;
-	DECLARE_MAC_BUF(mac);
-
-	if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
-				tid, STA_TID_NUM);
-#endif
-		return;
-	}
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
-				print_mac(mac, ra), tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	rcu_read_lock();
-	sta = sta_info_get(local, ra);
-	if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Could not find station: %s\n",
-				print_mac(mac, ra));
-#endif
-		rcu_read_unlock();
-		return;
-	}
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-
-	/* NOTE: no need to use sta->lock in this state check, as
-	 * ieee80211_stop_tx_ba_session will let only one stop call to
-	 * pass through per sta/tid
-	 */
-	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
-#endif
-		rcu_read_unlock();
-		return;
-	}
-
-	if (*state & HT_AGG_STATE_INITIATOR_MSK)
-		ieee80211_send_delba(sta->sdata->dev, ra, tid,
-			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-	agg_queue = sta->tid_to_tx_q[tid];
-
-	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
-
-	/* We just requeued the all the frames that were in the
-	 * removed queue, and since we might miss a softirq we do
-	 * netif_schedule_queue.  ieee80211_wake_queue is not used
-	 * here as this queue is not necessarily stopped
-	 */
-	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
-	spin_lock_bh(&sta->lock);
-	*state = HT_AGG_STATE_IDLE;
-	sta->ampdu_mlme.addba_req_num[tid] = 0;
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
-	spin_unlock_bh(&sta->lock);
-
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
-
-void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
-				      const u8 *ra, u16 tid)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
-
-	if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_WARNING "%s: Not enough memory, "
-			       "dropping start BA session", skb->dev->name);
-#endif
-		return;
-	}
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
-
-	skb->pkt_type = IEEE80211_ADDBA_MSG;
-	skb_queue_tail(&local->skb_queue, skb);
-	tasklet_schedule(&local->tasklet);
-}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
-
-void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
-				     const u8 *ra, u16 tid)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
-
-	if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_WARNING "%s: Not enough memory, "
-			       "dropping stop BA session", skb->dev->name);
-#endif
-		return;
-	}
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
-
-	skb->pkt_type = IEEE80211_DELBA_MSG;
-	skb_queue_tail(&local->skb_queue, skb);
-	tasklet_schedule(&local->tasklet);
-}
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
-
-static void ieee80211_set_multicast_list(struct net_device *dev)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	int allmulti, promisc, sdata_allmulti, sdata_promisc;
-
-	allmulti = !!(dev->flags & IFF_ALLMULTI);
-	promisc = !!(dev->flags & IFF_PROMISC);
-	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
-	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
-
-	if (allmulti != sdata_allmulti) {
-		if (dev->flags & IFF_ALLMULTI)
-			atomic_inc(&local->iff_allmultis);
-		else
-			atomic_dec(&local->iff_allmultis);
-		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
-	}
-
-	if (promisc != sdata_promisc) {
-		if (dev->flags & IFF_PROMISC)
-			atomic_inc(&local->iff_promiscs);
-		else
-			atomic_dec(&local->iff_promiscs);
-		sdata->flags ^= IEEE80211_SDATA_PROMISC;
-	}
-
-	dev_mc_sync(local->mdev, dev);
-}
-
-static const struct header_ops ieee80211_header_ops = {
-	.create		= eth_header,
-	.parse		= header_parse_80211,
-	.rebuild	= eth_rebuild_header,
-	.cache		= eth_header_cache,
-	.cache_update	= eth_header_cache_update,
-};
-
-void ieee80211_if_setup(struct net_device *dev)
-{
-	ether_setup(dev);
-	dev->hard_start_xmit = ieee80211_subif_start_xmit;
-	dev->wireless_handlers = &ieee80211_iw_handler_def;
-	dev->set_multicast_list = ieee80211_set_multicast_list;
-	dev->change_mtu = ieee80211_change_mtu;
-	dev->open = ieee80211_open;
-	dev->stop = ieee80211_stop;
-	dev->destructor = free_netdev;
-}
-
 /* everything else */
 
 int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
@@ -987,18 +159,21 @@
 	if (WARN_ON(!netif_running(sdata->dev)))
 		return 0;
 
+	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
+		return -EINVAL;
+
 	if (!local->ops->config_interface)
 		return 0;
 
 	memset(&conf, 0, sizeof(conf));
 	conf.changed = changed;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		conf.bssid = sdata->u.sta.bssid;
 		conf.ssid = sdata->u.sta.ssid;
 		conf.ssid_len = sdata->u.sta.ssid_len;
-	} else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
+	} else if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		conf.bssid = sdata->dev->dev_addr;
 		conf.ssid = sdata->u.ap.ssid;
 		conf.ssid_len = sdata->u.ap.ssid_len;
@@ -1027,7 +202,7 @@
 	struct ieee80211_channel *chan;
 	int ret = 0;
 
-	if (local->sta_sw_scanning)
+	if (local->sw_scanning)
 		chan = local->scan_channel;
 	else
 		chan = local->oper_channel;
@@ -1099,8 +274,8 @@
 	ht_conf.ht_supported = 1;
 
 	ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
-	ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
-	ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
+	ht_conf.cap &= ~(IEEE80211_HT_CAP_SM_PS);
+	ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_SM_PS;
 	ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
 	ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
 	ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
@@ -1152,6 +327,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 
+	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
+		return;
+
 	if (!changed)
 		return;
 
@@ -1162,10 +340,8 @@
 					     changed);
 }
 
-u32 ieee80211_reset_erp_info(struct net_device *dev)
+u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
 	sdata->bss_conf.use_cts_prot = 0;
 	sdata->bss_conf.use_short_preamble = 0;
 	return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
@@ -1244,9 +420,10 @@
 				      struct ieee80211_key *key,
 				      struct sk_buff *skb)
 {
-	int hdrlen, iv_len, mic_len;
+	unsigned int hdrlen, iv_len, mic_len;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
 	if (!key)
 		goto no_key;
@@ -1268,24 +445,20 @@
 		goto no_key;
 	}
 
-	if (skb->len >= mic_len &&
+	if (skb->len >= hdrlen + mic_len &&
 	    !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
 		skb_trim(skb, skb->len - mic_len);
-	if (skb->len >= iv_len && skb->len > hdrlen) {
+	if (skb->len >= hdrlen + iv_len) {
 		memmove(skb->data + iv_len, skb->data, hdrlen);
-		skb_pull(skb, iv_len);
+		hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
 	}
 
 no_key:
-	{
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-		u16 fc = le16_to_cpu(hdr->frame_control);
-		if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
-			fc &= ~IEEE80211_STYPE_QOS_DATA;
-			hdr->frame_control = cpu_to_le16(fc);
-			memmove(skb->data + 2, skb->data, hdrlen - 2);
-			skb_pull(skb, 2);
-		}
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+		memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
+			hdrlen - IEEE80211_QOS_CTL_LEN);
+		skb_pull(skb, IEEE80211_QOS_CTL_LEN);
 	}
 }
 
@@ -1369,6 +542,7 @@
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	u16 frag, type;
 	__le16 fc;
+	struct ieee80211_supported_band *sband;
 	struct ieee80211_tx_status_rtap_hdr *rthdr;
 	struct ieee80211_sub_if_data *sdata;
 	struct net_device *prev_dev = NULL;
@@ -1376,47 +550,48 @@
 
 	rcu_read_lock();
 
-	if (info->status.excessive_retries) {
-		sta = sta_info_get(local, hdr->addr1);
-		if (sta) {
-			if (test_sta_flags(sta, WLAN_STA_PS)) {
-				/*
-				 * The STA is in power save mode, so assume
-				 * that this TX packet failed because of that.
-				 */
-				ieee80211_handle_filtered_frame(local, sta, skb);
-				rcu_read_unlock();
-				return;
-			}
-		}
-	}
+	sta = sta_info_get(local, hdr->addr1);
 
-	fc = hdr->frame_control;
-
-	if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
-	    (ieee80211_is_data_qos(fc))) {
-		u16 tid, ssn;
-		u8 *qc;
-		sta = sta_info_get(local, hdr->addr1);
-		if (sta) {
-			qc = ieee80211_get_qos_ctl(hdr);
-			tid = qc[0] & 0xf;
-			ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
-						& IEEE80211_SCTL_SEQ);
-			ieee80211_send_bar(sta->sdata->dev, hdr->addr1,
-					   tid, ssn);
-		}
-	}
-
-	if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
-		sta = sta_info_get(local, hdr->addr1);
-		if (sta) {
+	if (sta) {
+		if (info->status.excessive_retries &&
+		    test_sta_flags(sta, WLAN_STA_PS)) {
+			/*
+			 * The STA is in power save mode, so assume
+			 * that this TX packet failed because of that.
+			 */
 			ieee80211_handle_filtered_frame(local, sta, skb);
 			rcu_read_unlock();
 			return;
 		}
-	} else
-		rate_control_tx_status(local->mdev, skb);
+
+		fc = hdr->frame_control;
+
+		if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
+		    (ieee80211_is_data_qos(fc))) {
+			u16 tid, ssn;
+			u8 *qc;
+
+			qc = ieee80211_get_qos_ctl(hdr);
+			tid = qc[0] & 0xf;
+			ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
+						& IEEE80211_SCTL_SEQ);
+			ieee80211_send_bar(sta->sdata, hdr->addr1,
+					   tid, ssn);
+		}
+
+		if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
+			ieee80211_handle_filtered_frame(local, sta, skb);
+			rcu_read_unlock();
+			return;
+		} else {
+			if (info->status.excessive_retries)
+				sta->tx_retry_failed++;
+			sta->tx_retry_count += info->status.retry_count;
+		}
+
+		sband = local->hw.wiphy->bands[info->band];
+		rate_control_tx_status(local, sband, sta, skb);
+	}
 
 	rcu_read_unlock();
 
@@ -1504,7 +679,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 			if (!netif_running(sdata->dev))
 				continue;
 
@@ -1580,8 +755,6 @@
 
 	local->hw.queues = 1; /* default */
 
-	local->bridge_packets = 1;
-
 	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
 	local->short_retry_limit = 7;
@@ -1592,7 +765,7 @@
 
 	spin_lock_init(&local->key_lock);
 
-	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);
+	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
 	sta_info_init(local);
 
@@ -1619,7 +792,7 @@
 	int result;
 	enum ieee80211_band band;
 	struct net_device *mdev;
-	struct wireless_dev *mwdev;
+	struct ieee80211_master_priv *mpriv;
 
 	/*
 	 * generic code guarantees at least one band,
@@ -1639,6 +812,13 @@
 		}
 	}
 
+	/* if low-level driver supports AP, we also support VLAN */
+	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
+		local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+
+	/* mac80211 always supports monitor */
+	local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+
 	result = wiphy_register(local->hw.wiphy);
 	if (result < 0)
 		return result;
@@ -1654,16 +834,14 @@
 	if (hw->queues < 4)
 		hw->ampdu_queues = 0;
 
-	mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
+	mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
 			       "wmaster%d", ether_setup,
 			       ieee80211_num_queues(hw));
 	if (!mdev)
 		goto fail_mdev_alloc;
 
-	mwdev = netdev_priv(mdev);
-	mdev->ieee80211_ptr = mwdev;
-	mwdev->wiphy = local->hw.wiphy;
-
+	mpriv = netdev_priv(mdev);
+	mpriv->local = local;
 	local->mdev = mdev;
 
 	ieee80211_rx_bss_list_init(local);
@@ -1745,7 +923,7 @@
 
 	/* add one default STA interface */
 	result = ieee80211_if_add(local, "wlan%d", NULL,
-				  IEEE80211_IF_TYPE_STA, NULL);
+				  NL80211_IFTYPE_STATION, NULL);
 	if (result)
 		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
 		       wiphy_name(local->hw.wiphy));
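The main.c hunks above remove the TX block-ack session machinery while keeping its driver-facing contract: mac80211 invokes ops->ampdu_action() with IEEE80211_AMPDU_TX_START/TX_STOP, and the driver later confirms via ieee80211_start_tx_ba_cb_irqsafe() / ieee80211_stop_tx_ba_cb_irqsafe(). Below is a minimal sketch of a driver handler wired to that contract; mydrv_* is hypothetical and the exact ampdu_action prototype is only inferred from the call sites visible in the removed code, not taken from any particular release.

/*
 * Hypothetical driver ampdu_action handler. The prototype mirrors the
 * mac80211 call sites shown in the removed hunks above; treat it as a
 * sketch, not an authoritative signature.
 */
static int mydrv_ampdu_action(struct ieee80211_hw *hw,
			      enum ieee80211_ampdu_mlme_action action,
			      const u8 *addr, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* set up hardware aggregation state for this RA/TID ... */
		/* then let mac80211 proceed with the addBA exchange */
		ieee80211_start_tx_ba_cb_irqsafe(hw, addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		/* flush and tear down the hardware aggregation state ... */
		ieee80211_stop_tx_ba_cb_irqsafe(hw, addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}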
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 35f2f95..8013277 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -12,6 +12,9 @@
 #include "ieee80211_i.h"
 #include "mesh.h"
 
+#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
+#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
+
 #define PP_OFFSET 	1		/* Path Selection Protocol */
 #define PM_OFFSET	5		/* Path Selection Metric   */
 #define CC_OFFSET	9		/* Congestion Control Mode */
@@ -35,19 +38,28 @@
 	kmem_cache_destroy(rm_cache);
 }
 
+static void ieee80211_mesh_housekeeping_timer(unsigned long data)
+{
+	struct ieee80211_sub_if_data *sdata = (void *) data;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+	ifmsh->housekeeping = true;
+	queue_work(local->hw.workqueue, &ifmsh->work);
+}
+
 /**
  * mesh_matches_local - check if the config of a mesh point matches ours
  *
  * @ie: information elements of a management frame from the mesh peer
- * @dev: local mesh interface
+ * @sdata: local mesh subif
  *
  * This function checks if the mesh configuration of a mesh point matches the
  * local mesh configuration, i.e. if both nodes belong to the same mesh network.
  */
-bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
+bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *sta = &sdata->u.sta;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
 	/*
 	 * As support for each feature is added, check for matching
@@ -59,11 +71,11 @@
 	 *   - MDA enabled
 	 * - Power management control on fc
 	 */
-	if (sta->mesh_id_len == ie->mesh_id_len &&
-		memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
-		memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
-		memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
-		memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
+	if (ifmsh->mesh_id_len == ie->mesh_id_len &&
+		memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
+		memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
+		memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
+		memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
 		return true;
 
 	return false;
@@ -73,10 +85,8 @@
  * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
  *
  * @ie: information elements of a management frame from the mesh peer
- * @dev: local mesh interface
  */
-bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie,
-			      struct net_device *dev)
+bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
 {
 	return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
 }
@@ -98,11 +108,11 @@
 	 */
 	free_plinks = mesh_plink_availables(sdata);
 
-	if (free_plinks != sdata->u.sta.accepting_plinks)
-		ieee80211_sta_timer((unsigned long) sdata);
+	if (free_plinks != sdata->u.mesh.accepting_plinks)
+		ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
 }
 
-void mesh_ids_set_default(struct ieee80211_if_sta *sta)
+void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
 {
 	u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
 
@@ -111,28 +121,26 @@
 	memcpy(sta->mesh_cc_id, def_id, 4);
 }
 
-int mesh_rmc_init(struct net_device *dev)
+int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	int i;
 
-	sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
-	if (!sdata->u.sta.rmc)
+	sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
+	if (!sdata->u.mesh.rmc)
 		return -ENOMEM;
-	sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1;
+	sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
 	for (i = 0; i < RMC_BUCKETS; i++)
-		INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list);
+		INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
 	return 0;
 }
 
-void mesh_rmc_free(struct net_device *dev)
+void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct mesh_rmc *rmc = sdata->u.sta.rmc;
+	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
 	struct rmc_entry *p, *n;
 	int i;
 
-	if (!sdata->u.sta.rmc)
+	if (!sdata->u.mesh.rmc)
 		return;
 
 	for (i = 0; i < RMC_BUCKETS; i++)
@@ -142,7 +150,7 @@
 		}
 
 	kfree(rmc);
-	sdata->u.sta.rmc = NULL;
+	sdata->u.mesh.rmc = NULL;
 }
 
 /**
@@ -158,10 +166,9 @@
  * it.
  */
 int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
-		   struct net_device *dev)
+		   struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct mesh_rmc *rmc = sdata->u.sta.rmc;
+	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
 	u32 seqnum = 0;
 	int entries = 0;
 	u8 idx;
@@ -194,10 +201,9 @@
 	return 0;
 }
 
-void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
+void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 	int len, i, rate;
@@ -224,11 +230,11 @@
 		}
 	}
 
-	pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len);
+	pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
 	*pos++ = WLAN_EID_MESH_ID;
-	*pos++ = sdata->u.sta.mesh_id_len;
-	if (sdata->u.sta.mesh_id_len)
-		memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len);
+	*pos++ = sdata->u.mesh.mesh_id_len;
+	if (sdata->u.mesh.mesh_id_len)
+		memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
 
 	pos = skb_put(skb, 21);
 	*pos++ = WLAN_EID_MESH_CONFIG;
@@ -237,15 +243,15 @@
 	*pos++ = 1;
 
 	/* Active path selection protocol ID */
-	memcpy(pos, sdata->u.sta.mesh_pp_id, 4);
+	memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
 	pos += 4;
 
 	/* Active path selection metric ID   */
-	memcpy(pos, sdata->u.sta.mesh_pm_id, 4);
+	memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
 	pos += 4;
 
 	/* Congestion control mode identifier */
-	memcpy(pos, sdata->u.sta.mesh_cc_id, 4);
+	memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
 	pos += 4;
 
 	/* Channel precedence:
@@ -255,17 +261,17 @@
 	pos += 4;
 
 	/* Mesh capability */
-	sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata);
-	*pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00;
+	sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
+	*pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
 	*pos++ = 0x00;
 
 	return;
 }
 
-u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl)
+u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
 {
 	/* Use last four bytes of hw addr and interface index as hash index */
-	return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd)
+	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
 		& tbl->hash_mask;
 }
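mesh_table_hash() above derives a bucket index by hashing the low four bytes of the MAC address together with the interface index, then masking down to the power-of-two table size. A small standalone sketch of that computation follows; everything except jhash_2words() is an illustrative name.

#include <linux/jhash.h>

struct toy_table {
	u32 hash_rnd;	/* per-table random salt, set at allocation time */
	int hash_mask;	/* table size minus one, size is a power of two */
};

static u32 toy_hash(const u8 addr[6], int ifindex, struct toy_table *tbl)
{
	u32 low4 = *(const u32 *)(addr + 2);	/* last four bytes of the MAC */

	return jhash_2words(low4, ifindex, tbl->hash_rnd) & tbl->hash_mask;
}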
 
@@ -344,10 +350,10 @@
 {
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct ieee80211_local *local = wdev_priv(&sdata->wdev);
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
 
-	queue_work(local->hw.workqueue, &ifsta->work);
+	queue_work(local->hw.workqueue, &ifmsh->work);
 }
 
 struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
@@ -399,50 +405,264 @@
 		struct ieee80211_sub_if_data *sdata)
 {
 	meshhdr->flags = 0;
-	meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
-	put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum);
-	sdata->u.sta.mesh_seqnum++;
+	meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
+	put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
+	sdata->u.mesh.mesh_seqnum++;
 
 	return 6;
 }
 
+static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
+			   struct ieee80211_if_mesh *ifmsh)
+{
+	bool free_plinks;
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+	printk(KERN_DEBUG "%s: running mesh housekeeping\n",
+	       sdata->dev->name);
+#endif
+
+	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
+	mesh_path_expire(sdata);
+
+	free_plinks = mesh_plink_availables(sdata);
+	if (free_plinks != sdata->u.mesh.accepting_plinks)
+		ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
+
+	ifmsh->housekeeping = false;
+	mod_timer(&ifmsh->housekeeping_timer,
+		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
+}
+
+
+void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
+
+	ifmsh->housekeeping = true;
+	queue_work(local->hw.workqueue, &ifmsh->work);
+	ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
+}
+
+void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+{
+	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
+	/*
+	 * If the timer fired while we waited for it, it will have
+	 * requeued the work. Now the work will be running again
+	 * but will not rearm the timer again because it checks
+	 * whether the interface is running, which, at this point,
+	 * it no longer is.
+	 */
+	cancel_work_sync(&sdata->u.mesh.work);
+
+	/*
+	 * When we get here, the interface is marked down.
+	 * Call synchronize_rcu() to wait for the RX path
+	 * should it be using the interface and enqueuing
+	 * frames at this very time on another CPU.
+	 */
+	synchronize_rcu();
+	skb_queue_purge(&sdata->u.mesh.skb_queue);
+}
+
+static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+					u16 stype,
+					struct ieee80211_mgmt *mgmt,
+					size_t len,
+					struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee802_11_elems elems;
+	struct ieee80211_channel *channel;
+	u64 supp_rates = 0;
+	size_t baselen;
+	int freq;
+	enum ieee80211_band band = rx_status->band;
+
+	/* ignore ProbeResp to foreign address */
+	if (stype == IEEE80211_STYPE_PROBE_RESP &&
+	    compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
+		return;
+
+	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
+	if (baselen > len)
+		return;
+
+	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
+			       &elems);
+
+	if (elems.ds_params && elems.ds_params_len == 1)
+		freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+	else
+		freq = rx_status->freq;
+
+	channel = ieee80211_get_channel(local->hw.wiphy, freq);
+
+	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+		return;
+
+	if (elems.mesh_id && elems.mesh_config &&
+	    mesh_matches_local(&elems, sdata)) {
+		supp_rates = ieee80211_sta_get_rates(local, &elems, band);
+
+		mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
+				      mesh_peer_accepts_plinks(&elems));
+	}
+}
+
+static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
+					  struct ieee80211_mgmt *mgmt,
+					  size_t len,
+					  struct ieee80211_rx_status *rx_status)
+{
+	switch (mgmt->u.action.category) {
+	case PLINK_CATEGORY:
+		mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+		break;
+	case MESH_PATH_SEL_CATEGORY:
+		mesh_rx_path_sel_frame(sdata, mgmt, len);
+		break;
+	}
+}
+
+static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+					  struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *rx_status;
+	struct ieee80211_if_mesh *ifmsh;
+	struct ieee80211_mgmt *mgmt;
+	u16 stype;
+
+	ifmsh = &sdata->u.mesh;
+
+	rx_status = (struct ieee80211_rx_status *) skb->cb;
+	mgmt = (struct ieee80211_mgmt *) skb->data;
+	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
+
+	switch (stype) {
+	case IEEE80211_STYPE_PROBE_RESP:
+	case IEEE80211_STYPE_BEACON:
+		ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
+					    rx_status);
+		break;
+	case IEEE80211_STYPE_ACTION:
+		ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
+		break;
+	}
+
+	kfree_skb(skb);
+}
+
+static void ieee80211_mesh_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct sk_buff *skb;
+
+	if (!netif_running(sdata->dev))
+		return;
+
+	if (local->sw_scanning || local->hw_scanning)
+		return;
+
+	while ((skb = skb_dequeue(&ifmsh->skb_queue)))
+		ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+
+	if (ifmsh->preq_queue_len &&
+	    time_after(jiffies,
+		       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
+		mesh_path_start_discovery(sdata);
+
+	if (ifmsh->housekeeping)
+		ieee80211_mesh_housekeeping(sdata, ifmsh);
+}
+
+void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list)
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			queue_work(local->hw.workqueue, &sdata->u.mesh.work);
+	rcu_read_unlock();
+}
+
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
-	ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
-	ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
-	ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
-	ifsta->mshcfg.dot11MeshTTL = MESH_TTL;
-	ifsta->mshcfg.auto_open_plinks = true;
-	ifsta->mshcfg.dot11MeshMaxPeerLinks =
+	INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
+	setup_timer(&ifmsh->housekeeping_timer,
+		    ieee80211_mesh_housekeeping_timer,
+		    (unsigned long) sdata);
+	skb_queue_head_init(&sdata->u.mesh.skb_queue);
+
+	ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
+	ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
+	ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
+	ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
+	ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
+	ifmsh->mshcfg.auto_open_plinks = true;
+	ifmsh->mshcfg.dot11MeshMaxPeerLinks =
 		MESH_MAX_ESTAB_PLINKS;
-	ifsta->mshcfg.dot11MeshHWMPactivePathTimeout =
+	ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
 		MESH_PATH_TIMEOUT;
-	ifsta->mshcfg.dot11MeshHWMPpreqMinInterval =
+	ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
 		MESH_PREQ_MIN_INT;
-	ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
+	ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
 		MESH_DIAM_TRAVERSAL_TIME;
-	ifsta->mshcfg.dot11MeshHWMPmaxPREQretries =
+	ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
 		MESH_MAX_PREQ_RETRIES;
-	ifsta->mshcfg.path_refresh_time =
+	ifmsh->mshcfg.path_refresh_time =
 		MESH_PATH_REFRESH_TIME;
-	ifsta->mshcfg.min_discovery_timeout =
+	ifmsh->mshcfg.min_discovery_timeout =
 		MESH_MIN_DISCOVERY_TIMEOUT;
-	ifsta->accepting_plinks = true;
-	ifsta->preq_id = 0;
-	ifsta->dsn = 0;
-	atomic_set(&ifsta->mpaths, 0);
-	mesh_rmc_init(sdata->dev);
-	ifsta->last_preq = jiffies;
+	ifmsh->accepting_plinks = true;
+	ifmsh->preq_id = 0;
+	ifmsh->dsn = 0;
+	atomic_set(&ifmsh->mpaths, 0);
+	mesh_rmc_init(sdata);
+	ifmsh->last_preq = jiffies;
 	/* Allocate all mesh structures when creating the first mesh interface. */
 	if (!mesh_allocated)
 		ieee80211s_init();
-	mesh_ids_set_default(ifsta);
-	setup_timer(&ifsta->mesh_path_timer,
+	mesh_ids_set_default(ifmsh);
+	setup_timer(&ifmsh->mesh_path_timer,
 		    ieee80211_mesh_path_timer,
 		    (unsigned long) sdata);
-	INIT_LIST_HEAD(&ifsta->preq_queue.list);
-	spin_lock_init(&ifsta->mesh_preq_queue_lock);
+	INIT_LIST_HEAD(&ifmsh->preq_queue.list);
+	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
+}
+
+ieee80211_rx_result
+ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		       struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_mgmt *mgmt;
+	u16 fc;
+
+	if (skb->len < 24)
+		return RX_DROP_MONITOR;
+
+	mgmt = (struct ieee80211_mgmt *) skb->data;
+	fc = le16_to_cpu(mgmt->frame_control);
+
+	switch (fc & IEEE80211_FCTL_STYPE) {
+	case IEEE80211_STYPE_PROBE_RESP:
+	case IEEE80211_STYPE_BEACON:
+	case IEEE80211_STYPE_ACTION:
+		memcpy(skb->cb, rx_status, sizeof(*rx_status));
+		skb_queue_tail(&ifmsh->skb_queue, skb);
+		queue_work(local->hw.workqueue, &ifmsh->work);
+		return RX_QUEUED;
+	}
+
+	return RX_CONTINUE;
 }
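The mesh.c additions above use a common two-stage pattern for periodic work: the timer callback only sets a flag and queues a work item, and the work item does the maintenance in process context and re-arms the timer with mod_timer(round_jiffies(...)). Here is a self-contained sketch of that pattern using the same era's setup_timer()/unsigned long timer API; all names are illustrative.

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct hk_state {
	struct timer_list timer;
	struct work_struct work;
	bool pending;
};

static void hk_work(struct work_struct *work)
{
	struct hk_state *hk = container_of(work, struct hk_state, work);

	if (!hk->pending)
		return;
	/* ... periodic maintenance runs here, in process context ... */
	hk->pending = false;
	/* re-arm only from the work item, never from the timer callback */
	mod_timer(&hk->timer, round_jiffies(jiffies + 60 * HZ));
}

static void hk_timer(unsigned long data)
{
	struct hk_state *hk = (struct hk_state *) data;

	/* softirq context: just flag the work and defer */
	hk->pending = true;
	schedule_work(&hk->work);
}

static void hk_init(struct hk_state *hk)
{
	INIT_WORK(&hk->work, hk_work);
	setup_timer(&hk->timer, hk_timer, (unsigned long) hk);
	hk->pending = true;
	schedule_work(&hk->work);	/* kick the first pass immediately */
}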
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7495fbb..e10471c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -47,7 +47,7 @@
  * struct mesh_path - mac80211 mesh path structure
  *
  * @dst: mesh path destination mac address
- * @dev: mesh path device
+ * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
  * 	forwarded
  * @timer: mesh path discovery timer
@@ -64,14 +64,15 @@
  * @state_lock: mesh path state lock
  *
  *
- * The combination of dst and dev is unique in the mesh path table. Since the
+ * The combination of dst and sdata is unique in the mesh path table. Since the
  * next_hop STA is only protected by RCU as well, deleting the STA must also
  * remove/substitute the mesh_path structure and wait until that is no longer
  * reachable before destroying the STA completely.
  */
 struct mesh_path {
 	u8 dst[ETH_ALEN];
-	struct net_device *dev;
+	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
+	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *next_hop;
 	struct timer_list timer;
 	struct sk_buff_head frame_queue;
@@ -203,67 +204,82 @@
 int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
 		struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
-		struct net_device *dev);
-bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev);
-void mesh_ids_set_default(struct ieee80211_if_sta *sta);
-void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev);
-void mesh_rmc_free(struct net_device *dev);
-int mesh_rmc_init(struct net_device *dev);
+		struct ieee80211_sub_if_data *sdata);
+bool mesh_matches_local(struct ieee802_11_elems *ie,
+		struct ieee80211_sub_if_data *sdata);
+void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
+void mesh_mgmt_ies_add(struct sk_buff *skb,
+		struct ieee80211_sub_if_data *sdata);
+void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
+int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_stop(void);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
+ieee80211_rx_result
+ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		       struct ieee80211_rx_status *rx_status);
+void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
+void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 
 /* Mesh paths */
-int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev);
-void mesh_path_start_discovery(struct net_device *dev);
-struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev);
-struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev);
+int mesh_nexthop_lookup(struct sk_buff *skb,
+		struct ieee80211_sub_if_data *sdata);
+void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
+struct mesh_path *mesh_path_lookup(u8 *dst,
+		struct ieee80211_sub_if_data *sdata);
+struct mesh_path *mpp_path_lookup(u8 *dst,
+				  struct ieee80211_sub_if_data *sdata);
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata);
+struct mesh_path *mesh_path_lookup_by_idx(int idx,
+		struct ieee80211_sub_if_data *sdata);
 void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
-void mesh_path_expire(struct net_device *dev);
-void mesh_path_flush(struct net_device *dev);
-void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
-		size_t len);
-int mesh_path_add(u8 *dst, struct net_device *dev);
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
+void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
+void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
+		struct ieee80211_mgmt *mgmt, size_t len);
+int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
 /* Mesh plinks */
-void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
-		bool add);
-bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie,
-			      struct net_device *dev);
+void mesh_neighbour_update(u8 *hw_addr, u64 rates,
+		struct ieee80211_sub_if_data *sdata, bool add);
+bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
 void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
 void mesh_plink_deactivate(struct sta_info *sta);
 int mesh_plink_open(struct sta_info *sta);
 int mesh_plink_close(struct sta_info *sta);
 void mesh_plink_block(struct sta_info *sta);
-void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
-			 size_t len, struct ieee80211_rx_status *rx_status);
+void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
+			 struct ieee80211_mgmt *mgmt, size_t len,
+			 struct ieee80211_rx_status *rx_status);
 
 /* Private interfaces */
 /* Mesh tables */
 struct mesh_table *mesh_table_alloc(int size_order);
 void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
 struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
-u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl);
+u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+		struct mesh_table *tbl);
 /* Mesh paths */
 int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
-		struct net_device *dev);
+		struct ieee80211_sub_if_data *sdata);
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
 int mesh_pathtbl_init(void);
 void mesh_pathtbl_unregister(void);
-int mesh_path_del(u8 *addr, struct net_device *dev);
+int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
 void mesh_path_timer(unsigned long data);
 void mesh_path_flush_by_nexthop(struct sta_info *sta);
-void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev);
+void mesh_path_discard_frame(struct sk_buff *skb,
+		struct ieee80211_sub_if_data *sdata);
 
 #ifdef CONFIG_MAC80211_MESH
 extern int mesh_allocated;
 
 static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
 {
-	return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks -
-	       atomic_read(&sdata->u.sta.mshstats.estab_plinks);
+	return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
+	       atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
 }
 
 static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
@@ -281,8 +297,12 @@
 	for (i = 0; i <= x->hash_mask; i++) \
 		hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
 
+void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
+
 #else
 #define mesh_allocated	0
+static inline void
+ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
 #endif
 
 #endif /* IEEE80211S_H */
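The #ifdef CONFIG_MAC80211_MESH block extended above follows the usual compile-out idiom: declare the real function when the feature is built in and provide an empty static inline stub otherwise, so callers such as the scan code never need #ifdefs of their own. A minimal sketch of that idiom, with hypothetical names:

struct my_ctx;	/* stands in for the context type the real function takes */

#ifdef CONFIG_MYFEATURE
void myfeature_notify_scan_completed(struct my_ctx *ctx);
#else
static inline void myfeature_notify_scan_completed(struct my_ctx *ctx)
{
}
#endif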
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 08aca44..501c783 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -64,14 +64,14 @@
 #define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
 
 #define net_traversal_jiffies(s) \
-	msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
+	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
 #define default_lifetime(s) \
-	MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout)
+	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
 #define min_preq_int_jiff(s) \
-	(msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval))
-#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries)
+	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
+#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
 #define disc_timeout_jiff(s) \
-	msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout)
+	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
 
 enum mpath_frame_type {
 	MPATH_PREQ = 0,
@@ -82,9 +82,9 @@
 static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 		u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
 		__le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
-		__le32 metric, __le32 preq_id, struct net_device *dev)
+		__le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos;
@@ -99,11 +99,11 @@
 	mgmt = (struct ieee80211_mgmt *)
 		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
 	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_ACTION);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
 
 	memcpy(mgmt->da, da, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	/* BSSID is left zeroed, wildcard value */
 	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
 	mgmt->u.action.u.mesh_action.action_code = action;
@@ -149,7 +149,7 @@
 	pos += ETH_ALEN;
 	memcpy(pos, &dst_dsn, 4);
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
 	return 0;
 }
 
@@ -161,9 +161,9 @@
  * @ra: node this frame is addressed to
  */
 int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
-		struct net_device *dev)
+		struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos;
@@ -178,11 +178,11 @@
 	mgmt = (struct ieee80211_mgmt *)
 		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
 	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_ACTION);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
 
 	memcpy(mgmt->da, ra, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	/* BSSID is left zeroed, wildcard value */
 	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
 	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
@@ -198,7 +198,7 @@
 	pos += ETH_ALEN;
 	memcpy(pos, &dst_dsn, 4);
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
 	return 0;
 }
 
@@ -223,7 +223,7 @@
 	/* bitrate is in units of 100 Kbps, while we need rate in units of
 	 * 1Mbps. This will be corrected on tx_time computation.
 	 */
-	rate = sband->bitrates[sta->txrate_idx].bitrate;
+	rate = sband->bitrates[sta->last_txrate_idx].bitrate;
 	tx_time = (device_constant + 10 * test_frame_len / rate);
 	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
 	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -233,7 +233,7 @@
 /**
  * hwmp_route_info_get - Update routing info to originator and transmitter
  *
- * @dev: local mesh interface
+ * @sdata: local mesh subif
  * @mgmt: mesh management frame
  * @hwmp_ie: hwmp information element (PREP or PREQ)
  *
@@ -246,11 +246,11 @@
  * Notes: this function is the only place (besides user-provided info) where
  * path routing information is updated.
  */
-static u32 hwmp_route_info_get(struct net_device *dev,
+static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 			    struct ieee80211_mgmt *mgmt,
 			    u8 *hwmp_ie)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct mesh_path *mpath;
 	struct sta_info *sta;
 	bool fresh_info;
@@ -301,14 +301,14 @@
 		new_metric = MAX_METRIC;
 	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 
-	if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) {
+	if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
 		/* This MP is the originator, we are not interested in this
 		 * frame, except for updating transmitter's path info.
 		 */
 		process = false;
 		fresh_info = false;
 	} else {
-		mpath = mesh_path_lookup(orig_addr, dev);
+		mpath = mesh_path_lookup(orig_addr, sdata);
 		if (mpath) {
 			spin_lock_bh(&mpath->state_lock);
 			if (mpath->flags & MESH_PATH_FIXED)
@@ -324,8 +324,8 @@
 				}
 			}
 		} else {
-			mesh_path_add(orig_addr, dev);
-			mpath = mesh_path_lookup(orig_addr, dev);
+			mesh_path_add(orig_addr, sdata);
+			mpath = mesh_path_lookup(orig_addr, sdata);
 			if (!mpath) {
 				rcu_read_unlock();
 				return 0;
@@ -357,7 +357,7 @@
 	else {
 		fresh_info = true;
 
-		mpath = mesh_path_lookup(ta, dev);
+		mpath = mesh_path_lookup(ta, sdata);
 		if (mpath) {
 			spin_lock_bh(&mpath->state_lock);
 			if ((mpath->flags & MESH_PATH_FIXED) ||
@@ -365,8 +365,8 @@
 					(last_hop_metric > mpath->metric)))
 				fresh_info = false;
 		} else {
-			mesh_path_add(ta, dev);
-			mpath = mesh_path_lookup(ta, dev);
+			mesh_path_add(ta, sdata);
+			mpath = mesh_path_lookup(ta, sdata);
 			if (!mpath) {
 				rcu_read_unlock();
 				return 0;
@@ -392,11 +392,10 @@
 	return process ? new_metric : 0;
 }
 
-static void hwmp_preq_frame_process(struct net_device *dev,
+static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_mgmt *mgmt,
 				    u8 *preq_elem, u32 metric) {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_path *mpath;
 	u8 *dst_addr, *orig_addr;
 	u8 dst_flags, ttl;
@@ -411,19 +410,19 @@
 	orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
 	dst_flags = PREQ_IE_DST_F(preq_elem);
 
-	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) {
+	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
 		forward = false;
 		reply = true;
 		metric = 0;
-		if (time_after(jiffies, ifsta->last_dsn_update +
+		if (time_after(jiffies, ifmsh->last_dsn_update +
 					net_traversal_jiffies(sdata)) ||
-		    time_before(jiffies, ifsta->last_dsn_update)) {
-			dst_dsn = ++ifsta->dsn;
-			ifsta->last_dsn_update = jiffies;
+		    time_before(jiffies, ifmsh->last_dsn_update)) {
+			dst_dsn = ++ifmsh->dsn;
+			ifmsh->last_dsn_update = jiffies;
 		}
 	} else {
 		rcu_read_lock();
-		mpath = mesh_path_lookup(dst_addr, dev);
+		mpath = mesh_path_lookup(dst_addr, sdata);
 		if (mpath) {
 			if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
 					DSN_LT(mpath->dsn, dst_dsn)) {
@@ -445,15 +444,15 @@
 
 	if (reply) {
 		lifetime = PREQ_IE_LIFETIME(preq_elem);
-		ttl = ifsta->mshcfg.dot11MeshTTL;
+		ttl = ifmsh->mshcfg.dot11MeshTTL;
 		if (ttl != 0)
 			mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
 				cpu_to_le32(dst_dsn), 0, orig_addr,
 				cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
 				cpu_to_le32(lifetime), cpu_to_le32(metric),
-				0, dev);
+				0, sdata);
 		else
-			ifsta->mshstats.dropped_frames_ttl++;
+			ifmsh->mshstats.dropped_frames_ttl++;
 	}
 
 	if (forward) {
@@ -463,7 +462,7 @@
 		ttl = PREQ_IE_TTL(preq_elem);
 		lifetime = PREQ_IE_LIFETIME(preq_elem);
 		if (ttl <= 1) {
-			ifsta->mshstats.dropped_frames_ttl++;
+			ifmsh->mshstats.dropped_frames_ttl++;
 			return;
 		}
 		--ttl;
@@ -472,20 +471,19 @@
 		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 				cpu_to_le32(orig_dsn), dst_flags, dst_addr,
-				cpu_to_le32(dst_dsn), dev->broadcast,
+				cpu_to_le32(dst_dsn), sdata->dev->broadcast,
 				hopcount, ttl, cpu_to_le32(lifetime),
 				cpu_to_le32(metric), cpu_to_le32(preq_id),
-				dev);
-		ifsta->mshstats.fwded_frames++;
+				sdata);
+		ifmsh->mshstats.fwded_frames++;
 	}
 }
 
 
-static void hwmp_prep_frame_process(struct net_device *dev,
+static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_mgmt *mgmt,
 				    u8 *prep_elem, u32 metric)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct mesh_path *mpath;
 	u8 *dst_addr, *orig_addr;
 	u8 ttl, hopcount, flags;
@@ -499,18 +497,18 @@
 	 * replies
 	 */
 	dst_addr = PREP_IE_DST_ADDR(prep_elem);
-	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0)
+	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
 		/* destination, no forwarding required */
 		return;
 
 	ttl = PREP_IE_TTL(prep_elem);
 	if (ttl <= 1) {
-		sdata->u.sta.mshstats.dropped_frames_ttl++;
+		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 		return;
 	}
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(dst_addr, dev);
+	mpath = mesh_path_lookup(dst_addr, sdata);
 	if (mpath)
 		spin_lock_bh(&mpath->state_lock);
 	else
@@ -519,7 +517,7 @@
 		spin_unlock_bh(&mpath->state_lock);
 		goto fail;
 	}
-	memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
+	memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
 	spin_unlock_bh(&mpath->state_lock);
 	--ttl;
 	flags = PREP_IE_FLAGS(prep_elem);
@@ -531,20 +529,20 @@
 
 	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
 		cpu_to_le32(orig_dsn), 0, dst_addr,
-		cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl,
+		cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
 		cpu_to_le32(lifetime), cpu_to_le32(metric),
-		0, dev);
+		0, sdata);
 	rcu_read_unlock();
-	sdata->u.sta.mshstats.fwded_frames++;
+	sdata->u.mesh.mshstats.fwded_frames++;
 	return;
 
 fail:
 	rcu_read_unlock();
-	sdata->u.sta.mshstats.dropped_frames_no_route++;
+	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 	return;
 }
 
-static void hwmp_perr_frame_process(struct net_device *dev,
+static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
 {
 	struct mesh_path *mpath;
@@ -555,18 +553,18 @@
 	dst_addr = PERR_IE_DST_ADDR(perr_elem);
 	dst_dsn = PERR_IE_DST_DSN(perr_elem);
 	rcu_read_lock();
-	mpath = mesh_path_lookup(dst_addr, dev);
+	mpath = mesh_path_lookup(dst_addr, sdata);
 	if (mpath) {
 		spin_lock_bh(&mpath->state_lock);
 		if (mpath->flags & MESH_PATH_ACTIVE &&
-		    memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 &&
+		    memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
 		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
 		    DSN_GT(dst_dsn, mpath->dsn))) {
 			mpath->flags &= ~MESH_PATH_ACTIVE;
 			mpath->dsn = dst_dsn;
 			spin_unlock_bh(&mpath->state_lock);
 			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
-					   dev->broadcast, dev);
+					   sdata->dev->broadcast, sdata);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
@@ -575,7 +573,7 @@
 
 
 
-void mesh_rx_path_sel_frame(struct net_device *dev,
+void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 			    struct ieee80211_mgmt *mgmt,
 			    size_t len)
 {
@@ -583,6 +581,10 @@
 	size_t baselen;
 	u32 last_hop_metric;
 
+	/* need action_code */
+	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+		return;
+
 	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
 	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
 			len - baselen, &elems);
@@ -592,25 +594,25 @@
 		if (!elems.preq || elems.preq_len != 37)
 			/* Right now we support just 1 destination and no AE */
 			return;
-		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq);
+		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
 		if (!last_hop_metric)
 			return;
-		hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric);
+		hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
 		break;
 	case MPATH_PREP:
 		if (!elems.prep || elems.prep_len != 31)
 			/* Right now we support no AE */
 			return;
-		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep);
+		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
 		if (!last_hop_metric)
 			return;
-		hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric);
+		hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
 		break;
 	case MPATH_PERR:
 		if (!elems.perr || elems.perr_len != 12)
 			/* Right now we support only one destination per PERR */
 			return;
-		hwmp_perr_frame_process(dev, mgmt, elems.perr);
+		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
 	default:
 		return;
 	}
@@ -628,9 +630,8 @@
  */
 static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 {
-	struct ieee80211_sub_if_data *sdata =
-		IEEE80211_DEV_TO_SUB_IF(mpath->dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_preq_queue *preq_node;
 
 	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
@@ -639,9 +640,9 @@
 		return;
 	}
 
-	spin_lock(&ifsta->mesh_preq_queue_lock);
-	if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
-		spin_unlock(&ifsta->mesh_preq_queue_lock);
+	spin_lock(&ifmsh->mesh_preq_queue_lock);
+	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
+		spin_unlock(&ifmsh->mesh_preq_queue_lock);
 		kfree(preq_node);
 		if (printk_ratelimit())
 			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
@@ -651,55 +652,53 @@
 	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 	preq_node->flags = flags;
 
-	list_add_tail(&preq_node->list, &ifsta->preq_queue.list);
-	++ifsta->preq_queue_len;
-	spin_unlock(&ifsta->mesh_preq_queue_lock);
+	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
+	++ifmsh->preq_queue_len;
+	spin_unlock(&ifmsh->mesh_preq_queue_lock);
 
-	if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata)))
-		queue_work(sdata->local->hw.workqueue, &ifsta->work);
+	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
+		queue_work(sdata->local->hw.workqueue, &ifmsh->work);
 
-	else if (time_before(jiffies, ifsta->last_preq)) {
+	else if (time_before(jiffies, ifmsh->last_preq)) {
 		/* avoid long wait if did not send preqs for a long time
 		 * and jiffies wrapped around
 		 */
-		ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
-		queue_work(sdata->local->hw.workqueue, &ifsta->work);
+		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
+		queue_work(sdata->local->hw.workqueue, &ifmsh->work);
 	} else
-		mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq +
+		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
 						min_preq_int_jiff(sdata));
 }
 
 /**
  * mesh_path_start_discovery - launch a path discovery from the PREQ queue
  *
- * @dev: local mesh interface
+ * @sdata: local mesh subif
  */
-void mesh_path_start_discovery(struct net_device *dev)
+void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata =
-		IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_preq_queue *preq_node;
 	struct mesh_path *mpath;
 	u8 ttl, dst_flags;
 	u32 lifetime;
 
-	spin_lock(&ifsta->mesh_preq_queue_lock);
-	if (!ifsta->preq_queue_len ||
-		time_before(jiffies, ifsta->last_preq +
+	spin_lock(&ifmsh->mesh_preq_queue_lock);
+	if (!ifmsh->preq_queue_len ||
+		time_before(jiffies, ifmsh->last_preq +
 				min_preq_int_jiff(sdata))) {
-		spin_unlock(&ifsta->mesh_preq_queue_lock);
+		spin_unlock(&ifmsh->mesh_preq_queue_lock);
 		return;
 	}
 
-	preq_node = list_first_entry(&ifsta->preq_queue.list,
+	preq_node = list_first_entry(&ifmsh->preq_queue.list,
 			struct mesh_preq_queue, list);
 	list_del(&preq_node->list);
-	--ifsta->preq_queue_len;
-	spin_unlock(&ifsta->mesh_preq_queue_lock);
+	--ifmsh->preq_queue_len;
+	spin_unlock(&ifmsh->mesh_preq_queue_lock);
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(preq_node->dst, dev);
+	mpath = mesh_path_lookup(preq_node->dst, sdata);
 	if (!mpath)
 		goto enddiscovery;
 
@@ -721,18 +720,18 @@
 		goto enddiscovery;
 	}
 
-	ifsta->last_preq = jiffies;
+	ifmsh->last_preq = jiffies;
 
-	if (time_after(jiffies, ifsta->last_dsn_update +
+	if (time_after(jiffies, ifmsh->last_dsn_update +
 				net_traversal_jiffies(sdata)) ||
-	    time_before(jiffies, ifsta->last_dsn_update)) {
-		++ifsta->dsn;
-		sdata->u.sta.last_dsn_update = jiffies;
+	    time_before(jiffies, ifmsh->last_dsn_update)) {
+		++ifmsh->dsn;
+		sdata->u.mesh.last_dsn_update = jiffies;
 	}
 	lifetime = default_lifetime(sdata);
-	ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
+	ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
 	if (ttl == 0) {
-		sdata->u.sta.mshstats.dropped_frames_ttl++;
+		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 		spin_unlock_bh(&mpath->state_lock);
 		goto enddiscovery;
 	}
@@ -743,11 +742,11 @@
 		dst_flags = MP_F_RF;
 
 	spin_unlock_bh(&mpath->state_lock);
-	mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr,
-			cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
-			cpu_to_le32(mpath->dsn), dev->broadcast, 0,
+	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
+			cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
+			cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
 			ttl, cpu_to_le32(lifetime), 0,
-			cpu_to_le32(ifsta->preq_id++), dev);
+			cpu_to_le32(ifmsh->preq_id++), sdata);
 	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
 
 enddiscovery:
@@ -759,7 +758,7 @@
  * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame
  *
  * @skb: 802.11 frame to be sent
- * @dev: network device the frame will be sent through
+ * @sdata: network subif the frame will be sent through
  * @fwd_frame: true if this frame was originally from a different host
  *
  * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
@@ -767,9 +766,9 @@
  * sent when the path is resolved. This means the caller must not free the skb
  * in this case.
  */
-int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
+int mesh_nexthop_lookup(struct sk_buff *skb,
+			struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct sk_buff *skb_to_free = NULL;
 	struct mesh_path *mpath;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -777,14 +776,14 @@
 	int err = 0;
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(dst_addr, dev);
+	mpath = mesh_path_lookup(dst_addr, sdata);
 
 	if (!mpath) {
-		mesh_path_add(dst_addr, dev);
-		mpath = mesh_path_lookup(dst_addr, dev);
+		mesh_path_add(dst_addr, sdata);
+		mpath = mesh_path_lookup(dst_addr, sdata);
 		if (!mpath) {
 			dev_kfree_skb(skb);
-			sdata->u.sta.mshstats.dropped_frames_no_route++;
+			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 			err = -ENOSPC;
 			goto endlookup;
 		}
@@ -792,14 +791,15 @@
 
 	if (mpath->flags & MESH_PATH_ACTIVE) {
 		if (time_after(jiffies, mpath->exp_time -
-			msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
-				&& !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN)
+			msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
+				&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
+					   ETH_ALEN)
 				&& !(mpath->flags & MESH_PATH_RESOLVING)
 				&& !(mpath->flags & MESH_PATH_FIXED)) {
 			mesh_queue_preq(mpath,
 					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 		}
-		memcpy(hdr->addr1, mpath->next_hop->addr,
+		memcpy(hdr->addr1, mpath->next_hop->sta.addr,
 				ETH_ALEN);
 	} else {
 		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -815,7 +815,7 @@
 
 		skb_queue_tail(&mpath->frame_queue, skb);
 		if (skb_to_free)
-			mesh_path_discard_frame(skb_to_free, dev);
+			mesh_path_discard_frame(skb_to_free, sdata);
 		err = -ENOENT;
 	}
 
@@ -835,7 +835,7 @@
 	if (!mpath)
 		goto endmpathtimer;
 	spin_lock_bh(&mpath->state_lock);
-	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
+	sdata = mpath->sdata;
 	if (mpath->flags & MESH_PATH_RESOLVED ||
 			(!(mpath->flags & MESH_PATH_RESOLVING)))
 		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
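
Both mesh_path_lookup() and mesh_nexthop_lookup() above require the caller to hold the RCU read lock for as long as the returned mesh_path (and its next_hop station) is dereferenced. A hedged sketch of that calling convention, copying what is needed before the lock is dropped; my_copy_next_hop() is illustrative only and assumes the declarations from "ieee80211_i.h" and "mesh.h":

/* Sketch only: resolve the next hop for dst on sdata and copy its
 * address out while still inside the RCU read-side critical section. */
static int my_copy_next_hop(struct ieee80211_sub_if_data *sdata,
			    u8 *dst, u8 *next_hop /* ETH_ALEN bytes */)
{
	struct mesh_path *mpath;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst, sdata);
	if (mpath && (mpath->flags & MESH_PATH_ACTIVE)) {
		memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
		err = 0;
	}
	rcu_read_unlock();
	return err;
}
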
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 838ee60..3c72557 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -9,7 +9,6 @@
 
 #include <linux/etherdevice.h>
 #include <linux/list.h>
-#include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -37,6 +36,7 @@
 };
 
 static struct mesh_table *mesh_paths;
+static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -62,13 +62,13 @@
 /**
  * mesh_path_lookup - look up a path in the mesh path table
  * @dst: hardware address (ETH_ALEN length) of destination
- * @dev: local interface
+ * @sdata: local subif
  *
  * Returns: pointer to the mesh path structure, or NULL if not found
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct hlist_node *n;
@@ -78,10 +78,10 @@
 
 	tbl = rcu_dereference(mesh_paths);
 
-	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev &&
+		if (mpath->sdata == sdata &&
 				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
@@ -95,16 +95,44 @@
 	return NULL;
 }
 
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct hlist_node *n;
+	struct hlist_head *bucket;
+	struct mesh_table *tbl;
+	struct mpath_node *node;
+
+	tbl = rcu_dereference(mpp_paths);
+
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
+	hlist_for_each_entry_rcu(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata &&
+		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+			if (MPATH_EXPIRED(mpath)) {
+				spin_lock_bh(&mpath->state_lock);
+				if (MPATH_EXPIRED(mpath))
+					mpath->flags &= ~MESH_PATH_ACTIVE;
+				spin_unlock_bh(&mpath->state_lock);
+			}
+			return mpath;
+		}
+	}
+	return NULL;
+}
+
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
  * @idx: index
- * @dev: local interface, or NULL for all entries
+ * @sdata: local subif, or NULL for all entries
  *
  * Returns: pointer to the mesh path structure, or NULL if not found.
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
+struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
 {
 	struct mpath_node *node;
 	struct hlist_node *p;
@@ -112,7 +140,7 @@
 	int j = 0;
 
 	for_each_mesh_entry(mesh_paths, p, node, i) {
-		if (dev && node->mpath->dev != dev)
+		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
 			if (MPATH_EXPIRED(node->mpath)) {
@@ -131,15 +159,14 @@
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @addr: destination address of the path (ETH_ALEN length)
- * @dev: local interface
+ * @sdata: local subif
  *
  * Returns: 0 on success
  *
  * State: the initial state of the new path is set to 0
  */
-int mesh_path_add(u8 *dst, struct net_device *dev)
+int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -148,14 +175,14 @@
 	int err = 0;
 	u32 hash_idx;
 
-	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
 	if (is_multicast_ether_addr(dst))
 		return -ENOTSUPP;
 
-	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
+	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
 	err = -ENOMEM;
@@ -169,7 +196,7 @@
 
 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
-	new_mpath->dev = dev;
+	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
 	new_node->mpath = new_mpath;
@@ -179,7 +206,7 @@
 	spin_lock_init(&new_mpath->state_lock);
 	init_timer(&new_mpath->timer);
 
-	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
+	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
@@ -187,7 +214,7 @@
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
 			goto err_exists;
 	}
 
@@ -223,7 +250,92 @@
 err_node_alloc:
 	kfree(new_mpath);
 err_path_alloc:
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
+	return err;
+}
+
+
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath, *new_mpath;
+	struct mpath_node *node, *new_node;
+	struct hlist_head *bucket;
+	struct hlist_node *n;
+	int grow = 0;
+	int err = 0;
+	u32 hash_idx;
+
+
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+		/* never add ourselves as neighbours */
+		return -ENOTSUPP;
+
+	if (is_multicast_ether_addr(dst))
+		return -ENOTSUPP;
+
+	err = -ENOMEM;
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	if (!new_mpath)
+		goto err_path_alloc;
+
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node)
+		goto err_node_alloc;
+
+	read_lock(&pathtbl_resize_lock);
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_node->mpath = new_mpath;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+
+	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
+	bucket = &mpp_paths->hash_buckets[hash_idx];
+
+	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+
+	err = -EEXIST;
+	hlist_for_each_entry(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
+	}
+
+	hlist_add_head_rcu(&new_node->list, bucket);
+	if (atomic_inc_return(&mpp_paths->entries) >=
+		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+		grow = 1;
+
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	if (grow) {
+		struct mesh_table *oldtbl, *newtbl;
+
+		write_lock(&pathtbl_resize_lock);
+		oldtbl = mpp_paths;
+		newtbl = mesh_table_grow(mpp_paths);
+		if (!newtbl) {
+			write_unlock(&pathtbl_resize_lock);
+			return 0;
+		}
+		rcu_assign_pointer(mpp_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
+		synchronize_rcu();
+		mesh_table_free(oldtbl, false);
+	}
+	return 0;
+
+err_exists:
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
 	return err;
 }
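
The grow path added above for mpp_paths follows the same publish-then-free discipline the mesh_paths code already uses: the new table is installed with rcu_assign_pointer() under the resize write lock, and the old table is freed only after synchronize_rcu() guarantees no lookup can still be walking it. A generic sketch of that pattern, assuming the entries were already copied into the new table (my_swap_table() is illustrative, not part of the patch):

/* Sketch: publish newtbl in *slot and retire the old table.  Assumes the
 * caller copied all nodes into newtbl first, as mesh_table_grow() does. */
static void my_swap_table(struct mesh_table **slot, struct mesh_table *newtbl,
			  rwlock_t *resize_lock)
{
	struct mesh_table *oldtbl;

	write_lock(resize_lock);
	oldtbl = *slot;
	rcu_assign_pointer(*slot, newtbl);
	write_unlock(resize_lock);

	synchronize_rcu();		/* wait out in-flight readers */
	mesh_table_free(oldtbl, false);	/* the entries now live in newtbl */
}
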
 
@@ -241,7 +353,7 @@
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
-	struct net_device *dev = sta->sdata->dev;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int i;
 
 	rcu_read_lock();
@@ -256,7 +368,7 @@
 			spin_unlock_bh(&mpath->state_lock);
 			mesh_path_error_tx(mpath->dst,
 					cpu_to_le32(mpath->dsn),
-					dev->broadcast, dev);
+					sdata->dev->broadcast, sdata);
 		} else
 		spin_unlock_bh(&mpath->state_lock);
 	}
@@ -284,11 +396,11 @@
 	for_each_mesh_entry(mesh_paths, p, node, i) {
 		mpath = node->mpath;
 		if (mpath->next_hop == sta)
-			mesh_path_del(mpath->dst, mpath->dev);
+			mesh_path_del(mpath->dst, mpath->sdata);
 	}
 }
 
-void mesh_path_flush(struct net_device *dev)
+void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -297,19 +409,18 @@
 
 	for_each_mesh_entry(mesh_paths, p, node, i) {
 		mpath = node->mpath;
-		if (mpath->dev == dev)
-			mesh_path_del(mpath->dst, mpath->dev);
+		if (mpath->sdata == sdata)
+			mesh_path_del(mpath->dst, mpath->sdata);
 	}
 }
 
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata =
-		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
@@ -318,11 +429,11 @@
  * mesh_path_del - delete a mesh path from the table
  *
  * @addr: dst address (ETH_ALEN length)
- * @dev: local interface
+ * @sdata: local subif
  *
  * Returns: 0 if successful
  */
-int mesh_path_del(u8 *addr, struct net_device *dev)
+int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -332,13 +443,13 @@
 	int err = 0;
 
 	read_lock(&pathtbl_resize_lock);
-	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
+	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev &&
+		if (mpath->sdata == sdata &&
 				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
 			spin_lock_bh(&mpath->state_lock);
 			mpath->flags |= MESH_PATH_RESOLVING;
@@ -378,33 +489,33 @@
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
  * @skb: frame to discard
- * @dev: network device the frame was to be sent through
+ * @sdata: network subif the frame was to be sent through
  *
  * If the frame was being forwarded from another MP, a PERR frame will be sent
  * to the precursor.
  *
  * Locking: the function must be called within an rcu_read_lock region
  */
-void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
+void mesh_path_discard_frame(struct sk_buff *skb,
+			     struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct mesh_path *mpath;
 	u32 dsn = 0;
 
-	if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) {
+	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
 		u8 *ra, *da;
 
 		da = hdr->addr3;
 		ra = hdr->addr2;
-		mpath = mesh_path_lookup(da, dev);
+		mpath = mesh_path_lookup(da, sdata);
 		if (mpath)
 			dsn = ++mpath->dsn;
-		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
+		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
 	}
 
 	kfree_skb(skb);
-	sdata->u.sta.mshstats.dropped_frames_no_route++;
+	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
 
 /**
@@ -416,14 +527,11 @@
  */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
-	struct ieee80211_sub_if_data *sdata;
 	struct sk_buff *skb;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
-
 	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
 			(mpath->flags & MESH_PATH_ACTIVE))
-		mesh_path_discard_frame(skb, mpath->dev);
+		mesh_path_discard_frame(skb, mpath->sdata);
 }
 
 /**
@@ -472,7 +580,7 @@
 	node = hlist_entry(p, struct mpath_node, list);
 	mpath = node->mpath;
 	new_node->mpath = mpath;
-	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
+	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
 	hlist_add_head(&new_node->list,
 			&newtbl->hash_buckets[hash_idx]);
 	return 0;
@@ -481,15 +589,25 @@
 int mesh_pathtbl_init(void)
 {
 	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mesh_paths)
+		return -ENOMEM;
 	mesh_paths->free_node = &mesh_path_node_free;
 	mesh_paths->copy_node = &mesh_path_node_copy;
 	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
-	if (!mesh_paths)
+
+	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mpp_paths) {
+		mesh_table_free(mesh_paths, true);
 		return -ENOMEM;
+	}
+	mpp_paths->free_node = &mesh_path_node_free;
+	mpp_paths->copy_node = &mesh_path_node_copy;
+	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+
 	return 0;
 }
 
-void mesh_path_expire(struct net_device *dev)
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -498,7 +616,7 @@
 
 	read_lock(&pathtbl_resize_lock);
 	for_each_mesh_entry(mesh_paths, p, node, i) {
-		if (node->mpath->dev != dev)
+		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
 		spin_lock_bh(&mpath->state_lock);
@@ -507,7 +625,7 @@
 			time_after(jiffies,
 			 mpath->exp_time + MESH_PATH_EXPIRE)) {
 			spin_unlock_bh(&mpath->state_lock);
-			mesh_path_del(mpath->dst, mpath->dev);
+			mesh_path_del(mpath->dst, mpath->sdata);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
@@ -517,4 +635,5 @@
 void mesh_pathtbl_unregister(void)
 {
 	mesh_table_free(mesh_paths, true);
+	mesh_table_free(mpp_paths, true);
 }
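
For reference, both add paths above grow their table once atomic_inc_return() reports at least mean_chain_len * (hash_mask + 1) entries, i.e. once the average hash chain length would reach the configured mean. As a purely illustrative example, a table with 8 buckets and a mean chain length of 2 grows as soon as the 16th path is inserted, since 16 >= 2 * 8; the actual INIT_PATHS_SIZE_ORDER and MEAN_CHAIN_LEN values come from mesh.h.
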
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 9efeb1f..faac101 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -36,11 +36,11 @@
 #define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE	9
 #define MESH_SECURITY_FAILED_VERIFICATION	10
 
-#define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries)
-#define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout)
-#define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout)
-#define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout)
-#define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks)
+#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
+#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
+#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
+#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
+#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
 
 enum plink_frame_type {
 	PLINK_OPEN = 0,
@@ -63,14 +63,14 @@
 static inline
 void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
-	atomic_inc(&sdata->u.sta.mshstats.estab_plinks);
+	atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
 	mesh_accept_plinks_update(sdata);
 }
 
 static inline
 void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
-	atomic_dec(&sdata->u.sta.mshstats.estab_plinks);
+	atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
 	mesh_accept_plinks_update(sdata);
 }
 
@@ -106,7 +106,7 @@
 		return NULL;
 
 	sta->flags = WLAN_STA_AUTHORIZED;
-	sta->supp_rates[local->hw.conf.channel->band] = rates;
+	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
 
 	return sta;
 }
@@ -144,10 +144,10 @@
 	spin_unlock_bh(&sta->lock);
 }
 
-static int mesh_plink_frame_tx(struct net_device *dev,
+static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 		enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
 		__le16 reason) {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
 	struct ieee80211_mgmt *mgmt;
 	bool include_plid = false;
@@ -163,10 +163,10 @@
 	mgmt = (struct ieee80211_mgmt *)
 		skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
 	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_ACTION);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
 	memcpy(mgmt->da, da, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	/* BSSID is left zeroed, wildcard value */
 	mgmt->u.action.category = PLINK_CATEGORY;
 	mgmt->u.action.u.plink_action.action_code = action;
@@ -180,7 +180,7 @@
 			/* two-byte status code followed by two-byte AID */
 			memset(pos, 0, 4);
 		}
-		mesh_mgmt_ies_add(skb, dev);
+		mesh_mgmt_ies_add(skb, sdata);
 	}
 
 	/* Add Peer Link Management element */
@@ -217,15 +217,14 @@
 		memcpy(pos, &reason, 2);
 	}
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
 	return 0;
 }
 
-void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
+void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata,
 			   bool peer_accepting_plinks)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
 	rcu_read_lock();
@@ -244,10 +243,10 @@
 	}
 
 	sta->last_rx = jiffies;
-	sta->supp_rates[local->hw.conf.channel->band] = rates;
+	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
 	if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN &&
-			sdata->u.sta.accepting_plinks &&
-			sdata->u.sta.mshcfg.auto_open_plinks)
+			sdata->u.mesh.accepting_plinks &&
+			sdata->u.mesh.mshcfg.auto_open_plinks)
 		mesh_plink_open(sta);
 
 	rcu_read_unlock();
@@ -257,7 +256,6 @@
 {
 	struct sta_info *sta;
 	__le16 llid, plid, reason;
-	struct net_device *dev = NULL;
 	struct ieee80211_sub_if_data *sdata;
 #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
 	DECLARE_MAC_BUF(mac);
@@ -277,12 +275,11 @@
 		return;
 	}
 	mpl_dbg("Mesh plink timer for %s fired on state %d\n",
-			print_mac(mac, sta->addr), sta->plink_state);
+			print_mac(mac, sta->sta.addr), sta->plink_state);
 	reason = 0;
 	llid = sta->llid;
 	plid = sta->plid;
 	sdata = sta->sdata;
-	dev = sdata->dev;
 
 	switch (sta->plink_state) {
 	case PLINK_OPN_RCVD:
@@ -291,7 +288,7 @@
 		if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
 			u32 rand;
 			mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n",
-					print_mac(mac, sta->addr),
+					print_mac(mac, sta->sta.addr),
 					sta->plink_retries, sta->plink_timeout);
 			get_random_bytes(&rand, sizeof(u32));
 			sta->plink_timeout = sta->plink_timeout +
@@ -299,7 +296,7 @@
 			++sta->plink_retries;
 			mod_plink_timer(sta, sta->plink_timeout);
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
 					    0, 0);
 			break;
 		}
@@ -312,7 +309,7 @@
 		sta->plink_state = PLINK_HOLDING;
 		mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
 		spin_unlock_bh(&sta->lock);
-		mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid,
+		mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
 				    reason);
 		break;
 	case PLINK_HOLDING:
@@ -355,10 +352,10 @@
 	mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
 	spin_unlock_bh(&sta->lock);
 	mpl_dbg("Mesh plink: starting establishment with %s\n",
-		print_mac(mac, sta->addr));
+		print_mac(mac, sta->sta.addr));
 
-	return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN,
-				   sta->addr, llid, 0, 0);
+	return mesh_plink_frame_tx(sdata, PLINK_OPEN,
+				   sta->sta.addr, llid, 0, 0);
 }
 
 void mesh_plink_block(struct sta_info *sta)
@@ -382,7 +379,7 @@
 #endif
 
 	mpl_dbg("Mesh plink: closing link with %s\n",
-			print_mac(mac, sta->addr));
+			print_mac(mac, sta->sta.addr));
 	spin_lock_bh(&sta->lock);
 	sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
 	reason = sta->reason;
@@ -403,15 +400,14 @@
 	llid = sta->llid;
 	plid = sta->plid;
 	spin_unlock_bh(&sta->lock);
-	mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid,
+	mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->sta.addr, llid,
 			    plid, reason);
 	return 0;
 }
 
-void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
+void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
 			 size_t len, struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee802_11_elems elems;
 	struct sta_info *sta;
@@ -425,6 +421,10 @@
 	DECLARE_MAC_BUF(mac);
 #endif
 
+	/* need action_code, aux */
+	if (len < IEEE80211_MIN_ACTION_SIZE + 3)
+		return;
+
 	if (is_multicast_ether_addr(mgmt->da)) {
 		mpl_dbg("Mesh plink: ignore frame from multicast address");
 		return;
@@ -478,7 +478,7 @@
 
 	/* Now we will figure out the appropriate event... */
 	event = PLINK_UNDEFINED;
-	if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) {
+	if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
 		switch (ftype) {
 		case PLINK_OPEN:
 			event = OPN_RJCT;
@@ -577,9 +577,9 @@
 			sta->llid = llid;
 			mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
 					    0, 0);
-			mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr,
+			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
 					    llid, plid, 0);
 			break;
 		default:
@@ -604,7 +604,7 @@
 
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
 					    plid, reason);
 			break;
 		case OPN_ACPT:
@@ -613,7 +613,7 @@
 			sta->plid = plid;
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
 					    plid, 0);
 			break;
 		case CNF_ACPT:
@@ -646,13 +646,13 @@
 
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
 					    plid, reason);
 			break;
 		case OPN_ACPT:
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
 					    plid, 0);
 			break;
 		case CNF_ACPT:
@@ -661,7 +661,7 @@
 			mesh_plink_inc_estab_count(sdata);
 			spin_unlock_bh(&sta->lock);
 			mpl_dbg("Mesh plink with %s ESTABLISHED\n",
-					print_mac(mac, sta->addr));
+					print_mac(mac, sta->sta.addr));
 			break;
 		default:
 			spin_unlock_bh(&sta->lock);
@@ -685,7 +685,7 @@
 
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
 					    plid, reason);
 			break;
 		case OPN_ACPT:
@@ -694,8 +694,8 @@
 			mesh_plink_inc_estab_count(sdata);
 			spin_unlock_bh(&sta->lock);
 			mpl_dbg("Mesh plink with %s ESTABLISHED\n",
-					print_mac(mac, sta->addr));
-			mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
+					print_mac(mac, sta->sta.addr));
+			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
 					    plid, 0);
 			break;
 		default:
@@ -714,13 +714,13 @@
 			llid = sta->llid;
 			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
 					    plid, reason);
 			break;
 		case OPN_ACPT:
 			llid = sta->llid;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
+			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
 					    plid, 0);
 			break;
 		default:
@@ -743,8 +743,8 @@
 			llid = sta->llid;
 			reason = sta->reason;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
-					    plid, reason);
+			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
+					    llid, plid, reason);
 			break;
 		default:
 			spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 902cac1..e859a0a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -11,11 +11,6 @@
  * published by the Free Software Foundation.
  */
 
-/* TODO:
- * order BSS list by RSSI(?) ("quality of AP")
- * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
- *    SSID)
- */
 #include <linux/delay.h>
 #include <linux/if_ether.h>
 #include <linux/skbuff.h>
@@ -26,663 +21,58 @@
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
 #include <net/iw_handler.h>
-#include <asm/types.h>
-
 #include <net/mac80211.h>
+#include <asm/unaligned.h>
+
 #include "ieee80211_i.h"
 #include "rate.h"
 #include "led.h"
-#include "mesh.h"
 
+#define IEEE80211_ASSOC_SCANS_MAX_TRIES 2
 #define IEEE80211_AUTH_TIMEOUT (HZ / 5)
 #define IEEE80211_AUTH_MAX_TRIES 3
 #define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
 #define IEEE80211_ASSOC_MAX_TRIES 3
 #define IEEE80211_MONITORING_INTERVAL (2 * HZ)
-#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
 #define IEEE80211_PROBE_INTERVAL (60 * HZ)
 #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
 #define IEEE80211_SCAN_INTERVAL (2 * HZ)
 #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
 #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
 
-#define IEEE80211_PROBE_DELAY (HZ / 33)
-#define IEEE80211_CHANNEL_TIME (HZ / 33)
-#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
-#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
 #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
 #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
-#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
 
 #define IEEE80211_IBSS_MAX_STA_ENTRIES 128
 
 
-#define ERP_INFO_USE_PROTECTION BIT(1)
-
-/* mgmt header + 1 byte action code */
-#define IEEE80211_MIN_ACTION_SIZE (24 + 1)
-
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
-/* next values represent the buffer size for A-MPDU frame.
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
-
-static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
-				     u8 *ssid, size_t ssid_len);
-static struct ieee80211_sta_bss *
-ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
-		     u8 *ssid, u8 ssid_len);
-static void ieee80211_rx_bss_put(struct ieee80211_local *local,
-				 struct ieee80211_sta_bss *bss);
-static int ieee80211_sta_find_ibss(struct net_device *dev,
-				   struct ieee80211_if_sta *ifsta);
-static int ieee80211_sta_wep_configured(struct net_device *dev);
-static int ieee80211_sta_start_scan(struct net_device *dev,
-				    u8 *ssid, size_t ssid_len);
-static int ieee80211_sta_config_auth(struct net_device *dev,
-				     struct ieee80211_if_sta *ifsta);
-static void sta_rx_agg_session_timer_expired(unsigned long data);
-
-
-void ieee802_11_parse_elems(u8 *start, size_t len,
-			    struct ieee802_11_elems *elems)
-{
-	size_t left = len;
-	u8 *pos = start;
-
-	memset(elems, 0, sizeof(*elems));
-
-	while (left >= 2) {
-		u8 id, elen;
-
-		id = *pos++;
-		elen = *pos++;
-		left -= 2;
-
-		if (elen > left)
-			return;
-
-		switch (id) {
-		case WLAN_EID_SSID:
-			elems->ssid = pos;
-			elems->ssid_len = elen;
-			break;
-		case WLAN_EID_SUPP_RATES:
-			elems->supp_rates = pos;
-			elems->supp_rates_len = elen;
-			break;
-		case WLAN_EID_FH_PARAMS:
-			elems->fh_params = pos;
-			elems->fh_params_len = elen;
-			break;
-		case WLAN_EID_DS_PARAMS:
-			elems->ds_params = pos;
-			elems->ds_params_len = elen;
-			break;
-		case WLAN_EID_CF_PARAMS:
-			elems->cf_params = pos;
-			elems->cf_params_len = elen;
-			break;
-		case WLAN_EID_TIM:
-			elems->tim = pos;
-			elems->tim_len = elen;
-			break;
-		case WLAN_EID_IBSS_PARAMS:
-			elems->ibss_params = pos;
-			elems->ibss_params_len = elen;
-			break;
-		case WLAN_EID_CHALLENGE:
-			elems->challenge = pos;
-			elems->challenge_len = elen;
-			break;
-		case WLAN_EID_WPA:
-			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
-			    pos[2] == 0xf2) {
-				/* Microsoft OUI (00:50:F2) */
-				if (pos[3] == 1) {
-					/* OUI Type 1 - WPA IE */
-					elems->wpa = pos;
-					elems->wpa_len = elen;
-				} else if (elen >= 5 && pos[3] == 2) {
-					if (pos[4] == 0) {
-						elems->wmm_info = pos;
-						elems->wmm_info_len = elen;
-					} else if (pos[4] == 1) {
-						elems->wmm_param = pos;
-						elems->wmm_param_len = elen;
-					}
-				}
-			}
-			break;
-		case WLAN_EID_RSN:
-			elems->rsn = pos;
-			elems->rsn_len = elen;
-			break;
-		case WLAN_EID_ERP_INFO:
-			elems->erp_info = pos;
-			elems->erp_info_len = elen;
-			break;
-		case WLAN_EID_EXT_SUPP_RATES:
-			elems->ext_supp_rates = pos;
-			elems->ext_supp_rates_len = elen;
-			break;
-		case WLAN_EID_HT_CAPABILITY:
-			elems->ht_cap_elem = pos;
-			elems->ht_cap_elem_len = elen;
-			break;
-		case WLAN_EID_HT_EXTRA_INFO:
-			elems->ht_info_elem = pos;
-			elems->ht_info_elem_len = elen;
-			break;
-		case WLAN_EID_MESH_ID:
-			elems->mesh_id = pos;
-			elems->mesh_id_len = elen;
-			break;
-		case WLAN_EID_MESH_CONFIG:
-			elems->mesh_config = pos;
-			elems->mesh_config_len = elen;
-			break;
-		case WLAN_EID_PEER_LINK:
-			elems->peer_link = pos;
-			elems->peer_link_len = elen;
-			break;
-		case WLAN_EID_PREQ:
-			elems->preq = pos;
-			elems->preq_len = elen;
-			break;
-		case WLAN_EID_PREP:
-			elems->prep = pos;
-			elems->prep_len = elen;
-			break;
-		case WLAN_EID_PERR:
-			elems->perr = pos;
-			elems->perr_len = elen;
-			break;
-		case WLAN_EID_CHANNEL_SWITCH:
-			elems->ch_switch_elem = pos;
-			elems->ch_switch_elem_len = elen;
-			break;
-		case WLAN_EID_QUIET:
-			if (!elems->quiet_elem) {
-				elems->quiet_elem = pos;
-				elems->quiet_elem_len = elen;
-			}
-			elems->num_of_quiet_elem++;
-			break;
-		case WLAN_EID_COUNTRY:
-			elems->country_elem = pos;
-			elems->country_elem_len = elen;
-			break;
-		case WLAN_EID_PWR_CONSTRAINT:
-			elems->pwr_constr_elem = pos;
-			elems->pwr_constr_elem_len = elen;
-			break;
-		default:
-			break;
-		}
-
-		left -= elen;
-		pos += elen;
-	}
-}
-
-
+/* utils */
 static int ecw2cw(int ecw)
 {
 	return (1 << ecw) - 1;
 }
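
ecw2cw() converts the exponent form used in WMM parameter records into an actual contention window: an ECW of 4 gives 2^4 - 1 = 15 and an ECW of 10 gives 1023, the same cw_min/cw_max defaults used elsewhere in this file.
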
 
-
-static void ieee80211_sta_def_wmm_params(struct net_device *dev,
-					 struct ieee80211_sta_bss *bss,
-					 int ibss)
+static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
-	int i, have_higher_than_11mbit = 0;
+	u8 *end, *pos;
 
+	pos = bss->ies;
+	if (pos == NULL)
+		return NULL;
+	end = pos + bss->ies_len;
 
-	/* cf. IEEE 802.11 9.2.12 */
-	for (i = 0; i < bss->supp_rates_len; i++)
-		if ((bss->supp_rates[i] & 0x7f) * 5 > 110)
-			have_higher_than_11mbit = 1;
-
-	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
-	    have_higher_than_11mbit)
-		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
-	else
-		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
-
-
-	if (local->ops->conf_tx) {
-		struct ieee80211_tx_queue_params qparam;
-
-		memset(&qparam, 0, sizeof(qparam));
-
-		qparam.aifs = 2;
-
-		if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
-		    !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
-			qparam.cw_min = 31;
-		else
-			qparam.cw_min = 15;
-
-		qparam.cw_max = 1023;
-		qparam.txop = 0;
-
-		for (i = 0; i < local_to_hw(local)->queues; i++)
-			local->ops->conf_tx(local_to_hw(local), i, &qparam);
-	}
-}
-
-static void ieee80211_sta_wmm_params(struct net_device *dev,
-				     struct ieee80211_if_sta *ifsta,
-				     u8 *wmm_param, size_t wmm_param_len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_tx_queue_params params;
-	size_t left;
-	int count;
-	u8 *pos;
-
-	if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
-		return;
-
-	if (!wmm_param)
-		return;
-
-	if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
-		return;
-	count = wmm_param[6] & 0x0f;
-	if (count == ifsta->wmm_last_param_set)
-		return;
-	ifsta->wmm_last_param_set = count;
-
-	pos = wmm_param + 8;
-	left = wmm_param_len - 8;
-
-	memset(&params, 0, sizeof(params));
-
-	if (!local->ops->conf_tx)
-		return;
-
-	local->wmm_acm = 0;
-	for (; left >= 4; left -= 4, pos += 4) {
-		int aci = (pos[0] >> 5) & 0x03;
-		int acm = (pos[0] >> 4) & 0x01;
-		int queue;
-
-		switch (aci) {
-		case 1:
-			queue = 3;
-			if (acm)
-				local->wmm_acm |= BIT(0) | BIT(3);
+	while (pos + 1 < end) {
+		if (pos + 2 + pos[1] > end)
 			break;
-		case 2:
-			queue = 1;
-			if (acm)
-				local->wmm_acm |= BIT(4) | BIT(5);
-			break;
-		case 3:
-			queue = 0;
-			if (acm)
-				local->wmm_acm |= BIT(6) | BIT(7);
-			break;
-		case 0:
-		default:
-			queue = 2;
-			if (acm)
-				local->wmm_acm |= BIT(1) | BIT(2);
-			break;
-		}
-
-		params.aifs = pos[0] & 0x0f;
-		params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
-		params.cw_min = ecw2cw(pos[1] & 0x0f);
-		params.txop = get_unaligned_le16(pos + 2);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
-		       "cWmin=%d cWmax=%d txop=%d\n",
-		       dev->name, queue, aci, acm, params.aifs, params.cw_min,
-		       params.cw_max, params.txop);
-#endif
-		/* TODO: handle ACM (block TX, fallback to next lowest allowed
-		 * AC for now) */
-		if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
-			printk(KERN_DEBUG "%s: failed to set TX queue "
-			       "parameters for queue %d\n", dev->name, queue);
-		}
-	}
-}
-
-static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
-					   bool use_protection,
-					   bool use_short_preamble)
-{
-	struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	DECLARE_MAC_BUF(mac);
-#endif
-	u32 changed = 0;
-
-	if (use_protection != bss_conf->use_cts_prot) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
-			       "%s)\n",
-			       sdata->dev->name,
-			       use_protection ? "enabled" : "disabled",
-			       print_mac(mac, ifsta->bssid));
-		}
-#endif
-		bss_conf->use_cts_prot = use_protection;
-		changed |= BSS_CHANGED_ERP_CTS_PROT;
+		if (pos[0] == ie)
+			return pos;
+		pos += 2 + pos[1];
 	}
 
-	if (use_short_preamble != bss_conf->use_short_preamble) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: switched to %s barker preamble"
-			       " (BSSID=%s)\n",
-			       sdata->dev->name,
-			       use_short_preamble ? "short" : "long",
-			       print_mac(mac, ifsta->bssid));
-		}
-#endif
-		bss_conf->use_short_preamble = use_short_preamble;
-		changed |= BSS_CHANGED_ERP_PREAMBLE;
-	}
-
-	return changed;
+	return NULL;
 }
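
The new ieee80211_bss_get_ie() helper walks the stored information elements as a flat ID/length/value sequence, checking that both the two-byte element header and the advertised payload fit in the buffer before advancing. A stand-alone sketch of the same walk over an arbitrary buffer (my_find_ie() is not a mac80211 API):

#include <linux/types.h>

/* Sketch: return the first element with the given ID, or NULL if it is
 * absent or the buffer ends mid-element. */
static const u8 *my_find_ie(u8 id, const u8 *ies, size_t len)
{
	const u8 *pos = ies, *end = ies + len;

	while (pos + 1 < end) {			/* room for ID and length */
		if (pos + 2 + pos[1] > end)	/* payload would overrun */
			break;
		if (pos[0] == id)
			return pos;		/* points at the ID byte */
		pos += 2 + pos[1];
	}
	return NULL;
}
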
 
-static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata,
-				   u8 erp_value)
-{
-	bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
-	bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
-
-	return ieee80211_handle_protect_preamb(sdata,
-			use_protection, use_short_preamble);
-}
-
-static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
-					   struct ieee80211_sta_bss *bss)
-{
-	u32 changed = 0;
-
-	if (bss->has_erp_value)
-		changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value);
-	else {
-		u16 capab = bss->capability;
-		changed |= ieee80211_handle_protect_preamb(sdata, false,
-				(capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0);
-	}
-
-	return changed;
-}
-
-int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
-				   struct ieee80211_ht_info *ht_info)
-{
-
-	if (ht_info == NULL)
-		return -EINVAL;
-
-	memset(ht_info, 0, sizeof(*ht_info));
-
-	if (ht_cap_ie) {
-		u8 ampdu_info = ht_cap_ie->ampdu_params_info;
-
-		ht_info->ht_supported = 1;
-		ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
-		ht_info->ampdu_factor =
-			ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
-		ht_info->ampdu_density =
-			(ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
-		memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
-	} else
-		ht_info->ht_supported = 0;
-
-	return 0;
-}
-
-int ieee80211_ht_addt_info_ie_to_ht_bss_info(
-			struct ieee80211_ht_addt_info *ht_add_info_ie,
-			struct ieee80211_ht_bss_info *bss_info)
-{
-	if (bss_info == NULL)
-		return -EINVAL;
-
-	memset(bss_info, 0, sizeof(*bss_info));
-
-	if (ht_add_info_ie) {
-		u16 op_mode;
-		op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
-
-		bss_info->primary_channel = ht_add_info_ie->control_chan;
-		bss_info->bss_cap = ht_add_info_ie->ht_param;
-		bss_info->bss_op_mode = (u8)(op_mode & 0xff);
-	}
-
-	return 0;
-}
-
-static void ieee80211_sta_send_associnfo(struct net_device *dev,
-					 struct ieee80211_if_sta *ifsta)
-{
-	char *buf;
-	size_t len;
-	int i;
-	union iwreq_data wrqu;
-
-	if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
-		return;
-
-	buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
-				ifsta->assocresp_ies_len), GFP_KERNEL);
-	if (!buf)
-		return;
-
-	len = sprintf(buf, "ASSOCINFO(");
-	if (ifsta->assocreq_ies) {
-		len += sprintf(buf + len, "ReqIEs=");
-		for (i = 0; i < ifsta->assocreq_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocreq_ies[i]);
-		}
-	}
-	if (ifsta->assocresp_ies) {
-		if (ifsta->assocreq_ies)
-			len += sprintf(buf + len, " ");
-		len += sprintf(buf + len, "RespIEs=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
-	}
-	len += sprintf(buf + len, ")");
-
-	if (len > IW_CUSTOM_MAX) {
-		len = sprintf(buf, "ASSOCRESPIE=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
-	}
-
-	memset(&wrqu, 0, sizeof(wrqu));
-	wrqu.data.length = len;
-	wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-
-	kfree(buf);
-}
-
-
-static void ieee80211_set_associated(struct net_device *dev,
-				     struct ieee80211_if_sta *ifsta,
-				     bool assoc)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_conf *conf = &local_to_hw(local)->conf;
-	union iwreq_data wrqu;
-	u32 changed = BSS_CHANGED_ASSOC;
-
-	if (assoc) {
-		struct ieee80211_sta_bss *bss;
-
-		ifsta->flags |= IEEE80211_STA_ASSOCIATED;
-
-		if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
-			return;
-
-		bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
-					   conf->channel->center_freq,
-					   ifsta->ssid, ifsta->ssid_len);
-		if (bss) {
-			/* set timing information */
-			sdata->bss_conf.beacon_int = bss->beacon_int;
-			sdata->bss_conf.timestamp = bss->timestamp;
-			sdata->bss_conf.dtim_period = bss->dtim_period;
-
-			changed |= ieee80211_handle_bss_capability(sdata, bss);
-
-			ieee80211_rx_bss_put(local, bss);
-		}
-
-		if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
-			changed |= BSS_CHANGED_HT;
-			sdata->bss_conf.assoc_ht = 1;
-			sdata->bss_conf.ht_conf = &conf->ht_conf;
-			sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
-		}
-
-		ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
-		memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
-		memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
-		ieee80211_sta_send_associnfo(dev, ifsta);
-	} else {
-		netif_carrier_off(dev);
-		ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid);
-		ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
-		changed |= ieee80211_reset_erp_info(dev);
-
-		sdata->bss_conf.assoc_ht = 0;
-		sdata->bss_conf.ht_conf = NULL;
-		sdata->bss_conf.ht_bss_conf = NULL;
-
-		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
-	}
-	ifsta->last_probe = jiffies;
-	ieee80211_led_assoc(local, assoc);
-
-	sdata->bss_conf.assoc = assoc;
-	ieee80211_bss_info_change_notify(sdata, changed);
-
-	if (assoc)
-		netif_carrier_on(dev);
-
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void ieee80211_set_disassoc(struct net_device *dev,
-				   struct ieee80211_if_sta *ifsta, int deauth)
-{
-	if (deauth)
-		ifsta->auth_tries = 0;
-	ifsta->assoc_tries = 0;
-	ieee80211_set_associated(dev, ifsta, 0);
-}
-
-void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
-		      int encrypt)
-{
-	struct ieee80211_sub_if_data *sdata;
-
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	skb->dev = sdata->local->mdev;
-	skb_set_mac_header(skb, 0);
-	skb_set_network_header(skb, 0);
-	skb_set_transport_header(skb, 0);
-
-	skb->iif = sdata->dev->ifindex;
-	skb->do_not_encrypt = !encrypt;
-
-	dev_queue_xmit(skb);
-}
-
-
-static void ieee80211_send_auth(struct net_device *dev,
-				struct ieee80211_if_sta *ifsta,
-				int transaction, u8 *extra, size_t extra_len,
-				int encrypt)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
-
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-			    sizeof(*mgmt) + 6 + extra_len);
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
-		       "frame\n", dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
-	memset(mgmt, 0, 24 + 6);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_AUTH);
-	if (encrypt)
-		mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-	memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-	mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg);
-	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
-	ifsta->auth_transaction = transaction + 1;
-	mgmt->u.auth.status_code = cpu_to_le16(0);
-	if (extra)
-		memcpy(skb_put(skb, extra_len), extra, extra_len);
-
-	ieee80211_sta_tx(dev, skb, encrypt);
-}
-
-
-static void ieee80211_authenticate(struct net_device *dev,
-				   struct ieee80211_if_sta *ifsta)
-{
-	DECLARE_MAC_BUF(mac);
-
-	ifsta->auth_tries++;
-	if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
-		printk(KERN_DEBUG "%s: authentication with AP %s"
-		       " timed out\n",
-		       dev->name, print_mac(mac, ifsta->bssid));
-		ifsta->state = IEEE80211_DISABLED;
-		return;
-	}
-
-	ifsta->state = IEEE80211_AUTHENTICATE;
-	printk(KERN_DEBUG "%s: authenticate with AP %s\n",
-	       dev->name, print_mac(mac, ifsta->bssid));
-
-	ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0);
-
-	mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
-}
-
-static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss,
+static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
 				      struct ieee80211_supported_band *sband,
 				      u64 *rates)
 {
@@ -703,16 +93,153 @@
 	return count;
 }
 
-static void ieee80211_send_assoc(struct net_device *dev,
-				 struct ieee80211_if_sta *ifsta)
+/* also used by mesh code */
+u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
+			    struct ieee802_11_elems *elems,
+			    enum ieee80211_band band)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_rate *bitrates;
+	size_t num_rates;
+	u64 supp_rates;
+	int i, j;
+	sband = local->hw.wiphy->bands[band];
+
+	if (!sband) {
+		WARN_ON(1);
+		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	}
+
+	bitrates = sband->bitrates;
+	num_rates = sband->n_bitrates;
+	supp_rates = 0;
+	for (i = 0; i < elems->supp_rates_len +
+		     elems->ext_supp_rates_len; i++) {
+		u8 rate = 0;
+		int own_rate;
+		if (i < elems->supp_rates_len)
+			rate = elems->supp_rates[i];
+		else if (elems->ext_supp_rates)
+			rate = elems->ext_supp_rates
+				[i - elems->supp_rates_len];
+		own_rate = 5 * (rate & 0x7f);
+		for (j = 0; j < num_rates; j++)
+			if (bitrates[j].bitrate == own_rate)
+				supp_rates |= BIT(j);
+	}
+	return supp_rates;
+}
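
The helper above masks off the basic-rate flag (bit 7, 0x80) and multiplies by 5 because the Supported Rates element encodes rates in units of 500 kbps while mac80211's bitrate fields are in units of 100 kbps: a rate byte of 0x96, for instance, is the basic-rate flag plus 22, i.e. 11 Mbit/s, and matches a bitrate value of 5 * 22 = 110.
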
+
+/* frame sending functions */
+
+/* also used by scanning code */
+void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
+			      u8 *ssid, size_t ssid_len)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_supported_band *sband;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
-	u8 *pos, *ies;
+	u8 *pos, *supp_rates, *esupp_rates = NULL;
+	int i;
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
+	if (!skb) {
+		printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
+		       "request\n", sdata->dev->name);
+		return;
+	}
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+	memset(mgmt, 0, 24);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_PROBE_REQ);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+	if (dst) {
+		memcpy(mgmt->da, dst, ETH_ALEN);
+		memcpy(mgmt->bssid, dst, ETH_ALEN);
+	} else {
+		memset(mgmt->da, 0xff, ETH_ALEN);
+		memset(mgmt->bssid, 0xff, ETH_ALEN);
+	}
+	pos = skb_put(skb, 2 + ssid_len);
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = ssid_len;
+	memcpy(pos, ssid, ssid_len);
+
+	supp_rates = skb_put(skb, 2);
+	supp_rates[0] = WLAN_EID_SUPP_RATES;
+	supp_rates[1] = 0;
+	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		struct ieee80211_rate *rate = &sband->bitrates[i];
+		if (esupp_rates) {
+			pos = skb_put(skb, 1);
+			esupp_rates[1]++;
+		} else if (supp_rates[1] == 8) {
+			esupp_rates = skb_put(skb, 3);
+			esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
+			esupp_rates[1] = 1;
+			pos = &esupp_rates[2];
+		} else {
+			pos = skb_put(skb, 1);
+			supp_rates[1]++;
+		}
+		*pos = rate->bitrate / 5;
+	}
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
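
The rate loop above honours the 802.11 rule that the Supported Rates element carries at most eight rates: once supp_rates[1] reaches 8, the remaining bitrates spill into an Extended Supported Rates element. For a band with, say, 12 bitrates this produces an 8-rate element followed by a 4-rate extension, each byte holding bitrate / 5, i.e. the rate in 500 kbps units.
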
+
+static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
+				struct ieee80211_if_sta *ifsta,
+				int transaction, u8 *extra, size_t extra_len,
+				int encrypt)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    sizeof(*mgmt) + 6 + extra_len);
+	if (!skb) {
+		printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
+		       "frame\n", sdata->dev->name);
+		return;
+	}
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
+	memset(mgmt, 0, 24 + 6);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_AUTH);
+	if (encrypt)
+		mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+	memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
+	mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg);
+	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
+	ifsta->auth_transaction = transaction + 1;
+	mgmt->u.auth.status_code = cpu_to_le16(0);
+	if (extra)
+		memcpy(skb_put(skb, extra_len), extra, extra_len);
+
+	ieee80211_tx_skb(sdata, skb, encrypt);
+}
+
+static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
+				 struct ieee80211_if_sta *ifsta)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	u8 *pos, *ies, *ht_add_ie;
 	int i, len, count, rates_len, supp_rates_len;
 	u16 capab;
-	struct ieee80211_sta_bss *bss;
+	struct ieee80211_bss *bss;
 	int wmm = 0;
 	struct ieee80211_supported_band *sband;
 	u64 rates = 0;
@@ -722,7 +249,7 @@
 			    ifsta->ssid_len);
 	if (!skb) {
 		printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
-		       "frame\n", dev->name);
+		       "frame\n", sdata->dev->name);
 		return;
 	}
 	skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -738,13 +265,13 @@
 			capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
 	}
 
-	bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
+	bss = ieee80211_rx_bss_get(local, ifsta->bssid,
 				   local->hw.conf.channel->center_freq,
 				   ifsta->ssid, ifsta->ssid_len);
 	if (bss) {
 		if (bss->capability & WLAN_CAPABILITY_PRIVACY)
 			capab |= WLAN_CAPABILITY_PRIVACY;
-		if (bss->wmm_ie)
+		if (bss->wmm_used)
 			wmm = 1;
 
 		/* get all rates supported by the device and the AP as
@@ -766,13 +293,13 @@
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
 	memset(mgmt, 0, 24);
 	memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
 
 	if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) {
 		skb_put(skb, 10);
-		mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-						   IEEE80211_STYPE_REASSOC_REQ);
+		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						  IEEE80211_STYPE_REASSOC_REQ);
 		mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
 		mgmt->u.reassoc_req.listen_interval =
 				cpu_to_le16(local->hw.conf.listen_interval);
@@ -780,8 +307,8 @@
 		       ETH_ALEN);
 	} else {
 		skb_put(skb, 4);
-		mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-						   IEEE80211_STYPE_ASSOC_REQ);
+		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						  IEEE80211_STYPE_ASSOC_REQ);
 		mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
 		mgmt->u.reassoc_req.listen_interval =
 				cpu_to_le16(local->hw.conf.listen_interval);
@@ -866,9 +393,10 @@
 
 	/* wmm support is a must to HT */
 	if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
-	    sband->ht_info.ht_supported && bss->ht_add_ie) {
+	    sband->ht_info.ht_supported &&
+	    (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) {
 		struct ieee80211_ht_addt_info *ht_add_info =
-			(struct ieee80211_ht_addt_info *)bss->ht_add_ie;
+			(struct ieee80211_ht_addt_info *)ht_add_ie;
 		u16 cap = sband->ht_info.cap;
 		__le16 tmp;
 		u32 flags = local->hw.conf.channel->flags;
@@ -907,21 +435,22 @@
 	if (ifsta->assocreq_ies)
 		memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len);
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
 }
 
 
-static void ieee80211_send_deauth(struct net_device *dev,
-				  struct ieee80211_if_sta *ifsta, u16 reason)
+static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
+					   u16 stype, u16 reason)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
 	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for deauth "
-		       "frame\n", dev->name);
+		printk(KERN_DEBUG "%s: failed to allocate buffer for "
+		       "deauth/disassoc frame\n", sdata->dev->name);
 		return;
 	}
 	skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -929,51 +458,401 @@
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
 	memset(mgmt, 0, 24);
 	memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 	memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_DEAUTH);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
 	skb_put(skb, 2);
+	/* u.deauth.reason_code == u.disassoc.reason_code */
 	mgmt->u.deauth.reason_code = cpu_to_le16(reason);
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+/* MLME */
+static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_bss *bss)
+{
+	struct ieee80211_local *local = sdata->local;
+	int i, have_higher_than_11mbit = 0;
+
+	/* cf. IEEE 802.11 9.2.12 */
+	for (i = 0; i < bss->supp_rates_len; i++)
+		if ((bss->supp_rates[i] & 0x7f) * 5 > 110)
+			have_higher_than_11mbit = 1;
+
+	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+	    have_higher_than_11mbit)
+		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+	else
+		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+
+	ieee80211_set_wmm_default(sdata);
+}
+
+static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
+				     struct ieee80211_if_sta *ifsta,
+				     u8 *wmm_param, size_t wmm_param_len)
+{
+	struct ieee80211_tx_queue_params params;
+	size_t left;
+	int count;
+	u8 *pos;
+
+	if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
+		return;
+
+	if (!wmm_param)
+		return;
+
+	if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
+		return;
+	count = wmm_param[6] & 0x0f;
+	if (count == ifsta->wmm_last_param_set)
+		return;
+	ifsta->wmm_last_param_set = count;
+
+	pos = wmm_param + 8;
+	left = wmm_param_len - 8;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!local->ops->conf_tx)
+		return;
+
+	local->wmm_acm = 0;
+	for (; left >= 4; left -= 4, pos += 4) {
+		int aci = (pos[0] >> 5) & 0x03;
+		int acm = (pos[0] >> 4) & 0x01;
+		int queue;
+
+		switch (aci) {
+		case 1:
+			queue = 3;
+			if (acm)
+				local->wmm_acm |= BIT(0) | BIT(3);
+			break;
+		case 2:
+			queue = 1;
+			if (acm)
+				local->wmm_acm |= BIT(4) | BIT(5);
+			break;
+		case 3:
+			queue = 0;
+			if (acm)
+				local->wmm_acm |= BIT(6) | BIT(7);
+			break;
+		case 0:
+		default:
+			queue = 2;
+			if (acm)
+				local->wmm_acm |= BIT(1) | BIT(2);
+			break;
+		}
+
+		params.aifs = pos[0] & 0x0f;
+		params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
+		params.cw_min = ecw2cw(pos[1] & 0x0f);
+		params.txop = get_unaligned_le16(pos + 2);
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+		printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
+		       "cWmin=%d cWmax=%d txop=%d\n",
+		       local->mdev->name, queue, aci, acm, params.aifs, params.cw_min,
+		       params.cw_max, params.txop);
+#endif
+		/* TODO: handle ACM (block TX, fallback to next lowest allowed
+		 * AC for now) */
+		if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
+			printk(KERN_DEBUG "%s: failed to set TX queue "
+			       "parameters for queue %d\n", local->mdev->name, queue);
+		}
+	}
+}
+
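ieee80211_sta_wmm_params() above walks the WMM Parameter element: an 8-byte header (OUI, OUI type/subtype, version, a QoS Info octet whose low nibble is the parameter-set count, and a reserved byte) followed by one 4-byte record per access category. The ECW fields are exponents; ecw2cw() is assumed here to expand them as (1 << ecw) - 1, which matches the standard contention-window encoding. A worked example for one record, using the common AC_BE defaults (values illustrative only):

    /* AC record bytes 0x03 0xa4 0x00 0x00:
     *   pos[0] = 0x03: ACI  = (0x03 >> 5) & 0x03 = 0 -> best effort (queue 2 above)
     *                  ACM  = (0x03 >> 4) & 0x01 = 0 -> no admission control
     *                  AIFS = 0x03 & 0x0f        = 3
     *   pos[1] = 0xa4: cw_max = ecw2cw(0xa) = (1 << 10) - 1 = 1023
     *                  cw_min = ecw2cw(0x4) = (1 << 4)  - 1 = 15
     *   pos[2..3]    : txop = 0 (little endian, units of 32 us)
     */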
+static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
+					   bool use_protection,
+					   bool use_short_preamble)
+{
+	struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+	DECLARE_MAC_BUF(mac);
+#endif
+	u32 changed = 0;
+
+	if (use_protection != bss_conf->use_cts_prot) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
+			       "%s)\n",
+			       sdata->dev->name,
+			       use_protection ? "enabled" : "disabled",
+			       print_mac(mac, ifsta->bssid));
+		}
+#endif
+		bss_conf->use_cts_prot = use_protection;
+		changed |= BSS_CHANGED_ERP_CTS_PROT;
+	}
+
+	if (use_short_preamble != bss_conf->use_short_preamble) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: switched to %s Barker preamble"
+			       " (BSSID=%s)\n",
+			       sdata->dev->name,
+			       use_short_preamble ? "short" : "long",
+			       print_mac(mac, ifsta->bssid));
+		}
+#endif
+		bss_conf->use_short_preamble = use_short_preamble;
+		changed |= BSS_CHANGED_ERP_PREAMBLE;
+	}
+
+	return changed;
+}
+
+static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata,
+				   u8 erp_value)
+{
+	bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
+	bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
+
+	return ieee80211_handle_protect_preamb(sdata,
+			use_protection, use_short_preamble);
+}
+
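ieee80211_handle_erp_ie() decodes the single-octet ERP Information element: Use Protection (bit 1) asks stations to protect ERP transmissions, and Barker Preamble Mode (bit 2) being set means long preambles are required, so short preamble is used only while that bit is clear. One decoded value, assuming the usual WLAN_ERP_USE_PROTECTION = BIT(1) and WLAN_ERP_BARKER_PREAMBLE = BIT(2) definitions:

    /* erp_value = 0x06 (bits 1 and 2 set):
     *   use_protection     = (0x06 & WLAN_ERP_USE_PROTECTION) != 0 -> true
     *   use_short_preamble = (0x06 & WLAN_ERP_BARKER_PREAMBLE) == 0 -> false
     * i.e. enable CTS protection and fall back to long preambles.
     */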
+static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
+					   struct ieee80211_bss *bss)
+{
+	u32 changed = 0;
+
+	if (bss->has_erp_value)
+		changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value);
+	else {
+		u16 capab = bss->capability;
+		changed |= ieee80211_handle_protect_preamb(sdata, false,
+				(capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0);
+	}
+
+	return changed;
+}
+
+static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata,
+					struct ieee80211_if_sta *ifsta)
+{
+	union iwreq_data wrqu;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
+		memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_if_sta *ifsta)
+{
+	union iwreq_data wrqu;
+
+	if (ifsta->assocreq_ies) {
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocreq_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
+				    ifsta->assocreq_ies);
+	}
+	if (ifsta->assocresp_ies) {
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocresp_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
+				    ifsta->assocresp_ies);
+	}
 }
 
 
-static void ieee80211_send_disassoc(struct net_device *dev,
-				    struct ieee80211_if_sta *ifsta, u16 reason)
+static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_conf *conf = &local_to_hw(local)->conf;
+	u32 changed = BSS_CHANGED_ASSOC;
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc "
-		       "frame\n", dev->name);
+	struct ieee80211_bss *bss;
+
+	ifsta->flags |= IEEE80211_STA_ASSOCIATED;
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return;
+
+	bss = ieee80211_rx_bss_get(local, ifsta->bssid,
+				   conf->channel->center_freq,
+				   ifsta->ssid, ifsta->ssid_len);
+	if (bss) {
+		/* set timing information */
+		sdata->bss_conf.beacon_int = bss->beacon_int;
+		sdata->bss_conf.timestamp = bss->timestamp;
+		sdata->bss_conf.dtim_period = bss->dtim_period;
+
+		changed |= ieee80211_handle_bss_capability(sdata, bss);
+
+		ieee80211_rx_bss_put(local, bss);
+	}
+
+	if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
+		changed |= BSS_CHANGED_HT;
+		sdata->bss_conf.assoc_ht = 1;
+		sdata->bss_conf.ht_conf = &conf->ht_conf;
+		sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
+	}
+
+	ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
+	memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
+	ieee80211_sta_send_associnfo(sdata, ifsta);
+
+	ifsta->last_probe = jiffies;
+	ieee80211_led_assoc(local, 1);
+
+	sdata->bss_conf.assoc = 1;
+	/*
+	 * For now, always ask the driver to update the basic rate set
+	 * when we have associated; we aren't checking whether it has
+	 * actually changed.
+	 */
+	changed |= BSS_CHANGED_BASIC_RATES;
+	ieee80211_bss_info_change_notify(sdata, changed);
+
+	netif_tx_start_all_queues(sdata->dev);
+	netif_carrier_on(sdata->dev);
+
+	ieee80211_sta_send_apinfo(sdata, ifsta);
+}
+
+static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
+				   struct ieee80211_if_sta *ifsta)
+{
+	DECLARE_MAC_BUF(mac);
+
+	ifsta->direct_probe_tries++;
+	if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
+		printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n",
+		       sdata->dev->name, print_mac(mac, ifsta->bssid));
+		ifsta->state = IEEE80211_STA_MLME_DISABLED;
 		return;
 	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
 
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
-	memset(mgmt, 0, 24);
-	memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_DISASSOC);
-	skb_put(skb, 2);
-	mgmt->u.disassoc.reason_code = cpu_to_le16(reason);
+	printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n",
+			sdata->dev->name, print_mac(mac, ifsta->bssid),
+			ifsta->direct_probe_tries);
 
-	ieee80211_sta_tx(dev, skb, 0);
+	ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
+
+	set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request);
+
+	/* The direct probe is sent to the broadcast address, as some APs
+	 * will not answer a directed probe request from a station that is
+	 * not yet associated.
+	 */
+	ieee80211_send_probe_req(sdata, NULL,
+				 ifsta->ssid, ifsta->ssid_len);
+
+	mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
 }
 
 
-static int ieee80211_privacy_mismatch(struct net_device *dev,
+static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
+				   struct ieee80211_if_sta *ifsta)
+{
+	DECLARE_MAC_BUF(mac);
+
+	ifsta->auth_tries++;
+	if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
+		printk(KERN_DEBUG "%s: authentication with AP %s"
+		       " timed out\n",
+		       sdata->dev->name, print_mac(mac, ifsta->bssid));
+		ifsta->state = IEEE80211_STA_MLME_DISABLED;
+		return;
+	}
+
+	ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
+	printk(KERN_DEBUG "%s: authenticate with AP %s\n",
+	       sdata->dev->name, print_mac(mac, ifsta->bssid));
+
+	ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0);
+
+	mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
+}
+
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+				   struct ieee80211_if_sta *ifsta, bool deauth,
+				   bool self_disconnected, u16 reason)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	u32 changed = BSS_CHANGED_ASSOC;
+
+	rcu_read_lock();
+
+	sta = sta_info_get(local, ifsta->bssid);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+
+	if (deauth) {
+		ifsta->direct_probe_tries = 0;
+		ifsta->auth_tries = 0;
+	}
+	ifsta->assoc_scan_tries = 0;
+	ifsta->assoc_tries = 0;
+
+	netif_tx_stop_all_queues(sdata->dev);
+	netif_carrier_off(sdata->dev);
+
+	ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr);
+
+	if (self_disconnected) {
+		if (deauth)
+			ieee80211_send_deauth_disassoc(sdata,
+				IEEE80211_STYPE_DEAUTH, reason);
+		else
+			ieee80211_send_deauth_disassoc(sdata,
+				IEEE80211_STYPE_DISASSOC, reason);
+	}
+
+	ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
+	changed |= ieee80211_reset_erp_info(sdata);
+
+	if (sdata->bss_conf.assoc_ht)
+		changed |= BSS_CHANGED_HT;
+
+	sdata->bss_conf.assoc_ht = 0;
+	sdata->bss_conf.ht_conf = NULL;
+	sdata->bss_conf.ht_bss_conf = NULL;
+
+	ieee80211_led_assoc(local, 0);
+	sdata->bss_conf.assoc = 0;
+
+	ieee80211_sta_send_apinfo(sdata, ifsta);
+
+	if (self_disconnected)
+		ifsta->state = IEEE80211_STA_MLME_DISABLED;
+
+	sta_info_unlink(&sta);
+
+	rcu_read_unlock();
+
+	sta_info_destroy(sta);
+}
+
+static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata)
+{
+	if (!sdata || !sdata->default_key ||
+	    sdata->default_key->conf.alg != ALG_WEP)
+		return 0;
+	return 1;
+}
+
+static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss *bss;
 	int bss_privacy;
 	int wep_privacy;
 	int privacy_invoked;
@@ -981,14 +860,14 @@
 	if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
 		return 0;
 
-	bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
+	bss = ieee80211_rx_bss_get(local, ifsta->bssid,
 				   local->hw.conf.channel->center_freq,
 				   ifsta->ssid, ifsta->ssid_len);
 	if (!bss)
 		return 0;
 
 	bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
-	wep_privacy = !!ieee80211_sta_wep_configured(dev);
+	wep_privacy = !!ieee80211_sta_wep_configured(sdata);
 	privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
 
 	ieee80211_rx_bss_put(local, bss);
@@ -999,8 +878,7 @@
 	return 1;
 }
 
-
-static void ieee80211_associate(struct net_device *dev,
+static void ieee80211_associate(struct ieee80211_sub_if_data *sdata,
 				struct ieee80211_if_sta *ifsta)
 {
 	DECLARE_MAC_BUF(mac);
@@ -1009,31 +887,31 @@
 	if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
 		printk(KERN_DEBUG "%s: association with AP %s"
 		       " timed out\n",
-		       dev->name, print_mac(mac, ifsta->bssid));
-		ifsta->state = IEEE80211_DISABLED;
+		       sdata->dev->name, print_mac(mac, ifsta->bssid));
+		ifsta->state = IEEE80211_STA_MLME_DISABLED;
 		return;
 	}
 
-	ifsta->state = IEEE80211_ASSOCIATE;
+	ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
 	printk(KERN_DEBUG "%s: associate with AP %s\n",
-	       dev->name, print_mac(mac, ifsta->bssid));
-	if (ieee80211_privacy_mismatch(dev, ifsta)) {
+	       sdata->dev->name, print_mac(mac, ifsta->bssid));
+	if (ieee80211_privacy_mismatch(sdata, ifsta)) {
 		printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
-		       "mixed-cell disabled - abort association\n", dev->name);
-		ifsta->state = IEEE80211_DISABLED;
+		       "mixed-cell disabled - abort association\n", sdata->dev->name);
+		ifsta->state = IEEE80211_STA_MLME_DISABLED;
 		return;
 	}
 
-	ieee80211_send_assoc(dev, ifsta);
+	ieee80211_send_assoc(sdata, ifsta);
 
 	mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
 }
 
 
-static void ieee80211_associated(struct net_device *dev,
+static void ieee80211_associated(struct ieee80211_sub_if_data *sdata,
 				 struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	int disassoc;
 	DECLARE_MAC_BUF(mac);
@@ -1043,14 +921,14 @@
 	 * for better APs. */
 	/* TODO: remove expired BSSes */
 
-	ifsta->state = IEEE80211_ASSOCIATED;
+	ifsta->state = IEEE80211_STA_MLME_ASSOCIATED;
 
 	rcu_read_lock();
 
 	sta = sta_info_get(local, ifsta->bssid);
 	if (!sta) {
 		printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
-		       dev->name, print_mac(mac, ifsta->bssid));
+		       sdata->dev->name, print_mac(mac, ifsta->bssid));
 		disassoc = 1;
 	} else {
 		disassoc = 0;
@@ -1060,20 +938,19 @@
 				printk(KERN_DEBUG "%s: No ProbeResp from "
 				       "current AP %s - assume out of "
 				       "range\n",
-				       dev->name, print_mac(mac, ifsta->bssid));
+				       sdata->dev->name, print_mac(mac, ifsta->bssid));
 				disassoc = 1;
-				sta_info_unlink(&sta);
 			} else
-				ieee80211_send_probe_req(dev, ifsta->bssid,
-							 local->scan_ssid,
-							 local->scan_ssid_len);
+				ieee80211_send_probe_req(sdata, ifsta->bssid,
+							 ifsta->ssid,
+							 ifsta->ssid_len);
 			ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
 		} else {
 			ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
 			if (time_after(jiffies, ifsta->last_probe +
 				       IEEE80211_PROBE_INTERVAL)) {
 				ifsta->last_probe = jiffies;
-				ieee80211_send_probe_req(dev, ifsta->bssid,
+				ieee80211_send_probe_req(sdata, ifsta->bssid,
 							 ifsta->ssid,
 							 ifsta->ssid_len);
 			}
@@ -1082,100 +959,25 @@
 
 	rcu_read_unlock();
 
-	if (disassoc && sta)
-		sta_info_destroy(sta);
-
-	if (disassoc) {
-		ifsta->state = IEEE80211_DISABLED;
-		ieee80211_set_associated(dev, ifsta, 0);
-	} else {
+	if (disassoc)
+		ieee80211_set_disassoc(sdata, ifsta, true, true,
+					WLAN_REASON_PREV_AUTH_NOT_VALID);
+	else
 		mod_timer(&ifsta->timer, jiffies +
 				      IEEE80211_MONITORING_INTERVAL);
-	}
 }
 
 
-static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
-				     u8 *ssid, size_t ssid_len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_supported_band *sband;
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
-	u8 *pos, *supp_rates, *esupp_rates = NULL;
-	int i;
-
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
-		       "request\n", dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
-	memset(mgmt, 0, 24);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_PROBE_REQ);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	if (dst) {
-		memcpy(mgmt->da, dst, ETH_ALEN);
-		memcpy(mgmt->bssid, dst, ETH_ALEN);
-	} else {
-		memset(mgmt->da, 0xff, ETH_ALEN);
-		memset(mgmt->bssid, 0xff, ETH_ALEN);
-	}
-	pos = skb_put(skb, 2 + ssid_len);
-	*pos++ = WLAN_EID_SSID;
-	*pos++ = ssid_len;
-	memcpy(pos, ssid, ssid_len);
-
-	supp_rates = skb_put(skb, 2);
-	supp_rates[0] = WLAN_EID_SUPP_RATES;
-	supp_rates[1] = 0;
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
-	for (i = 0; i < sband->n_bitrates; i++) {
-		struct ieee80211_rate *rate = &sband->bitrates[i];
-		if (esupp_rates) {
-			pos = skb_put(skb, 1);
-			esupp_rates[1]++;
-		} else if (supp_rates[1] == 8) {
-			esupp_rates = skb_put(skb, 3);
-			esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
-			esupp_rates[1] = 1;
-			pos = &esupp_rates[2];
-		} else {
-			pos = skb_put(skb, 1);
-			supp_rates[1]++;
-		}
-		*pos = rate->bitrate / 5;
-	}
-
-	ieee80211_sta_tx(dev, skb, 0);
-}
-
-
-static int ieee80211_sta_wep_configured(struct net_device *dev)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (!sdata || !sdata->default_key ||
-	    sdata->default_key->conf.alg != ALG_WEP)
-		return 0;
-	return 1;
-}
-
-
-static void ieee80211_auth_completed(struct net_device *dev,
+static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta)
 {
-	printk(KERN_DEBUG "%s: authenticated\n", dev->name);
+	printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
 	ifsta->flags |= IEEE80211_STA_AUTHENTICATED;
-	ieee80211_associate(dev, ifsta);
+	ieee80211_associate(sdata, ifsta);
 }
 
 
-static void ieee80211_auth_challenge(struct net_device *dev,
+static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta,
 				     struct ieee80211_mgmt *mgmt,
 				     size_t len)
@@ -1187,682 +989,30 @@
 	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 	if (!elems.challenge)
 		return;
-	ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2,
+	ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2,
 			    elems.challenge_len + 2, 1);
 }
 
-static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
-					u8 dialog_token, u16 status, u16 policy,
-					u16 buf_size, u16 timeout)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
-	u16 capab;
-
-	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer "
-		       "for addba resp frame\n", dev->name);
-		return;
-	}
-
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
-	memset(mgmt, 0, 24);
-	memcpy(mgmt->da, da, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
-		memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
-	else
-		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					   IEEE80211_STYPE_ACTION);
-
-	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
-	mgmt->u.action.category = WLAN_CATEGORY_BACK;
-	mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
-	mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
-
-	capab = (u16)(policy << 1);	/* bit 1 aggregation policy */
-	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
-	capab |= (u16)(buf_size << 6);	/* bit 15:6 max size of aggregation */
-
-	mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
-	mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
-	mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
-
-	ieee80211_sta_tx(dev, skb, 0);
-
-	return;
-}
-
-void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
-				u16 tid, u8 dialog_token, u16 start_seq_num,
-				u16 agg_size, u16 timeout)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
-	u16 capab;
-
-	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer "
-				"for addba request frame\n", dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
-	memset(mgmt, 0, 24);
-	memcpy(mgmt->da, da, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
-		memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
-	else
-		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					IEEE80211_STYPE_ACTION);
-
-	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
-
-	mgmt->u.action.category = WLAN_CATEGORY_BACK;
-	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
-
-	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
-	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
-	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
-	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggergation */
-
-	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
-
-	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
-	mgmt->u.action.u.addba_req.start_seq_num =
-					cpu_to_le16(start_seq_num << 4);
-
-	ieee80211_sta_tx(dev, skb, 0);
-}
-
-static void ieee80211_sta_process_addba_request(struct net_device *dev,
-						struct ieee80211_mgmt *mgmt,
-						size_t len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct sta_info *sta;
-	struct tid_ampdu_rx *tid_agg_rx;
-	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
-	u8 dialog_token;
-	int ret = -EOPNOTSUPP;
-	DECLARE_MAC_BUF(mac);
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, mgmt->sa);
-	if (!sta) {
-		rcu_read_unlock();
-		return;
-	}
-
-	/* extract session parameters from addba request frame */
-	dialog_token = mgmt->u.action.u.addba_req.dialog_token;
-	timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
-	start_seq_num =
-		le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
-
-	capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
-	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
-	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
-
-	status = WLAN_STATUS_REQUEST_DECLINED;
-
-	/* sanity check for incoming parameters:
-	 * check if configuration can support the BA policy
-	 * and if buffer size does not exceeds max value */
-	if (((ba_policy != 1)
-		&& (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
-		|| (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
-		status = WLAN_STATUS_INVALID_QOS_PARAM;
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "AddBA Req with bad params from "
-				"%s on tid %u. policy %d, buffer size %d\n",
-				print_mac(mac, mgmt->sa), tid, ba_policy,
-				buf_size);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		goto end_no_lock;
-	}
-	/* determine default buffer size */
-	if (buf_size == 0) {
-		struct ieee80211_supported_band *sband;
-
-		sband = local->hw.wiphy->bands[conf->channel->band];
-		buf_size = IEEE80211_MIN_AMPDU_BUF;
-		buf_size = buf_size << sband->ht_info.ampdu_factor;
-	}
-
-
-	/* examine state machine */
-	spin_lock_bh(&sta->lock);
-
-	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "unexpected AddBA Req from "
-				"%s on tid %u\n",
-				print_mac(mac, mgmt->sa), tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		goto end;
-	}
-
-	/* prepare A-MPDU MLME for Rx aggregation */
-	sta->ampdu_mlme.tid_rx[tid] =
-			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
-	if (!sta->ampdu_mlme.tid_rx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
-					tid);
-#endif
-		goto end;
-	}
-	/* rx timer */
-	sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
-				sta_rx_agg_session_timer_expired;
-	sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
-				(unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
-
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
-
-	/* prepare reordering buffer */
-	tid_agg_rx->reorder_buf =
-		kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
-	if (!tid_agg_rx->reorder_buf) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_ERR "can not allocate reordering buffer "
-			       "to tid %d\n", tid);
-#endif
-		kfree(sta->ampdu_mlme.tid_rx[tid]);
-		goto end;
-	}
-	memset(tid_agg_rx->reorder_buf, 0,
-		buf_size * sizeof(struct sk_buff *));
-
-	if (local->ops->ampdu_action)
-		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
-					       sta->addr, tid, &start_seq_num);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	if (ret) {
-		kfree(tid_agg_rx->reorder_buf);
-		kfree(tid_agg_rx);
-		sta->ampdu_mlme.tid_rx[tid] = NULL;
-		goto end;
-	}
-
-	/* change state and send addba resp */
-	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
-	tid_agg_rx->dialog_token = dialog_token;
-	tid_agg_rx->ssn = start_seq_num;
-	tid_agg_rx->head_seq_num = start_seq_num;
-	tid_agg_rx->buf_size = buf_size;
-	tid_agg_rx->timeout = timeout;
-	tid_agg_rx->stored_mpdu_num = 0;
-	status = WLAN_STATUS_SUCCESS;
-end:
-	spin_unlock_bh(&sta->lock);
-
-end_no_lock:
-	ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
-				  dialog_token, status, 1, buf_size, timeout);
-	rcu_read_unlock();
-}
-
-static void ieee80211_sta_process_addba_resp(struct net_device *dev,
-					     struct ieee80211_mgmt *mgmt,
-					     size_t len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	struct sta_info *sta;
-	u16 capab;
-	u16 tid;
-	u8 *state;
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, mgmt->sa);
-	if (!sta) {
-		rcu_read_unlock();
-		return;
-	}
-
-	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
-	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-
-	spin_lock_bh(&sta->lock);
-
-	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
-		spin_unlock_bh(&sta->lock);
-		goto addba_resp_exit;
-	}
-
-	if (mgmt->u.action.u.addba_resp.dialog_token !=
-		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
-		spin_unlock_bh(&sta->lock);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		goto addba_resp_exit;
-	}
-
-	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
-			== WLAN_STATUS_SUCCESS) {
-		*state |= HT_ADDBA_RECEIVED_MSK;
-		sta->ampdu_mlme.addba_req_num[tid] = 0;
-
-		if (*state == HT_AGG_STATE_OPERATIONAL)
-			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
-
-		spin_unlock_bh(&sta->lock);
-	} else {
-		sta->ampdu_mlme.addba_req_num[tid]++;
-		/* this will allow the state check in stop_BA_session */
-		*state = HT_AGG_STATE_OPERATIONAL;
-		spin_unlock_bh(&sta->lock);
-		ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
-					     WLAN_BACK_INITIATOR);
-	}
-
-addba_resp_exit:
-	rcu_read_unlock();
-}
-
-void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
-			  u16 initiator, u16 reason_code)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *mgmt;
-	u16 params;
-
-	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer "
-					"for delba frame\n", dev->name);
-		return;
-	}
-
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
-	memset(mgmt, 0, 24);
-	memcpy(mgmt->da, da, ETH_ALEN);
-	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
-		memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
-	else
-		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
-	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-					IEEE80211_STYPE_ACTION);
-
-	skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
-
-	mgmt->u.action.category = WLAN_CATEGORY_BACK;
-	mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
-	params = (u16)(initiator << 11); 	/* bit 11 initiator */
-	params |= (u16)(tid << 12); 		/* bit 15:12 TID number */
-
-	mgmt->u.action.u.delba.params = cpu_to_le16(params);
-	mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
-
-	ieee80211_sta_tx(dev, skb, 0);
-}
-
-void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sk_buff *skb;
-	struct ieee80211_bar *bar;
-	u16 bar_control = 0;
-
-	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer for "
-			"bar frame\n", dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
-	memset(bar, 0, sizeof(*bar));
-	bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL,
-					IEEE80211_STYPE_BACK_REQ);
-	memcpy(bar->ra, ra, ETH_ALEN);
-	memcpy(bar->ta, dev->dev_addr, ETH_ALEN);
-	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
-	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
-	bar_control |= (u16)(tid << 12);
-	bar->control = cpu_to_le16(bar_control);
-	bar->start_seq_num = cpu_to_le16(ssn);
-
-	ieee80211_sta_tx(dev, skb, 0);
-}
-
-void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
-					u16 initiator, u16 reason)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	struct sta_info *sta;
-	int ret, i;
-	DECLARE_MAC_BUF(mac);
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, ra);
-	if (!sta) {
-		rcu_read_unlock();
-		return;
-	}
-
-	/* check if TID is in operational state */
-	spin_lock_bh(&sta->lock);
-	if (sta->ampdu_mlme.tid_state_rx[tid]
-				!= HT_AGG_STATE_OPERATIONAL) {
-		spin_unlock_bh(&sta->lock);
-		rcu_read_unlock();
-		return;
-	}
-	sta->ampdu_mlme.tid_state_rx[tid] =
-		HT_AGG_STATE_REQ_STOP_BA_MSK |
-		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
-	spin_unlock_bh(&sta->lock);
-
-	/* stop HW Rx aggregation. ampdu_action existence
-	 * already verified in session init so we add the BUG_ON */
-	BUG_ON(!local->ops->ampdu_action);
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
-				print_mac(mac, ra), tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
-					ra, tid, NULL);
-	if (ret)
-		printk(KERN_DEBUG "HW problem - can not stop rx "
-				"aggregation for tid %d\n", tid);
-
-	/* shutdown timer has not expired */
-	if (initiator != WLAN_BACK_TIMER)
-		del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
-
-	/* check if this is a self generated aggregation halt */
-	if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
-		ieee80211_send_delba(dev, ra, tid, 0, reason);
-
-	/* free the reordering buffer */
-	for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
-		if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
-			/* release the reordered frames */
-			dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
-			sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
-			sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
-		}
-	}
-	/* free resources */
-	kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
-	kfree(sta->ampdu_mlme.tid_rx[tid]);
-	sta->ampdu_mlme.tid_rx[tid] = NULL;
-	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
-
-	rcu_read_unlock();
-}
-
-
-static void ieee80211_sta_process_delba(struct net_device *dev,
-			struct ieee80211_mgmt *mgmt, size_t len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta;
-	u16 tid, params;
-	u16 initiator;
-	DECLARE_MAC_BUF(mac);
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, mgmt->sa);
-	if (!sta) {
-		rcu_read_unlock();
-		return;
-	}
-
-	params = le16_to_cpu(mgmt->u.action.u.delba.params);
-	tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
-	initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	if (net_ratelimit())
-		printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
-			print_mac(mac, mgmt->sa),
-			initiator ? "initiator" : "recipient", tid,
-			mgmt->u.action.u.delba.reason_code);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	if (initiator == WLAN_BACK_INITIATOR)
-		ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
-						 WLAN_BACK_INITIATOR, 0);
-	else { /* WLAN_BACK_RECIPIENT */
-		spin_lock_bh(&sta->lock);
-		sta->ampdu_mlme.tid_state_tx[tid] =
-				HT_AGG_STATE_OPERATIONAL;
-		spin_unlock_bh(&sta->lock);
-		ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
-					     WLAN_BACK_RECIPIENT);
-	}
-	rcu_read_unlock();
-}
-
-/*
- * After sending add Block Ack request we activated a timer until
- * add Block Ack response will arrive from the recipient.
- * If this timer expires sta_addba_resp_timer_expired will be executed.
- */
-void sta_addba_resp_timer_expired(unsigned long data)
-{
-	/* not an elegant detour, but there is no choice as the timer passes
-	 * only one argument, and both sta_info and TID are needed, so init
-	 * flow in sta_info_create gives the TID as data, while the timer_to_id
-	 * array gives the sta through container_of */
-	u16 tid = *(u8 *)data;
-	struct sta_info *temp_sta = container_of((void *)data,
-		struct sta_info, timer_to_tid[tid]);
-
-	struct ieee80211_local *local = temp_sta->local;
-	struct ieee80211_hw *hw = &local->hw;
-	struct sta_info *sta;
-	u8 *state;
-
-	rcu_read_lock();
-
-	sta = sta_info_get(local, temp_sta->addr);
-	if (!sta) {
-		rcu_read_unlock();
-		return;
-	}
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	/* check if the TID waits for addBA response */
-	spin_lock_bh(&sta->lock);
-	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
-		spin_unlock_bh(&sta->lock);
-		*state = HT_AGG_STATE_IDLE;
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "timer expired on tid %d but we are not "
-				"expecting addBA response there", tid);
-#endif
-		goto timer_expired_exit;
-	}
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
-#endif
-
-	/* go through the state check in stop_BA_session */
-	*state = HT_AGG_STATE_OPERATIONAL;
-	spin_unlock_bh(&sta->lock);
-	ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
-				     WLAN_BACK_INITIATOR);
-
-timer_expired_exit:
-	rcu_read_unlock();
-}
-
-/*
- * After accepting the AddBA Request we activated a timer,
- * resetting it after each frame that arrives from the originator.
- * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
- */
-static void sta_rx_agg_session_timer_expired(unsigned long data)
-{
-	/* not an elegant detour, but there is no choice as the timer passes
-	 * only one argument, and various sta_info are needed here, so init
-	 * flow in sta_info_create gives the TID as data, while the timer_to_id
-	 * array gives the sta through container_of */
-	u8 *ptid = (u8 *)data;
-	u8 *timer_to_id = ptid - *ptid;
-	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
-					 timer_to_tid[0]);
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
-#endif
-	ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
-					 (u16)*ptid, WLAN_BACK_TIMER,
-					 WLAN_REASON_QSTA_TIMEOUT);
-}
-
-void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	int i;
-
-	for (i = 0; i <  STA_TID_NUM; i++) {
-		ieee80211_stop_tx_ba_session(&local->hw, addr, i,
-					     WLAN_BACK_INITIATOR);
-		ieee80211_sta_stop_rx_ba_session(dev, addr, i,
-						 WLAN_BACK_RECIPIENT,
-						 WLAN_REASON_QSTA_LEAVE_QBSS);
-	}
-}
-
-static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
-					struct ieee80211_msrment_ie *request_ie,
-					const u8 *da, const u8 *bssid,
-					u8 dialog_token)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sk_buff *skb;
-	struct ieee80211_mgmt *msr_report;
-
-	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
-				sizeof(struct ieee80211_msrment_ie));
-
-	if (!skb) {
-		printk(KERN_ERR "%s: failed to allocate buffer for "
-				"measurement report frame\n", dev->name);
-		return;
-	}
-
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-	msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
-	memset(msr_report, 0, 24);
-	memcpy(msr_report->da, da, ETH_ALEN);
-	memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN);
-	memcpy(msr_report->bssid, bssid, ETH_ALEN);
-	msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-						IEEE80211_STYPE_ACTION);
-
-	skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
-	msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
-	msr_report->u.action.u.measurement.action_code =
-				WLAN_ACTION_SPCT_MSR_RPRT;
-	msr_report->u.action.u.measurement.dialog_token = dialog_token;
-
-	msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
-	msr_report->u.action.u.measurement.length =
-			sizeof(struct ieee80211_msrment_ie);
-
-	memset(&msr_report->u.action.u.measurement.msr_elem, 0,
-		sizeof(struct ieee80211_msrment_ie));
-	msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
-	msr_report->u.action.u.measurement.msr_elem.mode |=
-			IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
-	msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
-
-	ieee80211_sta_tx(dev, skb, 0);
-}
-
-static void ieee80211_sta_process_measurement_req(struct net_device *dev,
-						struct ieee80211_mgmt *mgmt,
-						size_t len)
-{
-	/*
-	 * Ignoring measurement request is spec violation.
-	 * Mandatory measurements must be reported optional
-	 * measurements might be refused or reported incapable
-	 * For now just refuse
-	 * TODO: Answer basic measurement as unmeasured
-	 */
-	ieee80211_send_refuse_measurement_request(dev,
-			&mgmt->u.action.u.measurement.msr_elem,
-			mgmt->sa, mgmt->bssid,
-			mgmt->u.action.u.measurement.dialog_token);
-}
-
-
-static void ieee80211_rx_mgmt_auth(struct net_device *dev,
+static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
 				   struct ieee80211_if_sta *ifsta,
 				   struct ieee80211_mgmt *mgmt,
 				   size_t len)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	u16 auth_alg, auth_transaction, status_code;
 	DECLARE_MAC_BUF(mac);
 
-	if (ifsta->state != IEEE80211_AUTHENTICATE &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
+	if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
 		return;
 
 	if (len < 24 + 6)
 		return;
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
+	if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
 	    memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
 		return;
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
+	if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
 	    memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
 		return;
 
@@ -1870,7 +1020,7 @@
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 	status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		/*
 		 * IEEE 802.11 standard does not require authentication in IBSS
 		 * networks and most implementations do not seem to use it.
@@ -1879,7 +1029,7 @@
 		 */
 		if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
 			return;
-		ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0);
+		ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
 	}
 
 	if (auth_alg != ifsta->auth_alg ||
@@ -1912,7 +1062,7 @@
 				    algs[pos] == 0xff)
 					continue;
 				if (algs[pos] == WLAN_AUTH_SHARED_KEY &&
-				    !ieee80211_sta_wep_configured(dev))
+				    !ieee80211_sta_wep_configured(sdata))
 					continue;
 				ifsta->auth_alg = algs[pos];
 				break;
@@ -1924,19 +1074,19 @@
 	switch (ifsta->auth_alg) {
 	case WLAN_AUTH_OPEN:
 	case WLAN_AUTH_LEAP:
-		ieee80211_auth_completed(dev, ifsta);
+		ieee80211_auth_completed(sdata, ifsta);
 		break;
 	case WLAN_AUTH_SHARED_KEY:
 		if (ifsta->auth_transaction == 4)
-			ieee80211_auth_completed(dev, ifsta);
+			ieee80211_auth_completed(sdata, ifsta);
 		else
-			ieee80211_auth_challenge(dev, ifsta, mgmt, len);
+			ieee80211_auth_challenge(sdata, ifsta, mgmt, len);
 		break;
 	}
 }
 
 
-static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
+static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta,
 				     struct ieee80211_mgmt *mgmt,
 				     size_t len)
@@ -1953,22 +1103,22 @@
 	reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
 
 	if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
-		printk(KERN_DEBUG "%s: deauthenticated\n", dev->name);
+		printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name);
 
-	if (ifsta->state == IEEE80211_AUTHENTICATE ||
-	    ifsta->state == IEEE80211_ASSOCIATE ||
-	    ifsta->state == IEEE80211_ASSOCIATED) {
-		ifsta->state = IEEE80211_AUTHENTICATE;
+	if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE ||
+	    ifsta->state == IEEE80211_STA_MLME_ASSOCIATE ||
+	    ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
+		ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
 		mod_timer(&ifsta->timer, jiffies +
 				      IEEE80211_RETRY_AUTH_INTERVAL);
 	}
 
-	ieee80211_set_disassoc(dev, ifsta, 1);
+	ieee80211_set_disassoc(sdata, ifsta, true, false, 0);
 	ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED;
 }
 
 
-static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
+static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 				       struct ieee80211_if_sta *ifsta,
 				       struct ieee80211_mgmt *mgmt,
 				       size_t len)
@@ -1985,15 +1135,15 @@
 	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
 	if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
-		printk(KERN_DEBUG "%s: disassociated\n", dev->name);
+		printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name);
 
-	if (ifsta->state == IEEE80211_ASSOCIATED) {
-		ifsta->state = IEEE80211_ASSOCIATE;
+	if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
+		ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
 		mod_timer(&ifsta->timer, jiffies +
 				      IEEE80211_RETRY_AUTH_INTERVAL);
 	}
 
-	ieee80211_set_disassoc(dev, ifsta, 0);
+	ieee80211_set_disassoc(sdata, ifsta, false, false, 0);
 }
 
 
@@ -2004,7 +1154,6 @@
 					 int reassoc)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct net_device *dev = sdata->dev;
 	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
 	u64 rates, basic_rates;
@@ -2019,7 +1168,7 @@
 	/* AssocResp and ReassocResp have identical structure, so process both
 	 * of them in this function. */
 
-	if (ifsta->state != IEEE80211_ASSOCIATE)
+	if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE)
 		return;
 
 	if (len < 24 + 6)
@@ -2034,12 +1183,12 @@
 
 	printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
 	       "status=%d aid=%d)\n",
-	       dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
+	       sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
 	       capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
 
 	if (status_code != WLAN_STATUS_SUCCESS) {
 		printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
-		       dev->name, status_code);
+		       sdata->dev->name, status_code);
 		/* if this was a reassociation, ensure we try a "full"
 		 * association next time. This works around some broken APs
 		 * which do not correctly reject reassociation requests. */
@@ -2049,7 +1198,7 @@
 
 	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
 		printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
-		       "set\n", dev->name, aid);
+		       "set\n", sdata->dev->name, aid);
 	aid &= ~(BIT(15) | BIT(14));
 
 	pos = mgmt->u.assoc_resp.variable;
@@ -2057,11 +1206,11 @@
 
 	if (!elems.supp_rates) {
 		printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
-		       dev->name);
+		       sdata->dev->name);
 		return;
 	}
 
-	printk(KERN_DEBUG "%s: associated\n", dev->name);
+	printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
 	ifsta->aid = aid;
 	ifsta->ap_capab = capab_info;
 
@@ -2076,17 +1225,17 @@
 	/* Add STA entry for the AP */
 	sta = sta_info_get(local, ifsta->bssid);
 	if (!sta) {
-		struct ieee80211_sta_bss *bss;
+		struct ieee80211_bss *bss;
 		int err;
 
 		sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
 		if (!sta) {
 			printk(KERN_DEBUG "%s: failed to alloc STA entry for"
-			       " the AP\n", dev->name);
+			       " the AP\n", sdata->dev->name);
 			rcu_read_unlock();
 			return;
 		}
-		bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
+		bss = ieee80211_rx_bss_get(local, ifsta->bssid,
 					   local->hw.conf.channel->center_freq,
 					   ifsta->ssid, ifsta->ssid_len);
 		if (bss) {
@@ -2099,7 +1248,7 @@
 		err = sta_info_insert(sta);
 		if (err) {
 			printk(KERN_DEBUG "%s: failed to insert STA entry for"
-			       " the AP (error %d)\n", dev->name, err);
+			       " the AP (error %d)\n", sdata->dev->name, err);
 			rcu_read_unlock();
 			return;
 		}
@@ -2152,8 +1301,8 @@
 		}
 	}
 
-	sta->supp_rates[local->hw.conf.channel->band] = rates;
-	sdata->basic_rates = basic_rates;
+	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	sdata->bss_conf.basic_rates = basic_rates;
 
 	/* cf. IEEE 802.11 9.2.12 */
 	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
@@ -2167,19 +1316,19 @@
 		struct ieee80211_ht_bss_info bss_info;
 		ieee80211_ht_cap_ie_to_ht_info(
 				(struct ieee80211_ht_cap *)
-				elems.ht_cap_elem, &sta->ht_info);
+				elems.ht_cap_elem, &sta->sta.ht_info);
 		ieee80211_ht_addt_info_ie_to_ht_bss_info(
 				(struct ieee80211_ht_addt_info *)
 				elems.ht_info_elem, &bss_info);
-		ieee80211_handle_ht(local, 1, &sta->ht_info, &bss_info);
+		ieee80211_handle_ht(local, 1, &sta->sta.ht_info, &bss_info);
 	}
 
-	rate_control_rate_init(sta, local);
+	rate_control_rate_init(sta);
 
 	if (elems.wmm_param) {
 		set_sta_flags(sta, WLAN_STA_WME);
 		rcu_read_unlock();
-		ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
+		ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
 					 elems.wmm_param_len);
 	} else
 		rcu_read_unlock();
@@ -2188,234 +1337,26 @@
 	 * ieee80211_set_associated() will tell the driver */
 	bss_conf->aid = aid;
 	bss_conf->assoc_capability = capab_info;
-	ieee80211_set_associated(dev, ifsta, 1);
+	ieee80211_set_associated(sdata, ifsta);
 
-	ieee80211_associated(dev, ifsta);
+	ieee80211_associated(sdata, ifsta);
 }
 
 
-/* Caller must hold local->sta_bss_lock */
-static void __ieee80211_rx_bss_hash_add(struct net_device *dev,
-					struct ieee80211_sta_bss *bss)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	u8 hash_idx;
-
-	if (bss_mesh_cfg(bss))
-		hash_idx = mesh_id_hash(bss_mesh_id(bss),
-					bss_mesh_id_len(bss));
-	else
-		hash_idx = STA_HASH(bss->bssid);
-
-	bss->hnext = local->sta_bss_hash[hash_idx];
-	local->sta_bss_hash[hash_idx] = bss;
-}
-
-
-/* Caller must hold local->sta_bss_lock */
-static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
-					struct ieee80211_sta_bss *bss)
-{
-	struct ieee80211_sta_bss *b, *prev = NULL;
-	b = local->sta_bss_hash[STA_HASH(bss->bssid)];
-	while (b) {
-		if (b == bss) {
-			if (!prev)
-				local->sta_bss_hash[STA_HASH(bss->bssid)] =
-					bss->hnext;
-			else
-				prev->hnext = bss->hnext;
-			break;
-		}
-		prev = b;
-		b = b->hnext;
-	}
-}
-
-
-static struct ieee80211_sta_bss *
-ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
-		     u8 *ssid, u8 ssid_len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-
-	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
-	if (!bss)
-		return NULL;
-	atomic_inc(&bss->users);
-	atomic_inc(&bss->users);
-	memcpy(bss->bssid, bssid, ETH_ALEN);
-	bss->freq = freq;
-	if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
-		memcpy(bss->ssid, ssid, ssid_len);
-		bss->ssid_len = ssid_len;
-	}
-
-	spin_lock_bh(&local->sta_bss_lock);
-	/* TODO: order by RSSI? */
-	list_add_tail(&bss->list, &local->sta_bss_list);
-	__ieee80211_rx_bss_hash_add(dev, bss);
-	spin_unlock_bh(&local->sta_bss_lock);
-	return bss;
-}
-
-static struct ieee80211_sta_bss *
-ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
-		     u8 *ssid, u8 ssid_len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-
-	spin_lock_bh(&local->sta_bss_lock);
-	bss = local->sta_bss_hash[STA_HASH(bssid)];
-	while (bss) {
-		if (!bss_mesh_cfg(bss) &&
-		    !memcmp(bss->bssid, bssid, ETH_ALEN) &&
-		    bss->freq == freq &&
-		    bss->ssid_len == ssid_len &&
-		    (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
-			atomic_inc(&bss->users);
-			break;
-		}
-		bss = bss->hnext;
-	}
-	spin_unlock_bh(&local->sta_bss_lock);
-	return bss;
-}
-
-#ifdef CONFIG_MAC80211_MESH
-static struct ieee80211_sta_bss *
-ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
-			  u8 *mesh_cfg, int freq)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-
-	spin_lock_bh(&local->sta_bss_lock);
-	bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
-	while (bss) {
-		if (bss_mesh_cfg(bss) &&
-		    !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
-		    bss->freq == freq &&
-		    mesh_id_len == bss->mesh_id_len &&
-		    (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
-						 mesh_id_len))) {
-			atomic_inc(&bss->users);
-			break;
-		}
-		bss = bss->hnext;
-	}
-	spin_unlock_bh(&local->sta_bss_lock);
-	return bss;
-}
-
-static struct ieee80211_sta_bss *
-ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
-			  u8 *mesh_cfg, int mesh_config_len, int freq)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-
-	if (mesh_config_len != MESH_CFG_LEN)
-		return NULL;
-
-	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
-	if (!bss)
-		return NULL;
-
-	bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
-	if (!bss->mesh_cfg) {
-		kfree(bss);
-		return NULL;
-	}
-
-	if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
-		bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
-		if (!bss->mesh_id) {
-			kfree(bss->mesh_cfg);
-			kfree(bss);
-			return NULL;
-		}
-		memcpy(bss->mesh_id, mesh_id, mesh_id_len);
-	}
-
-	atomic_inc(&bss->users);
-	atomic_inc(&bss->users);
-	memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
-	bss->mesh_id_len = mesh_id_len;
-	bss->freq = freq;
-	spin_lock_bh(&local->sta_bss_lock);
-	/* TODO: order by RSSI? */
-	list_add_tail(&bss->list, &local->sta_bss_list);
-	__ieee80211_rx_bss_hash_add(dev, bss);
-	spin_unlock_bh(&local->sta_bss_lock);
-	return bss;
-}
-#endif
-
-static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
-{
-	kfree(bss->wpa_ie);
-	kfree(bss->rsn_ie);
-	kfree(bss->wmm_ie);
-	kfree(bss->ht_ie);
-	kfree(bss->ht_add_ie);
-	kfree(bss_mesh_id(bss));
-	kfree(bss_mesh_cfg(bss));
-	kfree(bss);
-}
-
-
-static void ieee80211_rx_bss_put(struct ieee80211_local *local,
-				 struct ieee80211_sta_bss *bss)
-{
-	local_bh_disable();
-	if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) {
-		local_bh_enable();
-		return;
-	}
-
-	__ieee80211_rx_bss_hash_del(local, bss);
-	list_del(&bss->list);
-	spin_unlock_bh(&local->sta_bss_lock);
-	ieee80211_rx_bss_free(bss);
-}
-
-
-void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
-{
-	spin_lock_init(&local->sta_bss_lock);
-	INIT_LIST_HEAD(&local->sta_bss_list);
-}
-
-
-void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
-{
-	struct ieee80211_sta_bss *bss, *tmp;
-
-	list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list)
-		ieee80211_rx_bss_put(local, bss);
-}
-
-
-static int ieee80211_sta_join_ibss(struct net_device *dev,
+static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 				   struct ieee80211_if_sta *ifsta,
-				   struct ieee80211_sta_bss *bss)
+				   struct ieee80211_bss *bss)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	int res, rates, i, j;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos;
-	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_supported_band *sband;
 	union iwreq_data wrqu;
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
 	/* Remove possible STA entries from other IBSS networks. */
 	sta_info_flush_delayed(sdata);
 
@@ -2433,7 +1374,7 @@
 	sdata->drop_unencrypted = bss->capability &
 		WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-	res = ieee80211_set_freq(dev, bss->freq);
+	res = ieee80211_set_freq(sdata, bss->freq);
 
 	if (res)
 		return res;
@@ -2446,10 +1387,10 @@
 		mgmt = (struct ieee80211_mgmt *)
 			skb_put(skb, 24 + sizeof(mgmt->u.beacon));
 		memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
-		mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-						   IEEE80211_STYPE_PROBE_RESP);
+		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						  IEEE80211_STYPE_PROBE_RESP);
 		memset(mgmt->da, 0xff, ETH_ALEN);
-		memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+		memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
 		mgmt->u.beacon.beacon_int =
 			cpu_to_le16(local->hw.conf.beacon_int);
@@ -2506,108 +1447,38 @@
 	}
 	ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
 
-	ieee80211_sta_def_wmm_params(dev, bss, 1);
+	ieee80211_sta_def_wmm_params(sdata, bss);
 
-	ifsta->state = IEEE80211_IBSS_JOINED;
+	ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED;
 	mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
 
+	ieee80211_led_assoc(local, true);
+
 	memset(&wrqu, 0, sizeof(wrqu));
 	memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
-	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+	wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
 
 	return res;
 }
 
-u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
-			    struct ieee802_11_elems *elems,
-			    enum ieee80211_band band)
-{
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_rate *bitrates;
-	size_t num_rates;
-	u64 supp_rates;
-	int i, j;
-	sband = local->hw.wiphy->bands[band];
-
-	if (!sband) {
-		WARN_ON(1);
-		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	}
-
-	bitrates = sband->bitrates;
-	num_rates = sband->n_bitrates;
-	supp_rates = 0;
-	for (i = 0; i < elems->supp_rates_len +
-		     elems->ext_supp_rates_len; i++) {
-		u8 rate = 0;
-		int own_rate;
-		if (i < elems->supp_rates_len)
-			rate = elems->supp_rates[i];
-		else if (elems->ext_supp_rates)
-			rate = elems->ext_supp_rates
-				[i - elems->supp_rates_len];
-		own_rate = 5 * (rate & 0x7f);
-		for (j = 0; j < num_rates; j++)
-			if (bitrates[j].bitrate == own_rate)
-				supp_rates |= BIT(j);
-	}
-	return supp_rates;
-}
-
-
-static void ieee80211_rx_bss_info(struct net_device *dev,
+static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 				  struct ieee80211_mgmt *mgmt,
 				  size_t len,
 				  struct ieee80211_rx_status *rx_status,
 				  struct ieee802_11_elems *elems,
-				  int beacon)
+				  bool beacon)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	int freq, clen;
-	struct ieee80211_sta_bss *bss;
+	struct ieee80211_local *local = sdata->local;
+	int freq;
+	struct ieee80211_bss *bss;
 	struct sta_info *sta;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	u64 beacon_timestamp, rx_timestamp;
 	struct ieee80211_channel *channel;
+	u64 beacon_timestamp, rx_timestamp;
+	u64 supp_rates = 0;
+	enum ieee80211_band band = rx_status->band;
 	DECLARE_MAC_BUF(mac);
 	DECLARE_MAC_BUF(mac2);
 
-	if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
-		return; /* ignore ProbeResp to foreign address */
-
-	beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
-
-	if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
-	    elems->mesh_config && mesh_matches_local(elems, dev)) {
-		u64 rates = ieee80211_sta_get_rates(local, elems,
-						rx_status->band);
-
-		mesh_neighbour_update(mgmt->sa, rates, dev,
-				      mesh_peer_accepts_plinks(elems, dev));
-	}
-
-	rcu_read_lock();
-
-	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
-	    memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
-	    (sta = sta_info_get(local, mgmt->sa))) {
-		u64 prev_rates;
-		u64 supp_rates = ieee80211_sta_get_rates(local, elems,
-							rx_status->band);
-
-		prev_rates = sta->supp_rates[rx_status->band];
-		sta->supp_rates[rx_status->band] &= supp_rates;
-		if (sta->supp_rates[rx_status->band] == 0) {
-			/* No matching rates - this should not really happen.
-			 * Make sure that at least one rate is marked
-			 * supported to avoid issues with TX rate ctrl. */
-			sta->supp_rates[rx_status->band] =
-				sdata->u.sta.supp_rates_bits[rx_status->band];
-		}
-	}
-
-	rcu_read_unlock();
-
 	if (elems->ds_params && elems->ds_params_len == 1)
 		freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
 	else
@@ -2618,215 +1489,60 @@
 	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
 		return;
 
-#ifdef CONFIG_MAC80211_MESH
-	if (elems->mesh_config)
-		bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id,
-				elems->mesh_id_len, elems->mesh_config, freq);
-	else
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC && elems->supp_rates &&
+	    memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) {
+		supp_rates = ieee80211_sta_get_rates(local, elems, band);
+
+		rcu_read_lock();
+
+		sta = sta_info_get(local, mgmt->sa);
+		if (sta) {
+			u64 prev_rates;
+
+			prev_rates = sta->sta.supp_rates[band];
+			/* make sure mandatory rates are always added */
+			sta->sta.supp_rates[band] = supp_rates |
+				ieee80211_mandatory_rates(local, band);
+
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+			if (sta->sta.supp_rates[band] != prev_rates)
+				printk(KERN_DEBUG "%s: updated supp_rates set "
+				    "for %s based on beacon info (0x%llx | "
+				    "0x%llx -> 0x%llx)\n",
+				    sdata->dev->name,
+				    print_mac(mac, sta->sta.addr),
+				    (unsigned long long) prev_rates,
+				    (unsigned long long) supp_rates,
+				    (unsigned long long) sta->sta.supp_rates[band]);
 #endif
-		bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
-					   elems->ssid, elems->ssid_len);
-	if (!bss) {
-#ifdef CONFIG_MAC80211_MESH
-		if (elems->mesh_config)
-			bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id,
-				elems->mesh_id_len, elems->mesh_config,
-				elems->mesh_config_len, freq);
-		else
-#endif
-			bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
-						  elems->ssid, elems->ssid_len);
-		if (!bss)
-			return;
-	} else {
-#if 0
-		/* TODO: order by RSSI? */
-		spin_lock_bh(&local->sta_bss_lock);
-		list_move_tail(&bss->list, &local->sta_bss_list);
-		spin_unlock_bh(&local->sta_bss_lock);
-#endif
+		} else {
+			ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid,
+					       mgmt->sa, supp_rates);
+		}
+
+		rcu_read_unlock();
 	}
 
-	/* save the ERP value so that it is available at association time */
-	if (elems->erp_info && elems->erp_info_len >= 1) {
-		bss->erp_value = elems->erp_info[0];
-		bss->has_erp_value = 1;
-	}
+	bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
+					freq, beacon);
+	if (!bss)
+		return;
 
-	if (elems->ht_cap_elem &&
-	     (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len ||
-	     memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) {
-		kfree(bss->ht_ie);
-		bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
-		if (bss->ht_ie) {
-			memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
-				elems->ht_cap_elem_len + 2);
-			bss->ht_ie_len = elems->ht_cap_elem_len + 2;
-		} else
-			bss->ht_ie_len = 0;
-	} else if (!elems->ht_cap_elem && bss->ht_ie) {
-		kfree(bss->ht_ie);
-		bss->ht_ie = NULL;
-		bss->ht_ie_len = 0;
-	}
-
-	if (elems->ht_info_elem &&
-	     (!bss->ht_add_ie ||
-	     bss->ht_add_ie_len != elems->ht_info_elem_len ||
-	     memcmp(bss->ht_add_ie, elems->ht_info_elem,
-			elems->ht_info_elem_len))) {
-		kfree(bss->ht_add_ie);
-		bss->ht_add_ie =
-			kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
-		if (bss->ht_add_ie) {
-			memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
-				elems->ht_info_elem_len + 2);
-			bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
-		} else
-			bss->ht_add_ie_len = 0;
-	} else if (!elems->ht_info_elem && bss->ht_add_ie) {
-		kfree(bss->ht_add_ie);
-		bss->ht_add_ie = NULL;
-		bss->ht_add_ie_len = 0;
-	}
-
-	bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
-	bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
-
-	if (elems->tim) {
-		struct ieee80211_tim_ie *tim_ie =
-			(struct ieee80211_tim_ie *)elems->tim;
-		bss->dtim_period = tim_ie->dtim_period;
-	}
-
-	/* set default value for buggy APs */
-	if (!elems->tim || bss->dtim_period == 0)
-		bss->dtim_period = 1;
-
-	bss->supp_rates_len = 0;
-	if (elems->supp_rates) {
-		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
-		if (clen > elems->supp_rates_len)
-			clen = elems->supp_rates_len;
-		memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
-		       clen);
-		bss->supp_rates_len += clen;
-	}
-	if (elems->ext_supp_rates) {
-		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
-		if (clen > elems->ext_supp_rates_len)
-			clen = elems->ext_supp_rates_len;
-		memcpy(&bss->supp_rates[bss->supp_rates_len],
-		       elems->ext_supp_rates, clen);
-		bss->supp_rates_len += clen;
-	}
-
-	bss->band = rx_status->band;
-
-	bss->timestamp = beacon_timestamp;
-	bss->last_update = jiffies;
-	bss->signal = rx_status->signal;
-	bss->noise = rx_status->noise;
-	bss->qual = rx_status->qual;
-	if (!beacon && !bss->probe_resp)
-		bss->probe_resp = true;
+	/* was just updated in ieee80211_bss_info_update */
+	beacon_timestamp = bss->timestamp;
 
 	/*
 	 * In STA mode, the remaining parameters should not be overridden
 	 * by beacons because they're not necessarily accurate there.
 	 */
-	if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
-	    bss->probe_resp && beacon) {
+	if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+	    bss->last_probe_resp && beacon) {
 		ieee80211_rx_bss_put(local, bss);
 		return;
 	}
 
-	if (elems->wpa &&
-	    (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len ||
-	     memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) {
-		kfree(bss->wpa_ie);
-		bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
-		if (bss->wpa_ie) {
-			memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
-			bss->wpa_ie_len = elems->wpa_len + 2;
-		} else
-			bss->wpa_ie_len = 0;
-	} else if (!elems->wpa && bss->wpa_ie) {
-		kfree(bss->wpa_ie);
-		bss->wpa_ie = NULL;
-		bss->wpa_ie_len = 0;
-	}
-
-	if (elems->rsn &&
-	    (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
-	     memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
-		kfree(bss->rsn_ie);
-		bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
-		if (bss->rsn_ie) {
-			memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
-			bss->rsn_ie_len = elems->rsn_len + 2;
-		} else
-			bss->rsn_ie_len = 0;
-	} else if (!elems->rsn && bss->rsn_ie) {
-		kfree(bss->rsn_ie);
-		bss->rsn_ie = NULL;
-		bss->rsn_ie_len = 0;
-	}
-
-	/*
-	 * Cf.
-	 * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC
-	 *
-	 * quoting:
-	 *
-	 * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia
-	 * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi
-	 * Alliance (September 1, 2004) is incorporated by reference herein.
-	 * The inclusion of the WMM Parameters in probe responses and
-	 * association responses is mandatory for WMM enabled networks. The
-	 * inclusion of the WMM Parameters in beacons, however, is optional.
-	 */
-
-	if (elems->wmm_param &&
-	    (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
-	     memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
-		kfree(bss->wmm_ie);
-		bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
-		if (bss->wmm_ie) {
-			memcpy(bss->wmm_ie, elems->wmm_param - 2,
-			       elems->wmm_param_len + 2);
-			bss->wmm_ie_len = elems->wmm_param_len + 2;
-		} else
-			bss->wmm_ie_len = 0;
-	} else if (elems->wmm_info &&
-		    (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
-		     memcmp(bss->wmm_ie, elems->wmm_info,
-						elems->wmm_info_len))) {
-		 /* As for certain AP's Fifth bit is not set in WMM IE in
-		  * beacon frames.So while parsing the beacon frame the
-		  * wmm_info structure is used instead of wmm_param.
-		  * wmm_info structure was never used to set bss->wmm_ie.
-		  * This code fixes this problem by copying the WME
-		  * information from wmm_info to bss->wmm_ie and enabling
-		  * n-band association.
-		  */
-		kfree(bss->wmm_ie);
-		bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
-		if (bss->wmm_ie) {
-			memcpy(bss->wmm_ie, elems->wmm_info - 2,
-			       elems->wmm_info_len + 2);
-			bss->wmm_ie_len = elems->wmm_info_len + 2;
-		} else
-			bss->wmm_ie_len = 0;
-	} else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
-		kfree(bss->wmm_ie);
-		bss->wmm_ie = NULL;
-		bss->wmm_ie_len = 0;
-	}
-
 	/* check if we need to merge IBSS */
-	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon &&
-	    !local->sta_sw_scanning && !local->sta_hw_scanning &&
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC && beacon &&
 	    bss->capability & WLAN_CAPABILITY_IBSS &&
 	    bss->freq == local->oper_channel->center_freq &&
 	    elems->ssid_len == sdata->u.sta.ssid_len &&
@@ -2848,7 +1564,7 @@
 			 * e.g: at 1 MBit that means mactime is 192 usec earlier
 			 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
 			 */
-			int rate = local->hw.wiphy->bands[rx_status->band]->
+			int rate = local->hw.wiphy->bands[band]->
 					bitrates[rx_status->rate_idx].bitrate;
 			rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
 		} else if (local && local->ops && local->ops->get_tsf)
@@ -2871,12 +1587,12 @@
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 			printk(KERN_DEBUG "%s: beacon TSF higher than "
 			       "local TSF - IBSS merge with BSSID %s\n",
-			       dev->name, print_mac(mac, mgmt->bssid));
+			       sdata->dev->name, print_mac(mac, mgmt->bssid));
 #endif
-			ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss);
-			ieee80211_ibss_add_sta(dev, NULL,
+			ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss);
+			ieee80211_ibss_add_sta(sdata, NULL,
 					       mgmt->bssid, mgmt->sa,
-					       BIT(rx_status->rate_idx));
+					       supp_rates);
 		}
 	}
 
@@ -2884,13 +1600,17 @@
 }
 
 
-static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
+static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
 					 struct ieee80211_mgmt *mgmt,
 					 size_t len,
 					 struct ieee80211_rx_status *rx_status)
 {
 	size_t baselen;
 	struct ieee802_11_elems elems;
+	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+
+	if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+		return; /* ignore ProbeResp to foreign address */
 
 	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
 	if (baselen > len)
@@ -2899,20 +1619,27 @@
 	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
 				&elems);
 
-	ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0);
+	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
+
+	/* direct probe may be part of the association flow */
+	if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
+							&ifsta->request)) {
+		printk(KERN_DEBUG "%s direct probe responded\n",
+		       sdata->dev->name);
+		ieee80211_authenticate(sdata, ifsta);
+	}
 }
 
 
-static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
+static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_mgmt *mgmt,
 				     size_t len,
 				     struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_sta *ifsta;
 	size_t baselen;
 	struct ieee802_11_elems elems;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_conf *conf = &local->hw.conf;
 	u32 changed = 0;
 
@@ -2923,10 +1650,9 @@
 
 	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
 
-	ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1);
+	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return;
 	ifsta = &sdata->u.sta;
 
@@ -2934,15 +1660,9 @@
 	    memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
 		return;
 
-	ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
+	ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
 				 elems.wmm_param_len);
 
-	/* Do not send changes to driver if we are scanning. This removes
-	 * requirement that driver's bss_info_changed function needs to be
-	 * atomic. */
-	if (local->sta_sw_scanning || local->sta_hw_scanning)
-		return;
-
 	if (elems.erp_info && elems.erp_info_len >= 1)
 		changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]);
 	else {
@@ -2966,14 +1686,13 @@
 }
 
 
-static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
+static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 					struct ieee80211_if_sta *ifsta,
 					struct ieee80211_mgmt *mgmt,
 					size_t len,
 					struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	int tx_last_beacon;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *resp;
@@ -2984,8 +1703,8 @@
 	DECLARE_MAC_BUF(mac3);
 #endif
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS ||
-	    ifsta->state != IEEE80211_IBSS_JOINED ||
+	if (sdata->vif.type != NL80211_IFTYPE_ADHOC ||
+	    ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
 	    len < 24 + 2 || !ifsta->probe_resp)
 		return;
 
@@ -2997,7 +1716,7 @@
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID="
 	       "%s (tx_last_beacon=%d)\n",
-	       dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da),
+	       sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da),
 	       print_mac(mac3, mgmt->bssid), tx_last_beacon);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
@@ -3015,7 +1734,7 @@
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 		printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
 		       "from %s\n",
-		       dev->name, print_mac(mac, mgmt->sa));
+		       sdata->dev->name, print_mac(mac, mgmt->sa));
 #endif
 		return;
 	}
@@ -3035,74 +1754,15 @@
 	memcpy(resp->da, mgmt->sa, ETH_ALEN);
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n",
-	       dev->name, print_mac(mac, resp->da));
+	       sdata->dev->name, print_mac(mac, resp->da));
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
-	ieee80211_sta_tx(dev, skb, 0);
+	ieee80211_tx_skb(sdata, skb, 0);
 }
 
-static void ieee80211_rx_mgmt_action(struct net_device *dev,
-				     struct ieee80211_if_sta *ifsta,
-				     struct ieee80211_mgmt *mgmt,
-				     size_t len,
-				     struct ieee80211_rx_status *rx_status)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-
-	if (len < IEEE80211_MIN_ACTION_SIZE)
-		return;
-
-	switch (mgmt->u.action.category) {
-	case WLAN_CATEGORY_SPECTRUM_MGMT:
-		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
-			break;
-		switch (mgmt->u.action.u.chan_switch.action_code) {
-		case WLAN_ACTION_SPCT_MSR_REQ:
-			if (len < (IEEE80211_MIN_ACTION_SIZE +
-				   sizeof(mgmt->u.action.u.measurement)))
-				break;
-			ieee80211_sta_process_measurement_req(dev, mgmt, len);
-			break;
-		}
-		break;
-	case WLAN_CATEGORY_BACK:
-		switch (mgmt->u.action.u.addba_req.action_code) {
-		case WLAN_ACTION_ADDBA_REQ:
-			if (len < (IEEE80211_MIN_ACTION_SIZE +
-				   sizeof(mgmt->u.action.u.addba_req)))
-				break;
-			ieee80211_sta_process_addba_request(dev, mgmt, len);
-			break;
-		case WLAN_ACTION_ADDBA_RESP:
-			if (len < (IEEE80211_MIN_ACTION_SIZE +
-				   sizeof(mgmt->u.action.u.addba_resp)))
-				break;
-			ieee80211_sta_process_addba_resp(dev, mgmt, len);
-			break;
-		case WLAN_ACTION_DELBA:
-			if (len < (IEEE80211_MIN_ACTION_SIZE +
-				   sizeof(mgmt->u.action.u.delba)))
-				break;
-			ieee80211_sta_process_delba(dev, mgmt, len);
-			break;
-		}
-		break;
-	case PLINK_CATEGORY:
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			mesh_rx_plink_frame(dev, mgmt, len, rx_status);
-		break;
-	case MESH_PATH_SEL_CATEGORY:
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			mesh_rx_path_sel_frame(dev, mgmt, len);
-		break;
-	}
-}
-
-void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
+void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
 			   struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_sta *ifsta;
 	struct ieee80211_mgmt *mgmt;
 	u16 fc;
@@ -3110,7 +1770,6 @@
 	if (skb->len < 24)
 		goto fail;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	ifsta = &sdata->u.sta;
 
 	mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -3120,7 +1779,6 @@
 	case IEEE80211_STYPE_PROBE_REQ:
 	case IEEE80211_STYPE_PROBE_RESP:
 	case IEEE80211_STYPE_BEACON:
-	case IEEE80211_STYPE_ACTION:
 		memcpy(skb->cb, rx_status, sizeof(*rx_status));
 	case IEEE80211_STYPE_AUTH:
 	case IEEE80211_STYPE_ASSOC_RESP:
@@ -3136,17 +1794,14 @@
 	kfree_skb(skb);
 }
 
-
-static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
+static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *rx_status;
-	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_sta *ifsta;
 	struct ieee80211_mgmt *mgmt;
 	u16 fc;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	ifsta = &sdata->u.sta;
 
 	rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -3155,17 +1810,17 @@
 
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_PROBE_REQ:
-		ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len,
+		ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len,
 					    rx_status);
 		break;
 	case IEEE80211_STYPE_PROBE_RESP:
-		ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
+		ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status);
 		break;
 	case IEEE80211_STYPE_BEACON:
-		ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
+		ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
 		break;
 	case IEEE80211_STYPE_AUTH:
-		ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len);
+		ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len);
 		break;
 	case IEEE80211_STYPE_ASSOC_RESP:
 		ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0);
@@ -3174,13 +1829,10 @@
 		ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1);
 		break;
 	case IEEE80211_STYPE_DEAUTH:
-		ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len);
+		ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len);
 		break;
 	case IEEE80211_STYPE_DISASSOC:
-		ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len);
-		break;
-	case IEEE80211_STYPE_ACTION:
-		ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status);
+		ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len);
 		break;
 	}
 
@@ -3188,47 +1840,11 @@
 }
 
 
-ieee80211_rx_result
-ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
-		      struct ieee80211_rx_status *rx_status)
+static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_mgmt *mgmt;
-	__le16 fc;
-
-	if (skb->len < 2)
-		return RX_DROP_UNUSABLE;
-
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = mgmt->frame_control;
-
-	if (ieee80211_is_ctl(fc))
-		return RX_CONTINUE;
-
-	if (skb->len < 24)
-		return RX_DROP_MONITOR;
-
-	if (ieee80211_is_probe_resp(fc)) {
-		ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
-		dev_kfree_skb(skb);
-		return RX_QUEUED;
-	}
-
-	if (ieee80211_is_beacon(fc)) {
-		ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
-		dev_kfree_skb(skb);
-		return RX_QUEUED;
-	}
-
-	return RX_CONTINUE;
-}
-
-
-static int ieee80211_sta_active_ibss(struct net_device *dev)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	int active = 0;
 	struct sta_info *sta;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	rcu_read_lock();
 
@@ -3247,179 +1863,36 @@
 }
 
 
-static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta, *tmp;
-	LIST_HEAD(tmp_list);
-	DECLARE_MAC_BUF(mac);
-	unsigned long flags;
-
-	spin_lock_irqsave(&local->sta_lock, flags);
-	list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
-		if (time_after(jiffies, sta->last_rx + exp_time)) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-			printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
-			       dev->name, print_mac(mac, sta->addr));
-#endif
-			__sta_info_unlink(&sta);
-			if (sta)
-				list_add(&sta->list, &tmp_list);
-		}
-	spin_unlock_irqrestore(&local->sta_lock, flags);
-
-	list_for_each_entry_safe(sta, tmp, &tmp_list, list)
-		sta_info_destroy(sta);
-}
-
-
-static void ieee80211_sta_merge_ibss(struct net_device *dev,
+static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta)
 {
 	mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
 
-	ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT);
-	if (ieee80211_sta_active_ibss(dev))
+	ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+	if (ieee80211_sta_active_ibss(sdata))
 		return;
 
 	printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
-	       "IBSS networks with same SSID (merge)\n", dev->name);
-	ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len);
+	       "IBSS networks with same SSID (merge)\n", sdata->dev->name);
+	ieee80211_request_scan(sdata, ifsta->ssid, ifsta->ssid_len);
 }
 
 
-#ifdef CONFIG_MAC80211_MESH
-static void ieee80211_mesh_housekeeping(struct net_device *dev,
-			   struct ieee80211_if_sta *ifsta)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	bool free_plinks;
-
-	ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
-	mesh_path_expire(dev);
-
-	free_plinks = mesh_plink_availables(sdata);
-	if (free_plinks != sdata->u.sta.accepting_plinks)
-		ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
-
-	mod_timer(&ifsta->timer, jiffies +
-			IEEE80211_MESH_HOUSEKEEPING_INTERVAL);
-}
-
-
-void ieee80211_start_mesh(struct net_device *dev)
-{
-	struct ieee80211_if_sta *ifsta;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	ifsta = &sdata->u.sta;
-	ifsta->state = IEEE80211_MESH_UP;
-	ieee80211_sta_timer((unsigned long)sdata);
-	ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
-}
-#endif
-
-
-void ieee80211_sta_timer(unsigned long data)
+static void ieee80211_sta_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct ieee80211_local *local = wdev_priv(&sdata->wdev);
+	struct ieee80211_local *local = sdata->local;
 
 	set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
 	queue_work(local->hw.workqueue, &ifsta->work);
 }
 
-void ieee80211_sta_work(struct work_struct *work)
-{
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, u.sta.work);
-	struct net_device *dev = sdata->dev;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_if_sta *ifsta;
-	struct sk_buff *skb;
-
-	if (!netif_running(dev))
-		return;
-
-	if (local->sta_sw_scanning || local->sta_hw_scanning)
-		return;
-
-	if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA &&
-		    sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
-		    sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
-		return;
-	ifsta = &sdata->u.sta;
-
-	while ((skb = skb_dequeue(&ifsta->skb_queue)))
-		ieee80211_sta_rx_queued_mgmt(dev, skb);
-
-#ifdef CONFIG_MAC80211_MESH
-	if (ifsta->preq_queue_len &&
-	    time_after(jiffies,
-		       ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval)))
-		mesh_path_start_discovery(dev);
-#endif
-
-	if (ifsta->state != IEEE80211_AUTHENTICATE &&
-	    ifsta->state != IEEE80211_ASSOCIATE &&
-	    test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
-		if (ifsta->scan_ssid_len)
-			ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len);
-		else
-			ieee80211_sta_start_scan(dev, NULL, 0);
-		return;
-	}
-
-	if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
-		if (ieee80211_sta_config_auth(dev, ifsta))
-			return;
-		clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
-	} else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
-		return;
-
-	switch (ifsta->state) {
-	case IEEE80211_DISABLED:
-		break;
-	case IEEE80211_AUTHENTICATE:
-		ieee80211_authenticate(dev, ifsta);
-		break;
-	case IEEE80211_ASSOCIATE:
-		ieee80211_associate(dev, ifsta);
-		break;
-	case IEEE80211_ASSOCIATED:
-		ieee80211_associated(dev, ifsta);
-		break;
-	case IEEE80211_IBSS_SEARCH:
-		ieee80211_sta_find_ibss(dev, ifsta);
-		break;
-	case IEEE80211_IBSS_JOINED:
-		ieee80211_sta_merge_ibss(dev, ifsta);
-		break;
-#ifdef CONFIG_MAC80211_MESH
-	case IEEE80211_MESH_UP:
-		ieee80211_mesh_housekeeping(dev, ifsta);
-		break;
-#endif
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-	if (ieee80211_privacy_mismatch(dev, ifsta)) {
-		printk(KERN_DEBUG "%s: privacy configuration mismatch and "
-		       "mixed-cell disabled - disassociate\n", dev->name);
-
-		ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED);
-		ieee80211_set_disassoc(dev, ifsta, 0);
-	}
-}
-
-
-static void ieee80211_sta_reset_auth(struct net_device *dev,
+static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 
 	if (local->ops->reset_tsf) {
 		/* Reset own TSF to allow time synchronization work. */
@@ -3439,29 +1912,15 @@
 		ifsta->auth_alg = WLAN_AUTH_OPEN;
 	ifsta->auth_transaction = -1;
 	ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
-	ifsta->auth_tries = ifsta->assoc_tries = 0;
-	netif_carrier_off(dev);
+	ifsta->assoc_scan_tries = 0;
+	ifsta->direct_probe_tries = 0;
+	ifsta->auth_tries = 0;
+	ifsta->assoc_tries = 0;
+	netif_tx_stop_all_queues(sdata->dev);
+	netif_carrier_off(sdata->dev);
 }
 
 
-void ieee80211_sta_req_auth(struct net_device *dev,
-			    struct ieee80211_if_sta *ifsta)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
-		return;
-
-	if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
-				IEEE80211_STA_AUTO_BSSID_SEL)) &&
-	    (ifsta->flags & (IEEE80211_STA_SSID_SET |
-				IEEE80211_STA_AUTO_SSID_SEL))) {
-		set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
-		queue_work(local->hw.workqueue, &ifsta->work);
-	}
-}
-
 static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
 				    const char *ssid, int ssid_len)
 {
@@ -3492,17 +1951,187 @@
 	return 0;
 }
 
-static int ieee80211_sta_config_auth(struct net_device *dev,
+static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_sta_bss *bss, *selected = NULL;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss *bss;
+	struct ieee80211_supported_band *sband;
+	u8 bssid[ETH_ALEN], *pos;
+	int i;
+	int ret;
+	DECLARE_MAC_BUF(mac);
+
+#if 0
+	/* Easier testing, use fixed BSSID. */
+	memset(bssid, 0xfe, ETH_ALEN);
+#else
+	/* Generate a random, not broadcast, locally administered BSSID. Mix
+	 * in our own MAC address to make sure that devices without a proper
+	 * random number generator still get different BSSIDs. */
+	get_random_bytes(bssid, ETH_ALEN);
+	for (i = 0; i < ETH_ALEN; i++)
+		bssid[i] ^= sdata->dev->dev_addr[i];
+	bssid[0] &= ~0x01;
+	bssid[0] |= 0x02;
+#endif
+
+	printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
+	       sdata->dev->name, print_mac(mac, bssid));
+
+	bss = ieee80211_rx_bss_add(local, bssid,
+				   local->hw.conf.channel->center_freq,
+				   sdata->u.sta.ssid, sdata->u.sta.ssid_len);
+	if (!bss)
+		return -ENOMEM;
+
+	bss->band = local->hw.conf.channel->band;
+	sband = local->hw.wiphy->bands[bss->band];
+
+	if (local->hw.conf.beacon_int == 0)
+		local->hw.conf.beacon_int = 100;
+	bss->beacon_int = local->hw.conf.beacon_int;
+	bss->last_update = jiffies;
+	bss->capability = WLAN_CAPABILITY_IBSS;
+
+	if (sdata->default_key)
+		bss->capability |= WLAN_CAPABILITY_PRIVACY;
+	else
+		sdata->drop_unencrypted = 0;
+
+	bss->supp_rates_len = sband->n_bitrates;
+	pos = bss->supp_rates;
+	for (i = 0; i < sband->n_bitrates; i++) {
+		int rate = sband->bitrates[i].bitrate;
+		*pos++ = (u8) (rate / 5);
+	}
+
+	ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
+	ieee80211_rx_bss_put(local, bss);
+	return ret;
+}
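A minimal user-space sketch of the BSSID generation in ieee80211_sta_create_ibss() above: random bytes are mixed with the interface address, then bit 0 of the first octet is cleared (unicast, not group) and bit 1 is set (locally administered). read_urandom() and the example device address are assumptions standing in for get_random_bytes() and the real interface address:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define ETH_ALEN 6

	/* Stand-in for the kernel's get_random_bytes(); not cryptographically sound. */
	static void read_urandom(uint8_t *buf, size_t len)
	{
		for (size_t i = 0; i < len; i++)
			buf[i] = (uint8_t)rand();
	}

	static void make_ibss_bssid(uint8_t bssid[ETH_ALEN],
				    const uint8_t dev_addr[ETH_ALEN])
	{
		read_urandom(bssid, ETH_ALEN);

		/* Mix in the device address so RNG-less devices still differ. */
		for (int i = 0; i < ETH_ALEN; i++)
			bssid[i] ^= dev_addr[i];

		bssid[0] &= ~0x01;	/* clear the group/multicast bit */
		bssid[0] |= 0x02;	/* set the locally administered bit */
	}

	int main(void)
	{
		const uint8_t dev_addr[ETH_ALEN] = { 0x00, 0x1b, 0x2f, 0x01, 0x02, 0x03 };
		uint8_t bssid[ETH_ALEN];

		srand(1);
		make_ibss_bssid(bssid, dev_addr);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
		return 0;
	}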
+
+
+static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
+				   struct ieee80211_if_sta *ifsta)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss *bss;
+	int found = 0;
+	u8 bssid[ETH_ALEN];
+	int active_ibss;
+	DECLARE_MAC_BUF(mac);
+	DECLARE_MAC_BUF(mac2);
+
+	if (ifsta->ssid_len == 0)
+		return -EINVAL;
+
+	active_ibss = ieee80211_sta_active_ibss(sdata);
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+	printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
+	       sdata->dev->name, active_ibss);
+#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+	spin_lock_bh(&local->bss_lock);
+	list_for_each_entry(bss, &local->bss_list, list) {
+		if (ifsta->ssid_len != bss->ssid_len ||
+		    memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0
+		    || !(bss->capability & WLAN_CAPABILITY_IBSS))
+			continue;
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+		printk(KERN_DEBUG "   bssid=%s found\n",
+		       print_mac(mac, bss->bssid));
+#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+		memcpy(bssid, bss->bssid, ETH_ALEN);
+		found = 1;
+		if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0)
+			break;
+	}
+	spin_unlock_bh(&local->bss_lock);
+
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+	if (found)
+		printk(KERN_DEBUG "   sta_find_ibss: selected %s current "
+		       "%s\n", print_mac(mac, bssid),
+		       print_mac(mac2, ifsta->bssid));
+#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+
+	if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
+		int ret;
+		int search_freq;
+
+		if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
+			search_freq = bss->freq;
+		else
+			search_freq = local->hw.conf.channel->center_freq;
+
+		bss = ieee80211_rx_bss_get(local, bssid, search_freq,
+					   ifsta->ssid, ifsta->ssid_len);
+		if (!bss)
+			goto dont_join;
+
+		printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
+		       " based on configured SSID\n",
+		       sdata->dev->name, print_mac(mac, bssid));
+		ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
+		ieee80211_rx_bss_put(local, bss);
+		return ret;
+	}
+
+dont_join:
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+	printk(KERN_DEBUG "   did not try to join ibss\n");
+#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+
+	/* Selected IBSS not found in current scan results - try to scan */
+	if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED &&
+	    !ieee80211_sta_active_ibss(sdata)) {
+		mod_timer(&ifsta->timer, jiffies +
+				      IEEE80211_IBSS_MERGE_INTERVAL);
+	} else if (time_after(jiffies, local->last_scan_completed +
+			      IEEE80211_SCAN_INTERVAL)) {
+		printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
+		       "join\n", sdata->dev->name);
+		return ieee80211_request_scan(sdata, ifsta->ssid,
+					      ifsta->ssid_len);
+	} else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) {
+		int interval = IEEE80211_SCAN_INTERVAL;
+
+		if (time_after(jiffies, ifsta->ibss_join_req +
+			       IEEE80211_IBSS_JOIN_TIMEOUT)) {
+			if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
+			    (!(local->oper_channel->flags &
+					IEEE80211_CHAN_NO_IBSS)))
+				return ieee80211_sta_create_ibss(sdata, ifsta);
+			if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
+				printk(KERN_DEBUG "%s: IBSS not allowed on"
+				       " %d MHz\n", sdata->dev->name,
+				       local->hw.conf.channel->center_freq);
+			}
+
+			/* No IBSS found - decrease scan interval and continue
+			 * scanning. */
+			interval = IEEE80211_SCAN_INTERVAL_SLOW;
+		}
+
+		ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
+		mod_timer(&ifsta->timer, jiffies + interval);
+		return 0;
+	}
+
+	return 0;
+}
+
+
+static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_if_sta *ifsta)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss *bss, *selected = NULL;
 	int top_rssi = 0, freq;
 
-	spin_lock_bh(&local->sta_bss_lock);
+	spin_lock_bh(&local->bss_lock);
 	freq = local->oper_channel->center_freq;
-	list_for_each_entry(bss, &local->sta_bss_list, list) {
+	list_for_each_entry(bss, &local->bss_list, list) {
 		if (!(bss->capability & WLAN_CAPABILITY_ESS))
 			continue;
 
@@ -3532,210 +2161,222 @@
 	}
 	if (selected)
 		atomic_inc(&selected->users);
-	spin_unlock_bh(&local->sta_bss_lock);
+	spin_unlock_bh(&local->bss_lock);
 
 	if (selected) {
-		ieee80211_set_freq(dev, selected->freq);
+		ieee80211_set_freq(sdata, selected->freq);
 		if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
-			ieee80211_sta_set_ssid(dev, selected->ssid,
+			ieee80211_sta_set_ssid(sdata, selected->ssid,
 					       selected->ssid_len);
-		ieee80211_sta_set_bssid(dev, selected->bssid);
-		ieee80211_sta_def_wmm_params(dev, selected, 0);
+		ieee80211_sta_set_bssid(sdata, selected->bssid);
+		ieee80211_sta_def_wmm_params(sdata, selected);
+
+		/* Send out direct probe if no probe resp was received or
+		 * the one we have is outdated
+		 */
+		if (!selected->last_probe_resp ||
+		    time_after(jiffies, selected->last_probe_resp
+					+ IEEE80211_SCAN_RESULT_EXPIRE))
+			ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
+		else
+			ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
+
 		ieee80211_rx_bss_put(local, selected);
-		ifsta->state = IEEE80211_AUTHENTICATE;
-		ieee80211_sta_reset_auth(dev, ifsta);
+		ieee80211_sta_reset_auth(sdata, ifsta);
 		return 0;
 	} else {
-		if (ifsta->state != IEEE80211_AUTHENTICATE) {
+		if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
+			ifsta->assoc_scan_tries++;
 			if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
-				ieee80211_sta_start_scan(dev, NULL, 0);
+				ieee80211_start_scan(sdata, NULL, 0);
 			else
-				ieee80211_sta_start_scan(dev, ifsta->ssid,
+				ieee80211_start_scan(sdata, ifsta->ssid,
 							 ifsta->ssid_len);
-			ifsta->state = IEEE80211_AUTHENTICATE;
+			ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
 			set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
 		} else
-			ifsta->state = IEEE80211_DISABLED;
+			ifsta->state = IEEE80211_STA_MLME_DISABLED;
 	}
 	return -1;
 }
 
 
-static int ieee80211_sta_create_ibss(struct net_device *dev,
-				     struct ieee80211_if_sta *ifsta)
+static void ieee80211_sta_work(struct work_struct *work)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_supported_band *sband;
-	u8 bssid[ETH_ALEN], *pos;
-	int i;
-	int ret;
-	DECLARE_MAC_BUF(mac);
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data, u.sta.work);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_sta *ifsta;
+	struct sk_buff *skb;
 
-#if 0
-	/* Easier testing, use fixed BSSID. */
-	memset(bssid, 0xfe, ETH_ALEN);
-#else
-	/* Generate random, not broadcast, locally administered BSSID. Mix in
-	 * own MAC address to make sure that devices that do not have proper
-	 * random number generator get different BSSID. */
-	get_random_bytes(bssid, ETH_ALEN);
-	for (i = 0; i < ETH_ALEN; i++)
-		bssid[i] ^= dev->dev_addr[i];
-	bssid[0] &= ~0x01;
-	bssid[0] |= 0x02;
+	if (!netif_running(sdata->dev))
+		return;
+
+	if (local->sw_scanning || local->hw_scanning)
+		return;
+
+	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION &&
+		    sdata->vif.type != NL80211_IFTYPE_ADHOC))
+		return;
+	ifsta = &sdata->u.sta;
+
+	while ((skb = skb_dequeue(&ifsta->skb_queue)))
+		ieee80211_sta_rx_queued_mgmt(sdata, skb);
+
+	if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE &&
+	    ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
+	    ifsta->state != IEEE80211_STA_MLME_ASSOCIATE &&
+	    test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
+		ieee80211_start_scan(sdata, ifsta->scan_ssid,
+				     ifsta->scan_ssid_len);
+		return;
+	}
+
+	if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
+		if (ieee80211_sta_config_auth(sdata, ifsta))
+			return;
+		clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
+	} else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
+		return;
+
+	switch (ifsta->state) {
+	case IEEE80211_STA_MLME_DISABLED:
+		break;
+	case IEEE80211_STA_MLME_DIRECT_PROBE:
+		ieee80211_direct_probe(sdata, ifsta);
+		break;
+	case IEEE80211_STA_MLME_AUTHENTICATE:
+		ieee80211_authenticate(sdata, ifsta);
+		break;
+	case IEEE80211_STA_MLME_ASSOCIATE:
+		ieee80211_associate(sdata, ifsta);
+		break;
+	case IEEE80211_STA_MLME_ASSOCIATED:
+		ieee80211_associated(sdata, ifsta);
+		break;
+	case IEEE80211_STA_MLME_IBSS_SEARCH:
+		ieee80211_sta_find_ibss(sdata, ifsta);
+		break;
+	case IEEE80211_STA_MLME_IBSS_JOINED:
+		ieee80211_sta_merge_ibss(sdata, ifsta);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	if (ieee80211_privacy_mismatch(sdata, ifsta)) {
+		printk(KERN_DEBUG "%s: privacy configuration mismatch and "
+		       "mixed-cell disabled - disassociate\n", sdata->dev->name);
+
+		ieee80211_set_disassoc(sdata, ifsta, false, true,
+					WLAN_REASON_UNSPECIFIED);
+	}
+}
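Schematically, ieee80211_sta_work() above drains the queued management frames and then dispatches on the MLME state, with the new IEEE80211_STA_MLME_DIRECT_PROBE step sitting in front of authentication. A compressed, stand-alone sketch of that dispatch with stub handlers (names abridged, illustrative only):

	#include <stdio.h>

	/* MLME states handled by the STA work function above (names abridged). */
	enum sta_mlme_state {
		MLME_DISABLED,
		MLME_DIRECT_PROBE,
		MLME_AUTHENTICATE,
		MLME_ASSOCIATE,
		MLME_ASSOCIATED,
		MLME_IBSS_SEARCH,
		MLME_IBSS_JOINED,
	};

	/* Stub handlers standing in for ieee80211_direct_probe() and friends. */
	static void direct_probe(void) { puts("send direct probe"); }
	static void authenticate(void) { puts("send auth frame"); }
	static void associate(void)    { puts("send assoc request"); }
	static void associated(void)   { puts("monitor the association"); }
	static void ibss_search(void)  { puts("look for an IBSS to join"); }
	static void ibss_merge(void)   { puts("check for IBSS merge"); }

	static void sta_work(enum sta_mlme_state state)
	{
		switch (state) {
		case MLME_DISABLED:      break;
		case MLME_DIRECT_PROBE:  direct_probe();  break;
		case MLME_AUTHENTICATE:  authenticate();  break;
		case MLME_ASSOCIATE:     associate();     break;
		case MLME_ASSOCIATED:    associated();    break;
		case MLME_IBSS_SEARCH:   ibss_search();   break;
		case MLME_IBSS_JOINED:   ibss_merge();    break;
		}
	}

	int main(void)
	{
		sta_work(MLME_DIRECT_PROBE);
		sta_work(MLME_ASSOCIATED);
		return 0;
	}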
+
+static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+{
+	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+		queue_work(sdata->local->hw.workqueue,
+			   &sdata->u.sta.work);
+}
+
+/* interface setup */
+void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_sta *ifsta;
+
+	ifsta = &sdata->u.sta;
+	INIT_WORK(&ifsta->work, ieee80211_sta_work);
+	setup_timer(&ifsta->timer, ieee80211_sta_timer,
+		    (unsigned long) sdata);
+	skb_queue_head_init(&ifsta->skb_queue);
+
+	ifsta->capab = WLAN_CAPABILITY_ESS;
+	ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
+		IEEE80211_AUTH_ALG_SHARED_KEY;
+	ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
+		IEEE80211_STA_AUTO_BSSID_SEL |
+		IEEE80211_STA_AUTO_CHANNEL_SEL;
+	if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
+		ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
+}
+
+/*
+ * Add a new IBSS station, will also be called by the RX code when,
+ * in IBSS mode, receiving a frame from a yet-unknown station, hence
+ * must be callable in atomic context.
+ */
+struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
+					struct sk_buff *skb, u8 *bssid,
+					u8 *addr, u64 supp_rates)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	DECLARE_MAC_BUF(mac);
+	int band = local->hw.conf.channel->band;
+
+	/* TODO: Could consider removing the least recently used entry and
+	 * allowing a new one to be added. */
+	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: No room for a new IBSS STA "
+			       "entry %s\n", sdata->dev->name, print_mac(mac, addr));
+		}
+		return NULL;
+	}
+
+	if (compare_ether_addr(bssid, sdata->u.sta.bssid))
+		return NULL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+	printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
+	       wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name);
 #endif
 
-	printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
-	       dev->name, print_mac(mac, bssid));
+	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+	if (!sta)
+		return NULL;
 
-	bss = ieee80211_rx_bss_add(dev, bssid,
-				   local->hw.conf.channel->center_freq,
-				   sdata->u.sta.ssid, sdata->u.sta.ssid_len);
-	if (!bss)
-		return -ENOMEM;
+	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
 
-	bss->band = local->hw.conf.channel->band;
-	sband = local->hw.wiphy->bands[bss->band];
+	/* make sure mandatory rates are always added */
+	sta->sta.supp_rates[band] = supp_rates |
+			ieee80211_mandatory_rates(local, band);
 
-	if (local->hw.conf.beacon_int == 0)
-		local->hw.conf.beacon_int = 100;
-	bss->beacon_int = local->hw.conf.beacon_int;
-	bss->last_update = jiffies;
-	bss->capability = WLAN_CAPABILITY_IBSS;
+	rate_control_rate_init(sta);
 
-	if (sdata->default_key)
-		bss->capability |= WLAN_CAPABILITY_PRIVACY;
-	else
-		sdata->drop_unencrypted = 0;
+	if (sta_info_insert(sta))
+		return NULL;
 
-	bss->supp_rates_len = sband->n_bitrates;
-	pos = bss->supp_rates;
-	for (i = 0; i < sband->n_bitrates; i++) {
-		int rate = sband->bitrates[i].bitrate;
-		*pos++ = (u8) (rate / 5);
-	}
-
-	ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
-	ieee80211_rx_bss_put(local, bss);
-	return ret;
+	return sta;
 }
 
-
-static int ieee80211_sta_find_ibss(struct net_device *dev,
-				   struct ieee80211_if_sta *ifsta)
+/* configuration hooks */
+void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
+			    struct ieee80211_if_sta *ifsta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sta_bss *bss;
-	int found = 0;
-	u8 bssid[ETH_ALEN];
-	int active_ibss;
-	DECLARE_MAC_BUF(mac);
-	DECLARE_MAC_BUF(mac2);
+	struct ieee80211_local *local = sdata->local;
 
-	if (ifsta->ssid_len == 0)
-		return -EINVAL;
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return;
 
-	active_ibss = ieee80211_sta_active_ibss(dev);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
-	       dev->name, active_ibss);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-	spin_lock_bh(&local->sta_bss_lock);
-	list_for_each_entry(bss, &local->sta_bss_list, list) {
-		if (ifsta->ssid_len != bss->ssid_len ||
-		    memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0
-		    || !(bss->capability & WLAN_CAPABILITY_IBSS))
-			continue;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-		printk(KERN_DEBUG "   bssid=%s found\n",
-		       print_mac(mac, bss->bssid));
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-		memcpy(bssid, bss->bssid, ETH_ALEN);
-		found = 1;
-		if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0)
-			break;
+	if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
+			     IEEE80211_STA_AUTO_BSSID_SEL)) &&
+	    (ifsta->flags & (IEEE80211_STA_SSID_SET |
+			     IEEE80211_STA_AUTO_SSID_SEL))) {
+
+		if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED)
+			ieee80211_set_disassoc(sdata, ifsta, true, true,
+					       WLAN_REASON_DEAUTH_LEAVING);
+
+		set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
+		queue_work(local->hw.workqueue, &ifsta->work);
 	}
-	spin_unlock_bh(&local->sta_bss_lock);
-
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	if (found)
-		printk(KERN_DEBUG "   sta_find_ibss: selected %s current "
-		       "%s\n", print_mac(mac, bssid),
-		       print_mac(mac2, ifsta->bssid));
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-
-	if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
-		int ret;
-		int search_freq;
-
-		if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
-			search_freq = bss->freq;
-		else
-			search_freq = local->hw.conf.channel->center_freq;
-
-		bss = ieee80211_rx_bss_get(dev, bssid, search_freq,
-					   ifsta->ssid, ifsta->ssid_len);
-		if (!bss)
-			goto dont_join;
-
-		printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
-		       " based on configured SSID\n",
-		       dev->name, print_mac(mac, bssid));
-		ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
-		ieee80211_rx_bss_put(local, bss);
-		return ret;
-	}
-
-dont_join:
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
-	printk(KERN_DEBUG "   did not try to join ibss\n");
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-
-	/* Selected IBSS not found in current scan results - try to scan */
-	if (ifsta->state == IEEE80211_IBSS_JOINED &&
-	    !ieee80211_sta_active_ibss(dev)) {
-		mod_timer(&ifsta->timer, jiffies +
-				      IEEE80211_IBSS_MERGE_INTERVAL);
-	} else if (time_after(jiffies, local->last_scan_completed +
-			      IEEE80211_SCAN_INTERVAL)) {
-		printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
-		       "join\n", dev->name);
-		return ieee80211_sta_req_scan(dev, ifsta->ssid,
-					      ifsta->ssid_len);
-	} else if (ifsta->state != IEEE80211_IBSS_JOINED) {
-		int interval = IEEE80211_SCAN_INTERVAL;
-
-		if (time_after(jiffies, ifsta->ibss_join_req +
-			       IEEE80211_IBSS_JOIN_TIMEOUT)) {
-			if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
-			    (!(local->oper_channel->flags &
-					IEEE80211_CHAN_NO_IBSS)))
-				return ieee80211_sta_create_ibss(dev, ifsta);
-			if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
-				printk(KERN_DEBUG "%s: IBSS not allowed on"
-				       " %d MHz\n", dev->name,
-				       local->hw.conf.channel->center_freq);
-			}
-
-			/* No IBSS found - decrease scan interval and continue
-			 * scanning. */
-			interval = IEEE80211_SCAN_INTERVAL_SLOW;
-		}
-
-		ifsta->state = IEEE80211_IBSS_SEARCH;
-		mod_timer(&ifsta->timer, jiffies + interval);
-		return 0;
-	}
-
-	return 0;
 }
 
-
-int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
+int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta;
 	int res;
 
@@ -3759,7 +2400,7 @@
 			res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
 		if (res) {
 			printk(KERN_DEBUG "%s: Failed to config new SSID to "
-			       "the low-level driver\n", dev->name);
+			       "the low-level driver\n", sdata->dev->name);
 			return res;
 		}
 	}
@@ -3769,34 +2410,29 @@
 	else
 		ifsta->flags &= ~IEEE80211_STA_SSID_SET;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
 	    !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
 		ifsta->ibss_join_req = jiffies;
-		ifsta->state = IEEE80211_IBSS_SEARCH;
-		return ieee80211_sta_find_ibss(dev, ifsta);
+		ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
+		return ieee80211_sta_find_ibss(sdata, ifsta);
 	}
 
 	return 0;
 }
 
-
-int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len)
+int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 	memcpy(ssid, ifsta->ssid, ifsta->ssid_len);
 	*len = ifsta->ssid_len;
 	return 0;
 }
 
-
-int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid)
+int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
 {
-	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_sta *ifsta;
 	int res;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	ifsta = &sdata->u.sta;
 
 	if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
@@ -3809,7 +2445,7 @@
 			res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
 		if (res) {
 			printk(KERN_DEBUG "%s: Failed to config new BSSID to "
-			       "the low-level driver\n", dev->name);
+			       "the low-level driver\n", sdata->dev->name);
 			return res;
 		}
 	}
@@ -3822,531 +2458,8 @@
 	return 0;
 }
 
-
-static void ieee80211_send_nullfunc(struct ieee80211_local *local,
-				    struct ieee80211_sub_if_data *sdata,
-				    int powersave)
+int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
 {
-	struct sk_buff *skb;
-	struct ieee80211_hdr *nullfunc;
-	__le16 fc;
-
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
-	if (!skb) {
-		printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
-		       "frame\n", sdata->dev->name);
-		return;
-	}
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
-	memset(nullfunc, 0, 24);
-	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
-			 IEEE80211_FCTL_TODS);
-	if (powersave)
-		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
-	nullfunc->frame_control = fc;
-	memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
-	memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
-	memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
-
-	ieee80211_sta_tx(sdata->dev, skb, 0);
-}
-
-
-static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
-{
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    ieee80211_vif_is_mesh(&sdata->vif))
-		ieee80211_sta_timer((unsigned long)sdata);
-}
-
-void ieee80211_scan_completed(struct ieee80211_hw *hw)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct net_device *dev = local->scan_dev;
-	struct ieee80211_sub_if_data *sdata;
-	union iwreq_data wrqu;
-
-	local->last_scan_completed = jiffies;
-	memset(&wrqu, 0, sizeof(wrqu));
-	wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
-
-	if (local->sta_hw_scanning) {
-		local->sta_hw_scanning = 0;
-		if (ieee80211_hw_config(local))
-			printk(KERN_DEBUG "%s: failed to restore operational "
-			       "channel after scan\n", dev->name);
-		/* Restart STA timer for HW scan case */
-		rcu_read_lock();
-		list_for_each_entry_rcu(sdata, &local->interfaces, list)
-			ieee80211_restart_sta_timer(sdata);
-		rcu_read_unlock();
-
-		goto done;
-	}
-
-	local->sta_sw_scanning = 0;
-	if (ieee80211_hw_config(local))
-		printk(KERN_DEBUG "%s: failed to restore operational "
-		       "channel after scan\n", dev->name);
-
-
-	netif_tx_lock_bh(local->mdev);
-	netif_addr_lock(local->mdev);
-	local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
-	local->ops->configure_filter(local_to_hw(local),
-				     FIF_BCN_PRBRESP_PROMISC,
-				     &local->filter_flags,
-				     local->mdev->mc_count,
-				     local->mdev->mc_list);
-
-	netif_addr_unlock(local->mdev);
-	netif_tx_unlock_bh(local->mdev);
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		/* Tell AP we're back */
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
-		    sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)
-			ieee80211_send_nullfunc(local, sdata, 0);
-
-		ieee80211_restart_sta_timer(sdata);
-
-		netif_wake_queue(sdata->dev);
-	}
-	rcu_read_unlock();
-
-done:
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
-		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-		if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
-		    (!(ifsta->state == IEEE80211_IBSS_JOINED) &&
-		    !ieee80211_sta_active_ibss(dev)))
-			ieee80211_sta_find_ibss(dev, ifsta);
-	}
-}
-EXPORT_SYMBOL(ieee80211_scan_completed);
-
-void ieee80211_sta_scan_work(struct work_struct *work)
-{
-	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local, scan_work.work);
-	struct net_device *dev = local->scan_dev;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *chan;
-	int skip;
-	unsigned long next_delay = 0;
-
-	if (!local->sta_sw_scanning)
-		return;
-
-	switch (local->scan_state) {
-	case SCAN_SET_CHANNEL:
-		/*
-		 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
-		 * after we successfully scanned the last channel of the last
-		 * band (and the last band is supported by the hw)
-		 */
-		if (local->scan_band < IEEE80211_NUM_BANDS)
-			sband = local->hw.wiphy->bands[local->scan_band];
-		else
-			sband = NULL;
-
-		/*
-		 * If we are at an unsupported band and have more bands
-		 * left to scan, advance to the next supported one.
-		 */
-		while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
-			local->scan_band++;
-			sband = local->hw.wiphy->bands[local->scan_band];
-			local->scan_channel_idx = 0;
-		}
-
-		/* if no more bands/channels left, complete scan */
-		if (!sband || local->scan_channel_idx >= sband->n_channels) {
-			ieee80211_scan_completed(local_to_hw(local));
-			return;
-		}
-		skip = 0;
-		chan = &sband->channels[local->scan_channel_idx];
-
-		if (chan->flags & IEEE80211_CHAN_DISABLED ||
-		    (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
-		     chan->flags & IEEE80211_CHAN_NO_IBSS))
-			skip = 1;
-
-		if (!skip) {
-			local->scan_channel = chan;
-			if (ieee80211_hw_config(local)) {
-				printk(KERN_DEBUG "%s: failed to set freq to "
-				       "%d MHz for scan\n", dev->name,
-				       chan->center_freq);
-				skip = 1;
-			}
-		}
-
-		/* advance state machine to next channel/band */
-		local->scan_channel_idx++;
-		if (local->scan_channel_idx >= sband->n_channels) {
-			/*
-			 * scan_band may end up == IEEE80211_NUM_BANDS, but
-			 * we'll catch that case above and complete the scan
-			 * if that is the case.
-			 */
-			local->scan_band++;
-			local->scan_channel_idx = 0;
-		}
-
-		if (skip)
-			break;
-
-		next_delay = IEEE80211_PROBE_DELAY +
-			     usecs_to_jiffies(local->hw.channel_change_time);
-		local->scan_state = SCAN_SEND_PROBE;
-		break;
-	case SCAN_SEND_PROBE:
-		next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
-		local->scan_state = SCAN_SET_CHANNEL;
-
-		if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			break;
-		ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
-					 local->scan_ssid_len);
-		next_delay = IEEE80211_CHANNEL_TIME;
-		break;
-	}
-
-	if (local->sta_sw_scanning)
-		queue_delayed_work(local->hw.workqueue, &local->scan_work,
-				   next_delay);
-}
-
-
-static int ieee80211_sta_start_scan(struct net_device *dev,
-				    u8 *ssid, size_t ssid_len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata;
-
-	if (ssid_len > IEEE80211_MAX_SSID_LEN)
-		return -EINVAL;
-
-	/* MLME-SCAN.request (page 118)  page 144 (11.1.3.1)
-	 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
-	 * BSSID: MACAddress
-	 * SSID
-	 * ScanType: ACTIVE, PASSIVE
-	 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
-	 *    a Probe frame during active scanning
-	 * ChannelList
-	 * MinChannelTime (>= ProbeDelay), in TU
-	 * MaxChannelTime: (>= MinChannelTime), in TU
-	 */
-
-	 /* MLME-SCAN.confirm
-	  * BSSDescriptionSet
-	  * ResultCode: SUCCESS, INVALID_PARAMETERS
-	 */
-
-	if (local->sta_sw_scanning || local->sta_hw_scanning) {
-		if (local->scan_dev == dev)
-			return 0;
-		return -EBUSY;
-	}
-
-	if (local->ops->hw_scan) {
-		int rc = local->ops->hw_scan(local_to_hw(local),
-					     ssid, ssid_len);
-		if (!rc) {
-			local->sta_hw_scanning = 1;
-			local->scan_dev = dev;
-		}
-		return rc;
-	}
-
-	local->sta_sw_scanning = 1;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		netif_stop_queue(sdata->dev);
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
-		    (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED))
-			ieee80211_send_nullfunc(local, sdata, 1);
-	}
-	rcu_read_unlock();
-
-	if (ssid) {
-		local->scan_ssid_len = ssid_len;
-		memcpy(local->scan_ssid, ssid, ssid_len);
-	} else
-		local->scan_ssid_len = 0;
-	local->scan_state = SCAN_SET_CHANNEL;
-	local->scan_channel_idx = 0;
-	local->scan_band = IEEE80211_BAND_2GHZ;
-	local->scan_dev = dev;
-
-	netif_addr_lock_bh(local->mdev);
-	local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
-	local->ops->configure_filter(local_to_hw(local),
-				     FIF_BCN_PRBRESP_PROMISC,
-				     &local->filter_flags,
-				     local->mdev->mc_count,
-				     local->mdev->mc_list);
-	netif_addr_unlock_bh(local->mdev);
-
-	/* TODO: start scan as soon as all nullfunc frames are ACKed */
-	queue_delayed_work(local->hw.workqueue, &local->scan_work,
-			   IEEE80211_CHANNEL_TIME);
-
-	return 0;
-}
-
-
-int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
-		return ieee80211_sta_start_scan(dev, ssid, ssid_len);
-
-	if (local->sta_sw_scanning || local->sta_hw_scanning) {
-		if (local->scan_dev == dev)
-			return 0;
-		return -EBUSY;
-	}
-
-	ifsta->scan_ssid_len = ssid_len;
-	if (ssid_len)
-		memcpy(ifsta->scan_ssid, ssid, ssid_len);
-	set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
-	queue_work(local->hw.workqueue, &ifsta->work);
-	return 0;
-}
-
-static char *
-ieee80211_sta_scan_result(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct ieee80211_sta_bss *bss,
-			  char *current_ev, char *end_buf)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct iw_event iwe;
-
-	if (time_after(jiffies,
-		       bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
-		return current_ev;
-
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = SIOCGIWAP;
-	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-	memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
-	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-					  IW_EV_ADDR_LEN);
-
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = SIOCGIWESSID;
-	if (bss_mesh_cfg(bss)) {
-		iwe.u.data.length = bss_mesh_id_len(bss);
-		iwe.u.data.flags = 1;
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, bss_mesh_id(bss));
-	} else {
-		iwe.u.data.length = bss->ssid_len;
-		iwe.u.data.flags = 1;
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, bss->ssid);
-	}
-
-	if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
-	    || bss_mesh_cfg(bss)) {
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = SIOCGIWMODE;
-		if (bss_mesh_cfg(bss))
-			iwe.u.mode = IW_MODE_MESH;
-		else if (bss->capability & WLAN_CAPABILITY_ESS)
-			iwe.u.mode = IW_MODE_MASTER;
-		else
-			iwe.u.mode = IW_MODE_ADHOC;
-		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
-						  &iwe, IW_EV_UINT_LEN);
-	}
-
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = SIOCGIWFREQ;
-	iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
-	iwe.u.freq.e = 0;
-	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-					  IW_EV_FREQ_LEN);
-
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = SIOCGIWFREQ;
-	iwe.u.freq.m = bss->freq;
-	iwe.u.freq.e = 6;
-	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-					  IW_EV_FREQ_LEN);
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = IWEVQUAL;
-	iwe.u.qual.qual = bss->qual;
-	iwe.u.qual.level = bss->signal;
-	iwe.u.qual.noise = bss->noise;
-	iwe.u.qual.updated = local->wstats_flags;
-	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
-					  IW_EV_QUAL_LEN);
-
-	memset(&iwe, 0, sizeof(iwe));
-	iwe.cmd = SIOCGIWENCODE;
-	if (bss->capability & WLAN_CAPABILITY_PRIVACY)
-		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
-	else
-		iwe.u.data.flags = IW_ENCODE_DISABLED;
-	iwe.u.data.length = 0;
-	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-					  &iwe, "");
-
-	if (bss && bss->wpa_ie) {
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = IWEVGENIE;
-		iwe.u.data.length = bss->wpa_ie_len;
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, bss->wpa_ie);
-	}
-
-	if (bss && bss->rsn_ie) {
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = IWEVGENIE;
-		iwe.u.data.length = bss->rsn_ie_len;
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, bss->rsn_ie);
-	}
-
-	if (bss && bss->ht_ie) {
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = IWEVGENIE;
-		iwe.u.data.length = bss->ht_ie_len;
-		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
-						  &iwe, bss->ht_ie);
-	}
-
-	if (bss && bss->supp_rates_len > 0) {
-		/* display all supported rates in readable format */
-		char *p = current_ev + iwe_stream_lcp_len(info);
-		int i;
-
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = SIOCGIWRATE;
-		/* Those two flags are ignored... */
-		iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
-
-		for (i = 0; i < bss->supp_rates_len; i++) {
-			iwe.u.bitrate.value = ((bss->supp_rates[i] &
-							0x7f) * 500000);
-			p = iwe_stream_add_value(info, current_ev, p,
-					end_buf, &iwe, IW_EV_PARAM_LEN);
-		}
-		current_ev = p;
-	}
-
-	if (bss) {
-		char *buf;
-		buf = kmalloc(30, GFP_ATOMIC);
-		if (buf) {
-			memset(&iwe, 0, sizeof(iwe));
-			iwe.cmd = IWEVCUSTOM;
-			sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			memset(&iwe, 0, sizeof(iwe));
-			iwe.cmd = IWEVCUSTOM;
-			sprintf(buf, " Last beacon: %dms ago",
-				jiffies_to_msecs(jiffies - bss->last_update));
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf, &iwe, buf);
-			kfree(buf);
-		}
-	}
-
-	if (bss_mesh_cfg(bss)) {
-		char *buf;
-		u8 *cfg = bss_mesh_cfg(bss);
-		buf = kmalloc(50, GFP_ATOMIC);
-		if (buf) {
-			memset(&iwe, 0, sizeof(iwe));
-			iwe.cmd = IWEVCUSTOM;
-			sprintf(buf, "Mesh network (version %d)", cfg[0]);
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			sprintf(buf, "Path Selection Protocol ID: "
-				"0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
-							cfg[4]);
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			sprintf(buf, "Path Selection Metric ID: "
-				"0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
-							cfg[8]);
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			sprintf(buf, "Congestion Control Mode ID: "
-				"0x%02X%02X%02X%02X", cfg[9], cfg[10],
-							cfg[11], cfg[12]);
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			sprintf(buf, "Channel Precedence: "
-				"0x%02X%02X%02X%02X", cfg[13], cfg[14],
-							cfg[15], cfg[16]);
-			iwe.u.data.length = strlen(buf);
-			current_ev = iwe_stream_add_point(info, current_ev,
-							  end_buf,
-							  &iwe, buf);
-			kfree(buf);
-		}
-	}
-
-	return current_ev;
-}
-
-
-int ieee80211_sta_scan_results(struct net_device *dev,
-			       struct iw_request_info *info,
-			       char *buf, size_t len)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	char *current_ev = buf;
-	char *end_buf = buf + len;
-	struct ieee80211_sta_bss *bss;
-
-	spin_lock_bh(&local->sta_bss_lock);
-	list_for_each_entry(bss, &local->sta_bss_list, list) {
-		if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
-			spin_unlock_bh(&local->sta_bss_lock);
-			return -E2BIG;
-		}
-		current_ev = ieee80211_sta_scan_result(dev, info, bss,
-						       current_ev, end_buf);
-	}
-	spin_unlock_bh(&local->sta_bss_lock);
-	return current_ev - buf;
-}
-
-
-int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 
 	kfree(ifsta->extra_ie);
@@ -4365,92 +2478,60 @@
 	return 0;
 }
 
-
-struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
-					struct sk_buff *skb, u8 *bssid,
-					u8 *addr, u64 supp_rates)
+int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	DECLARE_MAC_BUF(mac);
-	int band = local->hw.conf.channel->band;
-
-	/* TODO: Could consider removing the least recently used entry and
-	 * allow new one to be added. */
-	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: No room for a new IBSS STA "
-			       "entry %s\n", dev->name, print_mac(mac, addr));
-		}
-		return NULL;
-	}
-
-	if (compare_ether_addr(bssid, sdata->u.sta.bssid))
-		return NULL;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
-	       wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
-#endif
-
-	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
-	if (!sta)
-		return NULL;
-
-	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
-
-	if (supp_rates)
-		sta->supp_rates[band] = supp_rates;
-	else
-		sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
-
-	rate_control_rate_init(sta, local);
-
-	if (sta_info_insert(sta))
-		return NULL;
-
-	return sta;
-}
-
-
-int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason)
-{
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 
 	printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
-	       dev->name, reason);
+	       sdata->dev->name, reason);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
 		return -EINVAL;
 
-	ieee80211_send_deauth(dev, ifsta, reason);
-	ieee80211_set_disassoc(dev, ifsta, 1);
+	ieee80211_set_disassoc(sdata, ifsta, true, true, reason);
 	return 0;
 }
 
-
-int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
+int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 
 	printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
-	       dev->name, reason);
+	       sdata->dev->name, reason);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EINVAL;
 
 	if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED))
 		return -1;
 
-	ieee80211_send_disassoc(dev, ifsta, reason);
-	ieee80211_set_disassoc(dev, ifsta, 0);
+	ieee80211_set_disassoc(sdata, ifsta, false, true, reason);
 	return 0;
 }
 
+/* scan finished notification */
+void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+	struct ieee80211_if_sta *ifsta;
+
+	if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		ifsta = &sdata->u.sta;
+		if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
+		    (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) &&
+		    !ieee80211_sta_active_ibss(sdata)))
+			ieee80211_sta_find_ibss(sdata, ifsta);
+	}
+
+	/* Restart STA timers */
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list)
+		ieee80211_restart_sta_timer(sdata);
+	rcu_read_unlock();
+}
+
+/* driver notification call */
 void ieee80211_notify_mac(struct ieee80211_hw *hw,
 			  enum ieee80211_notification_types  notif_type)
 {
@@ -4461,10 +2542,10 @@
 	case IEEE80211_NOTIFY_RE_ASSOC:
 		rcu_read_lock();
 		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-			if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+			if (sdata->vif.type != NL80211_IFTYPE_STATION)
 				continue;
 
-			ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta);
+			ieee80211_sta_req_auth(sdata, &sdata->u.sta);
 		}
 		rcu_read_unlock();
 		break;
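
The mlme.c hunks above convert the station MLME entry points from taking a struct net_device to taking the struct ieee80211_sub_if_data directly, and the interface type is now checked against the NL80211_IFTYPE_* values. A minimal caller sketch, assuming only what is visible in these hunks (IEEE80211_DEV_TO_SUB_IF() from ieee80211_i.h and the two prototypes above); the wrapper name and the wext constants are illustrative, not code from this patch:

	#include <linux/wireless.h>
	#include "ieee80211_i.h"

	/* Hypothetical caller: resolve the sub-interface once, then use the
	 * sdata-based entry points introduced above. */
	static int sketch_mlme_request(struct net_device *dev, u16 mlme_cmd,
				       u16 reason)
	{
		struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

		if (mlme_cmd == IW_MLME_DEAUTH)
			return ieee80211_sta_deauthenticate(sdata, reason);

		return ieee80211_sta_disassociate(sdata, reason);
	}
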
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 0388c09..5d78672 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -12,6 +12,7 @@
 #include <linux/rtnetlink.h>
 #include "rate.h"
 #include "ieee80211_i.h"
+#include "debugfs.h"
 
 struct rate_control_alg {
 	struct list_head list;
@@ -127,19 +128,46 @@
 	module_put(ops->module);
 }
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+static ssize_t rcname_read(struct file *file, char __user *userbuf,
+			   size_t count, loff_t *ppos)
+{
+	struct rate_control_ref *ref = file->private_data;
+	int len = strlen(ref->ops->name);
+
+	return simple_read_from_buffer(userbuf, count, ppos,
+				       ref->ops->name, len);
+}
+
+static const struct file_operations rcname_ops = {
+	.read = rcname_read,
+	.open = mac80211_open_file_generic,
+};
+#endif
+
 struct rate_control_ref *rate_control_alloc(const char *name,
 					    struct ieee80211_local *local)
 {
+	struct dentry *debugfsdir = NULL;
 	struct rate_control_ref *ref;
 
 	ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
 	if (!ref)
 		goto fail_ref;
 	kref_init(&ref->kref);
+	ref->local = local;
 	ref->ops = ieee80211_rate_control_ops_get(name);
 	if (!ref->ops)
 		goto fail_ops;
-	ref->priv = ref->ops->alloc(local);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
+	local->debugfs.rcdir = debugfsdir;
+	local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir,
+						    ref, &rcname_ops);
+#endif
+
+	ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
 	if (!ref->priv)
 		goto fail_priv;
 	return ref;
@@ -158,29 +186,46 @@
 
 	ctrl_ref = container_of(kref, struct rate_control_ref, kref);
 	ctrl_ref->ops->free(ctrl_ref->priv);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	debugfs_remove(ctrl_ref->local->debugfs.rcname);
+	ctrl_ref->local->debugfs.rcname = NULL;
+	debugfs_remove(ctrl_ref->local->debugfs.rcdir);
+	ctrl_ref->local->debugfs.rcdir = NULL;
+#endif
+
 	ieee80211_rate_control_ops_put(ctrl_ref->ops);
 	kfree(ctrl_ref);
 }
 
-void rate_control_get_rate(struct net_device *dev,
+void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
 			   struct ieee80211_supported_band *sband,
-			   struct sk_buff *skb,
+			   struct sta_info *sta, struct sk_buff *skb,
 			   struct rate_selection *sel)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct rate_control_ref *ref = local->rate_ctrl;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct sta_info *sta;
+	struct rate_control_ref *ref = sdata->local->rate_ctrl;
+	void *priv_sta = NULL;
+	struct ieee80211_sta *ista = NULL;
 	int i;
 
-	rcu_read_lock();
-	sta = sta_info_get(local, hdr->addr1);
-
 	sel->rate_idx = -1;
 	sel->nonerp_idx = -1;
 	sel->probe_idx = -1;
+	sel->max_rate_idx = sdata->max_ratectrl_rateidx;
 
-	ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
+	if (sta) {
+		ista = &sta->sta;
+		priv_sta = sta->rate_ctrl_priv;
+	}
+
+	if (sta && sdata->force_unicast_rateidx > -1)
+		sel->rate_idx = sdata->force_unicast_rateidx;
+	else
+		ref->ops->get_rate(ref->priv, sband, ista, priv_sta, skb, sel);
+
+	if (sdata->max_ratectrl_rateidx > -1 &&
+	    sel->rate_idx > sdata->max_ratectrl_rateidx)
+		sel->rate_idx = sdata->max_ratectrl_rateidx;
 
 	BUG_ON(sel->rate_idx < 0);
 
@@ -191,13 +236,11 @@
 			if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate)
 				break;
 
-			if (rate_supported(sta, sband->band, i) &&
+			if (rate_supported(ista, sband->band, i) &&
 			    !(rate->flags & IEEE80211_RATE_ERP_G))
 				sel->nonerp_idx = i;
 		}
 	}
-
-	rcu_read_unlock();
 }
 
 struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
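
rate_control_get_rate() now wraps the algorithm callback with the per-interface rate policy: a forced unicast rate index overrides the algorithm's choice, and the result is clamped to max_ratectrl_rateidx. A reduced, self-contained sketch of just that policy; the helper name and the plain-int parameters are stand-ins for the sdata fields used above:

	#include <linux/types.h>

	/* Reduced sketch of the policy applied around ops->get_rate() above.
	 * force_idx and max_idx mirror sdata->force_unicast_rateidx and
	 * sdata->max_ratectrl_rateidx; algo_idx is the algorithm's choice. */
	static int sketch_apply_rate_policy(int algo_idx, int force_idx,
					    int max_idx, bool have_sta)
	{
		int idx = (have_sta && force_idx > -1) ? force_idx : algo_idx;

		if (max_idx > -1 && idx > max_idx)
			idx = max_idx;

		return idx;
	}
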
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index ede7ab5..eb94e58 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -19,77 +19,48 @@
 #include "ieee80211_i.h"
 #include "sta_info.h"
 
-/**
- * struct rate_selection - rate selection for rate control algos
- * @rate: selected transmission rate index
- * @nonerp: Non-ERP rate to use instead if ERP cannot be used
- * @probe: rate for probing (or -1)
- *
- */
-struct rate_selection {
-	s8 rate_idx, nonerp_idx, probe_idx;
-};
-
-struct rate_control_ops {
-	struct module *module;
-	const char *name;
-	void (*tx_status)(void *priv, struct net_device *dev,
-			  struct sk_buff *skb);
-	void (*get_rate)(void *priv, struct net_device *dev,
-			 struct ieee80211_supported_band *band,
-			 struct sk_buff *skb,
-			 struct rate_selection *sel);
-	void (*rate_init)(void *priv, void *priv_sta,
-			  struct ieee80211_local *local, struct sta_info *sta);
-	void (*clear)(void *priv);
-
-	void *(*alloc)(struct ieee80211_local *local);
-	void (*free)(void *priv);
-	void *(*alloc_sta)(void *priv, gfp_t gfp);
-	void (*free_sta)(void *priv, void *priv_sta);
-
-	int (*add_attrs)(void *priv, struct kobject *kobj);
-	void (*remove_attrs)(void *priv, struct kobject *kobj);
-	void (*add_sta_debugfs)(void *priv, void *priv_sta,
-				struct dentry *dir);
-	void (*remove_sta_debugfs)(void *priv, void *priv_sta);
-};
-
 struct rate_control_ref {
+	struct ieee80211_local *local;
 	struct rate_control_ops *ops;
 	void *priv;
 	struct kref kref;
 };
 
-int ieee80211_rate_control_register(struct rate_control_ops *ops);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
-
 /* Get a reference to the rate control algorithm. If `name' is NULL, get the
  * first available algorithm. */
 struct rate_control_ref *rate_control_alloc(const char *name,
 					    struct ieee80211_local *local);
-void rate_control_get_rate(struct net_device *dev,
+void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
 			   struct ieee80211_supported_band *sband,
-			   struct sk_buff *skb,
+			   struct sta_info *sta, struct sk_buff *skb,
 			   struct rate_selection *sel);
 struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
 void rate_control_put(struct rate_control_ref *ref);
 
-static inline void rate_control_tx_status(struct net_device *dev,
+static inline void rate_control_tx_status(struct ieee80211_local *local,
+					  struct ieee80211_supported_band *sband,
+					  struct sta_info *sta,
 					  struct sk_buff *skb)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct rate_control_ref *ref = local->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
 
-	ref->ops->tx_status(ref->priv, dev, skb);
+	ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
 }
 
 
-static inline void rate_control_rate_init(struct sta_info *sta,
-					  struct ieee80211_local *local)
+static inline void rate_control_rate_init(struct sta_info *sta)
 {
+	struct ieee80211_local *local = sta->sdata->local;
 	struct rate_control_ref *ref = sta->rate_ctrl;
-	ref->ops->rate_init(ref->priv, sta->rate_ctrl_priv, local, sta);
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+	struct ieee80211_supported_band *sband;
+
+	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+
+	ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
 }
 
 
@@ -100,15 +71,19 @@
 }
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
+					   struct ieee80211_sta *sta,
 					   gfp_t gfp)
 {
-	return ref->ops->alloc_sta(ref->priv, gfp);
+	return ref->ops->alloc_sta(ref->priv, sta, gfp);
 }
 
-static inline void rate_control_free_sta(struct rate_control_ref *ref,
-					 void *priv)
+static inline void rate_control_free_sta(struct sta_info *sta)
 {
-	ref->ops->free_sta(ref->priv, priv);
+	struct rate_control_ref *ref = sta->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+
+	ref->ops->free_sta(ref->priv, ista, priv_sta);
 }
 
 static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
@@ -130,31 +105,6 @@
 #endif
 }
 
-static inline int rate_supported(struct sta_info *sta,
-				 enum ieee80211_band band,
-				 int index)
-{
-	return (sta == NULL || sta->supp_rates[band] & BIT(index));
-}
-
-static inline s8
-rate_lowest_index(struct ieee80211_local *local,
-		  struct ieee80211_supported_band *sband,
-		  struct sta_info *sta)
-{
-	int i;
-
-	for (i = 0; i < sband->n_bitrates; i++)
-		if (rate_supported(sta, sband->band, i))
-			return i;
-
-	/* warn when we cannot find a rate. */
-	WARN_ON(1);
-
-	return 0;
-}
-
-
 /* functions for rate control related to a device */
 int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
 				 const char *name);
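
With struct rate_control_ops, struct rate_selection and rate_lowest_index() removed from rate.h (presumably moved to include/net/mac80211.h where drivers can see them), the inline wrappers above show the new callback shapes: every per-station hook receives the public struct ieee80211_sta plus the algorithm's own priv_sta pointer. A minimal skeleton under those signatures, assuming only the callback shapes visible in this file; all sketch_* names are illustrative:

	#include <net/mac80211.h>
	#include <linux/slab.h>

	struct sketch_sta_priv {
		int txrate_idx;		/* per-station state lives here now */
	};

	static void *sketch_alloc_sta(void *priv, struct ieee80211_sta *sta,
				      gfp_t gfp)
	{
		return kzalloc(sizeof(struct sketch_sta_priv), gfp);
	}

	static void sketch_free_sta(void *priv, struct ieee80211_sta *sta,
				    void *priv_sta)
	{
		kfree(priv_sta);
	}

	static void sketch_rate_init(void *priv,
				     struct ieee80211_supported_band *sband,
				     struct ieee80211_sta *sta, void *priv_sta)
	{
		struct sketch_sta_priv *sp = priv_sta;

		sp->txrate_idx = rate_lowest_index(sband, sta);
	}

	static void sketch_get_rate(void *priv,
				    struct ieee80211_supported_band *sband,
				    struct ieee80211_sta *sta, void *priv_sta,
				    struct sk_buff *skb,
				    struct rate_selection *sel)
	{
		struct sketch_sta_priv *sp = priv_sta;

		sel->rate_idx = sp ? sp->txrate_idx
				   : rate_lowest_index(sband, sta);
	}
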
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 0a9135b..01d64d5 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -124,7 +124,6 @@
  * struct rc_pid_debugfs_entries - tunable parameters
  *
  * Algorithm parameters, tunable via debugfs.
- * @dir: the debugfs directory for a specific phy
  * @target: target percentage for failed frames
  * @sampling_period: error sampling interval in milliseconds
  * @coeff_p: absolute value of the proportional coefficient
@@ -143,7 +142,6 @@
  *	ordering of rates)
  */
 struct rc_pid_debugfs_entries {
-	struct dentry *dir;
 	struct dentry *target;
 	struct dentry *sampling_period;
 	struct dentry *coeff_p;
@@ -180,6 +178,8 @@
 	u32 tx_num_failed;
 	u32 tx_num_xmit;
 
+	int txrate_idx;
+
 	/* Average failed frames percentage error (i.e. actual vs. target
 	 * percentage), scaled by RC_PID_SMOOTHING. This value is computed
 	 * using an exponential weighted average technique:
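
The txrate_idx field added above replaces the copy that used to live in struct sta_info, so the PID state is now fully per-station and private to the algorithm. The sampling code in rc80211_pid_algo.c below smooths the per-station failed-frames percentage as ((pf + (last_pf << 3)) / 9), i.e. roughly one part new sample to eight parts history once the RC_PID_ARITH_SHIFT scaling cancels out. A tiny worked sketch of that expression:

	#include <linux/types.h>

	/* Mirrors the fail_avg expression used in rate_control_pid_sample()
	 * below, with the fixed-point shift omitted for readability. */
	static u32 sketch_fail_avg(u32 pf, u32 last_pf)
	{
		return (pf + (last_pf << 3)) / 9;	/* 1 part new, 8 parts old */
	}

	/* e.g. last_pf = 10 (%), pf = 100 (%)  ->  (100 + 80) / 9 = 20 (%) */
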
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a914ba7..86eb374 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -68,17 +68,14 @@
  * exhibited a worse failed frames behaviour and we'll choose the highest rate
  * whose failed frames behaviour is not worse than the one of the original rate
  * target. While at it, check that the new rate is valid. */
-static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
-					 struct sta_info *sta, int adj,
+static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband,
+					 struct ieee80211_sta *sta,
+					 struct rc_pid_sta_info *spinfo, int adj,
 					 struct rc_pid_rateinfo *rinfo)
 {
-	struct ieee80211_sub_if_data *sdata;
-	struct ieee80211_supported_band *sband;
 	int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
-	int cur = sta->txrate_idx;
+	int cur = spinfo->txrate_idx;
 
-	sdata = sta->sdata;
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 	band = sband->band;
 	n_bitrates = sband->n_bitrates;
 
@@ -111,7 +108,7 @@
 	/* Fit the rate found to the nearest supported rate. */
 	do {
 		if (rate_supported(sta, band, rinfo[tmp].index)) {
-			sta->txrate_idx = rinfo[tmp].index;
+			spinfo->txrate_idx = rinfo[tmp].index;
 			break;
 		}
 		if (adj < 0)
@@ -121,9 +118,9 @@
 	} while (tmp < n_bitrates && tmp >= 0);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-	rate_control_pid_event_rate_change(
-		&((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events,
-		sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate);
+	rate_control_pid_event_rate_change(&spinfo->events,
+		spinfo->txrate_idx,
+		sband->bitrates[spinfo->txrate_idx].bitrate);
 #endif
 }
 
@@ -145,15 +142,11 @@
 }
 
 static void rate_control_pid_sample(struct rc_pid_info *pinfo,
-				    struct ieee80211_local *local,
-				    struct sta_info *sta)
+				    struct ieee80211_supported_band *sband,
+				    struct ieee80211_sta *sta,
+				    struct rc_pid_sta_info *spinfo)
 {
-#ifdef CONFIG_MAC80211_MESH
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-#endif
-	struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv;
 	struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
-	struct ieee80211_supported_band *sband;
 	u32 pf;
 	s32 err_avg;
 	u32 err_prop;
@@ -162,9 +155,6 @@
 	int adj, i, j, tmp;
 	unsigned long period;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	spinfo = sta->rate_ctrl_priv;
-
 	/* In case nothing happened during the previous control interval, turn
 	 * the sharpening factor on. */
 	period = (HZ * pinfo->sampling_period + 500) / 1000;
@@ -180,14 +170,15 @@
 	if (unlikely(spinfo->tx_num_xmit == 0))
 		pf = spinfo->last_pf;
 	else {
+		/* XXX: BAD HACK!!! */
+		struct sta_info *si = container_of(sta, struct sta_info, sta);
+
 		pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
-#ifdef CONFIG_MAC80211_MESH
-		if (pf == 100 &&
-		    sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
-			mesh_plink_broken(sta);
-#endif
+
+		if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100)
+			mesh_plink_broken(si);
 		pf <<= RC_PID_ARITH_SHIFT;
-		sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
+		si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
 					>> RC_PID_ARITH_SHIFT;
 	}
 
@@ -195,16 +186,16 @@
 	spinfo->tx_num_failed = 0;
 
 	/* If we just switched rate, update the rate behaviour info. */
-	if (pinfo->oldrate != sta->txrate_idx) {
+	if (pinfo->oldrate != spinfo->txrate_idx) {
 
 		i = rinfo[pinfo->oldrate].rev_index;
-		j = rinfo[sta->txrate_idx].rev_index;
+		j = rinfo[spinfo->txrate_idx].rev_index;
 
 		tmp = (pf - spinfo->last_pf);
 		tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
 
 		rinfo[j].diff = rinfo[i].diff + tmp;
-		pinfo->oldrate = sta->txrate_idx;
+		pinfo->oldrate = spinfo->txrate_idx;
 	}
 	rate_control_pid_normalize(pinfo, sband->n_bitrates);
 
@@ -233,43 +224,26 @@
 
 	/* Change rate. */
 	if (adj)
-		rate_control_pid_adjust_rate(local, sta, adj, rinfo);
+		rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo);
 }
 
-static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
+static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband,
+				       struct ieee80211_sta *sta, void *priv_sta,
 				       struct sk_buff *skb)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_sub_if_data *sdata;
 	struct rc_pid_info *pinfo = priv;
-	struct sta_info *sta;
-	struct rc_pid_sta_info *spinfo;
+	struct rc_pid_sta_info *spinfo = priv_sta;
 	unsigned long period;
-	struct ieee80211_supported_band *sband;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-	rcu_read_lock();
-
-	sta = sta_info_get(local, hdr->addr1);
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
-	if (!sta)
-		goto unlock;
-
-	/* Don't update the state if we're not controlling the rate. */
-	sdata = sta->sdata;
-	if (sdata->force_unicast_rateidx > -1) {
-		sta->txrate_idx = sdata->max_ratectrl_rateidx;
-		goto unlock;
-	}
+	if (!spinfo)
+		return;
 
 	/* Ignore all frames that were sent with a different rate than the rate
 	 * we currently advise mac80211 to use. */
-	if (info->tx_rate_idx != sta->txrate_idx)
-		goto unlock;
+	if (info->tx_rate_idx != spinfo->txrate_idx)
+		return;
 
-	spinfo = sta->rate_ctrl_priv;
 	spinfo->tx_num_xmit++;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -287,93 +261,68 @@
 		spinfo->tx_num_xmit++;
 	}
 
-	if (info->status.excessive_retries) {
-		sta->tx_retry_failed++;
-		sta->tx_num_consecutive_failures++;
-		sta->tx_num_mpdu_fail++;
-	} else {
-		sta->tx_num_consecutive_failures = 0;
-		sta->tx_num_mpdu_ok++;
-	}
-	sta->tx_retry_count += info->status.retry_count;
-	sta->tx_num_mpdu_fail += info->status.retry_count;
-
 	/* Update PID controller state. */
 	period = (HZ * pinfo->sampling_period + 500) / 1000;
 	if (!period)
 		period = 1;
 	if (time_after(jiffies, spinfo->last_sample + period))
-		rate_control_pid_sample(pinfo, local, sta);
-
- unlock:
-	rcu_read_unlock();
+		rate_control_pid_sample(pinfo, sband, sta, spinfo);
 }
 
-static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
-				      struct ieee80211_supported_band *sband,
-				      struct sk_buff *skb,
-				      struct rate_selection *sel)
+static void
+rate_control_pid_get_rate(void *priv, struct ieee80211_supported_band *sband,
+			  struct ieee80211_sta *sta, void *priv_sta,
+			  struct sk_buff *skb,
+			  struct rate_selection *sel)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_sub_if_data *sdata;
-	struct sta_info *sta;
+	struct rc_pid_sta_info *spinfo = priv_sta;
 	int rateidx;
 	u16 fc;
 
-	rcu_read_lock();
-
-	sta = sta_info_get(local, hdr->addr1);
-
 	/* Send management frames and broadcast/multicast data using lowest
 	 * rate. */
 	fc = le16_to_cpu(hdr->frame_control);
-	if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
-	    is_multicast_ether_addr(hdr->addr1) || !sta) {
-		sel->rate_idx = rate_lowest_index(local, sband, sta);
-		rcu_read_unlock();
+	if (!sta || !spinfo ||
+	    (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
+	    is_multicast_ether_addr(hdr->addr1)) {
+		sel->rate_idx = rate_lowest_index(sband, sta);
 		return;
 	}
 
-	/* If a forced rate is in effect, select it. */
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->force_unicast_rateidx > -1)
-		sta->txrate_idx = sdata->force_unicast_rateidx;
-
-	rateidx = sta->txrate_idx;
+	rateidx = spinfo->txrate_idx;
 
 	if (rateidx >= sband->n_bitrates)
 		rateidx = sband->n_bitrates - 1;
 
-	sta->last_txrate_idx = rateidx;
-
-	rcu_read_unlock();
-
 	sel->rate_idx = rateidx;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-	rate_control_pid_event_tx_rate(
-		&((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events,
+	rate_control_pid_event_tx_rate(&spinfo->events,
 		rateidx, sband->bitrates[rateidx].bitrate);
 #endif
 }
 
-static void rate_control_pid_rate_init(void *priv, void *priv_sta,
-					  struct ieee80211_local *local,
-					  struct sta_info *sta)
+static void
+rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
+			   struct ieee80211_sta *sta, void *priv_sta)
 {
+	struct rc_pid_sta_info *spinfo = priv_sta;
+	struct sta_info *si;
+
 	/* TODO: This routine should consider using RSSI from previous packets
 	 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
 	 * Until that method is implemented, we will use the lowest supported
 	 * rate as a workaround. */
-	struct ieee80211_supported_band *sband;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	sta->txrate_idx = rate_lowest_index(local, sband, sta);
-	sta->fail_avg = 0;
+	spinfo->txrate_idx = rate_lowest_index(sband, sta);
+	/* HACK */
+	si = container_of(sta, struct sta_info, sta);
+	si->fail_avg = 0;
 }
 
-static void *rate_control_pid_alloc(struct ieee80211_local *local)
+static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
+				    struct dentry *debugfsdir)
 {
 	struct rc_pid_info *pinfo;
 	struct rc_pid_rateinfo *rinfo;
@@ -384,7 +333,7 @@
 	struct rc_pid_debugfs_entries *de;
 #endif
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = hw->wiphy->bands[hw->conf.channel->band];
 
 	pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
 	if (!pinfo)
@@ -439,30 +388,28 @@
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	de = &pinfo->dentries;
-	de->dir = debugfs_create_dir("rc80211_pid",
-				     local->hw.wiphy->debugfsdir);
 	de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
-					de->dir, &pinfo->target);
+					debugfsdir, &pinfo->target);
 	de->sampling_period = debugfs_create_u32("sampling_period",
-						 S_IRUSR | S_IWUSR, de->dir,
+						 S_IRUSR | S_IWUSR, debugfsdir,
 						 &pinfo->sampling_period);
 	de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR,
-					 de->dir, &pinfo->coeff_p);
+					 debugfsdir, &pinfo->coeff_p);
 	de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR,
-					 de->dir, &pinfo->coeff_i);
+					 debugfsdir, &pinfo->coeff_i);
 	de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR,
-					 de->dir, &pinfo->coeff_d);
+					 debugfsdir, &pinfo->coeff_d);
 	de->smoothing_shift = debugfs_create_u32("smoothing_shift",
-						 S_IRUSR | S_IWUSR, de->dir,
+						 S_IRUSR | S_IWUSR, debugfsdir,
 						 &pinfo->smoothing_shift);
 	de->sharpen_factor = debugfs_create_u32("sharpen_factor",
-					       S_IRUSR | S_IWUSR, de->dir,
+					       S_IRUSR | S_IWUSR, debugfsdir,
 					       &pinfo->sharpen_factor);
 	de->sharpen_duration = debugfs_create_u32("sharpen_duration",
-						  S_IRUSR | S_IWUSR, de->dir,
+						  S_IRUSR | S_IWUSR, debugfsdir,
 						  &pinfo->sharpen_duration);
 	de->norm_offset = debugfs_create_u32("norm_offset",
-					     S_IRUSR | S_IWUSR, de->dir,
+					     S_IRUSR | S_IWUSR, debugfsdir,
 					     &pinfo->norm_offset);
 #endif
 
@@ -484,7 +431,6 @@
 	debugfs_remove(de->coeff_p);
 	debugfs_remove(de->sampling_period);
 	debugfs_remove(de->target);
-	debugfs_remove(de->dir);
 #endif
 
 	kfree(pinfo->rinfo);
@@ -495,7 +441,8 @@
 {
 }
 
-static void *rate_control_pid_alloc_sta(void *priv, gfp_t gfp)
+static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta,
+					gfp_t gfp)
 {
 	struct rc_pid_sta_info *spinfo;
 
@@ -513,10 +460,10 @@
 	return spinfo;
 }
 
-static void rate_control_pid_free_sta(void *priv, void *priv_sta)
+static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
+				      void *priv_sta)
 {
-	struct rc_pid_sta_info *spinfo = priv_sta;
-	kfree(spinfo);
+	kfree(priv_sta);
 }
 
 static struct rate_control_ops mac80211_rcpid = {
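
After these hunks the PID algorithm keeps its state in rc_pid_sta_info and only reaches back into struct sta_info through the explicitly marked hacks. A reduced sketch of the decision rate_control_pid_get_rate() now makes; the helper name and the boolean parameter are illustrative:

	#include <linux/kernel.h>	/* min() */

	/* Management and multicast frames, or frames without per-station
	 * state, fall back to the lowest supported rate; everything else
	 * uses the per-station txrate_idx, clamped to the band's rate count. */
	static int sketch_pid_pick_rate(struct ieee80211_supported_band *sband,
					struct ieee80211_sta *sta,
					struct rc_pid_sta_info *spinfo,
					bool unicast_data)
	{
		if (!sta || !spinfo || !unicast_data)
			return rate_lowest_index(sband, sta);

		return min(spinfo->txrate_idx, sband->n_bitrates - 1);
	}
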
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6db8545..77e7b01 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,6 +143,8 @@
 	/* IEEE80211_RADIOTAP_FLAGS */
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
 		*pos |= IEEE80211_RADIOTAP_F_FCS;
+	if (status->flag & RX_FLAG_SHORTPRE)
+		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
 	pos++;
 
 	/* IEEE80211_RADIOTAP_RATE */
@@ -155,8 +157,11 @@
 	if (status->band == IEEE80211_BAND_5GHZ)
 		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
 					     IEEE80211_CHAN_5GHZ);
+	else if (rate->flags & IEEE80211_RATE_ERP_G)
+		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
+					     IEEE80211_CHAN_2GHZ);
 	else
-		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN |
+		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
 					     IEEE80211_CHAN_2GHZ);
 	pos += 2;
 
@@ -290,7 +295,7 @@
 		if (!netif_running(sdata->dev))
 			continue;
 
-		if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
+		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
 			continue;
 
 		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
@@ -398,12 +403,12 @@
 	struct ieee80211_local *local = rx->local;
 	struct sk_buff *skb = rx->skb;
 
-	if (unlikely(local->sta_hw_scanning))
-		return ieee80211_sta_rx_scan(rx->dev, skb, rx->status);
+	if (unlikely(local->hw_scanning))
+		return ieee80211_scan_rx(rx->sdata, skb, rx->status);
 
-	if (unlikely(local->sta_sw_scanning)) {
+	if (unlikely(local->sw_scanning)) {
 		/* drop all the other packets during a software scan anyway */
-		if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status)
+		if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
 		    != RX_QUEUED)
 			dev_kfree_skb(skb);
 		return RX_QUEUED;
@@ -461,7 +466,7 @@
 
 	if (ieee80211_is_data(hdr->frame_control) &&
 	    is_multicast_ether_addr(hdr->addr1) &&
-	    mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev))
+	    mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
 		return RX_DROP_MONITOR;
 #undef msh_h_get
 
@@ -496,8 +501,8 @@
 	/* Drop disallowed frame classes based on STA auth/assoc state;
 	 * IEEE 802.11, Chap 5.5.
 	 *
-	 * 80211.o does filtering only based on association state, i.e., it
-	 * drops Class 3 frames from not associated stations. hostapd sends
+	 * mac80211 filters only based on association state, i.e. it drops
+	 * Class 3 frames from not associated stations. hostapd sends
 	 * deauth/disassoc frames when needed. In addition, hostapd is
 	 * responsible for filtering on both auth and assoc states.
 	 */
@@ -507,7 +512,7 @@
 
 	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
 		      ieee80211_is_pspoll(hdr->frame_control)) &&
-		     rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
+		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
 		     (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
 		if ((!ieee80211_has_fromds(hdr->frame_control) &&
 		     !ieee80211_has_tods(hdr->frame_control) &&
@@ -645,32 +650,28 @@
 	return result;
 }
 
-static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
+static void ap_sta_ps_start(struct sta_info *sta)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	DECLARE_MAC_BUF(mac);
 
-	sdata = sta->sdata;
-
 	atomic_inc(&sdata->bss->num_sta_ps);
 	set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 	printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
-	       dev->name, print_mac(mac, sta->addr), sta->aid);
+	       sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 }
 
-static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
+static int ap_sta_ps_end(struct sta_info *sta)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	int sent = 0;
-	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_tx_info *info;
 	DECLARE_MAC_BUF(mac);
 
-	sdata = sta->sdata;
-
 	atomic_dec(&sdata->bss->num_sta_ps);
 
 	clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
@@ -680,7 +681,7 @@
 
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 	printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
-	       dev->name, print_mac(mac, sta->addr), sta->aid);
+	       sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 
 	/* Send all buffered frames to the station */
@@ -696,8 +697,8 @@
 		sent++;
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
-		       "since STA not sleeping anymore\n", dev->name,
-		       print_mac(mac, sta->addr), sta->aid);
+		       "since STA not sleeping anymore\n", sdata->dev->name,
+		       print_mac(mac, sta->sta.addr), sta->sta.aid);
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 		info->flags |= IEEE80211_TX_CTL_REQUEUE;
 		dev_queue_xmit(skb);
@@ -710,7 +711,6 @@
 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 {
 	struct sta_info *sta = rx->sta;
-	struct net_device *dev = rx->dev;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 
 	if (!sta)
@@ -719,14 +719,14 @@
 	/* Update last_rx only for IBSS packets which are for the current
 	 * BSSID to avoid keeping the current IBSS network alive in cases where
 	 * other STAs are using different BSSID. */
-	if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
-						IEEE80211_IF_TYPE_IBSS);
+						NL80211_IFTYPE_ADHOC);
 		if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
 			sta->last_rx = jiffies;
 	} else
 	if (!is_multicast_ether_addr(hdr->addr1) ||
-	    rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) {
+	    rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
 		/* Update last_rx only for unicast frames in order to prevent
 		 * the Probe Request frames (the only broadcast frames from a
 		 * STA in infrastructure mode) from keeping a connection alive.
@@ -746,16 +746,16 @@
 	sta->last_noise = rx->status->noise;
 
 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
-	    (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP ||
-	     rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) {
+	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
+	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
 		/* Change STA power saving mode only in the end of a frame
 		 * exchange sequence */
 		if (test_sta_flags(sta, WLAN_STA_PS) &&
 		    !ieee80211_has_pm(hdr->frame_control))
-			rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
+			rx->sent_ps_buffered += ap_sta_ps_end(sta);
 		else if (!test_sta_flags(sta, WLAN_STA_PS) &&
 			 ieee80211_has_pm(hdr->frame_control))
-			ap_sta_ps_start(dev, sta);
+			ap_sta_ps_start(sta);
 	}
 
 	/* Drop data::nullfunc frames silently, since they are used only to
@@ -816,7 +816,7 @@
 
 static inline struct ieee80211_fragment_entry *
 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
-			  u16 fc, unsigned int frag, unsigned int seq,
+			  unsigned int frag, unsigned int seq,
 			  int rx_queue, struct ieee80211_hdr *hdr)
 {
 	struct ieee80211_fragment_entry *entry;
@@ -825,7 +825,6 @@
 	idx = sdata->fragment_next;
 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
 		struct ieee80211_hdr *f_hdr;
-		u16 f_fc;
 
 		idx--;
 		if (idx < 0)
@@ -837,10 +836,13 @@
 		    entry->last_frag + 1 != frag)
 			continue;
 
-		f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data;
-		f_fc = le16_to_cpu(f_hdr->frame_control);
+		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
 
-		if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) ||
+		/*
+		 * Check ftype and addresses are equal, else check next fragment
+		 */
+		if (((hdr->frame_control ^ f_hdr->frame_control) &
+		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
 		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
 		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
 			continue;
@@ -860,16 +862,18 @@
 {
 	struct ieee80211_hdr *hdr;
 	u16 sc;
+	__le16 fc;
 	unsigned int frag, seq;
 	struct ieee80211_fragment_entry *entry;
 	struct sk_buff *skb;
 	DECLARE_MAC_BUF(mac);
 
-	hdr = (struct ieee80211_hdr *) rx->skb->data;
+	hdr = (struct ieee80211_hdr *)rx->skb->data;
+	fc = hdr->frame_control;
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	frag = sc & IEEE80211_SCTL_FRAG;
 
-	if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) ||
+	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
 		   (rx->skb)->len < 24 ||
 		   is_multicast_ether_addr(hdr->addr1))) {
 		/* not fragmented */
@@ -884,7 +888,7 @@
 		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
 						 rx->queue, &(rx->skb));
 		if (rx->key && rx->key->conf.alg == ALG_CCMP &&
-		    (rx->fc & IEEE80211_FCTL_PROTECTED)) {
+		    ieee80211_has_protected(fc)) {
 			/* Store CCMP PN so that we can verify that the next
 			 * fragment has a sequential PN value. */
 			entry->ccmp = 1;
@@ -898,8 +902,7 @@
 	/* This is a fragment for a frame that should already be pending in
 	 * fragment cache. Add this fragment to the end of the pending entry.
 	 */
-	entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq,
-					  rx->queue, hdr);
+	entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
 	if (!entry) {
 		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
 		return RX_DROP_MONITOR;
@@ -924,11 +927,11 @@
 		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
 	}
 
-	skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc));
+	skb_pull(rx->skb, ieee80211_hdrlen(fc));
 	__skb_queue_tail(&entry->skb_list, rx->skb);
 	entry->last_frag = frag;
 	entry->extra_len += rx->skb->len;
-	if (rx->fc & IEEE80211_FCTL_MOREFRAGS) {
+	if (ieee80211_has_morefrags(fc)) {
 		rx->skb = NULL;
 		return RX_QUEUED;
 	}
@@ -968,15 +971,14 @@
 	struct sk_buff *skb;
 	int no_pending_pkts;
 	DECLARE_MAC_BUF(mac);
+	__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
 
-	if (likely(!rx->sta ||
-		   (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
-		   (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
+	if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
 		   !(rx->flags & IEEE80211_RX_RA_MATCH)))
 		return RX_CONTINUE;
 
-	if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
-	    (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
+	if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
+	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
 		return RX_DROP_UNUSABLE;
 
 	skb = skb_dequeue(&rx->sta->tx_filtered);
@@ -1000,7 +1002,7 @@
 
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
-		       print_mac(mac, rx->sta->addr), rx->sta->aid,
+		       print_mac(mac, rx->sta->sta.addr), rx->sta->sta.aid,
 		       skb_queue_len(&rx->sta->ps_tx_buf));
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 
@@ -1025,7 +1027,7 @@
 		 */
 		printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
 		       "though there are no buffered frames for it\n",
-		       rx->dev->name, print_mac(mac, rx->sta->addr));
+		       rx->dev->name, print_mac(mac, rx->sta->sta.addr));
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 	}
 
@@ -1050,7 +1052,6 @@
 		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
 	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
 	/* change frame type to non QOS */
-	rx->fc &= ~IEEE80211_STYPE_QOS_DATA;
 	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 
 	return RX_CONTINUE;
@@ -1067,7 +1068,7 @@
 }
 
 static int
-ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx)
+ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 {
 	/*
 	 * Pass through unencrypted frames if the hardware has
@@ -1077,9 +1078,8 @@
 		return 0;
 
 	/* Drop unencrypted frames if key is set. */
-	if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) &&
-		     (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
-		     (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
+	if (unlikely(!ieee80211_has_protected(fc) &&
+		     !ieee80211_is_nullfunc(fc) &&
 		     (rx->key || rx->sdata->drop_unencrypted)))
 		return -EACCES;
 
@@ -1091,7 +1091,7 @@
 {
 	struct net_device *dev = rx->dev;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
-	u16 fc, hdrlen, ethertype;
+	u16 hdrlen, ethertype;
 	u8 *payload;
 	u8 dst[ETH_ALEN];
 	u8 src[ETH_ALEN] __aligned(2);
@@ -1102,16 +1102,10 @@
 	DECLARE_MAC_BUF(mac3);
 	DECLARE_MAC_BUF(mac4);
 
-	fc = rx->fc;
-
-	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
 		return -1;
 
-	hdrlen = ieee80211_get_hdrlen(fc);
-
-	if (ieee80211_vif_is_mesh(&sdata->vif))
-		hdrlen += ieee80211_get_mesh_hdrlen(
-				(struct ieee80211s_hdr *) (skb->data + hdrlen));
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
 	/* convert IEEE 802.11 header + possible LLC headers into Ethernet
 	 * header
@@ -1122,42 +1116,38 @@
 	 *   1     0   BSSID SA    DA    n/a
 	 *   1     1   RA    TA    DA    SA
 	 */
+	memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
+	memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
 
-	switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
-	case IEEE80211_FCTL_TODS:
-		/* BSSID SA DA */
-		memcpy(dst, hdr->addr3, ETH_ALEN);
-		memcpy(src, hdr->addr2, ETH_ALEN);
-
-		if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
-			     sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
+	switch (hdr->frame_control &
+		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+	case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
+		if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
+			     sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
 			return -1;
 		break;
-	case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
-		/* RA TA DA SA */
-		memcpy(dst, hdr->addr3, ETH_ALEN);
-		memcpy(src, hdr->addr4, ETH_ALEN);
-
-		 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
-			     sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
+	case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
+		if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
+			     sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
 			return -1;
+		if (ieee80211_vif_is_mesh(&sdata->vif)) {
+			struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
+				(skb->data + hdrlen);
+			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+			if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+				memcpy(dst, meshdr->eaddr1, ETH_ALEN);
+				memcpy(src, meshdr->eaddr2, ETH_ALEN);
+			}
+		}
 		break;
-	case IEEE80211_FCTL_FROMDS:
-		/* DA BSSID SA */
-		memcpy(dst, hdr->addr1, ETH_ALEN);
-		memcpy(src, hdr->addr3, ETH_ALEN);
-
-		if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
+	case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
+		if (sdata->vif.type != NL80211_IFTYPE_STATION ||
 		    (is_multicast_ether_addr(dst) &&
 		     !compare_ether_addr(src, dev->dev_addr)))
 			return -1;
 		break;
-	case 0:
-		/* DA SA BSSID */
-		memcpy(dst, hdr->addr1, ETH_ALEN);
-		memcpy(src, hdr->addr2, ETH_ALEN);
-
-		if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
+	case __constant_cpu_to_le16(0):
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
 			return -1;
 		break;
 	}
@@ -1193,7 +1183,7 @@
 /*
  * requires that rx->skb is a frame with ethernet header
  */
-static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
+static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
 {
 	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
 		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
@@ -1209,7 +1199,7 @@
 		return true;
 
 	if (ieee80211_802_1x_port_control(rx) ||
-	    ieee80211_drop_unencrypted(rx))
+	    ieee80211_drop_unencrypted(rx, fc))
 		return false;
 
 	return true;
@@ -1231,8 +1221,9 @@
 	skb = rx->skb;
 	xmit_skb = NULL;
 
-	if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
-				      sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
+	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
+	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
 	    (rx->flags & IEEE80211_RX_RA_MATCH)) {
 		if (is_multicast_ether_addr(ehdr->h_dest)) {
 			/*
@@ -1279,20 +1270,21 @@
 {
 	struct net_device *dev = rx->dev;
 	struct ieee80211_local *local = rx->local;
-	u16 fc, ethertype;
+	u16 ethertype;
 	u8 *payload;
 	struct sk_buff *skb = rx->skb, *frame = NULL;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	__le16 fc = hdr->frame_control;
 	const struct ethhdr *eth;
 	int remaining, err;
 	u8 dst[ETH_ALEN];
 	u8 src[ETH_ALEN];
 	DECLARE_MAC_BUF(mac);
 
-	fc = rx->fc;
-	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
+	if (unlikely(!ieee80211_is_data(fc)))
 		return RX_CONTINUE;
 
-	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
+	if (unlikely(!ieee80211_is_data_present(fc)))
 		return RX_DROP_MONITOR;
 
 	if (!(rx->flags & IEEE80211_RX_AMSDU))
@@ -1374,7 +1366,7 @@
 			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
 		}
 
-		if (!ieee80211_frame_allowed(rx)) {
+		if (!ieee80211_frame_allowed(rx, fc)) {
 			if (skb == frame) /* last frame */
 				return RX_DROP_UNUSABLE;
 			dev_kfree_skb(frame);
@@ -1387,7 +1379,7 @@
 	return RX_QUEUED;
 }
 
-static ieee80211_rx_result debug_noinline
+static ieee80211_rx_result
 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_hdr *hdr;
@@ -1406,6 +1398,25 @@
 		/* illegal frame */
 		return RX_DROP_MONITOR;
 
+	if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
+		struct ieee80211_sub_if_data *sdata;
+		struct mesh_path *mppath;
+
+		sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
+		rcu_read_lock();
+		mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
+		if (!mppath) {
+			mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
+		} else {
+			spin_lock_bh(&mppath->state_lock);
+			mppath->exp_time = jiffies;
+			if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
+				memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
+			spin_unlock_bh(&mppath->state_lock);
+		}
+		rcu_read_unlock();
+	}
+
 	if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
 		return RX_CONTINUE;
 
@@ -1413,7 +1424,7 @@
 
 	if (rx->flags & IEEE80211_RX_RA_MATCH) {
 		if (!mesh_hdr->ttl)
-			IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta,
+			IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
 						     dropped_frames_ttl);
 		else {
 			struct ieee80211_hdr *fwd_hdr;
@@ -1448,21 +1459,21 @@
 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 {
 	struct net_device *dev = rx->dev;
-	u16 fc;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+	__le16 fc = hdr->frame_control;
 	int err;
 
-	fc = rx->fc;
-	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
+	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
 		return RX_CONTINUE;
 
-	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
 		return RX_DROP_MONITOR;
 
 	err = ieee80211_data_to_8023(rx);
 	if (unlikely(err))
 		return RX_DROP_UNUSABLE;
 
-	if (!ieee80211_frame_allowed(rx))
+	if (!ieee80211_frame_allowed(rx, fc))
 		return RX_DROP_MONITOR;
 
 	rx->skb->dev = dev;
@@ -1520,22 +1531,97 @@
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_local *local = rx->local;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+	int len = rx->skb->len;
+
+	if (!ieee80211_is_action(mgmt->frame_control))
+		return RX_CONTINUE;
+
+	if (!rx->sta)
+		return RX_DROP_MONITOR;
 
 	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
 		return RX_DROP_MONITOR;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
-	if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	     sdata->vif.type == IEEE80211_IF_TYPE_IBSS ||
-	     sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) &&
-	    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
-		ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status);
-	else
+	/* all categories we currently handle have action_code */
+	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 		return RX_DROP_MONITOR;
 
+	/*
+	 * FIXME: revisit this, I'm sure we should handle most
+	 *	  of these frames in other modes as well!
+	 */
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
+		return RX_CONTINUE;
+
+	switch (mgmt->u.action.category) {
+	case WLAN_CATEGORY_BACK:
+		switch (mgmt->u.action.u.addba_req.action_code) {
+		case WLAN_ACTION_ADDBA_REQ:
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.addba_req)))
+				return RX_DROP_MONITOR;
+			ieee80211_process_addba_request(local, rx->sta, mgmt, len);
+			break;
+		case WLAN_ACTION_ADDBA_RESP:
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.addba_resp)))
+				return RX_DROP_MONITOR;
+			ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
+			break;
+		case WLAN_ACTION_DELBA:
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.delba)))
+				return RX_DROP_MONITOR;
+			ieee80211_process_delba(sdata, rx->sta, mgmt, len);
+			break;
+		}
+		break;
+	case WLAN_CATEGORY_SPECTRUM_MGMT:
+		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+			return RX_DROP_MONITOR;
+		switch (mgmt->u.action.u.measurement.action_code) {
+		case WLAN_ACTION_SPCT_MSR_REQ:
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.measurement)))
+				return RX_DROP_MONITOR;
+			ieee80211_process_measurement_req(sdata, mgmt, len);
+			break;
+		}
+		break;
+	default:
+		return RX_CONTINUE;
+	}
+
+	rx->sta->rx_packets++;
+	dev_kfree_skb(rx->skb);
+	return RX_QUEUED;
+}
+
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
+
+	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
+		return RX_DROP_MONITOR;
+
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
+		return RX_DROP_MONITOR;
+
+	if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
+		return RX_DROP_MONITOR;
+
+	ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
 	return RX_QUEUED;
 }
 
@@ -1565,7 +1651,7 @@
 	if (!ieee80211_has_protected(hdr->frame_control))
 		goto ignore;
 
-	if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
+	if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
 		/*
 		 * APs with pairwise keys should never receive Michael MIC
 		 * errors for non-zero keyidx because these are reserved for
@@ -1579,7 +1665,7 @@
 	    !ieee80211_is_auth(hdr->frame_control))
 		goto ignore;
 
-	mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr);
+	mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
  ignore:
 	dev_kfree_skb(rx->skb);
 	rx->skb = NULL;
@@ -1635,7 +1721,7 @@
 		if (!netif_running(sdata->dev))
 			continue;
 
-		if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
+		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
 		    !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
 			continue;
 
@@ -1698,6 +1784,7 @@
 		CALL_RXH(ieee80211_rx_h_mesh_fwding);
 	CALL_RXH(ieee80211_rx_h_data)
 	CALL_RXH(ieee80211_rx_h_ctrl)
+	CALL_RXH(ieee80211_rx_h_action)
 	CALL_RXH(ieee80211_rx_h_mgmt)
 
 #undef CALL_RXH
@@ -1733,7 +1820,7 @@
 	int multicast = is_multicast_ether_addr(hdr->addr1);
 
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		if (!bssid)
 			return 0;
 		if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1748,14 +1835,10 @@
 			rx->flags &= ~IEEE80211_RX_RA_MATCH;
 		}
 		break;
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		if (!bssid)
 			return 0;
 		if (ieee80211_is_beacon(hdr->frame_control)) {
-			if (!rx->sta)
-				rx->sta = ieee80211_ibss_add_sta(sdata->dev,
-						rx->skb, bssid, hdr->addr2,
-						BIT(rx->status->rate_idx));
 			return 1;
 		}
 		else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1769,11 +1852,11 @@
 				return 0;
 			rx->flags &= ~IEEE80211_RX_RA_MATCH;
 		} else if (!rx->sta)
-			rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
+			rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
 						bssid, hdr->addr2,
 						BIT(rx->status->rate_idx));
 		break;
-	case IEEE80211_IF_TYPE_MESH_POINT:
+	case NL80211_IFTYPE_MESH_POINT:
 		if (!multicast &&
 		    compare_ether_addr(sdata->dev->dev_addr,
 				       hdr->addr1) != 0) {
@@ -1783,8 +1866,8 @@
 			rx->flags &= ~IEEE80211_RX_RA_MATCH;
 		}
 		break;
-	case IEEE80211_IF_TYPE_VLAN:
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_AP:
 		if (!bssid) {
 			if (compare_ether_addr(sdata->dev->dev_addr,
 					       hdr->addr1))
@@ -1796,16 +1879,17 @@
 			rx->flags &= ~IEEE80211_RX_RA_MATCH;
 		}
 		break;
-	case IEEE80211_IF_TYPE_WDS:
+	case NL80211_IFTYPE_WDS:
 		if (bssid || !ieee80211_is_data(hdr->frame_control))
 			return 0;
 		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
 			return 0;
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		/* take everything */
 		break;
-	case IEEE80211_IF_TYPE_INVALID:
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case __NL80211_IFTYPE_AFTER_LAST:
 		/* should never get here */
 		WARN_ON(1);
 		break;
@@ -1827,23 +1911,20 @@
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_rx_data rx;
-	u16 type;
 	int prepares;
 	struct ieee80211_sub_if_data *prev = NULL;
 	struct sk_buff *skb_new;
 	u8 *bssid;
 
-	hdr = (struct ieee80211_hdr *) skb->data;
+	hdr = (struct ieee80211_hdr *)skb->data;
 	memset(&rx, 0, sizeof(rx));
 	rx.skb = skb;
 	rx.local = local;
 
 	rx.status = status;
 	rx.rate = rate;
-	rx.fc = le16_to_cpu(hdr->frame_control);
-	type = rx.fc & IEEE80211_FCTL_FTYPE;
 
-	if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
+	if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
 		local->dot11ReceivedFragmentCount++;
 
 	rx.sta = sta_info_get(local, hdr->addr2);
@@ -1857,7 +1938,7 @@
 		return;
 	}
 
-	if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
+	if (unlikely(local->sw_scanning || local->hw_scanning))
 		rx.flags |= IEEE80211_RX_IN_SCAN;
 
 	ieee80211_parse_qos(&rx);
@@ -1869,7 +1950,7 @@
 		if (!netif_running(sdata->dev))
 			continue;
 
-		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR)
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
 			continue;
 
 		bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
@@ -1904,14 +1985,12 @@
 				       prev->dev->name);
 			continue;
 		}
-		rx.fc = le16_to_cpu(hdr->frame_control);
 		ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
 		prev = sdata;
 	}
-	if (prev) {
-		rx.fc = le16_to_cpu(hdr->frame_control);
+	if (prev)
 		ieee80211_invoke_rx_handlers(prev, &rx, skb);
-	} else
+	else
 		dev_kfree_skb(skb);
 }
 
@@ -2080,7 +2159,7 @@
 	/* if this mpdu is fragmented - terminate rx aggregation session */
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	if (sc & IEEE80211_SCTL_FRAG) {
-		ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
+		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
 			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
 		ret = 1;
 		goto end_reorder;
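The CALL_RXH chain added in the rx.c hunk above runs one received frame through a fixed list of handlers and stops as soon as a handler returns anything other than "continue". A stand-alone sketch of that dispatch pattern (the enum values and handler names here are assumptions for illustration, not the mac80211 ones):

#include <stdio.h>

enum rx_result { RX_CONTINUE, RX_DROP, RX_QUEUED };

struct rx_data { int seen_mgmt; };

static enum rx_result rx_h_check(struct rx_data *rx)
{
	(void)rx;			/* nothing to filter in this sketch */
	return RX_CONTINUE;
}

static enum rx_result rx_h_action(struct rx_data *rx)
{
	(void)rx;			/* no action frames handled here */
	return RX_CONTINUE;
}

static enum rx_result rx_h_mgmt(struct rx_data *rx)
{
	rx->seen_mgmt = 1;		/* claim the frame */
	return RX_QUEUED;
}

static enum rx_result invoke_rx_handlers(struct rx_data *rx)
{
	enum rx_result res;

#define CALL_RXH(f)			\
	do {				\
		res = f(rx);		\
		if (res != RX_CONTINUE)	\
			goto out;	\
	} while (0)

	CALL_RXH(rx_h_check);
	CALL_RXH(rx_h_action);
	CALL_RXH(rx_h_mgmt);
#undef CALL_RXH
 out:
	return res;
}

int main(void)
{
	struct rx_data rx = { 0 };

	printf("result=%d seen_mgmt=%d\n",
	       invoke_rx_handlers(&rx), rx.seen_mgmt);
	return 0;
}

The early-exit macro keeps the handler list readable and makes it cheap to slot a new handler (such as ieee80211_rx_h_action above) into the middle of the chain.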
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
new file mode 100644
index 0000000..8e6685e
--- /dev/null
+++ b/net/mac80211/scan.c
@@ -0,0 +1,937 @@
+/*
+ * Scanning implementation
+ *
+ * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2005, Devicescape Software, Inc.
+ * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* TODO:
+ * order BSS list by RSSI(?) ("quality of AP")
+ * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
+ *    SSID)
+ */
+
+#include <linux/wireless.h>
+#include <linux/if_arp.h>
+#include <net/mac80211.h>
+#include <net/iw_handler.h>
+
+#include "ieee80211_i.h"
+#include "mesh.h"
+
+#define IEEE80211_PROBE_DELAY (HZ / 33)
+#define IEEE80211_CHANNEL_TIME (HZ / 33)
+#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
+
+void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
+{
+	spin_lock_init(&local->bss_lock);
+	INIT_LIST_HEAD(&local->bss_list);
+}
+
+void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
+{
+	struct ieee80211_bss *bss, *tmp;
+
+	list_for_each_entry_safe(bss, tmp, &local->bss_list, list)
+		ieee80211_rx_bss_put(local, bss);
+}
+
+struct ieee80211_bss *
+ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
+		     u8 *ssid, u8 ssid_len)
+{
+	struct ieee80211_bss *bss;
+
+	spin_lock_bh(&local->bss_lock);
+	bss = local->bss_hash[STA_HASH(bssid)];
+	while (bss) {
+		if (!bss_mesh_cfg(bss) &&
+		    !memcmp(bss->bssid, bssid, ETH_ALEN) &&
+		    bss->freq == freq &&
+		    bss->ssid_len == ssid_len &&
+		    (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
+			atomic_inc(&bss->users);
+			break;
+		}
+		bss = bss->hnext;
+	}
+	spin_unlock_bh(&local->bss_lock);
+	return bss;
+}
+
+/* Caller must hold local->bss_lock */
+static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local,
+					struct ieee80211_bss *bss)
+{
+	u8 hash_idx;
+
+	if (bss_mesh_cfg(bss))
+		hash_idx = mesh_id_hash(bss_mesh_id(bss),
+					bss_mesh_id_len(bss));
+	else
+		hash_idx = STA_HASH(bss->bssid);
+
+	bss->hnext = local->bss_hash[hash_idx];
+	local->bss_hash[hash_idx] = bss;
+}
+
+/* Caller must hold local->bss_lock */
+static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
+					struct ieee80211_bss *bss)
+{
+	struct ieee80211_bss *b, *prev = NULL;
+	b = local->bss_hash[STA_HASH(bss->bssid)];
+	while (b) {
+		if (b == bss) {
+			if (!prev)
+				local->bss_hash[STA_HASH(bss->bssid)] =
+					bss->hnext;
+			else
+				prev->hnext = bss->hnext;
+			break;
+		}
+		prev = b;
+		b = b->hnext;
+	}
+}
+
+struct ieee80211_bss *
+ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
+		     u8 *ssid, u8 ssid_len)
+{
+	struct ieee80211_bss *bss;
+
+	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
+	if (!bss)
+		return NULL;
+	atomic_set(&bss->users, 2);
+	memcpy(bss->bssid, bssid, ETH_ALEN);
+	bss->freq = freq;
+	if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
+		memcpy(bss->ssid, ssid, ssid_len);
+		bss->ssid_len = ssid_len;
+	}
+
+	spin_lock_bh(&local->bss_lock);
+	/* TODO: order by RSSI? */
+	list_add_tail(&bss->list, &local->bss_list);
+	__ieee80211_rx_bss_hash_add(local, bss);
+	spin_unlock_bh(&local->bss_lock);
+	return bss;
+}
+
+#ifdef CONFIG_MAC80211_MESH
+static struct ieee80211_bss *
+ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
+			  u8 *mesh_cfg, int freq)
+{
+	struct ieee80211_bss *bss;
+
+	spin_lock_bh(&local->bss_lock);
+	bss = local->bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
+	while (bss) {
+		if (bss_mesh_cfg(bss) &&
+		    !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
+		    bss->freq == freq &&
+		    mesh_id_len == bss->mesh_id_len &&
+		    (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
+						 mesh_id_len))) {
+			atomic_inc(&bss->users);
+			break;
+		}
+		bss = bss->hnext;
+	}
+	spin_unlock_bh(&local->bss_lock);
+	return bss;
+}
+
+static struct ieee80211_bss *
+ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
+			  u8 *mesh_cfg, int mesh_config_len, int freq)
+{
+	struct ieee80211_bss *bss;
+
+	if (mesh_config_len != MESH_CFG_LEN)
+		return NULL;
+
+	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
+	if (!bss)
+		return NULL;
+
+	bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
+	if (!bss->mesh_cfg) {
+		kfree(bss);
+		return NULL;
+	}
+
+	if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
+		bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
+		if (!bss->mesh_id) {
+			kfree(bss->mesh_cfg);
+			kfree(bss);
+			return NULL;
+		}
+		memcpy(bss->mesh_id, mesh_id, mesh_id_len);
+	}
+
+	atomic_set(&bss->users, 2);
+	memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
+	bss->mesh_id_len = mesh_id_len;
+	bss->freq = freq;
+	spin_lock_bh(&local->bss_lock);
+	/* TODO: order by RSSI? */
+	list_add_tail(&bss->list, &local->bss_list);
+	__ieee80211_rx_bss_hash_add(local, bss);
+	spin_unlock_bh(&local->bss_lock);
+	return bss;
+}
+#endif
+
+static void ieee80211_rx_bss_free(struct ieee80211_bss *bss)
+{
+	kfree(bss->ies);
+	kfree(bss_mesh_id(bss));
+	kfree(bss_mesh_cfg(bss));
+	kfree(bss);
+}
+
+void ieee80211_rx_bss_put(struct ieee80211_local *local,
+			  struct ieee80211_bss *bss)
+{
+	local_bh_disable();
+	if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) {
+		local_bh_enable();
+		return;
+	}
+
+	__ieee80211_rx_bss_hash_del(local, bss);
+	list_del(&bss->list);
+	spin_unlock_bh(&local->bss_lock);
+	ieee80211_rx_bss_free(bss);
+}
+
+struct ieee80211_bss *
+ieee80211_bss_info_update(struct ieee80211_local *local,
+			  struct ieee80211_rx_status *rx_status,
+			  struct ieee80211_mgmt *mgmt,
+			  size_t len,
+			  struct ieee802_11_elems *elems,
+			  int freq, bool beacon)
+{
+	struct ieee80211_bss *bss;
+	int clen;
+
+#ifdef CONFIG_MAC80211_MESH
+	if (elems->mesh_config)
+		bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id,
+				elems->mesh_id_len, elems->mesh_config, freq);
+	else
+#endif
+		bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq,
+					   elems->ssid, elems->ssid_len);
+	if (!bss) {
+#ifdef CONFIG_MAC80211_MESH
+		if (elems->mesh_config)
+			bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id,
+				elems->mesh_id_len, elems->mesh_config,
+				elems->mesh_config_len, freq);
+		else
+#endif
+			bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq,
+						  elems->ssid, elems->ssid_len);
+		if (!bss)
+			return NULL;
+	} else {
+#if 0
+		/* TODO: order by RSSI? */
+		spin_lock_bh(&local->bss_lock);
+		list_move_tail(&bss->list, &local->bss_list);
+		spin_unlock_bh(&local->bss_lock);
+#endif
+	}
+
+	/* save the ERP value so that it is available at association time */
+	if (elems->erp_info && elems->erp_info_len >= 1) {
+		bss->erp_value = elems->erp_info[0];
+		bss->has_erp_value = 1;
+	}
+
+	bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
+	bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
+
+	if (elems->tim) {
+		struct ieee80211_tim_ie *tim_ie =
+			(struct ieee80211_tim_ie *)elems->tim;
+		bss->dtim_period = tim_ie->dtim_period;
+	}
+
+	/* set default value for buggy APs */
+	if (!elems->tim || bss->dtim_period == 0)
+		bss->dtim_period = 1;
+
+	bss->supp_rates_len = 0;
+	if (elems->supp_rates) {
+		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+		if (clen > elems->supp_rates_len)
+			clen = elems->supp_rates_len;
+		memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
+		       clen);
+		bss->supp_rates_len += clen;
+	}
+	if (elems->ext_supp_rates) {
+		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+		if (clen > elems->ext_supp_rates_len)
+			clen = elems->ext_supp_rates_len;
+		memcpy(&bss->supp_rates[bss->supp_rates_len],
+		       elems->ext_supp_rates, clen);
+		bss->supp_rates_len += clen;
+	}
+
+	bss->band = rx_status->band;
+
+	bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
+	bss->last_update = jiffies;
+	bss->signal = rx_status->signal;
+	bss->noise = rx_status->noise;
+	bss->qual = rx_status->qual;
+	bss->wmm_used = elems->wmm_param || elems->wmm_info;
+
+	if (!beacon)
+		bss->last_probe_resp = jiffies;
+
+	/*
+	 * For probe responses, or when we have no stored information
+	 * yet, take the IEs from this received frame.
+	 */
+	if (!bss->ies || !beacon) {
+		if (bss->ies == NULL || bss->ies_len < elems->total_len) {
+			kfree(bss->ies);
+			bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
+		}
+		if (bss->ies) {
+			memcpy(bss->ies, elems->ie_start, elems->total_len);
+			bss->ies_len = elems->total_len;
+		} else
+			bss->ies_len = 0;
+	}
+
+	return bss;
+}
+
+ieee80211_rx_result
+ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		  struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_bss *bss;
+	u8 *elements;
+	struct ieee80211_channel *channel;
+	size_t baselen;
+	int freq;
+	__le16 fc;
+	bool presp, beacon = false;
+	struct ieee802_11_elems elems;
+
+	if (skb->len < 2)
+		return RX_DROP_UNUSABLE;
+
+	mgmt = (struct ieee80211_mgmt *) skb->data;
+	fc = mgmt->frame_control;
+
+	if (ieee80211_is_ctl(fc))
+		return RX_CONTINUE;
+
+	if (skb->len < 24)
+		return RX_DROP_MONITOR;
+
+	presp = ieee80211_is_probe_resp(fc);
+	if (presp) {
+		/* ignore ProbeResp to foreign address */
+		if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+			return RX_DROP_MONITOR;
+
+		presp = true;
+		elements = mgmt->u.probe_resp.variable;
+		baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+	} else {
+		beacon = ieee80211_is_beacon(fc);
+		baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+		elements = mgmt->u.beacon.variable;
+	}
+
+	if (!presp && !beacon)
+		return RX_CONTINUE;
+
+	if (baselen > skb->len)
+		return RX_DROP_MONITOR;
+
+	ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
+
+	if (elems.ds_params && elems.ds_params_len == 1)
+		freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+	else
+		freq = rx_status->freq;
+
+	channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+
+	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+		return RX_DROP_MONITOR;
+
+	bss = ieee80211_bss_info_update(sdata->local, rx_status,
+					mgmt, skb->len, &elems,
+					freq, beacon);
+	if (bss)
+		ieee80211_rx_bss_put(sdata->local, bss);
+
+	dev_kfree_skb(skb);
+	return RX_QUEUED;
+}
+
+static void ieee80211_send_nullfunc(struct ieee80211_local *local,
+				    struct ieee80211_sub_if_data *sdata,
+				    int powersave)
+{
+	struct sk_buff *skb;
+	struct ieee80211_hdr *nullfunc;
+	__le16 fc;
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
+	if (!skb) {
+		printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
+		       "frame\n", sdata->dev->name);
+		return;
+	}
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
+	memset(nullfunc, 0, 24);
+	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
+			 IEEE80211_FCTL_TODS);
+	if (powersave)
+		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
+	nullfunc->frame_control = fc;
+	memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
+	memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
+	memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+void ieee80211_scan_completed(struct ieee80211_hw *hw)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_sub_if_data *sdata;
+	union iwreq_data wrqu;
+
+	if (WARN_ON(!local->hw_scanning && !local->sw_scanning))
+		return;
+
+	local->last_scan_completed = jiffies;
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	/*
+	 * local->scan_sdata could have been NULLed by the interface
+	 * down code in case we were scanning on an interface that is
+	 * being taken down.
+	 */
+	sdata = local->scan_sdata;
+	if (sdata)
+		wireless_send_event(sdata->dev, SIOCGIWSCAN, &wrqu, NULL);
+
+	if (local->hw_scanning) {
+		local->hw_scanning = false;
+		if (ieee80211_hw_config(local))
+			printk(KERN_DEBUG "%s: failed to restore operational "
+			       "channel after scan\n", wiphy_name(local->hw.wiphy));
+
+		goto done;
+	}
+
+	local->sw_scanning = false;
+	if (ieee80211_hw_config(local))
+		printk(KERN_DEBUG "%s: failed to restore operational "
+		       "channel after scan\n", wiphy_name(local->hw.wiphy));
+
+	netif_tx_lock_bh(local->mdev);
+	netif_addr_lock(local->mdev);
+	local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
+	local->ops->configure_filter(local_to_hw(local),
+				     FIF_BCN_PRBRESP_PROMISC,
+				     &local->filter_flags,
+				     local->mdev->mc_count,
+				     local->mdev->mc_list);
+
+	netif_addr_unlock(local->mdev);
+	netif_tx_unlock_bh(local->mdev);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		/* Tell AP we're back */
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
+				ieee80211_send_nullfunc(local, sdata, 0);
+				netif_tx_wake_all_queues(sdata->dev);
+			}
+		} else
+			netif_tx_wake_all_queues(sdata->dev);
+	}
+	rcu_read_unlock();
+
+ done:
+	ieee80211_mlme_notify_scan_completed(local);
+	ieee80211_mesh_notify_scan_completed(local);
+}
+EXPORT_SYMBOL(ieee80211_scan_completed);
+
+
+void ieee80211_scan_work(struct work_struct *work)
+{
+	struct ieee80211_local *local =
+		container_of(work, struct ieee80211_local, scan_work.work);
+	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *chan;
+	int skip;
+	unsigned long next_delay = 0;
+
+	/*
+	 * Avoid re-scheduling when the sdata is going away.
+	 */
+	if (!netif_running(sdata->dev))
+		return;
+
+	switch (local->scan_state) {
+	case SCAN_SET_CHANNEL:
+		/*
+		 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
+		 * after we successfully scanned the last channel of the last
+		 * band (and the last band is supported by the hw)
+		 */
+		if (local->scan_band < IEEE80211_NUM_BANDS)
+			sband = local->hw.wiphy->bands[local->scan_band];
+		else
+			sband = NULL;
+
+		/*
+		 * If we are at an unsupported band and have more bands
+		 * left to scan, advance to the next supported one.
+		 */
+		while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
+			local->scan_band++;
+			sband = local->hw.wiphy->bands[local->scan_band];
+			local->scan_channel_idx = 0;
+		}
+
+		/* if no more bands/channels left, complete scan */
+		if (!sband || local->scan_channel_idx >= sband->n_channels) {
+			ieee80211_scan_completed(local_to_hw(local));
+			return;
+		}
+		skip = 0;
+		chan = &sband->channels[local->scan_channel_idx];
+
+		if (chan->flags & IEEE80211_CHAN_DISABLED ||
+		    (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+		     chan->flags & IEEE80211_CHAN_NO_IBSS))
+			skip = 1;
+
+		if (!skip) {
+			local->scan_channel = chan;
+			if (ieee80211_hw_config(local)) {
+				printk(KERN_DEBUG "%s: failed to set freq to "
+				       "%d MHz for scan\n", wiphy_name(local->hw.wiphy),
+				       chan->center_freq);
+				skip = 1;
+			}
+		}
+
+		/* advance state machine to next channel/band */
+		local->scan_channel_idx++;
+		if (local->scan_channel_idx >= sband->n_channels) {
+			/*
+			 * scan_band may end up == IEEE80211_NUM_BANDS here;
+			 * the check above will catch that on the next run
+			 * and complete the scan.
+			 */
+			local->scan_band++;
+			local->scan_channel_idx = 0;
+		}
+
+		if (skip)
+			break;
+
+		next_delay = IEEE80211_PROBE_DELAY +
+			     usecs_to_jiffies(local->hw.channel_change_time);
+		local->scan_state = SCAN_SEND_PROBE;
+		break;
+	case SCAN_SEND_PROBE:
+		next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+		local->scan_state = SCAN_SET_CHANNEL;
+
+		if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			break;
+		ieee80211_send_probe_req(sdata, NULL, local->scan_ssid,
+					 local->scan_ssid_len);
+		next_delay = IEEE80211_CHANNEL_TIME;
+		break;
+	}
+
+	queue_delayed_work(local->hw.workqueue, &local->scan_work,
+			   next_delay);
+}
+
+
+int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
+			 u8 *ssid, size_t ssid_len)
+{
+	struct ieee80211_local *local = scan_sdata->local;
+	struct ieee80211_sub_if_data *sdata;
+
+	if (ssid_len > IEEE80211_MAX_SSID_LEN)
+		return -EINVAL;
+
+	/* MLME-SCAN.request (page 118)  page 144 (11.1.3.1)
+	 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
+	 * BSSID: MACAddress
+	 * SSID
+	 * ScanType: ACTIVE, PASSIVE
+	 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
+	 *    a Probe frame during active scanning
+	 * ChannelList
+	 * MinChannelTime (>= ProbeDelay), in TU
+	 * MaxChannelTime: (>= MinChannelTime), in TU
+	 */
+
+	 /* MLME-SCAN.confirm
+	  * BSSDescriptionSet
+	  * ResultCode: SUCCESS, INVALID_PARAMETERS
+	 */
+
+	if (local->sw_scanning || local->hw_scanning) {
+		if (local->scan_sdata == scan_sdata)
+			return 0;
+		return -EBUSY;
+	}
+
+	if (local->ops->hw_scan) {
+		int rc;
+
+		local->hw_scanning = true;
+		rc = local->ops->hw_scan(local_to_hw(local), ssid, ssid_len);
+		if (rc) {
+			local->hw_scanning = false;
+			return rc;
+		}
+		local->scan_sdata = scan_sdata;
+		return 0;
+	}
+
+	local->sw_scanning = true;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
+				netif_tx_stop_all_queues(sdata->dev);
+				ieee80211_send_nullfunc(local, sdata, 1);
+			}
+		} else
+			netif_tx_stop_all_queues(sdata->dev);
+	}
+	rcu_read_unlock();
+
+	if (ssid) {
+		local->scan_ssid_len = ssid_len;
+		memcpy(local->scan_ssid, ssid, ssid_len);
+	} else
+		local->scan_ssid_len = 0;
+	local->scan_state = SCAN_SET_CHANNEL;
+	local->scan_channel_idx = 0;
+	local->scan_band = IEEE80211_BAND_2GHZ;
+	local->scan_sdata = scan_sdata;
+
+	netif_addr_lock_bh(local->mdev);
+	local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
+	local->ops->configure_filter(local_to_hw(local),
+				     FIF_BCN_PRBRESP_PROMISC,
+				     &local->filter_flags,
+				     local->mdev->mc_count,
+				     local->mdev->mc_list);
+	netif_addr_unlock_bh(local->mdev);
+
+	/* TODO: start scan as soon as all nullfunc frames are ACKed */
+	queue_delayed_work(local->hw.workqueue, &local->scan_work,
+			   IEEE80211_CHANNEL_TIME);
+
+	return 0;
+}
+
+
+int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
+			   u8 *ssid, size_t ssid_len)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_sta *ifsta;
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return ieee80211_start_scan(sdata, ssid, ssid_len);
+
+	/*
+	 * STA has a state machine that might need to defer scanning
+	 * while it's trying to associate/authenticate, therefore we
+	 * queue it up to the state machine in that case.
+	 */
+
+	if (local->sw_scanning || local->hw_scanning) {
+		if (local->scan_sdata == sdata)
+			return 0;
+		return -EBUSY;
+	}
+
+	ifsta = &sdata->u.sta;
+
+	ifsta->scan_ssid_len = ssid_len;
+	if (ssid_len)
+		memcpy(ifsta->scan_ssid, ssid, ssid_len);
+	set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
+	queue_work(local->hw.workqueue, &ifsta->work);
+
+	return 0;
+}
+
+
+static void ieee80211_scan_add_ies(struct iw_request_info *info,
+				   struct ieee80211_bss *bss,
+				   char **current_ev, char *end_buf)
+{
+	u8 *pos, *end, *next;
+	struct iw_event iwe;
+
+	if (bss == NULL || bss->ies == NULL)
+		return;
+
+	/*
+	 * If needed, fragment the IEs buffer (at IE boundaries) into short
+	 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
+	 */
+	pos = bss->ies;
+	end = pos + bss->ies_len;
+
+	while (end - pos > IW_GENERIC_IE_MAX) {
+		next = pos + 2 + pos[1];
+		while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
+			next = next + 2 + next[1];
+
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = next - pos;
+		*current_ev = iwe_stream_add_point(info, *current_ev,
+						   end_buf, &iwe, pos);
+
+		pos = next;
+	}
+
+	if (end > pos) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = end - pos;
+		*current_ev = iwe_stream_add_point(info, *current_ev,
+						   end_buf, &iwe, pos);
+	}
+}
+
+
+static char *
+ieee80211_scan_result(struct ieee80211_local *local,
+		      struct iw_request_info *info,
+		      struct ieee80211_bss *bss,
+		      char *current_ev, char *end_buf)
+{
+	struct iw_event iwe;
+	char *buf;
+
+	if (time_after(jiffies,
+		       bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
+		return current_ev;
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+					  IW_EV_ADDR_LEN);
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWESSID;
+	if (bss_mesh_cfg(bss)) {
+		iwe.u.data.length = bss_mesh_id_len(bss);
+		iwe.u.data.flags = 1;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss_mesh_id(bss));
+	} else {
+		iwe.u.data.length = bss->ssid_len;
+		iwe.u.data.flags = 1;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss->ssid);
+	}
+
+	if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
+	    || bss_mesh_cfg(bss)) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = SIOCGIWMODE;
+		if (bss_mesh_cfg(bss))
+			iwe.u.mode = IW_MODE_MESH;
+		else if (bss->capability & WLAN_CAPABILITY_ESS)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_UINT_LEN);
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWFREQ;
+	iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
+	iwe.u.freq.e = 0;
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+					  IW_EV_FREQ_LEN);
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWFREQ;
+	iwe.u.freq.m = bss->freq;
+	iwe.u.freq.e = 6;
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+					  IW_EV_FREQ_LEN);
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = IWEVQUAL;
+	iwe.u.qual.qual = bss->qual;
+	iwe.u.qual.level = bss->signal;
+	iwe.u.qual.noise = bss->noise;
+	iwe.u.qual.updated = local->wstats_flags;
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+					  IW_EV_QUAL_LEN);
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWENCODE;
+	if (bss->capability & WLAN_CAPABILITY_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+					  &iwe, "");
+
+	ieee80211_scan_add_ies(info, bss, &current_ev, end_buf);
+
+	if (bss->supp_rates_len > 0) {
+		/* display all supported rates in readable format */
+		char *p = current_ev + iwe_stream_lcp_len(info);
+		int i;
+
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = SIOCGIWRATE;
+		/* Those two flags are ignored... */
+		iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+		for (i = 0; i < bss->supp_rates_len; i++) {
+			iwe.u.bitrate.value = ((bss->supp_rates[i] &
+							0x7f) * 500000);
+			p = iwe_stream_add_value(info, current_ev, p,
+					end_buf, &iwe, IW_EV_PARAM_LEN);
+		}
+		current_ev = p;
+	}
+
+	buf = kmalloc(30, GFP_ATOMIC);
+	if (buf) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
+		iwe.u.data.length = strlen(buf);
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, buf);
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		sprintf(buf, " Last beacon: %dms ago",
+			jiffies_to_msecs(jiffies - bss->last_update));
+		iwe.u.data.length = strlen(buf);
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf, &iwe, buf);
+		kfree(buf);
+	}
+
+	if (bss_mesh_cfg(bss)) {
+		u8 *cfg = bss_mesh_cfg(bss);
+		buf = kmalloc(50, GFP_ATOMIC);
+		if (buf) {
+			memset(&iwe, 0, sizeof(iwe));
+			iwe.cmd = IWEVCUSTOM;
+			sprintf(buf, "Mesh network (version %d)", cfg[0]);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(info, current_ev,
+							  end_buf,
+							  &iwe, buf);
+			sprintf(buf, "Path Selection Protocol ID: "
+				"0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
+							cfg[4]);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(info, current_ev,
+							  end_buf,
+							  &iwe, buf);
+			sprintf(buf, "Path Selection Metric ID: "
+				"0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
+							cfg[8]);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(info, current_ev,
+							  end_buf,
+							  &iwe, buf);
+			sprintf(buf, "Congestion Control Mode ID: "
+				"0x%02X%02X%02X%02X", cfg[9], cfg[10],
+							cfg[11], cfg[12]);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(info, current_ev,
+							  end_buf,
+							  &iwe, buf);
+			sprintf(buf, "Channel Precedence: "
+				"0x%02X%02X%02X%02X", cfg[13], cfg[14],
+							cfg[15], cfg[16]);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(info, current_ev,
+							  end_buf,
+							  &iwe, buf);
+			kfree(buf);
+		}
+	}
+
+	return current_ev;
+}
+
+
+int ieee80211_scan_results(struct ieee80211_local *local,
+			   struct iw_request_info *info,
+			   char *buf, size_t len)
+{
+	char *current_ev = buf;
+	char *end_buf = buf + len;
+	struct ieee80211_bss *bss;
+
+	spin_lock_bh(&local->bss_lock);
+	list_for_each_entry(bss, &local->bss_list, list) {
+		if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
+			spin_unlock_bh(&local->bss_lock);
+			return -E2BIG;
+		}
+		current_ev = ieee80211_scan_result(local, info, bss,
+						       current_ev, end_buf);
+	}
+	spin_unlock_bh(&local->bss_lock);
+	return current_ev - buf;
+}
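A stand-alone sketch of the IE-splitting idea used by ieee80211_scan_add_ies() above: walk a TLV buffer and emit chunks that never split an element and never exceed a size limit. MAX_CHUNK, emit_chunk() and the sample buffer are assumptions for this example only; the kernel code uses IW_GENERIC_IE_MAX and the iwe_stream_* helpers instead.

#include <stdio.h>
#include <stddef.h>

#define MAX_CHUNK 64	/* stand-in for IW_GENERIC_IE_MAX */

static void emit_chunk(const unsigned char *start, size_t len)
{
	printf("chunk: %zu bytes, first IE id %d\n", len, start[0]);
}

static void split_ies(const unsigned char *ies, size_t ies_len)
{
	const unsigned char *pos = ies, *end = ies + ies_len;

	while ((size_t)(end - pos) > MAX_CHUNK) {
		/* the first element always starts a new chunk */
		const unsigned char *next = pos + 2 + pos[1];

		/* greedily append whole elements while they still fit */
		while (next + 2 <= end &&
		       (size_t)(next + 2 + next[1] - pos) < MAX_CHUNK)
			next += 2 + next[1];

		emit_chunk(pos, (size_t)(next - pos));
		pos = next;
	}

	if (end > pos)
		emit_chunk(pos, (size_t)(end - pos));
}

int main(void)
{
	/* three fake elements: 2-byte header plus 40, 40 and 10 byte bodies */
	unsigned char ies[2 + 40 + 2 + 40 + 2 + 10] = { 0 };

	ies[0] = 0;    ies[1] = 40;	/* SSID-like element   */
	ies[42] = 221; ies[43] = 40;	/* vendor-like element */
	ies[84] = 1;   ies[85] = 10;	/* rates-like element  */

	split_ies(ies, sizeof(ies));
	return 0;
}

With MAX_CHUNK at 64 this prints two chunks (42 and 54 bytes); no element is ever split across a chunk boundary, which is what the wext scan-results path needs for IWEVGENIE events.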
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
new file mode 100644
index 0000000..f72bad6
--- /dev/null
+++ b/net/mac80211/spectmgmt.c
@@ -0,0 +1,86 @@
+/*
+ * spectrum management
+ *
+ * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007-2008, Intel Corporation
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ieee80211.h>
+#include <net/wireless.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "sta_info.h"
+#include "wme.h"
+
+static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
+					struct ieee80211_msrment_ie *request_ie,
+					const u8 *da, const u8 *bssid,
+					u8 dialog_token)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *msr_report;
+
+	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
+				sizeof(struct ieee80211_msrment_ie));
+
+	if (!skb) {
+		printk(KERN_ERR "%s: failed to allocate buffer for "
+				"measurement report frame\n", sdata->dev->name);
+		return;
+	}
+
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
+	memset(msr_report, 0, 24);
+	memcpy(msr_report->da, da, ETH_ALEN);
+	memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
+	memcpy(msr_report->bssid, bssid, ETH_ALEN);
+	msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						IEEE80211_STYPE_ACTION);
+
+	skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
+	msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
+	msr_report->u.action.u.measurement.action_code =
+				WLAN_ACTION_SPCT_MSR_RPRT;
+	msr_report->u.action.u.measurement.dialog_token = dialog_token;
+
+	msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
+	msr_report->u.action.u.measurement.length =
+			sizeof(struct ieee80211_msrment_ie);
+
+	memset(&msr_report->u.action.u.measurement.msr_elem, 0,
+		sizeof(struct ieee80211_msrment_ie));
+	msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
+	msr_report->u.action.u.measurement.msr_elem.mode |=
+			IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
+	msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
+
+	ieee80211_tx_skb(sdata, skb, 0);
+}
+
+void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_mgmt *mgmt,
+				       size_t len)
+{
+	/*
+	 * Ignoring a measurement request is a spec violation.
+	 * Mandatory measurements must be reported; optional
+	 * measurements may be refused or reported as incapable.
+	 * For now, just refuse.
+	 * TODO: Answer basic measurement as unmeasured
+	 */
+	ieee80211_send_refuse_measurement_request(sdata,
+			&mgmt->u.action.u.measurement.msr_elem,
+			mgmt->sa, mgmt->bssid,
+			mgmt->u.action.u.measurement.dialog_token);
+}
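A minimal user-space sketch of the refusal logic implemented by ieee80211_send_refuse_measurement_request() above: the report echoes the request's measurement token and type, copies the action dialog token, and sets only the "refused" mode bit. struct msrment_elem and MSR_RPRT_MODE_REFUSED are simplified stand-ins, not the kernel's struct ieee80211_msrment_ie or its flag values.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MSR_RPRT_MODE_REFUSED	0x04	/* assumed bit value */

struct msrment_elem {		/* simplified measurement element */
	uint8_t token;		/* measurement token */
	uint8_t mode;		/* request/report mode bits */
	uint8_t type;		/* measurement type */
};

static void build_refused_report(const struct msrment_elem *req,
				 uint8_t req_dialog_token,
				 struct msrment_elem *rep,
				 uint8_t *rep_dialog_token)
{
	memset(rep, 0, sizeof(*rep));
	rep->token = req->token;		/* tie report to request */
	rep->type = req->type;
	rep->mode |= MSR_RPRT_MODE_REFUSED;	/* we refuse to measure */
	*rep_dialog_token = req_dialog_token;	/* echo the dialog token */
}

int main(void)
{
	struct msrment_elem req = { .token = 7, .mode = 0, .type = 0 };
	struct msrment_elem rep;
	uint8_t dtoken;

	build_refused_report(&req, 42, &rep, &dtoken);
	printf("report: token=%u mode=0x%02x type=%u dialog=%u\n",
	       (unsigned)rep.token, (unsigned)rep.mode,
	       (unsigned)rep.type, (unsigned)dtoken);
	return 0;
}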
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f2ba653..9b72d15 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -73,11 +73,11 @@
 {
 	struct sta_info *s;
 
-	s = local->sta_hash[STA_HASH(sta->addr)];
+	s = local->sta_hash[STA_HASH(sta->sta.addr)];
 	if (!s)
 		return -ENOENT;
 	if (s == sta) {
-		rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)],
+		rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
 				   s->hnext);
 		return 0;
 	}
@@ -93,26 +93,19 @@
 }
 
 /* protected by RCU */
-static struct sta_info *__sta_info_find(struct ieee80211_local *local,
-					u8 *addr)
+struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
 {
 	struct sta_info *sta;
 
 	sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
 	while (sta) {
-		if (compare_ether_addr(sta->addr, addr) == 0)
+		if (compare_ether_addr(sta->sta.addr, addr) == 0)
 			break;
 		sta = rcu_dereference(sta->hnext);
 	}
 	return sta;
 }
 
-struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr)
-{
-	return __sta_info_find(local, addr);
-}
-EXPORT_SYMBOL(sta_info_get);
-
 struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
 				     struct net_device *dev)
 {
@@ -146,12 +139,12 @@
 {
 	DECLARE_MAC_BUF(mbuf);
 
-	rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
+	rate_control_free_sta(sta);
 	rate_control_put(sta->rate_ctrl);
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Destroyed STA %s\n",
-	       wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr));
+	       wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
 	kfree(sta);
@@ -219,8 +212,8 @@
 static void sta_info_hash_add(struct ieee80211_local *local,
 			      struct sta_info *sta)
 {
-	sta->hnext = local->sta_hash[STA_HASH(sta->addr)];
-	rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta);
+	sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
+	rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
 }
 
 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
@@ -231,20 +224,20 @@
 	int i;
 	DECLARE_MAC_BUF(mbuf);
 
-	sta = kzalloc(sizeof(*sta), gfp);
+	sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
 	if (!sta)
 		return NULL;
 
 	spin_lock_init(&sta->lock);
 	spin_lock_init(&sta->flaglock);
 
-	memcpy(sta->addr, addr, ETH_ALEN);
+	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
 
 	sta->rate_ctrl = rate_control_get(local->rate_ctrl);
 	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
-						     gfp);
+						     &sta->sta, gfp);
 	if (!sta->rate_ctrl_priv) {
 		rate_control_put(sta->rate_ctrl);
 		kfree(sta);
@@ -271,7 +264,7 @@
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Allocated STA %s\n",
-	       wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr));
+	       wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
 #ifdef CONFIG_MAC80211_MESH
@@ -300,15 +293,15 @@
 		goto out_free;
 	}
 
-	if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0 ||
-	            is_multicast_ether_addr(sta->addr))) {
+	if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 ||
+	            is_multicast_ether_addr(sta->sta.addr))) {
 		err = -EINVAL;
 		goto out_free;
 	}
 
 	spin_lock_irqsave(&local->sta_lock, flags);
 	/* check if STA exists already */
-	if (__sta_info_find(local, sta->addr)) {
+	if (sta_info_get(local, sta->sta.addr)) {
 		spin_unlock_irqrestore(&local->sta_lock, flags);
 		err = -EEXIST;
 		goto out_free;
@@ -319,18 +312,18 @@
 
 	/* notify driver */
 	if (local->ops->sta_notify) {
-		if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
+		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			sdata = container_of(sdata->bss,
 					     struct ieee80211_sub_if_data,
 					     u.ap);
 
 		local->ops->sta_notify(local_to_hw(local), &sdata->vif,
-				       STA_NOTIFY_ADD, sta->addr);
+				       STA_NOTIFY_ADD, &sta->sta);
 	}
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Inserted STA %s\n",
-	       wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr));
+	       wiphy_name(local->hw.wiphy), print_mac(mac, sta->sta.addr));
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
 	spin_unlock_irqrestore(&local->sta_lock, flags);
@@ -379,11 +372,12 @@
 {
 	BUG_ON(!bss);
 
-	__bss_tim_set(bss, sta->aid);
+	__bss_tim_set(bss, sta->sta.aid);
 
 	if (sta->local->ops->set_tim) {
 		sta->local->tim_in_locked_section = true;
-		sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1);
+		sta->local->ops->set_tim(local_to_hw(sta->local),
+					 &sta->sta, true);
 		sta->local->tim_in_locked_section = false;
 	}
 }
@@ -404,11 +398,12 @@
 {
 	BUG_ON(!bss);
 
-	__bss_tim_clear(bss, sta->aid);
+	__bss_tim_clear(bss, sta->sta.aid);
 
 	if (sta->local->ops->set_tim) {
 		sta->local->tim_in_locked_section = true;
-		sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0);
+		sta->local->ops->set_tim(local_to_hw(sta->local),
+					 &sta->sta, false);
 		sta->local->tim_in_locked_section = false;
 	}
 }
@@ -424,7 +419,7 @@
 	spin_unlock_irqrestore(&sta->local->sta_lock, flags);
 }
 
-void __sta_info_unlink(struct sta_info **sta)
+static void __sta_info_unlink(struct sta_info **sta)
 {
 	struct ieee80211_local *local = (*sta)->local;
 	struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
@@ -456,13 +451,13 @@
 	local->num_sta--;
 
 	if (local->ops->sta_notify) {
-		if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
+		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			sdata = container_of(sdata->bss,
 					     struct ieee80211_sub_if_data,
 					     u.ap);
 
 		local->ops->sta_notify(local_to_hw(local), &sdata->vif,
-				       STA_NOTIFY_REMOVE, (*sta)->addr);
+				       STA_NOTIFY_REMOVE, &(*sta)->sta);
 	}
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -474,7 +469,7 @@
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Removed STA %s\n",
-	       wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr));
+	       wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->sta.addr));
 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 
 	/*
@@ -570,7 +565,7 @@
 		local->total_ps_buffered--;
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "Buffered frame expired (STA "
-		       "%s)\n", print_mac(mac, sta->addr));
+		       "%s)\n", print_mac(mac, sta->sta.addr));
 #endif
 		dev_kfree_skb(skb);
 
@@ -802,3 +797,40 @@
 		schedule_work(&local->sta_flush_work);
 	spin_unlock_irqrestore(&local->sta_lock, flags);
 }
+
+void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
+			  unsigned long exp_time)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta, *tmp;
+	LIST_HEAD(tmp_list);
+	DECLARE_MAC_BUF(mac);
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->sta_lock, flags);
+	list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
+		if (time_after(jiffies, sta->last_rx + exp_time)) {
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+			printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
+			       sdata->dev->name, print_mac(mac, sta->sta.addr));
+#endif
+			__sta_info_unlink(&sta);
+			if (sta)
+				list_add(&sta->list, &tmp_list);
+		}
+	spin_unlock_irqrestore(&local->sta_lock, flags);
+
+	list_for_each_entry_safe(sta, tmp, &tmp_list, list)
+		sta_info_destroy(sta);
+}
+
+struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
+                                         const u8 *addr)
+{
+	struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
+
+	if (!sta)
+		return NULL;
+	return &sta->sta;
+}
+EXPORT_SYMBOL(ieee80211_find_sta);
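The new ieee80211_sta_expire() above follows a common pattern: unlink expired entries from the shared list while holding the lock, park them on a private list, and destroy them only after the lock is dropped. A stand-alone sketch of the same pattern using pthreads (struct entry, expire_entries() and the timing are assumptions for this example, not mac80211 code):

#include <pthread.h>
#include <stdlib.h>
#include <time.h>

struct entry {
	struct entry *next;
	time_t last_rx;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

static void expire_entries(time_t exp_secs)
{
	struct entry *e, **pp, *doomed = NULL;
	time_t now = time(NULL);

	pthread_mutex_lock(&list_lock);
	for (pp = &entries; (e = *pp) != NULL; ) {
		if (now - e->last_rx > exp_secs) {
			*pp = e->next;		/* unlink under the lock */
			e->next = doomed;	/* park on a private list */
			doomed = e;
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while (doomed) {			/* destroy outside the lock */
		e = doomed;
		doomed = e->next;
		free(e);
	}
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->last_rx = time(NULL) - 100;
	entries = e;
	expire_entries(60);		/* frees the stale entry */
	return 0;
}

Destroying outside the lock keeps any potentially sleeping cleanup out of the spinlocked section, which is why the doomed entries are collected on a temporary list first.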
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 109db78..a6b5186 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -167,8 +167,6 @@
  * @lock: used for locking all fields that require locking, see comments
  *	in the header file.
  * @flaglock: spinlock for flags accesses
- * @ht_info: HT capabilities of this STA
- * @supp_rates: Bitmap of supported rates (per band)
  * @addr: MAC address of this STA
  * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
  *	only used in AP (and IBSS?) mode
@@ -191,20 +189,15 @@
  * @last_qual: qual of last received frame from this STA
  * @last_noise: noise of last received frame from this STA
  * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
- * @wme_rx_queue: TBD
  * @tx_filtered_count: TBD
  * @tx_retry_failed: TBD
  * @tx_retry_count: TBD
- * @tx_num_consecutive_failures: TBD
- * @tx_num_mpdu_ok: TBD
- * @tx_num_mpdu_fail: TBD
  * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: TBD
  * @tx_fragments: number of transmitted MPDUs
- * @txrate_idx: TBD
- * @last_txrate_idx: TBD
- * @wme_tx_queue: TBD
+ * @last_txrate_idx: Index of the last used transmit rate
+ * @tid_seq: TBD
  * @ampdu_mlme: TBD
  * @timer_to_tid: identity mapping to ID timers
  * @tid_to_tx_q: map tid to tx queue
@@ -217,6 +210,7 @@
  * @plink_timeout: TBD
  * @plink_timer: TBD
  * @debugfs: debug filesystem info
+ * @sta: station information we share with the driver
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -229,10 +223,7 @@
 	void *rate_ctrl_priv;
 	spinlock_t lock;
 	spinlock_t flaglock;
-	struct ieee80211_ht_info ht_info;
-	u64 supp_rates[IEEE80211_NUM_BANDS];
-	u8 addr[ETH_ALEN];
-	u16 aid;
+
 	u16 listen_interval;
 
 	/*
@@ -265,17 +256,10 @@
 	int last_qual;
 	int last_noise;
 	__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
-#endif
 
 	/* Updated from TX status path only, no locking requirements */
 	unsigned long tx_filtered_count;
 	unsigned long tx_retry_failed, tx_retry_count;
-	/* TODO: update in generic code not rate control? */
-	u32 tx_num_consecutive_failures;
-	u32 tx_num_mpdu_ok;
-	u32 tx_num_mpdu_fail;
 	/* moving percentage of failed MSDUs */
 	unsigned int fail_avg;
 
@@ -283,12 +267,8 @@
 	unsigned long tx_packets;
 	unsigned long tx_bytes;
 	unsigned long tx_fragments;
-	int txrate_idx;
-	int last_txrate_idx;
+	unsigned int last_txrate_idx;
 	u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
-#endif
 
 	/*
 	 * Aggregation information, locked with lock.
@@ -319,13 +299,12 @@
 		struct dentry *num_ps_buf_frames;
 		struct dentry *inactive_ms;
 		struct dentry *last_seq_ctrl;
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-		struct dentry *wme_rx_queue;
-		struct dentry *wme_tx_queue;
-#endif
 		struct dentry *agg_status;
 	} debugfs;
 #endif
+
+	/* keep last! */
+	struct ieee80211_sta sta;
 };
 
 static inline enum plink_state sta_plink_state(struct sta_info *sta)
@@ -425,7 +404,7 @@
 /*
  * Get a STA info, must have be under RCU read lock.
  */
-struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr);
+struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
 /*
  * Get STA info by index, BROKEN!
  */
@@ -451,7 +430,6 @@
  * has already unlinked it.
  */
 void sta_info_unlink(struct sta_info **sta);
-void __sta_info_unlink(struct sta_info **sta);
 
 void sta_info_destroy(struct sta_info *sta);
 void sta_info_set_tim_bit(struct sta_info *sta);
@@ -463,5 +441,7 @@
 int sta_info_flush(struct ieee80211_local *local,
 		    struct ieee80211_sub_if_data *sdata);
 void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
+			  unsigned long exp_time);
 
 #endif /* STA_INFO_H */
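The struct ieee80211_sta member added at the end of struct sta_info (together with the hw.sta_data_size padding seen in sta_info_alloc() earlier in this patch) is the usual "public struct embedded last, driver scratch space behind it" layout. A stand-alone sketch under assumed names (pub_sta, priv_sta, priv_from_pub), not the mac80211 definitions:

#include <stdlib.h>
#include <string.h>
#include <stddef.h>

struct pub_sta {			/* what the driver may see */
	unsigned char addr[6];
};

struct priv_sta {			/* what the stack keeps private */
	unsigned long rx_packets;
	struct pub_sta sta;		/* keep last! */
};

/* recover the private struct from the public pointer handed to drivers */
#define priv_from_pub(p) \
	((struct priv_sta *)((char *)(p) - offsetof(struct priv_sta, sta)))

static struct priv_sta *alloc_sta(size_t drv_priv_size)
{
	/* one allocation: private part, public part, driver scratch area */
	return calloc(1, sizeof(struct priv_sta) + drv_priv_size);
}

int main(void)
{
	struct priv_sta *s = alloc_sta(32);
	struct pub_sta *pub;
	void *drv_priv;

	if (!s)
		return 1;
	pub = &s->sta;
	drv_priv = (char *)s + sizeof(*s);	/* driver's 32-byte area */
	memset(drv_priv, 0xab, 32);
	priv_from_pub(pub)->rx_packets++;	/* back to the private view */
	free(s);
	return 0;
}

Keeping the public struct last is what lets a single allocation carry all three pieces: the driver area simply follows it in memory, and offsetof arithmetic recovers the private struct from the pointer drivers are handed.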
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 995f7af..34b32bc 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -304,7 +304,7 @@
 			key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
 			u8 bcast[ETH_ALEN] =
 				{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-			u8 *sta_addr = key->sta->addr;
+			u8 *sta_addr = key->sta->sta.addr;
 
 			if (is_multicast_ether_addr(ra))
 				sta_addr = bcast;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4788f7b..0cc2e23 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -38,43 +38,6 @@
 
 /* misc utils */
 
-#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
-static void ieee80211_dump_frame(const char *ifname, const char *title,
-				 const struct sk_buff *skb)
-{
-	const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	unsigned int hdrlen;
-	DECLARE_MAC_BUF(mac);
-
-	printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
-	if (skb->len < 4) {
-		printk("\n");
-		return;
-	}
-
-	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	if (hdrlen > skb->len)
-		hdrlen = skb->len;
-	if (hdrlen >= 4)
-		printk(" FC=0x%04x DUR=0x%04x",
-		    le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id));
-	if (hdrlen >= 10)
-		printk(" A1=%s", print_mac(mac, hdr->addr1));
-	if (hdrlen >= 16)
-		printk(" A2=%s", print_mac(mac, hdr->addr2));
-	if (hdrlen >= 24)
-		printk(" A3=%s", print_mac(mac, hdr->addr3));
-	if (hdrlen >= 30)
-		printk(" A4=%s", print_mac(mac, hdr->addr4));
-	printk("\n");
-}
-#else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
-static inline void ieee80211_dump_frame(const char *ifname, const char *title,
-					struct sk_buff *skb)
-{
-}
-#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
-
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
 				 int next_frag_len)
 {
@@ -82,6 +45,7 @@
 	struct ieee80211_rate *txrate;
 	struct ieee80211_local *local = tx->local;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_hdr *hdr;
 
 	sband = local->hw.wiphy->bands[tx->channel->band];
 	txrate = &sband->bitrates[tx->rate_idx];
@@ -107,10 +71,10 @@
 	 *   at the highest possible rate belonging to the PHY rates in the
 	 *   BSSBasicRateSet
 	 */
-
-	if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) {
+	hdr = (struct ieee80211_hdr *)tx->skb->data;
+	if (ieee80211_is_ctl(hdr->frame_control)) {
 		/* TODO: These control frames are not currently sent by
-		 * 80211.o, but should they be implemented, this function
+		 * mac80211, but should they be implemented, this function
 		 * needs to be updated to support duration field calculation.
 		 *
 		 * RTS: time needed to transmit pending data/mgmt frame plus
@@ -152,7 +116,7 @@
 		if (r->bitrate > txrate->bitrate)
 			break;
 
-		if (tx->sdata->basic_rates & BIT(i))
+		if (tx->sdata->bss_conf.basic_rates & BIT(i))
 			rate = r->bitrate;
 
 		switch (sband->band) {
@@ -201,11 +165,10 @@
 	return cpu_to_le16(dur);
 }
 
-static int inline is_ieee80211_device(struct net_device *dev,
-				      struct net_device *master)
+static int inline is_ieee80211_device(struct ieee80211_local *local,
+				      struct net_device *dev)
 {
-	return (wdev_priv(dev->ieee80211_ptr) ==
-		wdev_priv(master->ieee80211_ptr));
+	return local == wdev_priv(dev->ieee80211_ptr);
 }
 
 /* tx handlers */
@@ -213,21 +176,19 @@
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 	u32 sta_flags;
 
 	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
 		return TX_CONTINUE;
 
-	if (unlikely(tx->local->sta_sw_scanning) &&
-	    ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
-	     (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
+	if (unlikely(tx->local->sw_scanning) &&
+	    !ieee80211_is_probe_req(hdr->frame_control))
 		return TX_DROP;
 
-	if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
+	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 		return TX_CONTINUE;
 
 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
@@ -237,8 +198,8 @@
 
 	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
 		if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
-			     tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
-			     (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
+			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+			     ieee80211_is_data(hdr->frame_control))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 			DECLARE_MAC_BUF(mac);
 			printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -249,9 +210,9 @@
 			return TX_DROP;
 		}
 	} else {
-		if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
+		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
 			     tx->local->num_sta == 0 &&
-			     tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) {
+			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
 			/*
 			 * No associated STAs - no need to send multicast
 			 * frames.
@@ -282,7 +243,7 @@
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		struct ieee80211_if_ap *ap;
-		if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
+		if (sdata->vif.type != NL80211_IFTYPE_AP)
 			continue;
 		ap = &sdata->u.ap;
 		skb = skb_dequeue(&ap->ps_bc_buf);
@@ -315,6 +276,7 @@
 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 
 	/*
 	 * broadcast/multicast frame
@@ -329,7 +291,7 @@
 		return TX_CONTINUE;
 
 	/* no buffering for ordered frames */
-	if (tx->fc & IEEE80211_FCTL_ORDER)
+	if (ieee80211_has_order(hdr->frame_control))
 		return TX_CONTINUE;
 
 	/* no stations in PS mode */
@@ -367,12 +329,11 @@
 {
 	struct sta_info *sta = tx->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	u32 staflags;
 	DECLARE_MAC_BUF(mac);
 
-	if (unlikely(!sta ||
-		     ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
-		      (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
+	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
 		return TX_CONTINUE;
 
 	staflags = get_sta_flags(sta);
@@ -382,7 +343,7 @@
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
 		       "before %d)\n",
-		       print_mac(mac, sta->addr), sta->aid,
+		       print_mac(mac, sta->sta.addr), sta->sta.aid,
 		       skb_queue_len(&sta->ps_tx_buf));
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -393,7 +354,7 @@
 			if (net_ratelimit()) {
 				printk(KERN_DEBUG "%s: STA %s TX "
 				       "buffer full - dropping oldest frame\n",
-				       tx->dev->name, print_mac(mac, sta->addr));
+				       tx->dev->name, print_mac(mac, sta->sta.addr));
 			}
 #endif
 			dev_kfree_skb(old);
@@ -412,7 +373,7 @@
 	else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
 		printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
 		       "set -> send frame\n", tx->dev->name,
-		       print_mac(mac, sta->addr));
+		       print_mac(mac, sta->sta.addr));
 	}
 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
 	clear_sta_flags(sta, WLAN_STA_PSPOLL);
@@ -437,7 +398,7 @@
 {
 	struct ieee80211_key *key;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	u16 fc = tx->fc;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 
 	if (unlikely(tx->skb->do_not_encrypt))
 		tx->key = NULL;
@@ -454,22 +415,16 @@
 		tx->key = NULL;
 
 	if (tx->key) {
-		u16 ftype, stype;
-
 		tx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 
 		switch (tx->key->conf.alg) {
 		case ALG_WEP:
-			ftype = fc & IEEE80211_FCTL_FTYPE;
-			stype = fc & IEEE80211_FCTL_STYPE;
-
-			if (ftype == IEEE80211_FTYPE_MGMT &&
-			    stype == IEEE80211_STYPE_AUTH)
+			if (ieee80211_is_auth(hdr->frame_control))
 				break;
 		case ALG_TKIP:
 		case ALG_CCMP:
-			if (!WLAN_FC_DATA_PRESENT(fc))
+			if (!ieee80211_is_data_present(hdr->frame_control))
 				tx->key = NULL;
 			break;
 		}
@@ -491,7 +446,10 @@
 	sband = tx->local->hw.wiphy->bands[tx->channel->band];
 
 	if (likely(tx->rate_idx < 0)) {
-		rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
+		rate_control_get_rate(tx->sdata, sband, tx->sta,
+				      tx->skb, &rsel);
+		if (tx->sta)
+			tx->sta->last_txrate_idx = rsel.rate_idx;
 		tx->rate_idx = rsel.rate_idx;
 		if (unlikely(rsel.probe_idx >= 0)) {
 			info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
@@ -535,7 +493,7 @@
 	sband = tx->local->hw.wiphy->bands[tx->channel->band];
 
 	if (tx->sta)
-		info->control.aid = tx->sta->aid;
+		info->control.sta = &tx->sta->sta;
 
 	if (!info->control.retry_limit) {
 		if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -601,7 +559,7 @@
 		for (idx = 0; idx < sband->n_bitrates; idx++) {
 			if (sband->bitrates[idx].bitrate > rate->bitrate)
 				continue;
-			if (tx->sdata->basic_rates & BIT(idx) &&
+			if (tx->sdata->bss_conf.basic_rates & BIT(idx) &&
 			    (baserate < 0 ||
 			     (sband->bitrates[baserate].bitrate
 			      < sband->bitrates[idx].bitrate)))
@@ -615,7 +573,7 @@
 	}
 
 	if (tx->sta)
-		info->control.aid = tx->sta->aid;
+		info->control.sta = &tx->sta->sta;
 
 	return TX_CONTINUE;
 }
@@ -629,7 +587,14 @@
 	u8 *qc;
 	int tid;
 
-	/* only for injected frames */
+	/*
+	 * Packet injection may want to control the sequence
+	 * number; if we have no matching interface, we neither
+	 * assign one ourselves nor ask the driver to.
+	 */
+	if (unlikely(!info->control.vif))
+		return TX_CONTINUE;
+
 	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
 		return TX_CONTINUE;
 
@@ -854,7 +819,6 @@
 	sband = tx->local->hw.wiphy->bands[tx->channel->band];
 
 	skb->do_not_encrypt = 1;
-	info->flags |= IEEE80211_TX_CTL_INJECTED;
 	tx->flags &= ~IEEE80211_TX_FRAGMENTED;
 
 	/*
@@ -986,7 +950,7 @@
 
 	/* process and remove the injection radiotap header */
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) {
+	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
 		if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
 			return TX_DROP;
 
@@ -1000,7 +964,6 @@
 	hdr = (struct ieee80211_hdr *) skb->data;
 
 	tx->sta = sta_info_get(local, hdr->addr1);
-	tx->fc = le16_to_cpu(hdr->frame_control);
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		tx->flags &= ~IEEE80211_TX_UNICAST;
@@ -1025,7 +988,7 @@
 	else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 
-	hdrlen = ieee80211_get_hdrlen(tx->fc);
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
 		u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
 		tx->ethertype = (pos[0] << 8) | pos[1];
@@ -1038,14 +1001,14 @@
 /*
  * NB: @tx is uninitialised when passed in here
  */
-static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
-				struct sk_buff *skb,
-				struct net_device *mdev)
+static int ieee80211_tx_prepare(struct ieee80211_local *local,
+				struct ieee80211_tx_data *tx,
+				struct sk_buff *skb)
 {
 	struct net_device *dev;
 
 	dev = dev_get_by_index(&init_net, skb->iif);
-	if (unlikely(dev && !is_ieee80211_device(dev, mdev))) {
+	if (unlikely(dev && !is_ieee80211_device(local, dev))) {
 		dev_put(dev);
 		dev = NULL;
 	}
@@ -1068,8 +1031,6 @@
 			return IEEE80211_TX_AGAIN;
 		info =  IEEE80211_SKB_CB(skb);
 
-		ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
-				     "TX to low-level driver", skb);
 		ret = local->ops->tx(local_to_hw(local), skb);
 		if (ret)
 			return IEEE80211_TX_AGAIN;
@@ -1099,9 +1060,6 @@
 						~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 			}
 
-			ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
-					     "TX to low-level driver",
-					     tx->extra_frag[i]);
 			ret = local->ops->tx(local_to_hw(local),
 					    tx->extra_frag[i]);
 			if (ret)
@@ -1300,17 +1258,24 @@
 int ieee80211_master_start_xmit(struct sk_buff *skb,
 				struct net_device *dev)
 {
+	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
+	struct ieee80211_local *local = mpriv->local;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct net_device *odev = NULL;
 	struct ieee80211_sub_if_data *osdata;
 	int headroom;
 	bool may_encrypt;
+	enum {
+		NOT_MONITOR,
+		FOUND_SDATA,
+		UNKNOWN_ADDRESS,
+	} monitor_iface = NOT_MONITOR;
 	int ret;
 
 	if (skb->iif)
 		odev = dev_get_by_index(&init_net, skb->iif);
-	if (unlikely(odev && !is_ieee80211_device(odev, dev))) {
+	if (unlikely(odev && !is_ieee80211_device(local, odev))) {
 		dev_put(odev);
 		odev = NULL;
 	}
@@ -1335,12 +1300,56 @@
 			if (is_multicast_ether_addr(hdr->addr3))
 				memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
 			else
-				if (mesh_nexthop_lookup(skb, odev))
+				if (mesh_nexthop_lookup(skb, osdata))
 					return  0;
 			if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
-				IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta,
+				IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
 							     fwded_frames);
 		}
+	} else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) {
+		struct ieee80211_sub_if_data *sdata;
+		struct ieee80211_local *local = osdata->local;
+		struct ieee80211_hdr *hdr;
+		int hdrlen;
+		u16 len_rthdr;
+
+		info->flags |= IEEE80211_TX_CTL_INJECTED;
+		monitor_iface = UNKNOWN_ADDRESS;
+
+		len_rthdr = ieee80211_get_radiotap_len(skb->data);
+		hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
+		hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+		/* check that the header is complete in the frame */
+		if (likely(skb->len >= len_rthdr + hdrlen)) {
+			/*
+			 * We process outgoing injected frames that have a
+			 * local address we handle as though they are our
+			 * own frames.
+			 * This code isn't entirely correct: the local
+			 * MAC address is not necessarily enough to find
+			 * the interface to use; for proper VLAN/WDS
+			 * support we will need a different mechanism.
+			 */
+
+			rcu_read_lock();
+			list_for_each_entry_rcu(sdata, &local->interfaces,
+						list) {
+				if (!netif_running(sdata->dev))
+					continue;
+				if (compare_ether_addr(sdata->dev->dev_addr,
+						       hdr->addr2)) {
+					dev_hold(sdata->dev);
+					dev_put(odev);
+					osdata = sdata;
+					odev = osdata->dev;
+					skb->iif = sdata->dev->ifindex;
+					monitor_iface = FOUND_SDATA;
+					break;
+				}
+			}
+			rcu_read_unlock();
+		}
 	}
 
 	may_encrypt = !skb->do_not_encrypt;
@@ -1357,7 +1366,12 @@
 		return 0;
 	}
 
-	info->control.vif = &osdata->vif;
+	if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		osdata = container_of(osdata->bss,
+				      struct ieee80211_sub_if_data,
+				      u.ap);
+	if (likely(monitor_iface != UNKNOWN_ADDRESS))
+		info->control.vif = &osdata->vif;
 	ret = ieee80211_tx(odev, skb);
 	dev_put(odev);
 
@@ -1437,8 +1451,8 @@
 int ieee80211_subif_start_xmit(struct sk_buff *skb,
 			       struct net_device *dev)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	int ret = 1, head_need;
 	u16 ethertype, hdrlen,  meshhdrlen = 0;
 	__le16 fc;
@@ -1450,7 +1464,6 @@
 	struct sta_info *sta;
 	u32 sta_flags = 0;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	if (unlikely(skb->len < ETH_HLEN)) {
 		ret = 0;
 		goto fail;
@@ -1465,8 +1478,8 @@
 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
 
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_AP:
-	case IEEE80211_IF_TYPE_VLAN:
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
 		/* DA BSSID SA */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1474,7 +1487,7 @@
 		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
 		hdrlen = 24;
 		break;
-	case IEEE80211_IF_TYPE_WDS:
+	case NL80211_IFTYPE_WDS:
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
 		/* RA TA DA SA */
 		memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
@@ -1484,24 +1497,56 @@
 		hdrlen = 30;
 		break;
 #ifdef CONFIG_MAC80211_MESH
-	case IEEE80211_IF_TYPE_MESH_POINT:
+	case NL80211_IFTYPE_MESH_POINT:
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
-		/* RA TA DA SA */
-		memset(hdr.addr1, 0, ETH_ALEN);
-		memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
-		memcpy(hdr.addr3, skb->data, ETH_ALEN);
-		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
-		if (!sdata->u.sta.mshcfg.dot11MeshTTL) {
+		if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
 			/* Do not send frames with mesh_ttl == 0 */
-			sdata->u.sta.mshstats.dropped_frames_ttl++;
+			sdata->u.mesh.mshstats.dropped_frames_ttl++;
 			ret = 0;
 			goto fail;
 		}
-		meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata);
+		memset(&mesh_hdr, 0, sizeof(mesh_hdr));
+
+		if (compare_ether_addr(dev->dev_addr,
+					  skb->data + ETH_ALEN) == 0) {
+			/* RA TA DA SA */
+			memset(hdr.addr1, 0, ETH_ALEN);
+			memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+			memcpy(hdr.addr3, skb->data, ETH_ALEN);
+			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata);
+		} else {
+			/* packet from other interface */
+			struct mesh_path *mppath;
+
+			memset(hdr.addr1, 0, ETH_ALEN);
+			memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+			memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN);
+
+			if (is_multicast_ether_addr(skb->data))
+				memcpy(hdr.addr3, skb->data, ETH_ALEN);
+			else {
+				rcu_read_lock();
+				mppath = mpp_path_lookup(skb->data, sdata);
+				if (mppath)
+					memcpy(hdr.addr3, mppath->mpp, ETH_ALEN);
+				else
+					memset(hdr.addr3, 0xff, ETH_ALEN);
+				rcu_read_unlock();
+			}
+
+			mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6;
+			mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
+			put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum);
+			memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN);
+			memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN);
+			sdata->u.mesh.mesh_seqnum++;
+			meshhdrlen = 18;
+		}
 		hdrlen = 30;
 		break;
 #endif
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
 		/* BSSID SA DA */
 		memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
@@ -1509,7 +1554,7 @@
 		memcpy(hdr.addr3, skb->data, ETH_ALEN);
 		hdrlen = 24;
 		break;
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		/* DA SA BSSID */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1588,19 +1633,6 @@
 	nh_pos -= skip_header_bytes;
 	h_pos -= skip_header_bytes;
 
-	/* TODO: implement support for fragments so that there is no need to
-	 * reallocate and copy payload; it might be enough to support one
-	 * extra fragment that would be copied in the beginning of the frame
-	 * data.. anyway, it would be nice to include this into skb structure
-	 * somehow
-	 *
-	 * There are few options for this:
-	 * use skb->cb as an extra space for 802.11 header
-	 * allocate new buffer if not enough headroom
-	 * make sure that there is enough headroom in every skb by increasing
-	 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
-	 * alloc_skb() (net/core/skbuff.c)
-	 */
 	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
 
 	/*
@@ -1823,10 +1855,7 @@
 	struct rate_selection rsel;
 	struct beacon_data *beacon;
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_mgmt *mgmt;
-	int *num_beacons;
 	enum ieee80211_band band = local->hw.conf.channel->band;
-	u8 *pos;
 
 	sband = local->hw.wiphy->bands[band];
 
@@ -1835,7 +1864,7 @@
 	sdata = vif_to_sdata(vif);
 	bdev = sdata->dev;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		ap = &sdata->u.ap;
 		beacon = rcu_dereference(ap->beacon);
 		if (ap && beacon) {
@@ -1873,11 +1902,9 @@
 			if (beacon->tail)
 				memcpy(skb_put(skb, beacon->tail_len),
 				       beacon->tail, beacon->tail_len);
-
-			num_beacons = &ap->num_beacons;
 		} else
 			goto out;
-	} else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		struct ieee80211_hdr *hdr;
 		ifsta = &sdata->u.sta;
 
@@ -1889,11 +1916,13 @@
 			goto out;
 
 		hdr = (struct ieee80211_hdr *) skb->data;
-		hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
-						  IEEE80211_STYPE_BEACON);
+		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						 IEEE80211_STYPE_BEACON);
 
-		num_beacons = &ifsta->num_beacons;
 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		struct ieee80211_mgmt *mgmt;
+		u8 *pos;
+
 		/* headroom, head length, tail length and maximum TIM length */
 		skb = dev_alloc_skb(local->tx_headroom + 400);
 		if (!skb)
@@ -1916,9 +1945,7 @@
 		*pos++ = WLAN_EID_SSID;
 		*pos++ = 0x0;
 
-		mesh_mgmt_ies_add(skb, sdata->dev);
-
-		num_beacons = &sdata->u.sta.num_beacons;
+		mesh_mgmt_ies_add(skb, sdata);
 	} else {
 		WARN_ON(1);
 		goto out;
@@ -1929,7 +1956,7 @@
 	skb->do_not_encrypt = 1;
 
 	info->band = band;
-	rate_control_get_rate(local->mdev, sband, skb, &rsel);
+	rate_control_get_rate(sdata, sband, NULL, skb, &rsel);
 
 	if (unlikely(rsel.rate_idx < 0)) {
 		if (net_ratelimit()) {
@@ -1955,7 +1982,6 @@
 	info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
 	info->control.retry_limit = 1;
 
-	(*num_beacons)++;
 out:
 	rcu_read_unlock();
 	return skb;
@@ -2017,7 +2043,7 @@
 	rcu_read_lock();
 	beacon = rcu_dereference(bss->beacon);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head)
+	if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
 		goto out;
 
 	if (bss->dtim_count != 0)
@@ -2039,7 +2065,7 @@
 				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 		}
 
-		if (!ieee80211_tx_prepare(&tx, skb, local->mdev))
+		if (!ieee80211_tx_prepare(local, &tx, skb))
 			break;
 		dev_kfree_skb_any(skb);
 	}
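
The monitor-interface branch above skips the injection radiotap header and reads the 802.11 header behind it; the key detail is that the radiotap length is a byte offset into skb->data, to be applied before casting to the header type. A minimal standalone sketch of that offset arithmetic (plain integer types, not mac80211's structures):

#include <stdint.h>

/*
 * A radiotap header starts with a version byte, a pad byte and a
 * little-endian 16-bit total length; that length is a byte count, so
 * it must be added to the byte pointer before casting to the 802.11
 * header type (adding it to a struct pointer would scale the offset
 * by the struct size).
 */
const uint8_t *skip_radiotap_header(const uint8_t *frame)
{
	uint16_t rt_len = (uint16_t)frame[2] | ((uint16_t)frame[3] << 8);

	return frame + rt_len;
}
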
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0d463c8..f32561e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -43,7 +43,7 @@
 
 
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum ieee80211_if_types type)
+			enum nl80211_iftype type)
 {
 	__le16 fc = hdr->frame_control;
 
@@ -77,10 +77,10 @@
 
 		if (ieee80211_is_back_req(fc)) {
 			switch (type) {
-			case IEEE80211_IF_TYPE_STA:
+			case NL80211_IFTYPE_STATION:
 				return hdr->addr2;
-			case IEEE80211_IF_TYPE_AP:
-			case IEEE80211_IF_TYPE_VLAN:
+			case NL80211_IFTYPE_AP:
+			case NL80211_IFTYPE_AP_VLAN:
 				return hdr->addr1;
 			default:
 				break; /* fall through to the return */
@@ -91,45 +91,6 @@
 	return NULL;
 }
 
-int ieee80211_get_hdrlen(u16 fc)
-{
-	int hdrlen = 24;
-
-	switch (fc & IEEE80211_FCTL_FTYPE) {
-	case IEEE80211_FTYPE_DATA:
-		if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
-			hdrlen = 30; /* Addr4 */
-		/*
-		 * The QoS Control field is two bytes and its presence is
-		 * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to
-		 * hdrlen if that bit is set.
-		 * This works by masking out the bit and shifting it to
-		 * bit position 1 so the result has the value 0 or 2.
-		 */
-		hdrlen += (fc & IEEE80211_STYPE_QOS_DATA)
-				>> (ilog2(IEEE80211_STYPE_QOS_DATA)-1);
-		break;
-	case IEEE80211_FTYPE_CTL:
-		/*
-		 * ACK and CTS are 10 bytes, all others 16. To see how
-		 * to get this condition consider
-		 *   subtype mask:   0b0000000011110000 (0x00F0)
-		 *   ACK subtype:    0b0000000011010000 (0x00D0)
-		 *   CTS subtype:    0b0000000011000000 (0x00C0)
-		 *   bits that matter:         ^^^      (0x00E0)
-		 *   value of those: 0b0000000011000000 (0x00C0)
-		 */
-		if ((fc & 0xE0) == 0xC0)
-			hdrlen = 10;
-		else
-			hdrlen = 16;
-		break;
-	}
-
-	return hdrlen;
-}
-EXPORT_SYMBOL(ieee80211_get_hdrlen);
-
 unsigned int ieee80211_hdrlen(__le16 fc)
 {
 	unsigned int hdrlen = 24;
@@ -270,16 +231,21 @@
 					struct ieee80211_rate *rate)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata;
 	u16 dur;
 	int erp;
+	bool short_preamble = false;
 
 	erp = 0;
-	if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
-		erp = rate->flags & IEEE80211_RATE_ERP_G;
+	if (vif) {
+		sdata = vif_to_sdata(vif);
+		short_preamble = sdata->bss_conf.use_short_preamble;
+		if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
+			erp = rate->flags & IEEE80211_RATE_ERP_G;
+	}
 
 	dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
-				       sdata->bss_conf.use_short_preamble);
+				       short_preamble);
 
 	return cpu_to_le16(dur);
 }
@@ -291,7 +257,7 @@
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate;
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata;
 	bool short_preamble;
 	int erp;
 	u16 dur;
@@ -299,13 +265,17 @@
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-	short_preamble = sdata->bss_conf.use_short_preamble;
+	short_preamble = false;
 
 	rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
 
 	erp = 0;
-	if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
-		erp = rate->flags & IEEE80211_RATE_ERP_G;
+	if (vif) {
+		sdata = vif_to_sdata(vif);
+		short_preamble = sdata->bss_conf.use_short_preamble;
+		if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
+			erp = rate->flags & IEEE80211_RATE_ERP_G;
+	}
 
 	/* CTS duration */
 	dur = ieee80211_frame_duration(local, 10, rate->bitrate,
@@ -328,7 +298,7 @@
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate;
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata;
 	bool short_preamble;
 	int erp;
 	u16 dur;
@@ -336,12 +306,16 @@
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-	short_preamble = sdata->bss_conf.use_short_preamble;
+	short_preamble = false;
 
 	rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
 	erp = 0;
-	if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
-		erp = rate->flags & IEEE80211_RATE_ERP_G;
+	if (vif) {
+		sdata = vif_to_sdata(vif);
+		short_preamble = sdata->bss_conf.use_short_preamble;
+		if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
+			erp = rate->flags & IEEE80211_RATE_ERP_G;
+	}
 
 	/* Data frame duration */
 	dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
@@ -386,6 +360,13 @@
 }
 EXPORT_SYMBOL(ieee80211_stop_queues);
 
+int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	return __netif_subqueue_stopped(local->mdev, queue);
+}
+EXPORT_SYMBOL(ieee80211_queue_stopped);
+
 void ieee80211_wake_queues(struct ieee80211_hw *hw)
 {
 	int i;
@@ -408,15 +389,16 @@
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		switch (sdata->vif.type) {
-		case IEEE80211_IF_TYPE_INVALID:
-		case IEEE80211_IF_TYPE_MNTR:
-		case IEEE80211_IF_TYPE_VLAN:
+		case __NL80211_IFTYPE_AFTER_LAST:
+		case NL80211_IFTYPE_UNSPECIFIED:
+		case NL80211_IFTYPE_MONITOR:
+		case NL80211_IFTYPE_AP_VLAN:
 			continue;
-		case IEEE80211_IF_TYPE_AP:
-		case IEEE80211_IF_TYPE_STA:
-		case IEEE80211_IF_TYPE_IBSS:
-		case IEEE80211_IF_TYPE_WDS:
-		case IEEE80211_IF_TYPE_MESH_POINT:
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_ADHOC:
+		case NL80211_IFTYPE_WDS:
+		case NL80211_IFTYPE_MESH_POINT:
 			break;
 		}
 		if (netif_running(sdata->dev))
@@ -441,15 +423,16 @@
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		switch (sdata->vif.type) {
-		case IEEE80211_IF_TYPE_INVALID:
-		case IEEE80211_IF_TYPE_MNTR:
-		case IEEE80211_IF_TYPE_VLAN:
+		case __NL80211_IFTYPE_AFTER_LAST:
+		case NL80211_IFTYPE_UNSPECIFIED:
+		case NL80211_IFTYPE_MONITOR:
+		case NL80211_IFTYPE_AP_VLAN:
 			continue;
-		case IEEE80211_IF_TYPE_AP:
-		case IEEE80211_IF_TYPE_STA:
-		case IEEE80211_IF_TYPE_IBSS:
-		case IEEE80211_IF_TYPE_WDS:
-		case IEEE80211_IF_TYPE_MESH_POINT:
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_ADHOC:
+		case NL80211_IFTYPE_WDS:
+		case NL80211_IFTYPE_MESH_POINT:
 			break;
 		}
 		if (netif_running(sdata->dev))
@@ -460,3 +443,243 @@
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
+
+void ieee802_11_parse_elems(u8 *start, size_t len,
+			    struct ieee802_11_elems *elems)
+{
+	size_t left = len;
+	u8 *pos = start;
+
+	memset(elems, 0, sizeof(*elems));
+	elems->ie_start = start;
+	elems->total_len = len;
+
+	while (left >= 2) {
+		u8 id, elen;
+
+		id = *pos++;
+		elen = *pos++;
+		left -= 2;
+
+		if (elen > left)
+			return;
+
+		switch (id) {
+		case WLAN_EID_SSID:
+			elems->ssid = pos;
+			elems->ssid_len = elen;
+			break;
+		case WLAN_EID_SUPP_RATES:
+			elems->supp_rates = pos;
+			elems->supp_rates_len = elen;
+			break;
+		case WLAN_EID_FH_PARAMS:
+			elems->fh_params = pos;
+			elems->fh_params_len = elen;
+			break;
+		case WLAN_EID_DS_PARAMS:
+			elems->ds_params = pos;
+			elems->ds_params_len = elen;
+			break;
+		case WLAN_EID_CF_PARAMS:
+			elems->cf_params = pos;
+			elems->cf_params_len = elen;
+			break;
+		case WLAN_EID_TIM:
+			elems->tim = pos;
+			elems->tim_len = elen;
+			break;
+		case WLAN_EID_IBSS_PARAMS:
+			elems->ibss_params = pos;
+			elems->ibss_params_len = elen;
+			break;
+		case WLAN_EID_CHALLENGE:
+			elems->challenge = pos;
+			elems->challenge_len = elen;
+			break;
+		case WLAN_EID_WPA:
+			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+			    pos[2] == 0xf2) {
+				/* Microsoft OUI (00:50:F2) */
+				if (pos[3] == 1) {
+					/* OUI Type 1 - WPA IE */
+					elems->wpa = pos;
+					elems->wpa_len = elen;
+				} else if (elen >= 5 && pos[3] == 2) {
+					if (pos[4] == 0) {
+						elems->wmm_info = pos;
+						elems->wmm_info_len = elen;
+					} else if (pos[4] == 1) {
+						elems->wmm_param = pos;
+						elems->wmm_param_len = elen;
+					}
+				}
+			}
+			break;
+		case WLAN_EID_RSN:
+			elems->rsn = pos;
+			elems->rsn_len = elen;
+			break;
+		case WLAN_EID_ERP_INFO:
+			elems->erp_info = pos;
+			elems->erp_info_len = elen;
+			break;
+		case WLAN_EID_EXT_SUPP_RATES:
+			elems->ext_supp_rates = pos;
+			elems->ext_supp_rates_len = elen;
+			break;
+		case WLAN_EID_HT_CAPABILITY:
+			elems->ht_cap_elem = pos;
+			elems->ht_cap_elem_len = elen;
+			break;
+		case WLAN_EID_HT_EXTRA_INFO:
+			elems->ht_info_elem = pos;
+			elems->ht_info_elem_len = elen;
+			break;
+		case WLAN_EID_MESH_ID:
+			elems->mesh_id = pos;
+			elems->mesh_id_len = elen;
+			break;
+		case WLAN_EID_MESH_CONFIG:
+			elems->mesh_config = pos;
+			elems->mesh_config_len = elen;
+			break;
+		case WLAN_EID_PEER_LINK:
+			elems->peer_link = pos;
+			elems->peer_link_len = elen;
+			break;
+		case WLAN_EID_PREQ:
+			elems->preq = pos;
+			elems->preq_len = elen;
+			break;
+		case WLAN_EID_PREP:
+			elems->prep = pos;
+			elems->prep_len = elen;
+			break;
+		case WLAN_EID_PERR:
+			elems->perr = pos;
+			elems->perr_len = elen;
+			break;
+		case WLAN_EID_CHANNEL_SWITCH:
+			elems->ch_switch_elem = pos;
+			elems->ch_switch_elem_len = elen;
+			break;
+		case WLAN_EID_QUIET:
+			if (!elems->quiet_elem) {
+				elems->quiet_elem = pos;
+				elems->quiet_elem_len = elen;
+			}
+			elems->num_of_quiet_elem++;
+			break;
+		case WLAN_EID_COUNTRY:
+			elems->country_elem = pos;
+			elems->country_elem_len = elen;
+			break;
+		case WLAN_EID_PWR_CONSTRAINT:
+			elems->pwr_constr_elem = pos;
+			elems->pwr_constr_elem_len = elen;
+			break;
+		default:
+			break;
+		}
+
+		left -= elen;
+		pos += elen;
+	}
+}
+
+void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_tx_queue_params qparam;
+	int i;
+
+	if (!local->ops->conf_tx)
+		return;
+
+	memset(&qparam, 0, sizeof(qparam));
+
+	qparam.aifs = 2;
+
+	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+	    !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
+		qparam.cw_min = 31;
+	else
+		qparam.cw_min = 15;
+
+	qparam.cw_max = 1023;
+	qparam.txop = 0;
+
+	for (i = 0; i < local_to_hw(local)->queues; i++)
+		local->ops->conf_tx(local_to_hw(local), i, &qparam);
+}
+
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+		      int encrypt)
+{
+	skb->dev = sdata->local->mdev;
+	skb_set_mac_header(skb, 0);
+	skb_set_network_header(skb, 0);
+	skb_set_transport_header(skb, 0);
+
+	skb->iif = sdata->dev->ifindex;
+	skb->do_not_encrypt = !encrypt;
+
+	dev_queue_xmit(skb);
+}
+
+int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
+{
+	int ret = -EINVAL;
+	struct ieee80211_channel *chan;
+	struct ieee80211_local *local = sdata->local;
+
+	chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
+
+	if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+		    chan->flags & IEEE80211_CHAN_NO_IBSS) {
+			printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
+				"%d MHz\n", sdata->dev->name, chan->center_freq);
+			return ret;
+		}
+		local->oper_channel = chan;
+
+		if (local->sw_scanning || local->hw_scanning)
+			ret = 0;
+		else
+			ret = ieee80211_hw_config(local);
+
+		rate_control_clear(local);
+	}
+
+	return ret;
+}
+
+u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
+			      enum ieee80211_band band)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_rate *bitrates;
+	u64 mandatory_rates;
+	enum ieee80211_rate_flags mandatory_flag;
+	int i;
+
+	sband = local->hw.wiphy->bands[band];
+	if (!sband) {
+		WARN_ON(1);
+		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	}
+
+	if (band == IEEE80211_BAND_2GHZ)
+		mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+	else
+		mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+
+	bitrates = sband->bitrates;
+	mandatory_rates = 0;
+	for (i = 0; i < sband->n_bitrates; i++)
+		if (bitrates[i].flags & mandatory_flag)
+			mandatory_rates |= BIT(i);
+	return mandatory_rates;
+}
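
ieee802_11_parse_elems() above walks a management-frame body as a sequence of (ID, length, payload) information elements and bails out as soon as an element claims more bytes than remain. A self-contained sketch of the same TLV walk (generic helper names, not mac80211 API):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Walk a buffer of 802.11 information elements: a 1-byte ID, a 1-byte
 * length, then 'length' bytes of payload.  Stop as soon as an element
 * claims more bytes than remain, like the 'elen > left' check above. */
static void walk_elements(const uint8_t *pos, size_t left)
{
	while (left >= 2) {
		uint8_t id = pos[0];
		uint8_t elen = pos[1];

		pos += 2;
		left -= 2;

		if (elen > left)
			return;	/* truncated element */

		printf("IE %u, %u byte(s)\n", (unsigned)id, (unsigned)elen);

		pos += elen;
		left -= elen;
	}
}

int main(void)
{
	/* SSID "ab" followed by a DS Parameter Set (channel 6). */
	static const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x06 };

	walk_elements(ies, sizeof(ies));
	return 0;
}
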
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5c2bf0a..376c849 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -228,11 +228,10 @@
 		return -1;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
-	if (skb->len < 8 + hdrlen)
+	if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
 		return -1;
 
-	len = skb->len - hdrlen - 8;
+	len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
 
 	keyidx = skb->data[hdrlen + 3] >> 6;
 
@@ -292,9 +291,10 @@
 ieee80211_rx_result
 ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
 {
-	if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
-	    ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
-	     (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+
+	if (!ieee80211_is_data(hdr->frame_control) &&
+	    !ieee80211_is_auth(hdr->frame_control))
 		return RX_CONTINUE;
 
 	if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
@@ -303,7 +303,7 @@
 	} else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
 		ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
 		/* remove ICV */
-		skb_trim(rx->skb, rx->skb->len - 4);
+		skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
 	}
 
 	return RX_CONTINUE;
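
The wep.c hunk replaces the magic constants 8 and 4 with WEP_IV_LEN and WEP_ICV_LEN: a WEP-protected frame carries a 4-byte IV right after the 802.11 header and a 4-byte ICV at the tail. A small sketch of the resulting length check, with the constant values assumed to match mac80211's definitions:

#include <stddef.h>

/* Assumed values of the mac80211 constants named in the hunk: a WEP
 * frame carries a 4-byte IV after the 802.11 header and a 4-byte ICV
 * at the tail. */
#define WEP_IV_LEN  4
#define WEP_ICV_LEN 4

/* Length of the encrypted payload, or -1 if the frame is too short to
 * hold both the IV and the ICV -- the check the hunk spells out by
 * name instead of with the magic number 8. */
int wep_payload_len(size_t skb_len, size_t hdrlen)
{
	if (skb_len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
		return -1;
	return (int)(skb_len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN);
}
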
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 34fa8ed..7e0d53a 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,22 +27,19 @@
 #include "aes_ccm.h"
 
 
-static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
+static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
 				    int idx, int alg, int remove,
 				    int set_tx_key, const u8 *_key,
 				    size_t key_len)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	struct ieee80211_key *key;
-	struct ieee80211_sub_if_data *sdata;
 	int err;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
 	if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
 		printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
-		       dev->name, idx);
+		       sdata->dev->name, idx);
 		return -EINVAL;
 	}
 
@@ -125,13 +122,13 @@
 	if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
 		return -EOPNOTSUPP;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
-		int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length);
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
 		if (ret)
 			return ret;
 		sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
-		ieee80211_sta_req_auth(dev, &sdata->u.sta);
+		ieee80211_sta_req_auth(sdata, &sdata->u.sta);
 		return 0;
 	}
 
@@ -276,21 +273,21 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	int type;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		return -EOPNOTSUPP;
 
 	switch (*mode) {
 	case IW_MODE_INFRA:
-		type = IEEE80211_IF_TYPE_STA;
+		type = NL80211_IFTYPE_STATION;
 		break;
 	case IW_MODE_ADHOC:
-		type = IEEE80211_IF_TYPE_IBSS;
+		type = NL80211_IFTYPE_ADHOC;
 		break;
 	case IW_MODE_REPEAT:
-		type = IEEE80211_IF_TYPE_WDS;
+		type = NL80211_IFTYPE_WDS;
 		break;
 	case IW_MODE_MONITOR:
-		type = IEEE80211_IF_TYPE_MNTR;
+		type = NL80211_IFTYPE_MONITOR;
 		break;
 	default:
 		return -EINVAL;
@@ -308,22 +305,22 @@
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	switch (sdata->vif.type) {
-	case IEEE80211_IF_TYPE_AP:
+	case NL80211_IFTYPE_AP:
 		*mode = IW_MODE_MASTER;
 		break;
-	case IEEE80211_IF_TYPE_STA:
+	case NL80211_IFTYPE_STATION:
 		*mode = IW_MODE_INFRA;
 		break;
-	case IEEE80211_IF_TYPE_IBSS:
+	case NL80211_IFTYPE_ADHOC:
 		*mode = IW_MODE_ADHOC;
 		break;
-	case IEEE80211_IF_TYPE_MNTR:
+	case NL80211_IFTYPE_MONITOR:
 		*mode = IW_MODE_MONITOR;
 		break;
-	case IEEE80211_IF_TYPE_WDS:
+	case NL80211_IFTYPE_WDS:
 		*mode = IW_MODE_REPEAT;
 		break;
-	case IEEE80211_IF_TYPE_VLAN:
+	case NL80211_IFTYPE_AP_VLAN:
 		*mode = IW_MODE_SECOND;		/* FIXME */
 		break;
 	default:
@@ -333,60 +330,31 @@
 	return 0;
 }
 
-int ieee80211_set_freq(struct net_device *dev, int freqMHz)
-{
-	int ret = -EINVAL;
-	struct ieee80211_channel *chan;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
-
-	if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
-		if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
-		    chan->flags & IEEE80211_CHAN_NO_IBSS) {
-			printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
-				"%d MHz\n", dev->name, chan->center_freq);
-			return ret;
-		}
-		local->oper_channel = chan;
-
-		if (local->sta_sw_scanning || local->sta_hw_scanning)
-			ret = 0;
-		else
-			ret = ieee80211_hw_config(local);
-
-		rate_control_clear(local);
-	}
-
-	return ret;
-}
-
 static int ieee80211_ioctl_siwfreq(struct net_device *dev,
 				   struct iw_request_info *info,
 				   struct iw_freq *freq, char *extra)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA)
+	if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
 
 	/* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
 	if (freq->e == 0) {
 		if (freq->m < 0) {
-			if (sdata->vif.type == IEEE80211_IF_TYPE_STA)
+			if (sdata->vif.type == NL80211_IFTYPE_STATION)
 				sdata->u.sta.flags |=
 					IEEE80211_STA_AUTO_CHANNEL_SEL;
 			return 0;
 		} else
-			return ieee80211_set_freq(dev,
+			return ieee80211_set_freq(sdata,
 				ieee80211_channel_to_frequency(freq->m));
 	} else {
 		int i, div = 1000000;
 		for (i = 0; i < freq->e; i++)
 			div /= 10;
 		if (div > 0)
-			return ieee80211_set_freq(dev, freq->m / div);
+			return ieee80211_set_freq(sdata, freq->m / div);
 		else
 			return -EINVAL;
 	}
@@ -418,8 +386,8 @@
 		len--;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		int ret;
 		if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
 			if (len > IEEE80211_MAX_SSID_LEN)
@@ -432,14 +400,14 @@
 			sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
 		else
 			sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL;
-		ret = ieee80211_sta_set_ssid(dev, ssid, len);
+		ret = ieee80211_sta_set_ssid(sdata, ssid, len);
 		if (ret)
 			return ret;
-		ieee80211_sta_req_auth(dev, &sdata->u.sta);
+		ieee80211_sta_req_auth(sdata, &sdata->u.sta);
 		return 0;
 	}
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		memcpy(sdata->u.ap.ssid, ssid, len);
 		memset(sdata->u.ap.ssid + len, 0,
 		       IEEE80211_MAX_SSID_LEN - len);
@@ -458,9 +426,9 @@
 
 	struct ieee80211_sub_if_data *sdata;
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
-		int res = ieee80211_sta_get_ssid(dev, ssid, &len);
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
 		if (res == 0) {
 			data->length = len;
 			data->flags = 1;
@@ -469,7 +437,7 @@
 		return res;
 	}
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		len = sdata->u.ap.ssid_len;
 		if (len > IW_ESSID_MAX_SIZE)
 			len = IW_ESSID_MAX_SIZE;
@@ -489,8 +457,8 @@
 	struct ieee80211_sub_if_data *sdata;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		int ret;
 		if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
 			memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data,
@@ -504,12 +472,12 @@
 			sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL;
 		else
 			sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
-		ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data);
+		ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
 		if (ret)
 			return ret;
-		ieee80211_sta_req_auth(dev, &sdata->u.sta);
+		ieee80211_sta_req_auth(sdata, &sdata->u.sta);
 		return 0;
-	} else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
+	} else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
 		/*
 		 * If it is necessary to update the WDS peer address
 		 * while the interface is running, then we need to do
@@ -537,10 +505,10 @@
 	struct ieee80211_sub_if_data *sdata;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
-		if (sdata->u.sta.state == IEEE80211_ASSOCIATED ||
-		    sdata->u.sta.state == IEEE80211_IBSS_JOINED) {
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED ||
+		    sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) {
 			ap_addr->sa_family = ARPHRD_ETHER;
 			memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
 			return 0;
@@ -548,7 +516,7 @@
 			memset(&ap_addr->sa_data, 0, ETH_ALEN);
 			return 0;
 		}
-	} else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
+	} else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
 		ap_addr->sa_family = ARPHRD_ETHER;
 		memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
 		return 0;
@@ -570,10 +538,10 @@
 	if (!netif_running(dev))
 		return -ENETDOWN;
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_AP)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+	    sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EOPNOTSUPP;
 
 	/* if SSID was specified explicitly then use that */
@@ -584,7 +552,7 @@
 		ssid_len = req->essid_len;
 	}
 
-	return ieee80211_sta_req_scan(dev, ssid, ssid_len);
+	return ieee80211_request_scan(sdata, ssid, ssid_len);
 }
 
 
@@ -594,11 +562,14 @@
 {
 	int res;
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_sub_if_data *sdata;
 
-	if (local->sta_sw_scanning || local->sta_hw_scanning)
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	if (local->sw_scanning || local->hw_scanning)
 		return -EAGAIN;
 
-	res = ieee80211_sta_scan_results(dev, info, extra, data->length);
+	res = ieee80211_scan_results(local, info, extra, data->length);
 	if (res >= 0) {
 		data->length = res;
 		return 0;
@@ -656,7 +627,7 @@
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EOPNOTSUPP;
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -665,8 +636,8 @@
 
 	sta = sta_info_get(local, sdata->u.sta.bssid);
 
-	if (sta && sta->txrate_idx < sband->n_bitrates)
-		rate->value = sband->bitrates[sta->txrate_idx].bitrate;
+	if (sta && sta->last_txrate_idx < sband->n_bitrates)
+		rate->value = sband->bitrates[sta->last_txrate_idx].bitrate;
 	else
 		rate->value = 0;
 
@@ -887,17 +858,17 @@
 	struct iw_mlme *mlme = (struct iw_mlme *) extra;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
 		return -EINVAL;
 
 	switch (mlme->cmd) {
 	case IW_MLME_DEAUTH:
 		/* TODO: mlme->addr.sa_data */
-		return ieee80211_sta_deauthenticate(dev, mlme->reason_code);
+		return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
 	case IW_MLME_DISASSOC:
 		/* TODO: mlme->addr.sa_data */
-		return ieee80211_sta_disassociate(dev, mlme->reason_code);
+		return ieee80211_sta_disassociate(sdata, mlme->reason_code);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -938,7 +909,7 @@
 	}
 
 	return ieee80211_set_encryption(
-		dev, bcaddr,
+		sdata, bcaddr,
 		idx, alg, remove,
 		!sdata->default_key,
 		keybuf, erq->length);
@@ -983,7 +954,7 @@
 	erq->length = sdata->keys[idx]->conf.keylen;
 	erq->flags |= IW_ENCODE_ENABLED;
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
+	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 		switch (ifsta->auth_alg) {
 		case WLAN_AUTH_OPEN:
@@ -1057,7 +1028,7 @@
 		sdata->drop_unencrypted = !!data->value;
 		break;
 	case IW_AUTH_PRIVACY_INVOKED:
-		if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+		if (sdata->vif.type != NL80211_IFTYPE_STATION)
 			ret = -EINVAL;
 		else {
 			sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
@@ -1072,8 +1043,8 @@
 		}
 		break;
 	case IW_AUTH_80211_AUTH_ALG:
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-		    sdata->vif.type == IEEE80211_IF_TYPE_IBSS)
+		if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+		    sdata->vif.type == NL80211_IFTYPE_ADHOC)
 			sdata->u.sta.auth_algs = data->value;
 		else
 			ret = -EOPNOTSUPP;
@@ -1095,8 +1066,8 @@
 
 	rcu_read_lock();
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS)
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC)
 		sta = sta_info_get(local, sdata->u.sta.bssid);
 	if (!sta) {
 		wstats->discard.fragment = 0;
@@ -1126,8 +1097,8 @@
 
 	switch (data->flags & IW_AUTH_INDEX) {
 	case IW_AUTH_80211_AUTH_ALG:
-		if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
-		    sdata->vif.type == IEEE80211_IF_TYPE_IBSS)
+		if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+		    sdata->vif.type == NL80211_IFTYPE_ADHOC)
 			data->value = sdata->u.sta.auth_algs;
 		else
 			ret = -EOPNOTSUPP;
@@ -1184,7 +1155,7 @@
 	} else
 		idx--;
 
-	return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg,
+	return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
 					remove,
 					ext->ext_flags &
 					IW_ENCODE_EXT_SET_TX_KEY,
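
The siwfreq handler above decodes wireless-extensions frequencies: with freq->e == 0 the m field is a channel number, otherwise the frequency is m * 10^e Hz and the handler reduces it to MHz by dividing m by 10^(6 - e). A standalone sketch of that conversion (the helper name is illustrative):

#include <stdio.h>

/* Convert a wireless-extensions (m, e) frequency to MHz, mirroring the
 * division loop in the siwfreq handler above; returns -1 when the
 * exponent exhausts the divisor. */
static int iw_freq_to_mhz(int m, int e)
{
	int div = 1000000;
	int i;

	for (i = 0; i < e; i++)
		div /= 10;
	return div > 0 ? m / div : -1;
}

int main(void)
{
	/* 2.412 GHz is commonly encoded as m = 241200, e = 4. */
	printf("%d MHz\n", iw_freq_to_mhz(241200, 4));
	return 0;
}
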
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4310e2f..139b5f2 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -39,7 +39,7 @@
 		return skb->priority - 256;
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		dscp = ip_hdr(skb)->tos & 0xfc;
 		break;
 
@@ -47,8 +47,6 @@
 		return 0;
 	}
 
-	if (dscp & 0x1c)
-		return 0;
 	return dscp >> 5;
 }
 
@@ -75,9 +73,8 @@
 
 
 /* Indicate which queue to use.  */
-static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
+static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
 {
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
 	if (!ieee80211_is_data(hdr->frame_control)) {
@@ -115,14 +112,15 @@
 
 u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
+	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
+	struct ieee80211_local *local = mpriv->local;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct sta_info *sta;
 	u16 queue;
 	u8 tid;
 
-	queue = classify80211(skb, dev);
+	queue = classify80211(local, skb);
 	if (unlikely(queue >= local->hw.queues))
 		queue = local->hw.queues - 1;
 
@@ -212,7 +210,7 @@
 				DECLARE_MAC_BUF(mac);
 				printk(KERN_DEBUG "allocated aggregation queue"
 					" %d tid %d addr %s pool=0x%lX\n",
-					i, tid, print_mac(mac, sta->addr),
+					i, tid, print_mac(mac, sta->sta.addr),
 					local->queue_pool[0]);
 			}
 #endif /* CONFIG_MAC80211_HT_DEBUG */
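
With the 'dscp & 0x1c' early return removed, the 802.1d classification helper above maps an IPv4 TOS byte to a priority simply by masking off the two ECN bits and keeping the top three DSCP bits. A tiny sketch of that mapping:

#include <stdint.h>

/* Map an IPv4 TOS/DS byte to an 802.1d priority (0..7): drop the two
 * ECN bits, then keep the three class-selector bits of the DSCP. */
unsigned int tos_to_1d_priority(uint8_t tos)
{
	uint8_t dscp = tos & 0xfc;

	return dscp >> 5;
}

A TOS of 0xb8 (DSCP 46, expedited forwarding) maps to priority 5, for instance.
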
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 04de28c..bc62f28 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -1,5 +1,4 @@
 /*
- * IEEE 802.11 driver (80211.o) - QoS datatypes
  * Copyright 2004, Instant802 Networks, Inc.
  * Copyright 2005, Devicescape Software, Inc.
  *
@@ -14,8 +13,6 @@
 #include <linux/netdevice.h>
 #include "ieee80211_i.h"
 
-#define QOS_CONTROL_LEN 2
-
 #define QOS_CONTROL_ACK_POLICY_NORMAL 0
 #define QOS_CONTROL_ACK_POLICY_NOACK 1
 
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 2f33df0..37ae9a9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -127,7 +127,7 @@
 		if (!(rx->flags & IEEE80211_RX_RA_MATCH))
 			return RX_DROP_UNUSABLE;
 
-		mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx,
+		mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
 						(void *) skb->data);
 		return RX_DROP_UNUSABLE;
 	}
@@ -256,7 +256,7 @@
 
 	res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
 					  key, skb->data + hdrlen,
-					  skb->len - hdrlen, rx->sta->addr,
+					  skb->len - hdrlen, rx->sta->sta.addr,
 					  hdr->addr1, hwaccel, rx->queue,
 					  &rx->tkip_iv32,
 					  &rx->tkip_iv16);
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 9f32859..307a2c3 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -136,17 +136,19 @@
 	 * from w repeatedly while counting.)
 	 */
 	if (is_leap(year)) {
+		/* use days_since_leapyear[] in a leap year */
 		for (i = ARRAY_SIZE(days_since_leapyear) - 1;
-		    i > 0 && days_since_year[i] > w; --i)
+		    i > 0 && days_since_leapyear[i] > w; --i)
 			/* just loop */;
+		r->monthday = w - days_since_leapyear[i] + 1;
 	} else {
 		for (i = ARRAY_SIZE(days_since_year) - 1;
 		    i > 0 && days_since_year[i] > w; --i)
 			/* just loop */;
+		r->monthday = w - days_since_year[i] + 1;
 	}
 
 	r->month    = i + 1;
-	r->monthday = w - days_since_year[i] + 1;
 	return;
 }
 
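
The xt_time fix above selects the cumulative-days table that matches the year (days_since_leapyear[] in leap years) and computes the day of month from that same table, instead of mixing the two. A standalone sketch of the corrected conversion, with tables holding the standard cumulative month lengths:

#include <stdio.h>

/* Cumulative day-of-year at the start of each month (0-based), one
 * table for common years and one for leap years, mirroring xt_time.c. */
static const unsigned int days_since_year[] = {
	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
};
static const unsigned int days_since_leapyear[] = {
	0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335,
};

static int is_leap(unsigned int y)
{
	return (y % 4 == 0 && y % 100 != 0) || y % 400 == 0;
}

/* Convert a 0-based day-of-year 'w' into month (1-12) and day of month
 * (1-31), picking the table that matches the year -- the mix-up the
 * hunk above corrects. */
static void month_and_day(unsigned int year, unsigned int w,
			  unsigned int *month, unsigned int *monthday)
{
	const unsigned int *tbl = is_leap(year) ? days_since_leapyear
						: days_since_year;
	int i;

	for (i = 11; i > 0 && tbl[i] > w; --i)
		;
	*month = i + 1;
	*monthday = w - tbl[i] + 1;
}

int main(void)
{
	unsigned int m, d;

	month_and_day(2008, 60, &m, &d);	/* day 60 of 2008 = March 1st */
	printf("%u-%02u-%02u\n", 2008u, m, d);
	return 0;
}
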
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
new file mode 100644
index 0000000..51a5669
--- /dev/null
+++ b/net/phonet/Kconfig
@@ -0,0 +1,16 @@
+#
+# Phonet protocol
+#
+
+config PHONET
+	tristate "Phonet protocols family"
+	help
+	  The Phone Network protocol (PhoNet) is a packet-oriented
+	  communication protocol developed by Nokia for use with its modems.
+
+	  This is required for Maemo to use cellular data connectivity (if
+	  supported). It can also be used to control Nokia phones
+	  from a Linux computer, although AT commands may be easier to use.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phonet. If unsure, say N.
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
new file mode 100644
index 0000000..ae9c3ed
--- /dev/null
+++ b/net/phonet/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_PHONET) += phonet.o
+
+phonet-objs := \
+	pn_dev.o \
+	pn_netlink.o \
+	socket.o \
+	datagram.o \
+	sysctl.o \
+	af_phonet.o
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
new file mode 100644
index 0000000..1d8df6b
--- /dev/null
+++ b/net/phonet/af_phonet.c
@@ -0,0 +1,468 @@
+/*
+ * File: af_phonet.c
+ *
+ * Phonet protocols family
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+
+#include <linux/if_phonet.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pn_dev.h>
+
+static struct net_proto_family phonet_proto_family;
+static struct phonet_protocol *phonet_proto_get(int protocol);
+static inline void phonet_proto_put(struct phonet_protocol *pp);
+
+/* protocol family functions */
+
+static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
+{
+	struct sock *sk;
+	struct pn_sock *pn;
+	struct phonet_protocol *pnp;
+	int err;
+
+	if (net != &init_net)
+		return -EAFNOSUPPORT;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (protocol == 0) {
+		/* Default protocol selection */
+		switch (sock->type) {
+		case SOCK_DGRAM:
+			protocol = PN_PROTO_PHONET;
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+
+	pnp = phonet_proto_get(protocol);
+	if (pnp == NULL)
+		return -EPROTONOSUPPORT;
+	if (sock->type != pnp->sock_type) {
+		err = -EPROTONOSUPPORT;
+		goto out;
+	}
+
+	sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
+	if (sk == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = pnp->ops;
+	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+	sk->sk_protocol = protocol;
+	pn = pn_sk(sk);
+	pn->sobject = 0;
+	pn->resource = 0;
+	sk->sk_prot->init(sk);
+	err = 0;
+
+out:
+	phonet_proto_put(pnp);
+	return err;
+}
+
+static struct net_proto_family phonet_proto_family = {
+	.family = AF_PHONET,
+	.create = pn_socket_create,
+	.owner = THIS_MODULE,
+};
+
+/* Phonet device header operations */
+static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
+				unsigned short type, const void *daddr,
+				const void *saddr, unsigned len)
+{
+	u8 *media = skb_push(skb, 1);
+
+	if (type != ETH_P_PHONET)
+		return -1;
+
+	if (!saddr)
+		saddr = dev->dev_addr;
+	*media = *(const u8 *)saddr;
+	return 1;
+}
+
+static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+	const u8 *media = skb_mac_header(skb);
+	*haddr = *media;
+	return 1;
+}
+
+struct header_ops phonet_header_ops = {
+	.create = pn_header_create,
+	.parse = pn_header_parse,
+};
+EXPORT_SYMBOL(phonet_header_ops);
+
+/*
+ * Prepends an ISI header and sends a datagram.
+ */
+static int pn_send(struct sk_buff *skb, struct net_device *dev,
+			u16 dst, u16 src, u8 res, u8 irq)
+{
+	struct phonethdr *ph;
+	int err;
+
+	if (skb->len + 2 > 0xffff) {
+		/* Phonet length field would overflow */
+		err = -EMSGSIZE;
+		goto drop;
+	}
+
+	skb_reset_transport_header(skb);
+	WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
+	skb_push(skb, sizeof(struct phonethdr));
+	skb_reset_network_header(skb);
+	ph = pn_hdr(skb);
+	ph->pn_rdev = pn_dev(dst);
+	ph->pn_sdev = pn_dev(src);
+	ph->pn_res = res;
+	ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
+	ph->pn_robj = pn_obj(dst);
+	ph->pn_sobj = pn_obj(src);
+
+	skb->protocol = htons(ETH_P_PHONET);
+	skb->priority = 0;
+	skb->dev = dev;
+
+	if (pn_addr(src) == pn_addr(dst)) {
+		skb_reset_mac_header(skb);
+		skb->pkt_type = PACKET_LOOPBACK;
+		skb_orphan(skb);
+		if (irq)
+			netif_rx(skb);
+		else
+			netif_rx_ni(skb);
+		err = 0;
+	} else {
+		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+					NULL, NULL, skb->len);
+		if (err < 0) {
+			err = -EHOSTUNREACH;
+			goto drop;
+		}
+		err = dev_queue_xmit(skb);
+	}
+
+	return err;
+drop:
+	kfree_skb(skb);
+	return err;
+}
+
+static int pn_raw_send(const void *data, int len, struct net_device *dev,
+			u16 dst, u16 src, u8 res)
+{
+	struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(skb, MAX_PHONET_HEADER);
+	__skb_put(skb, len);
+	skb_copy_to_linear_data(skb, data, len);
+	return pn_send(skb, dev, dst, src, res, 1);
+}
+
+/*
+ * Create a Phonet header for the skb and send it out. Returns a
+ * non-zero error code on failure, in which case the skb is freed.
+ */
+int pn_skb_send(struct sock *sk, struct sk_buff *skb,
+		const struct sockaddr_pn *target)
+{
+	struct net_device *dev;
+	struct pn_sock *pn = pn_sk(sk);
+	int err;
+	u16 src;
+	u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
+
+	err = -EHOSTUNREACH;
+	if (sk->sk_bound_dev_if)
+		dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
+	else
+		dev = phonet_device_get(sock_net(sk));
+	if (!dev || !(dev->flags & IFF_UP))
+		goto drop;
+
+	saddr = phonet_address_get(dev, daddr);
+	if (saddr == PN_NO_ADDR)
+		goto drop;
+
+	src = pn->sobject;
+	if (!pn_addr(src))
+		src = pn_object(saddr, pn_obj(src));
+
+	err = pn_send(skb, dev, pn_sockaddr_get_object(target),
+			src, pn_sockaddr_get_resource(target), 0);
+	dev_put(dev);
+	return err;
+
+drop:
+	kfree_skb(skb);
+	if (dev)
+		dev_put(dev);
+	return err;
+}
+EXPORT_SYMBOL(pn_skb_send);
+
+/* Do not send an error message in response to an error message */
+static inline int can_respond(struct sk_buff *skb)
+{
+	const struct phonethdr *ph;
+	const struct phonetmsg *pm;
+	u8 submsg_id;
+
+	if (!pskb_may_pull(skb, 3))
+		return 0;
+
+	ph = pn_hdr(skb);
+	if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev)
+		return 0; /* we are not the destination */
+	if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
+		return 0;
+
+	ph = pn_hdr(skb); /* re-acquires the pointer */
+	pm = pn_msg(skb);
+	if (pm->pn_msg_id != PN_COMMON_MESSAGE)
+		return 1;
+	submsg_id = (ph->pn_res == PN_PREFIX)
+		? pm->pn_e_submsg_id : pm->pn_submsg_id;
+	if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP &&
+		pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP)
+		return 1;
+	return 0;
+}
+
+static int send_obj_unreachable(struct sk_buff *rskb)
+{
+	const struct phonethdr *oph = pn_hdr(rskb);
+	const struct phonetmsg *opm = pn_msg(rskb);
+	struct phonetmsg resp;
+
+	memset(&resp, 0, sizeof(resp));
+	resp.pn_trans_id = opm->pn_trans_id;
+	resp.pn_msg_id = PN_COMMON_MESSAGE;
+	if (oph->pn_res == PN_PREFIX) {
+		resp.pn_e_res_id = opm->pn_e_res_id;
+		resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
+		resp.pn_e_orig_msg_id = opm->pn_msg_id;
+		resp.pn_e_status = 0;
+	} else {
+		resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
+		resp.pn_orig_msg_id = opm->pn_msg_id;
+		resp.pn_status = 0;
+	}
+	return pn_raw_send(&resp, sizeof(resp), rskb->dev,
+				pn_object(oph->pn_sdev, oph->pn_sobj),
+				pn_object(oph->pn_rdev, oph->pn_robj),
+				oph->pn_res);
+}
+
+static int send_reset_indications(struct sk_buff *rskb)
+{
+	struct phonethdr *oph = pn_hdr(rskb);
+	static const u8 data[4] = {
+		0x00 /* trans ID */, 0x10 /* subscribe msg */,
+		0x00 /* subscription count */, 0x00 /* dummy */
+	};
+
+	return pn_raw_send(data, sizeof(data), rskb->dev,
+				pn_object(oph->pn_sdev, 0x00),
+				pn_object(oph->pn_rdev, oph->pn_robj), 0x10);
+}
+
+
+/* packet type functions */
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
+			struct packet_type *pkttype,
+			struct net_device *orig_dev)
+{
+	struct phonethdr *ph;
+	struct sock *sk;
+	struct sockaddr_pn sa;
+	u16 len;
+
+	if (dev_net(dev) != &init_net)
+		goto out;
+
+	/* check we have at least a full Phonet header */
+	if (!pskb_pull(skb, sizeof(struct phonethdr)))
+		goto out;
+
+	/* check that the advertised length is correct */
+	ph = pn_hdr(skb);
+	len = get_unaligned_be16(&ph->pn_length);
+	if (len < 2)
+		goto out;
+	len -= 2;
+	if ((len > skb->len) || pskb_trim(skb, len))
+		goto out;
+	skb_reset_transport_header(skb);
+
+	pn_skb_get_dst_sockaddr(skb, &sa);
+	if (pn_sockaddr_get_addr(&sa) == 0)
+		goto out; /* currently, we cannot be device 0 */
+
+	sk = pn_find_sock_by_sa(&sa);
+	if (sk == NULL) {
+		if (can_respond(skb)) {
+			send_obj_unreachable(skb);
+			send_reset_indications(skb);
+		}
+		goto out;
+	}
+
+	/* Push data to the socket (or other sockets connected to it). */
+	return sk_receive_skb(sk, skb, 0);
+
+out:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static struct packet_type phonet_packet_type = {
+	.type = __constant_htons(ETH_P_PHONET),
+	.dev = NULL,
+	.func = phonet_rcv,
+};
+
+/* Transport protocol registration */
+static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
+static DEFINE_SPINLOCK(proto_tab_lock);
+
+int __init_or_module phonet_proto_register(int protocol,
+						struct phonet_protocol *pp)
+{
+	int err = 0;
+
+	if (protocol >= PHONET_NPROTO)
+		return -EINVAL;
+
+	err = proto_register(pp->prot, 1);
+	if (err)
+		return err;
+
+	spin_lock(&proto_tab_lock);
+	if (proto_tab[protocol])
+		err = -EBUSY;
+	else
+		proto_tab[protocol] = pp;
+	spin_unlock(&proto_tab_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(phonet_proto_register);
+
+void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+{
+	spin_lock(&proto_tab_lock);
+	BUG_ON(proto_tab[protocol] != pp);
+	proto_tab[protocol] = NULL;
+	spin_unlock(&proto_tab_lock);
+	proto_unregister(pp->prot);
+}
+EXPORT_SYMBOL(phonet_proto_unregister);
+
+static struct phonet_protocol *phonet_proto_get(int protocol)
+{
+	struct phonet_protocol *pp;
+
+	if (protocol >= PHONET_NPROTO)
+		return NULL;
+
+	spin_lock(&proto_tab_lock);
+	pp = proto_tab[protocol];
+	if (pp && !try_module_get(pp->prot->owner))
+		pp = NULL;
+	spin_unlock(&proto_tab_lock);
+
+	return pp;
+}
+
+static inline void phonet_proto_put(struct phonet_protocol *pp)
+{
+	module_put(pp->prot->owner);
+}
+
+/* Module registration */
+static int __init phonet_init(void)
+{
+	int err;
+
+	err = sock_register(&phonet_proto_family);
+	if (err) {
+		printk(KERN_ALERT
+			"phonet protocol family initialization failed\n");
+		return err;
+	}
+
+	phonet_device_init();
+	dev_add_pack(&phonet_packet_type);
+	phonet_netlink_register();
+	phonet_sysctl_init();
+
+	err = isi_register();
+	if (err)
+		goto err;
+	return 0;
+
+err:
+	phonet_sysctl_exit();
+	sock_unregister(AF_PHONET);
+	dev_remove_pack(&phonet_packet_type);
+	phonet_device_exit();
+	return err;
+}
+
+static void __exit phonet_exit(void)
+{
+	isi_unregister();
+	phonet_sysctl_exit();
+	sock_unregister(AF_PHONET);
+	dev_remove_pack(&phonet_packet_type);
+	phonet_device_exit();
+}
+
+module_init(phonet_init);
+module_exit(phonet_exit);
+MODULE_DESCRIPTION("Phonet protocol stack for Linux");
+MODULE_LICENSE("GPL");
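
pn_socket_create() above accepts SOCK_DGRAM sockets for the default Phonet protocol and requires CAP_SYS_ADMIN. A userspace sketch of opening such a socket; the fallback values of AF_PHONET and PN_PROTO_PHONET are assumptions based on the kernel headers, not part of this patch:

#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_PHONET
#define AF_PHONET 35		/* assumption: value used by the kernel */
#endif
#ifndef PN_PROTO_PHONET
#define PN_PROTO_PHONET 0	/* default protocol, per pn_socket_create() */
#endif

int main(void)
{
	int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0) {
		perror("socket(AF_PHONET, SOCK_DGRAM)");	/* EPERM without CAP_SYS_ADMIN */
		return 1;
	}
	printf("Phonet datagram socket opened, fd=%d\n", fd);
	return 0;
}
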
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
new file mode 100644
index 0000000..e087862
--- /dev/null
+++ b/net/phonet/datagram.c
@@ -0,0 +1,197 @@
+/*
+ * File: datagram.c
+ *
+ * Datagram (ISI) Phonet sockets
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/socket.h>
+#include <asm/ioctls.h>
+#include <net/sock.h>
+
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+
+static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+/* associated socket ceases to exist */
+static void pn_sock_close(struct sock *sk, long timeout)
+{
+	sk_common_release(sk);
+}
+
+static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+	struct sk_buff *skb;
+	int answ;
+
+	switch (cmd) {
+	case SIOCINQ:
+		lock_sock(sk);
+		skb = skb_peek(&sk->sk_receive_queue);
+		answ = skb ? skb->len : 0;
+		release_sock(sk);
+		return put_user(answ, (int __user *)arg);
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+/* Destroy socket. All references are gone. */
+static void pn_destruct(struct sock *sk)
+{
+	skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static int pn_init(struct sock *sk)
+{
+	sk->sk_destruct = pn_destruct;
+	return 0;
+}
+
+static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
+			struct msghdr *msg, size_t len)
+{
+	struct sockaddr_pn *target;
+	struct sk_buff *skb;
+	int err;
+
+	if (msg->msg_flags & MSG_OOB)
+		return -EOPNOTSUPP;
+
+	if (msg->msg_name == NULL)
+		return -EDESTADDRREQ;
+
+	if (msg->msg_namelen < sizeof(struct sockaddr_pn))
+		return -EINVAL;
+
+	target = (struct sockaddr_pn *)msg->msg_name;
+	if (target->spn_family != AF_PHONET)
+		return -EAFNOSUPPORT;
+
+	skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
+					msg->msg_flags & MSG_DONTWAIT, &err);
+	if (skb == NULL)
+		return err;
+	skb_reserve(skb, MAX_PHONET_HEADER);
+
+	err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
+	if (err < 0) {
+		kfree_skb(skb);
+		return err;
+	}
+
+	/*
+	 * Fill in the Phonet header and
+	 * finally pass the packet forwards.
+	 */
+	err = pn_skb_send(sk, skb, target);
+
+	/* If ok, return len. */
+	return (err >= 0) ? len : err;
+}
+
+static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+			struct msghdr *msg, size_t len, int noblock,
+			int flags, int *addr_len)
+{
+	struct sk_buff *skb = NULL;
+	struct sockaddr_pn sa;
+	int rval = -EOPNOTSUPP;
+	int copylen;
+
+	if (flags & MSG_OOB)
+		goto out_nofree;
+
+	if (addr_len)
+		*addr_len = sizeof(sa);
+
+	skb = skb_recv_datagram(sk, flags, noblock, &rval);
+	if (skb == NULL)
+		goto out_nofree;
+
+	pn_skb_get_src_sockaddr(skb, &sa);
+
+	copylen = skb->len;
+	if (len < copylen) {
+		msg->msg_flags |= MSG_TRUNC;
+		copylen = len;
+	}
+
+	rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
+	if (rval) {
+		rval = -EFAULT;
+		goto out;
+	}
+
+	rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+	if (msg->msg_name != NULL)
+		memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
+
+out:
+	skb_free_datagram(sk, skb);
+
+out_nofree:
+	return rval;
+}
+
+/* Queue an skb for a sock. */
+static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	int err = sock_queue_rcv_skb(sk, skb);
+	if (err < 0)
+		kfree_skb(skb);
+	return err ? NET_RX_DROP : NET_RX_SUCCESS;
+}
+
+/* Module registration */
+static struct proto pn_proto = {
+	.close		= pn_sock_close,
+	.ioctl		= pn_ioctl,
+	.init		= pn_init,
+	.sendmsg	= pn_sendmsg,
+	.recvmsg	= pn_recvmsg,
+	.backlog_rcv	= pn_backlog_rcv,
+	.hash		= pn_sock_hash,
+	.unhash		= pn_sock_unhash,
+	.get_port	= pn_sock_get_port,
+	.obj_size	= sizeof(struct pn_sock),
+	.owner		= THIS_MODULE,
+	.name		= "PHONET",
+};
+
+static struct phonet_protocol pn_dgram_proto = {
+	.ops		= &phonet_dgram_ops,
+	.prot		= &pn_proto,
+	.sock_type	= SOCK_DGRAM,
+};
+
+int __init isi_register(void)
+{
+	return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto);
+}
+
+void __exit isi_unregister(void)
+{
+	phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto);
+}
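
pn_ioctl() above implements SIOCINQ, returning the length of the next queued datagram (or 0 when the receive queue is empty). A userspace sketch of querying it on an already-open Phonet datagram socket; 'fd' is assumed to be such a descriptor:

#include <sys/ioctl.h>

#ifndef SIOCINQ
#define SIOCINQ FIONREAD	/* same definition the kernel headers use */
#endif

/* Return the length of the next queued Phonet datagram on 'fd', 0 if
 * the receive queue is empty, or -1 on error. */
int phonet_next_datagram_len(int fd)
{
	int len = 0;

	if (ioctl(fd, SIOCINQ, &len) < 0)
		return -1;
	return len;
}
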
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
new file mode 100644
index 0000000..53be9fc
--- /dev/null
+++ b/net/phonet/pn_dev.c
@@ -0,0 +1,208 @@
+/*
+ * File: pn_dev.c
+ *
+ * Phonet network device
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/phonet.h>
+#include <net/sock.h>
+#include <net/phonet/pn_dev.h>
+
+/* When accessing the device list below, take pndevs.lock (spin_lock_bh). */
+struct phonet_device_list pndevs = {
+	.list = LIST_HEAD_INIT(pndevs.list),
+	.lock = __SPIN_LOCK_UNLOCKED(pndevs.lock),
+};
+
+/* Allocate new Phonet device. */
+static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
+{
+	struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
+	if (pnd == NULL)
+		return NULL;
+	pnd->netdev = dev;
+	bitmap_zero(pnd->addrs, 64);
+
+	list_add(&pnd->list, &pndevs.list);
+	return pnd;
+}
+
+static struct phonet_device *__phonet_get(struct net_device *dev)
+{
+	struct phonet_device *pnd;
+
+	list_for_each_entry(pnd, &pndevs.list, list) {
+		if (pnd->netdev == dev)
+			return pnd;
+	}
+	return NULL;
+}
+
+static void __phonet_device_free(struct phonet_device *pnd)
+{
+	list_del(&pnd->list);
+	kfree(pnd);
+}
+
+struct net_device *phonet_device_get(struct net *net)
+{
+	struct phonet_device *pnd;
+	struct net_device *dev;
+
+	spin_lock_bh(&pndevs.lock);
+	list_for_each_entry(pnd, &pndevs.list, list) {
+		dev = pnd->netdev;
+		BUG_ON(!dev);
+
+		if (dev_net(dev) == net &&
+			(dev->reg_state == NETREG_REGISTERED) &&
+			((pnd->netdev->flags & IFF_UP)) == IFF_UP)
+			break;
+		dev = NULL;
+	}
+	if (dev)
+		dev_hold(dev);
+	spin_unlock_bh(&pndevs.lock);
+	return dev;
+}
+
+int phonet_address_add(struct net_device *dev, u8 addr)
+{
+	struct phonet_device *pnd;
+	int err = 0;
+
+	spin_lock_bh(&pndevs.lock);
+	/* Find or create Phonet-specific device data */
+	pnd = __phonet_get(dev);
+	if (pnd == NULL)
+		pnd = __phonet_device_alloc(dev);
+	if (unlikely(pnd == NULL))
+		err = -ENOMEM;
+	else if (test_and_set_bit(addr >> 2, pnd->addrs))
+		err = -EEXIST;
+	spin_unlock_bh(&pndevs.lock);
+	return err;
+}
+
+int phonet_address_del(struct net_device *dev, u8 addr)
+{
+	struct phonet_device *pnd;
+	int err = 0;
+
+	spin_lock_bh(&pndevs.lock);
+	pnd = __phonet_get(dev);
+	if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
+		err = -EADDRNOTAVAIL;
+	else if (bitmap_empty(pnd->addrs, 64))
+		__phonet_device_free(pnd);
+	spin_unlock_bh(&pndevs.lock);
+	return err;
+}
+
+/* Gets a source address toward a destination, through an interface. */
+u8 phonet_address_get(struct net_device *dev, u8 addr)
+{
+	struct phonet_device *pnd;
+
+	spin_lock_bh(&pndevs.lock);
+	pnd = __phonet_get(dev);
+	if (pnd) {
+		BUG_ON(bitmap_empty(pnd->addrs, 64));
+
+		/* Use same source address as destination, if possible */
+		if (!test_bit(addr >> 2, pnd->addrs))
+			addr = find_first_bit(pnd->addrs, 64) << 2;
+	} else
+		addr = PN_NO_ADDR;
+	spin_unlock_bh(&pndevs.lock);
+	return addr;
+}
+
+int phonet_address_lookup(u8 addr)
+{
+	struct phonet_device *pnd;
+
+	spin_lock_bh(&pndevs.lock);
+	list_for_each_entry(pnd, &pndevs.list, list) {
+		/* Don't allow unregistering devices! */
+		if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
+				((pnd->netdev->flags & IFF_UP)) != IFF_UP)
+			continue;
+
+		if (test_bit(addr >> 2, pnd->addrs)) {
+			spin_unlock_bh(&pndevs.lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&pndevs.lock);
+	return -EADDRNOTAVAIL;
+}
+
+/* notify Phonet of device events */
+static int phonet_device_notify(struct notifier_block *me, unsigned long what,
+				void *arg)
+{
+	struct net_device *dev = arg;
+
+	if (what == NETDEV_UNREGISTER) {
+		struct phonet_device *pnd;
+
+		/* Destroy phonet-specific device data */
+		spin_lock_bh(&pndevs.lock);
+		pnd = __phonet_get(dev);
+		if (pnd)
+			__phonet_device_free(pnd);
+		spin_unlock_bh(&pndevs.lock);
+	}
+	return 0;
+}
+
+static struct notifier_block phonet_device_notifier = {
+	.notifier_call = phonet_device_notify,
+	.priority = 0,
+};
+
+/* Initialize Phonet devices list */
+void phonet_device_init(void)
+{
+	register_netdevice_notifier(&phonet_device_notifier);
+}
+
+void phonet_device_exit(void)
+{
+	struct phonet_device *pnd, *n;
+
+	rtnl_unregister_all(PF_PHONET);
+	rtnl_lock();
+	spin_lock_bh(&pndevs.lock);
+
+	list_for_each_entry_safe(pnd, n, &pndevs.list, list)
+		__phonet_device_free(pnd);
+
+	spin_unlock_bh(&pndevs.lock);
+	rtnl_unlock();
+	unregister_netdevice_notifier(&phonet_device_notifier);
+}
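
phonet_address_add()/phonet_address_del() above keep per-device addresses in a 64-bit bitmap indexed by addr >> 2, since only the six high-order bits of a Phonet address are significant. A minimal standalone illustration of that mapping (not kernel code):

/* Illustration only: the address <-> bit-index mapping used by the
 * pnd->addrs bitmap above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x64;	/* any address with the low 2 bits clear */
	unsigned int bit = addr >> 2;	/* bit index into the 64-bit bitmap */

	printf("address 0x%02x -> bit %u\n", addr, bit);
	printf("bit %u -> address 0x%02x\n", bit, bit << 2);
	return 0;
}
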
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
new file mode 100644
index 0000000..b1770d6
--- /dev/null
+++ b/net/phonet/pn_netlink.c
@@ -0,0 +1,165 @@
+/*
+ * File: pn_netlink.c
+ *
+ * Phonet netlink interface
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/phonet.h>
+#include <net/sock.h>
+#include <net/phonet/pn_dev.h>
+
+static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
+		     u32 pid, u32 seq, int event);
+
+static void rtmsg_notify(int event, struct net_device *dev, u8 addr)
+{
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+			nla_total_size(1), GFP_KERNEL);
+	if (skb == NULL)
+		goto errout;
+	err = fill_addr(skb, dev, addr, 0, 0, event);
+	if (err < 0) {
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	err = rtnl_notify(skb, dev_net(dev), 0,
+			  RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
+}
+
+static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
+	[IFA_LOCAL] = { .type = NLA_U8 },
+};
+
+static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
+{
+	struct net *net = sock_net(skb->sk);
+	struct nlattr *tb[IFA_MAX+1];
+	struct net_device *dev;
+	struct ifaddrmsg *ifm;
+	int err;
+	u8 pnaddr;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ASSERT_RTNL();
+
+	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy);
+	if (err < 0)
+		return err;
+
+	ifm = nlmsg_data(nlh);
+	if (tb[IFA_LOCAL] == NULL)
+		return -EINVAL;
+	pnaddr = nla_get_u8(tb[IFA_LOCAL]);
+	if (pnaddr & 3)
+		/* Phonet addresses only have 6 high-order bits */
+		return -EINVAL;
+
+	dev = __dev_get_by_index(net, ifm->ifa_index);
+	if (dev == NULL)
+		return -ENODEV;
+
+	if (nlh->nlmsg_type == RTM_NEWADDR)
+		err = phonet_address_add(dev, pnaddr);
+	else
+		err = phonet_address_del(dev, pnaddr);
+	if (!err)
+		rtmsg_notify(nlh->nlmsg_type, dev, pnaddr);
+	return err;
+}
+
+static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
+			u32 pid, u32 seq, int event)
+{
+	struct ifaddrmsg *ifm;
+	struct nlmsghdr *nlh;
+
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ifm = nlmsg_data(nlh);
+	ifm->ifa_family = AF_PHONET;
+	ifm->ifa_prefixlen = 0;
+	ifm->ifa_flags = IFA_F_PERMANENT;
+	ifm->ifa_scope = RT_SCOPE_LINK;
+	ifm->ifa_index = dev->ifindex;
+	NLA_PUT_U8(skb, IFA_LOCAL, addr);
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct phonet_device *pnd;
+	int dev_idx = 0, dev_start_idx = cb->args[0];
+	int addr_idx = 0, addr_start_idx = cb->args[1];
+
+	spin_lock_bh(&pndevs.lock);
+	list_for_each_entry(pnd, &pndevs.list, list) {
+		u8 addr;
+
+		if (dev_idx > dev_start_idx)
+			addr_start_idx = 0;
+		if (dev_idx++ < dev_start_idx)
+			continue;
+
+		addr_idx = 0;
+		for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
+			addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
+			if (addr_idx++ < addr_start_idx)
+				continue;
+
+			if (fill_addr(skb, pnd->netdev, addr << 2,
+					 NETLINK_CB(cb->skb).pid,
+					cb->nlh->nlmsg_seq, RTM_NEWADDR))
+				goto out;
+		}
+	}
+
+out:
+	spin_unlock_bh(&pndevs.lock);
+	cb->args[0] = dev_idx;
+	cb->args[1] = addr_idx;
+
+	return skb->len;
+}
+
+void __init phonet_netlink_register(void)
+{
+	rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL);
+	rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL);
+	rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
+}
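
addr_doit() above is reached through ordinary rtnetlink, keyed off ifa_family == AF_PHONET. As a rough sketch, a request adding Phonet address 0x20 to interface index 2 could be built as below; the ifindex and address are placeholders, ACK handling is omitted, and CAP_SYS_ADMIN is required by the handler.

/* Hypothetical sketch (not from this patch): send RTM_NEWADDR for a
 * Phonet address over a raw NETLINK_ROUTE socket.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>
#include <linux/phonet.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr  nlh;
		struct ifaddrmsg ifa;
		struct rtattr    rta;
		unsigned char    addr;
		unsigned char    pad[3];
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket(AF_NETLINK)");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifa)) +
			      RTA_LENGTH(sizeof(req.addr));
	req.nlh.nlmsg_type  = RTM_NEWADDR;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifa.ifa_family  = AF_PHONET;
	req.ifa.ifa_index   = 2;	/* placeholder interface index */
	req.rta.rta_type    = IFA_LOCAL;
	req.rta.rta_len     = RTA_LENGTH(sizeof(req.addr));
	req.addr            = 0x20;	/* low two bits must be zero */

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}
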
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
new file mode 100644
index 0000000..dfd4061
--- /dev/null
+++ b/net/phonet/socket.c
@@ -0,0 +1,312 @@
+/*
+ * File: socket.c
+ *
+ * Phonet sockets
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pn_dev.h>
+
+static int pn_socket_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (sk) {
+		sock->sk = NULL;
+		sk->sk_prot->close(sk, 0);
+	}
+	return 0;
+}
+
+static struct  {
+	struct hlist_head hlist;
+	spinlock_t lock;
+} pnsocks = {
+	.hlist = HLIST_HEAD_INIT,
+	.lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock),
+};
+
+/*
+ * Find address based on socket address, match only certain fields.
+ * Also grab sock if it was found. Remember to sock_put it later.
+ */
+struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *spn)
+{
+	struct hlist_node *node;
+	struct sock *sknode;
+	struct sock *rval = NULL;
+	u16 obj = pn_sockaddr_get_object(spn);
+	u8 res = spn->spn_resource;
+
+	spin_lock_bh(&pnsocks.lock);
+
+	sk_for_each(sknode, node, &pnsocks.hlist) {
+		struct pn_sock *pn = pn_sk(sknode);
+		BUG_ON(!pn->sobject); /* unbound socket */
+
+		if (pn_port(obj)) {
+			/* Look up socket by port */
+			if (pn_port(pn->sobject) != pn_port(obj))
+				continue;
+		} else {
+			/* If port is zero, look up by resource */
+			if (pn->resource != res)
+				continue;
+		}
+		if (pn_addr(pn->sobject)
+		 && pn_addr(pn->sobject) != pn_addr(obj))
+			continue;
+
+		rval = sknode;
+		sock_hold(sknode);
+		break;
+	}
+
+	spin_unlock_bh(&pnsocks.lock);
+
+	return rval;
+}
+
+void pn_sock_hash(struct sock *sk)
+{
+	spin_lock_bh(&pnsocks.lock);
+	sk_add_node(sk, &pnsocks.hlist);
+	spin_unlock_bh(&pnsocks.lock);
+}
+EXPORT_SYMBOL(pn_sock_hash);
+
+void pn_sock_unhash(struct sock *sk)
+{
+	spin_lock_bh(&pnsocks.lock);
+	sk_del_node_init(sk);
+	spin_unlock_bh(&pnsocks.lock);
+}
+EXPORT_SYMBOL(pn_sock_unhash);
+
+static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
+{
+	struct sock *sk = sock->sk;
+	struct pn_sock *pn = pn_sk(sk);
+	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
+	int err;
+	u16 handle;
+	u8 saddr;
+
+	if (sk->sk_prot->bind)
+		return sk->sk_prot->bind(sk, addr, len);
+
+	if (len < sizeof(struct sockaddr_pn))
+		return -EINVAL;
+	if (spn->spn_family != AF_PHONET)
+		return -EAFNOSUPPORT;
+
+	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
+	saddr = pn_addr(handle);
+	if (saddr && phonet_address_lookup(saddr))
+		return -EADDRNOTAVAIL;
+
+	lock_sock(sk);
+	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
+		err = -EINVAL; /* attempt to rebind */
+		goto out;
+	}
+	err = sk->sk_prot->get_port(sk, pn_port(handle));
+	if (err)
+		goto out;
+
+	/* get_port() sets the port, bind() sets the address if applicable */
+	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
+	pn->resource = spn->spn_resource;
+
+	/* Enable RX on the socket */
+	sk->sk_prot->hash(sk);
+out:
+	release_sock(sk);
+	return err;
+}
+
+static int pn_socket_autobind(struct socket *sock)
+{
+	struct sockaddr_pn sa;
+	int err;
+
+	memset(&sa, 0, sizeof(sa));
+	sa.spn_family = AF_PHONET;
+	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
+				sizeof(struct sockaddr_pn));
+	if (err != -EINVAL)
+		return err;
+	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
+	return 0; /* socket was already bound */
+}
+
+static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
+				int *sockaddr_len, int peer)
+{
+	struct sock *sk = sock->sk;
+	struct pn_sock *pn = pn_sk(sk);
+
+	memset(addr, 0, sizeof(struct sockaddr_pn));
+	addr->sa_family = AF_PHONET;
+	if (!peer) /* Race with bind() here is userland's problem. */
+		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
+					pn->sobject);
+
+	*sockaddr_len = sizeof(struct sockaddr_pn);
+	return 0;
+}
+
+static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
+				unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct pn_sock *pn = pn_sk(sk);
+
+	if (cmd == SIOCPNGETOBJECT) {
+		struct net_device *dev;
+		u16 handle;
+		u8 saddr;
+
+		if (get_user(handle, (__u16 __user *)arg))
+			return -EFAULT;
+
+		lock_sock(sk);
+		if (sk->sk_bound_dev_if)
+			dev = dev_get_by_index(sock_net(sk),
+						sk->sk_bound_dev_if);
+		else
+			dev = phonet_device_get(sock_net(sk));
+		if (dev && (dev->flags & IFF_UP))
+			saddr = phonet_address_get(dev, pn_addr(handle));
+		else
+			saddr = PN_NO_ADDR;
+		release_sock(sk);
+
+		if (dev)
+			dev_put(dev);
+		if (saddr == PN_NO_ADDR)
+			return -EHOSTUNREACH;
+
+		handle = pn_object(saddr, pn_port(pn->sobject));
+		return put_user(handle, (__u16 __user *)arg);
+	}
+
+	return sk->sk_prot->ioctl(sk, cmd, arg);
+}
+
+static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
+				struct msghdr *m, size_t total_len)
+{
+	struct sock *sk = sock->sk;
+
+	if (pn_socket_autobind(sock))
+		return -EAGAIN;
+
+	return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
+}
+
+const struct proto_ops phonet_dgram_ops = {
+	.family		= AF_PHONET,
+	.owner		= THIS_MODULE,
+	.release	= pn_socket_release,
+	.bind		= pn_socket_bind,
+	.connect	= sock_no_connect,
+	.socketpair	= sock_no_socketpair,
+	.accept		= sock_no_accept,
+	.getname	= pn_socket_getname,
+	.poll		= datagram_poll,
+	.ioctl		= pn_socket_ioctl,
+	.listen		= sock_no_listen,
+	.shutdown	= sock_no_shutdown,
+	.setsockopt	= sock_no_setsockopt,
+	.getsockopt	= sock_no_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = sock_no_setsockopt,
+	.compat_getsockopt = sock_no_getsockopt,
+#endif
+	.sendmsg	= pn_socket_sendmsg,
+	.recvmsg	= sock_common_recvmsg,
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage,
+};
+
+static DEFINE_MUTEX(port_mutex);
+
+/* allocate port for a socket */
+int pn_sock_get_port(struct sock *sk, unsigned short sport)
+{
+	static int port_cur;
+	struct pn_sock *pn = pn_sk(sk);
+	struct sockaddr_pn try_sa;
+	struct sock *tmpsk;
+
+	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
+	try_sa.spn_family = AF_PHONET;
+
+	mutex_lock(&port_mutex);
+
+	if (!sport) {
+		/* search free port */
+		int port, pmin, pmax;
+
+		phonet_get_local_port_range(&pmin, &pmax);
+		for (port = pmin; port <= pmax; port++) {
+			port_cur++;
+			if (port_cur < pmin || port_cur > pmax)
+				port_cur = pmin;
+
+			pn_sockaddr_set_port(&try_sa, port_cur);
+			tmpsk = pn_find_sock_by_sa(&try_sa);
+			if (tmpsk == NULL) {
+				sport = port_cur;
+				goto found;
+			} else
+				sock_put(tmpsk);
+		}
+	} else {
+		/* try to find specific port */
+		pn_sockaddr_set_port(&try_sa, sport);
+		tmpsk = pn_find_sock_by_sa(&try_sa);
+		if (tmpsk == NULL)
+			/* No sock there! We can use that port... */
+			goto found;
+		else
+			sock_put(tmpsk);
+	}
+	mutex_unlock(&port_mutex);
+
+	/* the port must be in use already */
+	return -EADDRINUSE;
+
+found:
+	mutex_unlock(&port_mutex);
+	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
+	return 0;
+}
+EXPORT_SYMBOL(pn_sock_get_port);
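
pn_socket_ioctl() above implements SIOCPNGETOBJECT: userland passes in a destination handle and gets back the local object (source address plus bound port) that would be used to reach it. A hedged sketch of the calling convention, assuming the ioctl number is exported to userland by <linux/phonet.h> and that fd is an already-open AF_PHONET socket:

/* Hypothetical helper (not from this patch). */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/phonet.h>

int pn_query_object(int fd, unsigned short dst_handle)
{
	unsigned short handle = dst_handle;	/* in: destination, out: local object */

	if (ioctl(fd, SIOCPNGETOBJECT, &handle) < 0) {
		perror("SIOCPNGETOBJECT");
		return -1;
	}
	printf("local object for 0x%04x is 0x%04x\n", dst_handle, handle);
	return 0;
}
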
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
new file mode 100644
index 0000000..600a430
--- /dev/null
+++ b/net/phonet/sysctl.c
@@ -0,0 +1,113 @@
+/*
+ * File: sysctl.c
+ *
+ * Phonet /proc/sys/net/phonet interface implementation
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/seqlock.h>
+#include <linux/sysctl.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#define DYNAMIC_PORT_MIN	0x40
+#define DYNAMIC_PORT_MAX	0x7f
+
+static DEFINE_SEQLOCK(local_port_range_lock);
+static int local_port_range_min[2] = {0, 0};
+static int local_port_range_max[2] = {1023, 1023};
+static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX};
+static struct ctl_table_header *phonet_table_hrd;
+
+static void set_local_port_range(int range[2])
+{
+	write_seqlock(&local_port_range_lock);
+	local_port_range[0] = range[0];
+	local_port_range[1] = range[1];
+	write_sequnlock(&local_port_range_lock);
+}
+
+void phonet_get_local_port_range(int *min, int *max)
+{
+	unsigned seq;
+	do {
+		seq = read_seqbegin(&local_port_range_lock);
+		if (min)
+			*min = local_port_range[0];
+		if (max)
+			*max = local_port_range[1];
+	} while (read_seqretry(&local_port_range_lock, seq));
+}
+
+static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
+				void __user *buffer,
+				size_t *lenp, loff_t *ppos)
+{
+	int ret;
+	int range[2] = {local_port_range[0], local_port_range[1]};
+	ctl_table tmp = {
+		.data = &range,
+		.maxlen = sizeof(range),
+		.mode = table->mode,
+		.extra1 = &local_port_range_min,
+		.extra2 = &local_port_range_max,
+	};
+
+	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
+
+	if (write && ret == 0) {
+		if (range[1] < range[0])
+			ret = -EINVAL;
+		else
+			set_local_port_range(range);
+	}
+
+	return ret;
+}
+
+static struct ctl_table phonet_table[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "local_port_range",
+		.data		= &local_port_range,
+		.maxlen		= sizeof(local_port_range),
+		.mode		= 0644,
+		.proc_handler	= &proc_local_port_range,
+		.strategy	= NULL,
+	},
+	{ .ctl_name = 0 }
+};
+
+struct ctl_path phonet_ctl_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "phonet", .ctl_name = CTL_UNNUMBERED, },
+	{ },
+};
+
+int __init phonet_sysctl_init(void)
+{
+	phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
+	return phonet_table_hrd == NULL ? -ENOMEM : 0;
+}
+
+void phonet_sysctl_exit(void)
+{
+	unregister_sysctl_table(phonet_table_hrd);
+}
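
With the ctl path above, the table shows up as /proc/sys/net/phonet/local_port_range and holds the dynamic port range consumed by pn_sock_get_port(). A small sketch reading it back; the path is inferred from the ctl path and is an assumption, not taken from the patch text.

/* Illustration: read the dynamic Phonet port range. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/phonet/local_port_range", "r");
	int lo, hi;

	if (f == NULL) {
		perror("local_port_range");
		return 1;
	}
	if (fscanf(f, "%d %d", &lo, &hi) == 2)
		printf("dynamic Phonet ports: %d..%d\n", lo, hi);
	fclose(f);
	return 0;
}
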
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index f63d050..bbfa646 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -13,5 +13,6 @@
 
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
 void rfkill_epo(void);
+void rfkill_restore_states(void);
 
 #endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 74aecc0..ea0dc04 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -37,14 +37,20 @@
 MODULE_LICENSE("GPL");
 
 static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
-static DEFINE_MUTEX(rfkill_mutex);
+static DEFINE_MUTEX(rfkill_global_mutex);
 
 static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
 module_param_named(default_state, rfkill_default_state, uint, 0444);
 MODULE_PARM_DESC(default_state,
 		 "Default initial state for all radio types, 0 = radio off");
 
-static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX];
+struct rfkill_gsw_state {
+	enum rfkill_state current_state;
+	enum rfkill_state default_state;
+};
+
+static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
+static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
 
 static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
 
@@ -70,6 +76,7 @@
  */
 int register_rfkill_notifier(struct notifier_block *nb)
 {
+	BUG_ON(!nb);
 	return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
 }
 EXPORT_SYMBOL_GPL(register_rfkill_notifier);
@@ -85,6 +92,7 @@
  */
 int unregister_rfkill_notifier(struct notifier_block *nb)
 {
+	BUG_ON(!nb);
 	return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
 }
 EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
@@ -195,6 +203,11 @@
 		 * BLOCK even a transmitter that is already in state
 		 * RFKILL_STATE_HARD_BLOCKED */
 		break;
+	default:
+		WARN(1, KERN_WARNING
+			"rfkill: illegal state %d passed as parameter "
+			"to rfkill_toggle_radio\n", state);
+		return -EINVAL;
 	}
 
 	if (force || state != rfkill->state) {
@@ -213,22 +226,29 @@
 }
 
 /**
- * rfkill_switch_all - Toggle state of all switches of given type
+ * __rfkill_switch_all - Toggle state of all switches of given type
  * @type: type of interfaces to be affected
  * @state: the new state
  *
  * This function toggles the state of all switches of given type,
  * unless a specific switch is claimed by userspace (in which case,
  * that switch is left alone) or suspended.
+ *
+ * Caller must have acquired rfkill_global_mutex.
  */
-void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
+static void __rfkill_switch_all(const enum rfkill_type type,
+				const enum rfkill_state state)
 {
 	struct rfkill *rfkill;
 
-	mutex_lock(&rfkill_mutex);
+	if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
+			KERN_WARNING
+			"rfkill: illegal state %d or type %d "
+			"passed as parameter to __rfkill_switch_all\n",
+			state, type))
+		return;
 
-	rfkill_states[type] = state;
-
+	rfkill_global_states[type].current_state = state;
 	list_for_each_entry(rfkill, &rfkill_list, node) {
 		if ((!rfkill->user_claim) && (rfkill->type == type)) {
 			mutex_lock(&rfkill->mutex);
@@ -236,8 +256,21 @@
 			mutex_unlock(&rfkill->mutex);
 		}
 	}
+}
 
-	mutex_unlock(&rfkill_mutex);
+/**
+ * rfkill_switch_all - Toggle state of all switches of given type
+ * @type: type of interfaces to be affected
+ * @state: the new state
+ *
+ * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
+ * Please refer to __rfkill_switch_all() for details.
+ */
+void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
+{
+	mutex_lock(&rfkill_global_mutex);
+	__rfkill_switch_all(type, state);
+	mutex_unlock(&rfkill_global_mutex);
 }
 EXPORT_SYMBOL(rfkill_switch_all);
 
@@ -245,23 +278,53 @@
  * rfkill_epo - emergency power off all transmitters
  *
  * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
- * ignoring everything in its path but rfkill_mutex and rfkill->mutex.
+ * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
+ *
+ * The global state before the EPO is saved and can be restored later
+ * using rfkill_restore_states().
  */
 void rfkill_epo(void)
 {
 	struct rfkill *rfkill;
+	int i;
 
-	mutex_lock(&rfkill_mutex);
+	mutex_lock(&rfkill_global_mutex);
+
 	list_for_each_entry(rfkill, &rfkill_list, node) {
 		mutex_lock(&rfkill->mutex);
 		rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
 		mutex_unlock(&rfkill->mutex);
 	}
-	mutex_unlock(&rfkill_mutex);
+	for (i = 0; i < RFKILL_TYPE_MAX; i++) {
+		rfkill_global_states[i].default_state =
+				rfkill_global_states[i].current_state;
+		rfkill_global_states[i].current_state =
+				RFKILL_STATE_SOFT_BLOCKED;
+	}
+	mutex_unlock(&rfkill_global_mutex);
 }
 EXPORT_SYMBOL_GPL(rfkill_epo);
 
 /**
+ * rfkill_restore_states - restore global states
+ *
+ * Restore (and sync switches to) the global states from the default
+ * states saved in rfkill_global_states.  This can undo the effects of
+ * a call to rfkill_epo().
+ */
+void rfkill_restore_states(void)
+{
+	int i;
+
+	mutex_lock(&rfkill_global_mutex);
+
+	for (i = 0; i < RFKILL_TYPE_MAX; i++)
+		__rfkill_switch_all(i, rfkill_global_states[i].default_state);
+	mutex_unlock(&rfkill_global_mutex);
+}
+EXPORT_SYMBOL_GPL(rfkill_restore_states);
+
+/**
  * rfkill_force_state - Force the internal rfkill radio state
  * @rfkill: pointer to the rfkill class to modify.
  * @state: the current radio state the class should be forced to.
@@ -282,9 +345,11 @@
 {
 	enum rfkill_state oldstate;
 
-	if (state != RFKILL_STATE_SOFT_BLOCKED &&
-	    state != RFKILL_STATE_UNBLOCKED &&
-	    state != RFKILL_STATE_HARD_BLOCKED)
+	BUG_ON(!rfkill);
+	if (WARN((state >= RFKILL_STATE_MAX),
+			KERN_WARNING
+			"rfkill: illegal state %d passed as parameter "
+			"to rfkill_force_state\n", state))
 		return -EINVAL;
 
 	mutex_lock(&rfkill->mutex);
@@ -352,12 +417,16 @@
 				  const char *buf, size_t count)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
-	unsigned int state = simple_strtoul(buf, NULL, 0);
+	unsigned long state;
 	int error;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
+	error = strict_strtoul(buf, 0, &state);
+	if (error)
+		return error;
+
 	/* RFKILL_STATE_HARD_BLOCKED is illegal here... */
 	if (state != RFKILL_STATE_UNBLOCKED &&
 	    state != RFKILL_STATE_SOFT_BLOCKED)
@@ -385,7 +454,8 @@
 				  const char *buf, size_t count)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
-	bool claim = !!simple_strtoul(buf, NULL, 0);
+	unsigned long claim_tmp;
+	bool claim;
 	int error;
 
 	if (!capable(CAP_NET_ADMIN))
@@ -394,11 +464,16 @@
 	if (rfkill->user_claim_unsupported)
 		return -EOPNOTSUPP;
 
+	error = strict_strtoul(buf, 0, &claim_tmp);
+	if (error)
+		return error;
+	claim = !!claim_tmp;
+
 	/*
 	 * Take the global lock to make sure the kernel is not in
 	 * the middle of rfkill_switch_all
 	 */
-	error = mutex_lock_interruptible(&rfkill_mutex);
+	error = mutex_lock_interruptible(&rfkill_global_mutex);
 	if (error)
 		return error;
 
@@ -406,14 +481,14 @@
 		if (!claim) {
 			mutex_lock(&rfkill->mutex);
 			rfkill_toggle_radio(rfkill,
-					    rfkill_states[rfkill->type],
-					    0);
+					rfkill_global_states[rfkill->type].current_state,
+					0);
 			mutex_unlock(&rfkill->mutex);
 		}
 		rfkill->user_claim = claim;
 	}
 
-	mutex_unlock(&rfkill_mutex);
+	mutex_unlock(&rfkill_global_mutex);
 
 	return error ? error : count;
 }
@@ -437,21 +512,9 @@
 #ifdef CONFIG_PM
 static int rfkill_suspend(struct device *dev, pm_message_t state)
 {
-	struct rfkill *rfkill = to_rfkill(dev);
-
-	if (dev->power.power_state.event != state.event) {
-		if (state.event & PM_EVENT_SLEEP) {
-			/* Stop transmitter, keep state, no notifies */
-			update_rfkill_state(rfkill);
-
-			mutex_lock(&rfkill->mutex);
-			rfkill->toggle_radio(rfkill->data,
-						RFKILL_STATE_SOFT_BLOCKED);
-			mutex_unlock(&rfkill->mutex);
-		}
-
+	/* mark class device as suspended */
+	if (dev->power.power_state.event != state.event)
 		dev->power.power_state = state;
-	}
 
 	return 0;
 }
@@ -525,24 +588,60 @@
 	.dev_uevent	= rfkill_dev_uevent,
 };
 
+static int rfkill_check_duplicity(const struct rfkill *rfkill)
+{
+	struct rfkill *p;
+	unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	memset(seen, 0, sizeof(seen));
+
+	list_for_each_entry(p, &rfkill_list, node) {
+		if (WARN((p == rfkill), KERN_WARNING
+				"rfkill: illegal attempt to register "
+				"an already registered rfkill struct\n"))
+			return -EEXIST;
+		set_bit(p->type, seen);
+	}
+
+	/* 0: first switch of its kind */
+	return test_bit(rfkill->type, seen);
+}
+
 static int rfkill_add_switch(struct rfkill *rfkill)
 {
-	mutex_lock(&rfkill_mutex);
+	int error;
 
-	rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0);
+	mutex_lock(&rfkill_global_mutex);
+
+	error = rfkill_check_duplicity(rfkill);
+	if (error < 0)
+		goto unlock_out;
+
+	if (!error) {
+		/* lock default after first use */
+		set_bit(rfkill->type, rfkill_states_lockdflt);
+		rfkill_global_states[rfkill->type].current_state =
+			rfkill_global_states[rfkill->type].default_state;
+	}
+
+	rfkill_toggle_radio(rfkill,
+			    rfkill_global_states[rfkill->type].current_state,
+			    0);
 
 	list_add_tail(&rfkill->node, &rfkill_list);
 
-	mutex_unlock(&rfkill_mutex);
+	error = 0;
+unlock_out:
+	mutex_unlock(&rfkill_global_mutex);
 
-	return 0;
+	return error;
 }
 
 static void rfkill_remove_switch(struct rfkill *rfkill)
 {
-	mutex_lock(&rfkill_mutex);
+	mutex_lock(&rfkill_global_mutex);
 	list_del_init(&rfkill->node);
-	mutex_unlock(&rfkill_mutex);
+	mutex_unlock(&rfkill_global_mutex);
 
 	mutex_lock(&rfkill->mutex);
 	rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
@@ -562,11 +661,18 @@
  * NOTE: If registration fails the structure should be freed by calling
  * rfkill_free() otherwise rfkill_unregister() should be used.
  */
-struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type)
+struct rfkill * __must_check rfkill_allocate(struct device *parent,
+					     enum rfkill_type type)
 {
 	struct rfkill *rfkill;
 	struct device *dev;
 
+	if (WARN((type >= RFKILL_TYPE_MAX),
+			KERN_WARNING
+			"rfkill: illegal type %d passed as parameter "
+			"to rfkill_allocate\n", type))
+		return NULL;
+
 	rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
 	if (!rfkill)
 		return NULL;
@@ -633,15 +739,18 @@
  * structure needs to be registered. Immediately from registration the
  * switch driver should be able to service calls to toggle_radio.
  */
-int rfkill_register(struct rfkill *rfkill)
+int __must_check rfkill_register(struct rfkill *rfkill)
 {
 	static atomic_t rfkill_no = ATOMIC_INIT(0);
 	struct device *dev = &rfkill->dev;
 	int error;
 
-	if (!rfkill->toggle_radio)
-		return -EINVAL;
-	if (rfkill->type >= RFKILL_TYPE_MAX)
+	if (WARN((!rfkill || !rfkill->toggle_radio ||
+			rfkill->type >= RFKILL_TYPE_MAX ||
+			rfkill->state >= RFKILL_STATE_MAX),
+			KERN_WARNING
+			"rfkill: attempt to register a "
+			"badly initialized rfkill struct\n"))
 		return -EINVAL;
 
 	snprintf(dev->bus_id, sizeof(dev->bus_id),
@@ -676,6 +785,7 @@
  */
 void rfkill_unregister(struct rfkill *rfkill)
 {
+	BUG_ON(!rfkill);
 	device_del(&rfkill->dev);
 	rfkill_remove_switch(rfkill);
 	rfkill_led_trigger_unregister(rfkill);
@@ -683,6 +793,56 @@
 }
 EXPORT_SYMBOL(rfkill_unregister);
 
+/**
+ * rfkill_set_default - set initial value for a switch type
+ * @type - the type of switch to set the default state of
+ * @state - the new default state for that group of switches
+ *
+ * Sets the initial state rfkill should use for a given type.
+ * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
+ * and RFKILL_STATE_UNBLOCKED.
+ *
+ * This function is meant to be used by platform drivers for platforms
+ * that can save switch state across power down/reboot.
+ *
+ * The default state for each switch type can be changed exactly once.
+ * After a switch of that type is registered, the default state cannot
+ * be changed anymore.  This guards against multiple drivers on the
+ * same platform trying to set the initial switch default state, which
+ * is not allowed.
+ *
+ * Returns -EPERM if the state has already been set once or is in use,
+ * so drivers likely want to either ignore or at most printk(KERN_NOTICE)
+ * if this function returns -EPERM.
+ *
+ * Returns 0 if the new default state was set, or an error if it
+ * could not be set.
+ */
+int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
+{
+	int error;
+
+	if (WARN((type >= RFKILL_TYPE_MAX ||
+			(state != RFKILL_STATE_SOFT_BLOCKED &&
+			 state != RFKILL_STATE_UNBLOCKED)),
+			KERN_WARNING
+			"rfkill: illegal state %d or type %d passed as "
+			"parameter to rfkill_set_default\n", state, type))
+		return -EINVAL;
+
+	mutex_lock(&rfkill_global_mutex);
+
+	if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
+		rfkill_global_states[type].default_state = state;
+		error = 0;
+	} else
+		error = -EPERM;
+
+	mutex_unlock(&rfkill_global_mutex);
+	return error;
+}
+EXPORT_SYMBOL_GPL(rfkill_set_default);
+
 /*
  * Rfkill module initialization/deinitialization.
  */
@@ -696,8 +856,8 @@
 	    rfkill_default_state != RFKILL_STATE_UNBLOCKED)
 		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(rfkill_states); i++)
-		rfkill_states[i] = rfkill_default_state;
+	for (i = 0; i < RFKILL_TYPE_MAX; i++)
+		rfkill_global_states[i].default_state = rfkill_default_state;
 
 	error = class_register(&rfkill_class);
 	if (error) {
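
The new rfkill_set_default() documented above is aimed at platform drivers that know the radio state saved across power down/reboot. A minimal sketch of how such a driver might use it; wlan_was_blocked stands in for a made-up platform-specific firmware query, so this is an assumption-laden fragment rather than code from any real driver.

/* Hypothetical platform-driver fragment (not part of this patch): seed
 * the WLAN default state before any switch of that type is registered.
 */
#include <linux/kernel.h>
#include <linux/rfkill.h>

static void example_seed_wlan_default(bool wlan_was_blocked)
{
	enum rfkill_state state = wlan_was_blocked ?
		RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
	int err = rfkill_set_default(RFKILL_TYPE_WLAN, state);

	/* -EPERM only means the default was already locked by someone else. */
	if (err && err != -EPERM)
		printk(KERN_NOTICE "rfkill default not set: %d\n", err);
}
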
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9437b27..6767e54 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -106,6 +106,15 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_prio.
 
+config NET_SCH_MULTIQ
+	tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)"
+	---help---
+	  Say Y here if you want to use an n-band queue packet scheduler
+	  to support devices that have multiple hardware transmit queues.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_multiq.
+
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
 	---help---
@@ -476,6 +485,17 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called simple.
 
+config NET_ACT_SKBEDIT
+	tristate "SKB Editing"
+	depends on NET_CLS_ACT
+	---help---
+	  Say Y here to change skb priority or queue_mapping settings.
+
+	  If unsure, say N.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called skbedit.
+
 config NET_CLS_IND
 	bool "Incoming device classification"
 	depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 1d2b0f7..e60c992 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_NET_ACT_NAT)	+= act_nat.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
+obj-$(CONFIG_NET_ACT_SKBEDIT)	+= act_skbedit.o
 obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
@@ -26,6 +27,7 @@
 obj-$(CONFIG_NET_SCH_TBF)	+= sch_tbf.o
 obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
 obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
+obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
new file mode 100644
index 0000000..fe9777e
--- /dev/null
+++ b/net/sched/act_skbedit.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_skbedit.h>
+#include <net/tc_act/tc_skbedit.h>
+
+#define SKBEDIT_TAB_MASK     15
+static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
+static u32 skbedit_idx_gen;
+static DEFINE_RWLOCK(skbedit_lock);
+
+static struct tcf_hashinfo skbedit_hash_info = {
+	.htab	=	tcf_skbedit_ht,
+	.hmask	=	SKBEDIT_TAB_MASK,
+	.lock	=	&skbedit_lock,
+};
+
+static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
+		       struct tcf_result *res)
+{
+	struct tcf_skbedit *d = a->priv;
+
+	spin_lock(&d->tcf_lock);
+	d->tcf_tm.lastuse = jiffies;
+	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
+	d->tcf_bstats.packets++;
+
+	if (d->flags & SKBEDIT_F_PRIORITY)
+		skb->priority = d->priority;
+	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
+	    skb->dev->real_num_tx_queues > d->queue_mapping)
+		skb_set_queue_mapping(skb, d->queue_mapping);
+
+	spin_unlock(&d->tcf_lock);
+	return d->tcf_action;
+}
+
+static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
+	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
+	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
+	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
+};
+
+static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
+			 struct tc_action *a, int ovr, int bind)
+{
+	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
+	struct tc_skbedit *parm;
+	struct tcf_skbedit *d;
+	struct tcf_common *pc;
+	u32 flags = 0, *priority = NULL;
+	u16 *queue_mapping = NULL;
+	int ret = 0, err;
+
+	if (nla == NULL)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_SKBEDIT_PARMS] == NULL)
+		return -EINVAL;
+
+	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
+		flags |= SKBEDIT_F_PRIORITY;
+		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
+	}
+
+	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
+		flags |= SKBEDIT_F_QUEUE_MAPPING;
+		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
+	}
+	if (!flags)
+		return -EINVAL;
+
+	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
+
+	pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
+	if (!pc) {
+		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
+				     &skbedit_idx_gen, &skbedit_hash_info);
+		if (unlikely(!pc))
+			return -ENOMEM;
+
+		d = to_skbedit(pc);
+		ret = ACT_P_CREATED;
+	} else {
+		d = to_skbedit(pc);
+		if (!ovr) {
+			tcf_hash_release(pc, bind, &skbedit_hash_info);
+			return -EEXIST;
+		}
+	}
+
+	spin_lock_bh(&d->tcf_lock);
+
+	d->flags = flags;
+	if (flags & SKBEDIT_F_PRIORITY)
+		d->priority = *priority;
+	if (flags & SKBEDIT_F_QUEUE_MAPPING)
+		d->queue_mapping = *queue_mapping;
+	d->tcf_action = parm->action;
+
+	spin_unlock_bh(&d->tcf_lock);
+
+	if (ret == ACT_P_CREATED)
+		tcf_hash_insert(pc, &skbedit_hash_info);
+	return ret;
+}
+
+static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+{
+	struct tcf_skbedit *d = a->priv;
+
+	if (d)
+		return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
+	return 0;
+}
+
+static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+				int bind, int ref)
+{
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tcf_skbedit *d = a->priv;
+	struct tc_skbedit opt;
+	struct tcf_t t;
+
+	opt.index = d->tcf_index;
+	opt.refcnt = d->tcf_refcnt - ref;
+	opt.bindcnt = d->tcf_bindcnt - bind;
+	opt.action = d->tcf_action;
+	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
+	if (d->flags & SKBEDIT_F_PRIORITY)
+		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
+			&d->priority);
+	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
+		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
+			sizeof(d->queue_mapping), &d->queue_mapping);
+	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
+	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
+	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
+	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
+	return skb->len;
+
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -1;
+}
+
+static struct tc_action_ops act_skbedit_ops = {
+	.kind		=	"skbedit",
+	.hinfo		=	&skbedit_hash_info,
+	.type		=	TCA_ACT_SKBEDIT,
+	.capab		=	TCA_CAP_NONE,
+	.owner		=	THIS_MODULE,
+	.act		=	tcf_skbedit,
+	.dump		=	tcf_skbedit_dump,
+	.cleanup	=	tcf_skbedit_cleanup,
+	.init		=	tcf_skbedit_init,
+	.walk		=	tcf_generic_walker,
+};
+
+MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
+MODULE_DESCRIPTION("SKB Editing");
+MODULE_LICENSE("GPL");
+
+static int __init skbedit_init_module(void)
+{
+	return tcf_register_action(&act_skbedit_ops);
+}
+
+static void __exit skbedit_cleanup_module(void)
+{
+	tcf_unregister_action(&act_skbedit_ops);
+}
+
+module_init(skbedit_init_module);
+module_exit(skbedit_cleanup_module);
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 8f63a1a..0ebaff6 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -67,9 +67,9 @@
 static u32 flow_get_src(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return ntohl(ip_hdr(skb)->saddr);
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
 	default:
 		return addr_fold(skb->sk);
@@ -79,9 +79,9 @@
 static u32 flow_get_dst(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return ntohl(ip_hdr(skb)->daddr);
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
 	default:
 		return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
@@ -91,9 +91,9 @@
 static u32 flow_get_proto(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return ip_hdr(skb)->protocol;
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		return ipv6_hdr(skb)->nexthdr;
 	default:
 		return 0;
@@ -120,7 +120,7 @@
 	u32 res = 0;
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP): {
+	case htons(ETH_P_IP): {
 		struct iphdr *iph = ip_hdr(skb);
 
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -128,7 +128,7 @@
 			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
 		break;
 	}
-	case __constant_htons(ETH_P_IPV6): {
+	case htons(ETH_P_IPV6): {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		if (has_ports(iph->nexthdr))
@@ -147,7 +147,7 @@
 	u32 res = 0;
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP): {
+	case htons(ETH_P_IP): {
 		struct iphdr *iph = ip_hdr(skb);
 
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -155,7 +155,7 @@
 			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
 		break;
 	}
-	case __constant_htons(ETH_P_IPV6): {
+	case htons(ETH_P_IPV6): {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		if (has_ports(iph->nexthdr))
@@ -213,9 +213,9 @@
 static u32 flow_get_nfct_src(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return ntohl(CTTUPLE(skb, src.u3.ip));
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
 	}
 fallback:
@@ -225,9 +225,9 @@
 static u32 flow_get_nfct_dst(const struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		return ntohl(CTTUPLE(skb, dst.u3.ip));
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
 	}
 fallback:
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index cc49c93..bc45039 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/tc_ematch/tc_em_cmp.h>
+#include <asm/unaligned.h>
 #include <net/pkt_cls.h>
 
 static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
@@ -37,8 +38,7 @@
 			break;
 
 		case TCF_EM_ALIGN_U16:
-			val = *ptr << 8;
-			val |= *(ptr+1);
+			val = get_unaligned_be16(ptr);
 
 			if (cmp_needs_transformation(cmp))
 				val = be16_to_cpu(val);
@@ -47,10 +47,7 @@
 		case TCF_EM_ALIGN_U32:
 			/* Worth checking boundries? The branching seems
 			 * to get worse. Visit again. */
-			val = *ptr << 24;
-			val |= *(ptr+1) << 16;
-			val |= *(ptr+2) << 8;
-			val |= *(ptr+3);
+			val = get_unaligned_be32(ptr);
 
 			if (cmp_needs_transformation(cmp))
 				val = be32_to_cpu(val);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index edd1298..ba43aab 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -202,7 +202,7 @@
 
 	if (p->set_tc_index) {
 		switch (skb->protocol) {
-		case __constant_htons(ETH_P_IP):
+		case htons(ETH_P_IP):
 			if (skb_cow_head(skb, sizeof(struct iphdr)))
 				goto drop;
 
@@ -210,7 +210,7 @@
 				& ~INET_ECN_MASK;
 			break;
 
-		case __constant_htons(ETH_P_IPV6):
+		case htons(ETH_P_IPV6):
 			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
 				goto drop;
 
@@ -289,11 +289,11 @@
 	pr_debug("index %d->%d\n", skb->tc_index, index);
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 		ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
 				    p->value[index]);
 			break;
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 		ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
 				    p->value[index]);
 			break;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ec0a083..5e7e0bd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,10 +44,7 @@
 
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	if (unlikely(skb->next))
-		q->gso_skb = skb;
-	else
-		q->ops->requeue(skb, q);
+	__skb_queue_head(&q->requeue, skb);
 
 	__netif_schedule(q);
 	return 0;
@@ -55,12 +52,21 @@
 
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek(&q->requeue);
 
-	if ((skb = q->gso_skb))
-		q->gso_skb = NULL;
-	else
+	if (unlikely(skb)) {
+		struct net_device *dev = qdisc_dev(q);
+		struct netdev_queue *txq;
+
+		/* check the reason of requeuing without tx lock first */
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+			__skb_unlink(skb, &q->requeue);
+		else
+			skb = NULL;
+	} else {
 		skb = q->dequeue(q);
+	}
 
 	return skb;
 }
@@ -327,6 +333,7 @@
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
@@ -352,6 +359,7 @@
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
 };
@@ -472,6 +480,7 @@
 	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
+	skb_queue_head_init(&sch->requeue);
 	skb_queue_head_init(&sch->q);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
@@ -539,7 +548,7 @@
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	kfree_skb(qdisc->gso_skb);
+	__skb_queue_purge(&qdisc->requeue);
 
 	kfree((char *) qdisc - qdisc->padded);
 }
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
new file mode 100644
index 0000000..915f314
--- /dev/null
+++ b/net/sched/sch_multiq.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+
+struct multiq_sched_data {
+	u16 bands;
+	u16 max_bands;
+	u16 curband;
+	struct tcf_proto *filter_list;
+	struct Qdisc **queues;
+};
+
+
+static struct Qdisc *
+multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	u32 band;
+	struct tcf_result res;
+	int err;
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	err = tc_classify(skb, q->filter_list, &res);
+#ifdef CONFIG_NET_CLS_ACT
+	switch (err) {
+	case TC_ACT_STOLEN:
+	case TC_ACT_QUEUED:
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+	case TC_ACT_SHOT:
+		return NULL;
+	}
+#endif
+	band = skb_get_queue_mapping(skb);
+
+	if (band >= q->bands)
+		return q->queues[0];
+
+	return q->queues[band];
+}
+
+static int
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct Qdisc *qdisc;
+	int ret;
+
+	qdisc = multiq_classify(skb, sch, &ret);
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
+
+	ret = qdisc_enqueue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->bstats.bytes += qdisc_pkt_len(skb);
+		sch->bstats.packets++;
+		sch->q.qlen++;
+		return NET_XMIT_SUCCESS;
+	}
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
+	return ret;
+}
+
+
+static int
+multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct Qdisc *qdisc;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int ret;
+
+	qdisc = multiq_classify(skb, sch, &ret);
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
+
+	ret = qdisc->ops->requeue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		sch->qstats.requeues++;
+		if (q->curband)
+			q->curband--;
+		else
+			q->curband = q->bands - 1;
+		return NET_XMIT_SUCCESS;
+	}
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
+	return ret;
+}
+
+
+static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *qdisc;
+	struct sk_buff *skb;
+	int band;
+
+	for (band = 0; band < q->bands; band++) {
+		/* cycle through bands to ensure fairness */
+		q->curband++;
+		if (q->curband >= q->bands)
+			q->curband = 0;
+
+		/* Check that target subqueue is available before
+		 * pulling an skb to avoid excessive requeues
+		 */
+		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+			qdisc = q->queues[q->curband];
+			skb = qdisc->dequeue(qdisc);
+			if (skb) {
+				sch->q.qlen--;
+				return skb;
+			}
+		}
+	}
+	return NULL;
+}
+
+static unsigned int multiq_drop(struct Qdisc *sch)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int band;
+	unsigned int len;
+	struct Qdisc *qdisc;
+
+	for (band = q->bands-1; band >= 0; band--) {
+		qdisc = q->queues[band];
+		if (qdisc->ops->drop) {
+			len = qdisc->ops->drop(qdisc);
+			if (len != 0) {
+				sch->q.qlen--;
+				return len;
+			}
+		}
+	}
+	return 0;
+}
+
+
+static void
+multiq_reset(struct Qdisc *sch)
+{
+	u16 band;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	for (band = 0; band < q->bands; band++)
+		qdisc_reset(q->queues[band]);
+	sch->q.qlen = 0;
+	q->curband = 0;
+}
+
+static void
+multiq_destroy(struct Qdisc *sch)
+{
+	int band;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	for (band = 0; band < q->bands; band++)
+		qdisc_destroy(q->queues[band]);
+
+	kfree(q->queues);
+}
+
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct tc_multiq_qopt *qopt;
+	int i;
+
+	if (!netif_is_multiqueue(qdisc_dev(sch)))
+		return -EINVAL;
+	if (nla_len(opt) < sizeof(*qopt))
+		return -EINVAL;
+
+	qopt = nla_data(opt);
+
+	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+
+	sch_tree_lock(sch);
+	q->bands = qopt->bands;
+	for (i = q->bands; i < q->max_bands; i++) {
+		if (q->queues[i] != &noop_qdisc) {
+			struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
+			qdisc_tree_decrease_qlen(child, child->q.qlen);
+			qdisc_destroy(child);
+		}
+	}
+
+	sch_tree_unlock(sch);
+
+	for (i = 0; i < q->bands; i++) {
+		if (q->queues[i] == &noop_qdisc) {
+			struct Qdisc *child;
+			child = qdisc_create_dflt(qdisc_dev(sch),
+						  sch->dev_queue,
+						  &pfifo_qdisc_ops,
+						  TC_H_MAKE(sch->handle,
+							    i + 1));
+			if (child) {
+				sch_tree_lock(sch);
+				child = xchg(&q->queues[i], child);
+
+				if (child != &noop_qdisc) {
+					qdisc_tree_decrease_qlen(child,
+								 child->q.qlen);
+					qdisc_destroy(child);
+				}
+				sch_tree_unlock(sch);
+			}
+		}
+	}
+	return 0;
+}
+
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int i, err;
+
+	q->queues = NULL;
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	q->max_bands = qdisc_dev(sch)->num_tx_queues;
+
+	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
+	if (!q->queues)
+		return -ENOBUFS;
+	for (i = 0; i < q->max_bands; i++)
+		q->queues[i] = &noop_qdisc;
+
+	err = multiq_tune(sch, opt);
+
+	if (err)
+		kfree(q->queues);
+
+	return err;
+}
+
+static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tc_multiq_qopt opt;
+
+	opt.bands = q->bands;
+	opt.max_bands = q->max_bands;
+
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+	return skb->len;
+
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -1;
+}
+
+static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+		      struct Qdisc **old)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	if (band >= q->bands)
+		return -EINVAL;
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = q->queues[band];
+	q->queues[band] = new;
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+
+	return 0;
+}
+
+static struct Qdisc *
+multiq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	if (band >= q->bands)
+		return NULL;
+
+	return q->queues[band];
+}
+
+static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = TC_H_MIN(classid);
+
+	if (band - 1 >= q->bands)
+		return 0;
+	return band;
+}
+
+static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
+				 u32 classid)
+{
+	return multiq_get(sch, classid);
+}
+
+
+static void multiq_put(struct Qdisc *q, unsigned long cl)
+{
+	return;
+}
+
+static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
+			 struct nlattr **tca, unsigned long *arg)
+{
+	unsigned long cl = *arg;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	return 0;
+}
+
+static int multiq_delete(struct Qdisc *sch, unsigned long cl)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	return 0;
+}
+
+
+static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	if (q->queues[cl-1])
+		tcm->tcm_info = q->queues[cl-1]->handle;
+	return 0;
+}
+
+static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				 struct gnet_dump *d)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *cl_q;
+
+	cl_q = q->queues[cl - 1];
+	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+		return -1;
+
+	return 0;
+}
+
+static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int band;
+
+	if (arg->stop)
+		return;
+
+	for (band = 0; band < q->bands; band++) {
+		if (arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, band+1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static const struct Qdisc_class_ops multiq_class_ops = {
+	.graft		=	multiq_graft,
+	.leaf		=	multiq_leaf,
+	.get		=	multiq_get,
+	.put		=	multiq_put,
+	.change		=	multiq_change,
+	.delete		=	multiq_delete,
+	.walk		=	multiq_walk,
+	.tcf_chain	=	multiq_find_tcf,
+	.bind_tcf	=	multiq_bind,
+	.unbind_tcf	=	multiq_put,
+	.dump		=	multiq_dump_class,
+	.dump_stats	=	multiq_dump_class_stats,
+};
+
+static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
+	.next		=	NULL,
+	.cl_ops		=	&multiq_class_ops,
+	.id		=	"multiq",
+	.priv_size	=	sizeof(struct multiq_sched_data),
+	.enqueue	=	multiq_enqueue,
+	.dequeue	=	multiq_dequeue,
+	.requeue	=	multiq_requeue,
+	.drop		=	multiq_drop,
+	.init		=	multiq_init,
+	.reset		=	multiq_reset,
+	.destroy	=	multiq_destroy,
+	.change		=	multiq_tune,
+	.dump		=	multiq_dump,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init multiq_module_init(void)
+{
+	return register_qdisc(&multiq_qdisc_ops);
+}
+
+static void __exit multiq_module_exit(void)
+{
+	unregister_qdisc(&multiq_qdisc_ops);
+}
+
+module_init(multiq_module_init)
+module_exit(multiq_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 3781e55..a119599 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -388,6 +388,20 @@
 	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 };
 
+static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+		      const struct nla_policy *policy, int len)
+{
+	int nested_len = nla_len(nla) - NLA_ALIGN(len);
+
+	if (nested_len < 0)
+		return -EINVAL;
+	if (nested_len >= nla_attr_size(0))
+		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
+				 nested_len, policy);
+	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+	return 0;
+}
+
 /* Parse netlink message to set options */
 static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 {
@@ -399,8 +413,8 @@
 	if (opt == NULL)
 		return -EINVAL;
 
-	ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
-				      qopt, sizeof(*qopt));
+	qopt = nla_data(opt);
+	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 	if (ret < 0)
 		return ret;
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a6697c6..504a78c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -254,16 +254,12 @@
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
-	struct nlattr *nest;
 	struct tc_prio_qopt opt;
 
 	opt.bands = q->bands;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
 
-	nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
-	if (nest == NULL)
-		goto nla_put_failure;
-	nla_nest_compat_end(skb, nest);
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 
 	return skb->len;
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6e041d1..fe1508e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -119,7 +119,7 @@
 	u32 h, h2;
 
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
+	case htons(ETH_P_IP):
 	{
 		const struct iphdr *iph = ip_hdr(skb);
 		h = iph->daddr;
@@ -134,7 +134,7 @@
 			h2 ^= *(((u32*)iph) + iph->ihl);
 		break;
 	}
-	case __constant_htons(ETH_P_IPV6):
+	case htons(ETH_P_IPV6):
 	{
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 		h = iph->daddr.s6_addr32[3];
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5061a26..7b23803 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -317,7 +317,7 @@
 	}
 
 	/* Insert before pos. */
-	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
+	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 
 }
 
@@ -825,8 +825,7 @@
 
 
 	/* Insert before pos. */
-	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
-
+	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 }
 
 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e55427f..5c1954d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -769,7 +769,7 @@
 	/* check for expected message types */
 	/* The order of some of these tests is important. */
 	switch (headerp->rm_type) {
-	case __constant_htonl(RDMA_MSG):
+	case htonl(RDMA_MSG):
 		/* never expect read chunks */
 		/* never expect reply chunks (two ways to check) */
 		/* never expect write chunks without having offered RDMA */
@@ -802,7 +802,7 @@
 		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len);
 		break;
 
-	case __constant_htonl(RDMA_NOMSG):
+	case htonl(RDMA_NOMSG):
 		/* never expect read or write chunks, always reply chunks */
 		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
 		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 833b024..b97bd9f 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -14,6 +14,38 @@
 
 	  If unsure, say Y.
 
+config WIRELESS_OLD_REGULATORY
+	bool "Old wireless static regulatory defintions"
+	default n
+	---help---
+	  This option enables the old static regulatory information
+	  and uses it within the new framework. This is available
+	  temporarily as an option to help prevent immediate issues
+	  due to the switch to the new regulatory framework which
+	  does require a new userspace application which has the
+	  database of regulatory information (CRDA) and another for
+	  setting regulatory domains (iw).
+
+	  For more information see:
+
+	  http://wireless.kernel.org/en/developers/Regulatory/CRDA
+	  http://wireless.kernel.org/en/users/Documentation/iw
+
+	  Note that even with this option enabled, if you *do* have CRDA
+	  present then CRDA *will* be called to update the regulatory
+	  domain (for US and JP only). Setting the regulatory domain
+	  through iw is also still supported. This option mainly exists
+	  to keep around, for one more kernel release, the old static
+	  regulatory domain definitions and the old ieee80211_regdom
+	  module parameter. Both are being phased out and you should
+	  stop using them ASAP.
+
+	  Say N unless you cannot install a new userspace application
+	  or have one currently depending on the ieee80211_regdom module
+	  parameter and cannot port it to use the new userspace interfaces.
+
+	  This is scheduled for removal in 2.6.29.
+
 config WIRELESS_EXT
 	bool "Wireless extensions"
 	default n
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f1da0b9..5cadbeb 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
 /*
  * This is the linux wireless configuration interface.
  *
- * Copyright 2006, 2007		Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006-2008		Johannes Berg <johannes@sipsolutions.net>
  */
 
 #include <linux/if.h>
@@ -13,12 +13,14 @@
 #include <linux/debugfs.h>
 #include <linux/notifier.h>
 #include <linux/device.h>
+#include <linux/list.h>
 #include <net/genetlink.h>
 #include <net/cfg80211.h>
 #include <net/wireless.h>
 #include "nl80211.h"
 #include "core.h"
 #include "sysfs.h"
+#include "reg.h"
 
 /* name for sysfs, %d is appended */
 #define PHY_NAME "phy"
@@ -32,7 +34,6 @@
  * often because we need to do it for each command */
 LIST_HEAD(cfg80211_drv_list);
 DEFINE_MUTEX(cfg80211_drv_mutex);
-static int wiphy_counter;
 
 /* for debugfs */
 static struct dentry *ieee80211_debugfs_dir;
@@ -204,6 +205,8 @@
 
 struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
 {
+	static int wiphy_counter;
+
 	struct cfg80211_registered_device *drv;
 	int alloc_size;
 
@@ -220,21 +223,18 @@
 
 	mutex_lock(&cfg80211_drv_mutex);
 
-	drv->idx = wiphy_counter;
-
-	/* now increase counter for the next device unless
-	 * it has wrapped previously */
-	if (wiphy_counter >= 0)
-		wiphy_counter++;
-
-	mutex_unlock(&cfg80211_drv_mutex);
+	drv->idx = wiphy_counter++;
 
 	if (unlikely(drv->idx < 0)) {
+		wiphy_counter--;
+		mutex_unlock(&cfg80211_drv_mutex);
 		/* ugh, wrapped! */
 		kfree(drv);
 		return NULL;
 	}
 
+	mutex_unlock(&cfg80211_drv_mutex);
+
 	/* give it a proper name */
 	snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE,
 		 PHY_NAME "%d", drv->idx);
@@ -259,6 +259,13 @@
 	struct ieee80211_supported_band *sband;
 	bool have_band = false;
 	int i;
+	u16 ifmodes = wiphy->interface_modes;
+
+	/* sanity check ifmodes */
+	WARN_ON(!ifmodes);
+	ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
+	if (WARN_ON(ifmodes != wiphy->interface_modes))
+		wiphy->interface_modes = ifmodes;
 
 	/* sanity check supported bands/channels */
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -295,7 +302,9 @@
 	ieee80211_set_bitrate_flags(wiphy);
 
 	/* set up regulatory info */
-	wiphy_update_regulatory(wiphy);
+	mutex_lock(&cfg80211_reg_mutex);
+	wiphy_update_regulatory(wiphy, REGDOM_SET_BY_CORE);
+	mutex_unlock(&cfg80211_reg_mutex);
 
 	mutex_lock(&cfg80211_drv_mutex);
 
@@ -373,6 +382,8 @@
 
 	rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
 
+	WARN_ON(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_UNSPECIFIED);
+
 	switch (state) {
 	case NETDEV_REGISTER:
 		mutex_lock(&rdev->devlist_mtx);
@@ -404,7 +415,9 @@
 
 static int cfg80211_init(void)
 {
-	int err = wiphy_sysfs_init();
+	int err;
+
+	err = wiphy_sysfs_init();
 	if (err)
 		goto out_fail_sysfs;
 
@@ -418,8 +431,14 @@
 
 	ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
 
+	err = regulatory_init();
+	if (err)
+		goto out_fail_reg;
+
 	return 0;
 
+out_fail_reg:
+	debugfs_remove(ieee80211_debugfs_dir);
 out_fail_nl80211:
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 out_fail_notifier:
@@ -427,6 +446,7 @@
 out_fail_sysfs:
 	return err;
 }
+
 subsys_initcall(cfg80211_init);
 
 static void cfg80211_exit(void)
@@ -435,5 +455,6 @@
 	nl80211_exit();
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 	wiphy_sysfs_exit();
+	regulatory_exit();
 }
 module_exit(cfg80211_exit);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7a02c35..771cc5c 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,6 @@
 			       char *newname);
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
-void wiphy_update_regulatory(struct wiphy *wiphy);
+void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby);
 
 #endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 59eb2cf..572793c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -18,6 +18,7 @@
 #include <net/cfg80211.h>
 #include "core.h"
 #include "nl80211.h"
+#include "reg.h"
 
 /* the netlink family */
 static struct genl_family nl80211_fam = {
@@ -87,6 +88,16 @@
 	[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
 				.len = IEEE80211_MAX_MESH_ID_LEN },
 	[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+
+	[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+	[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
+
+	[NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
+	[NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 },
+	[NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 },
+
+	[NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
+					 .len = NL80211_HT_CAPABILITY_LEN },
 };
 
 /* message building helper */
@@ -106,10 +117,12 @@
 	struct nlattr *nl_bands, *nl_band;
 	struct nlattr *nl_freqs, *nl_freq;
 	struct nlattr *nl_rates, *nl_rate;
+	struct nlattr *nl_modes;
 	enum ieee80211_band band;
 	struct ieee80211_channel *chan;
 	struct ieee80211_rate *rate;
 	int i;
+	u16 ifmodes = dev->wiphy.interface_modes;
 
 	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
 	if (!hdr)
@@ -118,6 +131,20 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
 	NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
 
+	nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
+	if (!nl_modes)
+		goto nla_put_failure;
+
+	i = 0;
+	while (ifmodes) {
+		if (ifmodes & 1)
+			NLA_PUT_FLAG(msg, i);
+		ifmodes >>= 1;
+		i++;
+	}
+
+	nla_nest_end(msg, nl_modes);
+
 	nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
 	if (!nl_bands)
 		goto nla_put_failure;
@@ -272,7 +299,7 @@
 
 	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
 	NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
-	/* TODO: interface type */
+	NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -391,40 +418,56 @@
 	int err, ifindex;
 	enum nl80211_iftype type;
 	struct net_device *dev;
-	u32 flags;
+	u32 _flags, *flags = NULL;
 
 	memset(&params, 0, sizeof(params));
 
-	if (info->attrs[NL80211_ATTR_IFTYPE]) {
-		type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
-		if (type > NL80211_IFTYPE_MAX)
-			return -EINVAL;
-	} else
-		return -EINVAL;
-
 	err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
 	if (err)
 		return err;
 	ifindex = dev->ifindex;
+	type = dev->ieee80211_ptr->iftype;
 	dev_put(dev);
 
-	if (!drv->ops->change_virtual_intf) {
+	err = -EINVAL;
+	if (info->attrs[NL80211_ATTR_IFTYPE]) {
+		type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
+		if (type > NL80211_IFTYPE_MAX)
+			goto unlock;
+	}
+
+	if (!drv->ops->change_virtual_intf ||
+	    !(drv->wiphy.interface_modes & (1 << type))) {
 		err = -EOPNOTSUPP;
 		goto unlock;
 	}
 
-	if (type == NL80211_IFTYPE_MESH_POINT &&
-	    info->attrs[NL80211_ATTR_MESH_ID]) {
+	if (info->attrs[NL80211_ATTR_MESH_ID]) {
+		if (type != NL80211_IFTYPE_MESH_POINT) {
+			err = -EINVAL;
+			goto unlock;
+		}
 		params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
 		params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
 	}
 
+	if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
+		if (type != NL80211_IFTYPE_MONITOR) {
+			err = -EINVAL;
+			goto unlock;
+		}
+		err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
+					  &_flags);
+		if (!err)
+			flags = &_flags;
+	}
 	rtnl_lock();
-	err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
-				  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
-				  &flags);
 	err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
-					    type, err ? NULL : &flags, &params);
+					    type, flags, &params);
+
+	dev = __dev_get_by_index(&init_net, ifindex);
+	WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type));
+
 	rtnl_unlock();
 
  unlock:
@@ -455,7 +498,8 @@
 	if (IS_ERR(drv))
 		return PTR_ERR(drv);
 
-	if (!drv->ops->add_virtual_intf) {
+	if (!drv->ops->add_virtual_intf ||
+	    !(drv->wiphy.interface_modes & (1 << type))) {
 		err = -EOPNOTSUPP;
 		goto unlock;
 	}
@@ -1125,6 +1169,10 @@
 		params.listen_interval =
 		    nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
+		params.ht_capa =
+			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+
 	if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
 				&params.station_flags))
 		return -EINVAL;
@@ -1188,6 +1236,9 @@
 	params.listen_interval =
 		nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 	params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
+		params.ht_capa =
+			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 
 	if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
 				&params.station_flags))
@@ -1525,6 +1576,183 @@
 	return err;
 }
 
+static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *drv;
+	int err;
+	struct net_device *dev;
+	struct bss_parameters params;
+
+	memset(&params, 0, sizeof(params));
+	/* default to not changing parameters */
+	params.use_cts_prot = -1;
+	params.use_short_preamble = -1;
+	params.use_short_slot_time = -1;
+
+	if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
+		params.use_cts_prot =
+		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
+	if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
+		params.use_short_preamble =
+		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
+	if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
+		params.use_short_slot_time =
+		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
+
+	err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+	if (err)
+		return err;
+
+	if (!drv->ops->change_bss) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	rtnl_lock();
+	err = drv->ops->change_bss(&drv->wiphy, dev, &params);
+	rtnl_unlock();
+
+ out:
+	cfg80211_put_dev(drv);
+	dev_put(dev);
+	return err;
+}
+
+static const struct nla_policy
+	reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+	[NL80211_ATTR_REG_RULE_FLAGS]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_START]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_END]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_MAX_BW]	= { .type = NLA_U32 },
+	[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]	= { .type = NLA_U32 },
+	[NL80211_ATTR_POWER_RULE_MAX_EIRP]	= { .type = NLA_U32 },
+};
+
+static int parse_reg_rule(struct nlattr *tb[],
+	struct ieee80211_reg_rule *reg_rule)
+{
+	struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
+	struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
+
+	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
+		return -EINVAL;
+
+	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
+
+	freq_range->start_freq_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
+	freq_range->end_freq_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
+	freq_range->max_bandwidth_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+
+	power_rule->max_eirp =
+		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
+
+	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
+		power_rule->max_antenna_gain =
+			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
+
+	return 0;
+}
+
+static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
+{
+	int r;
+	char *data = NULL;
+
+	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+		return -EINVAL;
+
+	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+
+#ifdef CONFIG_WIRELESS_OLD_REGULATORY
+	/* We ignore world regdom requests with the old regdom setup */
+	if (is_world_regdom(data))
+		return -EINVAL;
+#endif
+	mutex_lock(&cfg80211_drv_mutex);
+	r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, NULL);
+	mutex_unlock(&cfg80211_drv_mutex);
+	return r;
+}
+
+static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
+	struct nlattr *nl_reg_rule;
+	char *alpha2 = NULL;
+	int rem_reg_rules = 0, r = 0;
+	u32 num_rules = 0, rule_idx = 0, size_of_regd;
+	struct ieee80211_regdomain *rd = NULL;
+
+	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_REG_RULES])
+		return -EINVAL;
+
+	alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+
+	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
+			rem_reg_rules) {
+		num_rules++;
+		if (num_rules > NL80211_MAX_SUPP_REG_RULES)
+			goto bad_reg;
+	}
+
+	if (!reg_is_valid_request(alpha2))
+		return -EINVAL;
+
+	size_of_regd = sizeof(struct ieee80211_regdomain) +
+		(num_rules * sizeof(struct ieee80211_reg_rule));
+
+	rd = kzalloc(size_of_regd, GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+
+	rd->n_reg_rules = num_rules;
+	rd->alpha2[0] = alpha2[0];
+	rd->alpha2[1] = alpha2[1];
+
+	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
+			rem_reg_rules) {
+		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
+			nla_data(nl_reg_rule), nla_len(nl_reg_rule),
+			reg_rule_policy);
+		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
+		if (r)
+			goto bad_reg;
+
+		rule_idx++;
+
+		if (rule_idx > NL80211_MAX_SUPP_REG_RULES)
+			goto bad_reg;
+	}
+
+	BUG_ON(rule_idx != num_rules);
+
+	mutex_lock(&cfg80211_drv_mutex);
+	r = set_regdom(rd);
+	mutex_unlock(&cfg80211_drv_mutex);
+	if (r)
+		goto bad_reg;
+
+	return r;
+
+bad_reg:
+	kfree(rd);
+	return -EINVAL;
+}
+
 static struct genl_ops nl80211_ops[] = {
 	{
 		.cmd = NL80211_CMD_GET_WIPHY,
@@ -1656,6 +1884,24 @@
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 	},
+	{
+		.cmd = NL80211_CMD_SET_BSS,
+		.doit = nl80211_set_bss,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = NL80211_CMD_SET_REG,
+		.doit = nl80211_set_reg,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = NL80211_CMD_REQ_SET_REG,
+		.doit = nl80211_req_set_reg,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
 };
 
 /* multicast groups */
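
For reference, the NL80211_CMD_REQ_SET_REG handler added above only takes a
two-letter NL80211_ATTR_REG_ALPHA2 string and defers the actual rules to CRDA.
The following is a minimal userspace sketch of such a request, assuming the
usual libnl-3 generic netlink helpers; it is illustrative only and not part of
this patch (iw is the real implementation and does proper error handling).

/* Illustrative sketch only: ask cfg80211 to switch regulatory domains,
 * roughly what "iw reg set US" does.  Assumes libnl-genl-3. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int request_regdom(const char *alpha2)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	int family, err = -1;

	sk = nl_socket_alloc();
	if (!sk)
		return -1;
	if (genl_connect(sk))
		goto out_sock;

	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		goto out_sock;

	msg = nlmsg_alloc();
	if (!msg)
		goto out_sock;

	/* Global, wiphy-less request: only the alpha2 attribute is needed */
	if (!genlmsg_put(msg, 0, 0, family, 0, 0,
			 NL80211_CMD_REQ_SET_REG, 0) ||
	    nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, alpha2))
		goto out_msg;

	err = nl_send_auto_complete(sk, msg);
out_msg:
	nlmsg_free(msg);
out_sock:
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}

Note that the kernel-side policy above declares NL80211_ATTR_REG_ALPHA2 as
NLA_STRING with .len = 2, and nl80211_req_set_reg() returns -EINVAL if the
attribute is missing.
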
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 855bff4..626dbb6 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2,179 +2,871 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2008	Luis R. Rodriguez <lrodriguez@atheros.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-/*
- * This regulatory domain control implementation is highly incomplete, it
- * only exists for the purpose of not regressing mac80211.
- *
- * For now, drivers can restrict the set of allowed channels by either
- * not registering those channels or setting the IEEE80211_CHAN_DISABLED
- * flag; that flag will only be *set* by this code, never *cleared.
+/**
+ * DOC: Wireless regulatory infrastructure
  *
  * The usual implementation is for a driver to read a device EEPROM to
  * determine which regulatory domain it should be operating under, then
  * looking up the allowable channels in a driver-local table and finally
  * registering those channels in the wiphy structure.
  *
- * Alternatively, drivers that trust the regulatory domain control here
- * will register a complete set of capabilities and the control code
- * will restrict the set by setting the IEEE80211_CHAN_* flags.
+ * Another layer of compliance enforcement is for drivers to apply
+ * their own compliance limits, which can be stored in the EEPROM;
+ * the host driver or firmware may then ensure these are used.
+ *
+ * In addition to all this we provide an extra layer of regulatory
+ * conformance. For drivers which do not have any regulatory
+ * information CRDA provides the complete regulatory solution.
+ * For others it provides a community effort on further restrictions
+ * to enhance compliance.
+ *
+ * Note: When number of rules --> infinity we will not be able to
+ * index on alpha2 any more, instead we'll probably have to
+ * rely on some SHA1 checksum of the regdomain for example.
+ *
  */
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/nl80211.h>
+#include <linux/platform_device.h>
 #include <net/wireless.h>
+#include <net/cfg80211.h>
 #include "core.h"
+#include "reg.h"
 
+/* wiphy is set if this request's initiator is REGDOM_SET_BY_DRIVER */
+struct regulatory_request {
+	struct list_head list;
+	struct wiphy *wiphy;
+	int granted;
+	enum reg_set_by initiator;
+	char alpha2[2];
+};
+
+static LIST_HEAD(regulatory_requests);
+DEFINE_MUTEX(cfg80211_reg_mutex);
+
+/* To trigger userspace events */
+static struct platform_device *reg_pdev;
+
+/* Keep the ordering from large to small */
+static u32 supported_bandwidths[] = {
+	MHZ_TO_KHZ(40),
+	MHZ_TO_KHZ(20),
+};
+
+/* Central wireless core regulatory domains, we only need two,
+ * the current one and a world regulatory domain in case we have no
+ * information to give us an alpha2 */
+static const struct ieee80211_regdomain *cfg80211_regdomain;
+
+/* We keep a static world regulatory domain in case of the absence of CRDA */
+static const struct ieee80211_regdomain world_regdom = {
+	.n_reg_rules = 1,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE(2412-10, 2462+10, 40, 6, 20,
+			NL80211_RRF_PASSIVE_SCAN |
+			NL80211_RRF_NO_IBSS),
+	}
+};
+
+static const struct ieee80211_regdomain *cfg80211_world_regdom =
+	&world_regdom;
+
+#ifdef CONFIG_WIRELESS_OLD_REGULATORY
 static char *ieee80211_regdom = "US";
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-struct ieee80211_channel_range {
-	short start_freq;
-	short end_freq;
-	int max_power;
-	int max_antenna_gain;
-	u32 flags;
-};
+/* We assume 40 MHz bandwidth for the old regulatory work.
+ * We emphasize that we are using the exact same frequencies
+ * as before */
 
-struct ieee80211_regdomain {
-	const char *code;
-	const struct ieee80211_channel_range *ranges;
-	int n_ranges;
-};
-
-#define RANGE_PWR(_start, _end, _pwr, _ag, _flags)	\
-	{ _start, _end, _pwr, _ag, _flags }
-
-
-/*
- * Ideally, in the future, these definitions will be loaded from a
- * userspace table via some daemon.
- */
-static const struct ieee80211_channel_range ieee80211_US_channels[] = {
-	/* IEEE 802.11b/g, channels 1..11 */
-	RANGE_PWR(2412, 2462, 27, 6, 0),
-	/* IEEE 802.11a, channel 36*/
-	RANGE_PWR(5180, 5180, 23, 6, 0),
-	/* IEEE 802.11a, channel 40*/
-	RANGE_PWR(5200, 5200, 23, 6, 0),
-	/* IEEE 802.11a, channel 44*/
-	RANGE_PWR(5220, 5220, 23, 6, 0),
-	/* IEEE 802.11a, channels 48..64 */
-	RANGE_PWR(5240, 5320, 23, 6, 0),
-	/* IEEE 802.11a, channels 149..165, outdoor */
-	RANGE_PWR(5745, 5825, 30, 6, 0),
-};
-
-static const struct ieee80211_channel_range ieee80211_JP_channels[] = {
-	/* IEEE 802.11b/g, channels 1..14 */
-	RANGE_PWR(2412, 2484, 20, 6, 0),
-	/* IEEE 802.11a, channels 34..48 */
-	RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN),
-	/* IEEE 802.11a, channels 52..64 */
-	RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_RADAR),
-};
-
-static const struct ieee80211_channel_range ieee80211_EU_channels[] = {
-	/* IEEE 802.11b/g, channels 1..13 */
-	RANGE_PWR(2412, 2472, 20, 6, 0),
-	/* IEEE 802.11a, channel 36*/
-	RANGE_PWR(5180, 5180, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
-	/* IEEE 802.11a, channel 40*/
-	RANGE_PWR(5200, 5200, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
-	/* IEEE 802.11a, channel 44*/
-	RANGE_PWR(5220, 5220, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
-	/* IEEE 802.11a, channels 48..64 */
-	RANGE_PWR(5240, 5320, 23, 6, IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_RADAR),
-	/* IEEE 802.11a, channels 100..140 */
-	RANGE_PWR(5500, 5700, 30, 6, IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_RADAR),
-};
-
-#define REGDOM(_code)							\
-	{								\
-		.code = __stringify(_code),				\
-		.ranges = ieee80211_ ##_code## _channels,		\
-		.n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels),	\
-	}
-
-static const struct ieee80211_regdomain ieee80211_regdoms[] = {
-	REGDOM(US),
-	REGDOM(JP),
-	REGDOM(EU),
-};
-
-
-static const struct ieee80211_regdomain *get_regdom(void)
-{
-	static const struct ieee80211_channel_range
-	ieee80211_world_channels[] = {
+static const struct ieee80211_regdomain us_regdom = {
+	.n_reg_rules = 6,
+	.alpha2 =  "US",
+	.reg_rules = {
 		/* IEEE 802.11b/g, channels 1..11 */
-		RANGE_PWR(2412, 2462, 27, 6, 0),
-	};
-	static const struct ieee80211_regdomain regdom_world = REGDOM(world);
-	int i;
+		REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
+		/* IEEE 802.11a, channel 36 */
+		REG_RULE(5180-10, 5180+10, 40, 6, 23, 0),
+		/* IEEE 802.11a, channel 40 */
+		REG_RULE(5200-10, 5200+10, 40, 6, 23, 0),
+		/* IEEE 802.11a, channel 44 */
+		REG_RULE(5220-10, 5220+10, 40, 6, 23, 0),
+		/* IEEE 802.11a, channels 48..64 */
+		REG_RULE(5240-10, 5320+10, 40, 6, 23, 0),
+		/* IEEE 802.11a, channels 149..165, outdoor */
+		REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
+	}
+};
 
-	for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++)
-		if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0)
-			return &ieee80211_regdoms[i];
+static const struct ieee80211_regdomain jp_regdom = {
+	.n_reg_rules = 3,
+	.alpha2 =  "JP",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..14 */
+		REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channels 34..48 */
+		REG_RULE(5170-10, 5240+10, 40, 6, 20,
+			NL80211_RRF_PASSIVE_SCAN),
+		/* IEEE 802.11a, channels 52..64 */
+		REG_RULE(5260-10, 5320+10, 40, 6, 20,
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
+	}
+};
 
-	return &regdom_world;
+static const struct ieee80211_regdomain eu_regdom = {
+	.n_reg_rules = 6,
+	/* This alpha2 is bogus, we leave it here just for stupid
+	 * backward compatibility */
+	.alpha2 =  "EU",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..13 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 36 */
+		REG_RULE(5180-10, 5180+10, 40, 6, 23,
+			NL80211_RRF_PASSIVE_SCAN),
+		/* IEEE 802.11a, channel 40 */
+		REG_RULE(5200-10, 5200+10, 40, 6, 23,
+			NL80211_RRF_PASSIVE_SCAN),
+		/* IEEE 802.11a, channel 44 */
+		REG_RULE(5220-10, 5220+10, 40, 6, 23,
+			NL80211_RRF_PASSIVE_SCAN),
+		/* IEEE 802.11a, channels 48..64 */
+		REG_RULE(5240-10, 5320+10, 40, 6, 20,
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
+		/* IEEE 802.11a, channels 100..140 */
+		REG_RULE(5500-10, 5700+10, 40, 6, 30,
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
+	}
+};
+
+static const struct ieee80211_regdomain *static_regdom(char *alpha2)
+{
+	if (alpha2[0] == 'U' && alpha2[1] == 'S')
+		return &us_regdom;
+	if (alpha2[0] == 'J' && alpha2[1] == 'P')
+		return &jp_regdom;
+	if (alpha2[0] == 'E' && alpha2[1] == 'U')
+		return &eu_regdom;
+	/* Default, as per the old rules */
+	return &us_regdom;
 }
 
+static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
+{
+	if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom)
+		return true;
+	return false;
+}
+#else
+static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
+{
+	return false;
+}
+#endif
 
-static void handle_channel(struct ieee80211_channel *chan,
-			   const struct ieee80211_regdomain *rd)
+static void reset_regdomains(void)
+{
+	/* avoid freeing static information or freeing something twice */
+	if (cfg80211_regdomain == cfg80211_world_regdom)
+		cfg80211_regdomain = NULL;
+	if (cfg80211_world_regdom == &world_regdom)
+		cfg80211_world_regdom = NULL;
+	if (cfg80211_regdomain == &world_regdom)
+		cfg80211_regdomain = NULL;
+	if (is_old_static_regdom(cfg80211_regdomain))
+		cfg80211_regdomain = NULL;
+
+	kfree(cfg80211_regdomain);
+	kfree(cfg80211_world_regdom);
+
+	cfg80211_world_regdom = &world_regdom;
+	cfg80211_regdomain = NULL;
+}
+
+/* Dynamic world regulatory domain requested by the wireless
+ * core upon initialization */
+static void update_world_regdomain(const struct ieee80211_regdomain *rd)
+{
+	BUG_ON(list_empty(&regulatory_requests));
+
+	reset_regdomains();
+
+	cfg80211_world_regdom = rd;
+	cfg80211_regdomain = rd;
+}
+
+bool is_world_regdom(const char *alpha2)
+{
+	if (!alpha2)
+		return false;
+	if (alpha2[0] == '0' && alpha2[1] == '0')
+		return true;
+	return false;
+}
+
+static bool is_alpha2_set(const char *alpha2)
+{
+	if (!alpha2)
+		return false;
+	if (alpha2[0] != 0 && alpha2[1] != 0)
+		return true;
+	return false;
+}
+
+static bool is_alpha_upper(char letter)
+{
+	/* ASCII A - Z */
+	if (letter >= 65 && letter <= 90)
+		return true;
+	return false;
+}
+
+static bool is_unknown_alpha2(const char *alpha2)
+{
+	if (!alpha2)
+		return false;
+	/* Special case where regulatory domain was built by driver
+	 * but a specific alpha2 cannot be determined */
+	if (alpha2[0] == '9' && alpha2[1] == '9')
+		return true;
+	return false;
+}
+
+static bool is_an_alpha2(const char *alpha2)
+{
+	if (!alpha2)
+		return false;
+	if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1]))
+		return true;
+	return false;
+}
+
+static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
+{
+	if (!alpha2_x || !alpha2_y)
+		return false;
+	if (alpha2_x[0] == alpha2_y[0] &&
+		alpha2_x[1] == alpha2_y[1])
+		return true;
+	return false;
+}
+
+static bool regdom_changed(const char *alpha2)
+{
+	if (!cfg80211_regdomain)
+		return true;
+	if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
+		return false;
+	return true;
+}
+
+/* This lets us keep the regulatory data in userspace, where it can
+ * be updated as regulatory law changes. */
+static int call_crda(const char *alpha2)
+{
+	char country_env[9 + 2] = "COUNTRY=";
+	char *envp[] = {
+		country_env,
+		NULL
+	};
+
+	if (!is_world_regdom((char *) alpha2))
+		printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n",
+			alpha2[0], alpha2[1]);
+	else
+		printk(KERN_INFO "cfg80211: Calling CRDA to update world "
+			"regulatory domain\n");
+
+	country_env[8] = alpha2[0];
+	country_env[9] = alpha2[1];
+
+	return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
+/* This has the logic which determines when a new request
+ * should be ignored. */
+static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
+	char *alpha2, struct ieee80211_regdomain *rd)
+{
+	struct regulatory_request *last_request = NULL;
+
+	/* All initial requests are respected */
+	if (list_empty(&regulatory_requests))
+		return 0;
+
+	last_request = list_first_entry(&regulatory_requests,
+		struct regulatory_request, list);
+
+	switch (set_by) {
+	case REGDOM_SET_BY_INIT:
+		return -EINVAL;
+	case REGDOM_SET_BY_CORE:
+		/* Always respect new wireless core hints, should only
+		 * come in for updating the world regulatory domain at init
+		 * anyway */
+		return 0;
+	case REGDOM_SET_BY_COUNTRY_IE:
+		if (last_request->initiator == set_by) {
+			if (last_request->wiphy != wiphy) {
+				/* Two cards with two APs claiming
+				 * different Country IE alpha2s!
+				 * You're special!! */
+				if (!alpha2_equal(last_request->alpha2,
+						cfg80211_regdomain->alpha2)) {
+					/* XXX: Deal with conflict, consider
+					 * building a new one out of the
+					 * intersection */
+					WARN_ON(1);
+					return -EOPNOTSUPP;
+				}
+				return -EALREADY;
+			}
+			/* Two consecutive Country IE hints on the same wiphy */
+			if (!alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
+				return 0;
+			return -EALREADY;
+		}
+		if (WARN(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2),
+				"Invalid Country IE regulatory hint passed "
+				"to the wireless core\n"))
+			return -EINVAL;
+		/* We ignore Country IE hints for now, as we haven't yet
+		 * added the dot11MultiDomainCapabilityEnabled flag
+		 * for wiphys */
+		return 1;
+	case REGDOM_SET_BY_DRIVER:
+		BUG_ON(!wiphy);
+		if (last_request->initiator == set_by) {
+			/* Two separate drivers hinting different things,
+			 * this is possible if you have two devices present
+			 * on a system with different EEPROM regulatory
+			 * readings. XXX: Do intersection, we support only
+			 * the first regulatory hint for now */
+			if (last_request->wiphy != wiphy)
+				return -EALREADY;
+			if (rd)
+				return -EALREADY;
+			/* Driver should not be trying to hint different
+			 * regulatory domains! */
+			BUG_ON(!alpha2_equal(alpha2,
+					cfg80211_regdomain->alpha2));
+			return -EALREADY;
+		}
+		if (last_request->initiator == REGDOM_SET_BY_CORE)
+			return 0;
+		/* XXX: Handle intersection, and add the
+		 * dot11MultiDomainCapabilityEnabled flag to wiphy. For now
+		 * we assume the driver has this set to false, following the
+		 * 802.11d dot11MultiDomainCapabilityEnabled documentation */
+		if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
+			return 0;
+		return 0;
+	case REGDOM_SET_BY_USER:
+		if (last_request->initiator == set_by ||
+				last_request->initiator == REGDOM_SET_BY_CORE)
+			return 0;
+		/* Drivers can use their wiphy's reg_notifier()
+		 * to override any information */
+		if (last_request->initiator == REGDOM_SET_BY_DRIVER)
+			return 0;
+		/* XXX: Handle intersection */
+		if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
+			return -EOPNOTSUPP;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool __reg_is_valid_request(const char *alpha2,
+	struct regulatory_request **request)
+{
+	struct regulatory_request *req;
+	if (list_empty(&regulatory_requests))
+		return false;
+	list_for_each_entry(req, &regulatory_requests, list) {
+		if (alpha2_equal(req->alpha2, alpha2)) {
+			*request = req;
+			return true;
+		}
+	}
+	return false;
+}
+
+/* Used by nl80211 before kmalloc'ing our regulatory domain */
+bool reg_is_valid_request(const char *alpha2)
+{
+	struct regulatory_request *request = NULL;
+	return __reg_is_valid_request(alpha2, &request);
+}
+
+/* Sanity check on a regulatory rule */
+static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
+{
+	const struct ieee80211_freq_range *freq_range = &rule->freq_range;
+	u32 freq_diff;
+
+	if (freq_range->start_freq_khz == 0 || freq_range->end_freq_khz == 0)
+		return false;
+
+	if (freq_range->start_freq_khz > freq_range->end_freq_khz)
+		return false;
+
+	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
+
+	if (freq_range->max_bandwidth_khz > freq_diff)
+		return false;
+
+	return true;
+}
+
+static bool is_valid_rd(const struct ieee80211_regdomain *rd)
+{
+	const struct ieee80211_reg_rule *reg_rule = NULL;
+	unsigned int i;
+
+	if (!rd->n_reg_rules)
+		return false;
+
+	for (i = 0; i < rd->n_reg_rules; i++) {
+		reg_rule = &rd->reg_rules[i];
+		if (!is_valid_reg_rule(reg_rule))
+			return false;
+	}
+
+	return true;
+}
+
+/* Returns value in KHz */
+static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
+	u32 freq)
+{
+	unsigned int i;
+	for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) {
+		u32 start_freq_khz = freq - supported_bandwidths[i]/2;
+		u32 end_freq_khz = freq + supported_bandwidths[i]/2;
+		if (start_freq_khz >= freq_range->start_freq_khz &&
+			end_freq_khz <= freq_range->end_freq_khz)
+			return supported_bandwidths[i];
+	}
+	return 0;
+}
+
+/* XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
+ * want to just have the channel structure use these */
+static u32 map_regdom_flags(u32 rd_flags)
+{
+	u32 channel_flags = 0;
+	if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
+		channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+	if (rd_flags & NL80211_RRF_NO_IBSS)
+		channel_flags |= IEEE80211_CHAN_NO_IBSS;
+	if (rd_flags & NL80211_RRF_DFS)
+		channel_flags |= IEEE80211_CHAN_RADAR;
+	return channel_flags;
+}
+
+/**
+ * freq_reg_info - get regulatory information for the given frequency
+ * @center_freq: Frequency in KHz for which we want regulatory information
+ * @bandwidth: the bandwidth requirement you have in KHz; if you do not have one
+ * 	you can set this to 0. If this frequency is allowed we then set
+ * 	this value to the maximum allowed bandwidth.
+ * @reg_rule: the regulatory rule which we have for this frequency
+ *
+ * Use this function to get the regulatory rule for a specific frequency.
+ */
+static int freq_reg_info(u32 center_freq, u32 *bandwidth,
+			 const struct ieee80211_reg_rule **reg_rule)
 {
 	int i;
-	u32 flags = chan->orig_flags;
-	const struct ieee80211_channel_range *rg = NULL;
+	u32 max_bandwidth = 0;
 
-	for (i = 0; i < rd->n_ranges; i++) {
-		if (rd->ranges[i].start_freq <= chan->center_freq &&
-		    chan->center_freq <= rd->ranges[i].end_freq) {
-			rg = &rd->ranges[i];
+	if (!cfg80211_regdomain)
+		return -EINVAL;
+
+	for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
+		const struct ieee80211_reg_rule *rr;
+		const struct ieee80211_freq_range *fr = NULL;
+		const struct ieee80211_power_rule *pr = NULL;
+
+		rr = &cfg80211_regdomain->reg_rules[i];
+		fr = &rr->freq_range;
+		pr = &rr->power_rule;
+		max_bandwidth = freq_max_bandwidth(fr, center_freq);
+		if (max_bandwidth && *bandwidth <= max_bandwidth) {
+			*reg_rule = rr;
+			*bandwidth = max_bandwidth;
 			break;
 		}
 	}
 
-	if (!rg) {
-		/* not found */
+	return !max_bandwidth;
+}
+
+static void handle_channel(struct ieee80211_channel *chan)
+{
+	int r;
+	u32 flags = chan->orig_flags;
+	u32 max_bandwidth = 0;
+	const struct ieee80211_reg_rule *reg_rule = NULL;
+	const struct ieee80211_power_rule *power_rule = NULL;
+
+	r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
+		&max_bandwidth, &reg_rule);
+
+	if (r) {
 		flags |= IEEE80211_CHAN_DISABLED;
 		chan->flags = flags;
 		return;
 	}
 
-	chan->flags = flags;
+	power_rule = &reg_rule->power_rule;
+
+	chan->flags = flags | map_regdom_flags(reg_rule->flags);
 	chan->max_antenna_gain = min(chan->orig_mag,
-					 rg->max_antenna_gain);
+		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
+	chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth);
 	if (chan->orig_mpwr)
-		chan->max_power = min(chan->orig_mpwr, rg->max_power);
+		chan->max_power = min(chan->orig_mpwr,
+			(int) MBM_TO_DBM(power_rule->max_eirp));
 	else
-		chan->max_power = rg->max_power;
+		chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
 }
 
-static void handle_band(struct ieee80211_supported_band *sband,
-			const struct ieee80211_regdomain *rd)
+static void handle_band(struct ieee80211_supported_band *sband)
 {
 	int i;
 
 	for (i = 0; i < sband->n_channels; i++)
-		handle_channel(&sband->channels[i], rd);
+		handle_channel(&sband->channels[i]);
 }
 
-void wiphy_update_regulatory(struct wiphy *wiphy)
+static void update_all_wiphy_regulatory(enum reg_set_by setby)
+{
+	struct cfg80211_registered_device *drv;
+
+	list_for_each_entry(drv, &cfg80211_drv_list, list)
+		wiphy_update_regulatory(&drv->wiphy, setby);
+}
+
+void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
 {
 	enum ieee80211_band band;
-	const struct ieee80211_regdomain *rd = get_regdom();
-
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (wiphy->bands[band])
-			handle_band(wiphy->bands[band], rd);
+			handle_band(wiphy->bands[band]);
+		if (wiphy->reg_notifier)
+			wiphy->reg_notifier(wiphy, setby);
+	}
+}
+
+/* Caller must hold &cfg80211_drv_mutex */
+int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
+		      const char *alpha2, struct ieee80211_regdomain *rd)
+{
+	struct regulatory_request *request;
+	char *rd_alpha2;
+	int r = 0;
+
+	r = ignore_request(wiphy, set_by, (char *) alpha2, rd);
+	if (r)
+		return r;
+
+	if (rd)
+		rd_alpha2 = rd->alpha2;
+	else
+		rd_alpha2 = (char *) alpha2;
+
+	switch (set_by) {
+	case REGDOM_SET_BY_CORE:
+	case REGDOM_SET_BY_COUNTRY_IE:
+	case REGDOM_SET_BY_DRIVER:
+	case REGDOM_SET_BY_USER:
+		request = kzalloc(sizeof(struct regulatory_request),
+			GFP_KERNEL);
+		if (!request)
+			return -ENOMEM;
+
+		request->alpha2[0] = rd_alpha2[0];
+		request->alpha2[1] = rd_alpha2[1];
+		request->initiator = set_by;
+		request->wiphy = wiphy;
+
+		list_add_tail(&request->list, &regulatory_requests);
+		if (rd)
+			break;
+		r = call_crda(alpha2);
+#ifndef CONFIG_WIRELESS_OLD_REGULATORY
+		if (r)
+			printk(KERN_ERR "cfg80211: Failed calling CRDA\n");
+#endif
+		break;
+	default:
+		r = -ENOTSUPP;
+		break;
+	}
+
+	return r;
+}
+
+/* If rd is not NULL and if this call fails the caller must free it */
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2,
+	struct ieee80211_regdomain *rd)
+{
+	int r;
+	BUG_ON(!rd && !alpha2);
+
+	mutex_lock(&cfg80211_drv_mutex);
+
+	r = __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER, alpha2, rd);
+	if (r || !rd)
+		goto unlock_and_exit;
+
+	/* If the driver passed a regulatory domain we skipped asking
+	 * userspace for one so we can now go ahead and set it */
+	r = set_regdom(rd);
+
+unlock_and_exit:
+	mutex_unlock(&cfg80211_drv_mutex);
+	return r;
+}
+EXPORT_SYMBOL(regulatory_hint);
+
+
+static void print_rd_rules(const struct ieee80211_regdomain *rd)
+{
+	unsigned int i;
+	const struct ieee80211_reg_rule *reg_rule = NULL;
+	const struct ieee80211_freq_range *freq_range = NULL;
+	const struct ieee80211_power_rule *power_rule = NULL;
+
+	printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), "
+		"(max_antenna_gain, max_eirp)\n");
+
+	for (i = 0; i < rd->n_reg_rules; i++) {
+		reg_rule = &rd->reg_rules[i];
+		freq_range = &reg_rule->freq_range;
+		power_rule = &reg_rule->power_rule;
+
+		/* There may not be documentation for max antenna gain
+		 * in certain regions */
+		if (power_rule->max_antenna_gain)
+			printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
+				"(%d mBi, %d mBm)\n",
+				freq_range->start_freq_khz,
+				freq_range->end_freq_khz,
+				freq_range->max_bandwidth_khz,
+				power_rule->max_antenna_gain,
+				power_rule->max_eirp);
+		else
+			printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
+				"(N/A, %d mBm)\n",
+				freq_range->start_freq_khz,
+				freq_range->end_freq_khz,
+				freq_range->max_bandwidth_khz,
+				power_rule->max_eirp);
+	}
+}
+
+static void print_regdomain(const struct ieee80211_regdomain *rd)
+{
+
+	if (is_world_regdom(rd->alpha2))
+		printk(KERN_INFO "cfg80211: World regulatory "
+			"domain updated:\n");
+	else {
+		if (is_unknown_alpha2(rd->alpha2))
+			printk(KERN_INFO "cfg80211: Regulatory domain "
+				"changed to driver built-in settings "
+				"(unknown country)\n");
+		else
+			printk(KERN_INFO "cfg80211: Regulatory domain "
+				"changed to country: %c%c\n",
+				rd->alpha2[0], rd->alpha2[1]);
+	}
+	print_rd_rules(rd);
+}
+
+void print_regdomain_info(const struct ieee80211_regdomain *rd)
+{
+	printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n",
+		rd->alpha2[0], rd->alpha2[1]);
+	print_rd_rules(rd);
+}
+
+static int __set_regdom(const struct ieee80211_regdomain *rd)
+{
+	struct regulatory_request *request = NULL;
+
+	/* Some basic sanity checks first */
+
+	if (is_world_regdom(rd->alpha2)) {
+		if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
+			return -EINVAL;
+		update_world_regdomain(rd);
+		return 0;
+	}
+
+	if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
+			!is_unknown_alpha2(rd->alpha2))
+		return -EINVAL;
+
+	if (list_empty(&regulatory_requests))
+		return -EINVAL;
+
+	/* allow overriding the static definitions if CRDA is present */
+	if (!is_old_static_regdom(cfg80211_regdomain) &&
+	    !regdom_changed(rd->alpha2))
+		return -EINVAL;
+
+	/* Now lets set the regulatory domain, update all driver channels
+	 * and finally inform them of what we have done, in case they want
+	 * to review or adjust their own settings based on their own
+	 * internal EEPROM data */
+
+	if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
+		return -EINVAL;
+
+	reset_regdomains();
+
+	/* Country IE parsing coming soon */
+	switch (request->initiator) {
+	case REGDOM_SET_BY_CORE:
+	case REGDOM_SET_BY_DRIVER:
+	case REGDOM_SET_BY_USER:
+		if (!is_valid_rd(rd)) {
+			printk(KERN_ERR "cfg80211: Invalid "
+				"regulatory domain detected:\n");
+			print_regdomain_info(rd);
+			return -EINVAL;
+		}
+		break;
+	case REGDOM_SET_BY_COUNTRY_IE: /* Not yet */
+		WARN_ON(1);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* Tada! */
+	cfg80211_regdomain = rd;
+	request->granted = 1;
+
+	return 0;
+}
+
+
+/* Use this call to set the current regulatory domain. Conflicts with
+ * multiple drivers can be ironed out later. Caller must've already
+ * kmalloc'd the rd structure. If this call fails you should kfree()
+ * the passed rd. Caller must hold cfg80211_drv_mutex */
+int set_regdom(const struct ieee80211_regdomain *rd)
+{
+	struct regulatory_request *this_request = NULL, *prev_request = NULL;
+	int r;
+
+	if (!list_empty(&regulatory_requests))
+		prev_request = list_first_entry(&regulatory_requests,
+			struct regulatory_request, list);
+
+	/* Note that this doesn't update the wiphys, this is done below */
+	r = __set_regdom(rd);
+	if (r)
+		return r;
+
+	BUG_ON((!__reg_is_valid_request(rd->alpha2, &this_request)));
+
+	/* The initial standard core update of the world regulatory domain, no
+	 * need to keep that request info around if it didn't fail. */
+	if (is_world_regdom(rd->alpha2) &&
+			this_request->initiator == REGDOM_SET_BY_CORE &&
+			this_request->granted) {
+		list_del(&this_request->list);
+		kfree(this_request);
+		this_request = NULL;
+	}
+
+	/* Remove old requests, we only leave behind the last one */
+	if (prev_request) {
+		list_del(&prev_request->list);
+		kfree(prev_request);
+		prev_request = NULL;
+	}
+
+	/* This would make this whole thing pointless */
+	BUG_ON(rd != cfg80211_regdomain);
+
+	/* update all wiphys now with the new established regulatory domain */
+	update_all_wiphy_regulatory(this_request->initiator);
+
+	print_regdomain(rd);
+
+	return r;
+}
+
+int regulatory_init(void)
+{
+	int err;
+
+	reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
+	if (IS_ERR(reg_pdev))
+		return PTR_ERR(reg_pdev);
+
+#ifdef CONFIG_WIRELESS_OLD_REGULATORY
+	cfg80211_regdomain = static_regdom(ieee80211_regdom);
+
+	printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
+	print_regdomain_info(cfg80211_regdomain);
+	/* The old code still requests a new regdomain and if
+	 * you have CRDA you get it updated, otherwise you get
+	 * stuck with the static values. We ignore "EU" code as
+	 * that is not a valid ISO / IEC 3166 alpha2 */
+	if (ieee80211_regdom[0] != 'E' || ieee80211_regdom[1] != 'U')
+		err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE,
+					ieee80211_regdom, NULL);
+#else
+	cfg80211_regdomain = cfg80211_world_regdom;
+
+	err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE, "00", NULL);
+	if (err)
+		printk(KERN_ERR "cfg80211: calling CRDA failed - "
+		       "unable to update world regulatory domain, "
+		       "using static definition\n");
+#endif
+
+	return 0;
+}
+
+void regulatory_exit(void)
+{
+	struct regulatory_request *req, *req_tmp;
+
+	mutex_lock(&cfg80211_drv_mutex);
+
+	reset_regdomains();
+
+	list_for_each_entry_safe(req, req_tmp, &regulatory_requests, list) {
+		list_del(&req->list);
+		kfree(req);
+	}
+	platform_device_unregister(reg_pdev);
+
+	mutex_unlock(&cfg80211_drv_mutex);
 }
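
To illustrate the driver-facing side of the API exported above: a driver can
either hint just the alpha2 it reads from its EEPROM (letting CRDA supply the
actual rules) or hand in a complete ieee80211_regdomain built with REG_RULE().
The sketch below is illustrative only and not part of this patch; the function
names, the "99" alpha2 choice and the rules themselves are made up for the
example, and the includes assume the cfg80211 headers of this series.

/* Illustrative driver-side sketch, not part of this patch. */
#include <linux/slab.h>
#include <linux/string.h>
#include <net/cfg80211.h>
#include <net/wireless.h>

/* Simplest form: hint only the alpha2 found in the EEPROM and let
 * CRDA (if present) provide the regulatory rules. */
static int example_hint_alpha2(struct wiphy *wiphy, const char *eeprom_alpha2)
{
	return regulatory_hint(wiphy, eeprom_alpha2, NULL);
}

/* Alternatively, pass a complete regdomain.  "99" marks a driver-built
 * domain whose country cannot be determined (see is_unknown_alpha2()). */
static const struct ieee80211_regdomain example_regdom = {
	.n_reg_rules = 2,
	.alpha2 = "99",
	.reg_rules = {
		/* IEEE 802.11b/g, channels 1..11 */
		REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
		/* 5 GHz, passive scan only */
		REG_RULE(5180-10, 5320+10, 40, 6, 20,
			 NL80211_RRF_PASSIVE_SCAN),
	}
};

static int example_hint_regdom(struct wiphy *wiphy)
{
	struct ieee80211_regdomain *rd;
	size_t size = sizeof(example_regdom) +
		example_regdom.n_reg_rules * sizeof(struct ieee80211_reg_rule);
	int err;

	/* regulatory_hint()/set_regdom() take ownership on success;
	 * on failure the caller must free the copy it passed in. */
	rd = kmemdup(&example_regdom, size, GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	err = regulatory_hint(wiphy, NULL, rd);
	if (err)
		kfree(rd);
	return err;
}

A driver would typically issue such a hint around wiphy registration time;
wiphy_update_regulatory() then reapplies the resulting rules to every
registered wiphy's channels.
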
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
new file mode 100644
index 0000000..a333628
--- /dev/null
+++ b/net/wireless/reg.h
@@ -0,0 +1,13 @@
+#ifndef __NET_WIRELESS_REG_H
+#define __NET_WIRELESS_REG_H
+
+extern struct mutex cfg80211_reg_mutex;
+bool is_world_regdom(const char *alpha2);
+bool reg_is_valid_request(const char *alpha2);
+
+int regulatory_init(void);
+void regulatory_exit(void);
+
+int set_regdom(const struct ieee80211_regdomain *rd);
+
+#endif  /* __NET_WIRELESS_REG_H */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7754b1..ef9ccbc 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -34,7 +34,7 @@
 
 #include "xfrm_hash.h"
 
-int sysctl_xfrm_larval_drop __read_mostly;
+int sysctl_xfrm_larval_drop __read_mostly = 1;
 
 #ifdef CONFIG_XFRM_STATISTICS
 DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0a8f09c..053970e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -59,6 +59,14 @@
 static unsigned int xfrm_state_num;
 static unsigned int xfrm_state_genid;
 
+/* Counter indicating ongoing walk, protected by xfrm_state_lock. */
+static unsigned long xfrm_state_walk_ongoing;
+/* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
+static unsigned long xfrm_state_walk_completed;
+
+/* List of outstanding state walks used to set the completed counter.  */
+static LIST_HEAD(xfrm_state_walks);
+
 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
@@ -191,7 +199,8 @@
 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
 
 static struct work_struct xfrm_state_gc_work;
-static HLIST_HEAD(xfrm_state_gc_list);
+static LIST_HEAD(xfrm_state_gc_leftovers);
+static LIST_HEAD(xfrm_state_gc_list);
 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 
 int __xfrm_state_delete(struct xfrm_state *x);
@@ -403,17 +412,23 @@
 
 static void xfrm_state_gc_task(struct work_struct *data)
 {
-	struct xfrm_state *x;
-	struct hlist_node *entry, *tmp;
-	struct hlist_head gc_list;
+	struct xfrm_state *x, *tmp;
+	unsigned long completed;
 
+	mutex_lock(&xfrm_cfg_mutex);
 	spin_lock_bh(&xfrm_state_gc_lock);
-	gc_list.first = xfrm_state_gc_list.first;
-	INIT_HLIST_HEAD(&xfrm_state_gc_list);
+	list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
 	spin_unlock_bh(&xfrm_state_gc_lock);
 
-	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
+	completed = xfrm_state_walk_completed;
+	mutex_unlock(&xfrm_cfg_mutex);
+
+	list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
+		if ((long)(x->lastused - completed) > 0)
+			break;
+		list_del(&x->gclist);
 		xfrm_state_gc_destroy(x);
+	}
 
 	wake_up(&km_waitq);
 }
@@ -540,12 +555,8 @@
 {
 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
 
-	spin_lock_bh(&xfrm_state_lock);
-	list_del(&x->all);
-	spin_unlock_bh(&xfrm_state_lock);
-
 	spin_lock_bh(&xfrm_state_gc_lock);
-	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
+	list_add_tail(&x->gclist, &xfrm_state_gc_list);
 	spin_unlock_bh(&xfrm_state_gc_lock);
 	schedule_work(&xfrm_state_gc_work);
 }
@@ -558,6 +569,8 @@
 	if (x->km.state != XFRM_STATE_DEAD) {
 		x->km.state = XFRM_STATE_DEAD;
 		spin_lock(&xfrm_state_lock);
+		x->lastused = xfrm_state_walk_ongoing;
+		list_del_rcu(&x->all);
 		hlist_del(&x->bydst);
 		hlist_del(&x->bysrc);
 		if (x->id.spi)
@@ -1594,6 +1607,41 @@
 }
 EXPORT_SYMBOL(xfrm_state_walk);
 
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
+{
+	walk->proto = proto;
+	walk->state = NULL;
+	walk->count = 0;
+	list_add_tail(&walk->list, &xfrm_state_walks);
+	walk->genid = ++xfrm_state_walk_ongoing;
+}
+EXPORT_SYMBOL(xfrm_state_walk_init);
+
+void xfrm_state_walk_done(struct xfrm_state_walk *walk)
+{
+	struct list_head *prev;
+
+	if (walk->state != NULL) {
+		xfrm_state_put(walk->state);
+		walk->state = NULL;
+	}
+
+	prev = walk->list.prev;
+	list_del(&walk->list);
+
+	if (prev != &xfrm_state_walks) {
+		list_entry(prev, struct xfrm_state_walk, list)->genid =
+			walk->genid;
+		return;
+	}
+
+	xfrm_state_walk_completed = walk->genid;
+
+	if (!list_empty(&xfrm_state_gc_leftovers))
+		schedule_work(&xfrm_state_gc_work);
+}
+EXPORT_SYMBOL(xfrm_state_walk_done);
+
 
 void xfrm_replay_notify(struct xfrm_state *x, int event)
 {